authorMichael Ellerman <mpe@ellerman.id.au>2023-10-27 20:58:03 +1100
committerMichael Ellerman <mpe@ellerman.id.au>2023-10-27 20:58:03 +1100
commit303d77a6e1707498f09c9d8ee91b1dc07ca315a5 (patch)
tree32832416e9a727c4f6c17b1500c28cf117ea4cea
parent36e826b568e412f61d68fedc02a67b4d8b7583cc (diff)
parentb7bce570430e42229fb63f775fcbb10f38b83c71 (diff)
Merge branch 'topic/ppc-kvm' into next
Merge our KVM topic branch, which has been independently included in linux-next for most of the development cycle.
-rw-r--r--Documentation/powerpc/index.rst1
-rw-r--r--Documentation/powerpc/kvm-nested.rst634
-rw-r--r--arch/powerpc/Kconfig.debug12
-rw-r--r--arch/powerpc/include/asm/guest-state-buffer.h995
-rw-r--r--arch/powerpc/include/asm/hvcall.h30
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h220
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h8
-rw-r--r--arch/powerpc/include/asm/kvm_booke.h10
-rw-r--r--arch/powerpc/include/asm/kvm_host.h22
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h110
-rw-r--r--arch/powerpc/include/asm/plpar_wrappers.h267
-rw-r--r--arch/powerpc/kvm/Makefile4
-rw-r--r--arch/powerpc/kvm/book3s.c38
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c7
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_radix.c31
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c4
-rw-r--r--arch/powerpc/kvm/book3s_hv.c358
-rw-r--r--arch/powerpc/kvm/book3s_hv.h76
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c11
-rw-r--r--arch/powerpc/kvm/book3s_hv_nested.c44
-rw-r--r--arch/powerpc/kvm/book3s_hv_nestedv2.c994
-rw-r--r--arch/powerpc/kvm/book3s_hv_p9_entry.c4
-rw-r--r--arch/powerpc/kvm/book3s_hv_ras.c4
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c8
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c4
-rw-r--r--arch/powerpc/kvm/book3s_hv_uvmem.c2
-rw-r--r--arch/powerpc/kvm/book3s_xive.c12
-rw-r--r--arch/powerpc/kvm/emulate_loadstore.c6
-rw-r--r--arch/powerpc/kvm/guest-state-buffer.c621
-rw-r--r--arch/powerpc/kvm/powerpc.c76
-rw-r--r--arch/powerpc/kvm/test-guest-state-buffer.c328
31 files changed, 4674 insertions, 267 deletions
diff --git a/Documentation/powerpc/index.rst b/Documentation/powerpc/index.rst
index a50834798454..9749f6dc258f 100644
--- a/Documentation/powerpc/index.rst
+++ b/Documentation/powerpc/index.rst
@@ -26,6 +26,7 @@ powerpc
isa-versions
kaslr-booke32
mpc52xx
+ kvm-nested
papr_hcalls
pci_iov_resource_on_powernv
pmu-ebb
diff --git a/Documentation/powerpc/kvm-nested.rst b/Documentation/powerpc/kvm-nested.rst
new file mode 100644
index 000000000000..630602a8aa00
--- /dev/null
+++ b/Documentation/powerpc/kvm-nested.rst
@@ -0,0 +1,634 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====================================
+Nested KVM on POWER
+====================================
+
+Introduction
+============
+
+This document explains how a guest operating system can act as a
+hypervisor and run nested guests through the use of hypercalls, if the
+hypervisor has implemented them. The terms L0, L1, and L2 are used to
+refer to different software entities. L0 is the hypervisor mode entity
+that would normally be called the "host" or "hypervisor". L1 is a
+guest virtual machine that is directly run under L0 and is initiated
+and controlled by L0. L2 is a guest virtual machine that is initiated
+and controlled by L1 acting as a hypervisor.
+
+Existing API
+============
+
+Linux/KVM has had support for nesting as an L0 or L1 since 2018.
+
+The L0 code was added::
+
+ commit 8e3f5fc1045dc49fd175b978c5457f5f51e7a2ce
+ Author: Paul Mackerras <paulus@ozlabs.org>
+ Date: Mon Oct 8 16:31:03 2018 +1100
+ KVM: PPC: Book3S HV: Framework and hcall stubs for nested virtualization
+
+The L1 code was added::
+
+ commit 360cae313702cdd0b90f82c261a8302fecef030a
+ Author: Paul Mackerras <paulus@ozlabs.org>
+ Date: Mon Oct 8 16:31:04 2018 +1100
+ KVM: PPC: Book3S HV: Nested guest entry via hypercall
+
+This API works primarily using a single hcall h_enter_nested(). This
+call is made by the L1 to tell the L0 to start an L2 vCPU with the
+given state. The L0 then starts this L2 and runs it until an L2 exit
+condition is reached. Once the L2 exits, the state of the L2 is given
+back to the L1 by the L0. The full L2 vCPU state is always transferred
+to and from the L1 when the L2 is run. The L0 doesn't keep any state
+on the L2 vCPU (except in the short sequence in the L0 on L1 -> L2
+entry and L2 -> L1 exit).
+
+The only state kept by the L0 is the partition table. The L1 registers
+its partition table using the h_set_partition_table() hcall. All
+other state held by the L0 about the L2s is cached state (such as
+shadow page tables).
+
+The L1 may run any L2 or vCPU without first informing the L0. It
+simply starts the vCPU using h_enter_nested(). The creation of L2s and
+vCPUs is done implicitly whenever h_enter_nested() is called.
+
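+For illustration, entering an L2 vCPU with the v1 API looks roughly
+like the sketch below. This is a simplification of how KVM uses the
+hcall (error handling and all of the state setup are omitted; the
+structure contents are placeholders)::
+
+  /* v1: the full L2 vCPU state is passed on every entry */
+  struct hv_guest_state hvregs = { 0 };   /* L2 hypervisor-privileged state */
+  struct pt_regs l2_regs = { 0 };         /* L2 GPRs, NIA, MSR, ... */
+  long trap;
+
+  /* The L2 and its vCPU are created implicitly by this call */
+  trap = plpar_hcall_norets(H_ENTER_NESTED,
+                            __pa(&hvregs), __pa(&l2_regs));
+  /* On return, hvregs and l2_regs hold the L2 exit state */
+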
+In this document, we call this existing API the v1 API.
+
+New PAPR API
+===============
+
+The new PAPR API changes from the v1 API such that creating the L2 and
+its associated vCPUs is explicit. In this document, we call this the
+v2 API.
+
+h_enter_nested() is replaced with H_GUEST_VCPU_RUN(). Before this can
+be called the L1 must explicitly create the L2 using h_guest_create()
+and any associated vCPUs must be created with h_guest_create_vcpu().
+Getting and setting vCPU state can also be performed using the
+h_guest_{g|s}et hcalls.
+
+The basic execution flow for an L1 to create an L2, run it, and
+delete it is (a sketch in C follows the list):
+
+- L1 and L0 negotiate capabilities with H_GUEST_{G,S}ET_CAPABILITIES()
+ (normally at L1 boot time).
+
+- L1 requests the L0 create an L2 with H_GUEST_CREATE() and receives a token
+
+- L1 requests the L0 create an L2 vCPU with H_GUEST_CREATE_VCPU()
+
+- L1 and L0 communicate the vCPU state using the H_GUEST_{G,S}ET() hcall
+
+- L1 requests the L0 run the vCPU using the H_GUEST_VCPU_RUN() hcall
+
+- L1 deletes L2 with H_GUEST_DELETE()
+
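+A rough sketch of that flow in C is below. The helper names are
+placeholders for the hcalls noted in the comments, not a real API, and
+error handling is elided::
+
+  /* v2 guest lifecycle, as seen from the L1 */
+  unsigned long guest_id, vcpu_id = 0;
+  long exit_reason;
+
+  negotiate_caps();                       /* H_GUEST_{G,S}ET_CAPABILITIES */
+  guest_id = create_l2();                 /* H_GUEST_CREATE */
+  create_l2_vcpu(guest_id, vcpu_id);      /* H_GUEST_CREATE_VCPU */
+  set_initial_state(guest_id, vcpu_id);   /* H_GUEST_SET_STATE via a GSB */
+
+  do {
+          exit_reason = run_vcpu(guest_id, vcpu_id); /* H_GUEST_VCPU_RUN */
+  } while (handle_exit(exit_reason));     /* hcall, HDSI, HDEC, ... */
+
+  delete_l2(guest_id);                    /* H_GUEST_DELETE */
+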
+More details of the individual hcalls follow:
+
+HCALL Details
+=============
+
+This documentation is provided to give an overall understanding of the
+API. It doesn't aim to provide all the details required to implement
+an L1 or L0. The latest version of PAPR can be referred to for more details.
+
+All these HCALLs are made by the L1 to the L0.
+
+H_GUEST_GET_CAPABILITIES()
+--------------------------
+
+This is called to get the capabilities of the L0 nested
+hypervisor. These include capabilities such as the CPU versions (eg.
+POWER9, POWER10) that are supported as L2s::
+
+ H_GUEST_GET_CAPABILITIES(uint64 flags)
+
+ Parameters:
+ Input:
+ flags: Reserved
+ Output:
+ R3: Return code
+ R4: Hypervisor Supported Capabilities bitmap 1
+
+H_GUEST_SET_CAPABILITIES()
+--------------------------
+
+This is called to inform the L0 of the capabilities of the L1
+hypervisor. The set of flags passed here is the same as for
+H_GUEST_GET_CAPABILITIES().
+
+Typically, GET will be called first and then SET will be called with a
+subset of the flags returned from GET. This process allows the L0 and
+L1 to negotiate an agreed set of capabilities::
+
+ H_GUEST_SET_CAPABILITIES(uint64 flags,
+ uint64 capabilitiesBitmap1)
+ Parameters:
+ Input:
+ flags: Reserved
+ capabilitiesBitmap1: Only capabilities advertised through
+ H_GUEST_GET_CAPABILITIES
+ Output:
+ R3: Return code
+ R4: If R3 = H_P2: The number of invalid bitmaps
+ R5: If R3 = H_P2: The index of first invalid bitmap
+
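+For example, the negotiation might look like the sketch below (using
+the generic plpar_hcall() wrappers; the capability mask chosen here is
+arbitrary and error handling is elided)::
+
+  unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+  unsigned long l0_caps, agreed;
+  long rc;
+
+  rc = plpar_hcall(H_GUEST_GET_CAPABILITIES, retbuf, 0 /* flags */);
+  l0_caps = retbuf[0];                    /* R4: L0 capability bitmap 1 */
+
+  /* Keep only what this L1 understands, then tell the L0 */
+  agreed = l0_caps & (H_GUEST_CAP_POWER9 | H_GUEST_CAP_POWER10);
+  rc = plpar_hcall_norets(H_GUEST_SET_CAPABILITIES, 0 /* flags */, agreed);
+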
+H_GUEST_CREATE()
+----------------
+
+This is called to create an L2. A unique ID of the L2 created
+(similar to an LPID) is returned, which can be used on subsequent HCALLs to
+identify the L2::
+
+ H_GUEST_CREATE(uint64 flags,
+ uint64 continueToken);
+ Parameters:
+ Input:
+ flags: Reserved
+ continueToken: Set to -1 on the initial call. On subsequent calls,
+ after H_Busy or H_LongBusyOrder has been
+ returned, the value that was returned in R4.
+ Output:
+ R3: Return code. Notable:
+ H_Not_Enough_Resources: Unable to create Guest VCPU due to not
+ enough Hypervisor memory. See H_GUEST_CREATE_GET_STATE(flags =
+ takeOwnershipOfVcpuState)
+ R4: If R3 = H_Busy or H_LongBusyOrder -> continueToken
+
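+The busy/retry protocol can be handled as in the sketch below; it
+assumes the new guest ID is returned in R4 on success (other error
+handling is elided)::
+
+  unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+  unsigned long token = -1UL;             /* initial call uses -1 */
+  unsigned long guest_id;
+  long rc;
+
+  do {
+          rc = plpar_hcall(H_GUEST_CREATE, retbuf, 0 /* flags */, token);
+          if (rc == H_BUSY || H_IS_LONG_BUSY(rc)) {
+                  token = retbuf[0];      /* R4: continueToken */
+                  cond_resched();
+          }
+  } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+  guest_id = retbuf[0];                   /* R4 on success */
+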
+H_GUEST_CREATE_VCPU()
+---------------------
+
+This is called to create a vCPU associated with an L2. The L2 id
+(returned from H_GUEST_CREATE()) should be passed in. Also passed in
+is a unique (for this L2) vCPUid. This vCPUid is allocated by the
+L1::
+
+ H_GUEST_CREATE_VCPU(uint64 flags,
+ uint64 guestId,
+ uint64 vcpuId);
+ Parameters:
+ Input:
+ flags: Reserved
+ guestId: ID obtained from H_GUEST_CREATE
+ vcpuId: ID of the vCPU to be created. This must be within the
+ range of 0 to 2047
+ Output:
+ R3: Return code. Notable:
+ H_Not_Enough_Resources: Unable to create Guest VCPU due to not
+ enough Hypervisor memory. See H_GUEST_CREATE_GET_STATE(flags =
+ takeOwnershipOfVcpuState)
+
+H_GUEST_GET_STATE()
+-------------------
+
+This is called to get state associated with an L2 (Guest-wide or vCPU specific).
+This info is passed via the Guest State Buffer (GSB), a standard format
+explained later in this document; the necessary details are below:
+
+This can get either L2 wide or vCPU specific information. Examples of
+L2 wide state are the timebase offset or process scoped page table
+info. Examples of vCPU specific state are GPRs or VSRs. A bit in the
+flags parameter specifies whether this call is L2 wide or vCPU
+specific, and the IDs in the GSB must match this.
+
+The L1 provides a pointer to the GSB as a parameter to this call. Also
+provided are the L2 and vCPU IDs associated with the state to get.
+
+The L1 writes only the IDs and sizes in the GSB. The L0 writes the
+associated values for each ID in the GSB::
+
+ H_GUEST_GET_STATE(uint64 flags,
+ uint64 guestId,
+ uint64 vcpuId,
+ uint64 dataBuffer,
+ uint64 dataBufferSizeInBytes);
+ Parameters:
+ Input:
+ flags:
+ Bit 0: getGuestWideState: Request state of the Guest instead
+ of an individual VCPU.
+ Bit 1: takeOwnershipOfVcpuState Indicate the L1 is taking
+ over ownership of the VCPU state and that the L0 can free
+ the storage holding the state. The VCPU state will need to
+ be returned to the Hypervisor via H_GUEST_SET_STATE prior
+ to H_GUEST_RUN_VCPU being called for this VCPU. The data
+ returned in the dataBuffer is in a Hypervisor internal
+ format.
+ Bits 2-63: Reserved
+ guestId: ID obtained from H_GUEST_CREATE
+ vcpuId: ID of the vCPU passed to H_GUEST_CREATE_VCPU
+ dataBuffer: An L1 real address of the GSB.
+ If takeOwnershipOfVcpuState, size must be at least the size
+ returned by ID=0x0001
+ dataBufferSizeInBytes: Size of dataBuffer
+ Output:
+ R3: Return code
+ R4: If R3 = H_Invalid_Element_Id: The array index of the bad
+ element ID.
+ If R3 = H_Invalid_Element_Size: The array index of the bad
+ element size.
+ If R3 = H_Invalid_Element_Value: The array index of the bad
+ element value.
+
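+As a sketch, fetching a single vCPU register (the NIA) could look like
+the following. The structures match the GSB layout described later in
+this document; gsb, gsb_size, guest_id and vcpu_id are assumed to
+already exist, with gsb pointing at a zeroed buffer, and error
+handling is elided::
+
+  struct kvmppc_gs_header *hdr = gsb;
+  struct kvmppc_gs_elem *elem = (struct kvmppc_gs_elem *)hdr->data;
+  u64 nia;
+  long rc;
+
+  /* The L1 fills in only the ID and size; the L0 writes the value */
+  hdr->nelems = cpu_to_be32(1);
+  elem->iden = cpu_to_be16(0x1021);       /* NIA */
+  elem->len = cpu_to_be16(0x08);
+
+  rc = plpar_hcall_norets(H_GUEST_GET_STATE, 0 /* vCPU, not guest wide */,
+                          guest_id, vcpu_id, __pa(gsb), gsb_size);
+  nia = be64_to_cpu(*(__be64 *)elem->data);
+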
+H_GUEST_SET_STATE()
+-------------------
+
+This is called to set L2 wide or vCPU specific L2 state. This info is
+passed via the Guest State Buffer (GSB); the necessary details are below:
+
+This can set either L2 wide or vCPU specific information. Examples of
+L2 wide state are the timebase offset or process scoped page table
+info. Examples of vCPU specific state are GPRs or VSRs. A bit in the
+flags parameter specifies whether this call is L2 wide or vCPU
+specific, and the IDs in the GSB must match this.
+
+The L1 provides a pointer to the GSB as a parameter to this call. Also
+provided is the L2 and vCPU IDs associated with the state to set.
+
+The L1 writes all values in the GSB and the L0 only reads the GSB for
+this call::
+
+ H_GUEST_SET_STATE(uint64 flags,
+ uint64 guestId,
+ uint64 vcpuId,
+ uint64 dataBuffer,
+ uint64 dataBufferSizeInBytes);
+ Parameters:
+ Input:
+ flags:
+ Bit 0: getGuestWideState: Request state of the Guest instead
+ of an individual VCPU.
+ Bit 1: returnOwnershipOfVcpuState Return Guest VCPU state. See
+ GET_STATE takeOwnershipOfVcpuState
+ Bits 2-63: Reserved
+ guestId: ID obtained from H_GUEST_CREATE
+ vcpuId: ID of the vCPU passed to H_GUEST_CREATE_VCPU
+ dataBuffer: An L1 real address of the GSB.
+ If takeOwnershipOfVcpuState, size must be at least the size
+ returned by ID=0x0001
+ dataBufferSizeInBytes: Size of dataBuffer
+ Output:
+ R3: Return code
+ R4: If R3 = H_Invalid_Element_Id: The array index of the bad
+ element ID.
+ If R3 = H_Invalid_Element_Size: The array index of the bad
+ element size.
+ If R3 = H_Invalid_Element_Value: The array index of the bad
+ element value.
+
+H_GUEST_RUN_VCPU()
+------------------
+
+This is called to run an L2 vCPU. The L2 and vCPU IDs are passed in as
+parameters. The vCPU runs with the state set previously using
+H_GUEST_SET_STATE(). When the L2 exits, the L1 will resume from this
+hcall.
+
+This hcall also has associated input and output GSBs. Unlike
+H_GUEST_{S,G}ET_STATE(), these GSB pointers are not passed in as
+parameters to the hcall (this was done in the interest of
+performance). The locations of these GSBs must be preregistered using
+the H_GUEST_SET_STATE() call with IDs 0x0c00 and 0x0c01 (see table
+below).
+
+The input GSB may contain only VCPU specific elements to be set. This
+GSB may also contain zero elements (ie 0 in the first 4 bytes of the
+GSB) if nothing needs to be set.
+
+On exit from the hcall, the output buffer is filled with elements
+determined by the L0. The reason for the exit is contained in GPR4 (ie
+NIP is put in GPR4). The elements returned depend on the exit
+type. For example, if the exit reason is the L2 doing a hcall (GPR4 =
+0xc00), then GPR3-12 are provided in the output GSB as this is the
+state likely needed to service the hcall. If additional state is
+needed, H_GUEST_GET_STATE() may be called by the L1.
+
+To synthesize interrupts in the L2, when calling H_GUEST_RUN_VCPU()
+the L1 may set a flag (as a hcall parameter) and the L0 will
+synthesize the interrupt in the L2. Alternatively, the L1 may
+synthesize the interrupt itself using H_GUEST_SET_STATE() or the
+H_GUEST_RUN_VCPU() input GSB to set the state appropriately::
+
+ H_GUEST_RUN_VCPU(uint64 flags,
+ uint64 guestId,
+ uint64 vcpuId,
+ uint64 dataBuffer,
+ uint64 dataBufferSizeInBytes);
+ Parameters:
+ Input:
+ flags:
+ Bit 0: generateExternalInterrupt: Generate an external interrupt
+ Bit 1: generatePrivilegedDoorbell: Generate a Privileged Doorbell
+ Bit 2: sendToSystemReset: Generate a System Reset Interrupt
+ Bits 3-63: Reserved
+ guestId: ID obtained from H_GUEST_CREATE
+ vcpuId: ID of the vCPU passed to H_GUEST_CREATE_VCPU
+ Output:
+ R3: Return code
+ R4: If R3 = H_Success: The reason the L2 VCPU exited (ie. NIA)
+ 0x000: The VCPU stopped running for an unspecified reason. An
+ example of this is the Hypervisor stopping a VCPU running
+ due to an outstanding interrupt for the Host Partition.
+ 0x980: HDEC
+ 0xC00: HCALL
+ 0xE00: HDSI
+ 0xE20: HISI
+ 0xE40: HEA
+ 0xF80: HV Fac Unavail
+ If R3 = H_Invalid_Element_Id, H_Invalid_Element_Size, or
+ H_Invalid_Element_Value: R4 is offset of the invalid element
+ in the input buffer.
+
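+For example, an L1 might dispatch on the exit reason roughly as
+follows (rc, retbuf, guest_id and vcpu_id are as in the earlier
+sketches; the handle_l2_*() helpers are placeholders)::
+
+  rc = plpar_hcall(H_GUEST_RUN_VCPU, retbuf, 0 /* flags */,
+                   guest_id, vcpu_id);
+  if (rc == H_SUCCESS) {
+          switch (retbuf[0]) {            /* R4: exit reason (vector) */
+          case 0xC00:                     /* L2 hcall: GPR3-12 in output GSB */
+                  handle_l2_hcall();
+                  break;
+          case 0x980:                     /* HDEC expired */
+                  handle_l2_hdec();
+                  break;
+          default:                        /* 0x000, HDSI, HISI, HEA, ... */
+                  handle_l2_interrupt(retbuf[0]);
+                  break;
+          }
+  }
+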
+H_GUEST_DELETE()
+----------------
+
+This is called to delete an L2. All associated vCPUs are also
+deleted. No specific vCPU delete call is provided.
+
+A flag may be provided to delete all guests. This is used to reset the
+L0 in the case of kdump/kexec::
+
+ H_GUEST_DELETE(uint64 flags,
+ uint64 guestId)
+ Parameters:
+ Input:
+ flags:
+ Bit 0: deleteAllGuests: deletes all guests
+ Bits 1-63: Reserved
+ guestId: ID obtained from H_GUEST_CREATE
+ Output:
+ R3: Return code
+
+Guest State Buffer
+==================
+
+The Guest State Buffer (GSB) is the main method of communicating state
+about the L2 between the L1 and L0 via H_GUEST_{G,S}ET() and
+H_GUEST_VCPU_RUN() calls.
+
+State may be associated with a whole L2 (eg. timebase offset) or a
+specific L2 vCPU (eg. GPR state). Only L2 vCPU state may be set by
+H_GUEST_VCPU_RUN().
+
+All data in the GSB is big endian (as is standard in PAPR).
+
+The Guest state buffer has a header which gives the number of
+elements, followed by the GSB elements themselves.
+
+GSB header:
+
++----------+----------+-------------------------------------------+
+| Offset | Size | Purpose |
+| Bytes | Bytes | |
++==========+==========+===========================================+
+| 0 | 4 | Number of elements |
++----------+----------+-------------------------------------------+
+| 4 | | Guest state buffer elements |
++----------+----------+-------------------------------------------+
+
+GSB element:
+
++----------+----------+-------------------------------------------+
+| Offset | Size | Purpose |
+| Bytes | Bytes | |
++==========+==========+===========================================+
+| 0 | 2 | ID |
++----------+----------+-------------------------------------------+
+| 2 | 2 | Size of Value |
++----------+----------+-------------------------------------------+
+| 4 | As above | Value |
++----------+----------+-------------------------------------------+
+
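+Expressed as C structures (these match the definitions added to
+arch/powerpc/include/asm/guest-state-buffer.h by this series), the
+header and element layouts are::
+
+  struct kvmppc_gs_header {
+          __be32 nelems;                  /* number of elements */
+          char data[];                    /* elements follow immediately */
+  } __packed;
+
+  struct kvmppc_gs_elem {
+          __be16 iden;                    /* ID */
+          __be16 len;                     /* size of value in bytes */
+          char data[];                    /* value, 'len' bytes */
+  } __packed;
+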
+The ID in the GSB element specifies what is to be set. This includes
+architected state like GPRs, VSRs, SPRs, plus some metadata about
+the partition like the timebase offset and partition scoped page
+table information.
+
++--------+-------+----+--------+----------------------------------+
+| ID | Size | RW | Thread | Details |
+| | Bytes | | Guest | |
+| | | | Scope | |
++========+=======+====+========+==================================+
+| 0x0000 | | RW | TG | NOP element |
++--------+-------+----+--------+----------------------------------+
+| 0x0001 | 0x08 | R | G | Size of L0 vCPU state. See: |
+| | | | | H_GUEST_GET_STATE: |
+| | | | | flags = takeOwnershipOfVcpuState |
++--------+-------+----+--------+----------------------------------+
+| 0x0002 | 0x08 | R | G | Size Run vCPU out buffer |
++--------+-------+----+--------+----------------------------------+
+| 0x0003 | 0x04 | RW | G | Logical PVR |
++--------+-------+----+--------+----------------------------------+
+| 0x0004 | 0x08 | RW | G | TB Offset (L1 relative) |
++--------+-------+----+--------+----------------------------------+
+| 0x0005 | 0x18 | RW | G |Partition scoped page tbl info: |
+| | | | | |
+| | | | |- 0x00 Addr part scope table |
+| | | | |- 0x08 Num addr bits |
+| | | | |- 0x10 Size root dir |
++--------+-------+----+--------+----------------------------------+
+| 0x0006 | 0x10 | RW | G |Process Table Information: |
+| | | | | |
+| | | | |- 0x0 Addr proc scope table |
+| | | | |- 0x8 Table size. |
++--------+-------+----+--------+----------------------------------+
+| 0x0007-| | | | Reserved |
+| 0x0BFF | | | | |
++--------+-------+----+--------+----------------------------------+
+| 0x0C00 | 0x10 | RW | T |Run vCPU Input Buffer: |
+| | | | | |
+| | | | |- 0x0 Addr of buffer |
+| | | | |- 0x8 Buffer Size. |
++--------+-------+----+--------+----------------------------------+
+| 0x0C01 | 0x10 | RW | T |Run vCPU Output Buffer: |
+| | | | | |
+| | | | |- 0x0 Addr of buffer |
+| | | | |- 0x8 Buffer Size. |
++--------+-------+----+--------+----------------------------------+
+| 0x0C02 | 0x08 | RW | T | vCPU VPA Address |
++--------+-------+----+--------+----------------------------------+
+| 0x0C03-| | | | Reserved |
+| 0x0FFF | | | | |
++--------+-------+----+--------+----------------------------------+
+| 0x1000-| 0x08 | RW | T | GPR 0-31 |
+| 0x101F | | | | |
++--------+-------+----+--------+----------------------------------+
+| 0x1020 | 0x08 | T | T | HDEC expiry TB |
++--------+-------+----+--------+----------------------------------+
+| 0x1021 | 0x08 | RW | T | NIA |
++--------+-------+----+--------+----------------------------------+
+| 0x1022 | 0x08 | RW | T | MSR |
++--------+-------+----+--------+----------------------------------+
+| 0x1023 | 0x08 | RW | T | LR |
++--------+-------+----+--------+----------------------------------+
+| 0x1024 | 0x08 | RW | T | XER |
++--------+-------+----+--------+----------------------------------+
+| 0x1025 | 0x08 | RW | T | CTR |
++--------+-------+----+--------+----------------------------------+
+| 0x1026 | 0x08 | RW | T | CFAR |
++--------+-------+----+--------+----------------------------------+
+| 0x1027 | 0x08 | RW | T | SRR0 |
++--------+-------+----+--------+----------------------------------+
+| 0x1028 | 0x08 | RW | T | SRR1 |
++--------+-------+----+--------+----------------------------------+
+| 0x1029 | 0x08 | RW | T | DAR |
++--------+-------+----+--------+----------------------------------+
+| 0x102A | 0x08 | RW | T | DEC expiry TB |
++--------+-------+----+--------+----------------------------------+
+| 0x102B | 0x08 | RW | T | VTB |
++--------+-------+----+--------+----------------------------------+
+| 0x102C | 0x08 | RW | T | LPCR |
++--------+-------+----+--------+----------------------------------+
+| 0x102D | 0x08 | RW | T | HFSCR |
++--------+-------+----+--------+----------------------------------+
+| 0x102E | 0x08 | RW | T | FSCR |
++--------+-------+----+--------+----------------------------------+
+| 0x102F | 0x08 | RW | T | FPSCR |
++--------+-------+----+--------+----------------------------------+
+| 0x1030 | 0x08 | RW | T | DAWR0 |
++--------+-------+----+--------+----------------------------------+
+| 0x1031 | 0x08 | RW | T | DAWR1 |
++--------+-------+----+--------+----------------------------------+
+| 0x1032 | 0x08 | RW | T | CIABR |
++--------+-------+----+--------+----------------------------------+
+| 0x1033 | 0x08 | RW | T | PURR |
++--------+-------+----+--------+----------------------------------+
+| 0x1034 | 0x08 | RW | T | SPURR |
++--------+-------+----+--------+----------------------------------+
+| 0x1035 | 0x08 | RW | T | IC |
++--------+-------+----+--------+----------------------------------+
+| 0x1036-| 0x08 | RW | T | SPRG 0-3 |
+| 0x1039 | | | | |
++--------+-------+----+--------+----------------------------------+
+| 0x103A | 0x08 | W | T | PPR |
++--------+-------+----+--------+----------------------------------+
+| 0x103B-| 0x08 | RW | T | MMCR 0-3 |
+| 0x103E | | | | |
++--------+-------+----+--------+----------------------------------+
+| 0x103F | 0x08 | RW | T | MMCRA |
++--------+-------+----+--------+----------------------------------+
+| 0x1040 | 0x08 | RW | T | SIER |
++--------+-------+----+--------+----------------------------------+
+| 0x1041 | 0x08 | RW | T | SIER 2 |
++--------+-------+----+--------+----------------------------------+
+| 0x1042 | 0x08 | RW | T | SIER 3 |
++--------+-------+----+--------+----------------------------------+
+| 0x1043 | 0x08 | RW | T | BESCR |
++--------+-------+----+--------+----------------------------------+
+| 0x1044 | 0x08 | RW | T | EBBHR |
++--------+-------+----+--------+----------------------------------+
+| 0x1045 | 0x08 | RW | T | EBBRR |
++--------+-------+----+--------+----------------------------------+
+| 0x1046 | 0x08 | RW | T | AMR |
++--------+-------+----+--------+----------------------------------+
+| 0x1047 | 0x08 | RW | T | IAMR |
++--------+-------+----+--------+----------------------------------+
+| 0x1048 | 0x08 | RW | T | AMOR |
++--------+-------+----+--------+----------------------------------+
+| 0x1049 | 0x08 | RW | T | UAMOR |
++--------+-------+----+--------+----------------------------------+
+| 0x104A | 0x08 | RW | T | SDAR |
++--------+-------+----+--------+----------------------------------+
+| 0x104B | 0x08 | RW | T | SIAR |
++--------+-------+----+--------+----------------------------------+
+| 0x104C | 0x08 | RW | T | DSCR |
++--------+-------+----+--------+----------------------------------+
+| 0x104D | 0x08 | RW | T | TAR |
++--------+-------+----+--------+----------------------------------+
+| 0x104E | 0x08 | RW | T | DEXCR |
++--------+-------+----+--------+----------------------------------+
+| 0x104F | 0x08 | RW | T | HDEXCR |
++--------+-------+----+--------+----------------------------------+
+| 0x1050 | 0x08 | RW | T | HASHKEYR |
++--------+-------+----+--------+----------------------------------+
+| 0x1051 | 0x08 | RW | T | HASHPKEYR |
++--------+-------+----+--------+----------------------------------+
+| 0x1052 | 0x08 | RW | T | CTRL |
++--------+-------+----+--------+----------------------------------+
+| 0x1053-| | | | Reserved |
+| 0x1FFF | | | | |
++--------+-------+----+--------+----------------------------------+
+| 0x2000 | 0x04 | RW | T | CR |
++--------+-------+----+--------+----------------------------------+
+| 0x2001 | 0x04 | RW | T | PIDR |
++--------+-------+----+--------+----------------------------------+
+| 0x2002 | 0x04 | RW | T | DSISR |
++--------+-------+----+--------+----------------------------------+
+| 0x2003 | 0x04 | RW | T | VSCR |
++--------+-------+----+--------+----------------------------------+
+| 0x2004 | 0x04 | RW | T | VRSAVE |
++--------+-------+----+--------+----------------------------------+
+| 0x2005 | 0x04 | RW | T | DAWRX0 |
++--------+-------+----+--------+----------------------------------+
+| 0x2006 | 0x04 | RW | T | DAWRX1 |
++--------+-------+----+--------+----------------------------------+
+| 0x2007-| 0x04 | RW | T | PMC 1-6 |
+| 0x200c | | | | |
++--------+-------+----+--------+----------------------------------+
+| 0x200D | 0x04 | RW | T | WORT |
++--------+-------+----+--------+----------------------------------+
+| 0x200E | 0x04 | RW | T | PSPB |
++--------+-------+----+--------+----------------------------------+
+| 0x200F-| | | | Reserved |
+| 0x2FFF | | | | |
++--------+-------+----+--------+----------------------------------+
+| 0x3000-| 0x10 | RW | T | VSR 0-63 |
+| 0x303F | | | | |
++--------+-------+----+--------+----------------------------------+
+| 0x3040-| | | | Reserved |
+| 0xEFFF | | | | |
++--------+-------+----+--------+----------------------------------+
+| 0xF000 | 0x08 | R | T | HDAR |
++--------+-------+----+--------+----------------------------------+
+| 0xF001 | 0x04 | R | T | HDSISR |
++--------+-------+----+--------+----------------------------------+
+| 0xF002 | 0x04 | R | T | HEIR |
++--------+-------+----+--------+----------------------------------+
+| 0xF003 | 0x08 | R | T | ASDR |
++--------+-------+----+--------+----------------------------------+
+
+
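+As an example, an L1 could use the helpers added to
+arch/powerpc/include/asm/guest-state-buffer.h by this series to set a
+few thread-wide registers. The values here are arbitrary and error
+handling is elided::
+
+  struct kvmppc_gs_buff *gsb;
+
+  gsb = kvmppc_gsb_new(SZ_4K, guest_id, vcpu_id, GFP_KERNEL);
+
+  kvmppc_gse_put_u64(gsb, KVMPPC_GSID_NIA, entry_point);
+  kvmppc_gse_put_u64(gsb, KVMPPC_GSID_MSR, MSR_SF | MSR_ME);
+  kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(3), initial_r3);
+
+  kvmppc_gsb_send(gsb, 0);                /* issues H_GUEST_SET_STATE */
+  kvmppc_gsb_free(gsb);
+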
+Miscellaneous info
+==================
+
+State not in ptregs/hvregs
+--------------------------
+
+In the v1 API, some state is not in the ptregs/hvstate. This includes
+the vector registers and some SPRs. For the L1 to set this state for
+the L2, the L1 loads up these hardware registers before the
+h_enter_nested() call and the L0 ensures they end up as the L2 state
+(by not touching them).
+
+The v2 API removes this and explicitly sets this state via the GSB.
+
+L1 Implementation details: Caching state
+----------------------------------------
+
+In the v1 API, all state is sent from the L1 to the L0 and vice versa
+on every h_enter_nested() hcall. If the L0 is not currently running
+any L2s, the L0 has no state information about them. The only
+exception to this is the location of the partition table, registered
+via h_set_partition_table().
+
+The v2 API changes this so that the L0 retains the L2 state even when
+its vCPUs are no longer running. This means that the L1 only needs to
+communicate with the L0 about L2 state when it needs to modify the L2
+state, or when its value is out of date. This provides an opportunity
+for performance optimisation.
+
+When a vCPU exits from a H_GUEST_RUN_VCPU() call, the L1 internally
+marks all L2 state as invalid. This means that if the L1 wants to know
+the L2 state (say via a kvm_get_one_reg() call), it needs to call
+H_GUEST_GET_STATE() to get that state. Once it's read, it's marked as
+valid in the L1 until the L2 is run again.
+
+Also, when an L1 modifies L2 vCPU state, it doesn't need to write it
+to the L0 until that L2 vCPU runs again. Hence when the L1 updates
+state (say via a kvm_set_one_reg() call), it writes to an internal L1
+copy and only flushes this copy to the L0 when the L2 runs again, via
+the H_GUEST_VCPU_RUN() input buffer.
+
+This lazy updating of state by the L1 avoids unnecessary
+H_GUEST_{G|S}ET_STATE() calls.
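+
+A simplified view of this scheme is sketched below. It is illustrative
+only: KVM's real implementation in book3s_hv_nestedv2.c is structured
+differently, and h_guest_get_one() is a placeholder for an
+H_GUEST_GET_STATE of a single element::
+
+  struct l2_reg {
+          u64 val;
+          bool valid;                     /* matches the L0's copy */
+          bool dirty;                     /* must reach the L0 before next run */
+  };
+
+  static u64 l1_get_reg(struct l2_reg *r, u16 gsid)
+  {
+          if (!r->valid) {
+                  r->val = h_guest_get_one(gsid); /* H_GUEST_GET_STATE */
+                  r->valid = true;
+          }
+          return r->val;
+  }
+
+  static void l1_set_reg(struct l2_reg *r, u64 val)
+  {
+          r->val = val;                   /* only the L1 copy is updated... */
+          r->valid = true;
+          r->dirty = true;                /* ...flushed via the run-vCPU input
+                                             GSB when the L2 next runs */
+  }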
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 50ff3359cde9..ea4033abc07d 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -82,6 +82,18 @@ config MSI_BITMAP_SELFTEST
bool "Run self-tests of the MSI bitmap code"
depends on DEBUG_KERNEL
+config GUEST_STATE_BUFFER_TEST
+ def_tristate n
+ prompt "Enable Guest State Buffer unit tests"
+ depends on KUNIT
+ depends on KVM_BOOK3S_HV_POSSIBLE
+ default KUNIT_ALL_TESTS
+ help
+ The Guest State Buffer is a data format specified in the PAPR.
+ It is used by hcalls to communicate the state of L2 guests between
+ the L1 and L0 hypervisors. Enable unit tests for the library
+ used to create and use guest state buffers.
+
config PPC_IRQ_SOFT_MASK_DEBUG
bool "Include extra checks for powerpc irq soft masking"
depends on PPC64
diff --git a/arch/powerpc/include/asm/guest-state-buffer.h b/arch/powerpc/include/asm/guest-state-buffer.h
new file mode 100644
index 000000000000..808149f31576
--- /dev/null
+++ b/arch/powerpc/include/asm/guest-state-buffer.h
@@ -0,0 +1,995 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Interface based on include/net/netlink.h
+ */
+#ifndef _ASM_POWERPC_GUEST_STATE_BUFFER_H
+#define _ASM_POWERPC_GUEST_STATE_BUFFER_H
+
+#include "asm/hvcall.h"
+#include <linux/gfp.h>
+#include <linux/bitmap.h>
+#include <asm/plpar_wrappers.h>
+
+/**************************************************************************
+ * Guest State Buffer Constants
+ **************************************************************************/
+/* Element without a value and any length */
+#define KVMPPC_GSID_BLANK 0x0000
+/* Size required for the L0's internal VCPU representation */
+#define KVMPPC_GSID_HOST_STATE_SIZE 0x0001
+ /* Minimum size for the H_GUEST_RUN_VCPU output buffer */
+#define KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE 0x0002
+ /* "Logical" PVR value as defined in the PAPR */
+#define KVMPPC_GSID_LOGICAL_PVR 0x0003
+ /* L0 relative timebase offset */
+#define KVMPPC_GSID_TB_OFFSET 0x0004
+ /* Partition Scoped Page Table Info */
+#define KVMPPC_GSID_PARTITION_TABLE 0x0005
+ /* Process Table Info */
+#define KVMPPC_GSID_PROCESS_TABLE 0x0006
+
+/* H_GUEST_RUN_VCPU input buffer Info */
+#define KVMPPC_GSID_RUN_INPUT 0x0C00
+/* H_GUEST_RUN_VCPU output buffer Info */
+#define KVMPPC_GSID_RUN_OUTPUT 0x0C01
+#define KVMPPC_GSID_VPA 0x0C02
+
+#define KVMPPC_GSID_GPR(x) (0x1000 + (x))
+#define KVMPPC_GSID_HDEC_EXPIRY_TB 0x1020
+#define KVMPPC_GSID_NIA 0x1021
+#define KVMPPC_GSID_MSR 0x1022
+#define KVMPPC_GSID_LR 0x1023
+#define KVMPPC_GSID_XER 0x1024
+#define KVMPPC_GSID_CTR 0x1025
+#define KVMPPC_GSID_CFAR 0x1026
+#define KVMPPC_GSID_SRR0 0x1027
+#define KVMPPC_GSID_SRR1 0x1028
+#define KVMPPC_GSID_DAR 0x1029
+#define KVMPPC_GSID_DEC_EXPIRY_TB 0x102A
+#define KVMPPC_GSID_VTB 0x102B
+#define KVMPPC_GSID_LPCR 0x102C
+#define KVMPPC_GSID_HFSCR 0x102D
+#define KVMPPC_GSID_FSCR 0x102E
+#define KVMPPC_GSID_FPSCR 0x102F
+#define KVMPPC_GSID_DAWR0 0x1030
+#define KVMPPC_GSID_DAWR1 0x1031
+#define KVMPPC_GSID_CIABR 0x1032
+#define KVMPPC_GSID_PURR 0x1033
+#define KVMPPC_GSID_SPURR 0x1034
+#define KVMPPC_GSID_IC 0x1035
+#define KVMPPC_GSID_SPRG0 0x1036
+#define KVMPPC_GSID_SPRG1 0x1037
+#define KVMPPC_GSID_SPRG2 0x1038
+#define KVMPPC_GSID_SPRG3 0x1039
+#define KVMPPC_GSID_PPR 0x103A
+#define KVMPPC_GSID_MMCR(x) (0x103B + (x))
+#define KVMPPC_GSID_MMCRA 0x103F
+#define KVMPPC_GSID_SIER(x) (0x1040 + (x))
+#define KVMPPC_GSID_BESCR 0x1043
+#define KVMPPC_GSID_EBBHR 0x1044
+#define KVMPPC_GSID_EBBRR 0x1045
+#define KVMPPC_GSID_AMR 0x1046
+#define KVMPPC_GSID_IAMR 0x1047
+#define KVMPPC_GSID_AMOR 0x1048
+#define KVMPPC_GSID_UAMOR 0x1049
+#define KVMPPC_GSID_SDAR 0x104A
+#define KVMPPC_GSID_SIAR 0x104B
+#define KVMPPC_GSID_DSCR 0x104C
+#define KVMPPC_GSID_TAR 0x104D
+#define KVMPPC_GSID_DEXCR 0x104E
+#define KVMPPC_GSID_HDEXCR 0x104F
+#define KVMPPC_GSID_HASHKEYR 0x1050
+#define KVMPPC_GSID_HASHPKEYR 0x1051
+#define KVMPPC_GSID_CTRL 0x1052
+
+#define KVMPPC_GSID_CR 0x2000
+#define KVMPPC_GSID_PIDR 0x2001
+#define KVMPPC_GSID_DSISR 0x2002
+#define KVMPPC_GSID_VSCR 0x2003
+#define KVMPPC_GSID_VRSAVE 0x2004
+#define KVMPPC_GSID_DAWRX0 0x2005
+#define KVMPPC_GSID_DAWRX1 0x2006
+#define KVMPPC_GSID_PMC(x) (0x2007 + (x))
+#define KVMPPC_GSID_WORT 0x200D
+#define KVMPPC_GSID_PSPB 0x200E
+
+#define KVMPPC_GSID_VSRS(x) (0x3000 + (x))
+
+#define KVMPPC_GSID_HDAR 0xF000
+#define KVMPPC_GSID_HDSISR 0xF001
+#define KVMPPC_GSID_HEIR 0xF002
+#define KVMPPC_GSID_ASDR 0xF003
+
+#define KVMPPC_GSE_GUESTWIDE_START KVMPPC_GSID_BLANK
+#define KVMPPC_GSE_GUESTWIDE_END KVMPPC_GSID_PROCESS_TABLE
+#define KVMPPC_GSE_GUESTWIDE_COUNT \
+ (KVMPPC_GSE_GUESTWIDE_END - KVMPPC_GSE_GUESTWIDE_START + 1)
+
+#define KVMPPC_GSE_META_START KVMPPC_GSID_RUN_INPUT
+#define KVMPPC_GSE_META_END KVMPPC_GSID_VPA
+#define KVMPPC_GSE_META_COUNT (KVMPPC_GSE_META_END - KVMPPC_GSE_META_START + 1)
+
+#define KVMPPC_GSE_DW_REGS_START KVMPPC_GSID_GPR(0)
+#define KVMPPC_GSE_DW_REGS_END KVMPPC_GSID_CTRL
+#define KVMPPC_GSE_DW_REGS_COUNT \
+ (KVMPPC_GSE_DW_REGS_END - KVMPPC_GSE_DW_REGS_START + 1)
+
+#define KVMPPC_GSE_W_REGS_START KVMPPC_GSID_CR
+#define KVMPPC_GSE_W_REGS_END KVMPPC_GSID_PSPB
+#define KVMPPC_GSE_W_REGS_COUNT \
+ (KVMPPC_GSE_W_REGS_END - KVMPPC_GSE_W_REGS_START + 1)
+
+#define KVMPPC_GSE_VSRS_START KVMPPC_GSID_VSRS(0)
+#define KVMPPC_GSE_VSRS_END KVMPPC_GSID_VSRS(63)
+#define KVMPPC_GSE_VSRS_COUNT (KVMPPC_GSE_VSRS_END - KVMPPC_GSE_VSRS_START + 1)
+
+#define KVMPPC_GSE_INTR_REGS_START KVMPPC_GSID_HDAR
+#define KVMPPC_GSE_INTR_REGS_END KVMPPC_GSID_ASDR
+#define KVMPPC_GSE_INTR_REGS_COUNT \
+ (KVMPPC_GSE_INTR_REGS_END - KVMPPC_GSE_INTR_REGS_START + 1)
+
+#define KVMPPC_GSE_IDEN_COUNT \
+ (KVMPPC_GSE_GUESTWIDE_COUNT + KVMPPC_GSE_META_COUNT + \
+ KVMPPC_GSE_DW_REGS_COUNT + KVMPPC_GSE_W_REGS_COUNT + \
+ KVMPPC_GSE_VSRS_COUNT + KVMPPC_GSE_INTR_REGS_COUNT)
+
+/**
+ * Ranges of guest state buffer elements
+ */
+enum {
+ KVMPPC_GS_CLASS_GUESTWIDE = 0x01,
+ KVMPPC_GS_CLASS_META = 0x02,
+ KVMPPC_GS_CLASS_DWORD_REG = 0x04,
+ KVMPPC_GS_CLASS_WORD_REG = 0x08,
+ KVMPPC_GS_CLASS_VECTOR = 0x10,
+ KVMPPC_GS_CLASS_INTR = 0x20,
+};
+
+/**
+ * Types of guest state buffer elements
+ */
+enum {
+ KVMPPC_GSE_BE32,
+ KVMPPC_GSE_BE64,
+ KVMPPC_GSE_VEC128,
+ KVMPPC_GSE_PARTITION_TABLE,
+ KVMPPC_GSE_PROCESS_TABLE,
+ KVMPPC_GSE_BUFFER,
+ __KVMPPC_GSE_TYPE_MAX,
+};
+
+/**
+ * Flags for guest state elements
+ */
+enum {
+ KVMPPC_GS_FLAGS_WIDE = 0x01,
+};
+
+/**
+ * struct kvmppc_gs_part_table - deserialized partition table information
+ * element
+ * @address: start of the partition table
+ * @ea_bits: number of bits in the effective address
+ * @gpd_size: root page directory size
+ */
+struct kvmppc_gs_part_table {
+ u64 address;
+ u64 ea_bits;
+ u64 gpd_size;
+};
+
+/**
+ * struct kvmppc_gs_proc_table - deserialized process table information element
+ * @address: start of the process table
+ * @gpd_size: process table size
+ */
+struct kvmppc_gs_proc_table {
+ u64 address;
+ u64 gpd_size;
+};
+
+/**
+ * struct kvmppc_gs_buff_info - deserialized meta guest state buffer information
+ * @address: start of the guest state buffer
+ * @size: size of the guest state buffer
+ */
+struct kvmppc_gs_buff_info {
+ u64 address;
+ u64 size;
+};
+
+/**
+ * struct kvmppc_gs_header - serialized guest state buffer header
+ * @nelem: count of guest state elements in the buffer
+ * @data: start of the stream of elements in the buffer
+ */
+struct kvmppc_gs_header {
+ __be32 nelems;
+ char data[];
+} __packed;
+
+/**
+ * struct kvmppc_gs_elem - serialized guest state buffer element
+ * @iden: Guest State ID
+ * @len: length of data
+ * @data: the guest state buffer element's value
+ */
+struct kvmppc_gs_elem {
+ __be16 iden;
+ __be16 len;
+ char data[];
+} __packed;
+
+/**
+ * struct kvmppc_gs_buff - a guest state buffer with metadata.
+ * @capacity: total length of the buffer
+ * @len: current length of the elements and header
+ * @guest_id: guest id associated with the buffer
+ * @vcpu_id: vcpu_id associated with the buffer
+ * @hdr: the serialised guest state buffer
+ */
+struct kvmppc_gs_buff {
+ size_t capacity;
+ size_t len;
+ unsigned long guest_id;
+ unsigned long vcpu_id;
+ struct kvmppc_gs_header *hdr;
+};
+
+/**
+ * struct kvmppc_gs_bitmap - a bitmap for element ids
+ * @bitmap: a bitmap large enough for all Guest State IDs
+ */
+struct kvmppc_gs_bitmap {
+ /* private: */
+ DECLARE_BITMAP(bitmap, KVMPPC_GSE_IDEN_COUNT);
+};
+
+/**
+ * struct kvmppc_gs_parser - a map of element ids to locations in a buffer
+ * @iterator: bitmap used for iterating
+ * @gses: contains the pointers to elements
+ *
+ * A guest state parser is used for deserialising a guest state buffer.
+ * Given a buffer, it then allows looking up guest state elements using
+ * a guest state id.
+ */
+struct kvmppc_gs_parser {
+ /* private: */
+ struct kvmppc_gs_bitmap iterator;
+ struct kvmppc_gs_elem *gses[KVMPPC_GSE_IDEN_COUNT];
+};
+
+enum {
+ GSM_GUEST_WIDE = 0x1,
+ GSM_SEND = 0x2,
+ GSM_RECEIVE = 0x4,
+ GSM_GSB_OWNER = 0x8,
+};
+
+struct kvmppc_gs_msg;
+
+/**
+ * struct kvmppc_gs_msg_ops - guest state message behavior
+ * @get_size: maximum size required for the message data
+ * @fill_info: serializes to the guest state buffer format
+ * @refresh_info: deserializes from the guest state buffer format
+ */
+struct kvmppc_gs_msg_ops {
+ size_t (*get_size)(struct kvmppc_gs_msg *gsm);
+ int (*fill_info)(struct kvmppc_gs_buff *gsb, struct kvmppc_gs_msg *gsm);
+ int (*refresh_info)(struct kvmppc_gs_msg *gsm,
+ struct kvmppc_gs_buff *gsb);
+};
+
+/**
+ * struct kvmppc_gs_msg - a guest state message
+ * @bitmap: the guest state ids that should be included
+ * @ops: modify message behavior for reading and writing to buffers
+ * @flags: guest wide or thread wide
+ * @data: location where buffer data will be written to or from.
+ *
+ * A guest state message allows flexibility in sending and receiving data
+ * in a guest state buffer format.
+ */
+struct kvmppc_gs_msg {
+ struct kvmppc_gs_bitmap bitmap;
+ struct kvmppc_gs_msg_ops *ops;
+ unsigned long flags;
+ void *data;
+};
+
+/**************************************************************************
+ * Guest State IDs
+ **************************************************************************/
+
+u16 kvmppc_gsid_size(u16 iden);
+unsigned long kvmppc_gsid_flags(u16 iden);
+u64 kvmppc_gsid_mask(u16 iden);
+
+/**************************************************************************
+ * Guest State Buffers
+ **************************************************************************/
+struct kvmppc_gs_buff *kvmppc_gsb_new(size_t size, unsigned long guest_id,
+ unsigned long vcpu_id, gfp_t flags);
+void kvmppc_gsb_free(struct kvmppc_gs_buff *gsb);
+void *kvmppc_gsb_put(struct kvmppc_gs_buff *gsb, size_t size);
+int kvmppc_gsb_send(struct kvmppc_gs_buff *gsb, unsigned long flags);
+int kvmppc_gsb_recv(struct kvmppc_gs_buff *gsb, unsigned long flags);
+
+/**
+ * kvmppc_gsb_header() - the header of a guest state buffer
+ * @gsb: guest state buffer
+ *
+ * Returns a pointer to the buffer header.
+ */
+static inline struct kvmppc_gs_header *
+kvmppc_gsb_header(struct kvmppc_gs_buff *gsb)
+{
+ return gsb->hdr;
+}
+
+/**
+ * kvmppc_gsb_data() - the elements of a guest state buffer
+ * @gsb: guest state buffer
+ *
+ * Returns a pointer to the first element of the buffer data.
+ */
+static inline struct kvmppc_gs_elem *kvmppc_gsb_data(struct kvmppc_gs_buff *gsb)
+{
+ return (struct kvmppc_gs_elem *)kvmppc_gsb_header(gsb)->data;
+}
+
+/**
+ * kvmppc_gsb_len() - the current length of a guest state buffer
+ * @gsb: guest state buffer
+ *
+ * Returns the length including the header of a buffer.
+ */
+static inline size_t kvmppc_gsb_len(struct kvmppc_gs_buff *gsb)
+{
+ return gsb->len;
+}
+
+/**
+ * kvmppc_gsb_capacity() - the capacity of a guest state buffer
+ * @gsb: guest state buffer
+ *
+ * Returns the capacity of a buffer.
+ */
+static inline size_t kvmppc_gsb_capacity(struct kvmppc_gs_buff *gsb)
+{
+ return gsb->capacity;
+}
+
+/**
+ * kvmppc_gsb_paddress() - the physical address of buffer
+ * @gsb: guest state buffer
+ *
+ * Returns the physical address of the buffer.
+ */
+static inline u64 kvmppc_gsb_paddress(struct kvmppc_gs_buff *gsb)
+{
+ return __pa(kvmppc_gsb_header(gsb));
+}
+
+/**
+ * kvmppc_gsb_nelems() - the number of elements in a buffer
+ * @gsb: guest state buffer
+ *
+ * Returns the number of elements in a buffer
+ */
+static inline u32 kvmppc_gsb_nelems(struct kvmppc_gs_buff *gsb)
+{
+ return be32_to_cpu(kvmppc_gsb_header(gsb)->nelems);
+}
+
+/**
+ * kvmppc_gsb_reset() - empty a guest state buffer
+ * @gsb: guest state buffer
+ *
+ * Reset the number of elements and length of buffer to empty.
+ */
+static inline void kvmppc_gsb_reset(struct kvmppc_gs_buff *gsb)
+{
+ kvmppc_gsb_header(gsb)->nelems = cpu_to_be32(0);
+ gsb->len = sizeof(struct kvmppc_gs_header);
+}
+
+/**
+ * kvmppc_gsb_data_len() - the length of a buffer excluding the header
+ * @gsb: guest state buffer
+ *
+ * Returns the length of a buffer excluding the header
+ */
+static inline size_t kvmppc_gsb_data_len(struct kvmppc_gs_buff *gsb)
+{
+ return gsb->len - sizeof(struct kvmppc_gs_header);
+}
+
+/**
+ * kvmppc_gsb_data_cap() - the capacity of a buffer excluding the header
+ * @gsb: guest state buffer
+ *
+ * Returns the capacity of a buffer excluding the header
+ */
+static inline size_t kvmppc_gsb_data_cap(struct kvmppc_gs_buff *gsb)
+{
+ return gsb->capacity - sizeof(struct kvmppc_gs_header);
+}
+
+/**
+ * kvmppc_gsb_for_each_elem - iterate over the elements in a buffer
+ * @i: loop counter
+ * @pos: set to current element
+ * @gsb: guest state buffer
+ * @rem: initialized to buffer capacity, holds bytes currently remaining in
+ * stream
+ */
+#define kvmppc_gsb_for_each_elem(i, pos, gsb, rem) \
+ kvmppc_gse_for_each_elem(i, kvmppc_gsb_nelems(gsb), pos, \
+ kvmppc_gsb_data(gsb), \
+ kvmppc_gsb_data_cap(gsb), rem)
+
+/**************************************************************************
+ * Guest State Elements
+ **************************************************************************/
+
+/**
+ * kvmppc_gse_iden() - guest state ID of element
+ * @gse: guest state element
+ *
+ * Return the guest state ID in host endianness.
+ */
+static inline u16 kvmppc_gse_iden(const struct kvmppc_gs_elem *gse)
+{
+ return be16_to_cpu(gse->iden);
+}
+
+/**
+ * kvmppc_gse_len() - length of guest state element data
+ * @gse: guest state element
+ *
+ * Returns the length of guest state element data
+ */
+static inline u16 kvmppc_gse_len(const struct kvmppc_gs_elem *gse)
+{
+ return be16_to_cpu(gse->len);
+}
+
+/**
+ * kvmppc_gse_total_len() - total length of guest state element
+ * @gse: guest state element
+ *
+ * Returns the length of the data plus the ID and size header.
+ */
+static inline u16 kvmppc_gse_total_len(const struct kvmppc_gs_elem *gse)
+{
+ return be16_to_cpu(gse->len) + sizeof(*gse);
+}
+
+/**
+ * kvmppc_gse_total_size() - space needed for a given data length
+ * @size: data length
+ *
+ * Returns size plus the space needed for the ID and size header.
+ */
+static inline u16 kvmppc_gse_total_size(u16 size)
+{
+ return sizeof(struct kvmppc_gs_elem) + size;
+}
+
+/**
+ * kvmppc_gse_data() - pointer to data of a guest state element
+ * @gse: guest state element
+ *
+ * Returns a pointer to the beginning of guest state element data.
+ */
+static inline void *kvmppc_gse_data(const struct kvmppc_gs_elem *gse)
+{
+ return (void *)gse->data;
+}
+
+/**
+ * kvmppc_gse_ok() - checks space exists for guest state element
+ * @gse: guest state element
+ * @remaining: bytes of space remaining
+ *
+ * Returns true if the guest state element can fit in remaining space.
+ */
+static inline bool kvmppc_gse_ok(const struct kvmppc_gs_elem *gse,
+ int remaining)
+{
+ return remaining >= kvmppc_gse_total_len(gse);
+}
+
+/**
+ * kvmppc_gse_next() - iterate to the next guest state element in a stream
+ * @gse: stream of guest state elements
+ * @remaining: length of the guest element stream
+ *
+ * Returns the next guest state element in a stream of elements. The length of
+ * the stream is updated in remaining.
+ */
+static inline struct kvmppc_gs_elem *
+kvmppc_gse_next(const struct kvmppc_gs_elem *gse, int *remaining)
+{
+ int len = sizeof(*gse) + kvmppc_gse_len(gse);
+
+ *remaining -= len;
+ return (struct kvmppc_gs_elem *)(gse->data + kvmppc_gse_len(gse));
+}
+
+/**
+ * kvmppc_gse_for_each_elem - iterate over a stream of guest state elements
+ * @i: loop counter
+ * @max: number of elements
+ * @pos: set to current element
+ * @head: head of elements
+ * @len: length of the stream
+ * @rem: initialized to len, holds bytes currently remaining in the stream
+ */
+#define kvmppc_gse_for_each_elem(i, max, pos, head, len, rem) \
+ for (i = 0, pos = head, rem = len; kvmppc_gse_ok(pos, rem) && i < max; \
+ pos = kvmppc_gse_next(pos, &(rem)), i++)
+
+int __kvmppc_gse_put(struct kvmppc_gs_buff *gsb, u16 iden, u16 size,
+ const void *data);
+int kvmppc_gse_parse(struct kvmppc_gs_parser *gsp, struct kvmppc_gs_buff *gsb);
+
+/**
+ * kvmppc_gse_put_be32() - add a be32 guest state element to a buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: big endian value
+ */
+static inline int kvmppc_gse_put_be32(struct kvmppc_gs_buff *gsb, u16 iden,
+ __be32 val)
+{
+ __be32 tmp;
+
+ tmp = val;
+ return __kvmppc_gse_put(gsb, iden, sizeof(__be32), &tmp);
+}
+
+/**
+ * kvmppc_gse_put_u32() - add a host endian 32bit int guest state element to a
+ * buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: host endian value
+ */
+static inline int kvmppc_gse_put_u32(struct kvmppc_gs_buff *gsb, u16 iden,
+ u32 val)
+{
+ __be32 tmp;
+
+ val &= kvmppc_gsid_mask(iden);
+ tmp = cpu_to_be32(val);
+ return kvmppc_gse_put_be32(gsb, iden, tmp);
+}
+
+/**
+ * kvmppc_gse_put_be64() - add a be64 guest state element to a buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: big endian value
+ */
+static inline int kvmppc_gse_put_be64(struct kvmppc_gs_buff *gsb, u16 iden,
+ __be64 val)
+{
+ __be64 tmp;
+
+ tmp = val;
+ return __kvmppc_gse_put(gsb, iden, sizeof(__be64), &tmp);
+}
+
+/**
+ * kvmppc_gse_put_u64() - add a host endian 64bit guest state element to a
+ * buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: host endian value
+ */
+static inline int kvmppc_gse_put_u64(struct kvmppc_gs_buff *gsb, u16 iden,
+ u64 val)
+{
+ __be64 tmp;
+
+ val &= kvmppc_gsid_mask(iden);
+ tmp = cpu_to_be64(val);
+ return kvmppc_gse_put_be64(gsb, iden, tmp);
+}
+
+/**
+ * __kvmppc_gse_put_reg() - add a register type guest state element to a buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: host endian value
+ *
+ * Adds a register type guest state element. Uses the guest state ID for
+ * determining the length of the guest element. If the guest state ID has
+ * bits that can not be set they will be cleared.
+ */
+static inline int __kvmppc_gse_put_reg(struct kvmppc_gs_buff *gsb, u16 iden,
+ u64 val)
+{
+ val &= kvmppc_gsid_mask(iden);
+ if (kvmppc_gsid_size(iden) == sizeof(u64))
+ return kvmppc_gse_put_u64(gsb, iden, val);
+
+ if (kvmppc_gsid_size(iden) == sizeof(u32)) {
+ u32 tmp;
+
+ tmp = (u32)val;
+ if (tmp != val)
+ return -EINVAL;
+
+ return kvmppc_gse_put_u32(gsb, iden, tmp);
+ }
+ return -EINVAL;
+}
+
+/**
+ * kvmppc_gse_put_vector128() - add a vector guest state element to a buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: 16 byte vector value
+ */
+static inline int kvmppc_gse_put_vector128(struct kvmppc_gs_buff *gsb, u16 iden,
+ vector128 *val)
+{
+ __be64 tmp[2] = { 0 };
+ union {
+ __vector128 v;
+ u64 dw[2];
+ } u;
+
+ u.v = *val;
+ tmp[0] = cpu_to_be64(u.dw[TS_FPROFFSET]);
+#ifdef CONFIG_VSX
+ tmp[1] = cpu_to_be64(u.dw[TS_VSRLOWOFFSET]);
+#endif
+ return __kvmppc_gse_put(gsb, iden, sizeof(tmp), &tmp);
+}
+
+/**
+ * kvmppc_gse_put_part_table() - add a partition table guest state element to a
+ * buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: partition table value
+ */
+static inline int kvmppc_gse_put_part_table(struct kvmppc_gs_buff *gsb,
+ u16 iden,
+ struct kvmppc_gs_part_table val)
+{
+ __be64 tmp[3];
+
+ tmp[0] = cpu_to_be64(val.address);
+ tmp[1] = cpu_to_be64(val.ea_bits);
+ tmp[2] = cpu_to_be64(val.gpd_size);
+ return __kvmppc_gse_put(gsb, KVMPPC_GSID_PARTITION_TABLE, sizeof(tmp),
+ &tmp);
+}
+
+/**
+ * kvmppc_gse_put_proc_table() - add a process table guest state element to a
+ * buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: process table value
+ */
+static inline int kvmppc_gse_put_proc_table(struct kvmppc_gs_buff *gsb,
+ u16 iden,
+ struct kvmppc_gs_proc_table val)
+{
+ __be64 tmp[2];
+
+ tmp[0] = cpu_to_be64(val.address);
+ tmp[1] = cpu_to_be64(val.gpd_size);
+ return __kvmppc_gse_put(gsb, KVMPPC_GSID_PROCESS_TABLE, sizeof(tmp),
+ &tmp);
+}
+
+/**
+ * kvmppc_gse_put_buff_info() - adds a GSB description guest state element to a
+ * buffer
+ * @gsb: guest state buffer to add element to
+ * @iden: guest state ID
+ * @val: guest state buffer description value
+ */
+static inline int kvmppc_gse_put_buff_info(struct kvmppc_gs_buff *gsb, u16 iden,
+ struct kvmppc_gs_buff_info val)
+{
+ __be64 tmp[2];
+
+ tmp[0] = cpu_to_be64(val.address);
+ tmp[1] = cpu_to_be64(val.size);
+ return __kvmppc_gse_put(gsb, iden, sizeof(tmp), &tmp);
+}
+
+int __kvmppc_gse_put(struct kvmppc_gs_buff *gsb, u16 iden, u16 size,
+ const void *data);
+
+/**
+ * kvmppc_gse_get_be32() - return the data of a be32 element
+ * @gse: guest state element
+ */
+static inline __be32 kvmppc_gse_get_be32(const struct kvmppc_gs_elem *gse)
+{
+ if (WARN_ON(kvmppc_gse_len(gse) != sizeof(__be32)))
+ return 0;
+ return *(__be32 *)kvmppc_gse_data(gse);
+}
+
+/**
+ * kvmppc_gse_get_u32() - return the data of a be32 element in host endianness
+ * @gse: guest state element
+ */
+static inline u32 kvmppc_gse_get_u32(const struct kvmppc_gs_elem *gse)
+{
+ return be32_to_cpu(kvmppc_gse_get_be32(gse));
+}
+
+/**
+ * kvmppc_gse_get_be64() - return the data of a be64 element
+ * @gse: guest state element
+ */
+static inline __be64 kvmppc_gse_get_be64(const struct kvmppc_gs_elem *gse)
+{
+ if (WARN_ON(kvmppc_gse_len(gse) != sizeof(__be64)))
+ return 0;
+ return *(__be64 *)kvmppc_gse_data(gse);
+}
+
+/**
+ * kvmppc_gse_get_u64() - return the data of a be64 element in host endianness
+ * @gse: guest state element
+ */
+static inline u64 kvmppc_gse_get_u64(const struct kvmppc_gs_elem *gse)
+{
+ return be64_to_cpu(kvmppc_gse_get_be64(gse));
+}
+
+/**
+ * kvmppc_gse_get_vector128() - return the data of a vector element
+ * @gse: guest state element
+ */
+static inline void kvmppc_gse_get_vector128(const struct kvmppc_gs_elem *gse,
+ vector128 *v)
+{
+ union {
+ __vector128 v;
+ u64 dw[2];
+ } u = { 0 };
+ __be64 *src;
+
+ if (WARN_ON(kvmppc_gse_len(gse) != sizeof(__vector128)))
+ *v = u.v;
+
+ src = (__be64 *)kvmppc_gse_data(gse);
+ u.dw[TS_FPROFFSET] = be64_to_cpu(src[0]);
+#ifdef CONFIG_VSX
+ u.dw[TS_VSRLOWOFFSET] = be64_to_cpu(src[1]);
+#endif
+ *v = u.v;
+}
+
+/**************************************************************************
+ * Guest State Bitmap
+ **************************************************************************/
+
+bool kvmppc_gsbm_test(struct kvmppc_gs_bitmap *gsbm, u16 iden);
+void kvmppc_gsbm_set(struct kvmppc_gs_bitmap *gsbm, u16 iden);
+void kvmppc_gsbm_clear(struct kvmppc_gs_bitmap *gsbm, u16 iden);
+u16 kvmppc_gsbm_next(struct kvmppc_gs_bitmap *gsbm, u16 prev);
+
+/**
+ * kvmppc_gsbm_zero - zero the entire bitmap
+ * @gsbm: guest state buffer bitmap
+ */
+static inline void kvmppc_gsbm_zero(struct kvmppc_gs_bitmap *gsbm)
+{
+ bitmap_zero(gsbm->bitmap, KVMPPC_GSE_IDEN_COUNT);
+}
+
+/**
+ * kvmppc_gsbm_fill - fill the entire bitmap
+ * @gsbm: guest state buffer bitmap
+ */
+static inline void kvmppc_gsbm_fill(struct kvmppc_gs_bitmap *gsbm)
+{
+ bitmap_fill(gsbm->bitmap, KVMPPC_GSE_IDEN_COUNT);
+ clear_bit(0, gsbm->bitmap);
+}
+
+/**
+ * kvmppc_gsbm_for_each - iterate the present guest state IDs
+ * @gsbm: guest state buffer bitmap
+ * @iden: current guest state ID
+ */
+#define kvmppc_gsbm_for_each(gsbm, iden) \
+ for (iden = kvmppc_gsbm_next(gsbm, 0); iden != 0; \
+ iden = kvmppc_gsbm_next(gsbm, iden))
+
+/**************************************************************************
+ * Guest State Parser
+ **************************************************************************/
+
+void kvmppc_gsp_insert(struct kvmppc_gs_parser *gsp, u16 iden,
+ struct kvmppc_gs_elem *gse);
+struct kvmppc_gs_elem *kvmppc_gsp_lookup(struct kvmppc_gs_parser *gsp,
+ u16 iden);
+
+/**
+ * kvmppc_gsp_for_each - iterate the <guest state IDs, guest state element>
+ * pairs
+ * @gsp: guest state parser
+ * @iden: current guest state ID
+ * @gse: guest state element
+ */
+#define kvmppc_gsp_for_each(gsp, iden, gse) \
+ for (iden = kvmppc_gsbm_next(&(gsp)->iterator, 0), \
+ gse = kvmppc_gsp_lookup((gsp), iden); \
+ iden != 0; iden = kvmppc_gsbm_next(&(gsp)->iterator, iden), \
+ gse = kvmppc_gsp_lookup((gsp), iden))
+
+/**************************************************************************
+ * Guest State Message
+ **************************************************************************/
+
+/**
+ * kvmppc_gsm_for_each - iterate the guest state IDs included in a guest state
+ * message
+ * @gsm: guest state message
+ * @iden: current guest state ID
+ */
+#define kvmppc_gsm_for_each(gsm, iden) \
+ for (iden = kvmppc_gsbm_next(&gsm->bitmap, 0); iden != 0; \
+ iden = kvmppc_gsbm_next(&gsm->bitmap, iden))
+
+int kvmppc_gsm_init(struct kvmppc_gs_msg *mgs, struct kvmppc_gs_msg_ops *ops,
+ void *data, unsigned long flags);
+
+struct kvmppc_gs_msg *kvmppc_gsm_new(struct kvmppc_gs_msg_ops *ops, void *data,
+ unsigned long flags, gfp_t gfp_flags);
+void kvmppc_gsm_free(struct kvmppc_gs_msg *gsm);
+size_t kvmppc_gsm_size(struct kvmppc_gs_msg *gsm);
+int kvmppc_gsm_fill_info(struct kvmppc_gs_msg *gsm, struct kvmppc_gs_buff *gsb);
+int kvmppc_gsm_refresh_info(struct kvmppc_gs_msg *gsm,
+ struct kvmppc_gs_buff *gsb);
+
+/**
+ * kvmppc_gsm_include - indicate a guest state ID should be included when
+ * serializing
+ * @gsm: guest state message
+ * @iden: guest state ID
+ */
+static inline void kvmppc_gsm_include(struct kvmppc_gs_msg *gsm, u16 iden)
+{
+ kvmppc_gsbm_set(&gsm->bitmap, iden);
+}
+
+/**
+ * kvmppc_gsm_includes - check if a guest state ID will be included when
+ * serializing
+ * @gsm: guest state message
+ * @iden: guest state ID
+ */
+static inline bool kvmppc_gsm_includes(struct kvmppc_gs_msg *gsm, u16 iden)
+{
+ return kvmppc_gsbm_test(&gsm->bitmap, iden);
+}
+
+/**
+ * kvmppc_gsm_include_all - indicate all guest state IDs should be included
+ *                          when serializing
+ * @gsm: guest state message
+ */
+static inline void kvmppc_gsm_include_all(struct kvmppc_gs_msg *gsm)
+{
+ kvmppc_gsbm_fill(&gsm->bitmap);
+}
+
+/**
+ * kvmppc_gsm_reset - clear the guest state IDs that should be included when
+ * serializing
+ * @gsm: guest state message
+ */
+static inline void kvmppc_gsm_reset(struct kvmppc_gs_msg *gsm)
+{
+ kvmppc_gsbm_zero(&gsm->bitmap);
+}
+
+/**
+ * kvmppc_gsb_receive_data - flexibly update values from a guest state buffer
+ * @gsb: guest state buffer
+ * @gsm: guest state message
+ *
+ * Requests updated values for the guest state values included in the guest
+ * state message. The guest state message will then deserialize the guest state
+ * buffer.
+ */
+static inline int kvmppc_gsb_receive_data(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm)
+{
+ int rc;
+
+ kvmppc_gsb_reset(gsb);
+ rc = kvmppc_gsm_fill_info(gsm, gsb);
+ if (rc < 0)
+ return rc;
+
+ rc = kvmppc_gsb_recv(gsb, gsm->flags);
+ if (rc < 0)
+ return rc;
+
+ rc = kvmppc_gsm_refresh_info(gsm, gsb);
+ if (rc < 0)
+ return rc;
+ return 0;
+}
+
+/**
+ * kvmppc_gsb_receive_datum - receive a single guest state ID
+ * @gsb: guest state buffer
+ * @gsm: guest state message
+ * @iden: guest state identity
+ */
+static inline int kvmppc_gsb_receive_datum(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm, u16 iden)
+{
+ int rc;
+
+ kvmppc_gsm_include(gsm, iden);
+ rc = kvmppc_gsb_receive_data(gsb, gsm);
+ if (rc < 0)
+ return rc;
+ kvmppc_gsm_reset(gsm);
+ return 0;
+}
+
+/**
+ * kvmppc_gsb_send_data - flexibly send values from a guest state buffer
+ * @gsb: guest state buffer
+ * @gsm: guest state message
+ *
+ * Sends the guest state values included in the guest state message.
+ */
+static inline int kvmppc_gsb_send_data(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm)
+{
+ int rc;
+
+ kvmppc_gsb_reset(gsb);
+ rc = kvmppc_gsm_fill_info(gsm, gsb);
+ if (rc < 0)
+ return rc;
+ rc = kvmppc_gsb_send(gsb, gsm->flags);
+
+ return rc;
+}
+
+/**
+ * kvmppc_gsb_send_datum - send a single guest state ID
+ * @gsb: guest state buffer
+ * @gsm: guest state message
+ * @iden: guest state identity
+ */
+static inline int kvmppc_gsb_send_datum(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm, u16 iden)
+{
+ int rc;
+
+ kvmppc_gsm_include(gsm, iden);
+ rc = kvmppc_gsb_send_data(gsb, gsm);
+ if (rc < 0)
+ return rc;
+ kvmppc_gsm_reset(gsm);
+ return 0;
+}
+
+#endif /* _ASM_POWERPC_GUEST_STATE_BUFFER_H */
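The single-element helpers above compose the include/serialize/send and include/request/deserialize steps for one guest state ID at a time. A minimal usage sketch, assuming a guest state buffer and message have already been allocated (for example via kvmppc_gsm_new() declared above) and that KVMPPC_GSID_CTR is a registered element ID:

	static int example_sync_ctr(struct kvmppc_gs_buff *gsb,
				    struct kvmppc_gs_msg *gsm)
	{
		int rc;

		/* Include, serialize and send a single element to the L0. */
		rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_CTR);
		if (rc < 0)
			return rc;

		/*
		 * Request the same element back and let the message ops
		 * deserialize it from the returned buffer.
		 */
		return kvmppc_gsb_receive_datum(gsb, gsm, KVMPPC_GSID_CTR);
	}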
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index c099780385dd..ddb99e982917 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -100,6 +100,18 @@
#define H_COP_HW -74
#define H_STATE -75
#define H_IN_USE -77
+
+#define H_INVALID_ELEMENT_ID -79
+#define H_INVALID_ELEMENT_SIZE -80
+#define H_INVALID_ELEMENT_VALUE -81
+#define H_INPUT_BUFFER_NOT_DEFINED -82
+#define H_INPUT_BUFFER_TOO_SMALL -83
+#define H_OUTPUT_BUFFER_NOT_DEFINED -84
+#define H_OUTPUT_BUFFER_TOO_SMALL -85
+#define H_PARTITION_PAGE_TABLE_NOT_DEFINED -86
+#define H_GUEST_VCPU_STATE_NOT_HV_OWNED -87
+
+
#define H_UNSUPPORTED_FLAG_START -256
#define H_UNSUPPORTED_FLAG_END -511
#define H_MULTI_THREADS_ACTIVE -9005
@@ -381,6 +393,15 @@
#define H_ENTER_NESTED 0xF804
#define H_TLB_INVALIDATE 0xF808
#define H_COPY_TOFROM_GUEST 0xF80C
+#define H_GUEST_GET_CAPABILITIES 0x460
+#define H_GUEST_SET_CAPABILITIES 0x464
+#define H_GUEST_CREATE 0x470
+#define H_GUEST_CREATE_VCPU 0x474
+#define H_GUEST_GET_STATE 0x478
+#define H_GUEST_SET_STATE 0x47C
+#define H_GUEST_RUN_VCPU 0x480
+#define H_GUEST_COPY_MEMORY 0x484
+#define H_GUEST_DELETE 0x488
/* Flags for H_SVM_PAGE_IN */
#define H_PAGE_IN_SHARED 0x1
@@ -467,6 +488,15 @@
#define H_RPTI_PAGE_1G 0x08
#define H_RPTI_PAGE_ALL (-1UL)
+/* Flags for H_GUEST_{G,S}ET_STATE */
+#define H_GUEST_FLAGS_WIDE (1UL<<(63-0))
+
+/* Flag values used for H_GUEST_{G,S}ET_CAPABILITIES */
+#define H_GUEST_CAP_COPY_MEM (1UL<<(63-0))
+#define H_GUEST_CAP_POWER9 (1UL<<(63-1))
+#define H_GUEST_CAP_POWER10 (1UL<<(63-2))
+#define H_GUEST_CAP_BITMAP2 (1UL<<(63-63))
+
#ifndef __ASSEMBLY__
#include <linux/types.h>
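The new capability and flag bits above follow PAPR's MSB-0 bit numbering, so (1UL << (63 - n)) places "bit n" at the most-significant end of the 64-bit word. The following compile-time checks are illustrative only (they assume <linux/build_bug.h> for static_assert()) and simply spell out the resulting values:

	static_assert(H_GUEST_FLAGS_WIDE   == 0x8000000000000000UL);
	static_assert(H_GUEST_CAP_COPY_MEM == 0x8000000000000000UL);
	static_assert(H_GUEST_CAP_POWER9   == 0x4000000000000000UL);
	static_assert(H_GUEST_CAP_POWER10  == 0x2000000000000000UL);
	static_assert(H_GUEST_CAP_BITMAP2  == 0x0000000000000001UL);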
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index bbf5e2c5fe09..4f527d09c92b 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>
+#include <asm/guest-state-buffer.h>
struct kvmppc_bat {
u64 raw;
@@ -191,14 +192,14 @@ extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *gpte, bool data, bool iswrite);
extern void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
- unsigned int pshift, unsigned int lpid);
+ unsigned int pshift, u64 lpid);
extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
unsigned int shift,
const struct kvm_memory_slot *memslot,
- unsigned int lpid);
+ u64 lpid);
extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested,
bool writing, unsigned long gpa,
- unsigned int lpid);
+ u64 lpid);
extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
unsigned long gpa,
struct kvm_memory_slot *memslot,
@@ -207,7 +208,7 @@ extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
extern int kvmppc_init_vm_radix(struct kvm *kvm);
extern void kvmppc_free_radix(struct kvm *kvm);
extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
- unsigned int lpid);
+ u64 lpid);
extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void);
extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
@@ -295,12 +296,13 @@ static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
#endif
+extern unsigned long nested_capabilities;
long kvmhv_nested_init(void);
void kvmhv_nested_exit(void);
void kvmhv_vm_nested_init(struct kvm *kvm);
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu);
-void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
+void kvmhv_set_ptbl_entry(u64 lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
@@ -316,6 +318,69 @@ long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);
void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+
+extern struct static_key_false __kvmhv_is_nestedv2;
+
+static inline bool kvmhv_is_nestedv2(void)
+{
+ return static_branch_unlikely(&__kvmhv_is_nestedv2);
+}
+
+static inline bool kvmhv_is_nestedv1(void)
+{
+ return !static_branch_likely(&__kvmhv_is_nestedv2);
+}
+
+#else
+
+static inline bool kvmhv_is_nestedv2(void)
+{
+ return false;
+}
+
+static inline bool kvmhv_is_nestedv1(void)
+{
+ return false;
+}
+
+#endif
+
+int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs);
+int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs);
+int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden);
+int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden);
+
+static inline int kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu,
+ struct pt_regs *regs)
+{
+ if (kvmhv_is_nestedv2())
+ return __kvmhv_nestedv2_reload_ptregs(vcpu, regs);
+ return 0;
+}
+static inline int kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
+ struct pt_regs *regs)
+{
+ if (kvmhv_is_nestedv2())
+ return __kvmhv_nestedv2_mark_dirty_ptregs(vcpu, regs);
+ return 0;
+}
+
+static inline int kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
+{
+ if (kvmhv_is_nestedv2())
+ return __kvmhv_nestedv2_mark_dirty(vcpu, iden);
+ return 0;
+}
+
+static inline int kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
+{
+ if (kvmhv_is_nestedv2())
+ return __kvmhv_nestedv2_cached_reload(vcpu, iden);
+ return 0;
+}
+
extern int kvm_irq_bypass;
static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
@@ -335,60 +400,72 @@ static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
vcpu->arch.regs.gpr[num] = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(num));
}
static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_GPR(num)) < 0);
return vcpu->arch.regs.gpr[num];
}
static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
vcpu->arch.regs.ccr = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR);
}
static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CR) < 0);
return vcpu->arch.regs.ccr;
}
static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.regs.xer = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER);
}
static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_XER) < 0);
return vcpu->arch.regs.xer;
}
static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.regs.ctr = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR);
}
static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CTR) < 0);
return vcpu->arch.regs.ctr;
}
static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.regs.link = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR);
}
static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_LR) < 0);
return vcpu->arch.regs.link;
}
static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
vcpu->arch.regs.nip = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA);
}
static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_NIA) < 0);
return vcpu->arch.regs.nip;
}
@@ -403,10 +480,141 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
return vcpu->arch.fault_dar;
}
+static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i)
+{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0);
+ return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
+}
+
+static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
+{
+ vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i));
+}
+
+static inline u64 kvmppc_get_fpscr(struct kvm_vcpu *vcpu)
+{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_FPSCR) < 0);
+ return vcpu->arch.fp.fpscr;
+}
+
+static inline void kvmppc_set_fpscr(struct kvm_vcpu *vcpu, u64 val)
+{
+ vcpu->arch.fp.fpscr = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_FPSCR);
+}
+
+
+static inline u64 kvmppc_get_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j)
+{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0);
+ return vcpu->arch.fp.fpr[i][j];
+}
+
+static inline void kvmppc_set_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j,
+ u64 val)
+{
+ vcpu->arch.fp.fpr[i][j] = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i));
+}
+
+#ifdef CONFIG_ALTIVEC
+static inline void kvmppc_get_vsx_vr(struct kvm_vcpu *vcpu, int i, vector128 *v)
+{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(32 + i)) < 0);
+ *v = vcpu->arch.vr.vr[i];
+}
+
+static inline void kvmppc_set_vsx_vr(struct kvm_vcpu *vcpu, int i,
+ vector128 *val)
+{
+ vcpu->arch.vr.vr[i] = *val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(32 + i));
+}
+
+static inline u32 kvmppc_get_vscr(struct kvm_vcpu *vcpu)
+{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSCR) < 0);
+ return vcpu->arch.vr.vscr.u[3];
+}
+
+static inline void kvmppc_set_vscr(struct kvm_vcpu *vcpu, u32 val)
+{
+ vcpu->arch.vr.vscr.u[3] = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSCR);
+}
+#endif
+
+#define KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size, iden) \
+static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
+{ \
+ \
+ vcpu->arch.reg = val; \
+ kvmhv_nestedv2_mark_dirty(vcpu, iden); \
+}
+
+#define KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size, iden) \
+static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
+{ \
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0); \
+ return vcpu->arch.reg; \
+}
+
+#define KVMPPC_BOOK3S_VCPU_ACCESSOR(reg, size, iden) \
+ KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size, iden) \
+ KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size, iden) \
+
+KVMPPC_BOOK3S_VCPU_ACCESSOR(pid, 32, KVMPPC_GSID_PIDR)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(tar, 64, KVMPPC_GSID_TAR)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbhr, 64, KVMPPC_GSID_EBBHR)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbrr, 64, KVMPPC_GSID_EBBRR)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(bescr, 64, KVMPPC_GSID_BESCR)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(ic, 64, KVMPPC_GSID_IC)
+KVMPPC_BOOK3S_VCPU_ACCESSOR(vrsave, 64, KVMPPC_GSID_VRSAVE)
+
+
+#define KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size, iden) \
+static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
+{ \
+ vcpu->arch.vcore->reg = val; \
+ kvmhv_nestedv2_mark_dirty(vcpu, iden); \
+}
+
+#define KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size, iden) \
+static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
+{ \
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0); \
+ return vcpu->arch.vcore->reg; \
+}
+
+#define KVMPPC_BOOK3S_VCORE_ACCESSOR(reg, size, iden) \
+ KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size, iden) \
+ KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size, iden) \
+
+
+KVMPPC_BOOK3S_VCORE_ACCESSOR(vtb, 64, KVMPPC_GSID_VTB)
+KVMPPC_BOOK3S_VCORE_ACCESSOR(tb_offset, 64, KVMPPC_GSID_TB_OFFSET)
+KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(arch_compat, 32, KVMPPC_GSID_LOGICAL_PVR)
+KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(lpcr, 64, KVMPPC_GSID_LPCR)
+
+static inline u64 kvmppc_get_dec_expires(struct kvm_vcpu *vcpu)
+{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_TB_OFFSET) < 0);
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB) < 0);
+ return vcpu->arch.dec_expires;
+}
+
+static inline void kvmppc_set_dec_expires(struct kvm_vcpu *vcpu, u64 val)
+{
+ vcpu->arch.dec_expires = val;
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_TB_OFFSET) < 0);
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB);
+}
+
/* Expiry time of vcpu DEC relative to host TB */
static inline u64 kvmppc_dec_expires_host_tb(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.dec_expires - vcpu->arch.vcore->tb_offset;
+ return kvmppc_get_dec_expires(vcpu) - kvmppc_get_tb_offset(vcpu);
}
static inline bool is_kvmppc_resume_guest(int r)
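The KVMPPC_BOOK3S_VCPU_ACCESSOR and KVMPPC_BOOK3S_VCORE_ACCESSOR macros above stamp out get/set pairs that keep the nestedv2 guest state buffer coherent with the shadow fields. As an illustration only (modulo whitespace), KVMPPC_BOOK3S_VCORE_ACCESSOR(vtb, 64, KVMPPC_GSID_VTB) expands to roughly:

	static inline void kvmppc_set_vtb(struct kvm_vcpu *vcpu, u64 val)
	{
		/* Update the shadow copy and flag it for the next flush to L0. */
		vcpu->arch.vcore->vtb = val;
		kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VTB);
	}

	static inline u64 kvmppc_get_vtb(struct kvm_vcpu *vcpu)
	{
		/* Refresh the cached value from L0 before reading it. */
		WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VTB) < 0);
		return vcpu->arch.vcore->vtb;
	}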
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index d49065af08e9..2477021bff54 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -624,7 +624,7 @@ static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
unsigned long gpa, unsigned int level,
- unsigned long mmu_seq, unsigned int lpid,
+ unsigned long mmu_seq, u64 lpid,
unsigned long *rmapp, struct rmap_nested **n_rmap);
extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
struct rmap_nested **n_rmap);
@@ -677,6 +677,12 @@ static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
extern pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
unsigned long ea, unsigned *hshift);
+int kvmhv_nestedv2_vcpu_create(struct kvm_vcpu *vcpu, struct kvmhv_nestedv2_io *io);
+void kvmhv_nestedv2_vcpu_free(struct kvm_vcpu *vcpu, struct kvmhv_nestedv2_io *io);
+int kvmhv_nestedv2_flush_vcpu(struct kvm_vcpu *vcpu, u64 time_limit);
+int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1);
+int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu);
+
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index 0c3401b2e19e..7c3291aa8922 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -89,6 +89,16 @@ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
return vcpu->arch.regs.nip;
}
+static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
+{
+ vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
+}
+
+static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i)
+{
+ return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
+}
+
#ifdef CONFIG_BOOKE
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 14ee0dece853..8799b37be295 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -25,6 +25,7 @@
#include <asm/cacheflush.h>
#include <asm/hvcall.h>
#include <asm/mce.h>
+#include <asm/guest-state-buffer.h>
#define __KVM_HAVE_ARCH_VCPU_DEBUGFS
@@ -276,7 +277,7 @@ struct kvm_resize_hpt;
#define KVMPPC_SECURE_INIT_ABORT 0x4 /* H_SVM_INIT_ABORT issued */
struct kvm_arch {
- unsigned int lpid;
+ u64 lpid;
unsigned int smt_mode; /* # vcpus per virtual core */
unsigned int emul_smt_mode; /* emualted SMT mode, on P9 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -509,6 +510,23 @@ union xive_tma_w01 {
__be64 w01;
};
+ /* Nestedv2 H_GUEST_RUN_VCPU configuration */
+struct kvmhv_nestedv2_config {
+ struct kvmppc_gs_buff_info vcpu_run_output_cfg;
+ struct kvmppc_gs_buff_info vcpu_run_input_cfg;
+ u64 vcpu_run_output_size;
+};
+
+ /* Nestedv2 L1<->L0 communication state */
+struct kvmhv_nestedv2_io {
+ struct kvmhv_nestedv2_config cfg;
+ struct kvmppc_gs_buff *vcpu_run_output;
+ struct kvmppc_gs_buff *vcpu_run_input;
+ struct kvmppc_gs_msg *vcpu_message;
+ struct kvmppc_gs_msg *vcore_message;
+ struct kvmppc_gs_bitmap valids;
+};
+
struct kvm_vcpu_arch {
ulong host_stack;
u32 host_pid;
@@ -829,6 +847,8 @@ struct kvm_vcpu_arch {
u64 nested_hfscr; /* HFSCR that the L1 requested for the nested guest */
u32 nested_vcpu_id;
gpa_t nested_io_gpr;
+ /* For nested APIv2 guests */
+ struct kvmhv_nestedv2_io nestedv2_io;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index b4da8514af43..3281215097cc 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -615,6 +615,42 @@ static inline bool kvmhv_on_pseries(void)
{
return false;
}
+
+#endif
+
+#ifndef CONFIG_PPC_BOOK3S
+
+static inline bool kvmhv_is_nestedv2(void)
+{
+ return false;
+}
+
+static inline bool kvmhv_is_nestedv1(void)
+{
+ return false;
+}
+
+static inline int kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu,
+ struct pt_regs *regs)
+{
+ return 0;
+}
+static inline int kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
+ struct pt_regs *regs)
+{
+ return 0;
+}
+
+static inline int kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
+{
+ return 0;
+}
+
+static inline int kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
+{
+ return 0;
+}
+
#endif
#ifdef CONFIG_KVM_XICS
@@ -927,79 +963,85 @@ static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
#endif
}
-#define SPRNG_WRAPPER_GET(reg, bookehv_spr) \
+#define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_GET(reg, bookehv_spr) \
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
{ \
return mfspr(bookehv_spr); \
} \
-#define SPRNG_WRAPPER_SET(reg, bookehv_spr) \
+#define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_SET(reg, bookehv_spr) \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val) \
{ \
mtspr(bookehv_spr, val); \
} \
-#define SHARED_WRAPPER_GET(reg, size) \
+#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(reg, size, iden) \
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
{ \
+ if (iden) \
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0); \
if (kvmppc_shared_big_endian(vcpu)) \
- return be##size##_to_cpu(vcpu->arch.shared->reg); \
+ return be##size##_to_cpu((__be##size __force)vcpu->arch.shared->reg); \
else \
- return le##size##_to_cpu(vcpu->arch.shared->reg); \
+ return le##size##_to_cpu((__le##size __force)vcpu->arch.shared->reg); \
} \
-#define SHARED_WRAPPER_SET(reg, size) \
+#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR_SET(reg, size, iden) \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
{ \
if (kvmppc_shared_big_endian(vcpu)) \
- vcpu->arch.shared->reg = cpu_to_be##size(val); \
+ vcpu->arch.shared->reg = (u##size __force)cpu_to_be##size(val); \
else \
- vcpu->arch.shared->reg = cpu_to_le##size(val); \
+ vcpu->arch.shared->reg = (u##size __force)cpu_to_le##size(val); \
+ \
+ if (iden) \
+ kvmhv_nestedv2_mark_dirty(vcpu, iden); \
} \
-#define SHARED_WRAPPER(reg, size) \
- SHARED_WRAPPER_GET(reg, size) \
- SHARED_WRAPPER_SET(reg, size) \
+#define KVMPPC_VCPU_SHARED_REGS_ACCESSOR(reg, size, iden) \
+ KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(reg, size, iden) \
+ KVMPPC_VCPU_SHARED_REGS_ACCESSOR_SET(reg, size, iden) \
-#define SPRNG_WRAPPER(reg, bookehv_spr) \
- SPRNG_WRAPPER_GET(reg, bookehv_spr) \
- SPRNG_WRAPPER_SET(reg, bookehv_spr) \
+#define KVMPPC_BOOKE_HV_SPRNG_ACCESSOR(reg, bookehv_spr) \
+ KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_GET(reg, bookehv_spr) \
+ KVMPPC_BOOKE_HV_SPRNG_ACCESSOR_SET(reg, bookehv_spr) \
#ifdef CONFIG_KVM_BOOKE_HV
-#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr) \
- SPRNG_WRAPPER(reg, bookehv_spr) \
+#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr, iden) \
+ KVMPPC_BOOKE_HV_SPRNG_ACCESSOR(reg, bookehv_spr) \
#else
-#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr) \
- SHARED_WRAPPER(reg, size) \
+#define KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(reg, size, bookehv_spr, iden) \
+ KVMPPC_VCPU_SHARED_REGS_ACCESSOR(reg, size, iden) \
#endif
-SHARED_WRAPPER(critical, 64)
-SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
-SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
-SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
-SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
-SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
-SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
-SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
-SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
-SHARED_WRAPPER_GET(msr, 64)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(critical, 64, 0)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg0, 64, SPRN_GSPRG0, KVMPPC_GSID_SPRG0)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg1, 64, SPRN_GSPRG1, KVMPPC_GSID_SPRG1)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg2, 64, SPRN_GSPRG2, KVMPPC_GSID_SPRG2)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(sprg3, 64, SPRN_GSPRG3, KVMPPC_GSID_SPRG3)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr0, 64, SPRN_GSRR0, KVMPPC_GSID_SRR0)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(srr1, 64, SPRN_GSRR1, KVMPPC_GSID_SRR1)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(dar, 64, SPRN_GDEAR, KVMPPC_GSID_DAR)
+KVMPPC_BOOKE_HV_SPRNG_OR_VCPU_SHARED_REGS_ACCESSOR(esr, 64, SPRN_GESR, 0)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR_GET(msr, 64, KVMPPC_GSID_MSR)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
if (kvmppc_shared_big_endian(vcpu))
vcpu->arch.shared->msr = cpu_to_be64(val);
else
vcpu->arch.shared->msr = cpu_to_le64(val);
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_MSR);
}
-SHARED_WRAPPER(dsisr, 32)
-SHARED_WRAPPER(int_pending, 32)
-SHARED_WRAPPER(sprg4, 64)
-SHARED_WRAPPER(sprg5, 64)
-SHARED_WRAPPER(sprg6, 64)
-SHARED_WRAPPER(sprg7, 64)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(dsisr, 32, KVMPPC_GSID_DSISR)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(int_pending, 32, 0)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg4, 64, 0)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg5, 64, 0)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg6, 64, 0)
+KVMPPC_VCPU_SHARED_REGS_ACCESSOR(sprg7, 64, 0)
static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index fe3d0ea0058a..b3ee44a40c2f 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -6,6 +6,7 @@
#include <linux/string.h>
#include <linux/irqflags.h>
+#include <linux/delay.h>
#include <asm/hvcall.h>
#include <asm/paca.h>
@@ -343,6 +344,212 @@ static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
return rc;
}
+static inline long plpar_guest_create(unsigned long flags, unsigned long *guest_id)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ unsigned long token;
+ long rc;
+
+ token = -1UL;
+ do {
+ rc = plpar_hcall(H_GUEST_CREATE, retbuf, flags, token);
+ if (rc == H_SUCCESS)
+ *guest_id = retbuf[0];
+
+ if (rc == H_BUSY) {
+ token = retbuf[0];
+ cond_resched();
+ }
+
+ if (H_IS_LONG_BUSY(rc)) {
+ token = retbuf[0];
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ }
+
+ } while (rc == H_BUSY);
+
+ return rc;
+}
+
+static inline long plpar_guest_create_vcpu(unsigned long flags,
+ unsigned long guest_id,
+ unsigned long vcpu_id)
+{
+ long rc;
+
+ do {
+ rc = plpar_hcall_norets(H_GUEST_CREATE_VCPU, 0, guest_id, vcpu_id);
+
+ if (rc == H_BUSY)
+ cond_resched();
+
+ if (H_IS_LONG_BUSY(rc)) {
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ }
+
+ } while (rc == H_BUSY);
+
+ return rc;
+}
+
+static inline long plpar_guest_set_state(unsigned long flags,
+ unsigned long guest_id,
+ unsigned long vcpu_id,
+ unsigned long data_buffer,
+ unsigned long data_size,
+ unsigned long *failed_index)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ while (true) {
+ rc = plpar_hcall(H_GUEST_SET_STATE, retbuf, flags, guest_id,
+ vcpu_id, data_buffer, data_size);
+
+ if (rc == H_BUSY) {
+ cpu_relax();
+ continue;
+ }
+
+ if (H_IS_LONG_BUSY(rc)) {
+ mdelay(get_longbusy_msecs(rc));
+ continue;
+ }
+
+ if (rc == H_INVALID_ELEMENT_ID)
+ *failed_index = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_SIZE)
+ *failed_index = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_VALUE)
+ *failed_index = retbuf[0];
+
+ break;
+ }
+
+ return rc;
+}
+
+static inline long plpar_guest_get_state(unsigned long flags,
+ unsigned long guest_id,
+ unsigned long vcpu_id,
+ unsigned long data_buffer,
+ unsigned long data_size,
+ unsigned long *failed_index)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ while (true) {
+ rc = plpar_hcall(H_GUEST_GET_STATE, retbuf, flags, guest_id,
+ vcpu_id, data_buffer, data_size);
+
+ if (rc == H_BUSY) {
+ cpu_relax();
+ continue;
+ }
+
+ if (H_IS_LONG_BUSY(rc)) {
+ mdelay(get_longbusy_msecs(rc));
+ continue;
+ }
+
+ if (rc == H_INVALID_ELEMENT_ID)
+ *failed_index = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_SIZE)
+ *failed_index = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_VALUE)
+ *failed_index = retbuf[0];
+
+ break;
+ }
+
+ return rc;
+}
+
+static inline long plpar_guest_run_vcpu(unsigned long flags, unsigned long guest_id,
+ unsigned long vcpu_id, int *trap,
+ unsigned long *failed_index)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ rc = plpar_hcall(H_GUEST_RUN_VCPU, retbuf, flags, guest_id, vcpu_id);
+ if (rc == H_SUCCESS)
+ *trap = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_ID)
+ *failed_index = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_SIZE)
+ *failed_index = retbuf[0];
+ else if (rc == H_INVALID_ELEMENT_VALUE)
+ *failed_index = retbuf[0];
+
+ return rc;
+}
+
+static inline long plpar_guest_delete(unsigned long flags, u64 guest_id)
+{
+ long rc;
+
+ do {
+ rc = plpar_hcall_norets(H_GUEST_DELETE, flags, guest_id);
+ if (rc == H_BUSY)
+ cond_resched();
+
+ if (H_IS_LONG_BUSY(rc)) {
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ }
+
+ } while (rc == H_BUSY);
+
+ return rc;
+}
+
+static inline long plpar_guest_set_capabilities(unsigned long flags,
+ unsigned long capabilities)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ do {
+ rc = plpar_hcall(H_GUEST_SET_CAPABILITIES, retbuf, flags, capabilities);
+ if (rc == H_BUSY)
+ cond_resched();
+
+ if (H_IS_LONG_BUSY(rc)) {
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ }
+ } while (rc == H_BUSY);
+
+ return rc;
+}
+
+static inline long plpar_guest_get_capabilities(unsigned long flags,
+ unsigned long *capabilities)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ do {
+ rc = plpar_hcall(H_GUEST_GET_CAPABILITIES, retbuf, flags);
+ if (rc == H_BUSY)
+ cond_resched();
+
+ if (H_IS_LONG_BUSY(rc)) {
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ }
+ } while (rc == H_BUSY);
+
+ if (rc == H_SUCCESS)
+ *capabilities = retbuf[0];
+
+ return rc;
+}
+
/*
* Wrapper to H_RPT_INVALIDATE hcall that handles return values appropriately
*
@@ -355,7 +562,7 @@ static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
* error recovery of killing the process/guest will be eventually
* needed.
*/
-static inline long pseries_rpt_invalidate(u32 pid, u64 target, u64 type,
+static inline long pseries_rpt_invalidate(u64 pid, u64 target, u64 type,
u64 page_sizes, u64 start, u64 end)
{
long rc;
@@ -401,12 +608,68 @@ static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
return 0;
}
-static inline long pseries_rpt_invalidate(u32 pid, u64 target, u64 type,
+static inline long pseries_rpt_invalidate(u64 pid, u64 target, u64 type,
u64 page_sizes, u64 start, u64 end)
{
return 0;
}
+static inline long plpar_guest_create_vcpu(unsigned long flags,
+ unsigned long guest_id,
+ unsigned long vcpu_id)
+{
+ return 0;
+}
+
+static inline long plpar_guest_get_state(unsigned long flags,
+ unsigned long guest_id,
+ unsigned long vcpu_id,
+ unsigned long data_buffer,
+ unsigned long data_size,
+ unsigned long *failed_index)
+{
+ return 0;
+}
+
+static inline long plpar_guest_set_state(unsigned long flags,
+ unsigned long guest_id,
+ unsigned long vcpu_id,
+ unsigned long data_buffer,
+ unsigned long data_size,
+ unsigned long *failed_index)
+{
+ return 0;
+}
+
+static inline long plpar_guest_run_vcpu(unsigned long flags, unsigned long guest_id,
+ unsigned long vcpu_id, int *trap,
+ unsigned long *failed_index)
+{
+ return 0;
+}
+
+static inline long plpar_guest_create(unsigned long flags, unsigned long *guest_id)
+{
+ return 0;
+}
+
+static inline long plpar_guest_delete(unsigned long flags, u64 guest_id)
+{
+ return 0;
+}
+
+static inline long plpar_guest_get_capabilities(unsigned long flags,
+ unsigned long *capabilities)
+{
+ return 0;
+}
+
+static inline long plpar_guest_set_capabilities(unsigned long flags,
+ unsigned long capabilities)
+{
+ return 0;
+}
+
#endif /* CONFIG_PPC_PSERIES */
#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
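The plpar_guest_* wrappers above absorb the H_BUSY and long-busy retry protocol, so callers only see a final hcall return code. A hedged usage sketch (flags of 0 and vCPU id 0 are illustrative assumptions, not values taken from the patch) for creating an L2 guest context and backing out on failure:

	static long example_create_l2(unsigned long *guest_id)
	{
		long rc;

		rc = plpar_guest_create(0, guest_id);
		if (rc != H_SUCCESS)
			return rc;

		rc = plpar_guest_create_vcpu(0, *guest_id, 0);
		if (rc != H_SUCCESS)
			/* Undo the partial setup if vCPU creation failed. */
			plpar_guest_delete(0, *guest_id);

		return rc;
	}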
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 5319d889b184..4bd9d1230869 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -87,8 +87,12 @@ kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
book3s_hv_ras.o \
book3s_hv_builtin.o \
book3s_hv_p9_perf.o \
+ book3s_hv_nestedv2.o \
+ guest-state-buffer.o \
$(kvm-book3s_64-builtin-tm-objs-y) \
$(kvm-book3s_64-builtin-xics-objs-y)
+
+obj-$(CONFIG_GUEST_STATE_BUFFER_TEST) += test-guest-state-buffer.o
endif
kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 686d8d9eda3e..6cd20ab9e94e 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -565,7 +565,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
regs->msr = kvmppc_get_msr(vcpu);
regs->srr0 = kvmppc_get_srr0(vcpu);
regs->srr1 = kvmppc_get_srr1(vcpu);
- regs->pid = vcpu->arch.pid;
+ regs->pid = kvmppc_get_pid(vcpu);
regs->sprg0 = kvmppc_get_sprg0(vcpu);
regs->sprg1 = kvmppc_get_sprg1(vcpu);
regs->sprg2 = kvmppc_get_sprg2(vcpu);
@@ -636,17 +636,17 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
break;
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
i = id - KVM_REG_PPC_FPR0;
- *val = get_reg_val(id, VCPU_FPR(vcpu, i));
+ *val = get_reg_val(id, kvmppc_get_fpr(vcpu, i));
break;
case KVM_REG_PPC_FPSCR:
- *val = get_reg_val(id, vcpu->arch.fp.fpscr);
+ *val = get_reg_val(id, kvmppc_get_fpscr(vcpu));
break;
#ifdef CONFIG_VSX
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
i = id - KVM_REG_PPC_VSR0;
- val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
- val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
+ val->vsxval[0] = kvmppc_get_vsx_fpr(vcpu, i, 0);
+ val->vsxval[1] = kvmppc_get_vsx_fpr(vcpu, i, 1);
} else {
r = -ENXIO;
}
@@ -683,19 +683,19 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
*val = get_reg_val(id, vcpu->arch.fscr);
break;
case KVM_REG_PPC_TAR:
- *val = get_reg_val(id, vcpu->arch.tar);
+ *val = get_reg_val(id, kvmppc_get_tar(vcpu));
break;
case KVM_REG_PPC_EBBHR:
- *val = get_reg_val(id, vcpu->arch.ebbhr);
+ *val = get_reg_val(id, kvmppc_get_ebbhr(vcpu));
break;
case KVM_REG_PPC_EBBRR:
- *val = get_reg_val(id, vcpu->arch.ebbrr);
+ *val = get_reg_val(id, kvmppc_get_ebbrr(vcpu));
break;
case KVM_REG_PPC_BESCR:
- *val = get_reg_val(id, vcpu->arch.bescr);
+ *val = get_reg_val(id, kvmppc_get_bescr(vcpu));
break;
case KVM_REG_PPC_IC:
- *val = get_reg_val(id, vcpu->arch.ic);
+ *val = get_reg_val(id, kvmppc_get_ic(vcpu));
break;
default:
r = -EINVAL;
@@ -724,7 +724,7 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
break;
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
i = id - KVM_REG_PPC_FPR0;
- VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
+ kvmppc_set_fpr(vcpu, i, set_reg_val(id, *val));
break;
case KVM_REG_PPC_FPSCR:
vcpu->arch.fp.fpscr = set_reg_val(id, *val);
@@ -733,8 +733,8 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
i = id - KVM_REG_PPC_VSR0;
- vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
- vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
+ kvmppc_set_vsx_fpr(vcpu, i, 0, val->vsxval[0]);
+ kvmppc_set_vsx_fpr(vcpu, i, 1, val->vsxval[1]);
} else {
r = -ENXIO;
}
@@ -765,22 +765,22 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
break;
#endif /* CONFIG_KVM_XIVE */
case KVM_REG_PPC_FSCR:
- vcpu->arch.fscr = set_reg_val(id, *val);
+ vcpu->arch.fscr = set_reg_val(id, *val);
break;
case KVM_REG_PPC_TAR:
- vcpu->arch.tar = set_reg_val(id, *val);
+ kvmppc_set_tar(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_EBBHR:
- vcpu->arch.ebbhr = set_reg_val(id, *val);
+ kvmppc_set_ebbhr(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_EBBRR:
- vcpu->arch.ebbrr = set_reg_val(id, *val);
+ kvmppc_set_ebbrr(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_BESCR:
- vcpu->arch.bescr = set_reg_val(id, *val);
+ kvmppc_set_bescr(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_IC:
- vcpu->arch.ic = set_reg_val(id, *val);
+ kvmppc_set_ic(vcpu, set_reg_val(id, *val));
break;
default:
r = -EINVAL;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index efd0ebf70a5e..2b1f0cdd8c18 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -28,6 +28,7 @@
#include <asm/pte-walk.h>
#include "book3s.h"
+#include "book3s_hv.h"
#include "trace_hv.h"
//#define DEBUG_RESIZE_HPT 1
@@ -120,7 +121,7 @@ void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info)
kvm->arch.hpt = *info;
kvm->arch.sdr1 = __pa(info->virt) | (info->order - 18);
- pr_debug("KVM guest htab at %lx (order %ld), LPID %x\n",
+ pr_debug("KVM guest htab at %lx (order %ld), LPID %llx\n",
info->virt, (long)info->order, kvm->arch.lpid);
}
@@ -347,7 +348,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
unsigned long v, orig_v, gr;
__be64 *hptep;
long int index;
- int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
+ int virtmode = __kvmppc_get_msr_hv(vcpu) & (data ? MSR_DR : MSR_IR);
if (kvm_is_radix(vcpu->kvm))
return kvmppc_mmu_radix_xlate(vcpu, eaddr, gpte, data, iswrite);
@@ -385,7 +386,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
/* Get PP bits and key for permission check */
pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
- key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
+ key = (__kvmppc_get_msr_hv(vcpu) & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
key &= slb_v;
/* Calculate permissions */
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 572707858d65..175a8eb2681f 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -15,6 +15,7 @@
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
+#include "book3s_hv.h"
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgalloc.h>
@@ -96,7 +97,7 @@ static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
void *to, void *from, unsigned long n)
{
int lpid = vcpu->kvm->arch.lpid;
- int pid = vcpu->arch.pid;
+ int pid = kvmppc_get_pid(vcpu);
/* This would cause a data segment intr so don't allow the access */
if (eaddr & (0x3FFUL << 52))
@@ -270,7 +271,7 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
/* Work out effective PID */
switch (eaddr >> 62) {
case 0:
- pid = vcpu->arch.pid;
+ pid = kvmppc_get_pid(vcpu);
break;
case 3:
pid = 0;
@@ -294,9 +295,9 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
} else {
if (!(pte & _PAGE_PRIVILEGED)) {
/* Check AMR/IAMR to see if strict mode is in force */
- if (vcpu->arch.amr & (1ul << 62))
+ if (kvmppc_get_amr_hv(vcpu) & (1ul << 62))
gpte->may_read = 0;
- if (vcpu->arch.amr & (1ul << 63))
+ if (kvmppc_get_amr_hv(vcpu) & (1ul << 63))
gpte->may_write = 0;
if (vcpu->arch.iamr & (1ul << 62))
gpte->may_execute = 0;
@@ -307,7 +308,7 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
}
void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
- unsigned int pshift, unsigned int lpid)
+ unsigned int pshift, u64 lpid)
{
unsigned long psize = PAGE_SIZE;
int psi;
@@ -344,7 +345,7 @@ void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
}
-static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
+static void kvmppc_radix_flush_pwc(struct kvm *kvm, u64 lpid)
{
long rc;
@@ -417,7 +418,7 @@ static void kvmppc_pmd_free(pmd_t *pmdp)
void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
unsigned int shift,
const struct kvm_memory_slot *memslot,
- unsigned int lpid)
+ u64 lpid)
{
unsigned long old;
@@ -468,7 +469,7 @@ void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
* (or 4kB) mappings (of sub-pages of the same 2MB page).
*/
static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
- unsigned int lpid)
+ u64 lpid)
{
if (full) {
memset(pte, 0, sizeof(long) << RADIX_PTE_INDEX_SIZE);
@@ -489,7 +490,7 @@ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
}
static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
- unsigned int lpid)
+ u64 lpid)
{
unsigned long im;
pmd_t *p = pmd;
@@ -518,7 +519,7 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
}
static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
- unsigned int lpid)
+ u64 lpid)
{
unsigned long iu;
pud_t *p = pud;
@@ -539,7 +540,7 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
pud_free(kvm->mm, pud);
}
-void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid)
+void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, u64 lpid)
{
unsigned long ig;
@@ -566,7 +567,7 @@ void kvmppc_free_radix(struct kvm *kvm)
}
static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
- unsigned long gpa, unsigned int lpid)
+ unsigned long gpa, u64 lpid)
{
pte_t *pte = pte_offset_kernel(pmd, 0);
@@ -582,7 +583,7 @@ static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
}
static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
- unsigned long gpa, unsigned int lpid)
+ unsigned long gpa, u64 lpid)
{
pmd_t *pmd = pmd_offset(pud, 0);
@@ -608,7 +609,7 @@ static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
unsigned long gpa, unsigned int level,
- unsigned long mmu_seq, unsigned int lpid,
+ unsigned long mmu_seq, u64 lpid,
unsigned long *rmapp, struct rmap_nested **n_rmap)
{
pgd_t *pgd;
@@ -785,7 +786,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
}
bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing,
- unsigned long gpa, unsigned int lpid)
+ unsigned long gpa, u64 lpid)
{
unsigned long pgflags;
unsigned int shift;
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 15200d766fc5..14c6d7e318da 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -786,12 +786,12 @@ long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
idx = (ioba >> stt->page_shift) - stt->offset;
page = stt->pages[idx / TCES_PER_PAGE];
if (!page) {
- vcpu->arch.regs.gpr[4] = 0;
+ kvmppc_set_gpr(vcpu, 4, 0);
return H_SUCCESS;
}
tbl = (u64 *)page_address(page);
- vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];
+ kvmppc_set_gpr(vcpu, 4, tbl[idx % TCES_PER_PAGE]);
return H_SUCCESS;
}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 130bafdb1430..1ed6ec140701 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -393,7 +393,7 @@ static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
{
- unsigned long host_pcr_bit = 0, guest_pcr_bit = 0;
+ unsigned long host_pcr_bit = 0, guest_pcr_bit = 0, cap = 0;
struct kvmppc_vcore *vc = vcpu->arch.vcore;
/* We can (emulate) our own architecture version and anything older */
@@ -424,9 +424,11 @@ static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
break;
case PVR_ARCH_300:
guest_pcr_bit = PCR_ARCH_300;
+ cap = H_GUEST_CAP_POWER9;
break;
case PVR_ARCH_31:
guest_pcr_bit = PCR_ARCH_31;
+ cap = H_GUEST_CAP_POWER10;
break;
default:
return -EINVAL;
@@ -437,8 +439,14 @@ static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
if (guest_pcr_bit > host_pcr_bit)
return -EINVAL;
+ if (kvmhv_on_pseries() && kvmhv_is_nestedv2()) {
+ if (!(cap & nested_capabilities))
+ return -EINVAL;
+ }
+
spin_lock(&vc->lock);
vc->arch_compat = arch_compat;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LOGICAL_PVR);
/*
* Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit
* Also set all reserved PCR bits
@@ -794,7 +802,7 @@ static void kvmppc_update_vpa_dispatch(struct kvm_vcpu *vcpu,
vpa->enqueue_dispatch_tb = cpu_to_be64(be64_to_cpu(vpa->enqueue_dispatch_tb) + stolen);
- __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now + vc->tb_offset, stolen);
+ __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now + kvmppc_get_tb_offset(vcpu), stolen);
vcpu->arch.vpa.dirty = true;
}
@@ -845,9 +853,9 @@ static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu)
static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
+ if (kvmppc_get_arch_compat(vcpu) >= PVR_ARCH_207)
return true;
- if ((!vcpu->arch.vcore->arch_compat) &&
+ if ((!kvmppc_get_arch_compat(vcpu)) &&
cpu_has_feature(CPU_FTR_ARCH_207S))
return true;
return false;
@@ -868,7 +876,7 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
/* Guests can't breakpoint the hypervisor */
if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
return H_P3;
- vcpu->arch.ciabr = value1;
+ kvmppc_set_ciabr_hv(vcpu, value1);
return H_SUCCESS;
case H_SET_MODE_RESOURCE_SET_DAWR0:
if (!kvmppc_power8_compatible(vcpu))
@@ -879,8 +887,8 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
return H_UNSUPPORTED_FLAG_START;
if (value2 & DABRX_HYP)
return H_P4;
- vcpu->arch.dawr0 = value1;
- vcpu->arch.dawrx0 = value2;
+ kvmppc_set_dawr0_hv(vcpu, value1);
+ kvmppc_set_dawrx0_hv(vcpu, value2);
return H_SUCCESS;
case H_SET_MODE_RESOURCE_SET_DAWR1:
if (!kvmppc_power8_compatible(vcpu))
@@ -895,8 +903,8 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
return H_UNSUPPORTED_FLAG_START;
if (value2 & DABRX_HYP)
return H_P4;
- vcpu->arch.dawr1 = value1;
- vcpu->arch.dawrx1 = value2;
+ kvmppc_set_dawr1_hv(vcpu, value1);
+ kvmppc_set_dawrx1_hv(vcpu, value2);
return H_SUCCESS;
case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
/*
@@ -1267,10 +1275,14 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
return RESUME_HOST;
break;
#endif
- case H_RANDOM:
- if (!arch_get_random_seed_longs(&vcpu->arch.regs.gpr[4], 1))
+ case H_RANDOM: {
+ unsigned long rand;
+
+ if (!arch_get_random_seed_longs(&rand, 1))
ret = H_HARDWARE;
+ kvmppc_set_gpr(vcpu, 4, rand);
break;
+ }
case H_RPT_INVALIDATE:
ret = kvmppc_h_rpt_invalidate(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
@@ -1370,7 +1382,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
*/
static void kvmppc_cede(struct kvm_vcpu *vcpu)
{
- vcpu->arch.shregs.msr |= MSR_EE;
+ __kvmppc_set_msr_hv(vcpu, __kvmppc_get_msr_hv(vcpu) | MSR_EE);
vcpu->arch.ceded = 1;
smp_mb();
if (vcpu->arch.prodded) {
@@ -1544,7 +1556,7 @@ static int kvmppc_pmu_unavailable(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.hfscr_permitted & HFSCR_PM))
return EMULATE_FAIL;
- vcpu->arch.hfscr |= HFSCR_PM;
+ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_PM);
return RESUME_GUEST;
}
@@ -1554,7 +1566,7 @@ static int kvmppc_ebb_unavailable(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.hfscr_permitted & HFSCR_EBB))
return EMULATE_FAIL;
- vcpu->arch.hfscr |= HFSCR_EBB;
+ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_EBB);
return RESUME_GUEST;
}
@@ -1564,7 +1576,7 @@ static int kvmppc_tm_unavailable(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.hfscr_permitted & HFSCR_TM))
return EMULATE_FAIL;
- vcpu->arch.hfscr |= HFSCR_TM;
+ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_TM);
return RESUME_GUEST;
}
@@ -1585,7 +1597,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
* That can happen due to a bug, or due to a machine check
* occurring at just the wrong time.
*/
- if (vcpu->arch.shregs.msr & MSR_HV) {
+ if (__kvmppc_get_msr_hv(vcpu) & MSR_HV) {
printk(KERN_EMERG "KVM trap in HV mode!\n");
printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
vcpu->arch.trap, kvmppc_get_pc(vcpu),
@@ -1636,7 +1648,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
* so that it knows that the machine check occurred.
*/
if (!vcpu->kvm->arch.fwnmi_enabled) {
- ulong flags = (vcpu->arch.shregs.msr & 0x083c0000) |
+ ulong flags = (__kvmppc_get_msr_hv(vcpu) & 0x083c0000) |
(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
kvmppc_core_queue_machine_check(vcpu, flags);
r = RESUME_GUEST;
@@ -1666,7 +1678,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
* as a result of a hypervisor emulation interrupt
* (e40) getting turned into a 700 by BML RTAS.
*/
- flags = (vcpu->arch.shregs.msr & 0x1f0000ull) |
+ flags = (__kvmppc_get_msr_hv(vcpu) & 0x1f0000ull) |
(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
kvmppc_core_queue_program(vcpu, flags);
r = RESUME_GUEST;
@@ -1676,7 +1688,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
{
int i;
- if (unlikely(vcpu->arch.shregs.msr & MSR_PR)) {
+ if (unlikely(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
/*
* Guest userspace executed sc 1. This can only be
* reached by the P9 path because the old path
@@ -1754,7 +1766,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
break;
}
- if (!(vcpu->arch.shregs.msr & MSR_DR))
+ if (!(__kvmppc_get_msr_hv(vcpu) & MSR_DR))
vsid = vcpu->kvm->arch.vrma_slb_v;
else
vsid = vcpu->arch.fault_gpa;
@@ -1778,7 +1790,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
long err;
vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
- vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
+ vcpu->arch.fault_dsisr = __kvmppc_get_msr_hv(vcpu) &
DSISR_SRR1_MATCH_64S;
if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
/*
@@ -1787,7 +1799,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
* hash fault handling below is v3 only (it uses ASDR
* via fault_gpa).
*/
- if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
+ if (__kvmppc_get_msr_hv(vcpu) & HSRR1_HISI_WRITE)
vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
r = RESUME_PAGE_FAULT;
break;
@@ -1801,7 +1813,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
break;
}
- if (!(vcpu->arch.shregs.msr & MSR_IR))
+ if (!(__kvmppc_get_msr_hv(vcpu) & MSR_IR))
vsid = vcpu->kvm->arch.vrma_slb_v;
else
vsid = vcpu->arch.fault_gpa;
@@ -1863,7 +1875,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
* Otherwise, we just generate a program interrupt to the guest.
*/
case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: {
- u64 cause = vcpu->arch.hfscr >> 56;
+ u64 cause = kvmppc_get_hfscr_hv(vcpu) >> 56;
r = EMULATE_FAIL;
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
@@ -1891,7 +1903,7 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
kvmppc_dump_regs(vcpu);
printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
vcpu->arch.trap, kvmppc_get_pc(vcpu),
- vcpu->arch.shregs.msr);
+ __kvmppc_get_msr_hv(vcpu));
run->hw.hardware_exit_reason = vcpu->arch.trap;
r = RESUME_HOST;
break;
@@ -1915,11 +1927,11 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
* That can happen due to a bug, or due to a machine check
* occurring at just the wrong time.
*/
- if (vcpu->arch.shregs.msr & MSR_HV) {
+ if (__kvmppc_get_msr_hv(vcpu) & MSR_HV) {
pr_emerg("KVM trap in HV mode while nested!\n");
pr_emerg("trap=0x%x | pc=0x%lx | msr=0x%llx\n",
vcpu->arch.trap, kvmppc_get_pc(vcpu),
- vcpu->arch.shregs.msr);
+ __kvmppc_get_msr_hv(vcpu));
kvmppc_dump_regs(vcpu);
return RESUME_HOST;
}
@@ -1976,7 +1988,7 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) &
DSISR_SRR1_MATCH_64S;
- if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
+ if (__kvmppc_get_msr_hv(vcpu) & HSRR1_HISI_WRITE)
vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvmhv_nested_page_fault(vcpu);
@@ -2183,6 +2195,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
}
vc->lpcr = new_lpcr;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LPCR);
spin_unlock(&vc->lock);
}
@@ -2207,64 +2220,64 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
*val = get_reg_val(id, vcpu->arch.dabrx);
break;
case KVM_REG_PPC_DSCR:
- *val = get_reg_val(id, vcpu->arch.dscr);
+ *val = get_reg_val(id, kvmppc_get_dscr_hv(vcpu));
break;
case KVM_REG_PPC_PURR:
- *val = get_reg_val(id, vcpu->arch.purr);
+ *val = get_reg_val(id, kvmppc_get_purr_hv(vcpu));
break;
case KVM_REG_PPC_SPURR:
- *val = get_reg_val(id, vcpu->arch.spurr);
+ *val = get_reg_val(id, kvmppc_get_spurr_hv(vcpu));
break;
case KVM_REG_PPC_AMR:
- *val = get_reg_val(id, vcpu->arch.amr);
+ *val = get_reg_val(id, kvmppc_get_amr_hv(vcpu));
break;
case KVM_REG_PPC_UAMOR:
- *val = get_reg_val(id, vcpu->arch.uamor);
+ *val = get_reg_val(id, kvmppc_get_uamor_hv(vcpu));
break;
case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
i = id - KVM_REG_PPC_MMCR0;
- *val = get_reg_val(id, vcpu->arch.mmcr[i]);
+ *val = get_reg_val(id, kvmppc_get_mmcr_hv(vcpu, i));
break;
case KVM_REG_PPC_MMCR2:
- *val = get_reg_val(id, vcpu->arch.mmcr[2]);
+ *val = get_reg_val(id, kvmppc_get_mmcr_hv(vcpu, 2));
break;
case KVM_REG_PPC_MMCRA:
- *val = get_reg_val(id, vcpu->arch.mmcra);
+ *val = get_reg_val(id, kvmppc_get_mmcra_hv(vcpu));
break;
case KVM_REG_PPC_MMCRS:
*val = get_reg_val(id, vcpu->arch.mmcrs);
break;
case KVM_REG_PPC_MMCR3:
- *val = get_reg_val(id, vcpu->arch.mmcr[3]);
+ *val = get_reg_val(id, kvmppc_get_mmcr_hv(vcpu, 3));
break;
case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
i = id - KVM_REG_PPC_PMC1;
- *val = get_reg_val(id, vcpu->arch.pmc[i]);
+ *val = get_reg_val(id, kvmppc_get_pmc_hv(vcpu, i));
break;
case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
i = id - KVM_REG_PPC_SPMC1;
*val = get_reg_val(id, vcpu->arch.spmc[i]);
break;
case KVM_REG_PPC_SIAR:
- *val = get_reg_val(id, vcpu->arch.siar);
+ *val = get_reg_val(id, kvmppc_get_siar_hv(vcpu));
break;
case KVM_REG_PPC_SDAR:
- *val = get_reg_val(id, vcpu->arch.sdar);
+ *val = get_reg_val(id, kvmppc_get_sdar_hv(vcpu));
break;
case KVM_REG_PPC_SIER:
- *val = get_reg_val(id, vcpu->arch.sier[0]);
+ *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 0));
break;
case KVM_REG_PPC_SIER2:
- *val = get_reg_val(id, vcpu->arch.sier[1]);
+ *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 1));
break;
case KVM_REG_PPC_SIER3:
- *val = get_reg_val(id, vcpu->arch.sier[2]);
+ *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 2));
break;
case KVM_REG_PPC_IAMR:
- *val = get_reg_val(id, vcpu->arch.iamr);
+ *val = get_reg_val(id, kvmppc_get_iamr_hv(vcpu));
break;
case KVM_REG_PPC_PSPB:
- *val = get_reg_val(id, vcpu->arch.pspb);
+ *val = get_reg_val(id, kvmppc_get_pspb_hv(vcpu));
break;
case KVM_REG_PPC_DPDES:
/*
@@ -2279,22 +2292,22 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
*val = get_reg_val(id, vcpu->arch.vcore->dpdes);
break;
case KVM_REG_PPC_VTB:
- *val = get_reg_val(id, vcpu->arch.vcore->vtb);
+ *val = get_reg_val(id, kvmppc_get_vtb(vcpu));
break;
case KVM_REG_PPC_DAWR:
- *val = get_reg_val(id, vcpu->arch.dawr0);
+ *val = get_reg_val(id, kvmppc_get_dawr0_hv(vcpu));
break;
case KVM_REG_PPC_DAWRX:
- *val = get_reg_val(id, vcpu->arch.dawrx0);
+ *val = get_reg_val(id, kvmppc_get_dawrx0_hv(vcpu));
break;
case KVM_REG_PPC_DAWR1:
- *val = get_reg_val(id, vcpu->arch.dawr1);
+ *val = get_reg_val(id, kvmppc_get_dawr1_hv(vcpu));
break;
case KVM_REG_PPC_DAWRX1:
- *val = get_reg_val(id, vcpu->arch.dawrx1);
+ *val = get_reg_val(id, kvmppc_get_dawrx1_hv(vcpu));
break;
case KVM_REG_PPC_CIABR:
- *val = get_reg_val(id, vcpu->arch.ciabr);
+ *val = get_reg_val(id, kvmppc_get_ciabr_hv(vcpu));
break;
case KVM_REG_PPC_CSIGR:
*val = get_reg_val(id, vcpu->arch.csigr);
@@ -2306,13 +2319,13 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
*val = get_reg_val(id, vcpu->arch.tcscr);
break;
case KVM_REG_PPC_PID:
- *val = get_reg_val(id, vcpu->arch.pid);
+ *val = get_reg_val(id, kvmppc_get_pid(vcpu));
break;
case KVM_REG_PPC_ACOP:
*val = get_reg_val(id, vcpu->arch.acop);
break;
case KVM_REG_PPC_WORT:
- *val = get_reg_val(id, vcpu->arch.wort);
+ *val = get_reg_val(id, kvmppc_get_wort_hv(vcpu));
break;
case KVM_REG_PPC_TIDR:
*val = get_reg_val(id, vcpu->arch.tid);
@@ -2338,14 +2351,14 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
spin_unlock(&vcpu->arch.vpa_update_lock);
break;
case KVM_REG_PPC_TB_OFFSET:
- *val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
+ *val = get_reg_val(id, kvmppc_get_tb_offset(vcpu));
break;
case KVM_REG_PPC_LPCR:
case KVM_REG_PPC_LPCR_64:
- *val = get_reg_val(id, vcpu->arch.vcore->lpcr);
+ *val = get_reg_val(id, kvmppc_get_lpcr(vcpu));
break;
case KVM_REG_PPC_PPR:
- *val = get_reg_val(id, vcpu->arch.ppr);
+ *val = get_reg_val(id, kvmppc_get_ppr_hv(vcpu));
break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
case KVM_REG_PPC_TFHAR:
@@ -2414,10 +2427,10 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
break;
#endif
case KVM_REG_PPC_ARCH_COMPAT:
- *val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
+ *val = get_reg_val(id, kvmppc_get_arch_compat(vcpu));
break;
case KVM_REG_PPC_DEC_EXPIRY:
- *val = get_reg_val(id, vcpu->arch.dec_expires);
+ *val = get_reg_val(id, kvmppc_get_dec_expires(vcpu));
break;
case KVM_REG_PPC_ONLINE:
*val = get_reg_val(id, vcpu->arch.online);
@@ -2425,6 +2438,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_PTCR:
*val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr);
break;
+ case KVM_REG_PPC_FSCR:
+ *val = get_reg_val(id, kvmppc_get_fscr_hv(vcpu));
+ break;
default:
r = -EINVAL;
break;
@@ -2453,29 +2469,29 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
break;
case KVM_REG_PPC_DSCR:
- vcpu->arch.dscr = set_reg_val(id, *val);
+ kvmppc_set_dscr_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_PURR:
- vcpu->arch.purr = set_reg_val(id, *val);
+ kvmppc_set_purr_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_SPURR:
- vcpu->arch.spurr = set_reg_val(id, *val);
+ kvmppc_set_spurr_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_AMR:
- vcpu->arch.amr = set_reg_val(id, *val);
+ kvmppc_set_amr_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_UAMOR:
- vcpu->arch.uamor = set_reg_val(id, *val);
+ kvmppc_set_uamor_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1:
i = id - KVM_REG_PPC_MMCR0;
- vcpu->arch.mmcr[i] = set_reg_val(id, *val);
+ kvmppc_set_mmcr_hv(vcpu, i, set_reg_val(id, *val));
break;
case KVM_REG_PPC_MMCR2:
- vcpu->arch.mmcr[2] = set_reg_val(id, *val);
+ kvmppc_set_mmcr_hv(vcpu, 2, set_reg_val(id, *val));
break;
case KVM_REG_PPC_MMCRA:
- vcpu->arch.mmcra = set_reg_val(id, *val);
+ kvmppc_set_mmcra_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_MMCRS:
vcpu->arch.mmcrs = set_reg_val(id, *val);
@@ -2485,32 +2501,32 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
break;
case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
i = id - KVM_REG_PPC_PMC1;
- vcpu->arch.pmc[i] = set_reg_val(id, *val);
+ kvmppc_set_pmc_hv(vcpu, i, set_reg_val(id, *val));
break;
case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
i = id - KVM_REG_PPC_SPMC1;
vcpu->arch.spmc[i] = set_reg_val(id, *val);
break;
case KVM_REG_PPC_SIAR:
- vcpu->arch.siar = set_reg_val(id, *val);
+ kvmppc_set_siar_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_SDAR:
- vcpu->arch.sdar = set_reg_val(id, *val);
+ kvmppc_set_sdar_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_SIER:
- vcpu->arch.sier[0] = set_reg_val(id, *val);
+ kvmppc_set_sier_hv(vcpu, 0, set_reg_val(id, *val));
break;
case KVM_REG_PPC_SIER2:
- vcpu->arch.sier[1] = set_reg_val(id, *val);
+ kvmppc_set_sier_hv(vcpu, 1, set_reg_val(id, *val));
break;
case KVM_REG_PPC_SIER3:
- vcpu->arch.sier[2] = set_reg_val(id, *val);
+ kvmppc_set_sier_hv(vcpu, 2, set_reg_val(id, *val));
break;
case KVM_REG_PPC_IAMR:
- vcpu->arch.iamr = set_reg_val(id, *val);
+ kvmppc_set_iamr_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_PSPB:
- vcpu->arch.pspb = set_reg_val(id, *val);
+ kvmppc_set_pspb_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_DPDES:
if (cpu_has_feature(CPU_FTR_ARCH_300))
@@ -2519,25 +2535,25 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
break;
case KVM_REG_PPC_VTB:
- vcpu->arch.vcore->vtb = set_reg_val(id, *val);
+ kvmppc_set_vtb(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_DAWR:
- vcpu->arch.dawr0 = set_reg_val(id, *val);
+ kvmppc_set_dawr0_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_DAWRX:
- vcpu->arch.dawrx0 = set_reg_val(id, *val) & ~DAWRX_HYP;
+ kvmppc_set_dawrx0_hv(vcpu, set_reg_val(id, *val) & ~DAWRX_HYP);
break;
case KVM_REG_PPC_DAWR1:
- vcpu->arch.dawr1 = set_reg_val(id, *val);
+ kvmppc_set_dawr1_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_DAWRX1:
- vcpu->arch.dawrx1 = set_reg_val(id, *val) & ~DAWRX_HYP;
+ kvmppc_set_dawrx1_hv(vcpu, set_reg_val(id, *val) & ~DAWRX_HYP);
break;
case KVM_REG_PPC_CIABR:
- vcpu->arch.ciabr = set_reg_val(id, *val);
+ kvmppc_set_ciabr_hv(vcpu, set_reg_val(id, *val));
/* Don't allow setting breakpoints in hypervisor code */
- if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
- vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */
+ if ((kvmppc_get_ciabr_hv(vcpu) & CIABR_PRIV) == CIABR_PRIV_HYPER)
+ kvmppc_set_ciabr_hv(vcpu, kvmppc_get_ciabr_hv(vcpu) & ~CIABR_PRIV);
break;
case KVM_REG_PPC_CSIGR:
vcpu->arch.csigr = set_reg_val(id, *val);
@@ -2549,13 +2565,13 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
vcpu->arch.tcscr = set_reg_val(id, *val);
break;
case KVM_REG_PPC_PID:
- vcpu->arch.pid = set_reg_val(id, *val);
+ kvmppc_set_pid(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_ACOP:
vcpu->arch.acop = set_reg_val(id, *val);
break;
case KVM_REG_PPC_WORT:
- vcpu->arch.wort = set_reg_val(id, *val);
+ kvmppc_set_wort_hv(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_TIDR:
vcpu->arch.tid = set_reg_val(id, *val);
@@ -2602,10 +2618,11 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
* decrementer, which is better than a large one that
* causes a hang.
*/
- if (!vcpu->arch.dec_expires && tb_offset)
- vcpu->arch.dec_expires = get_tb() + tb_offset;
+ kvmppc_set_tb_offset(vcpu, tb_offset);
+ if (!kvmppc_get_dec_expires(vcpu) && tb_offset)
+ kvmppc_set_dec_expires(vcpu, get_tb() + tb_offset);
- vcpu->arch.vcore->tb_offset = tb_offset;
+ kvmppc_set_tb_offset(vcpu, tb_offset);
break;
}
case KVM_REG_PPC_LPCR:
@@ -2615,7 +2632,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
break;
case KVM_REG_PPC_PPR:
- vcpu->arch.ppr = set_reg_val(id, *val);
+ kvmppc_set_ppr_hv(vcpu, set_reg_val(id, *val));
break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
case KVM_REG_PPC_TFHAR:
@@ -2686,7 +2703,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_DEC_EXPIRY:
- vcpu->arch.dec_expires = set_reg_val(id, *val);
+ kvmppc_set_dec_expires(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_ONLINE:
i = set_reg_val(id, *val);
@@ -2699,6 +2716,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_PTCR:
vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val);
break;
+ case KVM_REG_PPC_FSCR:
+ kvmppc_set_fscr_hv(vcpu, set_reg_val(id, *val));
+ break;
default:
r = -EINVAL;
break;
@@ -2916,19 +2936,26 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
vcpu->arch.shared_big_endian = false;
#endif
#endif
- vcpu->arch.mmcr[0] = MMCR0_FC;
+
+ if (kvmhv_is_nestedv2()) {
+ err = kvmhv_nestedv2_vcpu_create(vcpu, &vcpu->arch.nestedv2_io);
+ if (err < 0)
+ return err;
+ }
+
+ kvmppc_set_mmcr_hv(vcpu, 0, MMCR0_FC);
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
- vcpu->arch.mmcr[0] |= MMCR0_PMCCEXT;
- vcpu->arch.mmcra = MMCRA_BHRB_DISABLE;
+ kvmppc_set_mmcr_hv(vcpu, 0, kvmppc_get_mmcr_hv(vcpu, 0) | MMCR0_PMCCEXT);
+ kvmppc_set_mmcra_hv(vcpu, MMCRA_BHRB_DISABLE);
}
- vcpu->arch.ctrl = CTRL_RUNLATCH;
+ kvmppc_set_ctrl_hv(vcpu, CTRL_RUNLATCH);
/* default to host PVR, since we can't spoof it */
kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
spin_lock_init(&vcpu->arch.vpa_update_lock);
spin_lock_init(&vcpu->arch.tbacct_lock);
vcpu->arch.busy_preempt = TB_NIL;
- vcpu->arch.shregs.msr = MSR_ME;
+ __kvmppc_set_msr_hv(vcpu, MSR_ME);
vcpu->arch.intr_msr = MSR_SF | MSR_ME;
/*
@@ -2938,29 +2965,30 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
* don't set the HFSCR_MSGP bit, and that causes those instructions
* to trap and then we emulate them.
*/
- vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
- HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP;
+ kvmppc_set_hfscr_hv(vcpu, HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
+ HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP);
/* On POWER10 and later, allow prefixed instructions */
if (cpu_has_feature(CPU_FTR_ARCH_31))
- vcpu->arch.hfscr |= HFSCR_PREFIX;
+ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_PREFIX);
if (cpu_has_feature(CPU_FTR_HVMODE)) {
- vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
+ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) & mfspr(SPRN_HFSCR));
+
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
- vcpu->arch.hfscr |= HFSCR_TM;
+ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_TM);
#endif
}
if (cpu_has_feature(CPU_FTR_TM_COMP))
vcpu->arch.hfscr |= HFSCR_TM;
- vcpu->arch.hfscr_permitted = vcpu->arch.hfscr;
+ vcpu->arch.hfscr_permitted = kvmppc_get_hfscr_hv(vcpu);
/*
* PM, EBB, TM are demand-faulted so start with it clear.
*/
- vcpu->arch.hfscr &= ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM);
+ kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) & ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM));
kvmppc_mmu_book3s_hv_init(vcpu);
@@ -3071,6 +3099,8 @@ static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
spin_unlock(&vcpu->arch.vpa_update_lock);
+ if (kvmhv_is_nestedv2())
+ kvmhv_nestedv2_vcpu_free(vcpu, &vcpu->arch.nestedv2_io);
}
static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
@@ -4035,10 +4065,58 @@ static void vcpu_vpa_increment_dispatch(struct kvm_vcpu *vcpu)
}
}
+static int kvmhv_vcpu_entry_nestedv2(struct kvm_vcpu *vcpu, u64 time_limit,
+ unsigned long lpcr, u64 *tb)
+{
+ struct kvmhv_nestedv2_io *io;
+ unsigned long msr, i;
+ int trap;
+ long rc;
+
+ io = &vcpu->arch.nestedv2_io;
+
+ msr = mfmsr();
+ kvmppc_msr_hard_disable_set_facilities(vcpu, msr);
+ if (lazy_irq_pending())
+ return 0;
+
+ rc = kvmhv_nestedv2_flush_vcpu(vcpu, time_limit);
+ if (rc < 0)
+ return -EINVAL;
+
+ accumulate_time(vcpu, &vcpu->arch.in_guest);
+ rc = plpar_guest_run_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id,
+ &trap, &i);
+
+ if (rc != H_SUCCESS) {
+ pr_err("KVM Guest Run VCPU hcall failed\n");
+ if (rc == H_INVALID_ELEMENT_ID)
+ pr_err("KVM: Guest Run VCPU invalid element id at %ld\n", i);
+ else if (rc == H_INVALID_ELEMENT_SIZE)
+ pr_err("KVM: Guest Run VCPU invalid element size at %ld\n", i);
+ else if (rc == H_INVALID_ELEMENT_VALUE)
+ pr_err("KVM: Guest Run VCPU invalid element value at %ld\n", i);
+ return -EINVAL;
+ }
+ accumulate_time(vcpu, &vcpu->arch.guest_exit);
+
+ *tb = mftb();
+ kvmppc_gsm_reset(io->vcpu_message);
+ kvmppc_gsm_reset(io->vcore_message);
+ kvmppc_gsbm_zero(&io->valids);
+
+ rc = kvmhv_nestedv2_parse_output(vcpu);
+ if (rc < 0)
+ return -EINVAL;
+
+ timer_rearm_host_dec(*tb);
+
+ return trap;
+}
+
/* call our hypervisor to load up HV regs and go */
static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb)
{
- struct kvmppc_vcore *vc = vcpu->arch.vcore;
unsigned long host_psscr;
unsigned long msr;
struct hv_guest_state hvregs;
@@ -4118,7 +4196,7 @@ static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, uns
if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
dec = (s32) dec;
*tb = mftb();
- vcpu->arch.dec_expires = dec + (*tb + vc->tb_offset);
+ vcpu->arch.dec_expires = dec + (*tb + kvmppc_get_tb_offset(vcpu));
timer_rearm_host_dec(*tb);
@@ -4153,7 +4231,10 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu_vpa_increment_dispatch(vcpu);
if (kvmhv_on_pseries()) {
- trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb);
+ if (kvmhv_is_nestedv1())
+ trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb);
+ else
+ trap = kvmhv_vcpu_entry_nestedv2(vcpu, time_limit, lpcr, tb);
/* H_CEDE has to be handled now, not later */
if (trap == BOOK3S_INTERRUPT_SYSCALL && !nested &&
@@ -4176,7 +4257,7 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
__this_cpu_write(cpu_in_guest, NULL);
if (trap == BOOK3S_INTERRUPT_SYSCALL &&
- !(vcpu->arch.shregs.msr & MSR_PR)) {
+ !(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
unsigned long req = kvmppc_get_gpr(vcpu, 3);
/*
@@ -4655,7 +4736,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
if (!nested) {
kvmppc_core_prepare_to_enter(vcpu);
- if (vcpu->arch.shregs.msr & MSR_EE) {
+ if (__kvmppc_get_msr_hv(vcpu) & MSR_EE) {
if (xive_interrupt_pending(vcpu))
kvmppc_inject_interrupt_hv(vcpu,
BOOK3S_INTERRUPT_EXTERNAL, 0);
@@ -4677,7 +4758,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
tb = mftb();
- kvmppc_update_vpa_dispatch_p9(vcpu, vc, tb + vc->tb_offset);
+ kvmppc_update_vpa_dispatch_p9(vcpu, vc, tb + kvmppc_get_tb_offset(vcpu));
trace_kvm_guest_enter(vcpu);
@@ -4844,7 +4925,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
msr |= MSR_VSX;
if ((cpu_has_feature(CPU_FTR_TM) ||
cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
- (vcpu->arch.hfscr & HFSCR_TM))
+ (kvmppc_get_hfscr_hv(vcpu) & HFSCR_TM))
msr |= MSR_TM;
msr = msr_check_and_set(msr);
@@ -4868,7 +4949,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
accumulate_time(vcpu, &vcpu->arch.hcall);
- if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_PR)) {
+ if (WARN_ON_ONCE(__kvmppc_get_msr_hv(vcpu) & MSR_PR)) {
/*
* These should have been caught reflected
* into the guest by now. Final sanity check:
@@ -5133,6 +5214,14 @@ void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
if (++cores_done >= kvm->arch.online_vcores)
break;
}
+
+ if (kvmhv_is_nestedv2()) {
+ struct kvm_vcpu *vcpu;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LPCR);
+ }
+ }
}
void kvmppc_setup_partition_table(struct kvm *kvm)
@@ -5399,15 +5488,43 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
/* Allocate the guest's logical partition ID */
- lpid = kvmppc_alloc_lpid();
- if ((long)lpid < 0)
- return -ENOMEM;
- kvm->arch.lpid = lpid;
+ if (!kvmhv_is_nestedv2()) {
+ lpid = kvmppc_alloc_lpid();
+ if ((long)lpid < 0)
+ return -ENOMEM;
+ kvm->arch.lpid = lpid;
+ }
kvmppc_alloc_host_rm_ops();
kvmhv_vm_nested_init(kvm);
+ if (kvmhv_is_nestedv2()) {
+ long rc;
+ unsigned long guest_id;
+
+ rc = plpar_guest_create(0, &guest_id);
+
+ if (rc != H_SUCCESS)
+ pr_err("KVM: Create Guest hcall failed, rc=%ld\n", rc);
+
+ switch (rc) {
+ case H_PARAMETER:
+ case H_FUNCTION:
+ case H_STATE:
+ return -EINVAL;
+ case H_NOT_ENOUGH_RESOURCES:
+ case H_ABORTED:
+ return -ENOMEM;
+ case H_AUTHORITY:
+ return -EPERM;
+ case H_NOT_AVAILABLE:
+ return -EBUSY;
+ }
+ kvm->arch.lpid = guest_id;
+ }
+
+
/*
* Since we don't flush the TLB when tearing down a VM,
* and this lpid might have previously been used,
@@ -5477,7 +5594,10 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
lpcr |= LPCR_HAIL;
ret = kvmppc_init_vm_radix(kvm);
if (ret) {
- kvmppc_free_lpid(kvm->arch.lpid);
+ if (kvmhv_is_nestedv2())
+ plpar_guest_delete(0, kvm->arch.lpid);
+ else
+ kvmppc_free_lpid(kvm->arch.lpid);
return ret;
}
kvmppc_setup_partition_table(kvm);
@@ -5567,10 +5687,14 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
kvm->arch.process_table = 0;
if (kvm->arch.secure_guest)
uv_svm_terminate(kvm->arch.lpid);
- kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
+ if (!kvmhv_is_nestedv2())
+ kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
}
- kvmppc_free_lpid(kvm->arch.lpid);
+ if (kvmhv_is_nestedv2())
+ plpar_guest_delete(0, kvm->arch.lpid);
+ else
+ kvmppc_free_lpid(kvm->arch.lpid);
kvmppc_free_pimap(kvm);
}
@@ -5982,6 +6106,8 @@ static int kvmhv_enable_nested(struct kvm *kvm)
return -ENODEV;
if (!radix_enabled())
return -ENODEV;
+ if (kvmhv_is_nestedv2())
+ return -ENODEV;
/* kvm == NULL means the caller is testing if the capability exists */
if (kvm)
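
The KVM_REG_PPC_FSCR cases added to kvmppc_get_one_reg_hv()/kvmppc_set_one_reg_hv() above expose the FSCR to userspace through the standard one-reg interface. A minimal, hypothetical caller is sketched below (the vcpu_fd descriptor and surrounding error handling are assumed, not part of this patch):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Read the guest FSCR via the one-reg API; vcpu_fd is an open KVM vcpu fd. */
    static int get_fscr(int vcpu_fd, uint64_t *fscr)
    {
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_PPC_FSCR,
                    .addr = (uintptr_t)fscr,
            };

            return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
    }
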
diff --git a/arch/powerpc/kvm/book3s_hv.h b/arch/powerpc/kvm/book3s_hv.h
index 2f2e59d7d433..47b2c815641e 100644
--- a/arch/powerpc/kvm/book3s_hv.h
+++ b/arch/powerpc/kvm/book3s_hv.h
@@ -3,6 +3,8 @@
/*
* Privileged (non-hypervisor) host registers to save.
*/
+#include "asm/guest-state-buffer.h"
+
struct p9_host_os_sprs {
unsigned long iamr;
unsigned long amr;
@@ -50,3 +52,77 @@ void accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next);
#define start_timing(vcpu, next) do {} while (0)
#define end_timing(vcpu) do {} while (0)
#endif
+
+static inline void __kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 val)
+{
+ vcpu->arch.shregs.msr = val;
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_MSR);
+}
+
+static inline u64 __kvmppc_get_msr_hv(struct kvm_vcpu *vcpu)
+{
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_MSR) < 0);
+ return vcpu->arch.shregs.msr;
+}
+
+#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size, iden) \
+static inline void kvmppc_set_##reg ##_hv(struct kvm_vcpu *vcpu, u##size val) \
+{ \
+ vcpu->arch.reg = val; \
+ kvmhv_nestedv2_mark_dirty(vcpu, iden); \
+}
+
+#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size, iden) \
+static inline u##size kvmppc_get_##reg ##_hv(struct kvm_vcpu *vcpu) \
+{ \
+ kvmhv_nestedv2_cached_reload(vcpu, iden); \
+ return vcpu->arch.reg; \
+}
+
+#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(reg, size, iden) \
+ KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size, iden) \
+ KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size, iden) \
+
+#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size, iden) \
+static inline void kvmppc_set_##reg ##_hv(struct kvm_vcpu *vcpu, int i, u##size val) \
+{ \
+ vcpu->arch.reg[i] = val; \
+ kvmhv_nestedv2_mark_dirty(vcpu, iden(i)); \
+}
+
+#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size, iden) \
+static inline u##size kvmppc_get_##reg ##_hv(struct kvm_vcpu *vcpu, int i) \
+{ \
+ WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden(i)) < 0); \
+ return vcpu->arch.reg[i]; \
+}
+
+#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(reg, size, iden) \
+ KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size, iden) \
+ KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size, iden) \
+
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(mmcra, 64, KVMPPC_GSID_MMCRA)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(hfscr, 64, KVMPPC_GSID_HFSCR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(fscr, 64, KVMPPC_GSID_FSCR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dscr, 64, KVMPPC_GSID_DSCR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(purr, 64, KVMPPC_GSID_PURR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(spurr, 64, KVMPPC_GSID_SPURR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(amr, 64, KVMPPC_GSID_AMR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(uamor, 64, KVMPPC_GSID_UAMOR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(siar, 64, KVMPPC_GSID_SIAR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(sdar, 64, KVMPPC_GSID_SDAR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(iamr, 64, KVMPPC_GSID_IAMR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr0, 64, KVMPPC_GSID_DAWR0)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr1, 64, KVMPPC_GSID_DAWR1)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx0, 64, KVMPPC_GSID_DAWRX0)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx1, 64, KVMPPC_GSID_DAWRX1)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ciabr, 64, KVMPPC_GSID_CIABR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(wort, 64, KVMPPC_GSID_WORT)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ppr, 64, KVMPPC_GSID_PPR)
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ctrl, 64, KVMPPC_GSID_CTRL);
+
+KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(mmcr, 64, KVMPPC_GSID_MMCR)
+KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(sier, 64, KVMPPC_GSID_SIER)
+KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(pmc, 32, KVMPPC_GSID_PMC)
+
+KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(pspb, 32, KVMPPC_GSID_PSPB)
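
The accessor macros above are the heart of the conversion: every kvmppc_set_*_hv() write marks the matching guest state ID dirty, and every kvmppc_get_*_hv() read triggers a cached reload from the L0 host first. As a rough illustration (an expansion sketch, not part of the patch), the DSCR instance generates:

    static inline void kvmppc_set_dscr_hv(struct kvm_vcpu *vcpu, u64 val)
    {
            vcpu->arch.dscr = val;
            kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_DSCR);
    }

    static inline u64 kvmppc_get_dscr_hv(struct kvm_vcpu *vcpu)
    {
            kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_DSCR);
            return vcpu->arch.dscr;
    }
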
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 0f5b021fa559..fa0e3a22cac0 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -32,6 +32,7 @@
#include "book3s_xics.h"
#include "book3s_xive.h"
+#include "book3s_hv.h"
/*
* Hash page table alignment on newer cpus(CPU_FTR_ARCH_206)
@@ -182,9 +183,13 @@ EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
long kvmppc_rm_h_random(struct kvm_vcpu *vcpu)
{
+ unsigned long rand;
+
if (ppc_md.get_random_seed &&
- ppc_md.get_random_seed(&vcpu->arch.regs.gpr[4]))
+ ppc_md.get_random_seed(&rand)) {
+ kvmppc_set_gpr(vcpu, 4, rand);
return H_SUCCESS;
+ }
return H_HARDWARE;
}
@@ -510,7 +515,7 @@ void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
*/
if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
msr &= ~MSR_TS_MASK;
- vcpu->arch.shregs.msr = msr;
+ __kvmppc_set_msr_hv(vcpu, msr);
kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv);
@@ -548,7 +553,7 @@ static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
kvmppc_set_srr0(vcpu, pc);
kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
kvmppc_set_pc(vcpu, new_pc);
- vcpu->arch.shregs.msr = new_msr;
+ __kvmppc_set_msr_hv(vcpu, new_msr);
}
void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 377d0b4a05ee..3b658b8696bc 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -428,10 +428,12 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
return vcpu->arch.trap;
}
+unsigned long nested_capabilities;
+
long kvmhv_nested_init(void)
{
long int ptb_order;
- unsigned long ptcr;
+ unsigned long ptcr, host_capabilities;
long rc;
if (!kvmhv_on_pseries())
@@ -439,6 +441,29 @@ long kvmhv_nested_init(void)
if (!radix_enabled())
return -ENODEV;
+ rc = plpar_guest_get_capabilities(0, &host_capabilities);
+ if (rc == H_SUCCESS) {
+ unsigned long capabilities = 0;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ capabilities |= H_GUEST_CAP_POWER10;
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ capabilities |= H_GUEST_CAP_POWER9;
+
+ nested_capabilities = capabilities & host_capabilities;
+ rc = plpar_guest_set_capabilities(0, nested_capabilities);
+ if (rc != H_SUCCESS) {
+ pr_err("kvm-hv: Could not configure parent hypervisor capabilities (rc=%ld)",
+ rc);
+ return -ENODEV;
+ }
+
+ static_branch_enable(&__kvmhv_is_nestedv2);
+ return 0;
+ }
+
+ pr_info("kvm-hv: nestedv2 get capabilities hcall failed, falling back to nestedv1 (rc=%ld)\n",
+ rc);
/* Partition table entry is 1<<4 bytes in size, hence the 4. */
ptb_order = KVM_MAX_NESTED_GUESTS_SHIFT + 4;
/* Minimum partition table size is 1<<12 bytes */
@@ -478,7 +503,7 @@ void kvmhv_nested_exit(void)
}
}
-static void kvmhv_flush_lpid(unsigned int lpid)
+static void kvmhv_flush_lpid(u64 lpid)
{
long rc;
@@ -500,17 +525,22 @@ static void kvmhv_flush_lpid(unsigned int lpid)
pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
}
-void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1)
+void kvmhv_set_ptbl_entry(u64 lpid, u64 dw0, u64 dw1)
{
if (!kvmhv_on_pseries()) {
mmu_partition_table_set_entry(lpid, dw0, dw1, true);
return;
}
- pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
- pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
- /* L0 will do the necessary barriers */
- kvmhv_flush_lpid(lpid);
+ if (kvmhv_is_nestedv1()) {
+ pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
+ pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
+ /* L0 will do the necessary barriers */
+ kvmhv_flush_lpid(lpid);
+ }
+
+ if (kvmhv_is_nestedv2())
+ kvmhv_nestedv2_set_ptbl_entry(lpid, dw0, dw1);
}
static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
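
Note that the kvmhv_is_nestedv1()/kvmhv_is_nestedv2() predicates used throughout this series are defined in kvm_book3s_64.h (changed elsewhere in this merge, not shown in this excerpt). Judging from the __kvmhv_is_nestedv2 static key enabled above, they presumably reduce to something like the following sketch (assumed, not quoted from the patch):

    static inline bool kvmhv_is_nestedv2(void)
    {
            return static_branch_unlikely(&__kvmhv_is_nestedv2);
    }

    static inline bool kvmhv_is_nestedv1(void)
    {
            /* Only meaningful when running as an L1 under an L0, i.e. kvmhv_on_pseries() */
            return !kvmhv_is_nestedv2();
    }
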
diff --git a/arch/powerpc/kvm/book3s_hv_nestedv2.c b/arch/powerpc/kvm/book3s_hv_nestedv2.c
new file mode 100644
index 000000000000..fd3c4f2d9480
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_nestedv2.c
@@ -0,0 +1,994 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Jordan Niethe, IBM Corp. <jniethe5@gmail.com>
+ *
+ * Authors:
+ * Jordan Niethe <jniethe5@gmail.com>
+ *
+ * Description: KVM functions specific to running on Book 3S
+ * processors as a NESTEDv2 guest.
+ *
+ */
+
+#include "linux/blk-mq.h"
+#include "linux/console.h"
+#include "linux/gfp_types.h"
+#include "linux/signal.h"
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/pgtable.h>
+
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/hvcall.h>
+#include <asm/pgalloc.h>
+#include <asm/reg.h>
+#include <asm/plpar_wrappers.h>
+#include <asm/guest-state-buffer.h>
+#include "trace_hv.h"
+
+struct static_key_false __kvmhv_is_nestedv2 __read_mostly;
+EXPORT_SYMBOL_GPL(__kvmhv_is_nestedv2);
+
+
+static size_t
+gs_msg_ops_kvmhv_nestedv2_config_get_size(struct kvmppc_gs_msg *gsm)
+{
+ u16 ids[] = {
+ KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE,
+ KVMPPC_GSID_RUN_INPUT,
+ KVMPPC_GSID_RUN_OUTPUT,
+
+ };
+ size_t size = 0;
+
+ for (int i = 0; i < ARRAY_SIZE(ids); i++)
+ size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i]));
+ return size;
+}
+
+static int
+gs_msg_ops_kvmhv_nestedv2_config_fill_info(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm)
+{
+ struct kvmhv_nestedv2_config *cfg;
+ int rc;
+
+ cfg = gsm->data;
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE)) {
+ rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE,
+ cfg->vcpu_run_output_size);
+ if (rc < 0)
+ return rc;
+ }
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_INPUT)) {
+ rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_INPUT,
+ cfg->vcpu_run_input_cfg);
+ if (rc < 0)
+ return rc;
+ }
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT)) {
+ rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_OUTPUT,
+ cfg->vcpu_run_output_cfg);
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int
+gs_msg_ops_kvmhv_nestedv2_config_refresh_info(struct kvmppc_gs_msg *gsm,
+ struct kvmppc_gs_buff *gsb)
+{
+ struct kvmhv_nestedv2_config *cfg;
+ struct kvmppc_gs_parser gsp = { 0 };
+ struct kvmppc_gs_elem *gse;
+ int rc;
+
+ cfg = gsm->data;
+
+ rc = kvmppc_gse_parse(&gsp, gsb);
+ if (rc < 0)
+ return rc;
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE);
+ if (gse)
+ cfg->vcpu_run_output_size = kvmppc_gse_get_u64(gse);
+ return 0;
+}
+
+static struct kvmppc_gs_msg_ops config_msg_ops = {
+ .get_size = gs_msg_ops_kvmhv_nestedv2_config_get_size,
+ .fill_info = gs_msg_ops_kvmhv_nestedv2_config_fill_info,
+ .refresh_info = gs_msg_ops_kvmhv_nestedv2_config_refresh_info,
+};
+
+static size_t gs_msg_ops_vcpu_get_size(struct kvmppc_gs_msg *gsm)
+{
+ struct kvmppc_gs_bitmap gsbm = { 0 };
+ size_t size = 0;
+ u16 iden;
+
+ kvmppc_gsbm_fill(&gsbm);
+ kvmppc_gsbm_for_each(&gsbm, iden)
+ {
+ switch (iden) {
+ case KVMPPC_GSID_HOST_STATE_SIZE:
+ case KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE:
+ case KVMPPC_GSID_PARTITION_TABLE:
+ case KVMPPC_GSID_PROCESS_TABLE:
+ case KVMPPC_GSID_RUN_INPUT:
+ case KVMPPC_GSID_RUN_OUTPUT:
+ break;
+ default:
+ size += kvmppc_gse_total_size(kvmppc_gsid_size(iden));
+ }
+ }
+ return size;
+}
+
+static int gs_msg_ops_vcpu_fill_info(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm)
+{
+ struct kvm_vcpu *vcpu;
+ vector128 v;
+ int rc, i;
+ u16 iden;
+
+ vcpu = gsm->data;
+
+ kvmppc_gsm_for_each(gsm, iden)
+ {
+ rc = 0;
+
+ if ((gsm->flags & KVMPPC_GS_FLAGS_WIDE) !=
+ (kvmppc_gsid_flags(iden) & KVMPPC_GS_FLAGS_WIDE))
+ continue;
+
+ switch (iden) {
+ case KVMPPC_GSID_DSCR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dscr);
+ break;
+ case KVMPPC_GSID_MMCRA:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcra);
+ break;
+ case KVMPPC_GSID_HFSCR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hfscr);
+ break;
+ case KVMPPC_GSID_PURR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.purr);
+ break;
+ case KVMPPC_GSID_SPURR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.spurr);
+ break;
+ case KVMPPC_GSID_AMR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.amr);
+ break;
+ case KVMPPC_GSID_UAMOR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.uamor);
+ break;
+ case KVMPPC_GSID_SIAR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.siar);
+ break;
+ case KVMPPC_GSID_SDAR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sdar);
+ break;
+ case KVMPPC_GSID_IAMR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.iamr);
+ break;
+ case KVMPPC_GSID_DAWR0:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr0);
+ break;
+ case KVMPPC_GSID_DAWR1:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr1);
+ break;
+ case KVMPPC_GSID_DAWRX0:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx0);
+ break;
+ case KVMPPC_GSID_DAWRX1:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx1);
+ break;
+ case KVMPPC_GSID_CIABR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ciabr);
+ break;
+ case KVMPPC_GSID_WORT:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.wort);
+ break;
+ case KVMPPC_GSID_PPR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ppr);
+ break;
+ case KVMPPC_GSID_PSPB:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pspb);
+ break;
+ case KVMPPC_GSID_TAR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.tar);
+ break;
+ case KVMPPC_GSID_FSCR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fscr);
+ break;
+ case KVMPPC_GSID_EBBHR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbhr);
+ break;
+ case KVMPPC_GSID_EBBRR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbrr);
+ break;
+ case KVMPPC_GSID_BESCR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.bescr);
+ break;
+ case KVMPPC_GSID_IC:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ic);
+ break;
+ case KVMPPC_GSID_CTRL:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ctrl);
+ break;
+ case KVMPPC_GSID_PIDR:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pid);
+ break;
+ case KVMPPC_GSID_AMOR: {
+ u64 amor = ~0;
+
+ rc = kvmppc_gse_put_u64(gsb, iden, amor);
+ break;
+ }
+ case KVMPPC_GSID_VRSAVE:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.vrsave);
+ break;
+ case KVMPPC_GSID_MMCR(0)... KVMPPC_GSID_MMCR(3):
+ i = iden - KVMPPC_GSID_MMCR(0);
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcr[i]);
+ break;
+ case KVMPPC_GSID_SIER(0)... KVMPPC_GSID_SIER(2):
+ i = iden - KVMPPC_GSID_SIER(0);
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sier[i]);
+ break;
+ case KVMPPC_GSID_PMC(0)... KVMPPC_GSID_PMC(5):
+ i = iden - KVMPPC_GSID_PMC(0);
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pmc[i]);
+ break;
+ case KVMPPC_GSID_GPR(0)... KVMPPC_GSID_GPR(31):
+ i = iden - KVMPPC_GSID_GPR(0);
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.regs.gpr[i]);
+ break;
+ case KVMPPC_GSID_CR:
+ rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.regs.ccr);
+ break;
+ case KVMPPC_GSID_XER:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.xer);
+ break;
+ case KVMPPC_GSID_CTR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.ctr);
+ break;
+ case KVMPPC_GSID_LR:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.regs.link);
+ break;
+ case KVMPPC_GSID_NIA:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.nip);
+ break;
+ case KVMPPC_GSID_SRR0:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.srr0);
+ break;
+ case KVMPPC_GSID_SRR1:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.srr1);
+ break;
+ case KVMPPC_GSID_SPRG0:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.sprg0);
+ break;
+ case KVMPPC_GSID_SPRG1:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.sprg1);
+ break;
+ case KVMPPC_GSID_SPRG2:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.sprg2);
+ break;
+ case KVMPPC_GSID_SPRG3:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.sprg3);
+ break;
+ case KVMPPC_GSID_DAR:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.dar);
+ break;
+ case KVMPPC_GSID_DSISR:
+ rc = kvmppc_gse_put_u32(gsb, iden,
+ vcpu->arch.shregs.dsisr);
+ break;
+ case KVMPPC_GSID_MSR:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.shregs.msr);
+ break;
+ case KVMPPC_GSID_VTB:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.vcore->vtb);
+ break;
+ case KVMPPC_GSID_LPCR:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.vcore->lpcr);
+ break;
+ case KVMPPC_GSID_TB_OFFSET:
+ rc = kvmppc_gse_put_u64(gsb, iden,
+ vcpu->arch.vcore->tb_offset);
+ break;
+ case KVMPPC_GSID_FPSCR:
+ rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fp.fpscr);
+ break;
+ case KVMPPC_GSID_VSRS(0)... KVMPPC_GSID_VSRS(31):
+ i = iden - KVMPPC_GSID_VSRS(0);
+ memcpy(&v, &vcpu->arch.fp.fpr[i],
+ sizeof(vcpu->arch.fp.fpr[i]));
+ rc = kvmppc_gse_put_vector128(gsb, iden, &v);
+ break;
+#ifdef CONFIG_VSX
+ case KVMPPC_GSID_VSCR:
+ rc = kvmppc_gse_put_u32(gsb, iden,
+ vcpu->arch.vr.vscr.u[3]);
+ break;
+ case KVMPPC_GSID_VSRS(32)... KVMPPC_GSID_VSRS(63):
+ i = iden - KVMPPC_GSID_VSRS(32);
+ rc = kvmppc_gse_put_vector128(gsb, iden,
+ &vcpu->arch.vr.vr[i]);
+ break;
+#endif
+ case KVMPPC_GSID_DEC_EXPIRY_TB: {
+ u64 dw;
+
+ dw = vcpu->arch.dec_expires -
+ vcpu->arch.vcore->tb_offset;
+ rc = kvmppc_gse_put_u64(gsb, iden, dw);
+ break;
+ }
+ case KVMPPC_GSID_LOGICAL_PVR:
+ rc = kvmppc_gse_put_u32(gsb, iden,
+ vcpu->arch.vcore->arch_compat);
+ break;
+ }
+
+ if (rc < 0)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int gs_msg_ops_vcpu_refresh_info(struct kvmppc_gs_msg *gsm,
+ struct kvmppc_gs_buff *gsb)
+{
+ struct kvmppc_gs_parser gsp = { 0 };
+ struct kvmhv_nestedv2_io *io;
+ struct kvmppc_gs_bitmap *valids;
+ struct kvm_vcpu *vcpu;
+ struct kvmppc_gs_elem *gse;
+ vector128 v;
+ int rc, i;
+ u16 iden;
+
+ vcpu = gsm->data;
+
+ rc = kvmppc_gse_parse(&gsp, gsb);
+ if (rc < 0)
+ return rc;
+
+ io = &vcpu->arch.nestedv2_io;
+ valids = &io->valids;
+
+ kvmppc_gsp_for_each(&gsp, iden, gse)
+ {
+ switch (iden) {
+ case KVMPPC_GSID_DSCR:
+ vcpu->arch.dscr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_MMCRA:
+ vcpu->arch.mmcra = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_HFSCR:
+ vcpu->arch.hfscr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_PURR:
+ vcpu->arch.purr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SPURR:
+ vcpu->arch.spurr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_AMR:
+ vcpu->arch.amr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_UAMOR:
+ vcpu->arch.uamor = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SIAR:
+ vcpu->arch.siar = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SDAR:
+ vcpu->arch.sdar = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_IAMR:
+ vcpu->arch.iamr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_DAWR0:
+ vcpu->arch.dawr0 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_DAWR1:
+ vcpu->arch.dawr1 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_DAWRX0:
+ vcpu->arch.dawrx0 = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_DAWRX1:
+ vcpu->arch.dawrx1 = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_CIABR:
+ vcpu->arch.ciabr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_WORT:
+ vcpu->arch.wort = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_PPR:
+ vcpu->arch.ppr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_PSPB:
+ vcpu->arch.pspb = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_TAR:
+ vcpu->arch.tar = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_FSCR:
+ vcpu->arch.fscr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_EBBHR:
+ vcpu->arch.ebbhr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_EBBRR:
+ vcpu->arch.ebbrr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_BESCR:
+ vcpu->arch.bescr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_IC:
+ vcpu->arch.ic = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_CTRL:
+ vcpu->arch.ctrl = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_PIDR:
+ vcpu->arch.pid = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_AMOR:
+ break;
+ case KVMPPC_GSID_VRSAVE:
+ vcpu->arch.vrsave = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_MMCR(0)... KVMPPC_GSID_MMCR(3):
+ i = iden - KVMPPC_GSID_MMCR(0);
+ vcpu->arch.mmcr[i] = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SIER(0)... KVMPPC_GSID_SIER(2):
+ i = iden - KVMPPC_GSID_SIER(0);
+ vcpu->arch.sier[i] = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_PMC(0)... KVMPPC_GSID_PMC(5):
+ i = iden - KVMPPC_GSID_PMC(0);
+ vcpu->arch.pmc[i] = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_GPR(0)... KVMPPC_GSID_GPR(31):
+ i = iden - KVMPPC_GSID_GPR(0);
+ vcpu->arch.regs.gpr[i] = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_CR:
+ vcpu->arch.regs.ccr = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_XER:
+ vcpu->arch.regs.xer = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_CTR:
+ vcpu->arch.regs.ctr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_LR:
+ vcpu->arch.regs.link = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_NIA:
+ vcpu->arch.regs.nip = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SRR0:
+ vcpu->arch.shregs.srr0 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SRR1:
+ vcpu->arch.shregs.srr1 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SPRG0:
+ vcpu->arch.shregs.sprg0 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SPRG1:
+ vcpu->arch.shregs.sprg1 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SPRG2:
+ vcpu->arch.shregs.sprg2 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_SPRG3:
+ vcpu->arch.shregs.sprg3 = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_DAR:
+ vcpu->arch.shregs.dar = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_DSISR:
+ vcpu->arch.shregs.dsisr = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_MSR:
+ vcpu->arch.shregs.msr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_VTB:
+ vcpu->arch.vcore->vtb = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_LPCR:
+ vcpu->arch.vcore->lpcr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_TB_OFFSET:
+ vcpu->arch.vcore->tb_offset = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_FPSCR:
+ vcpu->arch.fp.fpscr = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_VSRS(0)... KVMPPC_GSID_VSRS(31):
+ kvmppc_gse_get_vector128(gse, &v);
+ i = iden - KVMPPC_GSID_VSRS(0);
+ memcpy(&vcpu->arch.fp.fpr[i], &v,
+ sizeof(vcpu->arch.fp.fpr[i]));
+ break;
+#ifdef CONFIG_VSX
+ case KVMPPC_GSID_VSCR:
+ vcpu->arch.vr.vscr.u[3] = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_VSRS(32)... KVMPPC_GSID_VSRS(63):
+ i = iden - KVMPPC_GSID_VSRS(32);
+ kvmppc_gse_get_vector128(gse, &vcpu->arch.vr.vr[i]);
+ break;
+#endif
+ case KVMPPC_GSID_HDAR:
+ vcpu->arch.fault_dar = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_HDSISR:
+ vcpu->arch.fault_dsisr = kvmppc_gse_get_u32(gse);
+ break;
+ case KVMPPC_GSID_ASDR:
+ vcpu->arch.fault_gpa = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_HEIR:
+ vcpu->arch.emul_inst = kvmppc_gse_get_u64(gse);
+ break;
+ case KVMPPC_GSID_DEC_EXPIRY_TB: {
+ u64 dw;
+
+ dw = kvmppc_gse_get_u64(gse);
+ vcpu->arch.dec_expires =
+ dw + vcpu->arch.vcore->tb_offset;
+ break;
+ }
+ case KVMPPC_GSID_LOGICAL_PVR:
+ vcpu->arch.vcore->arch_compat = kvmppc_gse_get_u32(gse);
+ break;
+ default:
+ continue;
+ }
+ kvmppc_gsbm_set(valids, iden);
+ }
+
+ return 0;
+}
+
+static struct kvmppc_gs_msg_ops vcpu_message_ops = {
+ .get_size = gs_msg_ops_vcpu_get_size,
+ .fill_info = gs_msg_ops_vcpu_fill_info,
+ .refresh_info = gs_msg_ops_vcpu_refresh_info,
+};
+
+static int kvmhv_nestedv2_host_create(struct kvm_vcpu *vcpu,
+ struct kvmhv_nestedv2_io *io)
+{
+ struct kvmhv_nestedv2_config *cfg;
+ struct kvmppc_gs_buff *gsb, *vcpu_run_output, *vcpu_run_input;
+ unsigned long guest_id, vcpu_id;
+ struct kvmppc_gs_msg *gsm, *vcpu_message, *vcore_message;
+ int rc;
+
+ cfg = &io->cfg;
+ guest_id = vcpu->kvm->arch.lpid;
+ vcpu_id = vcpu->vcpu_id;
+
+ gsm = kvmppc_gsm_new(&config_msg_ops, cfg, KVMPPC_GS_FLAGS_WIDE,
+ GFP_KERNEL);
+ if (!gsm) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), guest_id, vcpu_id,
+ GFP_KERNEL);
+ if (!gsb) {
+ rc = -ENOMEM;
+ goto free_gsm;
+ }
+
+ rc = kvmppc_gsb_receive_datum(gsb, gsm,
+ KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't get vcpu run output buffer minimum size\n");
+ goto free_gsb;
+ }
+
+ vcpu_run_output = kvmppc_gsb_new(cfg->vcpu_run_output_size, guest_id,
+ vcpu_id, GFP_KERNEL);
+ if (!vcpu_run_output) {
+ rc = -ENOMEM;
+ goto free_gsb;
+ }
+
+ cfg->vcpu_run_output_cfg.address = kvmppc_gsb_paddress(vcpu_run_output);
+ cfg->vcpu_run_output_cfg.size = kvmppc_gsb_capacity(vcpu_run_output);
+ io->vcpu_run_output = vcpu_run_output;
+
+ gsm->flags = 0;
+ rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_OUTPUT);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't set vcpu run output buffer\n");
+ goto free_gs_out;
+ }
+
+ vcpu_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu, 0, GFP_KERNEL);
+ if (!vcpu_message) {
+ rc = -ENOMEM;
+ goto free_gs_out;
+ }
+ kvmppc_gsm_include_all(vcpu_message);
+
+ io->vcpu_message = vcpu_message;
+
+ vcpu_run_input = kvmppc_gsb_new(kvmppc_gsm_size(vcpu_message), guest_id,
+ vcpu_id, GFP_KERNEL);
+ if (!vcpu_run_input) {
+ rc = -ENOMEM;
+ goto free_vcpu_message;
+ }
+
+ io->vcpu_run_input = vcpu_run_input;
+ cfg->vcpu_run_input_cfg.address = kvmppc_gsb_paddress(vcpu_run_input);
+ cfg->vcpu_run_input_cfg.size = kvmppc_gsb_capacity(vcpu_run_input);
+ rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_INPUT);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't set vcpu run input buffer\n");
+ goto free_vcpu_run_input;
+ }
+
+ vcore_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu,
+ KVMPPC_GS_FLAGS_WIDE, GFP_KERNEL);
+ if (!vcore_message) {
+ rc = -ENOMEM;
+ goto free_vcpu_run_input;
+ }
+
+ kvmppc_gsm_include_all(vcore_message);
+ kvmppc_gsbm_clear(&vcore_message->bitmap, KVMPPC_GSID_LOGICAL_PVR);
+ io->vcore_message = vcore_message;
+
+ kvmppc_gsbm_fill(&io->valids);
+ kvmppc_gsm_free(gsm);
+ kvmppc_gsb_free(gsb);
+ return 0;
+
+free_vcpu_run_input:
+ kvmppc_gsb_free(vcpu_run_input);
+free_vcpu_message:
+ kvmppc_gsm_free(vcpu_message);
+free_gs_out:
+ kvmppc_gsb_free(vcpu_run_output);
+free_gsb:
+ kvmppc_gsb_free(gsb);
+free_gsm:
+ kvmppc_gsm_free(gsm);
+err:
+ return rc;
+}
+
+/**
+ * __kvmhv_nestedv2_mark_dirty() - mark a Guest State ID to be sent to the host
+ * @vcpu: vcpu
+ * @iden: guest state ID
+ *
+ * Mark a guest state ID as having been changed by the L1 host so that the
+ * new value is sent to the L0 hypervisor on the next flush. See kvmhv_nestedv2_flush_vcpu().
+ */
+int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
+{
+ struct kvmhv_nestedv2_io *io;
+ struct kvmppc_gs_bitmap *valids;
+ struct kvmppc_gs_msg *gsm;
+
+ if (!iden)
+ return 0;
+
+ io = &vcpu->arch.nestedv2_io;
+ valids = &io->valids;
+ gsm = io->vcpu_message;
+ kvmppc_gsm_include(gsm, iden);
+ gsm = io->vcore_message;
+ kvmppc_gsm_include(gsm, iden);
+ kvmppc_gsbm_set(valids, iden);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty);
+
+/**
+ * __kvmhv_nestedv2_cached_reload() - reload a Guest State ID from the host
+ * @vcpu: vcpu
+ * @iden: guest state ID
+ *
+ * Reload the value for the guest state ID from the L0 host into the L1 host's
+ * local copy. The L0 host is only consulted when the cached copy is not already valid.
+ */
+int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
+{
+ struct kvmhv_nestedv2_io *io;
+ struct kvmppc_gs_bitmap *valids;
+ struct kvmppc_gs_buff *gsb;
+ struct kvmppc_gs_msg gsm;
+ int rc;
+
+ if (!iden)
+ return 0;
+
+ io = &vcpu->arch.nestedv2_io;
+ valids = &io->valids;
+ if (kvmppc_gsbm_test(valids, iden))
+ return 0;
+
+ gsb = io->vcpu_run_input;
+ kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, kvmppc_gsid_flags(iden));
+ rc = kvmppc_gsb_receive_datum(gsb, &gsm, iden);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't get GSID: 0x%x\n", iden);
+ return rc;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_cached_reload);
+
+/**
+ * kvmhv_nestedv2_flush_vcpu() - send modified Guest State IDs to the host
+ * @vcpu: vcpu
+ * @time_limit: hdec expiry tb
+ *
+ * Send the values marked by __kvmhv_nestedv2_mark_dirty() to the L0 host.
+ * Thread wide values are copied to the H_GUEST_RUN_VCPU input buffer. Guest
+ * wide values need to be sent with H_GUEST_SET first.
+ *
+ * The hdec expiry tb is always sent to the L0 host.
+ */
+int kvmhv_nestedv2_flush_vcpu(struct kvm_vcpu *vcpu, u64 time_limit)
+{
+ struct kvmhv_nestedv2_io *io;
+ struct kvmppc_gs_buff *gsb;
+ struct kvmppc_gs_msg *gsm;
+ int rc;
+
+ io = &vcpu->arch.nestedv2_io;
+ gsb = io->vcpu_run_input;
+ gsm = io->vcore_message;
+ rc = kvmppc_gsb_send_data(gsb, gsm);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't set guest wide elements\n");
+ return rc;
+ }
+
+ gsm = io->vcpu_message;
+ kvmppc_gsb_reset(gsb);
+ rc = kvmppc_gsm_fill_info(gsm, gsb);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't fill vcpu run input buffer\n");
+ return rc;
+ }
+
+ rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_HDEC_EXPIRY_TB, time_limit);
+ if (rc < 0)
+ return rc;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvmhv_nestedv2_flush_vcpu);
+
+/**
+ * kvmhv_nestedv2_set_ptbl_entry() - send partition and process table state to
+ * L0 host
+ * @lpid: guest id
+ * @dw0: partition table double word
+ * @dw1: process table double word
+ */
+int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1)
+{
+ struct kvmppc_gs_part_table patbl;
+ struct kvmppc_gs_proc_table prtbl;
+ struct kvmppc_gs_buff *gsb;
+ size_t size;
+ int rc;
+
+ size = kvmppc_gse_total_size(
+ kvmppc_gsid_size(KVMPPC_GSID_PARTITION_TABLE)) +
+ kvmppc_gse_total_size(
+ kvmppc_gsid_size(KVMPPC_GSID_PROCESS_TABLE)) +
+ sizeof(struct kvmppc_gs_header);
+ gsb = kvmppc_gsb_new(size, lpid, 0, GFP_KERNEL);
+ if (!gsb)
+ return -ENOMEM;
+
+ patbl.address = dw0 & RPDB_MASK;
+ patbl.ea_bits = ((((dw0 & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
+ ((dw0 & RTS2_MASK) >> RTS2_SHIFT)) +
+ 31);
+ patbl.gpd_size = 1ul << ((dw0 & RPDS_MASK) + 3);
+ rc = kvmppc_gse_put_part_table(gsb, KVMPPC_GSID_PARTITION_TABLE, patbl);
+ if (rc < 0)
+ goto free_gsb;
+
+ prtbl.address = dw1 & PRTB_MASK;
+ prtbl.gpd_size = 1ul << ((dw1 & PRTS_MASK) + 12);
+ rc = kvmppc_gse_put_proc_table(gsb, KVMPPC_GSID_PROCESS_TABLE, prtbl);
+ if (rc < 0)
+ goto free_gsb;
+
+ rc = kvmppc_gsb_send(gsb, KVMPPC_GS_FLAGS_WIDE);
+ if (rc < 0) {
+ pr_err("KVM-NESTEDv2: couldn't set the PATE\n");
+ goto free_gsb;
+ }
+
+ kvmppc_gsb_free(gsb);
+ return 0;
+
+free_gsb:
+ kvmppc_gsb_free(gsb);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(kvmhv_nestedv2_set_ptbl_entry);
+
+/**
+ * kvmhv_nestedv2_parse_output() - receive values from H_GUEST_RUN_VCPU output
+ * @vcpu: vcpu
+ *
+ * Parse the output buffer from H_GUEST_RUN_VCPU to update vcpu.
+ */
+int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu)
+{
+ struct kvmhv_nestedv2_io *io;
+ struct kvmppc_gs_buff *gsb;
+ struct kvmppc_gs_msg gsm;
+
+ io = &vcpu->arch.nestedv2_io;
+ gsb = io->vcpu_run_output;
+
+ vcpu->arch.fault_dar = 0;
+ vcpu->arch.fault_dsisr = 0;
+ vcpu->arch.fault_gpa = 0;
+ vcpu->arch.emul_inst = KVM_INST_FETCH_FAILED;
+
+ kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0);
+ return kvmppc_gsm_refresh_info(&gsm, gsb);
+}
+EXPORT_SYMBOL_GPL(kvmhv_nestedv2_parse_output);
+
+static void kvmhv_nestedv2_host_free(struct kvm_vcpu *vcpu,
+ struct kvmhv_nestedv2_io *io)
+{
+ kvmppc_gsm_free(io->vcpu_message);
+ kvmppc_gsm_free(io->vcore_message);
+ kvmppc_gsb_free(io->vcpu_run_input);
+ kvmppc_gsb_free(io->vcpu_run_output);
+}
+
+int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs)
+{
+ struct kvmhv_nestedv2_io *io;
+ struct kvmppc_gs_bitmap *valids;
+ struct kvmppc_gs_buff *gsb;
+ struct kvmppc_gs_msg gsm;
+ int rc = 0;
+
+
+ io = &vcpu->arch.nestedv2_io;
+ valids = &io->valids;
+
+ gsb = io->vcpu_run_input;
+ kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0);
+
+ for (int i = 0; i < 32; i++) {
+ if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_GPR(i)))
+ kvmppc_gsm_include(&gsm, KVMPPC_GSID_GPR(i));
+ }
+
+ if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CR))
+ kvmppc_gsm_include(&gsm, KVMPPC_GSID_CR);
+
+ if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_XER))
+ kvmppc_gsm_include(&gsm, KVMPPC_GSID_XER);
+
+ if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CTR))
+ kvmppc_gsm_include(&gsm, KVMPPC_GSID_CTR);
+
+ if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_LR))
+ kvmppc_gsm_include(&gsm, KVMPPC_GSID_LR);
+
+ if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_NIA))
+ kvmppc_gsm_include(&gsm, KVMPPC_GSID_NIA);
+
+ rc = kvmppc_gsb_receive_data(gsb, &gsm);
+ if (rc < 0)
+ pr_err("KVM-NESTEDv2: couldn't reload ptregs\n");
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_reload_ptregs);
+
+int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
+ struct pt_regs *regs)
+{
+ for (int i = 0; i < 32; i++)
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(i));
+
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR);
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER);
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR);
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR);
+ kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty_ptregs);
+
+/**
+ * kvmhv_nestedv2_vcpu_create() - create nested vcpu for the NESTEDv2 API
+ * @vcpu: vcpu
+ * @io: NESTEDv2 nested io state
+ *
+ * Create the vcpu in the L0 host via the guest create vcpu hcall and set up its guest state buffers.
+ */
+int kvmhv_nestedv2_vcpu_create(struct kvm_vcpu *vcpu,
+ struct kvmhv_nestedv2_io *io)
+{
+ long rc;
+
+ rc = plpar_guest_create_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id);
+
+ if (rc != H_SUCCESS) {
+ pr_err("KVM: Create Guest vcpu hcall failed, rc=%ld\n", rc);
+ switch (rc) {
+ case H_NOT_ENOUGH_RESOURCES:
+ case H_ABORTED:
+ return -ENOMEM;
+ case H_AUTHORITY:
+ return -EPERM;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ rc = kvmhv_nestedv2_host_create(vcpu, io);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_create);
+
+/**
+ * kvmhv_nestedv2_vcpu_free() - free the NESTEDv2 state
+ * @vcpu: vcpu
+ * @io: NESTEDv2 nested io state
+ */
+void kvmhv_nestedv2_vcpu_free(struct kvm_vcpu *vcpu,
+ struct kvmhv_nestedv2_io *io)
+{
+ kvmhv_nestedv2_host_free(vcpu, io);
+}
+EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_free);
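
Putting the helpers above together with kvmhv_vcpu_entry_nestedv2() in book3s_hv.c earlier in this diff, the per-entry round trip boils down to the following condensed sketch (the function name is illustrative; time accounting, lazy-MSR handling and the detailed error reporting of the real function are omitted):

    static int nestedv2_run_once(struct kvm_vcpu *vcpu, u64 time_limit, u64 *tb)
    {
            struct kvmhv_nestedv2_io *io = &vcpu->arch.nestedv2_io;
            unsigned long fail_iden;
            int trap;
            long rc;

            /* Push every register marked dirty since the last exit to the L0 */
            if (kvmhv_nestedv2_flush_vcpu(vcpu, time_limit) < 0)
                    return -EINVAL;

            /* Ask the L0 hypervisor to run the L2 vcpu until it exits */
            rc = plpar_guest_run_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id,
                                      &trap, &fail_iden);
            if (rc != H_SUCCESS)
                    return -EINVAL;

            /* All locally cached register values are now stale ... */
            kvmppc_gsm_reset(io->vcpu_message);
            kvmppc_gsm_reset(io->vcore_message);
            kvmppc_gsbm_zero(&io->valids);

            /* ... except what the run-vcpu output buffer reports back */
            *tb = mftb();
            if (kvmhv_nestedv2_parse_output(vcpu) < 0)
                    return -EINVAL;

            timer_rearm_host_dec(*tb);
            return trap;
    }
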
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index 34f1db212824..34bc0a8a1288 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -305,7 +305,7 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u6
u32 pid;
lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
- pid = vcpu->arch.pid;
+ pid = kvmppc_get_pid(vcpu);
/*
* Prior memory accesses to host PID Q3 must be completed before we
@@ -330,7 +330,7 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64
int i;
lpid = kvm->arch.lpid;
- pid = vcpu->arch.pid;
+ pid = kvmppc_get_pid(vcpu);
/*
* See switch_mmu_to_guest_radix. ptesync should not be required here
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index 82be6d87514b..9012acadbca8 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -174,14 +174,14 @@ long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu)
ppc_md.hmi_exception_early(NULL);
out:
- if (vc->tb_offset) {
+ if (kvmppc_get_tb_offset(vcpu)) {
u64 new_tb = mftb() + vc->tb_offset;
mtspr(SPRN_TBU40, new_tb);
if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
new_tb += 0x1000000;
mtspr(SPRN_TBU40, new_tb);
}
- vc->tb_offset_applied = vc->tb_offset;
+ vc->tb_offset_applied = kvmppc_get_tb_offset(vcpu);
}
return ret;
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 9182324dbef9..17cb75a127b0 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -776,8 +776,8 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
r &= ~HPTE_GR_RESERVED;
}
- vcpu->arch.regs.gpr[4 + i * 2] = v;
- vcpu->arch.regs.gpr[5 + i * 2] = r;
+ kvmppc_set_gpr(vcpu, 4 + i * 2, v);
+ kvmppc_set_gpr(vcpu, 5 + i * 2, r);
}
return H_SUCCESS;
}
@@ -824,7 +824,7 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
}
}
}
- vcpu->arch.regs.gpr[4] = gr;
+ kvmppc_set_gpr(vcpu, 4, gr);
ret = H_SUCCESS;
out:
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
@@ -872,7 +872,7 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
kvmppc_set_dirty_from_hpte(kvm, v, gr);
}
}
- vcpu->arch.regs.gpr[4] = gr;
+ kvmppc_set_gpr(vcpu, 4, gr);
ret = H_SUCCESS;
out:
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index e165bfa842bf..e42984878503 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -481,7 +481,7 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
unsigned long xics_rm_h_xirr_x(struct kvm_vcpu *vcpu)
{
- vcpu->arch.regs.gpr[5] = get_tb();
+ kvmppc_set_gpr(vcpu, 5, get_tb());
return xics_rm_h_xirr(vcpu);
}
@@ -518,7 +518,7 @@ unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu)
} while (!icp_rm_try_update(icp, old_state, new_state));
/* Return the result in GPR4 */
- vcpu->arch.regs.gpr[4] = xirr;
+ kvmppc_set_gpr(vcpu, 4, xirr);
return check_too_hard(xics, icp);
}
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index e2d6f9327f77..92f33115144b 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -858,7 +858,7 @@ unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
}
kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
- pr_info("LPID %d went secure\n", kvm->arch.lpid);
+ pr_info("LPID %lld went secure\n", kvm->arch.lpid);
out:
srcu_read_unlock(&kvm->srcu, srcu_idx);
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index f4115819e738..29a382249770 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -328,7 +328,7 @@ static unsigned long xive_vm_h_xirr(struct kvm_vcpu *vcpu)
*/
/* Return interrupt and old CPPR in GPR4 */
- vcpu->arch.regs.gpr[4] = hirq | (old_cppr << 24);
+ kvmppc_set_gpr(vcpu, 4, hirq | (old_cppr << 24));
return H_SUCCESS;
}
@@ -364,7 +364,7 @@ static unsigned long xive_vm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server
hirq = xive_vm_scan_interrupts(xc, pending, scan_poll);
/* Return interrupt and old CPPR in GPR4 */
- vcpu->arch.regs.gpr[4] = hirq | (xc->cppr << 24);
+ kvmppc_set_gpr(vcpu, 4, hirq | (xc->cppr << 24));
return H_SUCCESS;
}
@@ -884,10 +884,10 @@ int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
}
if (single_escalation)
- name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
+ name = kasprintf(GFP_KERNEL, "kvm-%lld-%d",
vcpu->kvm->arch.lpid, xc->server_num);
else
- name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
+ name = kasprintf(GFP_KERNEL, "kvm-%lld-%d-%d",
vcpu->kvm->arch.lpid, xc->server_num, prio);
if (!name) {
pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
@@ -2779,8 +2779,6 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
- struct kvmppc_vcore *vc = vcpu->arch.vcore;
-
/* The VM should have configured XICS mode before doing XICS hcalls. */
if (!kvmppc_xics_enabled(vcpu))
return H_TOO_HARD;
@@ -2799,7 +2797,7 @@ int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
return xive_vm_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
case H_XIRR_X:
xive_vm_h_xirr(vcpu);
- kvmppc_set_gpr(vcpu, 5, get_tb() + vc->tb_offset);
+ kvmppc_set_gpr(vcpu, 5, get_tb() + kvmppc_get_tb_offset(vcpu));
return H_SUCCESS;
}
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index 059c08ae0340..077fd88a0b68 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -92,7 +92,8 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
vcpu->arch.mmio_host_swabbed = 0;
emulated = EMULATE_FAIL;
- vcpu->arch.regs.msr = vcpu->arch.shared->msr;
+ vcpu->arch.regs.msr = kvmppc_get_msr(vcpu);
+ kvmhv_nestedv2_reload_ptregs(vcpu, &vcpu->arch.regs);
if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
int type = op.type & INSTR_TYPE_MASK;
int size = GETSIZE(op.type);
@@ -250,7 +251,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
vcpu->arch.mmio_sp64_extend = 1;
emulated = kvmppc_handle_store(vcpu,
- VCPU_FPR(vcpu, op.reg), size, 1);
+ kvmppc_get_fpr(vcpu, op.reg), size, 1);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
@@ -357,6 +358,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
}
trace_kvm_ppc_instr(ppc_inst_val(inst), kvmppc_get_pc(vcpu), emulated);
+ kvmhv_nestedv2_mark_dirty_ptregs(vcpu, &vcpu->arch.regs);
/* Advance past emulated instruction. */
if (emulated != EMULATE_FAIL)
diff --git a/arch/powerpc/kvm/guest-state-buffer.c b/arch/powerpc/kvm/guest-state-buffer.c
new file mode 100644
index 000000000000..b80dbc58621f
--- /dev/null
+++ b/arch/powerpc/kvm/guest-state-buffer.c
@@ -0,0 +1,621 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "asm/hvcall.h"
+#include <linux/log2.h>
+#include <asm/pgalloc.h>
+#include <asm/guest-state-buffer.h>
+
+static const u16 kvmppc_gse_iden_len[__KVMPPC_GSE_TYPE_MAX] = {
+ [KVMPPC_GSE_BE32] = sizeof(__be32),
+ [KVMPPC_GSE_BE64] = sizeof(__be64),
+ [KVMPPC_GSE_VEC128] = sizeof(vector128),
+ [KVMPPC_GSE_PARTITION_TABLE] = sizeof(struct kvmppc_gs_part_table),
+ [KVMPPC_GSE_PROCESS_TABLE] = sizeof(struct kvmppc_gs_proc_table),
+ [KVMPPC_GSE_BUFFER] = sizeof(struct kvmppc_gs_buff_info),
+};
+
+/**
+ * kvmppc_gsb_new() - create a new guest state buffer
+ * @size: total size of the guest state buffer (includes header)
+ * @guest_id: guest_id
+ * @vcpu_id: vcpu_id
+ * @flags: GFP flags
+ *
+ * Returns a guest state buffer.
+ */
+struct kvmppc_gs_buff *kvmppc_gsb_new(size_t size, unsigned long guest_id,
+ unsigned long vcpu_id, gfp_t flags)
+{
+ struct kvmppc_gs_buff *gsb;
+
+ gsb = kzalloc(sizeof(*gsb), flags);
+ if (!gsb)
+ return NULL;
+
+ size = roundup_pow_of_two(size);
+ gsb->hdr = kzalloc(size, GFP_KERNEL);
+ if (!gsb->hdr)
+ goto free;
+
+ gsb->capacity = size;
+ gsb->len = sizeof(struct kvmppc_gs_header);
+ gsb->vcpu_id = vcpu_id;
+ gsb->guest_id = guest_id;
+
+ gsb->hdr->nelems = cpu_to_be32(0);
+
+ return gsb;
+
+free:
+ kfree(gsb);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsb_new);
+
+/**
+ * kvmppc_gsb_free() - free a guest state buffer
+ * @gsb: guest state buffer
+ */
+void kvmppc_gsb_free(struct kvmppc_gs_buff *gsb)
+{
+ kfree(gsb->hdr);
+ kfree(gsb);
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsb_free);
+
+/**
+ * kvmppc_gsb_put() - allocate space in a guest state buffer
+ * @gsb: buffer to allocate in
+ * @size: amount of space to allocate
+ *
+ * Returns a pointer to the requested space within the buffer and
+ * increments the count of elements in the buffer.
+ *
+ * Does not check if there is enough space in the buffer.
+ */
+void *kvmppc_gsb_put(struct kvmppc_gs_buff *gsb, size_t size)
+{
+ u32 nelems = kvmppc_gsb_nelems(gsb);
+ void *p;
+
+ p = (void *)kvmppc_gsb_header(gsb) + kvmppc_gsb_len(gsb);
+ gsb->len += size;
+
+ kvmppc_gsb_header(gsb)->nelems = cpu_to_be32(nelems + 1);
+ return p;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsb_put);
+
+static int kvmppc_gsid_class(u16 iden)
+{
+ if ((iden >= KVMPPC_GSE_GUESTWIDE_START) &&
+ (iden <= KVMPPC_GSE_GUESTWIDE_END))
+ return KVMPPC_GS_CLASS_GUESTWIDE;
+
+ if ((iden >= KVMPPC_GSE_META_START) && (iden <= KVMPPC_GSE_META_END))
+ return KVMPPC_GS_CLASS_META;
+
+ if ((iden >= KVMPPC_GSE_DW_REGS_START) &&
+ (iden <= KVMPPC_GSE_DW_REGS_END))
+ return KVMPPC_GS_CLASS_DWORD_REG;
+
+ if ((iden >= KVMPPC_GSE_W_REGS_START) &&
+ (iden <= KVMPPC_GSE_W_REGS_END))
+ return KVMPPC_GS_CLASS_WORD_REG;
+
+ if ((iden >= KVMPPC_GSE_VSRS_START) && (iden <= KVMPPC_GSE_VSRS_END))
+ return KVMPPC_GS_CLASS_VECTOR;
+
+ if ((iden >= KVMPPC_GSE_INTR_REGS_START) &&
+ (iden <= KVMPPC_GSE_INTR_REGS_END))
+ return KVMPPC_GS_CLASS_INTR;
+
+ return -1;
+}
+
+static int kvmppc_gsid_type(u16 iden)
+{
+ int type = -1;
+
+ switch (kvmppc_gsid_class(iden)) {
+ case KVMPPC_GS_CLASS_GUESTWIDE:
+ switch (iden) {
+ case KVMPPC_GSID_HOST_STATE_SIZE:
+ case KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE:
+ case KVMPPC_GSID_TB_OFFSET:
+ type = KVMPPC_GSE_BE64;
+ break;
+ case KVMPPC_GSID_PARTITION_TABLE:
+ type = KVMPPC_GSE_PARTITION_TABLE;
+ break;
+ case KVMPPC_GSID_PROCESS_TABLE:
+ type = KVMPPC_GSE_PROCESS_TABLE;
+ break;
+ case KVMPPC_GSID_LOGICAL_PVR:
+ type = KVMPPC_GSE_BE32;
+ break;
+ }
+ break;
+ case KVMPPC_GS_CLASS_META:
+ switch (iden) {
+ case KVMPPC_GSID_RUN_INPUT:
+ case KVMPPC_GSID_RUN_OUTPUT:
+ type = KVMPPC_GSE_BUFFER;
+ break;
+ case KVMPPC_GSID_VPA:
+ type = KVMPPC_GSE_BE64;
+ break;
+ }
+ break;
+ case KVMPPC_GS_CLASS_DWORD_REG:
+ type = KVMPPC_GSE_BE64;
+ break;
+ case KVMPPC_GS_CLASS_WORD_REG:
+ type = KVMPPC_GSE_BE32;
+ break;
+ case KVMPPC_GS_CLASS_VECTOR:
+ type = KVMPPC_GSE_VEC128;
+ break;
+ case KVMPPC_GS_CLASS_INTR:
+ switch (iden) {
+ case KVMPPC_GSID_HDAR:
+ case KVMPPC_GSID_ASDR:
+ case KVMPPC_GSID_HEIR:
+ type = KVMPPC_GSE_BE64;
+ break;
+ case KVMPPC_GSID_HDSISR:
+ type = KVMPPC_GSE_BE32;
+ break;
+ }
+ break;
+ }
+
+ return type;
+}
+
+/**
+ * kvmppc_gsid_flags() - the flags for a guest state ID
+ * @iden: guest state ID
+ *
+ * Returns any flags for the guest state ID.
+ */
+unsigned long kvmppc_gsid_flags(u16 iden)
+{
+ unsigned long flags = 0;
+
+ switch (kvmppc_gsid_class(iden)) {
+ case KVMPPC_GS_CLASS_GUESTWIDE:
+ flags = KVMPPC_GS_FLAGS_WIDE;
+ break;
+ case KVMPPC_GS_CLASS_META:
+ case KVMPPC_GS_CLASS_DWORD_REG:
+ case KVMPPC_GS_CLASS_WORD_REG:
+ case KVMPPC_GS_CLASS_VECTOR:
+ case KVMPPC_GS_CLASS_INTR:
+ break;
+ }
+
+ return flags;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsid_flags);
+
+/**
+ * kvmppc_gsid_size() - the size of a guest state ID
+ * @iden: guest state ID
+ *
+ * Returns the size of the value associated with the guest state ID.
+ */
+u16 kvmppc_gsid_size(u16 iden)
+{
+ int type;
+
+ type = kvmppc_gsid_type(iden);
+ if (type == -1)
+ return 0;
+
+ if (type >= __KVMPPC_GSE_TYPE_MAX)
+ return 0;
+
+ return kvmppc_gse_iden_len[type];
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsid_size);
+
+/**
+ * kvmppc_gsid_mask() - the settable bits of a guest state ID
+ * @iden: guest state ID
+ *
+ * Returns a mask of settable bits for a guest state ID.
+ */
+u64 kvmppc_gsid_mask(u16 iden)
+{
+ u64 mask = ~0ull;
+
+ switch (iden) {
+ case KVMPPC_GSID_LPCR:
+ mask = LPCR_DPFD | LPCR_ILE | LPCR_AIL | LPCR_LD | LPCR_MER |
+ LPCR_GTSE;
+ break;
+ case KVMPPC_GSID_MSR:
+ mask = ~(MSR_HV | MSR_S | MSR_ME);
+ break;
+ }
+
+ return mask;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsid_mask);
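+
+/*
+ * A caller setting a register through the buffer would typically clear the
+ * unsettable bits first (sketch, assuming a local lpcr value):
+ *
+ *   lpcr &= kvmppc_gsid_mask(KVMPPC_GSID_LPCR);
+ *   kvmppc_gse_put_u64(gsb, KVMPPC_GSID_LPCR, lpcr);
+ */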
+
+/**
+ * __kvmppc_gse_put() - add a guest state element to a buffer
+ * @gsb: buffer to add the element to
+ * @iden: guest state ID
+ * @size: length of data
+ * @data: pointer to data
+ *
+ * Returns 0 on success, -ENOMEM if the buffer has insufficient space, or
+ * -EINVAL if the size does not match the guest state ID.
+ */
+int __kvmppc_gse_put(struct kvmppc_gs_buff *gsb, u16 iden, u16 size,
+ const void *data)
+{
+ struct kvmppc_gs_elem *gse;
+ u16 total_size;
+
+ total_size = sizeof(*gse) + size;
+ if (total_size + kvmppc_gsb_len(gsb) > kvmppc_gsb_capacity(gsb))
+ return -ENOMEM;
+
+ if (kvmppc_gsid_size(iden) != size)
+ return -EINVAL;
+
+ gse = kvmppc_gsb_put(gsb, total_size);
+ gse->iden = cpu_to_be16(iden);
+ gse->len = cpu_to_be16(size);
+ memcpy(gse->data, data, size);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__kvmppc_gse_put);
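+
+/*
+ * Typed wrappers such as kvmppc_gse_put_u64() and kvmppc_gse_put_u32() are
+ * built on __kvmppc_gse_put(). A minimal sketch of adding a GPR value:
+ *
+ *   rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(0), vcpu->arch.regs.gpr[0]);
+ *   if (rc < 0)
+ *       return rc;
+ */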
+
+/**
+ * kvmppc_gse_parse() - create a parse map from a guest state buffer
+ * @gsp: guest state parser
+ * @gsb: guest state buffer
+ */
+int kvmppc_gse_parse(struct kvmppc_gs_parser *gsp, struct kvmppc_gs_buff *gsb)
+{
+ struct kvmppc_gs_elem *curr;
+ int rem, i;
+
+ kvmppc_gsb_for_each_elem(i, curr, gsb, rem) {
+ if (kvmppc_gse_len(curr) !=
+ kvmppc_gsid_size(kvmppc_gse_iden(curr)))
+ return -EINVAL;
+ kvmppc_gsp_insert(gsp, kvmppc_gse_iden(curr), curr);
+ }
+
+ if (kvmppc_gsb_nelems(gsb) != i)
+ return -EINVAL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gse_parse);
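+
+/*
+ * A sketch of consuming a buffer returned by the hypervisor: build the parse
+ * map, then look elements up by ID (see also test_gs_parsing() in
+ * test-guest-state-buffer.c):
+ *
+ *   struct kvmppc_gs_parser gsp = { 0 };
+ *   struct kvmppc_gs_elem *gse;
+ *
+ *   if (kvmppc_gse_parse(&gsp, gsb) < 0)
+ *       return -EINVAL;
+ *   gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_GPR(0));
+ *   if (gse)
+ *       gpr0 = kvmppc_gse_get_u64(gse);
+ */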
+
+static inline int kvmppc_gse_flatten_iden(u16 iden)
+{
+ int bit = 0;
+ int class;
+
+ class = kvmppc_gsid_class(iden);
+
+ if (class == KVMPPC_GS_CLASS_GUESTWIDE) {
+ bit += iden - KVMPPC_GSE_GUESTWIDE_START;
+ return bit;
+ }
+
+ bit += KVMPPC_GSE_GUESTWIDE_COUNT;
+
+ if (class == KVMPPC_GS_CLASS_META) {
+ bit += iden - KVMPPC_GSE_META_START;
+ return bit;
+ }
+
+ bit += KVMPPC_GSE_META_COUNT;
+
+ if (class == KVMPPC_GS_CLASS_DWORD_REG) {
+ bit += iden - KVMPPC_GSE_DW_REGS_START;
+ return bit;
+ }
+
+ bit += KVMPPC_GSE_DW_REGS_COUNT;
+
+ if (class == KVMPPC_GS_CLASS_WORD_REG) {
+ bit += iden - KVMPPC_GSE_W_REGS_START;
+ return bit;
+ }
+
+ bit += KVMPPC_GSE_W_REGS_COUNT;
+
+ if (class == KVMPPC_GS_CLASS_VECTOR) {
+ bit += iden - KVMPPC_GSE_VSRS_START;
+ return bit;
+ }
+
+ bit += KVMPPC_GSE_VSRS_COUNT;
+
+ if (class == KVMPPC_GS_CLASS_INTR) {
+ bit += iden - KVMPPC_GSE_INTR_REGS_START;
+ return bit;
+ }
+
+ return 0;
+}
+
+static inline u16 kvmppc_gse_unflatten_iden(int bit)
+{
+ u16 iden;
+
+ if (bit < KVMPPC_GSE_GUESTWIDE_COUNT) {
+ iden = KVMPPC_GSE_GUESTWIDE_START + bit;
+ return iden;
+ }
+ bit -= KVMPPC_GSE_GUESTWIDE_COUNT;
+
+ if (bit < KVMPPC_GSE_META_COUNT) {
+ iden = KVMPPC_GSE_META_START + bit;
+ return iden;
+ }
+ bit -= KVMPPC_GSE_META_COUNT;
+
+ if (bit < KVMPPC_GSE_DW_REGS_COUNT) {
+ iden = KVMPPC_GSE_DW_REGS_START + bit;
+ return iden;
+ }
+ bit -= KVMPPC_GSE_DW_REGS_COUNT;
+
+ if (bit < KVMPPC_GSE_W_REGS_COUNT) {
+ iden = KVMPPC_GSE_W_REGS_START + bit;
+ return iden;
+ }
+ bit -= KVMPPC_GSE_W_REGS_COUNT;
+
+ if (bit < KVMPPC_GSE_VSRS_COUNT) {
+ iden = KVMPPC_GSE_VSRS_START + bit;
+ return iden;
+ }
+ bit -= KVMPPC_GSE_VSRS_COUNT;
+
+ if (bit < KVMPPC_GSE_IDEN_COUNT) {
+ iden = KVMPPC_GSE_INTR_REGS_START + bit;
+ return iden;
+ }
+
+ return 0;
+}
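+
+/*
+ * The two helpers above are intended to be inverses for any valid ID: the
+ * sparse guest state ID space is mapped onto contiguous bit positions so
+ * that the parser and bitmap below can index it densely, i.e.
+ *
+ *   kvmppc_gse_unflatten_iden(kvmppc_gse_flatten_iden(iden)) == iden
+ */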
+
+/**
+ * kvmppc_gsp_insert() - add a mapping from a guest state ID to an element
+ * @gsp: guest state parser
+ * @iden: guest state id (key)
+ * @gse: guest state element (value)
+ */
+void kvmppc_gsp_insert(struct kvmppc_gs_parser *gsp, u16 iden,
+ struct kvmppc_gs_elem *gse)
+{
+ int i;
+
+ i = kvmppc_gse_flatten_iden(iden);
+ kvmppc_gsbm_set(&gsp->iterator, iden);
+ gsp->gses[i] = gse;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsp_insert);
+
+/**
+ * kvmppc_gsp_lookup() - lookup an element from a guest state ID
+ * @gsp: guest state parser
+ * @iden: guest state ID (key)
+ *
+ * Returns the guest state element if present.
+ */
+struct kvmppc_gs_elem *kvmppc_gsp_lookup(struct kvmppc_gs_parser *gsp, u16 iden)
+{
+ int i;
+
+ i = kvmppc_gse_flatten_iden(iden);
+ return gsp->gses[i];
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsp_lookup);
+
+/**
+ * kvmppc_gsbm_set() - set the guest state ID
+ * @gsbm: guest state bitmap
+ * @iden: guest state ID
+ */
+void kvmppc_gsbm_set(struct kvmppc_gs_bitmap *gsbm, u16 iden)
+{
+ set_bit(kvmppc_gse_flatten_iden(iden), gsbm->bitmap);
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsbm_set);
+
+/**
+ * kvmppc_gsbm_clear() - clear the guest state ID
+ * @gsbm: guest state bitmap
+ * @iden: guest state ID
+ */
+void kvmppc_gsbm_clear(struct kvmppc_gs_bitmap *gsbm, u16 iden)
+{
+ clear_bit(kvmppc_gse_flatten_iden(iden), gsbm->bitmap);
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsbm_clear);
+
+/**
+ * kvmppc_gsbm_test() - test the guest state ID
+ * @gsbm: guest state bitmap
+ * @iden: guest state ID
+ */
+bool kvmppc_gsbm_test(struct kvmppc_gs_bitmap *gsbm, u16 iden)
+{
+ return test_bit(kvmppc_gse_flatten_iden(iden), gsbm->bitmap);
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsbm_test);
+
+/**
+ * kvmppc_gsbm_next() - return the next set guest state ID
+ * @gsbm: guest state bitmap
+ * @prev: last guest state ID
+ */
+u16 kvmppc_gsbm_next(struct kvmppc_gs_bitmap *gsbm, u16 prev)
+{
+ int bit, pbit;
+
+ pbit = prev ? kvmppc_gse_flatten_iden(prev) + 1 : 0;
+ bit = find_next_bit(gsbm->bitmap, KVMPPC_GSE_IDEN_COUNT, pbit);
+
+ if (bit < KVMPPC_GSE_IDEN_COUNT)
+ return kvmppc_gse_unflatten_iden(bit);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsbm_next);
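+
+/*
+ * kvmppc_gsbm_next() underpins the kvmppc_gsbm_for_each() iterator. A sketch
+ * of walking every ID recorded in a bitmap:
+ *
+ *   struct kvmppc_gs_bitmap gsbm = { 0 };
+ *   u16 iden;
+ *
+ *   kvmppc_gsbm_for_each(&gsbm, iden)
+ *       pr_debug("gsid 0x%x set\n", iden);
+ */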
+
+/**
+ * kvmppc_gsm_init() - initialize a guest state message
+ * @gsm: guest state message
+ * @ops: callbacks
+ * @data: private data
+ * @flags: guest wide or thread wide
+ */
+int kvmppc_gsm_init(struct kvmppc_gs_msg *gsm, struct kvmppc_gs_msg_ops *ops,
+ void *data, unsigned long flags)
+{
+ memset(gsm, 0, sizeof(*gsm));
+ gsm->ops = ops;
+ gsm->data = data;
+ gsm->flags = flags;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsm_init);
+
+/**
+ * kvmppc_gsm_new() - creates a new guest state message
+ * @ops: callbacks
+ * @data: private data
+ * @flags: guest wide or thread wide
+ * @gfp_flags: GFP allocation flags
+ *
+ * Returns an initialized guest state message.
+ */
+struct kvmppc_gs_msg *kvmppc_gsm_new(struct kvmppc_gs_msg_ops *ops, void *data,
+ unsigned long flags, gfp_t gfp_flags)
+{
+ struct kvmppc_gs_msg *gsm;
+
+ gsm = kzalloc(sizeof(*gsm), gfp_flags);
+ if (!gsm)
+ return NULL;
+
+ kvmppc_gsm_init(gsm, ops, data, flags);
+
+ return gsm;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsm_new);
+
+/**
+ * kvmppc_gsm_size() - get the size required for the guest state message
+ * @gsm: self
+ *
+ * Returns the size required for the message.
+ */
+size_t kvmppc_gsm_size(struct kvmppc_gs_msg *gsm)
+{
+ if (gsm->ops->get_size)
+ return gsm->ops->get_size(gsm);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsm_size);
+
+/**
+ * kvmppc_gsm_free() - free a guest state message
+ * @gsm: guest state message
+ */
+void kvmppc_gsm_free(struct kvmppc_gs_msg *gsm)
+{
+ kfree(gsm);
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsm_free);
+
+/**
+ * kvmppc_gsm_fill_info() - serialises message to guest state buffer format
+ * @gsm: self
+ * @gsb: buffer to serialise into
+ */
+int kvmppc_gsm_fill_info(struct kvmppc_gs_msg *gsm, struct kvmppc_gs_buff *gsb)
+{
+ if (!gsm->ops->fill_info)
+ return -EINVAL;
+
+ return gsm->ops->fill_info(gsb, gsm);
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsm_fill_info);
+
+/**
+ * kvmppc_gsm_refresh_info() - deserialises from guest state buffer
+ * @gsm: self
+ * @gsb: buffer to deserialise from
+ */
+int kvmppc_gsm_refresh_info(struct kvmppc_gs_msg *gsm,
+ struct kvmppc_gs_buff *gsb)
+{
+ if (!gsm->ops->refresh_info)
+ return -EINVAL;
+
+ return gsm->ops->refresh_info(gsm, gsb);
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsm_refresh_info);
+
+/**
+ * kvmppc_gsb_send() - send all elements in the buffer to the hypervisor
+ * @gsb: guest state buffer
+ * @flags: guest wide or thread wide
+ *
+ * Performs the H_GUEST_SET_STATE hcall for the guest state buffer.
+ */
+int kvmppc_gsb_send(struct kvmppc_gs_buff *gsb, unsigned long flags)
+{
+ unsigned long hflags = 0;
+ unsigned long i;
+ int rc;
+
+ if (kvmppc_gsb_nelems(gsb) == 0)
+ return 0;
+
+ if (flags & KVMPPC_GS_FLAGS_WIDE)
+ hflags |= H_GUEST_FLAGS_WIDE;
+
+ rc = plpar_guest_set_state(hflags, gsb->guest_id, gsb->vcpu_id,
+ __pa(gsb->hdr), gsb->capacity, &i);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsb_send);
+
+/**
+ * kvmppc_gsb_recv() - request that all elements in the buffer have their
+ * values updated
+ * @gsb: guest state buffer
+ * @flags: guest wide or thread wide
+ *
+ * Performs the H_GUEST_GET_STATE hcall for the guest state buffer.
+ * After returning from the hcall the guest state elements that were
+ * present in the buffer will have updated values from the hypervisor.
+ */
+int kvmppc_gsb_recv(struct kvmppc_gs_buff *gsb, unsigned long flags)
+{
+ unsigned long hflags = 0;
+ unsigned long i;
+ int rc;
+
+ if (flags & KVMPPC_GS_FLAGS_WIDE)
+ hflags |= H_GUEST_FLAGS_WIDE;
+
+ rc = plpar_guest_get_state(hflags, gsb->guest_id, gsb->vcpu_id,
+ __pa(gsb->hdr), gsb->capacity, &i);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gsb_recv);
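+
+/*
+ * Putting it together, a rough round trip with the hypervisor (error
+ * handling elided): set a value with H_GUEST_SET_STATE, then request it
+ * back with H_GUEST_GET_STATE and parse the result.
+ *
+ *   kvmppc_gsb_reset(gsb);
+ *   kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(0), val);
+ *   rc = kvmppc_gsb_send(gsb, 0);
+ *
+ *   kvmppc_gsb_reset(gsb);
+ *   kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(0), 0);
+ *   rc = kvmppc_gsb_recv(gsb, 0);
+ *   rc = kvmppc_gse_parse(&gsp, gsb);
+ */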
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 7197c8256668..f6af752698d0 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -934,11 +934,11 @@ static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
return;
if (index >= 32) {
- val.vval = VCPU_VSX_VR(vcpu, index - 32);
+ kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
val.vsxval[offset] = gpr;
- VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
} else {
- VCPU_VSX_FPR(vcpu, index, offset) = gpr;
+ kvmppc_set_vsx_fpr(vcpu, index, offset, gpr);
}
}
@@ -949,13 +949,13 @@ static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
if (index >= 32) {
- val.vval = VCPU_VSX_VR(vcpu, index - 32);
+ kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
val.vsxval[0] = gpr;
val.vsxval[1] = gpr;
- VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
} else {
- VCPU_VSX_FPR(vcpu, index, 0) = gpr;
- VCPU_VSX_FPR(vcpu, index, 1) = gpr;
+ kvmppc_set_vsx_fpr(vcpu, index, 0, gpr);
+ kvmppc_set_vsx_fpr(vcpu, index, 1, gpr);
}
}
@@ -970,12 +970,12 @@ static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
val.vsx32val[1] = gpr;
val.vsx32val[2] = gpr;
val.vsx32val[3] = gpr;
- VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
} else {
val.vsx32val[0] = gpr;
val.vsx32val[1] = gpr;
- VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
- VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
+ kvmppc_set_vsx_fpr(vcpu, index, 0, val.vsxval[0]);
+ kvmppc_set_vsx_fpr(vcpu, index, 1, val.vsxval[0]);
}
}
@@ -991,15 +991,15 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
return;
if (index >= 32) {
- val.vval = VCPU_VSX_VR(vcpu, index - 32);
+ kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval);
val.vsx32val[offset] = gpr32;
- VCPU_VSX_VR(vcpu, index - 32) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval);
} else {
dword_offset = offset / 2;
word_offset = offset % 2;
- val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
+ val.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, index, dword_offset);
val.vsx32val[word_offset] = gpr32;
- VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
+ kvmppc_set_vsx_fpr(vcpu, index, dword_offset, val.vsxval[0]);
}
}
#endif /* CONFIG_VSX */
@@ -1058,9 +1058,9 @@ static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
if (offset == -1)
return;
- val.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &val.vval);
val.vsxval[offset] = gpr;
- VCPU_VSX_VR(vcpu, index) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}
static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
@@ -1074,9 +1074,9 @@ static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
if (offset == -1)
return;
- val.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &val.vval);
val.vsx32val[offset] = gpr32;
- VCPU_VSX_VR(vcpu, index) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}
static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
@@ -1090,9 +1090,9 @@ static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
if (offset == -1)
return;
- val.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &val.vval);
val.vsx16val[offset] = gpr16;
- VCPU_VSX_VR(vcpu, index) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}
static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
@@ -1106,9 +1106,9 @@ static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
if (offset == -1)
return;
- val.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &val.vval);
val.vsx8val[offset] = gpr8;
- VCPU_VSX_VR(vcpu, index) = val.vval;
+ kvmppc_set_vsx_vr(vcpu, index, &val.vval);
}
#endif /* CONFIG_ALTIVEC */
@@ -1194,14 +1194,14 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
if (vcpu->kvm->arch.kvm_ops->giveup_ext)
vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
- VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
+ kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr);
break;
#ifdef CONFIG_PPC_BOOK3S
case KVM_MMIO_REG_QPR:
vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
break;
case KVM_MMIO_REG_FQPR:
- VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
+ kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr);
vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
break;
#endif
@@ -1419,9 +1419,9 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
}
if (rs < 32) {
- *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
+ *val = kvmppc_get_vsx_fpr(vcpu, rs, vsx_offset);
} else {
- reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
+ kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval);
*val = reg.vsxval[vsx_offset];
}
break;
@@ -1438,10 +1438,10 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
if (rs < 32) {
dword_offset = vsx_offset / 2;
word_offset = vsx_offset % 2;
- reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
+ reg.vsxval[0] = kvmppc_get_vsx_fpr(vcpu, rs, dword_offset);
*val = reg.vsx32val[word_offset];
} else {
- reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
+ kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval);
*val = reg.vsx32val[vsx_offset];
}
break;
@@ -1556,7 +1556,7 @@ static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
if (vmx_offset == -1)
return -1;
- reg.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
*val = reg.vsxval[vmx_offset];
return result;
@@ -1574,7 +1574,7 @@ static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
if (vmx_offset == -1)
return -1;
- reg.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
*val = reg.vsx32val[vmx_offset];
return result;
@@ -1592,7 +1592,7 @@ static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
if (vmx_offset == -1)
return -1;
- reg.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
*val = reg.vsx16val[vmx_offset];
return result;
@@ -1610,7 +1610,7 @@ static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
if (vmx_offset == -1)
return -1;
- reg.vval = VCPU_VSX_VR(vcpu, index);
+ kvmppc_get_vsx_vr(vcpu, index, &reg.vval);
*val = reg.vsx8val[vmx_offset];
return result;
@@ -1719,17 +1719,17 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
r = -ENXIO;
break;
}
- val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
+ kvmppc_get_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval);
break;
case KVM_REG_PPC_VSCR:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
- val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
+ val = get_reg_val(reg->id, kvmppc_get_vscr(vcpu));
break;
case KVM_REG_PPC_VRSAVE:
- val = get_reg_val(reg->id, vcpu->arch.vrsave);
+ val = get_reg_val(reg->id, kvmppc_get_vrsave(vcpu));
break;
#endif /* CONFIG_ALTIVEC */
default:
@@ -1770,21 +1770,21 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
r = -ENXIO;
break;
}
- vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
+ kvmppc_set_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval);
break;
case KVM_REG_PPC_VSCR:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
- vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
+ kvmppc_set_vscr(vcpu, set_reg_val(reg->id, val));
break;
case KVM_REG_PPC_VRSAVE:
if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
r = -ENXIO;
break;
}
- vcpu->arch.vrsave = set_reg_val(reg->id, val);
+ kvmppc_set_vrsave(vcpu, set_reg_val(reg->id, val));
break;
#endif /* CONFIG_ALTIVEC */
default:
diff --git a/arch/powerpc/kvm/test-guest-state-buffer.c b/arch/powerpc/kvm/test-guest-state-buffer.c
new file mode 100644
index 000000000000..4720b8dc8837
--- /dev/null
+++ b/arch/powerpc/kvm/test-guest-state-buffer.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/init.h>
+#include <linux/log2.h>
+#include <kunit/test.h>
+
+#include <asm/guest-state-buffer.h>
+
+static void test_creating_buffer(struct kunit *test)
+{
+ struct kvmppc_gs_buff *gsb;
+ size_t size = 0x100;
+
+ gsb = kvmppc_gsb_new(size, 0, 0, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);
+
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb->hdr);
+
+ KUNIT_EXPECT_EQ(test, gsb->capacity, roundup_pow_of_two(size));
+ KUNIT_EXPECT_EQ(test, gsb->len, sizeof(__be32));
+
+ kvmppc_gsb_free(gsb);
+}
+
+static void test_adding_element(struct kunit *test)
+{
+ const struct kvmppc_gs_elem *head, *curr;
+ union {
+ __vector128 v;
+ u64 dw[2];
+ } u;
+ int rem;
+ struct kvmppc_gs_buff *gsb;
+ size_t size = 0x1000;
+ int i, rc;
+ u64 data;
+
+ gsb = kvmppc_gsb_new(size, 0, 0, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);
+
+ /* Single elements, direct use of __kvmppc_gse_put() */
+ data = 0xdeadbeef;
+ rc = __kvmppc_gse_put(gsb, KVMPPC_GSID_GPR(0), 8, &data);
+ KUNIT_EXPECT_GE(test, rc, 0);
+
+ head = kvmppc_gsb_data(gsb);
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(head), KVMPPC_GSID_GPR(0));
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_len(head), 8);
+ data = 0;
+ memcpy(&data, kvmppc_gse_data(head), 8);
+ KUNIT_EXPECT_EQ(test, data, 0xdeadbeef);
+
+ /* Multiple elements, simple wrapper */
+ rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(1), 0xcafef00d);
+ KUNIT_EXPECT_GE(test, rc, 0);
+
+ u.dw[0] = 0x1;
+ u.dw[1] = 0x2;
+ rc = kvmppc_gse_put_vector128(gsb, KVMPPC_GSID_VSRS(0), &u.v);
+ KUNIT_EXPECT_GE(test, rc, 0);
+ u.dw[0] = 0x0;
+ u.dw[1] = 0x0;
+
+ kvmppc_gsb_for_each_elem(i, curr, gsb, rem) {
+ switch (i) {
+ case 0:
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(curr),
+ KVMPPC_GSID_GPR(0));
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_len(curr), 8);
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_get_be64(curr),
+ 0xdeadbeef);
+ break;
+ case 1:
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(curr),
+ KVMPPC_GSID_GPR(1));
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_len(curr), 8);
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_get_u64(curr),
+ 0xcafef00d);
+ break;
+ case 2:
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_iden(curr),
+ KVMPPC_GSID_VSRS(0));
+ KUNIT_EXPECT_EQ(test, kvmppc_gse_len(curr), 16);
+ kvmppc_gse_get_vector128(curr, &u.v);
+ KUNIT_EXPECT_EQ(test, u.dw[0], 0x1);
+ KUNIT_EXPECT_EQ(test, u.dw[1], 0x2);
+ break;
+ }
+ }
+ KUNIT_EXPECT_EQ(test, i, 3);
+
+ kvmppc_gsb_reset(gsb);
+ KUNIT_EXPECT_EQ(test, kvmppc_gsb_nelems(gsb), 0);
+ KUNIT_EXPECT_EQ(test, kvmppc_gsb_len(gsb),
+ sizeof(struct kvmppc_gs_header));
+
+ kvmppc_gsb_free(gsb);
+}
+
+static void test_gs_parsing(struct kunit *test)
+{
+ struct kvmppc_gs_elem *gse;
+ struct kvmppc_gs_parser gsp = { 0 };
+ struct kvmppc_gs_buff *gsb;
+ size_t size = 0x1000;
+ u64 tmp1, tmp2;
+
+ gsb = kvmppc_gsb_new(size, 0, 0, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);
+
+ tmp1 = 0xdeadbeefull;
+ kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(0), tmp1);
+
+ KUNIT_EXPECT_GE(test, kvmppc_gse_parse(&gsp, gsb), 0);
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_GPR(0));
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gse);
+
+ tmp2 = kvmppc_gse_get_u64(gse);
+ KUNIT_EXPECT_EQ(test, tmp2, 0xdeadbeefull);
+
+ kvmppc_gsb_free(gsb);
+}
+
+static void test_gs_bitmap(struct kunit *test)
+{
+ struct kvmppc_gs_bitmap gsbm = { 0 };
+ struct kvmppc_gs_bitmap gsbm1 = { 0 };
+ struct kvmppc_gs_bitmap gsbm2 = { 0 };
+ u16 iden;
+ int i, j;
+
+ i = 0;
+ for (u16 iden = KVMPPC_GSID_HOST_STATE_SIZE;
+ iden <= KVMPPC_GSID_PROCESS_TABLE; iden++) {
+ kvmppc_gsbm_set(&gsbm, iden);
+ kvmppc_gsbm_set(&gsbm1, iden);
+ KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
+ kvmppc_gsbm_clear(&gsbm, iden);
+ KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
+ i++;
+ }
+
+ for (u16 iden = KVMPPC_GSID_RUN_INPUT; iden <= KVMPPC_GSID_VPA;
+ iden++) {
+ kvmppc_gsbm_set(&gsbm, iden);
+ kvmppc_gsbm_set(&gsbm1, iden);
+ KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
+ kvmppc_gsbm_clear(&gsbm, iden);
+ KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
+ i++;
+ }
+
+ for (u16 iden = KVMPPC_GSID_GPR(0); iden <= KVMPPC_GSID_CTRL; iden++) {
+ kvmppc_gsbm_set(&gsbm, iden);
+ kvmppc_gsbm_set(&gsbm1, iden);
+ KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
+ kvmppc_gsbm_clear(&gsbm, iden);
+ KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
+ i++;
+ }
+
+ for (u16 iden = KVMPPC_GSID_CR; iden <= KVMPPC_GSID_PSPB; iden++) {
+ kvmppc_gsbm_set(&gsbm, iden);
+ kvmppc_gsbm_set(&gsbm1, iden);
+ KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
+ kvmppc_gsbm_clear(&gsbm, iden);
+ KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
+ i++;
+ }
+
+ for (u16 iden = KVMPPC_GSID_VSRS(0); iden <= KVMPPC_GSID_VSRS(63);
+ iden++) {
+ kvmppc_gsbm_set(&gsbm, iden);
+ kvmppc_gsbm_set(&gsbm1, iden);
+ KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
+ kvmppc_gsbm_clear(&gsbm, iden);
+ KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
+ i++;
+ }
+
+ for (u16 iden = KVMPPC_GSID_HDAR; iden <= KVMPPC_GSID_ASDR; iden++) {
+ kvmppc_gsbm_set(&gsbm, iden);
+ kvmppc_gsbm_set(&gsbm1, iden);
+ KUNIT_EXPECT_TRUE(test, kvmppc_gsbm_test(&gsbm, iden));
+ kvmppc_gsbm_clear(&gsbm, iden);
+ KUNIT_EXPECT_FALSE(test, kvmppc_gsbm_test(&gsbm, iden));
+ i++;
+ }
+
+ j = 0;
+ kvmppc_gsbm_for_each(&gsbm1, iden)
+ {
+ kvmppc_gsbm_set(&gsbm2, iden);
+ j++;
+ }
+ KUNIT_EXPECT_EQ(test, i, j);
+ KUNIT_EXPECT_MEMEQ(test, &gsbm1, &gsbm2, sizeof(gsbm1));
+}
+
+struct kvmppc_gs_msg_test1_data {
+ u64 a;
+ u32 b;
+ struct kvmppc_gs_part_table c;
+ struct kvmppc_gs_proc_table d;
+ struct kvmppc_gs_buff_info e;
+};
+
+static size_t test1_get_size(struct kvmppc_gs_msg *gsm)
+{
+ size_t size = 0;
+ u16 ids[] = {
+ KVMPPC_GSID_PARTITION_TABLE,
+ KVMPPC_GSID_PROCESS_TABLE,
+ KVMPPC_GSID_RUN_INPUT,
+ KVMPPC_GSID_GPR(0),
+ KVMPPC_GSID_CR,
+ };
+
+ for (int i = 0; i < ARRAY_SIZE(ids); i++)
+ size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i]));
+ return size;
+}
+
+static int test1_fill_info(struct kvmppc_gs_buff *gsb,
+ struct kvmppc_gs_msg *gsm)
+{
+ struct kvmppc_gs_msg_test1_data *data = gsm->data;
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_GPR(0)))
+ kvmppc_gse_put_u64(gsb, KVMPPC_GSID_GPR(0), data->a);
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_CR))
+ kvmppc_gse_put_u32(gsb, KVMPPC_GSID_CR, data->b);
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_PARTITION_TABLE))
+ kvmppc_gse_put_part_table(gsb, KVMPPC_GSID_PARTITION_TABLE,
+ data->c);
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_PROCESS_TABLE))
+ kvmppc_gse_put_proc_table(gsb, KVMPPC_GSID_PROCESS_TABLE,
+ data->d);
+
+ if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_INPUT))
+ kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_INPUT, data->e);
+
+ return 0;
+}
+
+static int test1_refresh_info(struct kvmppc_gs_msg *gsm,
+ struct kvmppc_gs_buff *gsb)
+{
+ struct kvmppc_gs_parser gsp = { 0 };
+ struct kvmppc_gs_msg_test1_data *data = gsm->data;
+ struct kvmppc_gs_elem *gse;
+ int rc;
+
+ rc = kvmppc_gse_parse(&gsp, gsb);
+ if (rc < 0)
+ return rc;
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_GPR(0));
+ if (gse)
+ data->a = kvmppc_gse_get_u64(gse);
+
+ gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_CR);
+ if (gse)
+ data->b = kvmppc_gse_get_u32(gse);
+
+ return 0;
+}
+
+static struct kvmppc_gs_msg_ops gs_msg_test1_ops = {
+ .get_size = test1_get_size,
+ .fill_info = test1_fill_info,
+ .refresh_info = test1_refresh_info,
+};
+
+static void test_gs_msg(struct kunit *test)
+{
+ struct kvmppc_gs_msg_test1_data test1_data = {
+ .a = 0xdeadbeef,
+ .b = 0x1,
+ };
+ struct kvmppc_gs_msg *gsm;
+ struct kvmppc_gs_buff *gsb;
+
+ gsm = kvmppc_gsm_new(&gs_msg_test1_ops, &test1_data, GSM_SEND,
+ GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsm);
+
+ gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), 0, 0, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, gsb);
+
+ kvmppc_gsm_include(gsm, KVMPPC_GSID_PARTITION_TABLE);
+ kvmppc_gsm_include(gsm, KVMPPC_GSID_PROCESS_TABLE);
+ kvmppc_gsm_include(gsm, KVMPPC_GSID_RUN_INPUT);
+ kvmppc_gsm_include(gsm, KVMPPC_GSID_GPR(0));
+ kvmppc_gsm_include(gsm, KVMPPC_GSID_CR);
+
+ kvmppc_gsm_fill_info(gsm, gsb);
+
+ memset(&test1_data, 0, sizeof(test1_data));
+
+ kvmppc_gsm_refresh_info(gsm, gsb);
+ KUNIT_EXPECT_EQ(test, test1_data.a, 0xdeadbeef);
+ KUNIT_EXPECT_EQ(test, test1_data.b, 0x1);
+
+ kvmppc_gsm_free(gsm);
+}
+
+static struct kunit_case guest_state_buffer_testcases[] = {
+ KUNIT_CASE(test_creating_buffer),
+ KUNIT_CASE(test_adding_element),
+ KUNIT_CASE(test_gs_bitmap),
+ KUNIT_CASE(test_gs_parsing),
+ KUNIT_CASE(test_gs_msg),
+ {}
+};
+
+static struct kunit_suite guest_state_buffer_test_suite = {
+ .name = "guest_state_buffer_test",
+ .test_cases = guest_state_buffer_testcases,
+};
+
+kunit_test_suites(&guest_state_buffer_test_suite);
+
+MODULE_LICENSE("GPL");