author		Linus Torvalds <torvalds@linux-foundation.org>	2018-10-25 17:57:35 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-10-25 17:57:35 -0700
commit		0d1e8b8d2bcd3150d51754d8d0fdbf44dc88b0d3 (patch)
tree		2794cb2347daa76b00160a6ffb68663f4138dcc7 /arch/s390
parent		83c4087ce468601501ecde4d0ec5b2abd5f57c31 (diff)
parent		22a7cdcae6a4a3c8974899e62851d270956f58ce (diff)
Merge tag 'kvm-4.20-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Radim Krčmář:

 "ARM:
   - Improved guest IPA space support (32 to 52 bits)
   - RAS event delivery for 32bit
   - PMU fixes
   - Guest entry hardening
   - Various cleanups
   - Port of dirty_log_test selftest

  PPC:
   - Nested HV KVM support for radix guests on POWER9. The performance
     is much better than with PR KVM. Migration and arbitrary levels of
     nesting are supported.
   - Disable nested HV-KVM on early POWER9 chips that need a particular
     hardware bug workaround
   - One VM per core mode to prevent potential data leaks
   - PCI pass-through optimization
   - Merge the ppc-kvm topic branch and kvm-ppc-fixes to get a better
     base

  s390:
   - Initial version of AP crypto virtualization via vfio-mdev
   - Improvements for vfio-ap
   - Set the host program identifier
   - Optimize page table locking

  x86:
   - Enable nested virtualization by default
   - Implement Hyper-V IPI hypercalls
   - Improve #PF and #DB handling
   - Allow guests to use Enlightened VMCS
   - Add migration selftests for VMCS and Enlightened VMCS
   - Allow coalesced PIO accesses
   - Add an option to perform nested VMCS host state consistency checks
     through hardware
   - Automatic tuning of lapic_timer_advance_ns
   - Many fixes, minor improvements, and cleanups"

* tag 'kvm-4.20-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (204 commits)
  KVM/nVMX: Do not validate that posted_intr_desc_addr is page aligned
  Revert "kvm: x86: optimize dr6 restore"
  KVM: PPC: Optimize clearing TCEs for sparse tables
  x86/kvm/nVMX: tweak shadow fields
  selftests/kvm: add missing executables to .gitignore
  KVM: arm64: Safety check PSTATE when entering guest and handle IL
  KVM: PPC: Book3S HV: Don't use streamlined entry path on early POWER9 chips
  arm/arm64: KVM: Enable 32 bits kvm vcpu events support
  arm/arm64: KVM: Rename function kvm_arch_dev_ioctl_check_extension()
  KVM: arm64: Fix caching of host MDCR_EL2 value
  KVM: VMX: enable nested virtualization by default
  KVM/x86: Use 32bit xor to clear registers in svm.c
  kvm: x86: Introduce KVM_CAP_EXCEPTION_PAYLOAD
  kvm: vmx: Defer setting of DR6 until #DB delivery
  kvm: x86: Defer setting of CR2 until #PF delivery
  kvm: x86: Add payload operands to kvm_multiple_exception
  kvm: x86: Add exception payload fields to kvm_vcpu_events
  kvm: x86: Add has_payload and payload to kvm_queued_exception
  KVM: Documentation: Fix omission in struct kvm_vcpu_events
  KVM: selftests: add Enlightened VMCS test
  ...
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/Kconfig			 11
-rw-r--r--	arch/s390/include/asm/kvm_host.h	 15
-rw-r--r--	arch/s390/include/uapi/asm/kvm.h	  2
-rw-r--r--	arch/s390/kvm/kvm-s390.c		184
-rw-r--r--	arch/s390/kvm/kvm-s390.h		  1
-rw-r--r--	arch/s390/kvm/vsie.c			210
-rw-r--r--	arch/s390/mm/gmap.c			 10
-rw-r--r--	arch/s390/tools/gen_facilities.c	  2
8 files changed, 372 insertions, 63 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 039a3417dfc4..8b25e1f45b27 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -783,6 +783,17 @@ config VFIO_CCW
To compile this driver as a module, choose M here: the
module will be called vfio_ccw.
+config VFIO_AP
+ def_tristate n
+ prompt "VFIO support for AP devices"
+ depends on S390_AP_IOMMU && VFIO_MDEV_DEVICE && KVM
+ help
+ This driver grants access to Adjunct Processor (AP) devices
+ via the VFIO mediated device interface.
+
+ To compile this driver as a module, choose M here: the module
+ will be called vfio_ap.
+
endmenu
menu "Dump support"
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 29c940bf8506..d5d24889c3bc 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -44,6 +44,7 @@
#define KVM_REQ_ICPT_OPEREXC KVM_ARCH_REQ(2)
#define KVM_REQ_START_MIGRATION KVM_ARCH_REQ(3)
#define KVM_REQ_STOP_MIGRATION KVM_ARCH_REQ(4)
+#define KVM_REQ_VSIE_RESTART KVM_ARCH_REQ(5)
#define SIGP_CTRL_C 0x80
#define SIGP_CTRL_SCN_MASK 0x3f
@@ -186,6 +187,7 @@ struct kvm_s390_sie_block {
#define ECA_AIV 0x00200000
#define ECA_VX 0x00020000
#define ECA_PROTEXCI 0x00002000
+#define ECA_APIE 0x00000008
#define ECA_SII 0x00000001
__u32 eca; /* 0x004c */
#define ICPT_INST 0x04
@@ -237,7 +239,11 @@ struct kvm_s390_sie_block {
psw_t gpsw; /* 0x0090 */
__u64 gg14; /* 0x00a0 */
__u64 gg15; /* 0x00a8 */
- __u8 reservedb0[20]; /* 0x00b0 */
+ __u8 reservedb0[8]; /* 0x00b0 */
+#define HPID_KVM 0x4
+#define HPID_VSIE 0x5
+ __u8 hpid; /* 0x00b8 */
+ __u8 reservedb9[11]; /* 0x00b9 */
__u16 extcpuaddr; /* 0x00c4 */
__u16 eic; /* 0x00c6 */
__u32 reservedc8; /* 0x00c8 */
@@ -255,6 +261,8 @@ struct kvm_s390_sie_block {
__u8 reservede4[4]; /* 0x00e4 */
__u64 tecmc; /* 0x00e8 */
__u8 reservedf0[12]; /* 0x00f0 */
+#define CRYCB_FORMAT_MASK 0x00000003
+#define CRYCB_FORMAT0 0x00000000
#define CRYCB_FORMAT1 0x00000001
#define CRYCB_FORMAT2 0x00000003
__u32 crycbd; /* 0x00fc */
@@ -715,6 +723,7 @@ struct kvm_s390_crypto {
__u32 crycbd;
__u8 aes_kw;
__u8 dea_kw;
+ __u8 apie;
};
#define APCB0_MASK_SIZE 1
@@ -855,6 +864,10 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
+void kvm_arch_crypto_clear_masks(struct kvm *kvm);
+void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
+ unsigned long *aqm, unsigned long *adm);
+
extern int sie64a(struct kvm_s390_sie_block *, u64 *);
extern char sie_exit;
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index 9a50f02b9894..16511d97e8dc 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -160,6 +160,8 @@ struct kvm_s390_vm_cpu_subfunc {
#define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW 1
#define KVM_S390_VM_CRYPTO_DISABLE_AES_KW 2
#define KVM_S390_VM_CRYPTO_DISABLE_DEA_KW 3
+#define KVM_S390_VM_CRYPTO_ENABLE_APIE 4
+#define KVM_S390_VM_CRYPTO_DISABLE_APIE 5
/* kvm attributes for migration mode */
#define KVM_S390_VM_MIGRATION_STOP 0
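The two new attribute values are exercised through the generic KVM device-attribute ioctls on the VM file descriptor. A minimal userspace sketch, assuming an open VM fd and omitting the surrounding VMM plumbing (this helper is illustrative, not part of the series):

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Probe for and enable AP instruction interpretation for a VM.
	 * KVM_HAS_DEVICE_ATTR fails with ENXIO when the host has no AP
	 * instructions (see the kvm-s390.c hunk below).
	 */
	static int enable_apie(int vm_fd)
	{
		struct kvm_device_attr attr = {
			.group = KVM_S390_VM_CRYPTO,
			.attr  = KVM_S390_VM_CRYPTO_ENABLE_APIE,
		};

		if (ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr))
			return -1;
		return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
	}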
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ac5da6b0b862..fe24150ff666 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -40,6 +40,7 @@
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
+#include <asm/ap.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -844,20 +845,24 @@ void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
kvm_s390_vcpu_block_all(kvm);
- kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_for_each_vcpu(i, vcpu, kvm) {
kvm_s390_vcpu_crypto_setup(vcpu);
+ /* recreate the shadow crycb by leaving the VSIE handler */
+ kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
+ }
kvm_s390_vcpu_unblock_all(kvm);
}
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
- if (!test_kvm_facility(kvm, 76))
- return -EINVAL;
-
mutex_lock(&kvm->lock);
switch (attr->attr) {
case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
+ if (!test_kvm_facility(kvm, 76)) {
+ mutex_unlock(&kvm->lock);
+ return -EINVAL;
+ }
get_random_bytes(
kvm->arch.crypto.crycb->aes_wrapping_key_mask,
sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
@@ -865,6 +870,10 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
break;
case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
+ if (!test_kvm_facility(kvm, 76)) {
+ mutex_unlock(&kvm->lock);
+ return -EINVAL;
+ }
get_random_bytes(
kvm->arch.crypto.crycb->dea_wrapping_key_mask,
sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
@@ -872,17 +881,39 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
break;
case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
+ if (!test_kvm_facility(kvm, 76)) {
+ mutex_unlock(&kvm->lock);
+ return -EINVAL;
+ }
kvm->arch.crypto.aes_kw = 0;
memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
break;
case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
+ if (!test_kvm_facility(kvm, 76)) {
+ mutex_unlock(&kvm->lock);
+ return -EINVAL;
+ }
kvm->arch.crypto.dea_kw = 0;
memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
break;
+ case KVM_S390_VM_CRYPTO_ENABLE_APIE:
+ if (!ap_instructions_available()) {
+ mutex_unlock(&kvm->lock);
+ return -EOPNOTSUPP;
+ }
+ kvm->arch.crypto.apie = 1;
+ break;
+ case KVM_S390_VM_CRYPTO_DISABLE_APIE:
+ if (!ap_instructions_available()) {
+ mutex_unlock(&kvm->lock);
+ return -EOPNOTSUPP;
+ }
+ kvm->arch.crypto.apie = 0;
+ break;
default:
mutex_unlock(&kvm->lock);
return -ENXIO;
@@ -1491,6 +1522,10 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
ret = 0;
break;
+ case KVM_S390_VM_CRYPTO_ENABLE_APIE:
+ case KVM_S390_VM_CRYPTO_DISABLE_APIE:
+ ret = ap_instructions_available() ? 0 : -ENXIO;
+ break;
default:
ret = -ENXIO;
break;
@@ -1992,55 +2027,101 @@ long kvm_arch_vm_ioctl(struct file *filp,
return r;
}
-static int kvm_s390_query_ap_config(u8 *config)
-{
- u32 fcn_code = 0x04000000UL;
- u32 cc = 0;
-
- memset(config, 0, 128);
- asm volatile(
- "lgr 0,%1\n"
- "lgr 2,%2\n"
- ".long 0xb2af0000\n" /* PQAP(QCI) */
- "0: ipm %0\n"
- "srl %0,28\n"
- "1:\n"
- EX_TABLE(0b, 1b)
- : "+r" (cc)
- : "r" (fcn_code), "r" (config)
- : "cc", "0", "2", "memory"
- );
-
- return cc;
-}
-
static int kvm_s390_apxa_installed(void)
{
- u8 config[128];
- int cc;
+ struct ap_config_info info;
- if (test_facility(12)) {
- cc = kvm_s390_query_ap_config(config);
-
- if (cc)
- pr_err("PQAP(QCI) failed with cc=%d", cc);
- else
- return config[0] & 0x40;
+ if (ap_instructions_available()) {
+ if (ap_qci(&info) == 0)
+ return info.apxa;
}
return 0;
}
+/*
+ * The format of the crypto control block (CRYCB) is specified in the 3 low
+ * order bits of the CRYCB designation (CRYCBD) field as follows:
+ * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
+ *           AP extended addressing (APXA) facility is installed.
+ * Format 1: The APXA facility is not installed but the MSAX3 facility is.
+ * Format 2: Both the APXA and MSAX3 facilities are installed.
+ */
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
+ /* Clear the CRYCB format bits - i.e., set format 0 by default */
+ kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
+
+ /* Check whether MSAX3 is installed */
+ if (!test_kvm_facility(kvm, 76))
+ return;
+
if (kvm_s390_apxa_installed())
kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
else
kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}
+void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
+ unsigned long *aqm, unsigned long *adm)
+{
+ struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
+
+ mutex_lock(&kvm->lock);
+ kvm_s390_vcpu_block_all(kvm);
+
+ switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
+ case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
+ memcpy(crycb->apcb1.apm, apm, 32);
+ VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
+ apm[0], apm[1], apm[2], apm[3]);
+ memcpy(crycb->apcb1.aqm, aqm, 32);
+ VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
+ aqm[0], aqm[1], aqm[2], aqm[3]);
+ memcpy(crycb->apcb1.adm, adm, 32);
+ VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
+ adm[0], adm[1], adm[2], adm[3]);
+ break;
+ case CRYCB_FORMAT1:
+ case CRYCB_FORMAT0: /* fall through; both use APCB0 */
+ memcpy(crycb->apcb0.apm, apm, 8);
+ memcpy(crycb->apcb0.aqm, aqm, 2);
+ memcpy(crycb->apcb0.adm, adm, 2);
+ VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
+ apm[0], *((unsigned short *)aqm),
+ *((unsigned short *)adm));
+ break;
+ default: /* cannot happen */
+ break;
+ }
+
+ /* recreate the shadow crycb for each vcpu */
+ kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
+ kvm_s390_vcpu_unblock_all(kvm);
+ mutex_unlock(&kvm->lock);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
+
+void kvm_arch_crypto_clear_masks(struct kvm *kvm)
+{
+ mutex_lock(&kvm->lock);
+ kvm_s390_vcpu_block_all(kvm);
+
+ memset(&kvm->arch.crypto.crycb->apcb0, 0,
+ sizeof(kvm->arch.crypto.crycb->apcb0));
+ memset(&kvm->arch.crypto.crycb->apcb1, 0,
+ sizeof(kvm->arch.crypto.crycb->apcb1));
+
+ VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
+ /* recreate the shadow crycb for each vcpu */
+ kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
+ kvm_s390_vcpu_unblock_all(kvm);
+ mutex_unlock(&kvm->lock);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
+
static u64 kvm_s390_get_initial_cpuid(void)
{
struct cpuid cpuid;
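For context, the sketch below shows how an in-kernel consumer (the vfio-ap driver introduced alongside this series is the intended caller) might hand a single adapter/usage-domain pair to a guest through the interface exported above; the function and parameter names are illustrative, not taken from this diff.

	#include <linux/bitmap.h>
	#include <linux/kvm_host.h>

	/* AP masks are MSB-0 bit strings, hence the s390 *_inv bitops.
	 * The 256-bit width matches the format-2 APCB1 fields copied in
	 * kvm_arch_crypto_set_masks() above.
	 */
	static void grant_ap_queue_sketch(struct kvm *kvm, int apid, int apqi)
	{
		DECLARE_BITMAP(apm, 256) = { 0 };
		DECLARE_BITMAP(aqm, 256) = { 0 };
		DECLARE_BITMAP(adm, 256) = { 0 };

		set_bit_inv(apid, apm);		/* adapter mask */
		set_bit_inv(apqi, aqm);		/* usage-domain mask */

		/* blocks all vCPUs, installs the masks, forces re-shadowing */
		kvm_arch_crypto_set_masks(kvm, apm, aqm, adm);
	}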
@@ -2052,12 +2133,12 @@ static u64 kvm_s390_get_initial_cpuid(void)
static void kvm_s390_crypto_init(struct kvm *kvm)
{
- if (!test_kvm_facility(kvm, 76))
- return;
-
kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
kvm_s390_set_crycb_format(kvm);
+ if (!test_kvm_facility(kvm, 76))
+ return;
+
/* Enable AES/DEA protected key functions by default */
kvm->arch.crypto.aes_kw = 1;
kvm->arch.crypto.dea_kw = 1;
@@ -2583,17 +2664,25 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
- if (!test_kvm_facility(vcpu->kvm, 76))
+ /*
+ * If the AP instructions are not being interpreted and the MSAX3
+ * facility is not configured for the guest, there is nothing to set up.
+ */
+ if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
return;
+ vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
+ vcpu->arch.sie_block->eca &= ~ECA_APIE;
+
+ if (vcpu->kvm->arch.crypto.apie)
+ vcpu->arch.sie_block->eca |= ECA_APIE;
+ /* Set up protected key support */
if (vcpu->kvm->arch.crypto.aes_kw)
vcpu->arch.sie_block->ecb3 |= ECB3_AES;
if (vcpu->kvm->arch.crypto.dea_kw)
vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
-
- vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
@@ -2685,6 +2774,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
+ vcpu->arch.sie_block->hpid = HPID_KVM;
+
kvm_s390_vcpu_crypto_setup(vcpu);
return rc;
@@ -2768,18 +2859,25 @@ static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
exit_sie(vcpu);
}
+bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
+{
+ return atomic_read(&vcpu->arch.sie_block->prog20) &
+ (PROG_BLOCK_SIE | PROG_REQUEST);
+}
+
static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}
/*
- * Kick a guest cpu out of SIE and wait until SIE is not running.
+ * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
* If the CPU is not running (e.g. waiting as idle) the function will
* return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
+ kvm_s390_vsie_kick(vcpu);
while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
cpu_relax();
}
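For the other side of this handshake: a requester raises the inhibit bits in prog20 before kicking, and exit_sie() spins until the CPU has left guest context. A condensed sketch of the pattern already implemented by kvm_s390_sync_request()/kvm_s390_vcpu_request() in this file (not new code in this diff):

	static void sync_request_sketch(int req, struct kvm_vcpu *vcpu)
	{
		kvm_make_request(req, vcpu);
		/* raise the inhibit bit first ... */
		atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
		/* ... then kick; exit_sie() busy-waits until PROG_IN_SIE
		 * drops, which do_vsie_run() now also sets around sie64a(),
		 * so the request is seen before the next (v)SIE entry.
		 */
		exit_sie(vcpu);
	}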
@@ -3196,6 +3294,8 @@ retry:
/* nothing to do, just clear the request */
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+ /* we left the vsie handler, nothing to do, just clear the request */
+ kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
return 0;
}
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 981e3ba97461..1f6e36cdce0d 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -290,6 +290,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
+bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu);
void exit_sie(struct kvm_vcpu *vcpu);
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index a2b28cd1e3fe..a153257bf7d9 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -135,14 +135,148 @@ static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
atomic_set(&scb_s->cpuflags, newflags);
return 0;
}
+/* Copy the guest's FORMAT0 APCB into the shadow FORMAT1 APCB */
+static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s,
+ unsigned long apcb_o, struct kvm_s390_apcb1 *apcb_h)
+{
+ struct kvm_s390_apcb0 tmp;
-/*
+ if (read_guest_real(vcpu, apcb_o, &tmp, sizeof(struct kvm_s390_apcb0)))
+ return -EFAULT;
+
+ apcb_s->apm[0] = apcb_h->apm[0] & tmp.apm[0];
+ apcb_s->aqm[0] = apcb_h->aqm[0] & tmp.aqm[0] & 0xffff000000000000UL;
+ apcb_s->adm[0] = apcb_h->adm[0] & tmp.adm[0] & 0xffff000000000000UL;
+
+ return 0;
+
+}
+
+/**
+ * setup_apcb00 - Copy the guest's FORMAT0 APCB into the shadow FORMAT0 APCB
+ * @vcpu: pointer to the virtual CPU
+ * @apcb_s: pointer to start of apcb in the shadow crycb
+ * @apcb_o: pointer to start of original apcb in the guest2
+ * @apcb_h: pointer to start of apcb in the guest1
+ *
+ * Returns 0 on success, -EFAULT on error reading the guest apcb
+ */
+static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
+ unsigned long apcb_o, unsigned long *apcb_h)
+{
+ if (read_guest_real(vcpu, apcb_o, apcb_s,
+ sizeof(struct kvm_s390_apcb0)))
+ return -EFAULT;
+
+ bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb0));
+
+ return 0;
+}
+
+/**
+ * setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB
+ * @vcpu: pointer to the virtual CPU
+ * @apcb_s: pointer to start of apcb in the shadow crycb
+ * @apcb_o: pointer to start of original guest apcb
+ * @apcb_h: pointer to start of apcb in the host
+ *
+ * Returns 0 on success, -EFAULT on error reading the guest apcb
+ */
+static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
+ unsigned long apcb_o,
+ unsigned long *apcb_h)
+{
+ if (read_guest_real(vcpu, apcb_o, apcb_s,
+ sizeof(struct kvm_s390_apcb1)))
+ return -EFAULT;
+
+ bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb1));
+
+ return 0;
+}
+
+/**
+ * setup_apcb - Create a shadow copy of the apcb.
+ * @vcpu: pointer to the virtual CPU
+ * @crycb_s: pointer to shadow crycb
+ * @crycb_o: pointer to original guest crycb
+ * @crycb_h: pointer to the host crycb
+ * @fmt_o: format of the original guest crycb.
+ * @fmt_h: format of the host crycb.
+ *
+ * Checks the compatibility between the guest and host crycb and calls the
+ * appropriate copy function.
+ *
+ * Returns 0 on success, or an error number if the guest and host crycb are incompatible.
+ */
+static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s,
+ const u32 crycb_o,
+ struct kvm_s390_crypto_cb *crycb_h,
+ int fmt_o, int fmt_h)
+{
+ struct kvm_s390_crypto_cb *crycb;
+
+ crycb = (struct kvm_s390_crypto_cb *) (unsigned long)crycb_o;
+
+ switch (fmt_o) {
+ case CRYCB_FORMAT2:
+ if ((crycb_o & PAGE_MASK) != ((crycb_o + 256) & PAGE_MASK))
+ return -EACCES;
+ if (fmt_h != CRYCB_FORMAT2)
+ return -EINVAL;
+ return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1,
+ (unsigned long) &crycb->apcb1,
+ (unsigned long *)&crycb_h->apcb1);
+ case CRYCB_FORMAT1:
+ switch (fmt_h) {
+ case CRYCB_FORMAT2:
+ return setup_apcb10(vcpu, &crycb_s->apcb1,
+ (unsigned long) &crycb->apcb0,
+ &crycb_h->apcb1);
+ case CRYCB_FORMAT1:
+ return setup_apcb00(vcpu,
+ (unsigned long *) &crycb_s->apcb0,
+ (unsigned long) &crycb->apcb0,
+ (unsigned long *) &crycb_h->apcb0);
+ }
+ break;
+ case CRYCB_FORMAT0:
+ if ((crycb_o & PAGE_MASK) != ((crycb_o + 32) & PAGE_MASK))
+ return -EACCES;
+
+ switch (fmt_h) {
+ case CRYCB_FORMAT2:
+ return setup_apcb10(vcpu, &crycb_s->apcb1,
+ (unsigned long) &crycb->apcb0,
+ &crycb_h->apcb1);
+ case CRYCB_FORMAT1:
+ case CRYCB_FORMAT0:
+ return setup_apcb00(vcpu,
+ (unsigned long *) &crycb_s->apcb0,
+ (unsigned long) &crycb->apcb0,
+ (unsigned long *) &crycb_h->apcb0);
+ }
+ }
+ return -EINVAL;
+}
+
+/**
+ * shadow_crycb - Create a shadow copy of the crycb block
+ * @vcpu: a pointer to the virtual CPU
+ * @vsie_page: a pointer to internal data used for the vSIE
+ *
* Create a shadow copy of the crycb block and setup key wrapping, if
* requested for guest 3 and enabled for guest 2.
*
- * We only accept format-1 (no AP in g2), but convert it into format-2
+ * We accept format-1 or format-2, but we convert format-1 into format-2
+ * in the shadow CRYCB.
+ * Using format-2 enables the firmware to choose the right format when
+ * scheduling the SIE.
* There is nothing to do for format-0.
*
+ * This function centralizes the issuing of set_validity_icpt() for all
+ * the subfunctions working on the crycb.
+ *
* Returns: - 0 if shadowed or nothing to do
* - > 0 if control has to be given to guest 2
*/
@@ -154,23 +288,40 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
unsigned long *b1, *b2;
u8 ecb3_flags;
+ int apie_h;
+ int key_msk = test_kvm_facility(vcpu->kvm, 76);
+ int fmt_o = crycbd_o & CRYCB_FORMAT_MASK;
+ int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;
+ int ret = 0;
scb_s->crycbd = 0;
- if (!(crycbd_o & vcpu->arch.sie_block->crycbd & CRYCB_FORMAT1))
- return 0;
- /* format-1 is supported with message-security-assist extension 3 */
- if (!test_kvm_facility(vcpu->kvm, 76))
+
+ apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
+ if (!apie_h && !key_msk)
return 0;
+
+ if (!crycb_addr)
+ return set_validity_icpt(scb_s, 0x0039U);
+
+ if (fmt_o == CRYCB_FORMAT1)
+ if ((crycb_addr & PAGE_MASK) !=
+ ((crycb_addr + 128) & PAGE_MASK))
+ return set_validity_icpt(scb_s, 0x003CU);
+
+ if (apie_h && (scb_o->eca & ECA_APIE)) {
+ ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr,
+ vcpu->kvm->arch.crypto.crycb,
+ fmt_o, fmt_h);
+ if (ret)
+ goto end;
+ scb_s->eca |= scb_o->eca & ECA_APIE;
+ }
+
/* we may only allow it if enabled for guest 2 */
ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
(ECB3_AES | ECB3_DEA);
if (!ecb3_flags)
- return 0;
-
- if ((crycb_addr & PAGE_MASK) != ((crycb_addr + 128) & PAGE_MASK))
- return set_validity_icpt(scb_s, 0x003CU);
- else if (!crycb_addr)
- return set_validity_icpt(scb_s, 0x0039U);
+ goto end;
/* copy only the wrapping keys */
if (read_guest_real(vcpu, crycb_addr + 72,
@@ -178,8 +329,6 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
return set_validity_icpt(scb_s, 0x0035U);
scb_s->ecb3 |= ecb3_flags;
- scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT1 |
- CRYCB_FORMAT2;
/* xor both blocks in one run */
b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
@@ -187,6 +336,16 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
/* as 56%8 == 0, bitmap_xor won't overwrite any data */
bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);
+end:
+ switch (ret) {
+ case -EINVAL:
+ return set_validity_icpt(scb_s, 0x0020U);
+ case -EFAULT:
+ return set_validity_icpt(scb_s, 0x0035U);
+ case -EACCES:
+ return set_validity_icpt(scb_s, 0x003CU);
+ }
+ scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT2;
return 0;
}
@@ -383,6 +542,8 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
if (test_kvm_facility(vcpu->kvm, 156))
scb_s->ecd |= scb_o->ecd & ECD_ETOKENF;
+ scb_s->hpid = HPID_VSIE;
+
prepare_ibc(vcpu, vsie_page);
rc = shadow_crycb(vcpu, vsie_page);
out:
@@ -830,7 +991,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
int guest_bp_isolation;
- int rc;
+ int rc = 0;
handle_last_fault(vcpu, vsie_page);
@@ -858,7 +1019,18 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
guest_enter_irqoff();
local_irq_enable();
- rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
+ /*
+ * Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking
+ * and VCPU requests also hinder the vSIE from running and lead
+ * to an immediate exit. kvm_s390_vsie_kick() has to be used to
+ * also kick the vSIE.
+ */
+ vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
+ barrier();
+ if (!kvm_s390_vcpu_sie_inhibited(vcpu))
+ rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
+ barrier();
+ vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;
local_irq_disable();
guest_exit_irqoff();
@@ -1005,7 +1177,8 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
if (rc == -EAGAIN)
rc = 0;
if (rc || scb_s->icptcode || signal_pending(current) ||
- kvm_s390_vcpu_has_irq(vcpu, 0))
+ kvm_s390_vcpu_has_irq(vcpu, 0) ||
+ kvm_s390_vcpu_sie_inhibited(vcpu))
break;
}
@@ -1122,7 +1295,8 @@ int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
if (unlikely(scb_addr & 0x1ffUL))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0))
+ if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0) ||
+ kvm_s390_vcpu_sie_inhibited(vcpu))
return 0;
vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 911c7ded35f1..1e668b95e0c6 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -907,10 +907,16 @@ static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
pmd_t *pmdp;
BUG_ON(gmap_is_shadow(gmap));
- spin_lock(&gmap->guest_table_lock);
pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1);
+ if (!pmdp)
+ return NULL;
- if (!pmdp || pmd_none(*pmdp)) {
+ /* without huge pages, there is no need to take the table lock */
+ if (!gmap->mm->context.allow_gmap_hpage_1m)
+ return pmd_none(*pmdp) ? NULL : pmdp;
+
+ spin_lock(&gmap->guest_table_lock);
+ if (pmd_none(*pmdp)) {
spin_unlock(&gmap->guest_table_lock);
return NULL;
}
diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c
index 0c85aedcf9b3..fd788e0f2e5b 100644
--- a/arch/s390/tools/gen_facilities.c
+++ b/arch/s390/tools/gen_facilities.c
@@ -106,6 +106,8 @@ static struct facility_def facility_defs[] = {
.name = "FACILITIES_KVM_CPUMODEL",
.bits = (int[]){
+ 12, /* AP Query Configuration Information */
+ 15, /* AP Facilities Test */
156, /* etoken facility */
-1 /* END */
}