author     Paolo Bonzini <pbonzini@redhat.com>  2018-10-13 12:00:26 +0200
committer  Paolo Bonzini <pbonzini@redhat.com>  2018-10-13 12:00:26 +0200
commit     3d0d0d9b1d805e39456a9d49443d847092cb21ab
tree       6e99cda54e3db2f9ce039ac3d1269de2a81cc818
parent     7dd2157cb61a38bee83e3bc4f9bc3311f7053b4b
parent     ed3054a3025879c7d3f64de7a58b7f6427e0d3a0
Merge tag 'kvm-s390-next-4.20-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD
KVM: s390/vfio-ap: Fixes and enhancements for vfio-ap

- add tracing
- fix a locking bug
- make local functions and data static
-rw-r--r--  arch/s390/include/asm/kvm_host.h   |  2
-rw-r--r--  arch/s390/kvm/kvm-s390.c           | 57
-rw-r--r--  drivers/s390/crypto/vfio_ap_drv.c  |  4
-rw-r--r--  drivers/s390/crypto/vfio_ap_ops.c  | 55
4 files changed, 70 insertions(+), 48 deletions(-)
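
For context on the locking bug called out in the merge description: kvm_s390_vm_set_crypto() takes kvm->lock at entry, but the old code returned -EINVAL with the lock still held whenever facility 76 was missing. The sketch below condenses the fixed control flow to a single case; it is illustrative only, with the remaining cases, the default branch, and the final vcpu crypto reset simplified or elided rather than copied from the kernel source.

/*
 * Minimal sketch of the fixed flow in kvm_s390_vm_set_crypto() -- not the
 * full function.  The fix is the mutex_unlock() before each early return;
 * previously the facility check bailed out with kvm->lock still held.
 */
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);	/* added by the fix */
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	/* ... the DEA enable and both disable cases follow the same pattern ... */
	default:
		mutex_unlock(&kvm->lock);	/* simplified: unsupported attribute */
		return -ENXIO;
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
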
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index febd1709472a..d5d24889c3bc 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -865,6 +865,8 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
void kvm_arch_crypto_clear_masks(struct kvm *kvm);
+void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
+ unsigned long *aqm, unsigned long *adm);
extern int sie64a(struct kvm_s390_sie_block *, u64 *);
extern char sie_exit;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index b7964d77c6cd..fe24150ff666 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -859,8 +859,10 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
mutex_lock(&kvm->lock);
switch (attr->attr) {
case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
- if (!test_kvm_facility(kvm, 76))
+ if (!test_kvm_facility(kvm, 76)) {
+ mutex_unlock(&kvm->lock);
return -EINVAL;
+ }
get_random_bytes(
kvm->arch.crypto.crycb->aes_wrapping_key_mask,
sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
@@ -868,8 +870,10 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
break;
case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
- if (!test_kvm_facility(kvm, 76))
+ if (!test_kvm_facility(kvm, 76)) {
+ mutex_unlock(&kvm->lock);
return -EINVAL;
+ }
get_random_bytes(
kvm->arch.crypto.crycb->dea_wrapping_key_mask,
sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
@@ -877,16 +881,20 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
break;
case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
- if (!test_kvm_facility(kvm, 76))
+ if (!test_kvm_facility(kvm, 76)) {
+ mutex_unlock(&kvm->lock);
return -EINVAL;
+ }
kvm->arch.crypto.aes_kw = 0;
memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
break;
case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
- if (!test_kvm_facility(kvm, 76))
+ if (!test_kvm_facility(kvm, 76)) {
+ mutex_unlock(&kvm->lock);
return -EINVAL;
+ }
kvm->arch.crypto.dea_kw = 0;
memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
@@ -2056,6 +2064,46 @@ static void kvm_s390_set_crycb_format(struct kvm *kvm)
kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}
+void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
+ unsigned long *aqm, unsigned long *adm)
+{
+ struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
+
+ mutex_lock(&kvm->lock);
+ kvm_s390_vcpu_block_all(kvm);
+
+ switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
+ case CRYCB_FORMAT2: /* APCB1 use 256 bits */
+ memcpy(crycb->apcb1.apm, apm, 32);
+ VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
+ apm[0], apm[1], apm[2], apm[3]);
+ memcpy(crycb->apcb1.aqm, aqm, 32);
+ VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
+ aqm[0], aqm[1], aqm[2], aqm[3]);
+ memcpy(crycb->apcb1.adm, adm, 32);
+ VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
+ adm[0], adm[1], adm[2], adm[3]);
+ break;
+ case CRYCB_FORMAT1:
+ case CRYCB_FORMAT0: /* Fall through both use APCB0 */
+ memcpy(crycb->apcb0.apm, apm, 8);
+ memcpy(crycb->apcb0.aqm, aqm, 2);
+ memcpy(crycb->apcb0.adm, adm, 2);
+ VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
+ apm[0], *((unsigned short *)aqm),
+ *((unsigned short *)adm));
+ break;
+ default: /* Can not happen */
+ break;
+ }
+
+ /* recreate the shadow crycb for each vcpu */
+ kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
+ kvm_s390_vcpu_unblock_all(kvm);
+ mutex_unlock(&kvm->lock);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
+
void kvm_arch_crypto_clear_masks(struct kvm *kvm)
{
mutex_lock(&kvm->lock);
@@ -2066,6 +2114,7 @@ void kvm_arch_crypto_clear_masks(struct kvm *kvm)
memset(&kvm->arch.crypto.crycb->apcb1, 0,
sizeof(kvm->arch.crypto.crycb->apcb1));
+ VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
/* recreate the shadow crycb for each vcpu */
kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
kvm_s390_vcpu_unblock_all(kvm);
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index 8b51821d9bf7..7667b38728f0 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -112,7 +112,7 @@ static void vfio_ap_matrix_dev_destroy(void)
root_device_unregister(matrix_dev->device.parent);
}
-int __init vfio_ap_init(void)
+static int __init vfio_ap_init(void)
{
int ret;
@@ -146,7 +146,7 @@ int __init vfio_ap_init(void)
return 0;
}
-void __exit vfio_ap_exit(void)
+static void __exit vfio_ap_exit(void)
{
vfio_ap_mdev_unregister();
ap_driver_unregister(&vfio_ap_drv);
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index d3d9eb72b0f1..272ef427dcc0 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -77,7 +77,7 @@ static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
return sprintf(buf, "%s\n", VFIO_AP_MDEV_NAME_HWVIRT);
}
-MDEV_TYPE_ATTR_RO(name);
+static MDEV_TYPE_ATTR_RO(name);
static ssize_t available_instances_show(struct kobject *kobj,
struct device *dev, char *buf)
@@ -86,7 +86,7 @@ static ssize_t available_instances_show(struct kobject *kobj,
atomic_read(&matrix_dev->available_instances));
}
-MDEV_TYPE_ATTR_RO(available_instances);
+static MDEV_TYPE_ATTR_RO(available_instances);
static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
char *buf)
@@ -94,7 +94,7 @@ static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
return sprintf(buf, "%s\n", VFIO_DEVICE_API_AP_STRING);
}
-MDEV_TYPE_ATTR_RO(device_api);
+static MDEV_TYPE_ATTR_RO(device_api);
static struct attribute *vfio_ap_mdev_type_attrs[] = {
&mdev_type_attr_name.attr,
@@ -395,7 +395,7 @@ static ssize_t unassign_adapter_store(struct device *dev,
return count;
}
-DEVICE_ATTR_WO(unassign_adapter);
+static DEVICE_ATTR_WO(unassign_adapter);
static int
vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
@@ -491,7 +491,7 @@ done:
return ret;
}
-DEVICE_ATTR_WO(assign_domain);
+static DEVICE_ATTR_WO(assign_domain);
/**
@@ -537,7 +537,7 @@ static ssize_t unassign_domain_store(struct device *dev,
return count;
}
-DEVICE_ATTR_WO(unassign_domain);
+static DEVICE_ATTR_WO(unassign_domain);
/**
* assign_control_domain_store
@@ -586,7 +586,7 @@ static ssize_t assign_control_domain_store(struct device *dev,
return count;
}
-DEVICE_ATTR_WO(assign_control_domain);
+static DEVICE_ATTR_WO(assign_control_domain);
/**
* unassign_control_domain_store
@@ -630,7 +630,7 @@ static ssize_t unassign_control_domain_store(struct device *dev,
return count;
}
-DEVICE_ATTR_WO(unassign_control_domain);
+static DEVICE_ATTR_WO(unassign_control_domain);
static ssize_t control_domains_show(struct device *dev,
struct device_attribute *dev_attr,
@@ -654,7 +654,7 @@ static ssize_t control_domains_show(struct device *dev,
return nchars;
}
-DEVICE_ATTR_RO(control_domains);
+static DEVICE_ATTR_RO(control_domains);
static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -704,7 +704,7 @@ static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
return nchars;
}
-DEVICE_ATTR_RO(matrix);
+static DEVICE_ATTR_RO(matrix);
static struct attribute *vfio_ap_mdev_attrs[] = {
&dev_attr_assign_adapter.attr,
@@ -727,37 +727,6 @@ static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
NULL
};
-static void vfio_ap_mdev_copy_masks(struct ap_matrix_mdev *matrix_mdev)
-{
- int nbytes;
- unsigned long *apm, *aqm, *adm;
- struct kvm_s390_crypto_cb *crycb = matrix_mdev->kvm->arch.crypto.crycb;
-
- switch (matrix_mdev->kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
- case CRYCB_FORMAT2:
- apm = (unsigned long *)crycb->apcb1.apm;
- aqm = (unsigned long *)crycb->apcb1.aqm;
- adm = (unsigned long *)crycb->apcb1.adm;
- break;
- case CRYCB_FORMAT1:
- case CRYCB_FORMAT0:
- apm = (unsigned long *)crycb->apcb0.apm;
- aqm = (unsigned long *)crycb->apcb0.aqm;
- adm = (unsigned long *)crycb->apcb0.adm;
- break;
- default:
- /* cannot happen */
- return;
- }
-
- nbytes = DIV_ROUND_UP(matrix_mdev->matrix.apm_max + 1, BITS_PER_BYTE);
- memcpy(apm, matrix_mdev->matrix.apm, nbytes);
- nbytes = DIV_ROUND_UP(matrix_mdev->matrix.aqm_max + 1, BITS_PER_BYTE);
- memcpy(aqm, matrix_mdev->matrix.aqm, nbytes);
- nbytes = DIV_ROUND_UP(matrix_mdev->matrix.adm_max + 1, BITS_PER_BYTE);
- memcpy(adm, matrix_mdev->matrix.adm, nbytes);
-}
-
/**
* vfio_ap_mdev_set_kvm
*
@@ -814,7 +783,9 @@ static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
if (!matrix_mdev->kvm->arch.crypto.crycbd)
return NOTIFY_DONE;
- vfio_ap_mdev_copy_masks(matrix_mdev);
+ kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->matrix.apm,
+ matrix_mdev->matrix.aqm,
+ matrix_mdev->matrix.adm);
return NOTIFY_OK;
}
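
A usage note on the interface added above: kvm_arch_crypto_set_masks() takes the adapter, usage-domain and control-domain masks as unsigned long bitmaps and copies them into whichever APCB the CRYCB format selects. The example below is hypothetical, not part of the patch: example_grant_apqn() and the single-APQN scenario are invented for illustration, and it simply mirrors how vfio_ap_ops.c builds its masks with set_bit_inv() before handing them to KVM.

/*
 * Hypothetical example (not from this patch): grant a guest one
 * adapter/usage-domain pair plus the matching control domain via the
 * newly exported kvm_arch_crypto_set_masks().  Bits are set MSB-first
 * with set_bit_inv(), the same helper vfio_ap_ops.c uses for its masks.
 */
#include <linux/kvm_host.h>
#include <linux/bitmap.h>

static void example_grant_apqn(struct kvm *kvm, unsigned long ap, unsigned long dom)
{
	DECLARE_BITMAP(apm, 256) = { 0 };	/* adapter mask */
	DECLARE_BITMAP(aqm, 256) = { 0 };	/* usage-domain mask */
	DECLARE_BITMAP(adm, 256) = { 0 };	/* control-domain mask */

	set_bit_inv(ap, apm);
	set_bit_inv(dom, aqm);
	set_bit_inv(dom, adm);

	kvm_arch_crypto_set_masks(kvm, apm, aqm, adm);
}

Routing the masks through this exported helper, instead of letting the vfio-ap driver write the CRYCB directly (the deleted vfio_ap_mdev_copy_masks()), keeps the kvm->lock handling, the vcpu block/unblock, and the VSIE shadow-CRYCB refresh inside KVM itself.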