Diffstat (limited to 'drivers/s390/crypto/vfio_ap_ops.c')
-rw-r--r--	drivers/s390/crypto/vfio_ap_ops.c	233
1 file changed, 151 insertions, 82 deletions
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 9f76f2d7b66e..48da32ad0493 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -354,16 +354,32 @@ static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, dma_addr_t *nib)
if (!*nib)
return -EINVAL;
- if (kvm_is_error_hva(gfn_to_hva(vcpu->kvm, *nib >> PAGE_SHIFT)))
+ if (!kvm_s390_is_gpa_in_memslot(vcpu->kvm, *nib))
return -EINVAL;
return 0;
}
-static int ensure_nib_shared(unsigned long addr, struct gmap *gmap)
+/**
+ * ensure_nib_shared() - Ensure the address of the NIB is secure and shared
+ * @addr: the physical (absolute) address of the NIB
+ *
+ * This function checks whether the NIB page, which has been pinned with
+ * vfio_pin_pages(), is a shared page belonging to a secure guest.
+ *
+ * It will call uv_pin_shared() on it; if the page was already pinned shared
+ * (i.e. if the NIB belongs to a secure guest and is shared), then 0
+ * (success) is returned. If the NIB was not shared, vfio_pin_pages() had
+ * exported it and now it does not belong to the secure guest anymore. In
+ * that case, an error is returned.
+ *
+ * Context: the NIB (at physical address @addr) has to be pinned with
+ * vfio_pin_pages() before calling this function.
+ *
+ * Return: 0 in case of success, otherwise an error < 0.
+ */
+static int ensure_nib_shared(unsigned long addr)
{
- int ret;
-
/*
* The nib has to be located in shared storage since guest and
* host access it. vfio_pin_pages() will do a pin shared and
@@ -374,12 +390,7 @@ static int ensure_nib_shared(unsigned long addr, struct gmap *gmap)
*
* If the page is already pinned shared the UV will return a success.
*/
- ret = uv_pin_shared(addr);
- if (ret) {
- /* vfio_pin_pages() likely exported the page so let's re-import */
- gmap_convert_to_secure(gmap, addr);
- }
- return ret;
+ return uv_pin_shared(addr);
}
/**
@@ -425,6 +436,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
return status;
}
+ /* The pin will probably be successful even if the NIB was not shared */
ret = vfio_pin_pages(&q->matrix_mdev->vdev, nib, 1,
IOMMU_READ | IOMMU_WRITE, &h_page);
switch (ret) {
@@ -447,7 +459,7 @@ static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
/* NIB in non-shared storage is a rc 6 for PV guests */
if (kvm_s390_pv_cpu_is_protected(vcpu) &&
- ensure_nib_shared(h_nib & PAGE_MASK, kvm->arch.gmap)) {
+ ensure_nib_shared(h_nib & PAGE_MASK)) {
vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
status.response_code = AP_RESPONSE_INVALID_ADDRESS;
return status;
@@ -638,13 +650,22 @@ static void vfio_ap_matrix_init(struct ap_config_info *info,
matrix->adm_max = info->apxa ? info->nd : 15;
}
+static void signal_guest_ap_cfg_changed(struct ap_matrix_mdev *matrix_mdev)
+{
+ if (matrix_mdev->cfg_chg_trigger)
+ eventfd_signal(matrix_mdev->cfg_chg_trigger);
+}
+
static void vfio_ap_mdev_update_guest_apcb(struct ap_matrix_mdev *matrix_mdev)
{
- if (matrix_mdev->kvm)
+ if (matrix_mdev->kvm) {
kvm_arch_crypto_set_masks(matrix_mdev->kvm,
matrix_mdev->shadow_apcb.apm,
matrix_mdev->shadow_apcb.aqm,
matrix_mdev->shadow_apcb.adm);
+
+ signal_guest_ap_cfg_changed(matrix_mdev);
+ }
}
static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev)
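vfio_ap_mdev_update_guest_apcb() now fires the cfg_chg_trigger eventfd whenever the shadow APCB masks are pushed into KVM, letting a VMM react to guest AP configuration changes without polling sysfs. A minimal sketch of the userspace consumer side, assuming the eventfd has already been registered via the VFIO_DEVICE_SET_IRQS handling added later in this patch (wait_for_ap_cfg_change is a hypothetical helper, not part of any API):

#include <poll.h>
#include <stdint.h>
#include <unistd.h>

static int wait_for_ap_cfg_change(int cfg_chg_eventfd)
{
	struct pollfd pfd = { .fd = cfg_chg_eventfd, .events = POLLIN };
	uint64_t count;

	if (poll(&pfd, 1, -1) != 1 || !(pfd.revents & POLLIN))
		return -1;

	/* Reading drains the eventfd counter; one wakeup may batch
	 * several kernel-side eventfd_signal() calls. */
	if (read(cfg_chg_eventfd, &count, sizeof(count)) != sizeof(count))
		return -1;

	return 0;	/* caller re-reads the guest AP configuration */
}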
@@ -780,6 +801,7 @@ static int vfio_ap_mdev_probe(struct mdev_device *mdev)
if (ret)
goto err_put_vdev;
matrix_mdev->req_trigger = NULL;
+ matrix_mdev->cfg_chg_trigger = NULL;
dev_set_drvdata(&mdev->dev, matrix_mdev);
mutex_lock(&matrix_dev->mdevs_lock);
list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
@@ -851,48 +873,66 @@ static void vfio_ap_mdev_remove(struct mdev_device *mdev)
vfio_put_device(&matrix_mdev->vdev);
}
-#define MDEV_SHARING_ERR "Userspace may not re-assign queue %02lx.%04lx " \
- "already assigned to %s"
+#define MDEV_SHARING_ERR "Userspace may not assign queue %02lx.%04lx to mdev: already assigned to %s"
+
+#define MDEV_IN_USE_ERR "Can not reserve queue %02lx.%04lx for host driver: in use by mdev"
+
+static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *assignee,
+ struct ap_matrix_mdev *assigned_to,
+ unsigned long *apm, unsigned long *aqm)
+{
+ unsigned long apid, apqi;
-static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *matrix_mdev,
- unsigned long *apm,
- unsigned long *aqm)
+ for_each_set_bit_inv(apid, apm, AP_DEVICES) {
+ for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
+ dev_warn(mdev_dev(assignee->mdev), MDEV_SHARING_ERR,
+ apid, apqi, dev_name(mdev_dev(assigned_to->mdev)));
+ }
+ }
+}
+
+static void vfio_ap_mdev_log_in_use_err(struct ap_matrix_mdev *assignee,
+ unsigned long *apm, unsigned long *aqm)
{
unsigned long apid, apqi;
- const struct device *dev = mdev_dev(matrix_mdev->mdev);
- const char *mdev_name = dev_name(dev);
- for_each_set_bit_inv(apid, apm, AP_DEVICES)
+ for_each_set_bit_inv(apid, apm, AP_DEVICES) {
for_each_set_bit_inv(apqi, aqm, AP_DOMAINS)
- dev_warn(dev, MDEV_SHARING_ERR, apid, apqi, mdev_name);
+ dev_warn(mdev_dev(assignee->mdev), MDEV_IN_USE_ERR, apid, apqi);
+ }
}
/**
* vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs
*
+ * @assignee: the matrix mdev to which @mdev_apm and @mdev_aqm are being
+ * assigned; or NULL if this function was called by the AP bus
+ * driver in_use callback to verify none of the APQNs being reserved
+ * for the host device driver are in use by a vfio_ap mediated device
* @mdev_apm: mask indicating the APIDs of the APQNs to be verified
* @mdev_aqm: mask indicating the APQIs of the APQNs to be verified
*
- * Verifies that each APQN derived from the Cartesian product of a bitmap of
- * AP adapter IDs and AP queue indexes is not configured for any matrix
- * mediated device. AP queue sharing is not allowed.
+ * Verifies that each APQN derived from the Cartesian product of the APIDs
+ * represented by the bits set in @mdev_apm and the APQIs represented by the
+ * bits set in @mdev_aqm is not assigned to a mediated device other than the
+ * mdev to which the APQN is being assigned (@assignee). AP queue sharing is
+ * not allowed.
*
* Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE.
*/
-static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm,
+static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *assignee,
+ unsigned long *mdev_apm,
unsigned long *mdev_aqm)
{
- struct ap_matrix_mdev *matrix_mdev;
+ struct ap_matrix_mdev *assigned_to;
DECLARE_BITMAP(apm, AP_DEVICES);
DECLARE_BITMAP(aqm, AP_DOMAINS);
- list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ list_for_each_entry(assigned_to, &matrix_dev->mdev_list, node) {
/*
- * If the input apm and aqm are fields of the matrix_mdev
- * object, then move on to the next matrix_mdev.
+ * If the mdev to which the mdev_apm and mdev_aqm are being
+ * assigned is the same as the mdev being verified
*/
- if (mdev_apm == matrix_mdev->matrix.apm &&
- mdev_aqm == matrix_mdev->matrix.aqm)
+ if (assignee == assigned_to)
continue;
memset(apm, 0, sizeof(apm));
@@ -902,15 +942,16 @@ static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm,
* We work on full longs, as we can only exclude the leftover
* bits in non-inverse order. The leftover is all zeros.
*/
- if (!bitmap_and(apm, mdev_apm, matrix_mdev->matrix.apm,
- AP_DEVICES))
+ if (!bitmap_and(apm, mdev_apm, assigned_to->matrix.apm, AP_DEVICES))
continue;
- if (!bitmap_and(aqm, mdev_aqm, matrix_mdev->matrix.aqm,
- AP_DOMAINS))
+ if (!bitmap_and(aqm, mdev_aqm, assigned_to->matrix.aqm, AP_DOMAINS))
continue;
- vfio_ap_mdev_log_sharing_err(matrix_mdev, apm, aqm);
+ if (assignee)
+ vfio_ap_mdev_log_sharing_err(assignee, assigned_to, apm, aqm);
+ else
+ vfio_ap_mdev_log_in_use_err(assigned_to, apm, aqm);
return -EADDRINUSE;
}
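The verification itself boils down to two bitmap intersections: an APQN (apid, apqi) is claimed by two configurations exactly when some APID is set in both APMs and some APQI is set in both AQMs. A sketch of that invariant, shrunk to one 64-bit word per mask for brevity (an assumption; the kernel uses bitmap_and() over AP_DEVICES- and AP_DOMAINS-sized bitmaps and for_each_set_bit_inv(), where bit 0 is the most significant bit on s390):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool report_shared_apqns(uint64_t new_apm, uint64_t new_aqm,
				uint64_t cur_apm, uint64_t cur_aqm)
{
	uint64_t apm = new_apm & cur_apm;	/* adapters in common */
	uint64_t aqm = new_aqm & cur_aqm;	/* domains in common  */

	if (!apm || !aqm)
		return false;	/* Cartesian product is empty: no sharing */

	/* Walk APQNs in MSB-first order, mirroring for_each_set_bit_inv(). */
	for (int apid = 0; apid < 64; apid++) {
		if (!(apm & (1ULL << (63 - apid))))
			continue;
		for (int apqi = 0; apqi < 64; apqi++)
			if (aqm & (1ULL << (63 - apqi)))
				printf("queue %02x.%04x shared\n", apid, apqi);
	}

	return true;
}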
@@ -927,7 +968,7 @@ static int vfio_ap_mdev_verify_no_sharing(unsigned long *mdev_apm,
*
* Return: One of the following values:
* o the error returned from the ap_apqn_in_matrix_owned_by_def_drv() function,
- * most likely -EBUSY indicating the ap_perms_mutex lock is already held.
+ * most likely -EBUSY indicating the ap_attr_mutex lock is already held.
* o EADDRNOTAVAIL if an APQN assigned to @matrix_mdev is reserved for the
* zcrypt default driver.
* o EADDRINUSE if an APQN assigned to @matrix_mdev is assigned to another mdev
@@ -939,7 +980,8 @@ static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev)
matrix_mdev->matrix.aqm))
return -EADDRNOTAVAIL;
- return vfio_ap_mdev_verify_no_sharing(matrix_mdev->matrix.apm,
+ return vfio_ap_mdev_verify_no_sharing(matrix_mdev,
+ matrix_mdev->matrix.apm,
matrix_mdev->matrix.aqm);
}
@@ -1037,7 +1079,7 @@ static ssize_t assign_adapter_store(struct device *dev,
DECLARE_BITMAP(apm_filtered, AP_DEVICES);
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- mutex_lock(&ap_perms_mutex);
+ mutex_lock(&ap_attr_mutex);
get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apid);
@@ -1072,7 +1114,7 @@ static ssize_t assign_adapter_store(struct device *dev,
ret = count;
done:
release_update_locks_for_mdev(matrix_mdev);
- mutex_unlock(&ap_perms_mutex);
+ mutex_unlock(&ap_attr_mutex);
return ret;
}
@@ -1261,7 +1303,7 @@ static ssize_t assign_domain_store(struct device *dev,
DECLARE_BITMAP(apm_filtered, AP_DEVICES);
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- mutex_lock(&ap_perms_mutex);
+ mutex_lock(&ap_attr_mutex);
get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apqi);
@@ -1296,7 +1338,7 @@ static ssize_t assign_domain_store(struct device *dev,
ret = count;
done:
release_update_locks_for_mdev(matrix_mdev);
- mutex_unlock(&ap_perms_mutex);
+ mutex_unlock(&ap_attr_mutex);
return ret;
}
@@ -1521,18 +1563,13 @@ static ssize_t control_domains_show(struct device *dev,
char *buf)
{
unsigned long id;
- int nchars = 0;
- int n;
- char *bufpos = buf;
struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
unsigned long max_domid = matrix_mdev->matrix.adm_max;
+ int nchars = 0;
mutex_lock(&matrix_dev->mdevs_lock);
- for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) {
- n = sprintf(bufpos, "%04lx\n", id);
- bufpos += n;
- nchars += n;
- }
+ for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1)
+ nchars += sysfs_emit_at(buf, nchars, "%04lx\n", id);
mutex_unlock(&matrix_dev->mdevs_lock);
return nchars;
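This hunk and the two show-routine hunks that follow swap open-coded sprintf() plus pointer arithmetic for sysfs_emit_at(), which clamps output to the PAGE_SIZE sysfs buffer and returns the number of bytes written. A generic sketch of the pattern (example_show is illustrative, not this driver's code):

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	int nchars = 0;
	int i;

	/* Each call appends at offset nchars and bounds-checks against
	 * PAGE_SIZE, so no moving bufpos pointer is needed. */
	for (i = 0; i < 4; i++)
		nchars += sysfs_emit_at(buf, nchars, "%d\n", i);

	return nchars;
}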
@@ -1541,7 +1578,6 @@ static DEVICE_ATTR_RO(control_domains);
static ssize_t vfio_ap_mdev_matrix_show(struct ap_matrix *matrix, char *buf)
{
- char *bufpos = buf;
unsigned long apid;
unsigned long apqi;
unsigned long apid1;
@@ -1549,33 +1585,21 @@ static ssize_t vfio_ap_mdev_matrix_show(struct ap_matrix *matrix, char *buf)
unsigned long napm_bits = matrix->apm_max + 1;
unsigned long naqm_bits = matrix->aqm_max + 1;
int nchars = 0;
- int n;
apid1 = find_first_bit_inv(matrix->apm, napm_bits);
apqi1 = find_first_bit_inv(matrix->aqm, naqm_bits);
if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
- for_each_set_bit_inv(apqi, matrix->aqm,
- naqm_bits) {
- n = sprintf(bufpos, "%02lx.%04lx\n", apid,
- apqi);
- bufpos += n;
- nchars += n;
- }
+ for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits)
+ nchars += sysfs_emit_at(buf, nchars, "%02lx.%04lx\n", apid, apqi);
}
} else if (apid1 < napm_bits) {
- for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
- n = sprintf(bufpos, "%02lx.\n", apid);
- bufpos += n;
- nchars += n;
- }
+ for_each_set_bit_inv(apid, matrix->apm, napm_bits)
+ nchars += sysfs_emit_at(buf, nchars, "%02lx.\n", apid);
} else if (apqi1 < naqm_bits) {
- for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits) {
- n = sprintf(bufpos, ".%04lx\n", apqi);
- bufpos += n;
- nchars += n;
- }
+ for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits)
+ nchars += sysfs_emit_at(buf, nchars, ".%04lx\n", apqi);
}
return nchars;
@@ -1694,7 +1718,7 @@ static ssize_t ap_config_store(struct device *dev, struct device_attribute *attr
return -ENOMEM;
rest = newbuf;
- mutex_lock(&ap_perms_mutex);
+ mutex_lock(&ap_attr_mutex);
get_update_locks_for_mdev(matrix_mdev);
/* Save old state */
@@ -1755,7 +1779,7 @@ static ssize_t ap_config_store(struct device *dev, struct device_attribute *attr
}
out:
release_update_locks_for_mdev(matrix_mdev);
- mutex_unlock(&ap_perms_mutex);
+ mutex_unlock(&ap_attr_mutex);
kfree(newbuf);
return rc;
}
@@ -2052,6 +2076,13 @@ static void vfio_ap_mdev_request(struct vfio_device *vdev, unsigned int count)
matrix_mdev = container_of(vdev, struct ap_matrix_mdev, vdev);
+ get_update_locks_for_mdev(matrix_mdev);
+
+ if (matrix_mdev->kvm) {
+ kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
+ signal_guest_ap_cfg_changed(matrix_mdev);
+ }
+
if (matrix_mdev->req_trigger) {
if (!(count % 10))
dev_notice_ratelimited(dev,
@@ -2063,6 +2094,8 @@ static void vfio_ap_mdev_request(struct vfio_device *vdev, unsigned int count)
dev_notice(dev,
"No device request registered, blocked until released by user\n");
}
+
+ release_update_locks_for_mdev(matrix_mdev);
}
static int vfio_ap_mdev_get_device_info(unsigned long arg)
@@ -2103,6 +2136,10 @@ static ssize_t vfio_ap_get_irq_info(unsigned long arg)
info.count = 1;
info.flags = VFIO_IRQ_INFO_EVENTFD;
break;
+ case VFIO_AP_CFG_CHG_IRQ_INDEX:
+ info.count = 1;
+ info.flags = VFIO_IRQ_INFO_EVENTFD;
+ break;
default:
return -EINVAL;
}
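With the new index advertised, userspace can probe it through the standard VFIO IRQ info ioctl before wiring up an eventfd. A rough sketch, assuming VFIO_AP_CFG_CHG_IRQ_INDEX is the constant added to the vfio uapi header by this series:

#include <linux/vfio.h>
#include <string.h>
#include <sys/ioctl.h>

static int ap_cfg_chg_irq_supported(int device_fd)
{
	struct vfio_irq_info info;

	memset(&info, 0, sizeof(info));
	info.argsz = sizeof(info);
	info.index = VFIO_AP_CFG_CHG_IRQ_INDEX;

	if (ioctl(device_fd, VFIO_DEVICE_GET_IRQ_INFO, &info))
		return 0;

	return info.count == 1 && (info.flags & VFIO_IRQ_INFO_EVENTFD);
}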
@@ -2166,6 +2203,39 @@ static int vfio_ap_set_request_irq(struct ap_matrix_mdev *matrix_mdev,
return 0;
}
+static int vfio_ap_set_cfg_change_irq(struct ap_matrix_mdev *matrix_mdev, unsigned long arg)
+{
+ s32 fd;
+ void __user *data;
+ unsigned long minsz;
+ struct eventfd_ctx *cfg_chg_trigger;
+
+ minsz = offsetofend(struct vfio_irq_set, count);
+ data = (void __user *)(arg + minsz);
+
+ if (get_user(fd, (s32 __user *)data))
+ return -EFAULT;
+
+ if (fd == -1) {
+ if (matrix_mdev->cfg_chg_trigger)
+ eventfd_ctx_put(matrix_mdev->cfg_chg_trigger);
+ matrix_mdev->cfg_chg_trigger = NULL;
+ } else if (fd >= 0) {
+ cfg_chg_trigger = eventfd_ctx_fdget(fd);
+ if (IS_ERR(cfg_chg_trigger))
+ return PTR_ERR(cfg_chg_trigger);
+
+ if (matrix_mdev->cfg_chg_trigger)
+ eventfd_ctx_put(matrix_mdev->cfg_chg_trigger);
+
+ matrix_mdev->cfg_chg_trigger = cfg_chg_trigger;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int vfio_ap_set_irqs(struct ap_matrix_mdev *matrix_mdev,
unsigned long arg)
{
@@ -2181,6 +2251,8 @@ static int vfio_ap_set_irqs(struct ap_matrix_mdev *matrix_mdev,
switch (irq_set.index) {
case VFIO_AP_REQ_IRQ_INDEX:
return vfio_ap_set_request_irq(matrix_mdev, arg);
+ case VFIO_AP_CFG_CHG_IRQ_INDEX:
+ return vfio_ap_set_cfg_change_irq(matrix_mdev, arg);
default:
return -EINVAL;
}
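On the userspace side, registering or tearing down the notifier follows the usual VFIO_DEVICE_SET_IRQS shape: the s32 eventfd (or -1 to unregister) sits immediately after the fixed members of struct vfio_irq_set, which is exactly the offsetofend(struct vfio_irq_set, count) position the handler above reads. A hedged sketch:

#include <linux/vfio.h>
#include <string.h>
#include <sys/ioctl.h>

static int set_ap_cfg_chg_eventfd(int device_fd, int eventfd)
{
	char buf[sizeof(struct vfio_irq_set) + sizeof(__s32)];
	struct vfio_irq_set *set = (struct vfio_irq_set *)buf;

	memset(buf, 0, sizeof(buf));
	set->argsz = sizeof(buf);
	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	set->index = VFIO_AP_CFG_CHG_IRQ_INDEX;
	set->start = 0;
	set->count = 1;
	/* eventfd >= 0 registers; -1 unregisters, as parsed above. */
	memcpy(set->data, &eventfd, sizeof(__s32));

	return ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
}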
@@ -2205,8 +2277,8 @@ static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
ret = vfio_ap_mdev_reset_queues(matrix_mdev);
break;
case VFIO_DEVICE_GET_IRQ_INFO:
-			ret = vfio_ap_get_irq_info(arg);
-			break;
+		ret = vfio_ap_get_irq_info(arg);
+		break;
case VFIO_DEVICE_SET_IRQS:
ret = vfio_ap_set_irqs(matrix_mdev, arg);
break;
@@ -2263,14 +2335,11 @@ static ssize_t status_show(struct device *dev,
if (matrix_mdev->kvm &&
test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
- nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
- AP_QUEUE_IN_USE);
+ nchars = sysfs_emit(buf, "%s\n", AP_QUEUE_IN_USE);
else
- nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
- AP_QUEUE_ASSIGNED);
+ nchars = sysfs_emit(buf, "%s\n", AP_QUEUE_ASSIGNED);
} else {
- nchars = scnprintf(buf, PAGE_SIZE, "%s\n",
- AP_QUEUE_UNASSIGNED);
+ nchars = sysfs_emit(buf, "%s\n", AP_QUEUE_UNASSIGNED);
}
mutex_unlock(&matrix_dev->mdevs_lock);
@@ -2325,10 +2394,10 @@ int vfio_ap_mdev_register(void)
matrix_dev->mdev_type.sysfs_name = VFIO_AP_MDEV_TYPE_HWVIRT;
matrix_dev->mdev_type.pretty_name = VFIO_AP_MDEV_NAME_HWVIRT;
- matrix_dev->mdev_types[0] = &matrix_dev->mdev_type;
+ matrix_dev->mdev_types = &matrix_dev->mdev_type;
ret = mdev_register_parent(&matrix_dev->parent, &matrix_dev->device,
&vfio_ap_matrix_driver,
- matrix_dev->mdev_types, 1);
+ &matrix_dev->mdev_types, 1);
if (ret)
goto err_driver;
return 0;
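The registration change only works because matrix_dev->mdev_types is now declared as a single struct mdev_type * rather than a one-element array (that declaration lives in vfio_ap_private.h, outside this diff), so &matrix_dev->mdev_types has the struct mdev_type ** type that mdev_register_parent() expects. A sketch of the pointer shapes with hypothetical stand-in types:

/* Stand-in types for illustration only. */
struct mdev_type { int id; };

struct matrix_model {
	struct mdev_type mdev_type;	/* the single supported type */
	struct mdev_type *mdev_types;	/* was: struct mdev_type *mdev_types[1] */
};

static struct mdev_type **wire_types(struct matrix_model *m)
{
	m->mdev_types = &m->mdev_type;	/* struct mdev_type *            */
	return &m->mdev_types;		/* struct mdev_type **, nr_types 1 */
}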
@@ -2467,7 +2536,7 @@ int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm)
mutex_lock(&matrix_dev->guests_lock);
mutex_lock(&matrix_dev->mdevs_lock);
- ret = vfio_ap_mdev_verify_no_sharing(apm, aqm);
+ ret = vfio_ap_mdev_verify_no_sharing(NULL, apm, aqm);
mutex_unlock(&matrix_dev->mdevs_lock);
mutex_unlock(&matrix_dev->guests_lock);