Diffstat (limited to 'drivers/s390/crypto')
-rw-r--r--  drivers/s390/crypto/Makefile            |   25
-rw-r--r--  drivers/s390/crypto/ap_bus.c            | 2400
-rw-r--r--  drivers/s390/crypto/ap_bus.h            |  329
-rw-r--r--  drivers/s390/crypto/ap_card.c           |  141
-rw-r--r--  drivers/s390/crypto/ap_debug.h          |    8
-rw-r--r--  drivers/s390/crypto/ap_queue.c          | 1154
-rw-r--r--  drivers/s390/crypto/pkey_api.c          | 2110
-rw-r--r--  drivers/s390/crypto/pkey_base.c         |  380
-rw-r--r--  drivers/s390/crypto/pkey_base.h         |  240
-rw-r--r--  drivers/s390/crypto/pkey_cca.c          |  625
-rw-r--r--  drivers/s390/crypto/pkey_ep11.c         |  571
-rw-r--r--  drivers/s390/crypto/pkey_pckmo.c        |  473
-rw-r--r--  drivers/s390/crypto/pkey_sysfs.c        |  646
-rw-r--r--  drivers/s390/crypto/pkey_uv.c           |  317
-rw-r--r--  drivers/s390/crypto/vfio_ap_debug.h     |   32
-rw-r--r--  drivers/s390/crypto/vfio_ap_drv.c       |  107
-rw-r--r--  drivers/s390/crypto/vfio_ap_ops.c       | 2800
-rw-r--r--  drivers/s390/crypto/vfio_ap_private.h   |  128
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c        | 1366
-rw-r--r--  drivers/s390/crypto/zcrypt_api.h        |   85
-rw-r--r--  drivers/s390/crypto/zcrypt_card.c       |   76
-rw-r--r--  drivers/s390/crypto/zcrypt_cca_key.h    |   89
-rw-r--r--  drivers/s390/crypto/zcrypt_ccamisc.c    | 1813
-rw-r--r--  drivers/s390/crypto/zcrypt_ccamisc.h    |  268
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.c      |  236
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2a.h      |  134
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2c.c      |  291
-rw-r--r--  drivers/s390/crypto/zcrypt_cex2c.h      |   18
-rw-r--r--  drivers/s390/crypto/zcrypt_cex4.c       |  602
-rw-r--r--  drivers/s390/crypto/zcrypt_debug.h      |    8
-rw-r--r--  drivers/s390/crypto/zcrypt_ep11misc.c   | 1636
-rw-r--r--  drivers/s390/crypto/zcrypt_ep11misc.h   |  158
-rw-r--r--  drivers/s390/crypto/zcrypt_error.h      |  116
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype50.c  |  264
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype50.h  |    7
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype6.c   | 1002
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype6.h   |   45
-rw-r--r--  drivers/s390/crypto/zcrypt_queue.c      |   70
38 files changed, 15430 insertions, 5340 deletions
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index 6ccd93d0b1cb..e83c6603c858 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -4,18 +4,35 @@
#
ap-objs := ap_bus.o ap_card.o ap_queue.o
-obj-$(subst m,y,$(CONFIG_ZCRYPT)) += ap.o
+obj-$(CONFIG_AP) += ap.o
# zcrypt_api.o and zcrypt_msgtype*.o depend on ap.o
zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o
zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
+zcrypt-objs += zcrypt_ccamisc.o zcrypt_ep11misc.o
obj-$(CONFIG_ZCRYPT) += zcrypt.o
# adapter drivers depend on ap.o and zcrypt.o
-obj-$(CONFIG_ZCRYPT) += zcrypt_cex2c.o zcrypt_cex2a.o zcrypt_cex4.o
+obj-$(CONFIG_ZCRYPT) += zcrypt_cex4.o
-# pkey kernel module
-pkey-objs := pkey_api.o
+# pkey base and api module
+pkey-objs := pkey_base.o pkey_api.o pkey_sysfs.o
obj-$(CONFIG_PKEY) += pkey.o
+# pkey cca handler module
+pkey-cca-objs := pkey_cca.o
+obj-$(CONFIG_PKEY_CCA) += pkey-cca.o
+
+# pkey ep11 handler module
+pkey-ep11-objs := pkey_ep11.o
+obj-$(CONFIG_PKEY_EP11) += pkey-ep11.o
+
+# pkey pckmo handler module
+pkey-pckmo-objs := pkey_pckmo.o
+obj-$(CONFIG_PKEY_PCKMO) += pkey-pckmo.o
+
+# pkey uv handler module
+pkey-uv-objs := pkey_uv.o
+obj-$(CONFIG_PKEY_UV) += pkey-uv.o
+
# adjunct processor matrix
vfio_ap-objs := vfio_ap_drv.o vfio_ap_ops.o
obj-$(CONFIG_VFIO_AP) += vfio_ap.o
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 48ea0004a56d..a445494fd2be 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1,31 +1,34 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright IBM Corp. 2006, 2012
+ * Copyright IBM Corp. 2006, 2023
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
* Felix Beck <felix.beck@de.ibm.com>
* Holger Dengler <hd@linux.vnet.ibm.com>
+ * Harald Freudenberger <freude@linux.ibm.com>
*
* Adjunct processor bus.
*/
-#define KMSG_COMPONENT "ap"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "ap: " fmt
#include <linux/kernel_stat.h>
#include <linux/moduleparam.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/freezer.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
-#include <linux/suspend.h>
+#include <asm/machine.h>
#include <asm/airq.h>
+#include <asm/tpi.h>
#include <linux/atomic.h>
#include <asm/isc.h>
#include <linux/hrtimer.h>
@@ -35,13 +38,18 @@
#include <linux/mod_devicetable.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
+#include <linux/module.h>
+#include <asm/uv.h>
+#include <asm/chsc.h>
+#include <linux/mempool.h>
#include "ap_bus.h"
#include "ap_debug.h"
-/*
- * Module parameters; note though this file itself isn't modular.
- */
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("Adjunct Processor Bus driver");
+MODULE_LICENSE("GPL");
+
int ap_domain_index = -1; /* Adjunct Processor Domain Index */
static DEFINE_SPINLOCK(ap_domain_lock);
module_param_named(domain, ap_domain_index, int, 0440);
@@ -60,19 +68,44 @@ static char *aqm_str;
module_param_named(aqmask, aqm_str, charp, 0440);
MODULE_PARM_DESC(aqmask, "AP bus domain mask.");
+static int ap_useirq = 1;
+module_param_named(useirq, ap_useirq, int, 0440);
+MODULE_PARM_DESC(useirq, "Use interrupt if available, default is 1 (on).");
+
+atomic_t ap_max_msg_size = ATOMIC_INIT(AP_DEFAULT_MAX_MSG_SIZE);
+EXPORT_SYMBOL(ap_max_msg_size);
+
static struct device *ap_root_device;
-DEFINE_SPINLOCK(ap_list_lock);
-LIST_HEAD(ap_card_list);
+/* Hashtable of all queue devices on the AP bus */
+DEFINE_HASHTABLE(ap_queues, 8);
+/* lock used for the ap_queues hashtable */
+DEFINE_SPINLOCK(ap_queues_lock);
/* Default permissions (ioctl, card and domain masking) */
struct ap_perms ap_perms;
EXPORT_SYMBOL(ap_perms);
-DEFINE_MUTEX(ap_perms_mutex);
-EXPORT_SYMBOL(ap_perms_mutex);
+/* true if apmask and/or aqmask are NOT default */
+bool ap_apmask_aqmask_in_use;
+/* counter for how many driver_overrides are currently active */
+int ap_driver_override_ctr;
+/*
+ * Mutex for consistent read and write of the ap_perms struct,
+ * ap_apmask_aqmask_in_use, ap_driver_override_ctr
+ * and the ap bus sysfs attributes apmask and aqmask.
+ */
+DEFINE_MUTEX(ap_attr_mutex);
+EXPORT_SYMBOL(ap_attr_mutex);
+
+/* # of bindings complete since init */
+static atomic64_t ap_bindings_complete_count = ATOMIC64_INIT(0);
+
+/* completion for APQN bindings complete */
+static DECLARE_COMPLETION(ap_apqn_bindings_complete);
-static struct ap_config_info *ap_configuration;
-static bool initialised;
+static struct ap_config_info qci[2];
+static struct ap_config_info *const ap_qci_info = &qci[0];
+static struct ap_config_info *const ap_qci_info_old = &qci[1];
/*
* AP bus related debug feature things.
@@ -80,18 +113,44 @@ static bool initialised;
debug_info_t *ap_dbf_info;
/*
- * Workqueue timer for bus rescan.
+ * There is a need for a do-not-allocate-memory path through the AP bus
+ * layer. The pkey layer may be triggered via the in-kernel interface from
+ * a protected key crypto algorithm (namely PAES) to convert a secure key
+ * into a protected key. This happens in a workqueue context, so sleeping
+ * is allowed but memory allocations causing IO operations are not permitted.
+ * To accomplish this, an AP message memory pool with pre-allocated space
+ * is established. When ap_init_apmsg() is called with the
+ * AP_MSG_FLAG_MEMPOOL flag set in its flags argument, the ap message
+ * buffer is allocated from the ap_msg_pool instead of via kmalloc().
+ * This pool only holds a limited amount of buffers:
+ * ap_msg_pool_min_items items of size AP_DEFAULT_MAX_MSG_SIZE, and
+ * exactly one of these items (if available) is returned per such call.
+ * When the pool is exhausted, ap_init_apmsg() with AP_MSG_FLAG_MEMPOOL
+ * set returns -ENOMEM without any attempt to allocate memory and the
+ * caller has to deal with that.
*/
-static struct timer_list ap_config_timer;
-static int ap_config_time = AP_CONFIG_TIME;
-static void ap_scan_bus(struct work_struct *);
-static DECLARE_WORK(ap_scan_work, ap_scan_bus);
+static mempool_t *ap_msg_pool;
+static unsigned int ap_msg_pool_min_items = 8;
+module_param_named(msgpool_min_items, ap_msg_pool_min_items, uint, 0440);
+MODULE_PARM_DESC(msgpool_min_items, "AP message pool minimal items");
+
+/*
+ * AP bus rescan related things.
+ */
+static bool ap_scan_bus(void);
+static bool ap_scan_bus_result; /* result of last ap_scan_bus() */
+static DEFINE_MUTEX(ap_scan_bus_mutex); /* mutex ap_scan_bus() invocations */
+static struct task_struct *ap_scan_bus_task; /* thread holding the scan mutex */
+static atomic64_t ap_scan_bus_count; /* counter ap_scan_bus() invocations */
+static int ap_scan_bus_time = AP_CONFIG_TIME;
+static struct timer_list ap_scan_bus_timer;
+static void ap_scan_bus_wq_callback(struct work_struct *);
+static DECLARE_WORK(ap_scan_bus_work, ap_scan_bus_wq_callback);
/*
* Tasklet & timer for AP request polling and interrupts
*/
static void ap_tasklet_fn(unsigned long);
-static DECLARE_TASKLET(ap_tasklet, ap_tasklet_fn, 0);
+static DECLARE_TASKLET_OLD(ap_tasklet, ap_tasklet_fn);
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
static struct task_struct *ap_poll_kthread;
static DEFINE_MUTEX(ap_poll_thread_mutex);
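
The functions backing this no-allocation path, ap_init_apmsg() and ap_release_apmsg(), are added further down in this patch. A minimal caller sketch (hypothetical, not part of the patch), assuming only the AP_MSG_FLAG_MEMPOOL flag introduced here:

	struct ap_message ap_msg;
	int rc;

	/* no-IO path: take a pre-allocated AP_DEFAULT_MAX_MSG_SIZE buffer */
	rc = ap_init_apmsg(&ap_msg, AP_MSG_FLAG_MEMPOOL);
	if (rc)
		return rc;	/* pool exhausted: -ENOMEM, caller must cope */

	/* ... build and send the request via ap_msg.msg / ap_msg.bufsize ... */

	ap_release_apmsg(&ap_msg);	/* zeroize and return buffer to the pool */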
@@ -101,24 +160,26 @@ static struct hrtimer ap_poll_timer;
* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds.
* If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.
*/
-static unsigned long long poll_timeout = 250000;
+static unsigned long poll_high_timeout = 250000UL;
-/* Suspend flag */
-static int ap_suspend_flag;
-/* Maximum domain id */
-static int ap_max_domain_id;
/*
- * Flag to check if domain was set through module parameter domain=. This is
- * important when supsend and resume is done in a z/VM environment where the
- * domain might change.
+ * Some state machine states only require a low frequency polling.
+ * We use 25 Hz frequency for these.
*/
-static int user_set_domain;
-static struct bus_type ap_bus_type;
+static unsigned long poll_low_timeout = 40000000UL;
+
+/* Maximum domain id, if not given via qci */
+static int ap_max_domain_id = 15;
+/* Maximum adapter id, if not given via qci */
+static int ap_max_adapter_id = 63;
+
+static const struct bus_type ap_bus_type;
/* Adapter interrupt definitions */
-static void ap_interrupt_handler(struct airq_struct *airq);
+static void ap_interrupt_handler(struct airq_struct *airq,
+ struct tpi_info *tpi_info);
-static int ap_airq_flag;
+static bool ap_irq_flag;
static struct airq_struct ap_airq = {
.handler = ap_interrupt_handler,
@@ -126,15 +187,6 @@ static struct airq_struct ap_airq = {
};
/**
- * ap_using_interrupts() - Returns non-zero if interrupt support is
- * available.
- */
-static inline int ap_using_interrupts(void)
-{
- return ap_airq_flag;
-}
-
-/**
* ap_airq_ptr() - Get the address of the adapter interrupt indicator
*
* Returns the address of the local-summary-indicator of the adapter
@@ -143,7 +195,7 @@ static inline int ap_using_interrupts(void)
*/
void *ap_airq_ptr(void)
{
- if (ap_using_interrupts())
+ if (ap_irq_flag)
return ap_airq.lsi_ptr;
return NULL;
}
@@ -159,12 +211,12 @@ static int ap_interrupts_available(void)
}
/**
- * ap_configuration_available(): Test if AP configuration
- * information is available.
+ * ap_qci_available(): Test if AP configuration
+ * information can be queried via QCI subfunction.
*
- * Returns 1 if AP configuration information is available.
+ * Returns 1 if subfunction PQAP(QCI) is available.
*/
-static int ap_configuration_available(void)
+static int ap_qci_available(void)
{
return test_facility(12);
}
@@ -173,7 +225,7 @@ static int ap_configuration_available(void)
* ap_apft_available(): Test if AP facilities test (APFT)
* facility is available.
*
- * Returns 1 if APFT is is available.
+ * Returns 1 if APFT is available.
*/
static int ap_apft_available(void)
{
@@ -187,45 +239,55 @@ static int ap_apft_available(void)
*/
static inline int ap_qact_available(void)
{
- if (ap_configuration)
- return ap_configuration->qact;
- return 0;
+ return ap_qci_info->qact;
}
/*
- * ap_query_configuration(): Fetch cryptographic config info
+ * ap_sb_available(): Test if the AP secure binding facility is available.
*
- * Returns the ap configuration info fetched via PQAP(QCI).
- * On success 0 is returned, on failure a negative errno
- * is returned, e.g. if the PQAP(QCI) instruction is not
- * available, the return value will be -EOPNOTSUPP.
+ * Returns 1 if secure binding facility is available.
*/
-static inline int ap_query_configuration(struct ap_config_info *info)
+int ap_sb_available(void)
{
- if (!ap_configuration_available())
- return -EOPNOTSUPP;
- if (!info)
- return -EINVAL;
- return ap_qci(info);
+ return ap_qci_info->apsb;
}
-EXPORT_SYMBOL(ap_query_configuration);
-/**
- * ap_init_configuration(): Allocate and query configuration array.
+/*
+ * ap_is_se_guest(): Check for SE guest with AP pass-through support.
*/
-static void ap_init_configuration(void)
+bool ap_is_se_guest(void)
{
- if (!ap_configuration_available())
- return;
+ return is_prot_virt_guest() && ap_sb_available();
+}
+EXPORT_SYMBOL(ap_is_se_guest);
- ap_configuration = kzalloc(sizeof(*ap_configuration), GFP_KERNEL);
- if (!ap_configuration)
- return;
- if (ap_query_configuration(ap_configuration) != 0) {
- kfree(ap_configuration);
- ap_configuration = NULL;
+/**
+ * ap_init_qci_info(): Allocate and query qci config info.
+ * Does also update the static variables ap_max_domain_id
+ * and ap_max_adapter_id if this info is available.
+ */
+static void __init ap_init_qci_info(void)
+{
+ if (!ap_qci_available() ||
+ ap_qci(ap_qci_info)) {
+ AP_DBF_INFO("%s QCI not supported\n", __func__);
return;
}
+ memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
+ AP_DBF_INFO("%s successful fetched initial qci info\n", __func__);
+
+ if (ap_qci_info->apxa) {
+ if (ap_qci_info->na) {
+ ap_max_adapter_id = ap_qci_info->na;
+ AP_DBF_INFO("%s new ap_max_adapter_id is %d\n",
+ __func__, ap_max_adapter_id);
+ }
+ if (ap_qci_info->nd) {
+ ap_max_domain_id = ap_qci_info->nd;
+ AP_DBF_INFO("%s new ap_max_domain_id is %d\n",
+ __func__, ap_max_domain_id);
+ }
+ }
}
/*
@@ -239,7 +301,6 @@ static inline int ap_test_config(unsigned int *field, unsigned int nr)
/*
* ap_test_config_card_id(): Test, whether an AP card ID is configured.
- * @id AP card ID
*
* Returns 0 if the card is not configured
* 1 if the card is configured or
@@ -247,112 +308,122 @@ static inline int ap_test_config(unsigned int *field, unsigned int nr)
*/
static inline int ap_test_config_card_id(unsigned int id)
{
- if (!ap_configuration) /* QCI not supported */
- return 1;
- return ap_test_config(ap_configuration->apm, id);
+ if (id > ap_max_adapter_id)
+ return 0;
+ if (ap_qci_info->flags)
+ return ap_test_config(ap_qci_info->apm, id);
+ return 1;
}
/*
- * ap_test_config_domain(): Test, whether an AP usage domain is configured.
- * @domain AP usage domain ID
+ * ap_test_config_usage_domain(): Test, whether an AP usage domain
+ * is configured.
*
* Returns 0 if the usage domain is not configured
* 1 if the usage domain is configured or
* if the configuration information is not available
*/
-static inline int ap_test_config_domain(unsigned int domain)
+int ap_test_config_usage_domain(unsigned int domain)
{
- if (!ap_configuration) /* QCI not supported */
- return domain < 16;
- return ap_test_config(ap_configuration->aqm, domain);
+ if (domain > ap_max_domain_id)
+ return 0;
+ if (ap_qci_info->flags)
+ return ap_test_config(ap_qci_info->aqm, domain);
+ return 1;
}
+EXPORT_SYMBOL(ap_test_config_usage_domain);
-/**
- * ap_query_queue(): Check if an AP queue is available.
- * @qid: The AP queue number
- * @queue_depth: Pointer to queue depth value
- * @device_type: Pointer to device type value
- * @facilities: Pointer to facility indicator
+/*
+ * ap_test_config_ctrl_domain(): Test, whether an AP control domain
+ * is configured.
+ * @domain AP control domain ID
+ *
+ * Returns 1 if the control domain is configured
+ * 0 in all other cases
+ */
+int ap_test_config_ctrl_domain(unsigned int domain)
+{
+ if (!ap_qci_info || domain > ap_max_domain_id)
+ return 0;
+ return ap_test_config(ap_qci_info->adm, domain);
+}
+EXPORT_SYMBOL(ap_test_config_ctrl_domain);
+
+/*
+ * ap_queue_info(): Check and get AP queue info.
+ * Returns: 1 if APQN exists and info is filled,
+ * 0 if APQN seems to exist but there is no info
+ * available (eg. caused by an asynch pending error)
+ * -1 invalid APQN, TAPQ error or AP queue status which
+ * indicates there is no APQN.
*/
-static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type,
- unsigned int *facilities)
+static int ap_queue_info(ap_qid_t qid, struct ap_tapq_hwinfo *hwinfo,
+ bool *decfg, bool *cstop)
{
struct ap_queue_status status;
- unsigned long info;
- int nd;
- if (!ap_test_config_card_id(AP_QID_CARD(qid)))
- return -ENODEV;
+ hwinfo->value = 0;
+
+ /* make sure we don't run into a specification exception */
+ if (AP_QID_CARD(qid) > ap_max_adapter_id ||
+ AP_QID_QUEUE(qid) > ap_max_domain_id)
+ return -1;
+
+ /* call TAPQ on this APQN */
+ status = ap_test_queue(qid, ap_apft_available(), hwinfo);
- status = ap_test_queue(qid, ap_apft_available(), &info);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
- *queue_depth = (int)(info & 0xff);
- *device_type = (int)((info >> 24) & 0xff);
- *facilities = (unsigned int)(info >> 32);
- /* Update maximum domain id */
- nd = (info >> 16) & 0xff;
- /* if N bit is available, z13 and newer */
- if ((info & (1UL << 57)) && nd > 0)
- ap_max_domain_id = nd;
- else /* older machine types */
- ap_max_domain_id = 15;
- switch (*device_type) {
- /* For CEX2 and CEX3 the available functions
- * are not reflected by the facilities bits.
- * Instead it is coded into the type. So here
- * modify the function bits based on the type.
- */
- case AP_DEVICE_TYPE_CEX2A:
- case AP_DEVICE_TYPE_CEX3A:
- *facilities |= 0x08000000;
- break;
- case AP_DEVICE_TYPE_CEX2C:
- case AP_DEVICE_TYPE_CEX3C:
- *facilities |= 0x10000000;
- break;
- default:
- break;
- }
- return 0;
- case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_RESET_IN_PROGRESS:
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
- case AP_RESPONSE_INVALID_ADDRESS:
- return -ENODEV;
- case AP_RESPONSE_RESET_IN_PROGRESS:
- case AP_RESPONSE_OTHERWISE_CHANGED:
case AP_RESPONSE_BUSY:
- return -EBUSY;
+ /* For all these RCs the tapq info should be available */
+ break;
default:
- BUG();
+ /* On a pending async error the info should be available */
+ if (!status.async)
+ return -1;
+ break;
}
+
+ /* There should be at least one of the mode bits set */
+ if (WARN_ON_ONCE(!hwinfo->value))
+ return 0;
+
+ *decfg = status.response_code == AP_RESPONSE_DECONFIGURED;
+ *cstop = status.response_code == AP_RESPONSE_CHECKSTOPPED;
+
+ return 1;
}
-void ap_wait(enum ap_wait wait)
+void ap_wait(enum ap_sm_wait wait)
{
ktime_t hr_time;
switch (wait) {
- case AP_WAIT_AGAIN:
- case AP_WAIT_INTERRUPT:
- if (ap_using_interrupts())
+ case AP_SM_WAIT_AGAIN:
+ case AP_SM_WAIT_INTERRUPT:
+ if (ap_irq_flag)
break;
if (ap_poll_kthread) {
wake_up(&ap_poll_wait);
break;
}
- /* Fall through */
- case AP_WAIT_TIMEOUT:
+ fallthrough;
+ case AP_SM_WAIT_LOW_TIMEOUT:
+ case AP_SM_WAIT_HIGH_TIMEOUT:
spin_lock_bh(&ap_poll_timer_lock);
if (!hrtimer_is_queued(&ap_poll_timer)) {
- hr_time = poll_timeout;
+ hr_time =
+ wait == AP_SM_WAIT_LOW_TIMEOUT ?
+ poll_low_timeout : poll_high_timeout;
hrtimer_forward_now(&ap_poll_timer, hr_time);
hrtimer_restart(&ap_poll_timer);
}
spin_unlock_bh(&ap_poll_timer_lock);
break;
- case AP_WAIT_NONE:
+ case AP_SM_WAIT_NONE:
default:
break;
}
@@ -366,12 +437,10 @@ void ap_wait(enum ap_wait wait)
*/
void ap_request_timeout(struct timer_list *t)
{
- struct ap_queue *aq = from_timer(aq, t, timeout);
+ struct ap_queue *aq = timer_container_of(aq, t, timeout);
- if (ap_suspend_flag)
- return;
spin_lock_bh(&aq->lock);
- ap_wait(ap_sm_event(aq, AP_EVENT_TIMEOUT));
+ ap_wait(ap_sm_event(aq, AP_SM_EVENT_TIMEOUT));
spin_unlock_bh(&aq->lock);
}
@@ -383,20 +452,20 @@ void ap_request_timeout(struct timer_list *t)
*/
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
{
- if (!ap_suspend_flag)
- tasklet_schedule(&ap_tasklet);
+ tasklet_schedule(&ap_tasklet);
return HRTIMER_NORESTART;
}
/**
* ap_interrupt_handler() - Schedule ap_tasklet on interrupt
* @airq: pointer to adapter interrupt descriptor
+ * @tpi_info: ignored
*/
-static void ap_interrupt_handler(struct airq_struct *airq)
+static void ap_interrupt_handler(struct airq_struct *airq,
+ struct tpi_info *tpi_info)
{
inc_irq_stat(IRQIO_APB);
- if (!ap_suspend_flag)
- tasklet_schedule(&ap_tasklet);
+ tasklet_schedule(&ap_tasklet);
}
/**
@@ -407,45 +476,41 @@ static void ap_interrupt_handler(struct airq_struct *airq)
*/
static void ap_tasklet_fn(unsigned long dummy)
{
- struct ap_card *ac;
+ int bkt;
struct ap_queue *aq;
- enum ap_wait wait = AP_WAIT_NONE;
+ enum ap_sm_wait wait = AP_SM_WAIT_NONE;
/* Reset the indicator if interrupts are used. Thus new interrupts can
- * be received. Doing it in the beginning of the tasklet is therefor
+ * be received. Doing it in the beginning of the tasklet is therefore
* important that no requests on any AP get lost.
*/
- if (ap_using_interrupts())
- xchg(ap_airq.lsi_ptr, 0);
-
- spin_lock_bh(&ap_list_lock);
- for_each_ap_card(ac) {
- for_each_ap_queue(aq, ac) {
- spin_lock_bh(&aq->lock);
- wait = min(wait, ap_sm_event_loop(aq, AP_EVENT_POLL));
- spin_unlock_bh(&aq->lock);
- }
+ if (ap_irq_flag)
+ WRITE_ONCE(*ap_airq.lsi_ptr, 0);
+
+ spin_lock_bh(&ap_queues_lock);
+ hash_for_each(ap_queues, bkt, aq, hnode) {
+ spin_lock_bh(&aq->lock);
+ wait = min(wait, ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
+ spin_unlock_bh(&aq->lock);
}
- spin_unlock_bh(&ap_list_lock);
+ spin_unlock_bh(&ap_queues_lock);
ap_wait(wait);
}
static int ap_pending_requests(void)
{
- struct ap_card *ac;
+ int bkt;
struct ap_queue *aq;
- spin_lock_bh(&ap_list_lock);
- for_each_ap_card(ac) {
- for_each_ap_queue(aq, ac) {
- if (aq->queue_count == 0)
- continue;
- spin_unlock_bh(&ap_list_lock);
- return 1;
- }
+ spin_lock_bh(&ap_queues_lock);
+ hash_for_each(ap_queues, bkt, aq, hnode) {
+ if (aq->queue_count == 0)
+ continue;
+ spin_unlock_bh(&ap_queues_lock);
+ return 1;
}
- spin_unlock_bh(&ap_list_lock);
+ spin_unlock_bh(&ap_queues_lock);
return 0;
}
@@ -468,7 +533,7 @@ static int ap_poll_thread(void *data)
while (!kthread_should_stop()) {
add_wait_queue(&ap_poll_wait, &wait);
set_current_state(TASK_INTERRUPTIBLE);
- if (ap_suspend_flag || !ap_pending_requests()) {
+ if (!ap_pending_requests()) {
schedule();
try_to_freeze();
}
@@ -489,7 +554,7 @@ static int ap_poll_thread_start(void)
{
int rc;
- if (ap_using_interrupts() || ap_poll_kthread)
+ if (ap_irq_flag || ap_poll_kthread)
return 0;
mutex_lock(&ap_poll_thread_mutex);
ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
@@ -513,6 +578,48 @@ static void ap_poll_thread_stop(void)
#define is_card_dev(x) ((x)->parent == ap_root_device)
#define is_queue_dev(x) ((x)->parent != ap_root_device)
+/*
+ * ap_init_apmsg() - Initialize ap_message.
+ */
+int ap_init_apmsg(struct ap_message *ap_msg, u32 flags)
+{
+ unsigned int maxmsgsize;
+
+ memset(ap_msg, 0, sizeof(*ap_msg));
+ ap_msg->flags = flags;
+
+ if (flags & AP_MSG_FLAG_MEMPOOL) {
+ ap_msg->msg = mempool_alloc_preallocated(ap_msg_pool);
+ if (!ap_msg->msg)
+ return -ENOMEM;
+ ap_msg->bufsize = AP_DEFAULT_MAX_MSG_SIZE;
+ return 0;
+ }
+
+ maxmsgsize = atomic_read(&ap_max_msg_size);
+ ap_msg->msg = kmalloc(maxmsgsize, GFP_KERNEL);
+ if (!ap_msg->msg)
+ return -ENOMEM;
+ ap_msg->bufsize = maxmsgsize;
+
+ return 0;
+}
+EXPORT_SYMBOL(ap_init_apmsg);
+
+/*
+ * ap_release_apmsg() - Release ap_message.
+ */
+void ap_release_apmsg(struct ap_message *ap_msg)
+{
+ if (ap_msg->flags & AP_MSG_FLAG_MEMPOOL) {
+ memzero_explicit(ap_msg->msg, ap_msg->bufsize);
+ mempool_free(ap_msg->msg, ap_msg_pool);
+ } else {
+ kfree_sensitive(ap_msg->msg);
+ }
+}
+EXPORT_SYMBOL(ap_release_apmsg);
+
/**
* ap_bus_match()
* @dev: Pointer to device
@@ -520,9 +627,9 @@ static void ap_poll_thread_stop(void)
*
* AP bus driver registration/unregistration.
*/
-static int ap_bus_match(struct device *dev, struct device_driver *drv)
+static int ap_bus_match(struct device *dev, const struct device_driver *drv)
{
- struct ap_driver *ap_drv = to_ap_drv(drv);
+ const struct ap_driver *ap_drv = to_ap_drv(drv);
struct ap_device_id *id;
/*
@@ -550,158 +657,242 @@ static int ap_bus_match(struct device *dev, struct device_driver *drv)
* It sets up a single environment variable DEV_TYPE which contains the
* hardware device type.
*/
-static int ap_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int ap_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct ap_device *ap_dev = to_ap_dev(dev);
- int retval = 0;
+ int rc = 0;
+ const struct ap_device *ap_dev = to_ap_dev(dev);
- if (!ap_dev)
- return -ENODEV;
+ /* Uevents from ap bus core don't need extensions to the env */
+ if (dev == ap_root_device)
+ return 0;
- /* Set up DEV_TYPE environment variable. */
- retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
- if (retval)
- return retval;
+ if (is_card_dev(dev)) {
+ struct ap_card *ac = to_ap_card(&ap_dev->device);
- /* Add MODALIAS= */
- retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
+ /* Set up DEV_TYPE environment variable. */
+ rc = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
+ if (rc)
+ return rc;
+ /* Add MODALIAS= */
+ rc = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);
+ if (rc)
+ return rc;
+
+ /* Add MODE=<accel|cca|ep11> */
+ if (ac->hwinfo.accel)
+ rc = add_uevent_var(env, "MODE=accel");
+ else if (ac->hwinfo.cca)
+ rc = add_uevent_var(env, "MODE=cca");
+ else if (ac->hwinfo.ep11)
+ rc = add_uevent_var(env, "MODE=ep11");
+ if (rc)
+ return rc;
+ } else {
+ struct ap_queue *aq = to_ap_queue(&ap_dev->device);
+
+ /* Add MODE=<accel|cca|ep11> */
+ if (aq->card->hwinfo.accel)
+ rc = add_uevent_var(env, "MODE=accel");
+ else if (aq->card->hwinfo.cca)
+ rc = add_uevent_var(env, "MODE=cca");
+ else if (aq->card->hwinfo.ep11)
+ rc = add_uevent_var(env, "MODE=ep11");
+ if (rc)
+ return rc;
+ }
- return retval;
+ return 0;
}
-static int ap_dev_suspend(struct device *dev)
+static void ap_send_init_scan_done_uevent(void)
{
- struct ap_device *ap_dev = to_ap_dev(dev);
+ char *envp[] = { "INITSCAN=done", NULL };
- if (ap_dev->drv && ap_dev->drv->suspend)
- ap_dev->drv->suspend(ap_dev);
- return 0;
+ kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp);
}
-static int ap_dev_resume(struct device *dev)
+static void ap_send_bindings_complete_uevent(void)
{
- struct ap_device *ap_dev = to_ap_dev(dev);
+ char buf[32];
+ char *envp[] = { "BINDINGS=complete", buf, NULL };
- if (ap_dev->drv && ap_dev->drv->resume)
- ap_dev->drv->resume(ap_dev);
- return 0;
+ snprintf(buf, sizeof(buf), "COMPLETECOUNT=%llu",
+ atomic64_inc_return(&ap_bindings_complete_count));
+ kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp);
}
-static void ap_bus_suspend(void)
+void ap_send_config_uevent(struct ap_device *ap_dev, bool cfg)
{
- AP_DBF(DBF_DEBUG, "%s running\n", __func__);
+ char buf[16];
+ char *envp[] = { buf, NULL };
- ap_suspend_flag = 1;
- /*
- * Disable scanning for devices, thus we do not want to scan
- * for them after removing.
- */
- flush_work(&ap_scan_work);
- tasklet_disable(&ap_tasklet);
+ snprintf(buf, sizeof(buf), "CONFIG=%d", cfg ? 1 : 0);
+
+ kobject_uevent_env(&ap_dev->device.kobj, KOBJ_CHANGE, envp);
}
+EXPORT_SYMBOL(ap_send_config_uevent);
-static int __ap_card_devices_unregister(struct device *dev, void *dummy)
+void ap_send_online_uevent(struct ap_device *ap_dev, int online)
{
- if (is_card_dev(dev))
- device_unregister(dev);
- return 0;
+ char buf[16];
+ char *envp[] = { buf, NULL };
+
+ snprintf(buf, sizeof(buf), "ONLINE=%d", online ? 1 : 0);
+
+ kobject_uevent_env(&ap_dev->device.kobj, KOBJ_CHANGE, envp);
}
+EXPORT_SYMBOL(ap_send_online_uevent);
-static int __ap_queue_devices_unregister(struct device *dev, void *dummy)
+static void ap_send_mask_changed_uevent(unsigned long *newapm,
+ unsigned long *newaqm)
{
- if (is_queue_dev(dev))
- device_unregister(dev);
- return 0;
+ char buf[100];
+ char *envp[] = { buf, NULL };
+
+ if (newapm)
+ snprintf(buf, sizeof(buf),
+ "APMASK=0x%016lx%016lx%016lx%016lx\n",
+ newapm[0], newapm[1], newapm[2], newapm[3]);
+ else
+ snprintf(buf, sizeof(buf),
+ "AQMASK=0x%016lx%016lx%016lx%016lx\n",
+ newaqm[0], newaqm[1], newaqm[2], newaqm[3]);
+
+ kobject_uevent_env(&ap_root_device->kobj, KOBJ_CHANGE, envp);
}
-static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
+/*
+ * calc # of bound APQNs
+ */
+
+struct __ap_calc_ctrs {
+ unsigned int apqns;
+ unsigned int bound;
+};
+
+static int __ap_calc_helper(struct device *dev, void *arg)
{
- if (is_queue_dev(dev) &&
- AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long) data)
- device_unregister(dev);
+ struct __ap_calc_ctrs *pctrs = (struct __ap_calc_ctrs *)arg;
+
+ if (is_queue_dev(dev)) {
+ pctrs->apqns++;
+ if (dev->driver)
+ pctrs->bound++;
+ }
+
return 0;
}
-static void ap_bus_resume(void)
+static void ap_calc_bound_apqns(unsigned int *apqns, unsigned int *bound)
{
- int rc;
-
- AP_DBF(DBF_DEBUG, "%s running\n", __func__);
+ struct __ap_calc_ctrs ctrs;
- /* remove all queue devices */
- bus_for_each_dev(&ap_bus_type, NULL, NULL,
- __ap_queue_devices_unregister);
- /* remove all card devices */
- bus_for_each_dev(&ap_bus_type, NULL, NULL,
- __ap_card_devices_unregister);
+ memset(&ctrs, 0, sizeof(ctrs));
+ bus_for_each_dev(&ap_bus_type, NULL, (void *)&ctrs, __ap_calc_helper);
- /* Reset thin interrupt setting */
- if (ap_interrupts_available() && !ap_using_interrupts()) {
- rc = register_adapter_interrupt(&ap_airq);
- ap_airq_flag = (rc == 0);
- }
- if (!ap_interrupts_available() && ap_using_interrupts()) {
- unregister_adapter_interrupt(&ap_airq);
- ap_airq_flag = 0;
- }
- /* Reset domain */
- if (!user_set_domain)
- ap_domain_index = -1;
- /* Get things going again */
- ap_suspend_flag = 0;
- if (ap_airq_flag)
- xchg(ap_airq.lsi_ptr, 0);
- tasklet_enable(&ap_tasklet);
- queue_work(system_long_wq, &ap_scan_work);
+ *apqns = ctrs.apqns;
+ *bound = ctrs.bound;
}
-static int ap_power_event(struct notifier_block *this, unsigned long event,
- void *ptr)
+/*
+ * After ap bus scan do check if all existing APQNs are
+ * bound to device drivers.
+ */
+static void ap_check_bindings_complete(void)
{
- switch (event) {
- case PM_HIBERNATION_PREPARE:
- case PM_SUSPEND_PREPARE:
- ap_bus_suspend();
- break;
- case PM_POST_HIBERNATION:
- case PM_POST_SUSPEND:
- ap_bus_resume();
- break;
- default:
- break;
+ unsigned int apqns, bound;
+
+ if (atomic64_read(&ap_scan_bus_count) >= 1) {
+ ap_calc_bound_apqns(&apqns, &bound);
+ if (bound == apqns) {
+ if (!completion_done(&ap_apqn_bindings_complete)) {
+ complete_all(&ap_apqn_bindings_complete);
+ ap_send_bindings_complete_uevent();
+ pr_debug("all apqn bindings complete\n");
+ }
+ }
}
- return NOTIFY_DONE;
}
-static struct notifier_block ap_power_notifier = {
- .notifier_call = ap_power_event,
-};
-static SIMPLE_DEV_PM_OPS(ap_bus_pm_ops, ap_dev_suspend, ap_dev_resume);
+/*
+ * Interface to wait for the AP bus to have done one initial ap bus
+ * scan and all detected APQNs have been bound to device drivers.
+ * If these both conditions are not fulfilled, this function blocks
+ * on a condition with wait_for_completion_interruptible_timeout().
+ * If these both conditions are fulfilled (before the timeout hits)
+ * the return value is 0. If the timeout (in jiffies) hits instead
+ * -ETIME is returned. On failures negative return values are
+ * returned to the caller.
+ */
+int ap_wait_apqn_bindings_complete(unsigned long timeout)
+{
+ int rc = 0;
+ long l;
-static struct bus_type ap_bus_type = {
- .name = "ap",
- .match = &ap_bus_match,
- .uevent = &ap_uevent,
- .pm = &ap_bus_pm_ops,
-};
+ if (completion_done(&ap_apqn_bindings_complete))
+ return 0;
+
+ if (timeout)
+ l = wait_for_completion_interruptible_timeout(
+ &ap_apqn_bindings_complete, timeout);
+ else
+ l = wait_for_completion_interruptible(
+ &ap_apqn_bindings_complete);
+ if (l < 0)
+ rc = l == -ERESTARTSYS ? -EINTR : l;
+ else if (l == 0 && timeout)
+ rc = -ETIME;
+
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+EXPORT_SYMBOL(ap_wait_apqn_bindings_complete);
+
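
ap_wait_apqn_bindings_complete() gives in-kernel consumers a way to defer work until the initial bus scan has run and every detected APQN is bound. A hedged caller sketch (hypothetical consumer, not from this patch):

	int rc;

	/* wait up to 5 seconds for the initial scan and all bindings */
	rc = ap_wait_apqn_bindings_complete(msecs_to_jiffies(5000));
	if (rc == -ETIME)
		pr_warn("AP bindings still incomplete after 5s\n");
	else if (rc)
		return rc;	/* e.g. -EINTR when interrupted by a signal */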
+static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
+{
+ if (is_queue_dev(dev) &&
+ AP_QID_CARD(to_ap_queue(dev)->qid) == (int)(long)data)
+ device_unregister(dev);
+ return 0;
+}
static int __ap_revise_reserved(struct device *dev, void *dummy)
{
int rc, card, queue, devres, drvres;
if (is_queue_dev(dev)) {
- card = AP_QID_CARD(to_ap_queue(dev)->qid);
- queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
- mutex_lock(&ap_perms_mutex);
- devres = test_bit_inv(card, ap_perms.apm)
- && test_bit_inv(queue, ap_perms.aqm);
- mutex_unlock(&ap_perms_mutex);
- drvres = to_ap_drv(dev->driver)->flags
- & AP_DRIVER_FLAG_DEFAULT;
- if (!!devres != !!drvres) {
- AP_DBF(DBF_DEBUG, "reprobing queue=%02x.%04x\n",
- card, queue);
- rc = device_reprobe(dev);
+ struct ap_driver *ap_drv = to_ap_drv(dev->driver);
+ struct ap_queue *aq = to_ap_queue(dev);
+ struct ap_device *ap_dev = &aq->ap_dev;
+
+ card = AP_QID_CARD(aq->qid);
+ queue = AP_QID_QUEUE(aq->qid);
+
+ if (ap_dev->driver_override) {
+ if (strcmp(ap_dev->driver_override,
+ ap_drv->driver.name)) {
+ pr_debug("reprobing queue=%02x.%04x\n", card, queue);
+ rc = device_reprobe(dev);
+ if (rc) {
+ AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n",
+ __func__, card, queue);
+ }
+ }
+ } else {
+ mutex_lock(&ap_attr_mutex);
+ devres = test_bit_inv(card, ap_perms.apm) &&
+ test_bit_inv(queue, ap_perms.aqm);
+ mutex_unlock(&ap_attr_mutex);
+ drvres = to_ap_drv(dev->driver)->flags
+ & AP_DRIVER_FLAG_DEFAULT;
+ if (!!devres != !!drvres) {
+ pr_debug("reprobing queue=%02x.%04x\n", card, queue);
+ rc = device_reprobe(dev);
+ if (rc) {
+ AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n",
+ __func__, card, queue);
+ }
+ }
}
}
@@ -713,41 +904,69 @@ static void ap_bus_revise_bindings(void)
bus_for_each_dev(&ap_bus_type, NULL, NULL, __ap_revise_reserved);
}
+/**
+ * ap_owned_by_def_drv: indicates whether an AP adapter is reserved for the
+ * default host driver or not.
+ * @card: the APID of the adapter card to check
+ * @queue: the APQI of the queue to check
+ *
+ * Note: the ap_attr_mutex must be locked by the caller of this function.
+ *
+ * Return: an int specifying whether the AP adapter is reserved for the host (1)
+ * or not (0).
+ */
int ap_owned_by_def_drv(int card, int queue)
{
+ struct ap_queue *aq;
int rc = 0;
if (card < 0 || card >= AP_DEVICES || queue < 0 || queue >= AP_DOMAINS)
return -EINVAL;
- mutex_lock(&ap_perms_mutex);
+ aq = ap_get_qdev(AP_MKQID(card, queue));
+ if (aq) {
+ const struct device_driver *drv = aq->ap_dev.device.driver;
+ const struct ap_driver *ap_drv = to_ap_drv(drv);
+ bool override = !!aq->ap_dev.driver_override;
+
+ if (override && drv && ap_drv->flags & AP_DRIVER_FLAG_DEFAULT)
+ rc = 1;
+ put_device(&aq->ap_dev.device);
+ if (override)
+ goto out;
+ }
- if (test_bit_inv(card, ap_perms.apm)
- && test_bit_inv(queue, ap_perms.aqm))
+ if (test_bit_inv(card, ap_perms.apm) &&
+ test_bit_inv(queue, ap_perms.aqm))
rc = 1;
- mutex_unlock(&ap_perms_mutex);
-
+out:
return rc;
}
EXPORT_SYMBOL(ap_owned_by_def_drv);
+/**
+ * ap_apqn_in_matrix_owned_by_def_drv: indicates whether every APQN contained in
+ * a set is reserved for the host drivers
+ * or not.
+ * @apm: a bitmap specifying a set of APIDs comprising the APQNs to check
+ * @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check
+ *
+ * Note: the ap_attr_mutex must be locked by the caller of this function.
+ *
+ * Return: an int specifying whether each APQN is reserved for the host (1) or
+ * not (0)
+ */
int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
unsigned long *aqm)
{
int card, queue, rc = 0;
- mutex_lock(&ap_perms_mutex);
-
for (card = 0; !rc && card < AP_DEVICES; card++)
- if (test_bit_inv(card, apm) &&
- test_bit_inv(card, ap_perms.apm))
+ if (test_bit_inv(card, apm))
for (queue = 0; !rc && queue < AP_DOMAINS; queue++)
- if (test_bit_inv(queue, aqm) &&
- test_bit_inv(queue, ap_perms.aqm))
- rc = 1;
-
- mutex_unlock(&ap_perms_mutex);
+ if (test_bit_inv(queue, aqm))
+ rc = ap_owned_by_def_drv(card, queue);
return rc;
}
@@ -757,7 +976,10 @@ static int ap_device_probe(struct device *dev)
{
struct ap_device *ap_dev = to_ap_dev(dev);
struct ap_driver *ap_drv = to_ap_drv(dev->driver);
- int card, queue, devres, drvres, rc;
+ int card, queue, devres, drvres, rc = -ENODEV;
+
+ if (!get_device(dev))
+ return rc;
if (is_queue_dev(dev)) {
/*
@@ -768,77 +990,117 @@ static int ap_device_probe(struct device *dev)
*/
card = AP_QID_CARD(to_ap_queue(dev)->qid);
queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
- mutex_lock(&ap_perms_mutex);
- devres = test_bit_inv(card, ap_perms.apm)
- && test_bit_inv(queue, ap_perms.aqm);
- mutex_unlock(&ap_perms_mutex);
- drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
- if (!!devres != !!drvres)
- return -ENODEV;
- /* (re-)init queue's state machine */
- ap_queue_reinit_state(to_ap_queue(dev));
+ if (ap_dev->driver_override) {
+ if (strcmp(ap_dev->driver_override,
+ ap_drv->driver.name))
+ goto out;
+ } else {
+ mutex_lock(&ap_attr_mutex);
+ devres = test_bit_inv(card, ap_perms.apm) &&
+ test_bit_inv(queue, ap_perms.aqm);
+ mutex_unlock(&ap_attr_mutex);
+ drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
+ if (!!devres != !!drvres)
+ goto out;
+ }
}
+ /*
+ * Rearm the bindings complete completion to trigger
+ * bindings complete when all devices are bound again
+ */
+ reinit_completion(&ap_apqn_bindings_complete);
+
/* Add queue/card to list of active queues/cards */
- spin_lock_bh(&ap_list_lock);
- if (is_card_dev(dev))
- list_add(&to_ap_card(dev)->list, &ap_card_list);
- else
- list_add(&to_ap_queue(dev)->list,
- &to_ap_queue(dev)->card->queues);
- spin_unlock_bh(&ap_list_lock);
+ spin_lock_bh(&ap_queues_lock);
+ if (is_queue_dev(dev))
+ hash_add(ap_queues, &to_ap_queue(dev)->hnode,
+ to_ap_queue(dev)->qid);
+ spin_unlock_bh(&ap_queues_lock);
- ap_dev->drv = ap_drv;
rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
if (rc) {
- spin_lock_bh(&ap_list_lock);
- if (is_card_dev(dev))
- list_del_init(&to_ap_card(dev)->list);
- else
- list_del_init(&to_ap_queue(dev)->list);
- spin_unlock_bh(&ap_list_lock);
- ap_dev->drv = NULL;
+ spin_lock_bh(&ap_queues_lock);
+ if (is_queue_dev(dev))
+ hash_del(&to_ap_queue(dev)->hnode);
+ spin_unlock_bh(&ap_queues_lock);
}
+out:
+ if (rc) {
+ put_device(dev);
+ } else {
+ if (is_queue_dev(dev)) {
+ pr_debug("queue=%02x.%04x new driver=%s\n",
+ card, queue, ap_drv->driver.name);
+ } else {
+ pr_debug("card=%02x new driver=%s\n",
+ to_ap_card(dev)->id, ap_drv->driver.name);
+ }
+ }
return rc;
}
-static int ap_device_remove(struct device *dev)
+static void ap_device_remove(struct device *dev)
{
struct ap_device *ap_dev = to_ap_dev(dev);
- struct ap_driver *ap_drv = ap_dev->drv;
+ struct ap_driver *ap_drv = to_ap_drv(dev->driver);
+ /* prepare ap queue device removal */
if (is_queue_dev(dev))
- ap_queue_remove(to_ap_queue(dev));
+ ap_queue_prepare_remove(to_ap_queue(dev));
+
+ /* driver's chance to clean up gracefully */
if (ap_drv->remove)
ap_drv->remove(ap_dev);
+ /* now do the ap queue device remove */
+ if (is_queue_dev(dev))
+ ap_queue_remove(to_ap_queue(dev));
+
/* Remove queue/card from list of active queues/cards */
- spin_lock_bh(&ap_list_lock);
- if (is_card_dev(dev))
- list_del_init(&to_ap_card(dev)->list);
- else
- list_del_init(&to_ap_queue(dev)->list);
- spin_unlock_bh(&ap_list_lock);
+ spin_lock_bh(&ap_queues_lock);
+ if (is_queue_dev(dev))
+ hash_del(&to_ap_queue(dev)->hnode);
+ spin_unlock_bh(&ap_queues_lock);
- return 0;
+ put_device(dev);
}
+struct ap_queue *ap_get_qdev(ap_qid_t qid)
+{
+ int bkt;
+ struct ap_queue *aq;
+
+ spin_lock_bh(&ap_queues_lock);
+ hash_for_each(ap_queues, bkt, aq, hnode) {
+ if (aq->qid == qid) {
+ get_device(&aq->ap_dev.device);
+ spin_unlock_bh(&ap_queues_lock);
+ return aq;
+ }
+ }
+ spin_unlock_bh(&ap_queues_lock);
+
+ return NULL;
+}
+EXPORT_SYMBOL(ap_get_qdev);
+
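
ap_get_qdev() hands back the queue with an elevated device refcount, so every successful lookup must be paired with put_device(), as ap_owned_by_def_drv() above already demonstrates. A minimal sketch (card/dom are hypothetical values):

	struct ap_queue *aq;

	aq = ap_get_qdev(AP_MKQID(card, dom));
	if (aq) {
		/* ... inspect or use the queue ... */
		put_device(&aq->ap_dev.device);
	}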
int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
char *name)
{
struct device_driver *drv = &ap_drv->driver;
-
- if (!initialised)
- return -ENODEV;
+ int rc;
drv->bus = &ap_bus_type;
- drv->probe = ap_device_probe;
- drv->remove = ap_device_remove;
drv->owner = owner;
drv->name = name;
- return driver_register(drv);
+ rc = driver_register(drv);
+
+ ap_check_bindings_complete();
+
+ return rc;
}
EXPORT_SYMBOL(ap_driver_register);
@@ -848,26 +1110,85 @@ void ap_driver_unregister(struct ap_driver *ap_drv)
}
EXPORT_SYMBOL(ap_driver_unregister);
-void ap_bus_force_rescan(void)
+/*
+ * Enforce a synchronous AP bus rescan.
+ * Returns true if the bus scan finds a change in the AP configuration
+ * and AP devices have been added or deleted when this function returns.
+ */
+bool ap_bus_force_rescan(void)
{
- if (ap_suspend_flag)
- return;
- /* processing a asynchronous bus rescan */
- del_timer(&ap_config_timer);
- queue_work(system_long_wq, &ap_scan_work);
- flush_work(&ap_scan_work);
+ unsigned long scan_counter = atomic64_read(&ap_scan_bus_count);
+ bool rc = false;
+
+ pr_debug("> scan counter=%lu\n", scan_counter);
+
+ /* Only trigger AP bus scans after the initial scan is done */
+ if (scan_counter <= 0)
+ goto out;
+
+ /*
+ * There is one unlikely but nevertheless valid scenario where the
+ * thread holding the mutex may try to send some crypto load but
+ * all cards are offline so a rescan is triggered which causes
+ * a recursive call of ap_bus_force_rescan(). A simple return if
+ * the mutex is already locked by this thread solves this.
+ */
+ if (mutex_is_locked(&ap_scan_bus_mutex)) {
+ if (ap_scan_bus_task == current)
+ goto out;
+ }
+
+ /* Try to acquire the AP scan bus mutex */
+ if (mutex_trylock(&ap_scan_bus_mutex)) {
+ /* mutex acquired, run the AP bus scan */
+ ap_scan_bus_task = current;
+ ap_scan_bus_result = ap_scan_bus();
+ rc = ap_scan_bus_result;
+ ap_scan_bus_task = NULL;
+ mutex_unlock(&ap_scan_bus_mutex);
+ goto out;
+ }
+
+ /*
+ * Mutex acquire failed. So there is currently another task
+ * already running the AP bus scan. Then let's simply wait
+ * for the lock which means the other task has finished and
+ * stored the result in ap_scan_bus_result.
+ */
+ if (mutex_lock_interruptible(&ap_scan_bus_mutex)) {
+ /* some error occurred, ignore and go out */
+ goto out;
+ }
+ rc = ap_scan_bus_result;
+ mutex_unlock(&ap_scan_bus_mutex);
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
}
EXPORT_SYMBOL(ap_bus_force_rescan);
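
Since ap_bus_force_rescan() is now synchronous and reports whether the scan changed anything, callers can use it as a retry gate. A hedged sketch (hypothetical error path of an AP request dispatcher):

	/* no usable APQN found: rescan and retry once if devices changed */
	if (ap_bus_force_rescan())
		goto retry;
	return -ENODEV;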
/*
- * hex2bitmap() - parse hex mask string and set bitmap.
- * Valid strings are "0x012345678" with at least one valid hex number.
- * Rest of the bitmap to the right is padded with 0. No spaces allowed
- * within the string, the leading 0x may be omitted.
- * Returns the bitmask with exactly the bits set as given by the hex
- * string (both in big endian order).
+ * A config change has happened, force an ap bus rescan.
*/
-static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
+static int ap_bus_cfg_chg(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ if (action != CHSC_NOTIFY_AP_CFG)
+ return NOTIFY_DONE;
+
+ pr_debug("config change, forcing bus rescan\n");
+
+ ap_bus_force_rescan();
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block ap_bus_nb = {
+ .notifier_call = ap_bus_cfg_chg,
+};
+
+int ap_hex2bitmap(const char *str, unsigned long *bitmap, int bits)
{
int i, n, b;
@@ -894,6 +1215,7 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
return -EINVAL;
return 0;
}
+EXPORT_SYMBOL(ap_hex2bitmap);
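
ap_hex2bitmap(), now exported, keeps the semantics documented for the old static hex2bitmap(): the hex string fills the bitmap from the left in inverse bit order (matching the test_bit_inv() accesses used on these masks), the rest is zero-padded, and the leading 0x may be omitted. A usage sketch (hypothetical mask value):

	DECLARE_BITMAP(apm, AP_DEVICES);

	/* select adapters 0-7, i.e. the leftmost byte of the 256-bit mask */
	if (ap_hex2bitmap("0xff", apm, AP_DEVICES) == 0 &&
	    test_bit_inv(3, apm))
		pr_info("APID 3 is selected\n");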
/*
* modify_bitmap() - parse bitmask argument and modify an existing
@@ -914,7 +1236,7 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
*/
static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
{
- int a, i, z;
+ unsigned long a, i, z;
char *np, sign;
/* bits needs to be a multiple of 8 */
@@ -947,6 +1269,23 @@ static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
return 0;
}
+static int ap_parse_bitmap_str(const char *str, unsigned long *bitmap, int bits,
+ unsigned long *newmap)
+{
+ unsigned long size;
+ int rc;
+
+ size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
+ if (*str == '+' || *str == '-') {
+ memcpy(newmap, bitmap, size);
+ rc = modify_bitmap(str, newmap, bits);
+ } else {
+ memset(newmap, 0, size);
+ rc = ap_hex2bitmap(str, newmap, bits);
+ }
+ return rc;
+}
+
int ap_parse_mask_str(const char *str,
unsigned long *bitmap, int bits,
struct mutex *lock)
@@ -958,7 +1297,7 @@ int ap_parse_mask_str(const char *str,
if (bits & 0x07)
return -EINVAL;
- size = BITS_TO_LONGS(bits)*sizeof(unsigned long);
+ size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
newmap = kmalloc(size, GFP_KERNEL);
if (!newmap)
return -ENOMEM;
@@ -966,14 +1305,7 @@ int ap_parse_mask_str(const char *str,
kfree(newmap);
return -ERESTARTSYS;
}
-
- if (*str == '+' || *str == '-') {
- memcpy(newmap, bitmap, size);
- rc = modify_bitmap(str, newmap, bits);
- } else {
- memset(newmap, 0, size);
- rc = hex2bitmap(str, newmap, bits);
- }
+ rc = ap_parse_bitmap_str(str, bitmap, bits, newmap);
if (rc == 0)
memcpy(bitmap, newmap, size);
mutex_unlock(lock);
@@ -986,12 +1318,12 @@ EXPORT_SYMBOL(ap_parse_mask_str);
* AP bus attributes.
*/
-static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
+static ssize_t ap_domain_show(const struct bus_type *bus, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
+ return sysfs_emit(buf, "%d\n", ap_domain_index);
}
-static ssize_t ap_domain_store(struct bus_type *bus,
+static ssize_t ap_domain_store(const struct bus_type *bus,
const char *buf, size_t count)
{
int domain;
@@ -1000,129 +1332,135 @@ static ssize_t ap_domain_store(struct bus_type *bus,
domain < 0 || domain > ap_max_domain_id ||
!test_bit_inv(domain, ap_perms.aqm))
return -EINVAL;
+
spin_lock_bh(&ap_domain_lock);
ap_domain_index = domain;
spin_unlock_bh(&ap_domain_lock);
- AP_DBF(DBF_DEBUG, "stored new default domain=%d\n", domain);
+ AP_DBF_INFO("%s stored new default domain=%d\n",
+ __func__, domain);
return count;
}
static BUS_ATTR_RW(ap_domain);
-static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf)
+static ssize_t ap_control_domain_mask_show(const struct bus_type *bus, char *buf)
{
- if (!ap_configuration) /* QCI not supported */
- return snprintf(buf, PAGE_SIZE, "not supported\n");
+ if (!ap_qci_info->flags) /* QCI not supported */
+ return sysfs_emit(buf, "not supported\n");
- return snprintf(buf, PAGE_SIZE,
- "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
- ap_configuration->adm[0], ap_configuration->adm[1],
- ap_configuration->adm[2], ap_configuration->adm[3],
- ap_configuration->adm[4], ap_configuration->adm[5],
- ap_configuration->adm[6], ap_configuration->adm[7]);
+ return sysfs_emit(buf, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+ ap_qci_info->adm[0], ap_qci_info->adm[1],
+ ap_qci_info->adm[2], ap_qci_info->adm[3],
+ ap_qci_info->adm[4], ap_qci_info->adm[5],
+ ap_qci_info->adm[6], ap_qci_info->adm[7]);
}
static BUS_ATTR_RO(ap_control_domain_mask);
-static ssize_t ap_usage_domain_mask_show(struct bus_type *bus, char *buf)
+static ssize_t ap_usage_domain_mask_show(const struct bus_type *bus, char *buf)
{
- if (!ap_configuration) /* QCI not supported */
- return snprintf(buf, PAGE_SIZE, "not supported\n");
+ if (!ap_qci_info->flags) /* QCI not supported */
+ return sysfs_emit(buf, "not supported\n");
- return snprintf(buf, PAGE_SIZE,
- "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
- ap_configuration->aqm[0], ap_configuration->aqm[1],
- ap_configuration->aqm[2], ap_configuration->aqm[3],
- ap_configuration->aqm[4], ap_configuration->aqm[5],
- ap_configuration->aqm[6], ap_configuration->aqm[7]);
+ return sysfs_emit(buf, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+ ap_qci_info->aqm[0], ap_qci_info->aqm[1],
+ ap_qci_info->aqm[2], ap_qci_info->aqm[3],
+ ap_qci_info->aqm[4], ap_qci_info->aqm[5],
+ ap_qci_info->aqm[6], ap_qci_info->aqm[7]);
}
static BUS_ATTR_RO(ap_usage_domain_mask);
-static ssize_t ap_adapter_mask_show(struct bus_type *bus, char *buf)
+static ssize_t ap_adapter_mask_show(const struct bus_type *bus, char *buf)
{
- if (!ap_configuration) /* QCI not supported */
- return snprintf(buf, PAGE_SIZE, "not supported\n");
+ if (!ap_qci_info->flags) /* QCI not supported */
+ return sysfs_emit(buf, "not supported\n");
- return snprintf(buf, PAGE_SIZE,
- "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
- ap_configuration->apm[0], ap_configuration->apm[1],
- ap_configuration->apm[2], ap_configuration->apm[3],
- ap_configuration->apm[4], ap_configuration->apm[5],
- ap_configuration->apm[6], ap_configuration->apm[7]);
+ return sysfs_emit(buf, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
+ ap_qci_info->apm[0], ap_qci_info->apm[1],
+ ap_qci_info->apm[2], ap_qci_info->apm[3],
+ ap_qci_info->apm[4], ap_qci_info->apm[5],
+ ap_qci_info->apm[6], ap_qci_info->apm[7]);
}
static BUS_ATTR_RO(ap_adapter_mask);
-static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
+static ssize_t ap_interrupts_show(const struct bus_type *bus, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n",
- ap_using_interrupts() ? 1 : 0);
+ return sysfs_emit(buf, "%d\n", ap_irq_flag ? 1 : 0);
}
static BUS_ATTR_RO(ap_interrupts);
-static ssize_t config_time_show(struct bus_type *bus, char *buf)
+static ssize_t config_time_show(const struct bus_type *bus, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
+ return sysfs_emit(buf, "%d\n", ap_scan_bus_time);
}
-static ssize_t config_time_store(struct bus_type *bus,
+static ssize_t config_time_store(const struct bus_type *bus,
const char *buf, size_t count)
{
int time;
if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
return -EINVAL;
- ap_config_time = time;
- mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
+ ap_scan_bus_time = time;
+ mod_timer(&ap_scan_bus_timer, jiffies + ap_scan_bus_time * HZ);
return count;
}
static BUS_ATTR_RW(config_time);
-static ssize_t poll_thread_show(struct bus_type *bus, char *buf)
+static ssize_t poll_thread_show(const struct bus_type *bus, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
+ return sysfs_emit(buf, "%d\n", ap_poll_kthread ? 1 : 0);
}
-static ssize_t poll_thread_store(struct bus_type *bus,
+static ssize_t poll_thread_store(const struct bus_type *bus,
const char *buf, size_t count)
{
- int flag, rc;
+ bool value;
+ int rc;
- if (sscanf(buf, "%d\n", &flag) != 1)
- return -EINVAL;
- if (flag) {
+ rc = kstrtobool(buf, &value);
+ if (rc)
+ return rc;
+
+ if (value) {
rc = ap_poll_thread_start();
if (rc)
count = rc;
- } else
+ } else {
ap_poll_thread_stop();
+ }
return count;
}
static BUS_ATTR_RW(poll_thread);
-static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
+static ssize_t poll_timeout_show(const struct bus_type *bus, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
+ return sysfs_emit(buf, "%lu\n", poll_high_timeout);
}
-static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
+static ssize_t poll_timeout_store(const struct bus_type *bus, const char *buf,
size_t count)
{
- unsigned long long time;
+ unsigned long value;
ktime_t hr_time;
+ int rc;
+
+ rc = kstrtoul(buf, 0, &value);
+ if (rc)
+ return rc;
/* 120 seconds = maximum poll interval */
- if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
- time > 120000000000ULL)
+ if (value > 120000000000UL)
return -EINVAL;
- poll_timeout = time;
- hr_time = poll_timeout;
+ poll_high_timeout = value;
+ hr_time = poll_high_timeout;
spin_lock_bh(&ap_poll_timer_lock);
hrtimer_cancel(&ap_poll_timer);
@@ -1135,95 +1473,330 @@ static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
static BUS_ATTR_RW(poll_timeout);
-static ssize_t ap_max_domain_id_show(struct bus_type *bus, char *buf)
+static ssize_t ap_max_domain_id_show(const struct bus_type *bus, char *buf)
{
- int max_domain_id;
-
- if (ap_configuration)
- max_domain_id = ap_max_domain_id ? : -1;
- else
- max_domain_id = 15;
- return snprintf(buf, PAGE_SIZE, "%d\n", max_domain_id);
+ return sysfs_emit(buf, "%d\n", ap_max_domain_id);
}
static BUS_ATTR_RO(ap_max_domain_id);
-static ssize_t apmask_show(struct bus_type *bus, char *buf)
+static ssize_t ap_max_adapter_id_show(const struct bus_type *bus, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ap_max_adapter_id);
+}
+
+static BUS_ATTR_RO(ap_max_adapter_id);
+
+static ssize_t apmask_show(const struct bus_type *bus, char *buf)
{
int rc;
- if (mutex_lock_interruptible(&ap_perms_mutex))
+ if (mutex_lock_interruptible(&ap_attr_mutex))
return -ERESTARTSYS;
- rc = snprintf(buf, PAGE_SIZE,
- "0x%016lx%016lx%016lx%016lx\n",
- ap_perms.apm[0], ap_perms.apm[1],
- ap_perms.apm[2], ap_perms.apm[3]);
- mutex_unlock(&ap_perms_mutex);
+ rc = sysfs_emit(buf, "0x%016lx%016lx%016lx%016lx\n",
+ ap_perms.apm[0], ap_perms.apm[1],
+ ap_perms.apm[2], ap_perms.apm[3]);
+ mutex_unlock(&ap_attr_mutex);
return rc;
}
-static ssize_t apmask_store(struct bus_type *bus, const char *buf,
- size_t count)
+static int __verify_card_reservations(struct device_driver *drv, void *data)
+{
+ int rc = 0;
+ struct ap_driver *ap_drv = to_ap_drv(drv);
+ unsigned long *newapm = (unsigned long *)data;
+ unsigned long aqm_any[BITS_TO_LONGS(AP_DOMAINS)];
+
+ /*
+ * increase the driver's module refcounter to be sure it is not
+ * going away when we invoke the callback function.
+ */
+ if (!try_module_get(drv->owner))
+ return 0;
+
+ if (ap_drv->in_use) {
+ bitmap_fill(aqm_any, AP_DOMAINS);
+ rc = ap_drv->in_use(newapm, aqm_any);
+ if (rc)
+ rc = -EBUSY;
+ }
+
+ /* release the driver's module */
+ module_put(drv->owner);
+
+ return rc;
+}
+
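
The in_use callback lets a non-default driver veto an apmask/aqmask change that would take queues away from it; any nonzero return is mapped to -EBUSY and the mask update is rejected. A sketch of such a callback (hypothetical driver, following the apm/aqm convention above):

	static int my_drv_in_use(unsigned long *apm, unsigned long *aqm)
	{
		/* refuse to give up APQN (1,7) while it is in use */
		return test_bit_inv(1, apm) && test_bit_inv(7, aqm);
	}

wired into struct ap_driver's in_use member before ap_driver_register() is called.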
+static int apmask_commit(unsigned long *newapm)
{
int rc;
+ unsigned long reserved[BITS_TO_LONGS(AP_DEVICES)];
+
+ /*
+ * Check if any bits in the apmask have been set which will
+ * result in queues being removed from non-default drivers
+ */
+ if (bitmap_andnot(reserved, newapm, ap_perms.apm, AP_DEVICES)) {
+ rc = bus_for_each_drv(&ap_bus_type, NULL, reserved,
+ __verify_card_reservations);
+ if (rc)
+ return rc;
+ }
- rc = ap_parse_mask_str(buf, ap_perms.apm, AP_DEVICES, &ap_perms_mutex);
+ memcpy(ap_perms.apm, newapm, APMASKSIZE);
+
+ /*
+ * Update ap_apmask_aqmask_in_use. Note that the
+ * ap_attr_mutex has to be obtained here.
+ */
+ ap_apmask_aqmask_in_use =
+ bitmap_full(ap_perms.apm, AP_DEVICES) &&
+ bitmap_full(ap_perms.aqm, AP_DOMAINS) ?
+ false : true;
+
+ return 0;
+}
+
+static ssize_t apmask_store(const struct bus_type *bus, const char *buf,
+ size_t count)
+{
+ DECLARE_BITMAP(newapm, AP_DEVICES);
+ int rc = -EINVAL, changes = 0;
+
+ if (mutex_lock_interruptible(&ap_attr_mutex))
+ return -ERESTARTSYS;
+
+ /* Do not allow apmask/aqmask if driver override is active */
+ if (ap_driver_override_ctr)
+ goto done;
+
+ rc = ap_parse_bitmap_str(buf, ap_perms.apm, AP_DEVICES, newapm);
+ if (rc)
+ goto done;
+
+ changes = memcmp(ap_perms.apm, newapm, APMASKSIZE);
+ if (changes)
+ rc = apmask_commit(newapm);
+
+done:
+ mutex_unlock(&ap_attr_mutex);
if (rc)
return rc;
- ap_bus_revise_bindings();
+ if (changes) {
+ ap_bus_revise_bindings();
+ ap_send_mask_changed_uevent(newapm, NULL);
+ }
return count;
}
static BUS_ATTR_RW(apmask);
-static ssize_t aqmask_show(struct bus_type *bus, char *buf)
+static ssize_t aqmask_show(const struct bus_type *bus, char *buf)
{
int rc;
- if (mutex_lock_interruptible(&ap_perms_mutex))
+ if (mutex_lock_interruptible(&ap_attr_mutex))
return -ERESTARTSYS;
- rc = snprintf(buf, PAGE_SIZE,
- "0x%016lx%016lx%016lx%016lx\n",
- ap_perms.aqm[0], ap_perms.aqm[1],
- ap_perms.aqm[2], ap_perms.aqm[3]);
- mutex_unlock(&ap_perms_mutex);
+ rc = sysfs_emit(buf, "0x%016lx%016lx%016lx%016lx\n",
+ ap_perms.aqm[0], ap_perms.aqm[1],
+ ap_perms.aqm[2], ap_perms.aqm[3]);
+ mutex_unlock(&ap_attr_mutex);
return rc;
}
-static ssize_t aqmask_store(struct bus_type *bus, const char *buf,
- size_t count)
+static int __verify_queue_reservations(struct device_driver *drv, void *data)
+{
+ int rc = 0;
+ struct ap_driver *ap_drv = to_ap_drv(drv);
+ unsigned long *newaqm = (unsigned long *)data;
+ unsigned long apm_any[BITS_TO_LONGS(AP_DEVICES)];
+
+ /*
+ * increase the driver's module refcounter to be sure it is not
+ * going away when we invoke the callback function.
+ */
+ if (!try_module_get(drv->owner))
+ return 0;
+
+ if (ap_drv->in_use) {
+ bitmap_fill(apm_any, AP_DEVICES);
+ rc = ap_drv->in_use(apm_any, newaqm);
+ if (rc)
+ rc = -EBUSY;
+ }
+
+ /* release the driver's module */
+ module_put(drv->owner);
+
+ return rc;
+}
+
+static int aqmask_commit(unsigned long *newaqm)
{
int rc;
+ unsigned long reserved[BITS_TO_LONGS(AP_DOMAINS)];
+
+ /*
+ * Check if any bits in the aqmask have been set which will
+ * result in queues being removed from non-default drivers
+ */
+ if (bitmap_andnot(reserved, newaqm, ap_perms.aqm, AP_DOMAINS)) {
+ rc = bus_for_each_drv(&ap_bus_type, NULL, reserved,
+ __verify_queue_reservations);
+ if (rc)
+ return rc;
+ }
+
+ memcpy(ap_perms.aqm, newaqm, AQMASKSIZE);
+
+ /*
+	 * Update ap_apmask_aqmask_in_use. Note that the
+	 * caller must already hold the ap_attr_mutex here.
+ */
+ ap_apmask_aqmask_in_use =
+ bitmap_full(ap_perms.apm, AP_DEVICES) &&
+ bitmap_full(ap_perms.aqm, AP_DOMAINS) ?
+ false : true;
+
+ return 0;
+}
- rc = ap_parse_mask_str(buf, ap_perms.aqm, AP_DOMAINS, &ap_perms_mutex);
+static ssize_t aqmask_store(const struct bus_type *bus, const char *buf,
+ size_t count)
+{
+ DECLARE_BITMAP(newaqm, AP_DOMAINS);
+ int rc = -EINVAL, changes = 0;
+
+ if (mutex_lock_interruptible(&ap_attr_mutex))
+ return -ERESTARTSYS;
+
+ /* Do not allow apmask/aqmask if driver override is active */
+ if (ap_driver_override_ctr)
+ goto done;
+
+ rc = ap_parse_bitmap_str(buf, ap_perms.aqm, AP_DOMAINS, newaqm);
+ if (rc)
+ goto done;
+
+	changes = memcmp(ap_perms.aqm, newaqm, AQMASKSIZE);
+ if (changes)
+ rc = aqmask_commit(newaqm);
+
+done:
+ mutex_unlock(&ap_attr_mutex);
if (rc)
return rc;
- ap_bus_revise_bindings();
+ if (changes) {
+ ap_bus_revise_bindings();
+ ap_send_mask_changed_uevent(NULL, newaqm);
+ }
return count;
}
static BUS_ATTR_RW(aqmask);
-static struct bus_attribute *const ap_bus_attrs[] = {
- &bus_attr_ap_domain,
- &bus_attr_ap_control_domain_mask,
- &bus_attr_ap_usage_domain_mask,
- &bus_attr_ap_adapter_mask,
- &bus_attr_config_time,
- &bus_attr_poll_thread,
- &bus_attr_ap_interrupts,
- &bus_attr_poll_timeout,
- &bus_attr_ap_max_domain_id,
- &bus_attr_apmask,
- &bus_attr_aqmask,
+static ssize_t scans_show(const struct bus_type *bus, char *buf)
+{
+ return sysfs_emit(buf, "%llu\n", atomic64_read(&ap_scan_bus_count));
+}
+
+static ssize_t scans_store(const struct bus_type *bus, const char *buf,
+ size_t count)
+{
+ AP_DBF_INFO("%s force AP bus rescan\n", __func__);
+
+ ap_bus_force_rescan();
+
+ return count;
+}
+
+static BUS_ATTR_RW(scans);
+
+static ssize_t bindings_show(const struct bus_type *bus, char *buf)
+{
+ int rc;
+ unsigned int apqns, n;
+
+ ap_calc_bound_apqns(&apqns, &n);
+ if (atomic64_read(&ap_scan_bus_count) >= 1 && n == apqns)
+ rc = sysfs_emit(buf, "%u/%u (complete)\n", n, apqns);
+ else
+ rc = sysfs_emit(buf, "%u/%u\n", n, apqns);
+
+ return rc;
+}
+
+static BUS_ATTR_RO(bindings);
+
+static ssize_t bindings_complete_count_show(const struct bus_type *bus,
+ char *buf)
+{
+ return sysfs_emit(buf, "%llu\n",
+ atomic64_read(&ap_bindings_complete_count));
+}
+
+static BUS_ATTR_RO(bindings_complete_count);
+
+static ssize_t features_show(const struct bus_type *bus, char *buf)
+{
+ int n = 0;
+
+ if (!ap_qci_info->flags) /* QCI not supported */
+ return sysfs_emit(buf, "-\n");
+
+ if (ap_qci_info->apsc)
+ n += sysfs_emit_at(buf, n, "APSC ");
+ if (ap_qci_info->apxa)
+ n += sysfs_emit_at(buf, n, "APXA ");
+ if (ap_qci_info->qact)
+ n += sysfs_emit_at(buf, n, "QACT ");
+ if (ap_qci_info->rc8a)
+ n += sysfs_emit_at(buf, n, "RC8A ");
+ if (ap_qci_info->apsb)
+ n += sysfs_emit_at(buf, n, "APSB ");
+
+ sysfs_emit_at(buf, n == 0 ? 0 : n - 1, "\n");
+
+ return n;
+}
+
+static BUS_ATTR_RO(features);
+
+static struct attribute *ap_bus_attrs[] = {
+ &bus_attr_ap_domain.attr,
+ &bus_attr_ap_control_domain_mask.attr,
+ &bus_attr_ap_usage_domain_mask.attr,
+ &bus_attr_ap_adapter_mask.attr,
+ &bus_attr_config_time.attr,
+ &bus_attr_poll_thread.attr,
+ &bus_attr_ap_interrupts.attr,
+ &bus_attr_poll_timeout.attr,
+ &bus_attr_ap_max_domain_id.attr,
+ &bus_attr_ap_max_adapter_id.attr,
+ &bus_attr_apmask.attr,
+ &bus_attr_aqmask.attr,
+ &bus_attr_scans.attr,
+ &bus_attr_bindings.attr,
+ &bus_attr_bindings_complete_count.attr,
+ &bus_attr_features.attr,
NULL,
};
+ATTRIBUTE_GROUPS(ap_bus);
+
+static const struct bus_type ap_bus_type = {
+ .name = "ap",
+ .bus_groups = ap_bus_groups,
+ .match = &ap_bus_match,
+ .uevent = &ap_uevent,
+ .probe = ap_device_probe,
+ .remove = ap_device_remove,
+};
/**
* ap_select_domain(): Select an AP domain if possible and we haven't
@@ -1231,47 +1804,42 @@ static struct bus_attribute *const ap_bus_attrs[] = {
*/
static void ap_select_domain(void)
{
- int count, max_count, best_domain;
struct ap_queue_status status;
- int i, j;
+ int card, dom;
/*
- * We want to use a single domain. Either the one specified with
- * the "domain=" parameter or the domain with the maximum number
- * of devices.
+ * Choose the default domain. Either the one specified with
+ * the "domain=" parameter or the first domain with at least
+ * one valid APQN.
*/
spin_lock_bh(&ap_domain_lock);
if (ap_domain_index >= 0) {
/* Domain has already been selected. */
- spin_unlock_bh(&ap_domain_lock);
- return;
+ goto out;
}
- best_domain = -1;
- max_count = 0;
- for (i = 0; i < AP_DOMAINS; i++) {
- if (!ap_test_config_domain(i) ||
- !test_bit_inv(i, ap_perms.aqm))
+ for (dom = 0; dom <= ap_max_domain_id; dom++) {
+ if (!ap_test_config_usage_domain(dom) ||
+ !test_bit_inv(dom, ap_perms.aqm))
continue;
- count = 0;
- for (j = 0; j < AP_DEVICES; j++) {
- if (!ap_test_config_card_id(j))
+ for (card = 0; card <= ap_max_adapter_id; card++) {
+ if (!ap_test_config_card_id(card) ||
+ !test_bit_inv(card, ap_perms.apm))
continue;
- status = ap_test_queue(AP_MKQID(j, i),
+ status = ap_test_queue(AP_MKQID(card, dom),
ap_apft_available(),
NULL);
- if (status.response_code != AP_RESPONSE_NORMAL)
- continue;
- count++;
- }
- if (count > max_count) {
- max_count = count;
- best_domain = i;
+ if (status.response_code == AP_RESPONSE_NORMAL)
+ break;
}
+ if (card <= ap_max_adapter_id)
+ break;
}
- if (best_domain >= 0) {
- ap_domain_index = best_domain;
- AP_DBF(DBF_DEBUG, "new ap_domain_index=%d\n", ap_domain_index);
+ if (dom <= ap_max_domain_id) {
+ ap_domain_index = dom;
+ AP_DBF_INFO("%s new default domain is %d\n",
+ __func__, ap_domain_index);
}
+out:
spin_unlock_bh(&ap_domain_lock);
}
@@ -1284,35 +1852,41 @@ static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
{
int comp_type = 0;
- /* < CEX2A is not supported */
- if (rawtype < AP_DEVICE_TYPE_CEX2A)
+ /* < CEX4 is not supported */
+ if (rawtype < AP_DEVICE_TYPE_CEX4) {
+ AP_DBF_WARN("%s queue=%02x.%04x unsupported type %d\n",
+ __func__, AP_QID_CARD(qid),
+ AP_QID_QUEUE(qid), rawtype);
return 0;
- /* up to CEX6 known and fully supported */
- if (rawtype <= AP_DEVICE_TYPE_CEX6)
+ }
+ /* up to CEX8 known and fully supported */
+ if (rawtype <= AP_DEVICE_TYPE_CEX8)
return rawtype;
/*
- * unknown new type > CEX6, check for compatibility
+ * unknown new type > CEX8, check for compatibility
* to the highest known and supported type which is
- * currently CEX6 with the help of the QACT function.
+ * currently CEX8 with the help of the QACT function.
*/
if (ap_qact_available()) {
struct ap_queue_status status;
union ap_qact_ap_info apinfo = {0};
apinfo.mode = (func >> 26) & 0x07;
- apinfo.cat = AP_DEVICE_TYPE_CEX6;
+ apinfo.cat = AP_DEVICE_TYPE_CEX8;
status = ap_qact(qid, 0, &apinfo);
- if (status.response_code == AP_RESPONSE_NORMAL
- && apinfo.cat >= AP_DEVICE_TYPE_CEX2A
- && apinfo.cat <= AP_DEVICE_TYPE_CEX6)
+ if (status.response_code == AP_RESPONSE_NORMAL &&
+ apinfo.cat >= AP_DEVICE_TYPE_CEX4 &&
+ apinfo.cat <= AP_DEVICE_TYPE_CEX8)
comp_type = apinfo.cat;
}
if (!comp_type)
- AP_DBF(DBF_WARN, "queue=%02x.%04x unable to map type %d\n",
- AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype);
+ AP_DBF_WARN("%s queue=%02x.%04x unable to map type %d\n",
+ __func__, AP_QID_CARD(qid),
+ AP_QID_QUEUE(qid), rawtype);
else if (comp_type != rawtype)
- AP_DBF(DBF_INFO, "queue=%02x.%04x map type %d to %d\n",
- AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype, comp_type);
+ AP_DBF_INFO("%s queue=%02x.%04x map type %d to %d\n",
+ __func__, AP_QID_CARD(qid), AP_QID_QUEUE(qid),
+ rawtype, comp_type);
return comp_type;
}
@@ -1320,214 +1894,653 @@ static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func)
* Helper function to be used with bus_find_dev
* matches for the card device with the given id
*/
-static int __match_card_device_with_id(struct device *dev, void *data)
+static int __match_card_device_with_id(struct device *dev, const void *data)
{
- return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long) data;
+ return is_card_dev(dev) && to_ap_card(dev)->id == (int)(long)(void *)data;
}
/*
* Helper function to be used with bus_find_dev
* matches for the queue device with a given qid
*/
-static int __match_queue_device_with_qid(struct device *dev, void *data)
+static int __match_queue_device_with_qid(struct device *dev, const void *data)
{
- return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long) data;
+ return is_queue_dev(dev) && to_ap_queue(dev)->qid == (int)(long)data;
+}
+
+/*
+ * Helper function to be used with bus_find_dev
+ * matches any queue device with a given queue id
+ */
+static int __match_queue_device_with_queue_id(struct device *dev, const void *data)
+{
+ return is_queue_dev(dev) &&
+ AP_QID_QUEUE(to_ap_queue(dev)->qid) == (int)(long)data;
+}
+
+/* Helper function for notify_config_changed */
+static int __drv_notify_config_changed(struct device_driver *drv, void *data)
+{
+ struct ap_driver *ap_drv = to_ap_drv(drv);
+
+ if (try_module_get(drv->owner)) {
+ if (ap_drv->on_config_changed)
+ ap_drv->on_config_changed(ap_qci_info, ap_qci_info_old);
+ module_put(drv->owner);
+ }
+
+ return 0;
+}
+
+/* Notify all drivers about a QCI config change */
+static inline void notify_config_changed(void)
+{
+ bus_for_each_drv(&ap_bus_type, NULL, NULL,
+ __drv_notify_config_changed);
+}
+
+/* Helper function for notify_scan_complete */
+static int __drv_notify_scan_complete(struct device_driver *drv, void *data)
+{
+ struct ap_driver *ap_drv = to_ap_drv(drv);
+
+ if (try_module_get(drv->owner)) {
+ if (ap_drv->on_scan_complete)
+ ap_drv->on_scan_complete(ap_qci_info,
+ ap_qci_info_old);
+ module_put(drv->owner);
+ }
+
+ return 0;
+}
+
+/* Notify all drivers about bus scan complete */
+static inline void notify_scan_complete(void)
+{
+ bus_for_each_drv(&ap_bus_type, NULL, NULL,
+ __drv_notify_scan_complete);
}
/*
* Helper function for ap_scan_bus().
- * Does the scan bus job for the given adapter id.
+ * Remove card device and associated queue devices.
*/
-static void _ap_scan_bus_adapter(int id)
+static inline void ap_scan_rm_card_dev_and_queue_devs(struct ap_card *ac)
{
+ bus_for_each_dev(&ap_bus_type, NULL,
+ (void *)(long)ac->id,
+ __ap_queue_devices_with_id_unregister);
+ device_unregister(&ac->ap_dev.device);
+}
+
+/*
+ * Helper function for ap_scan_bus().
+ * Does the scan bus job for all the domains within
+ * a valid adapter given by an ap_card ptr.
+ */
+static inline void ap_scan_domains(struct ap_card *ac)
+{
+ struct ap_tapq_hwinfo hwinfo;
+ bool decfg, chkstop;
+ struct ap_queue *aq;
+ struct device *dev;
ap_qid_t qid;
- unsigned int func;
+ int rc, dom;
+
+ /*
+ * Go through the configuration for the domains and compare them
+ * to the existing queue devices. Also take care of the config
+ * and error state for the queue devices.
+ */
+
+ for (dom = 0; dom <= ap_max_domain_id; dom++) {
+ qid = AP_MKQID(ac->id, dom);
+ dev = bus_find_device(&ap_bus_type, NULL,
+ (void *)(long)qid,
+ __match_queue_device_with_qid);
+ aq = dev ? to_ap_queue(dev) : NULL;
+ if (!ap_test_config_usage_domain(dom)) {
+ if (dev) {
+ AP_DBF_INFO("%s(%d,%d) not in config anymore, rm queue dev\n",
+ __func__, ac->id, dom);
+ device_unregister(dev);
+ }
+ goto put_dev_and_continue;
+ }
+ /* domain is valid, get info from this APQN */
+ rc = ap_queue_info(qid, &hwinfo, &decfg, &chkstop);
+ switch (rc) {
+ case -1:
+ if (dev) {
+ AP_DBF_INFO("%s(%d,%d) queue_info() failed, rm queue dev\n",
+ __func__, ac->id, dom);
+ device_unregister(dev);
+ }
+ fallthrough;
+ case 0:
+ goto put_dev_and_continue;
+ default:
+ break;
+ }
+ /* if no queue device exists, create a new one */
+ if (!aq) {
+ aq = ap_queue_create(qid, ac);
+ if (!aq) {
+ AP_DBF_WARN("%s(%d,%d) ap_queue_create() failed\n",
+ __func__, ac->id, dom);
+ continue;
+ }
+ aq->config = !decfg;
+ aq->chkstop = chkstop;
+ aq->se_bstate = hwinfo.bs;
+ dev = &aq->ap_dev.device;
+ dev->bus = &ap_bus_type;
+ dev->parent = &ac->ap_dev.device;
+ dev_set_name(dev, "%02x.%04x", ac->id, dom);
+ /* register queue device */
+ rc = device_register(dev);
+ if (rc) {
+ AP_DBF_WARN("%s(%d,%d) device_register() failed\n",
+ __func__, ac->id, dom);
+ goto put_dev_and_continue;
+ }
+ /* get it and thus adjust reference counter */
+ get_device(dev);
+ if (decfg) {
+ AP_DBF_INFO("%s(%d,%d) new (decfg) queue dev created\n",
+ __func__, ac->id, dom);
+ } else if (chkstop) {
+ AP_DBF_INFO("%s(%d,%d) new (chkstop) queue dev created\n",
+ __func__, ac->id, dom);
+ } else {
+ /* nudge the queue's state machine */
+ ap_queue_init_state(aq);
+ AP_DBF_INFO("%s(%d,%d) new queue dev created\n",
+ __func__, ac->id, dom);
+ }
+ goto put_dev_and_continue;
+ }
+ /* handle state changes on already existing queue device */
+ spin_lock_bh(&aq->lock);
+ /* SE bind state */
+ aq->se_bstate = hwinfo.bs;
+ /* checkstop state */
+ if (chkstop && !aq->chkstop) {
+ /* checkstop on */
+ aq->chkstop = true;
+ if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = AP_RESPONSE_CHECKSTOPPED;
+ }
+ spin_unlock_bh(&aq->lock);
+ pr_debug("(%d,%d) queue dev checkstop on\n",
+ ac->id, dom);
+ /* 'receive' pending messages with -EAGAIN */
+ ap_flush_queue(aq);
+ goto put_dev_and_continue;
+ } else if (!chkstop && aq->chkstop) {
+ /* checkstop off */
+ aq->chkstop = false;
+ if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
+ _ap_queue_init_state(aq);
+ spin_unlock_bh(&aq->lock);
+ pr_debug("(%d,%d) queue dev checkstop off\n",
+ ac->id, dom);
+ goto put_dev_and_continue;
+ }
+ /* config state change */
+ if (decfg && aq->config) {
+ /* config off this queue device */
+ aq->config = false;
+ if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = AP_RESPONSE_DECONFIGURED;
+ }
+ spin_unlock_bh(&aq->lock);
+ pr_debug("(%d,%d) queue dev config off\n",
+ ac->id, dom);
+ ap_send_config_uevent(&aq->ap_dev, aq->config);
+ /* 'receive' pending messages with -EAGAIN */
+ ap_flush_queue(aq);
+ goto put_dev_and_continue;
+ } else if (!decfg && !aq->config) {
+ /* config on this queue device */
+ aq->config = true;
+ if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
+ _ap_queue_init_state(aq);
+ spin_unlock_bh(&aq->lock);
+ pr_debug("(%d,%d) queue dev config on\n",
+ ac->id, dom);
+ ap_send_config_uevent(&aq->ap_dev, aq->config);
+ goto put_dev_and_continue;
+ }
+ /* handle other error states */
+ if (!decfg && aq->dev_state == AP_DEV_STATE_ERROR) {
+ spin_unlock_bh(&aq->lock);
+ /* 'receive' pending messages with -EAGAIN */
+ ap_flush_queue(aq);
+ /* re-init (with reset) the queue device */
+ ap_queue_init_state(aq);
+ AP_DBF_INFO("%s(%d,%d) queue dev reinit enforced\n",
+ __func__, ac->id, dom);
+ goto put_dev_and_continue;
+ }
+ spin_unlock_bh(&aq->lock);
+put_dev_and_continue:
+ put_device(dev);
+ }
+}
+
+/*
+ * Helper function for ap_scan_bus().
+ * Does the scan bus job for the given adapter id.
+ */
+static inline void ap_scan_adapter(int ap)
+{
+ struct ap_tapq_hwinfo hwinfo;
+ int rc, dom, comp_type;
+ bool decfg, chkstop;
struct ap_card *ac;
struct device *dev;
- struct ap_queue *aq;
- int rc, dom, depth, type, comp_type, borked;
+ ap_qid_t qid;
- /* check if there is a card device registered with this id */
+	/* Is there currently a card device for this adapter? */
dev = bus_find_device(&ap_bus_type, NULL,
- (void *)(long) id,
+ (void *)(long)ap,
__match_card_device_with_id);
ac = dev ? to_ap_card(dev) : NULL;
- if (!ap_test_config_card_id(id)) {
- if (dev) {
- /* Card device has been removed from configuration */
- bus_for_each_dev(&ap_bus_type, NULL,
- (void *)(long) id,
- __ap_queue_devices_with_id_unregister);
- device_unregister(dev);
+
+ /* Adapter not in configuration ? */
+ if (!ap_test_config_card_id(ap)) {
+ if (ac) {
+ AP_DBF_INFO("%s(%d) ap not in config any more, rm card and queue devs\n",
+ __func__, ap);
+ ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
}
return;
}
/*
- * This card id is enabled in the configuration. If we already have
- * a card device with this id, check if type and functions are still
- * the very same. Also verify that at least one queue is available.
+ * Adapter ap is valid in the current configuration. So do some checks:
+	 * If no card device exists, build one. If a card device exists, check
+	 * whether type or functions have changed. For all this we need to
+	 * find a valid APQN first.
*/
- if (ac) {
- /* find the first valid queue */
- for (dom = 0; dom < AP_DOMAINS; dom++) {
- qid = AP_MKQID(id, dom);
- if (ap_query_queue(qid, &depth, &type, &func) == 0)
+
+ for (dom = 0; dom <= ap_max_domain_id; dom++)
+ if (ap_test_config_usage_domain(dom)) {
+ qid = AP_MKQID(ap, dom);
+ if (ap_queue_info(qid, &hwinfo, &decfg, &chkstop) > 0)
break;
}
- borked = 0;
- if (dom >= AP_DOMAINS) {
- /* no accessible queue on this card */
- borked = 1;
- } else if (ac->raw_hwtype != type) {
- /* card type has changed */
- AP_DBF(DBF_INFO, "card=%02x type changed.\n", id);
- borked = 1;
- } else if (ac->functions != func) {
- /* card functions have changed */
- AP_DBF(DBF_INFO, "card=%02x functions changed.\n", id);
- borked = 1;
+ if (dom > ap_max_domain_id) {
+ /* Could not find one valid APQN for this adapter */
+ if (ac) {
+ AP_DBF_INFO("%s(%d) no type info (no APQN found), rm card and queue devs\n",
+ __func__, ap);
+ ap_scan_rm_card_dev_and_queue_devs(ac);
+ put_device(dev);
+ } else {
+ pr_debug("(%d) no type info (no APQN found), ignored\n",
+ ap);
}
- if (borked) {
- /* unregister card device and associated queues */
- bus_for_each_dev(&ap_bus_type, NULL,
- (void *)(long) id,
- __ap_queue_devices_with_id_unregister);
- device_unregister(dev);
+ return;
+ }
+ if (!hwinfo.at) {
+		/* No adapter type info available, an unusable adapter */
+ if (ac) {
+ AP_DBF_INFO("%s(%d) no valid type (0) info, rm card and queue devs\n",
+ __func__, ap);
+ ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
- /* go back if there is no valid queue on this card */
- if (dom >= AP_DOMAINS)
- return;
- ac = NULL;
+ } else {
+ pr_debug("(%d) no valid type (0) info, ignored\n", ap);
}
+ return;
}
-
- /*
- * Go through all possible queue ids. Check and maybe create or release
- * queue devices for this card. If there exists no card device yet,
- * create a card device also.
- */
- for (dom = 0; dom < AP_DOMAINS; dom++) {
- qid = AP_MKQID(id, dom);
- dev = bus_find_device(&ap_bus_type, NULL,
- (void *)(long) qid,
- __match_queue_device_with_qid);
- aq = dev ? to_ap_queue(dev) : NULL;
- if (!ap_test_config_domain(dom)) {
- if (dev) {
- /* Queue device exists but has been
- * removed from configuration.
- */
- device_unregister(dev);
- put_device(dev);
+ hwinfo.value &= TAPQ_CARD_HWINFO_MASK; /* filter card specific hwinfo */
+ if (ac) {
+ /* Check APQN against existing card device for changes */
+ if (ac->hwinfo.at != hwinfo.at) {
+ AP_DBF_INFO("%s(%d) hwtype %d changed, rm card and queue devs\n",
+ __func__, ap, hwinfo.at);
+ ap_scan_rm_card_dev_and_queue_devs(ac);
+ put_device(dev);
+ ac = NULL;
+ } else if (ac->hwinfo.fac != hwinfo.fac) {
+ AP_DBF_INFO("%s(%d) functions 0x%08x changed, rm card and queue devs\n",
+ __func__, ap, hwinfo.fac);
+ ap_scan_rm_card_dev_and_queue_devs(ac);
+ put_device(dev);
+ ac = NULL;
+ } else {
+ /* handle checkstop state change */
+ if (chkstop && !ac->chkstop) {
+ /* checkstop on */
+ ac->chkstop = true;
+ AP_DBF_INFO("%s(%d) card dev checkstop on\n",
+ __func__, ap);
+ } else if (!chkstop && ac->chkstop) {
+ /* checkstop off */
+ ac->chkstop = false;
+ AP_DBF_INFO("%s(%d) card dev checkstop off\n",
+ __func__, ap);
}
- continue;
- }
- /* try to fetch infos about this queue */
- rc = ap_query_queue(qid, &depth, &type, &func);
- if (dev) {
- if (rc == -ENODEV)
- borked = 1;
- else {
- spin_lock_bh(&aq->lock);
- borked = aq->state == AP_STATE_BORKED;
- spin_unlock_bh(&aq->lock);
+ /* handle config state change */
+ if (decfg && ac->config) {
+ ac->config = false;
+ AP_DBF_INFO("%s(%d) card dev config off\n",
+ __func__, ap);
+ ap_send_config_uevent(&ac->ap_dev, ac->config);
+ } else if (!decfg && !ac->config) {
+ ac->config = true;
+ AP_DBF_INFO("%s(%d) card dev config on\n",
+ __func__, ap);
+ ap_send_config_uevent(&ac->ap_dev, ac->config);
}
- if (borked) /* Remove broken device */
- device_unregister(dev);
- put_device(dev);
- continue;
}
- if (rc)
- continue;
- /* a new queue device is needed, check out comp type */
- comp_type = ap_get_compatible_type(qid, type, func);
- if (!comp_type)
- continue;
- /* maybe a card device needs to be created first */
+ }
+
+ if (!ac) {
+ /* Build a new card device */
+ comp_type = ap_get_compatible_type(qid, hwinfo.at, hwinfo.fac);
+ if (!comp_type) {
+ AP_DBF_WARN("%s(%d) type %d, can't get compatibility type\n",
+ __func__, ap, hwinfo.at);
+ return;
+ }
+ ac = ap_card_create(ap, hwinfo, comp_type);
if (!ac) {
- ac = ap_card_create(id, depth, type, comp_type, func);
- if (!ac)
- continue;
- ac->ap_dev.device.bus = &ap_bus_type;
- ac->ap_dev.device.parent = ap_root_device;
- dev_set_name(&ac->ap_dev.device, "card%02x", id);
- /* Register card device with AP bus */
- rc = device_register(&ac->ap_dev.device);
- if (rc) {
- put_device(&ac->ap_dev.device);
- ac = NULL;
- break;
- }
- /* get it and thus adjust reference counter */
- get_device(&ac->ap_dev.device);
+ AP_DBF_WARN("%s(%d) ap_card_create() failed\n",
+ __func__, ap);
+ return;
}
- /* now create the new queue device */
- aq = ap_queue_create(qid, comp_type);
- if (!aq)
- continue;
- aq->card = ac;
- aq->ap_dev.device.bus = &ap_bus_type;
- aq->ap_dev.device.parent = &ac->ap_dev.device;
- dev_set_name(&aq->ap_dev.device, "%02x.%04x", id, dom);
- /* Register queue device */
- rc = device_register(&aq->ap_dev.device);
+ ac->config = !decfg;
+ ac->chkstop = chkstop;
+ dev = &ac->ap_dev.device;
+ dev->bus = &ap_bus_type;
+ dev->parent = ap_root_device;
+ dev_set_name(dev, "card%02x", ap);
+ /* maybe enlarge ap_max_msg_size to support this card */
+ if (ac->maxmsgsize > atomic_read(&ap_max_msg_size)) {
+ atomic_set(&ap_max_msg_size, ac->maxmsgsize);
+ AP_DBF_INFO("%s(%d) ap_max_msg_size update to %d byte\n",
+ __func__, ap,
+ atomic_read(&ap_max_msg_size));
+ }
+ /* Register the new card device with AP bus */
+ rc = device_register(dev);
if (rc) {
- put_device(&aq->ap_dev.device);
- continue;
+ AP_DBF_WARN("%s(%d) device_register() failed\n",
+ __func__, ap);
+ put_device(dev);
+ return;
}
- } /* end domain loop */
+ /* get it and thus adjust reference counter */
+ get_device(dev);
+ if (decfg)
+ AP_DBF_INFO("%s(%d) new (decfg) card dev type=%d func=0x%08x created\n",
+ __func__, ap, hwinfo.at, hwinfo.fac);
+ else if (chkstop)
+ AP_DBF_INFO("%s(%d) new (chkstop) card dev type=%d func=0x%08x created\n",
+ __func__, ap, hwinfo.at, hwinfo.fac);
+ else
+ AP_DBF_INFO("%s(%d) new card dev type=%d func=0x%08x created\n",
+ __func__, ap, hwinfo.at, hwinfo.fac);
+ }
+
+ /* Verify the domains and the queue devices for this card */
+ ap_scan_domains(ac);
- if (ac)
- put_device(&ac->ap_dev.device);
+ /* release the card device */
+ put_device(&ac->ap_dev.device);
}
/**
- * ap_scan_bus(): Scan the AP bus for new devices
- * Runs periodically, workqueue timer (ap_config_time)
+ * ap_get_configuration - get the host AP configuration
+ *
+ * Stores the host AP configuration information returned from the previous call
+ * to Query Configuration Information (QCI), then retrieves and stores the
+ * current AP configuration returned from QCI.
+ *
+ * Return: true if the host AP configuration changed between calls to QCI;
+ * otherwise, return false.
+ */
+static bool ap_get_configuration(void)
+{
+ if (!ap_qci_info->flags) /* QCI not supported */
+ return false;
+
+ memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
+ ap_qci(ap_qci_info);
+
+ return memcmp(ap_qci_info, ap_qci_info_old,
+ sizeof(struct ap_config_info)) != 0;
+}
+
+/*
+ * ap_config_has_new_aps - Check current against old qci info if
+ * new adapters have appeared. Returns true if at least one new
+ * adapter in the apm mask is showing up. Existing adapters or
+ * receding adapters are not counted.
*/
-static void ap_scan_bus(struct work_struct *unused)
+static bool ap_config_has_new_aps(void)
{
- int id;
- AP_DBF(DBF_DEBUG, "%s running\n", __func__);
+ unsigned long m[BITS_TO_LONGS(AP_DEVICES)];
+
+ if (!ap_qci_info->flags)
+ return false;
- ap_query_configuration(ap_configuration);
+ bitmap_andnot(m, (unsigned long *)ap_qci_info->apm,
+ (unsigned long *)ap_qci_info_old->apm, AP_DEVICES);
+ if (!bitmap_empty(m, AP_DEVICES))
+ return true;
+
+ return false;
+}
+
+/*
+ * ap_config_has_new_doms - Check current against old qci info
+ * whether new (usage) domains have appeared. Returns true if at
+ * least one new domain in the aqm mask is showing up. Existing or
+ * vanishing domains are not counted.
+ */
+static bool ap_config_has_new_doms(void)
+{
+ unsigned long m[BITS_TO_LONGS(AP_DOMAINS)];
+
+ if (!ap_qci_info->flags)
+ return false;
+
+ bitmap_andnot(m, (unsigned long *)ap_qci_info->aqm,
+ (unsigned long *)ap_qci_info_old->aqm, AP_DOMAINS);
+ if (!bitmap_empty(m, AP_DOMAINS))
+ return true;
+
+ return false;
+}
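
Both helpers above use the same idiom: bits set in the new mask but absent from the old one survive bitmap_andnot(), whose return value indicates a non-empty result. A self-contained sketch:

/*
 * Sketch of the "new bits appeared" check, e.g. new = 0b1011 and
 * old = 0b0011 yield diff = 0b1000 (non-empty, so something new
 * showed up). Assumes nbits <= AP_DEVICES so diff is large enough.
 */
static bool bits_appeared(const unsigned long *newm,
			  const unsigned long *oldm, unsigned int nbits)
{
	unsigned long diff[BITS_TO_LONGS(AP_DEVICES)];

	/* bitmap_andnot() returns true if any bit is set in diff */
	return bitmap_andnot(diff, newm, oldm, nbits);
}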
+
+/**
+ * ap_scan_bus(): Scan the AP bus for new devices
+ * Always run under mutex ap_scan_bus_mutex protection
+ * which needs to get locked/unlocked by the caller!
+ * Returns true if any config change has been detected
+ * during the scan, otherwise false.
+ */
+static bool ap_scan_bus(void)
+{
+ bool config_changed;
+ int ap;
+
+ pr_debug(">\n");
+
+ /* (re-)fetch configuration via QCI */
+ config_changed = ap_get_configuration();
+ if (config_changed) {
+ if (ap_config_has_new_aps() || ap_config_has_new_doms()) {
+ /*
+			 * The appearance of new adapters and/or domains means
+			 * new ap devices have to be built and bound to a
+			 * device driver. Thus reset the "APQN bindings
+			 * complete" completion.
+ */
+ reinit_completion(&ap_apqn_bindings_complete);
+ }
+ /* post a config change notify */
+ notify_config_changed();
+ }
ap_select_domain();
/* loop over all possible adapters */
- for (id = 0; id < AP_DEVICES; id++)
- _ap_scan_bus_adapter(id);
+ for (ap = 0; ap <= ap_max_adapter_id; ap++)
+ ap_scan_adapter(ap);
+
+ /* scan complete notify */
+ if (config_changed)
+ notify_scan_complete();
/* check if there is at least one queue available with default domain */
if (ap_domain_index >= 0) {
struct device *dev =
bus_find_device(&ap_bus_type, NULL,
- (void *)(long) ap_domain_index,
- __match_queue_device_with_qid);
+ (void *)(long)ap_domain_index,
+ __match_queue_device_with_queue_id);
if (dev)
put_device(dev);
else
- AP_DBF(DBF_INFO,
- "no queue device with default domain %d available\n",
- ap_domain_index);
+ AP_DBF_INFO("%s no queue device with default domain %d available\n",
+ __func__, ap_domain_index);
}
- mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
+ if (atomic64_inc_return(&ap_scan_bus_count) == 1) {
+ pr_debug("init scan complete\n");
+ ap_send_init_scan_done_uevent();
+ }
+
+ ap_check_bindings_complete();
+
+ mod_timer(&ap_scan_bus_timer, jiffies + ap_scan_bus_time * HZ);
+
+ pr_debug("< config_changed=%d\n", config_changed);
+
+ return config_changed;
}
-static void ap_config_timeout(struct timer_list *unused)
+/*
+ * Callback for the ap_scan_bus_timer
+ * Runs periodically, workqueue timer (ap_scan_bus_time)
+ */
+static void ap_scan_bus_timer_callback(struct timer_list *unused)
{
- if (ap_suspend_flag)
- return;
- queue_work(system_long_wq, &ap_scan_work);
+ /*
+	 * schedule work on the system long wq which, when
+	 * finally executed, runs the AP bus scan.
+ */
+ queue_work(system_long_wq, &ap_scan_bus_work);
+}
+
+/*
+ * Callback for the ap_scan_bus_work
+ */
+static void ap_scan_bus_wq_callback(struct work_struct *unused)
+{
+ /*
+	 * Try to invoke ap_scan_bus(). If the mutex acquisition
+	 * fails, another task is already running the AP bus scan
+	 * and there is no need to wait and re-trigger the scan.
+	 * Note that at the end of the scan function the AP scan bus
+	 * timer is re-armed; it triggers ap_scan_bus_timer_callback,
+	 * which enqueues work on the system_long_wq that invokes
+	 * this function here again.
+ */
+ if (mutex_trylock(&ap_scan_bus_mutex)) {
+ ap_scan_bus_task = current;
+ ap_scan_bus_result = ap_scan_bus();
+ ap_scan_bus_task = NULL;
+ mutex_unlock(&ap_scan_bus_mutex);
+ }
}
-static int __init ap_debug_init(void)
+static inline void __exit ap_async_exit(void)
{
- ap_dbf_info = debug_register("ap", 1, 1,
- DBF_MAX_SPRINTF_ARGS * sizeof(long));
+ if (ap_thread_flag)
+ ap_poll_thread_stop();
+ chsc_notifier_unregister(&ap_bus_nb);
+ cancel_work(&ap_scan_bus_work);
+ hrtimer_cancel(&ap_poll_timer);
+ timer_delete(&ap_scan_bus_timer);
+}
+
+static inline int __init ap_async_init(void)
+{
+ int rc;
+
+ /* Setup the AP bus rescan timer. */
+ timer_setup(&ap_scan_bus_timer, ap_scan_bus_timer_callback, 0);
+
+ /*
+ * Setup the high resolution poll timer.
+ * If we are running under z/VM adjust polling to z/VM polling rate.
+ */
+ if (machine_is_vm())
+ poll_high_timeout = 1500000;
+ hrtimer_setup(&ap_poll_timer, ap_poll_timeout, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+
+ queue_work(system_long_wq, &ap_scan_bus_work);
+
+ rc = chsc_notifier_register(&ap_bus_nb);
+ if (rc)
+ goto out;
+
+ /* Start the low priority AP bus poll thread. */
+ if (!ap_thread_flag)
+ return 0;
+
+ rc = ap_poll_thread_start();
+ if (rc)
+ goto out_notifier;
+
+ return 0;
+
+out_notifier:
+ chsc_notifier_unregister(&ap_bus_nb);
+out:
+ cancel_work(&ap_scan_bus_work);
+ hrtimer_cancel(&ap_poll_timer);
+ timer_delete(&ap_scan_bus_timer);
+ return rc;
+}
+
+static inline void ap_irq_exit(void)
+{
+ if (ap_irq_flag)
+ unregister_adapter_interrupt(&ap_airq);
+}
+
+static inline int __init ap_irq_init(void)
+{
+ int rc;
+
+ if (!ap_interrupts_available() || !ap_useirq)
+ return 0;
+
+ rc = register_adapter_interrupt(&ap_airq);
+ ap_irq_flag = (rc == 0);
+
+ return rc;
+}
+
+static inline void ap_debug_exit(void)
+{
+ debug_unregister(ap_dbf_info);
+}
+
+static inline int __init ap_debug_init(void)
+{
+ ap_dbf_info = debug_register("ap", 2, 1,
+ AP_DBF_MAX_SPRINTF_ARGS * sizeof(long));
debug_register_view(ap_dbf_info, &debug_sprintf_view);
debug_set_level(ap_dbf_info, DBF_ERR);
@@ -1536,7 +2549,7 @@ static int __init ap_debug_init(void)
static void __init ap_perms_init(void)
{
- /* all resources useable if no kernel parameter string given */
+ /* all resources usable if no kernel parameter string given */
memset(&ap_perms.ioctlm, 0xFF, sizeof(ap_perms.ioctlm));
memset(&ap_perms.apm, 0xFF, sizeof(ap_perms.apm));
memset(&ap_perms.aqm, 0xFF, sizeof(ap_perms.aqm));
@@ -1545,14 +2558,14 @@ static void __init ap_perms_init(void)
if (apm_str) {
memset(&ap_perms.apm, 0, sizeof(ap_perms.apm));
ap_parse_mask_str(apm_str, ap_perms.apm, AP_DEVICES,
- &ap_perms_mutex);
+ &ap_attr_mutex);
}
/* aqm kernel parameter string */
if (aqm_str) {
memset(&ap_perms.aqm, 0, sizeof(ap_perms.aqm));
ap_parse_mask_str(aqm_str, ap_perms.aqm, AP_DOMAINS,
- &ap_perms_mutex);
+ &ap_attr_mutex);
}
}
@@ -1563,105 +2576,88 @@ static void __init ap_perms_init(void)
*/
static int __init ap_module_init(void)
{
- int max_domain_id;
- int rc, i;
+ int rc;
+
+ if (!ap_instructions_available()) {
+ pr_warn("The hardware system does not support AP instructions\n");
+ return -ENODEV;
+ }
rc = ap_debug_init();
if (rc)
return rc;
- if (!ap_instructions_available()) {
- pr_warn("The hardware system does not support AP instructions\n");
- return -ENODEV;
+ /* init ap_queue hashtable */
+ hash_init(ap_queues);
+
+ /* create ap msg buffer memory pool */
+ ap_msg_pool = mempool_create_kmalloc_pool(ap_msg_pool_min_items,
+ AP_DEFAULT_MAX_MSG_SIZE);
+ if (!ap_msg_pool) {
+ rc = -ENOMEM;
+ goto out;
}
/* set up the AP permissions (ioctls, ap and aq masks) */
ap_perms_init();
/* Get AP configuration data if available */
- ap_init_configuration();
+ ap_init_qci_info();
- if (ap_configuration)
- max_domain_id =
- ap_max_domain_id ? ap_max_domain_id : AP_DOMAINS - 1;
- else
- max_domain_id = 15;
- if (ap_domain_index < -1 || ap_domain_index > max_domain_id ||
+ /* check default domain setting */
+ if (ap_domain_index < -1 || ap_domain_index > ap_max_domain_id ||
(ap_domain_index >= 0 &&
!test_bit_inv(ap_domain_index, ap_perms.aqm))) {
pr_warn("%d is not a valid cryptographic domain\n",
ap_domain_index);
ap_domain_index = -1;
}
- /* In resume callback we need to know if the user had set the domain.
- * If so, we can not just reset it.
- */
- if (ap_domain_index >= 0)
- user_set_domain = 1;
-
- if (ap_interrupts_available()) {
- rc = register_adapter_interrupt(&ap_airq);
- ap_airq_flag = (rc == 0);
- }
/* Create /sys/bus/ap. */
rc = bus_register(&ap_bus_type);
if (rc)
goto out;
- for (i = 0; ap_bus_attrs[i]; i++) {
- rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
- if (rc)
- goto out_bus;
- }
/* Create /sys/devices/ap. */
ap_root_device = root_device_register("ap");
rc = PTR_ERR_OR_ZERO(ap_root_device);
if (rc)
goto out_bus;
+ ap_root_device->bus = &ap_bus_type;
- /* Setup the AP bus rescan timer. */
- timer_setup(&ap_config_timer, ap_config_timeout, 0);
-
- /*
- * Setup the high resultion poll timer.
- * If we are running under z/VM adjust polling to z/VM polling rate.
- */
- if (MACHINE_IS_VM)
- poll_timeout = 1500000;
- spin_lock_init(&ap_poll_timer_lock);
- hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- ap_poll_timer.function = ap_poll_timeout;
-
- /* Start the low priority AP bus poll thread. */
- if (ap_thread_flag) {
- rc = ap_poll_thread_start();
- if (rc)
- goto out_work;
- }
-
- rc = register_pm_notifier(&ap_power_notifier);
+ /* enable interrupts if available */
+ rc = ap_irq_init();
if (rc)
- goto out_pm;
+ goto out_device;
- queue_work(system_long_wq, &ap_scan_work);
- initialised = true;
+ /* Setup asynchronous work (timers, workqueue, etc). */
+ rc = ap_async_init();
+ if (rc)
+ goto out_irq;
return 0;
-out_pm:
- ap_poll_thread_stop();
-out_work:
- hrtimer_cancel(&ap_poll_timer);
+out_irq:
+ ap_irq_exit();
+out_device:
root_device_unregister(ap_root_device);
out_bus:
- while (i--)
- bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
bus_unregister(&ap_bus_type);
out:
- if (ap_using_interrupts())
- unregister_adapter_interrupt(&ap_airq);
- kfree(ap_configuration);
+ mempool_destroy(ap_msg_pool);
+ ap_debug_exit();
return rc;
}
-device_initcall(ap_module_init);
+
+static void __exit ap_module_exit(void)
+{
+ ap_async_exit();
+ ap_irq_exit();
+ root_device_unregister(ap_root_device);
+ bus_unregister(&ap_bus_type);
+ mempool_destroy(ap_msg_pool);
+ ap_debug_exit();
+}
+
+module_init(ap_module_init);
+module_exit(ap_module_exit);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index bfc66e4a9de1..51e08f27bd75 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * Copyright IBM Corp. 2006, 2012
+ * Copyright IBM Corp. 2006, 2023
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
@@ -15,6 +15,7 @@
#include <linux/device.h>
#include <linux/types.h>
+#include <linux/hashtable.h>
#include <asm/isc.h>
#include <asm/ap.h>
@@ -24,95 +25,100 @@
#define AP_RESET_TIMEOUT (HZ*0.7) /* Time in ticks for reset timeouts. */
#define AP_CONFIG_TIME 30 /* Time in seconds between AP bus rescans. */
#define AP_POLL_TIME 1 /* Time in ticks between receive polls. */
+#define AP_DEFAULT_MAX_MSG_SIZE (12 * 1024)
+#define AP_TAPQ_ML_FIELD_CHUNK_SIZE (4096)
extern int ap_domain_index;
+extern atomic_t ap_max_msg_size;
-extern spinlock_t ap_list_lock;
-extern struct list_head ap_card_list;
+extern DECLARE_HASHTABLE(ap_queues, 8);
+extern spinlock_t ap_queues_lock;
static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
{
return (*ptr & (0x80000000u >> nr)) != 0;
}
-#define AP_RESPONSE_NORMAL 0x00
-#define AP_RESPONSE_Q_NOT_AVAIL 0x01
-#define AP_RESPONSE_RESET_IN_PROGRESS 0x02
-#define AP_RESPONSE_DECONFIGURED 0x03
-#define AP_RESPONSE_CHECKSTOPPED 0x04
-#define AP_RESPONSE_BUSY 0x05
-#define AP_RESPONSE_INVALID_ADDRESS 0x06
-#define AP_RESPONSE_OTHERWISE_CHANGED 0x07
-#define AP_RESPONSE_Q_FULL 0x10
-#define AP_RESPONSE_NO_PENDING_REPLY 0x10
-#define AP_RESPONSE_INDEX_TOO_BIG 0x11
-#define AP_RESPONSE_NO_FIRST_PART 0x13
-#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
-#define AP_RESPONSE_REQ_FAC_NOT_INST 0x16
+#define AP_RESPONSE_NORMAL 0x00
+#define AP_RESPONSE_Q_NOT_AVAIL 0x01
+#define AP_RESPONSE_RESET_IN_PROGRESS 0x02
+#define AP_RESPONSE_DECONFIGURED 0x03
+#define AP_RESPONSE_CHECKSTOPPED 0x04
+#define AP_RESPONSE_BUSY 0x05
+#define AP_RESPONSE_INVALID_ADDRESS 0x06
+#define AP_RESPONSE_OTHERWISE_CHANGED 0x07
+#define AP_RESPONSE_INVALID_GISA 0x08
+#define AP_RESPONSE_Q_BOUND_TO_ANOTHER 0x09
+#define AP_RESPONSE_STATE_CHANGE_IN_PROGRESS 0x0A
+#define AP_RESPONSE_Q_NOT_BOUND 0x0B
+#define AP_RESPONSE_Q_FULL 0x10
+#define AP_RESPONSE_NO_PENDING_REPLY 0x10
+#define AP_RESPONSE_INDEX_TOO_BIG 0x11
+#define AP_RESPONSE_NO_FIRST_PART 0x13
+#define AP_RESPONSE_MESSAGE_TOO_BIG 0x15
+#define AP_RESPONSE_REQ_FAC_NOT_INST 0x16
+#define AP_RESPONSE_Q_BIND_ERROR 0x30
+#define AP_RESPONSE_Q_NOT_AVAIL_FOR_ASSOC 0x31
+#define AP_RESPONSE_Q_NOT_EMPTY 0x32
+#define AP_RESPONSE_BIND_LIMIT_EXCEEDED 0x33
+#define AP_RESPONSE_INVALID_ASSOC_SECRET 0x34
+#define AP_RESPONSE_ASSOC_SECRET_NOT_UNIQUE 0x35
+#define AP_RESPONSE_ASSOC_FAILED 0x36
+#define AP_RESPONSE_INVALID_DOMAIN 0x42
/*
- * Known device types
+ * Supported AP device types
*/
-#define AP_DEVICE_TYPE_PCICC 3
-#define AP_DEVICE_TYPE_PCICA 4
-#define AP_DEVICE_TYPE_PCIXCC 5
-#define AP_DEVICE_TYPE_CEX2A 6
-#define AP_DEVICE_TYPE_CEX2C 7
-#define AP_DEVICE_TYPE_CEX3A 8
-#define AP_DEVICE_TYPE_CEX3C 9
#define AP_DEVICE_TYPE_CEX4 10
#define AP_DEVICE_TYPE_CEX5 11
#define AP_DEVICE_TYPE_CEX6 12
+#define AP_DEVICE_TYPE_CEX7 13
+#define AP_DEVICE_TYPE_CEX8 14
/*
- * Known function facilities
+ * AP queue state machine states
*/
-#define AP_FUNC_MEX4K 1
-#define AP_FUNC_CRT4K 2
-#define AP_FUNC_COPRO 3
-#define AP_FUNC_ACCEL 4
-#define AP_FUNC_EP11 5
-#define AP_FUNC_APXA 6
-
-/*
- * AP interrupt states
- */
-#define AP_INTR_DISABLED 0 /* AP interrupt disabled */
-#define AP_INTR_ENABLED 1 /* AP interrupt enabled */
+enum ap_sm_state {
+ AP_SM_STATE_RESET_START = 0,
+ AP_SM_STATE_RESET_WAIT,
+ AP_SM_STATE_SETIRQ_WAIT,
+ AP_SM_STATE_IDLE,
+ AP_SM_STATE_WORKING,
+ AP_SM_STATE_QUEUE_FULL,
+ AP_SM_STATE_ASSOC_WAIT,
+ NR_AP_SM_STATES
+};
/*
- * AP device states
+ * AP queue state machine events
*/
-enum ap_state {
- AP_STATE_RESET_START,
- AP_STATE_RESET_WAIT,
- AP_STATE_SETIRQ_WAIT,
- AP_STATE_IDLE,
- AP_STATE_WORKING,
- AP_STATE_QUEUE_FULL,
- AP_STATE_SUSPEND_WAIT,
- AP_STATE_BORKED,
- NR_AP_STATES
+enum ap_sm_event {
+ AP_SM_EVENT_POLL,
+ AP_SM_EVENT_TIMEOUT,
+ NR_AP_SM_EVENTS
};
/*
- * AP device events
+ * AP queue state wait behaviour
*/
-enum ap_event {
- AP_EVENT_POLL,
- AP_EVENT_TIMEOUT,
- NR_AP_EVENTS
+enum ap_sm_wait {
+ AP_SM_WAIT_AGAIN = 0, /* retry immediately */
+ AP_SM_WAIT_HIGH_TIMEOUT, /* poll high freq, wait for timeout */
+ AP_SM_WAIT_LOW_TIMEOUT, /* poll low freq, wait for timeout */
+ AP_SM_WAIT_INTERRUPT, /* wait for thin interrupt (if available) */
+ AP_SM_WAIT_NONE, /* no wait */
+ NR_AP_SM_WAIT
};
/*
- * AP wait behaviour
+ * AP queue device states
*/
-enum ap_wait {
- AP_WAIT_AGAIN, /* retry immediately */
- AP_WAIT_TIMEOUT, /* wait for timeout */
- AP_WAIT_INTERRUPT, /* wait for thin interrupt (if available) */
- AP_WAIT_NONE, /* no wait */
- NR_AP_WAIT
+enum ap_dev_state {
+ AP_DEV_STATE_UNINITIATED = 0, /* fresh and virgin, not touched */
+ AP_DEV_STATE_OPERATING, /* queue dev is working normal */
+ AP_DEV_STATE_SHUTDOWN, /* remove/unbind/shutdown in progress */
+ AP_DEV_STATE_ERROR, /* device is in error state */
+ NR_AP_DEV_STATES
};
struct ap_device;
@@ -133,139 +139,161 @@ struct ap_driver {
int (*probe)(struct ap_device *);
void (*remove)(struct ap_device *);
- void (*suspend)(struct ap_device *);
- void (*resume)(struct ap_device *);
+ int (*in_use)(unsigned long *apm, unsigned long *aqm);
+ /*
+ * Called at the start of the ap bus scan function when
+ * the crypto config information (qci) has changed.
+ * This callback is not invoked if there is no AP
+ * QCI support available.
+ */
+ void (*on_config_changed)(struct ap_config_info *new_config_info,
+ struct ap_config_info *old_config_info);
+ /*
+ * Called at the end of the ap bus scan function when
+ * the crypto config information (qci) has changed.
+ * This callback is not invoked if there is no AP
+ * QCI support available.
+ */
+ void (*on_scan_complete)(struct ap_config_info *new_config_info,
+ struct ap_config_info *old_config_info);
};
-#define to_ap_drv(x) container_of((x), struct ap_driver, driver)
+#define to_ap_drv(x) container_of_const((x), struct ap_driver, driver)
int ap_driver_register(struct ap_driver *, struct module *, char *);
void ap_driver_unregister(struct ap_driver *);
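
A hedged sketch of a client driver filling in the new hooks at registration time; all my_* names are hypothetical and the probe/remove bodies are stubs:

static int my_probe(struct ap_device *ap_dev) { return 0; }
static void my_remove(struct ap_device *ap_dev) { }
static int my_in_use(unsigned long *apm, unsigned long *aqm) { return 0; }

static void my_on_config_changed(struct ap_config_info *new_info,
				 struct ap_config_info *old_info)
{
	/* react to a QCI configuration change detected by the bus scan */
}

static struct ap_driver my_ap_driver = {
	.probe = my_probe,
	.remove = my_remove,
	.in_use = my_in_use,
	.on_config_changed = my_on_config_changed,
};

static int __init my_init(void)
{
	return ap_driver_register(&my_ap_driver, THIS_MODULE, "my_ap_drv");
}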
struct ap_device {
struct device device;
- struct ap_driver *drv; /* Pointer to AP device driver. */
int device_type; /* AP device type. */
+ const char *driver_override;
};
#define to_ap_dev(x) container_of((x), struct ap_device, device)
struct ap_card {
struct ap_device ap_dev;
- struct list_head list; /* Private list of AP cards. */
- struct list_head queues; /* List of assoc. AP queues */
- void *private; /* ap driver private pointer. */
- int raw_hwtype; /* AP raw hardware type. */
- unsigned int functions; /* AP device function bitfield. */
- int queue_depth; /* AP queue depth.*/
+ struct ap_tapq_hwinfo hwinfo; /* TAPQ GR2 content */
int id; /* AP card number. */
- atomic_t total_request_count; /* # requests ever for this AP device.*/
+ unsigned int maxmsgsize; /* AP msg limit for this card */
+ bool config; /* configured state */
+ bool chkstop; /* checkstop state */
+ atomic64_t total_request_count; /* # requests ever for this AP device.*/
};
+#define TAPQ_CARD_HWINFO_MASK 0xFFFF0000FFFF0F0FUL
+#define ASSOC_IDX_INVALID 0x10000
+
#define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
struct ap_queue {
struct ap_device ap_dev;
- struct list_head list; /* Private list of AP queues. */
+ struct hlist_node hnode; /* Node for the ap_queues hashtable */
struct ap_card *card; /* Ptr to assoc. AP card. */
spinlock_t lock; /* Per device lock. */
- void *private; /* ap driver private pointer. */
+ enum ap_dev_state dev_state; /* queue device state */
+ bool config; /* configured state */
+ bool chkstop; /* checkstop state */
ap_qid_t qid; /* AP queue id. */
- int interrupt; /* indicate if interrupts are enabled */
+ unsigned int se_bstate; /* SE bind state (BS) */
+ unsigned int assoc_idx; /* SE association index */
int queue_count; /* # messages currently on AP queue. */
- enum ap_state state; /* State of the AP device. */
int pendingq_count; /* # requests on pendingq list. */
int requestq_count; /* # requests on requestq list. */
- int total_request_count; /* # requests ever for this AP device.*/
+ u64 total_request_count; /* # requests ever for this AP device.*/
int request_timeout; /* Request timeout in jiffies. */
struct timer_list timeout; /* Timer for request timeouts. */
struct list_head pendingq; /* List of message sent to AP queue. */
struct list_head requestq; /* List of message yet to be sent. */
struct ap_message *reply; /* Per device reply message. */
+ enum ap_sm_state sm_state; /* ap queue state machine state */
+ int rapq_fbit; /* fbit arg for next rapq invocation */
+ int last_err_rc; /* last error state response code */
};
#define to_ap_queue(x) container_of((x), struct ap_queue, ap_dev.device)
-typedef enum ap_wait (ap_func_t)(struct ap_queue *queue);
+typedef enum ap_sm_wait (ap_func_t)(struct ap_queue *queue);
+
+struct ap_response_type {
+ struct completion work;
+ int type;
+};
struct ap_message {
struct list_head list; /* Request queueing. */
- unsigned long long psmid; /* Message id. */
- void *message; /* Pointer to message buffer. */
- size_t length; /* Message length. */
+ unsigned long psmid; /* Message id. */
+ void *msg; /* Pointer to message buffer. */
+ size_t len; /* actual msg len in msg buffer */
+ size_t bufsize; /* allocated msg buffer size */
+ u16 flags; /* Flags, see AP_MSG_FLAG_xxx */
int rc; /* Return code for this message */
-
- void *private; /* ap driver private pointer. */
- unsigned int special:1; /* Used for special commands. */
+ struct ap_response_type response;
/* receive is called from tasklet context */
void (*receive)(struct ap_queue *, struct ap_message *,
struct ap_message *);
};
-/**
- * ap_init_message() - Initialize ap_message.
- * Initialize a message before using. Otherwise this might result in
- * unexpected behaviour.
- */
-static inline void ap_init_message(struct ap_message *ap_msg)
-{
- memset(ap_msg, 0, sizeof(*ap_msg));
-}
-
-/**
- * ap_release_message() - Release ap_message.
- * Releases all memory used internal within the ap_message struct
- * Currently this is the message and private field.
- */
-static inline void ap_release_message(struct ap_message *ap_msg)
-{
- kzfree(ap_msg->message);
- kzfree(ap_msg->private);
-}
-
-#define for_each_ap_card(_ac) \
- list_for_each_entry(_ac, &ap_card_list, list)
+#define AP_MSG_FLAG_SPECIAL 0x0001 /* flag msg as 'special' with NQAP */
+#define AP_MSG_FLAG_USAGE 0x0002 /* CCA, EP11: usage (no admin) msg */
+#define AP_MSG_FLAG_ADMIN 0x0004 /* CCA, EP11: admin (=control) msg */
+#define AP_MSG_FLAG_MEMPOOL 0x0008 /* ap msg buffer allocated via mempool */
-#define for_each_ap_queue(_aq, _ac) \
- list_for_each_entry(_aq, &(_ac)->queues, list)
-
-/*
- * Note: don't use ap_send/ap_recv after using ap_queue_message
- * for the first time. Otherwise the ap message queue will get
- * confused.
- */
-int ap_send(ap_qid_t, unsigned long long, void *, size_t);
-int ap_recv(ap_qid_t, unsigned long long *, void *, size_t);
+int ap_init_apmsg(struct ap_message *ap_msg, u32 flags);
+void ap_release_apmsg(struct ap_message *ap_msg);
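
A hedged usage sketch of the init/release pair; this assumes, based on the bufsize field and the AP_MSG_FLAG_MEMPOOL flag, that ap_init_apmsg() allocates the msg buffer and ap_release_apmsg() frees it:

/* Sketch: set up an ap_message, build a request, then release it. */
static int my_msg_roundtrip(void)
{
	struct ap_message ap_msg;
	int rc;

	rc = ap_init_apmsg(&ap_msg, 0);	/* assumption: allocates ap_msg.msg */
	if (rc)
		return rc;
	/* ... fill ap_msg.msg (up to ap_msg.bufsize), set ap_msg.len ... */
	ap_release_apmsg(&ap_msg);
	return 0;
}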
-enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event);
-enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event);
+enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event);
+enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event);
-void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg);
+int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg);
void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg);
void ap_flush_queue(struct ap_queue *aq);
+bool ap_queue_usable(struct ap_queue *aq);
void *ap_airq_ptr(void);
-void ap_wait(enum ap_wait wait);
+int ap_sb_available(void);
+bool ap_is_se_guest(void);
+void ap_wait(enum ap_sm_wait wait);
void ap_request_timeout(struct timer_list *t);
-void ap_bus_force_rescan(void);
+bool ap_bus_force_rescan(void);
+
+int ap_test_config_usage_domain(unsigned int domain);
+int ap_test_config_ctrl_domain(unsigned int domain);
void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
-struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
+struct ap_queue *ap_queue_create(ap_qid_t qid, struct ap_card *ac);
+void ap_queue_prepare_remove(struct ap_queue *aq);
void ap_queue_remove(struct ap_queue *aq);
-void ap_queue_suspend(struct ap_device *ap_dev);
-void ap_queue_resume(struct ap_device *ap_dev);
-void ap_queue_reinit_state(struct ap_queue *aq);
+void ap_queue_init_state(struct ap_queue *aq);
+void _ap_queue_init_state(struct ap_queue *aq);
+
+struct ap_card *ap_card_create(int id, struct ap_tapq_hwinfo info,
+ int comp_type);
-struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
- int comp_device_type, unsigned int functions);
+#define APMASKSIZE (BITS_TO_LONGS(AP_DEVICES) * sizeof(unsigned long))
+#define AQMASKSIZE (BITS_TO_LONGS(AP_DOMAINS) * sizeof(unsigned long))
struct ap_perms {
unsigned long ioctlm[BITS_TO_LONGS(AP_IOCTLS)];
unsigned long apm[BITS_TO_LONGS(AP_DEVICES)];
unsigned long aqm[BITS_TO_LONGS(AP_DOMAINS)];
+ unsigned long adm[BITS_TO_LONGS(AP_DOMAINS)];
};
+
extern struct ap_perms ap_perms;
-extern struct mutex ap_perms_mutex;
+extern bool ap_apmask_aqmask_in_use;
+extern int ap_driver_override_ctr;
+extern struct mutex ap_attr_mutex;
+
+/*
+ * Get ap_queue device for this qid.
+ * Returns a ptr to the struct ap_queue device or NULL if no
+ * ap_queue device with this qid was found. On success the
+ * reference count of the embedded device is increased, so the
+ * caller has to decrease it after use with a call to
+ * put_device(&aq->ap_dev.device).
+ */
+struct ap_queue *ap_get_qdev(ap_qid_t qid);
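
A usage sketch matching the refcount contract described above:

/* Sketch: look up a queue device, read a field, drop the reference. */
static int my_read_queue_count(ap_qid_t qid)
{
	struct ap_queue *aq = ap_get_qdev(qid);
	int n;

	if (!aq)
		return -ENODEV;
	spin_lock_bh(&aq->lock);
	n = aq->queue_count;
	spin_unlock_bh(&aq->lock);
	put_device(&aq->ap_dev.device);	/* mandatory after ap_get_qdev() */
	return n;
}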
/*
* check APQN for owned/reserved by ap bus and default driver(s).
@@ -298,7 +326,7 @@ int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
* like "+1-16,-32,-0x40,+128" where only single bits or ranges of
* bits are cleared or set. Distinction is done based on the very
* first character which may be '+' or '-' for the relative string
- * and othewise assume to be an absolute value string. If parsing fails
+ * and otherwise assumed to be an absolute value string. If parsing fails
* a negative errno value is returned. All arguments and bitmaps are
* big endian order.
*/
@@ -306,4 +334,45 @@ int ap_parse_mask_str(const char *str,
unsigned long *bitmap, int bits,
struct mutex *lock);
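
A hedged sketch of parsing a relative mask string into a caller-owned bitmap, using the "+1-16,-32" syntax described above:

/* Sketch: apply a relative mask string to a local 256-bit bitmap. */
static DEFINE_MUTEX(my_mask_mutex);
static DECLARE_BITMAP(my_apm, AP_DEVICES);

static int my_parse_example(void)
{
	/* sets bits 1-16, clears bit 32, all other bits stay unchanged */
	return ap_parse_mask_str("+1-16,-32", my_apm, AP_DEVICES,
				 &my_mask_mutex);
}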
+/*
+ * ap_hex2bitmap() - Convert a string containing a hexadecimal number (str)
+ * into a bitmap (bitmap) with bits set that correspond to the bits represented
+ * by the hex string. Input and output data is in big endian order.
+ *
+ * str - Input hex string of format "0x1234abcd". The leading "0x" is optional.
+ * At least one digit is required. Must be large enough to hold the number of
+ * bits represented by the bits parameter.
+ *
+ * bitmap - Pointer to a bitmap. Upon successful completion of this function,
+ * this bitmap will have bits set to match the value of str. If bitmap is longer
+ * than str, then the rightmost bits of bitmap are padded with zeros. Must be
+ * large enough to hold the number of bits represented by the bits parameter.
+ *
+ * bits - Length, in bits, of the bitmap represented by str. Must be a multiple
+ * of 8.
+ *
+ * Returns: 0 On success
+ * -EINVAL If str format is invalid or bits is not a multiple of 8.
+ */
+int ap_hex2bitmap(const char *str, unsigned long *bitmap, int bits);
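
And a matching sketch for the hex-string variant:

/* Sketch: convert a hex string into a 16-bit bitmap (bits % 8 == 0). */
static int my_hex2bitmap_example(void)
{
	DECLARE_BITMAP(map, 16);

	/* "0xf008" -> bits 0-3 and bit 12 set (big endian bit order) */
	return ap_hex2bitmap("0xf008", map, 16);
}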
+
+/*
+ * Interface to wait for the AP bus to have done one initial ap bus
+ * scan and all detected APQNs have been bound to device drivers.
+ * If both conditions are not yet fulfilled, this function blocks
+ * on a condition with wait_for_completion_killable_timeout().
+ * If both conditions are fulfilled (before the timeout hits)
+ * the return value is 0. If the timeout (in jiffies) hits instead,
+ * -ETIME is returned. On failures negative return values are
+ * returned to the caller.
+ * It may be that the AP bus scan finds new devices. Then the
+ * condition that all APQNs are bound to their device drivers
+ * is reset to false and this call again blocks until either all
+ * APQNs are bound to a device driver or the timeout hits again.
+ */
+int ap_wait_apqn_bindings_complete(unsigned long timeout);
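
A hedged sketch of a consumer blocking on this interface (timeout value illustrative):

/* Sketch: wait up to 30s until all APQNs are bound to device drivers. */
static int my_wait_for_ap_bindings(void)
{
	int rc = ap_wait_apqn_bindings_complete(msecs_to_jiffies(30000));

	if (rc == -ETIME)
		pr_warn("AP bus APQN bindings not complete within 30s\n");
	return rc;
}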
+
+void ap_send_config_uevent(struct ap_device *ap_dev, bool cfg);
+void ap_send_online_uevent(struct ap_device *ap_dev, int online);
+
#endif /* _AP_BUS_H_ */
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index 63b4cc6cd7e5..8102c8134c49 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -6,12 +6,12 @@
* Adjunct processor bus, card related code.
*/
-#define KMSG_COMPONENT "ap"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "ap: " fmt
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/facility.h>
+#include <asm/sclp.h>
#include "ap_bus.h"
@@ -23,7 +23,7 @@ static ssize_t hwtype_show(struct device *dev,
{
struct ap_card *ac = to_ap_card(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", ac->ap_dev.device_type);
+ return sysfs_emit(buf, "%d\n", ac->ap_dev.device_type);
}
static DEVICE_ATTR_RO(hwtype);
@@ -33,7 +33,7 @@ static ssize_t raw_hwtype_show(struct device *dev,
{
struct ap_card *ac = to_ap_card(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", ac->raw_hwtype);
+ return sysfs_emit(buf, "%d\n", ac->hwinfo.at);
}
static DEVICE_ATTR_RO(raw_hwtype);
@@ -43,7 +43,7 @@ static ssize_t depth_show(struct device *dev, struct device_attribute *attr,
{
struct ap_card *ac = to_ap_card(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", ac->queue_depth);
+ return sysfs_emit(buf, "%d\n", ac->hwinfo.qd);
}
static DEVICE_ATTR_RO(depth);
@@ -53,7 +53,7 @@ static ssize_t ap_functions_show(struct device *dev,
{
struct ap_card *ac = to_ap_card(dev);
- return snprintf(buf, PAGE_SIZE, "0x%08X\n", ac->functions);
+ return sysfs_emit(buf, "0x%08X\n", ac->hwinfo.fac);
}
static DEVICE_ATTR_RO(ap_functions);
@@ -63,27 +63,29 @@ static ssize_t request_count_show(struct device *dev,
char *buf)
{
struct ap_card *ac = to_ap_card(dev);
- unsigned int req_cnt;
+ u64 req_cnt;
req_cnt = 0;
- spin_lock_bh(&ap_list_lock);
- req_cnt = atomic_read(&ac->total_request_count);
- spin_unlock_bh(&ap_list_lock);
- return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
+ spin_lock_bh(&ap_queues_lock);
+ req_cnt = atomic64_read(&ac->total_request_count);
+ spin_unlock_bh(&ap_queues_lock);
+ return sysfs_emit(buf, "%llu\n", req_cnt);
}
static ssize_t request_count_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct ap_card *ac = to_ap_card(dev);
+ int bkt;
struct ap_queue *aq;
+ struct ap_card *ac = to_ap_card(dev);
- spin_lock_bh(&ap_list_lock);
- for_each_ap_queue(aq, ac)
- aq->total_request_count = 0;
- spin_unlock_bh(&ap_list_lock);
- atomic_set(&ac->total_request_count, 0);
+ spin_lock_bh(&ap_queues_lock);
+ hash_for_each(ap_queues, bkt, aq, hnode)
+ if (ac == aq->card)
+ aq->total_request_count = 0;
+ spin_unlock_bh(&ap_queues_lock);
+ atomic64_set(&ac->total_request_count, 0);
return count;
}
@@ -93,16 +95,18 @@ static DEVICE_ATTR_RW(request_count);
static ssize_t requestq_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct ap_card *ac = to_ap_card(dev);
+ int bkt;
struct ap_queue *aq;
unsigned int reqq_cnt;
+ struct ap_card *ac = to_ap_card(dev);
reqq_cnt = 0;
- spin_lock_bh(&ap_list_lock);
- for_each_ap_queue(aq, ac)
- reqq_cnt += aq->requestq_count;
- spin_unlock_bh(&ap_list_lock);
- return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
+ spin_lock_bh(&ap_queues_lock);
+ hash_for_each(ap_queues, bkt, aq, hnode)
+ if (ac == aq->card)
+ reqq_cnt += aq->requestq_count;
+ spin_unlock_bh(&ap_queues_lock);
+ return sysfs_emit(buf, "%d\n", reqq_cnt);
}
static DEVICE_ATTR_RO(requestq_count);
@@ -110,16 +114,18 @@ static DEVICE_ATTR_RO(requestq_count);
static ssize_t pendingq_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct ap_card *ac = to_ap_card(dev);
+ int bkt;
struct ap_queue *aq;
unsigned int penq_cnt;
+ struct ap_card *ac = to_ap_card(dev);
penq_cnt = 0;
- spin_lock_bh(&ap_list_lock);
- for_each_ap_queue(aq, ac)
- penq_cnt += aq->pendingq_count;
- spin_unlock_bh(&ap_list_lock);
- return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
+ spin_lock_bh(&ap_queues_lock);
+ hash_for_each(ap_queues, bkt, aq, hnode)
+ if (ac == aq->card)
+ penq_cnt += aq->pendingq_count;
+ spin_unlock_bh(&ap_queues_lock);
+ return sysfs_emit(buf, "%d\n", penq_cnt);
}
static DEVICE_ATTR_RO(pendingq_count);
@@ -127,11 +133,65 @@ static DEVICE_ATTR_RO(pendingq_count);
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type);
+ return sysfs_emit(buf, "ap:t%02X\n", to_ap_dev(dev)->device_type);
}
static DEVICE_ATTR_RO(modalias);
+static ssize_t config_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+
+ return sysfs_emit(buf, "%d\n", ac->config ? 1 : 0);
+}
+
+static ssize_t config_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc = 0, cfg;
+ struct ap_card *ac = to_ap_card(dev);
+
+ if (sscanf(buf, "%d\n", &cfg) != 1 || cfg < 0 || cfg > 1)
+ return -EINVAL;
+
+ if (cfg && !ac->config)
+ rc = sclp_ap_configure(ac->id);
+ else if (!cfg && ac->config)
+ rc = sclp_ap_deconfigure(ac->id);
+ if (rc)
+ return rc;
+
+ ac->config = cfg ? true : false;
+
+ ap_send_config_uevent(&ac->ap_dev, ac->config);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(config);
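
config_store() accepts only the literal values 0 and 1 and calls into SCLP only on an actual state change. A small compilable sketch of that parse-and-edge-trigger shape, with the sclp_ap_configure()/sclp_ap_deconfigure() calls replaced by stubs:

#include <errno.h>
#include <stdio.h>

/* Stubs for the real sclp_ap_configure()/sclp_ap_deconfigure(). */
static int configure(void)   { return 0; }
static int deconfigure(void) { return 0; }

static int set_config(int *cur, const char *buf)
{
	int cfg, rc = 0;

	/* only "0" and "1" are accepted */
	if (sscanf(buf, "%d", &cfg) != 1 || cfg < 0 || cfg > 1)
		return -EINVAL;

	/* act only on an actual state change */
	if (cfg && !*cur)
		rc = configure();
	else if (!cfg && *cur)
		rc = deconfigure();
	if (rc)
		return rc;

	*cur = cfg;
	return 0;
}

int main(void)
{
	int cfg = 0;

	printf("rc=%d cfg=%d\n", set_config(&cfg, "1\n"), cfg);
	return 0;
}
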
+
+static ssize_t chkstop_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+
+ return sysfs_emit(buf, "%d\n", ac->chkstop ? 1 : 0);
+}
+
+static DEVICE_ATTR_RO(chkstop);
+
+static ssize_t max_msg_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+
+ return sysfs_emit(buf, "%u\n", ac->maxmsgsize);
+}
+
+static DEVICE_ATTR_RO(max_msg_size);
+
static struct attribute *ap_card_dev_attrs[] = {
&dev_attr_hwtype.attr,
&dev_attr_raw_hwtype.attr,
@@ -141,6 +201,9 @@ static struct attribute *ap_card_dev_attrs[] = {
&dev_attr_requestq_count.attr,
&dev_attr_pendingq_count.attr,
&dev_attr_modalias.attr,
+ &dev_attr_config.attr,
+ &dev_attr_chkstop.attr,
+ &dev_attr_max_msg_size.attr,
NULL
};
@@ -162,30 +225,24 @@ static void ap_card_device_release(struct device *dev)
{
struct ap_card *ac = to_ap_card(dev);
- if (!list_empty(&ac->list)) {
- spin_lock_bh(&ap_list_lock);
- list_del_init(&ac->list);
- spin_unlock_bh(&ap_list_lock);
- }
kfree(ac);
}
-struct ap_card *ap_card_create(int id, int queue_depth, int raw_type,
- int comp_type, unsigned int functions)
+struct ap_card *ap_card_create(int id, struct ap_tapq_hwinfo hwinfo,
+ int comp_type)
{
struct ap_card *ac;
ac = kzalloc(sizeof(*ac), GFP_KERNEL);
if (!ac)
return NULL;
- INIT_LIST_HEAD(&ac->list);
- INIT_LIST_HEAD(&ac->queues);
ac->ap_dev.device.release = ap_card_device_release;
ac->ap_dev.device.type = &ap_card_type;
ac->ap_dev.device_type = comp_type;
- ac->raw_hwtype = raw_type;
- ac->queue_depth = queue_depth;
- ac->functions = functions;
+ ac->hwinfo = hwinfo;
ac->id = id;
+ ac->maxmsgsize = hwinfo.ml > 0 ?
+ hwinfo.ml * AP_TAPQ_ML_FIELD_CHUNK_SIZE : AP_DEFAULT_MAX_MSG_SIZE;
+
return ac;
}
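
ap_card_create() now derives the card's maximum message size from the TAPQ ml field: ml counts chunks of AP_TAPQ_ML_FIELD_CHUNK_SIZE bytes, and a zero ml falls back to AP_DEFAULT_MAX_MSG_SIZE. A compilable sketch of the same derivation; the two constant values below are assumptions for illustration only:

#include <stdio.h>

/* Both values are assumptions for illustration only. */
#define ML_FIELD_CHUNK_SIZE	4096
#define DEFAULT_MAX_MSG_SIZE	12288

/* ml counts chunks; 0 means "not reported", so use the default. */
static unsigned int max_msg_size(unsigned int ml)
{
	return ml > 0 ? ml * ML_FIELD_CHUNK_SIZE : DEFAULT_MAX_MSG_SIZE;
}

int main(void)
{
	printf("%u %u\n", max_msg_size(0), max_msg_size(2));
	return 0;
}
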
diff --git a/drivers/s390/crypto/ap_debug.h b/drivers/s390/crypto/ap_debug.h
index dc675eb5aef6..2f66271b8564 100644
--- a/drivers/s390/crypto/ap_debug.h
+++ b/drivers/s390/crypto/ap_debug.h
@@ -16,10 +16,16 @@
#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
-#define DBF_MAX_SPRINTF_ARGS 5
+#define AP_DBF_MAX_SPRINTF_ARGS 6
#define AP_DBF(...) \
debug_sprintf_event(ap_dbf_info, ##__VA_ARGS__)
+#define AP_DBF_ERR(...) \
+ debug_sprintf_event(ap_dbf_info, DBF_ERR, ##__VA_ARGS__)
+#define AP_DBF_WARN(...) \
+ debug_sprintf_event(ap_dbf_info, DBF_WARN, ##__VA_ARGS__)
+#define AP_DBF_INFO(...) \
+ debug_sprintf_event(ap_dbf_info, DBF_INFO, ##__VA_ARGS__)
extern debug_info_t *ap_dbf_info;
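
The severity-specific AP_DBF_* wrappers simply bake the debug level into the existing varargs macro. A self-contained userspace sketch of the same wrapping technique around one backend function (all names here are invented):

#include <stdarg.h>
#include <stdio.h>

enum { DBF_ERR, DBF_WARN, DBF_INFO };

/* One varargs backend, comparable to debug_sprintf_event(). */
static void dbf_event(int level, const char *fmt, ...)
{
	va_list ap;

	fprintf(stderr, "dbf[%d]: ", level);
	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

/* Severity-specific wrappers bake the level in, so call sites
 * no longer pass it explicitly. */
#define DBF_ERR_MSG(...)  dbf_event(DBF_ERR, __VA_ARGS__)
#define DBF_WARN_MSG(...) dbf_event(DBF_WARN, __VA_ARGS__)
#define DBF_INFO_MSG(...) dbf_event(DBF_INFO, __VA_ARGS__)

int main(void)
{
	DBF_WARN_MSG("%s rc=%d\n", "tapq", 3);
	DBF_INFO_MSG("all good\n");
	return 0;
}
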
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 576ac08777c5..4a32c1e19a1e 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -1,40 +1,72 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright IBM Corp. 2016
+ * Copyright IBM Corp. 2016, 2023
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*
* Adjunct processor bus, queue related code.
*/
-#define KMSG_COMPONENT "ap"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "ap: " fmt
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/facility.h>
+#define CREATE_TRACE_POINTS
+#include <asm/trace/ap.h>
+
#include "ap_bus.h"
#include "ap_debug.h"
+EXPORT_TRACEPOINT_SYMBOL(s390_ap_nqap);
+EXPORT_TRACEPOINT_SYMBOL(s390_ap_dqap);
+
static void __ap_flush_queue(struct ap_queue *aq);
+/*
+ * some AP queue helper functions
+ */
+
+static inline bool ap_q_supported_in_se(struct ap_queue *aq)
+{
+ return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel;
+}
+
+static inline bool ap_q_supports_bind(struct ap_queue *aq)
+{
+ return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel;
+}
+
+static inline bool ap_q_supports_assoc(struct ap_queue *aq)
+{
+ return aq->card->hwinfo.ep11;
+}
+
+static inline bool ap_q_needs_bind(struct ap_queue *aq)
+{
+ return ap_q_supports_bind(aq) && ap_sb_available();
+}
+
/**
- * ap_queue_enable_interruption(): Enable interruption on an AP queue.
- * @qid: The AP queue number
+ * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
+ * @aq: The AP queue
* @ind: the notification indicator byte
*
* Enables interruption on AP queue via ap_aqic(). Based on the return
 * value it waits a while and checks via ap_test_queue() whether
 * interrupts have been switched on.
*/
-static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind)
+static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
{
+ union ap_qirq_ctrl qirqctrl = { .value = 0 };
struct ap_queue_status status;
- struct ap_qirq_ctrl qirqctrl = { 0 };
qirqctrl.ir = 1;
qirqctrl.isc = AP_ISC;
- status = ap_aqic(aq->qid, qirqctrl, ind);
+ status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
+ if (status.async)
+ return -EPERM;
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_OTHERWISE_CHANGED:
@@ -59,7 +91,7 @@ static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind)
* @qid: The AP queue number
* @psmid: The program supplied message identifier
* @msg: The message text
- * @length: The message length
+ * @msglen: The message length
* @special: Special Bit
*
* Returns AP queue status structure.
@@ -68,60 +100,27 @@ static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind)
* because a segment boundary was reached. The NQAP is repeated.
*/
static inline struct ap_queue_status
-__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
- unsigned int special)
-{
- if (special == 1)
- qid |= 0x400000UL;
- return ap_nqap(qid, psmid, msg, length);
-}
-
-int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
+__ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,
+ int special)
{
struct ap_queue_status status;
- status = __ap_send(qid, psmid, msg, length, 0);
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- return 0;
- case AP_RESPONSE_Q_FULL:
- case AP_RESPONSE_RESET_IN_PROGRESS:
- return -EBUSY;
- case AP_RESPONSE_REQ_FAC_NOT_INST:
- return -EINVAL;
- default: /* Device is gone. */
- return -ENODEV;
- }
-}
-EXPORT_SYMBOL(ap_send);
+ if (special)
+ qid |= 0x400000UL;
-int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
-{
- struct ap_queue_status status;
+ status = ap_nqap(qid, psmid, msg, msglen);
- if (msg == NULL)
- return -EINVAL;
- status = ap_dqap(qid, psmid, msg, length);
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- return 0;
- case AP_RESPONSE_NO_PENDING_REPLY:
- if (status.queue_empty)
- return -ENOENT;
- return -EBUSY;
- case AP_RESPONSE_RESET_IN_PROGRESS:
- return -EBUSY;
- default:
- return -ENODEV;
- }
+ trace_s390_ap_nqap(AP_QID_CARD(qid), AP_QID_QUEUE(qid),
+ status.value, psmid);
+
+ return status;
}
-EXPORT_SYMBOL(ap_recv);
/* State machine definitions and helpers */
-static enum ap_wait ap_sm_nop(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
{
- return AP_WAIT_NONE;
+ return AP_SM_WAIT_NONE;
}
/**
@@ -129,18 +128,41 @@ static enum ap_wait ap_sm_nop(struct ap_queue *aq)
* not change the state of the device.
* @aq: pointer to the AP queue
*
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
*/
static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
{
struct ap_queue_status status;
struct ap_message *ap_msg;
+ bool found = false;
+ size_t reslen;
+ unsigned long resgr0 = 0;
+ int parts = 0;
+
+ /*
+ * Loop on DQAP until the response code and resgr0 indicate
+ * that the msg has been received in full. As the very same
+ * buffer is used, each invocation overwrites the msg. That is
+ * intended, and in such a case the receiver of the msg is
+ * informed with a msg rc code of EMSGSIZE.
+ */
+ do {
+ status = ap_dqap(aq->qid, &aq->reply->psmid,
+ aq->reply->msg, aq->reply->bufsize,
+ &aq->reply->len, &reslen, &resgr0);
+ parts++;
+ } while (status.response_code == 0xFF && resgr0 != 0);
+
+ trace_s390_ap_dqap(AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid),
+ status.value, aq->reply->psmid);
- status = ap_dqap(aq->qid, &aq->reply->psmid,
- aq->reply->message, aq->reply->length);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
- aq->queue_count--;
+ print_hex_dump_debug("aprpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ aq->reply->msg, aq->reply->len, false);
+ aq->queue_count = max_t(int, 0, aq->queue_count - 1);
+ if (!status.queue_empty && !aq->queue_count)
+ aq->queue_count++;
if (aq->queue_count > 0)
mod_timer(&aq->timeout,
jiffies + aq->request_timeout);
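
The rewritten receive path repeats DQAP while a 0xFF response code together with a non-zero resgr0 signals that more parts of the reply are pending; a reply that took more than one part is then flagged to its caller as -EMSGSIZE. A self-contained sketch of that drain-until-complete pattern; recv_part() merely simulates a device and is not the real DQAP interface:

#include <errno.h>
#include <stdio.h>

#define MORE_PARTS 0xFF	/* assumed 'partially received' indicator */

/* Simulated device: delivers a reply in 'total' parts. */
static int recv_part(int *state, int total)
{
	return ++(*state) < total ? MORE_PARTS : 0;
}

/* Drain one reply; report -EMSGSIZE if it did not fit in one part,
 * mirroring how the receive callback above is informed. */
static int drain_reply(int total_parts)
{
	int state = 0, parts = 0, rc;

	do {
		rc = recv_part(&state, total_parts);
		parts++;
	} while (rc == MORE_PARTS);

	return parts > 1 ? -EMSGSIZE : 0;
}

int main(void)
{
	printf("%d %d\n", drain_reply(1), drain_reply(3));
	return 0;
}
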
@@ -149,9 +171,21 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
continue;
list_del_init(&ap_msg->list);
aq->pendingq_count--;
- ap_msg->receive(aq, ap_msg, aq->reply);
+ if (parts > 1) {
+ ap_msg->rc = -EMSGSIZE;
+ ap_msg->receive(aq, ap_msg, NULL);
+ } else {
+ ap_msg->receive(aq, ap_msg, aq->reply);
+ }
+ found = true;
break;
}
+ if (!found) {
+ AP_DBF_WARN("%s unassociated reply psmid=0x%016lx on 0x%02x.%04x\n",
+ __func__, aq->reply->psmid,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ }
+ fallthrough;
case AP_RESPONSE_NO_PENDING_REPLY:
if (!status.queue_empty || aq->queue_count <= 0)
break;
@@ -159,6 +193,9 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
aq->queue_count = 0;
list_splice_init(&aq->pendingq, &aq->requestq);
aq->requestq_count += aq->pendingq_count;
+ pr_debug("queue 0x%02x.%04x rescheduled %d reqs (new req %d)\n",
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid),
+ aq->pendingq_count, aq->requestq_count);
aq->pendingq_count = 0;
break;
default:
@@ -171,105 +208,112 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
* ap_sm_read(): Receive pending reply messages from an AP queue.
* @aq: pointer to the AP queue
*
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
*/
-static enum ap_wait ap_sm_read(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
{
struct ap_queue_status status;
if (!aq->reply)
- return AP_WAIT_NONE;
+ return AP_SM_WAIT_NONE;
status = ap_sm_recv(aq);
+ if (status.async)
+ return AP_SM_WAIT_NONE;
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
if (aq->queue_count > 0) {
- aq->state = AP_STATE_WORKING;
- return AP_WAIT_AGAIN;
+ aq->sm_state = AP_SM_STATE_WORKING;
+ return AP_SM_WAIT_AGAIN;
}
- aq->state = AP_STATE_IDLE;
- return AP_WAIT_NONE;
+ aq->sm_state = AP_SM_STATE_IDLE;
+ break;
case AP_RESPONSE_NO_PENDING_REPLY:
if (aq->queue_count > 0)
- return AP_WAIT_INTERRUPT;
- aq->state = AP_STATE_IDLE;
- return AP_WAIT_NONE;
+ return status.irq_enabled ?
+ AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
+ aq->sm_state = AP_SM_STATE_IDLE;
+ break;
default:
- aq->state = AP_STATE_BORKED;
- return AP_WAIT_NONE;
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = status.response_code;
+ AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return AP_SM_WAIT_NONE;
}
-}
-
-/**
- * ap_sm_suspend_read(): Receive pending reply messages from an AP queue
- * without changing the device state in between. In suspend mode we don't
- * allow sending new requests, therefore just fetch pending replies.
- * @aq: pointer to the AP queue
- *
- * Returns AP_WAIT_NONE or AP_WAIT_AGAIN
- */
-static enum ap_wait ap_sm_suspend_read(struct ap_queue *aq)
-{
- struct ap_queue_status status;
+ /* Check and maybe enable irq support (again) on this queue */
+ if (!status.irq_enabled && status.queue_empty) {
+ void *lsi_ptr = ap_airq_ptr();
- if (!aq->reply)
- return AP_WAIT_NONE;
- status = ap_sm_recv(aq);
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- if (aq->queue_count > 0)
- return AP_WAIT_AGAIN;
- /* fall through */
- default:
- return AP_WAIT_NONE;
+ if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0) {
+ aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
+ return AP_SM_WAIT_AGAIN;
+ }
}
+ return AP_SM_WAIT_NONE;
}
/**
* ap_sm_write(): Send messages from the request queue to an AP queue.
* @aq: pointer to the AP queue
*
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
*/
-static enum ap_wait ap_sm_write(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
{
struct ap_queue_status status;
struct ap_message *ap_msg;
+ ap_qid_t qid = aq->qid;
if (aq->requestq_count <= 0)
- return AP_WAIT_NONE;
+ return AP_SM_WAIT_NONE;
+
/* Start the next request on the queue. */
ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
- status = __ap_send(aq->qid, ap_msg->psmid,
- ap_msg->message, ap_msg->length, ap_msg->special);
+ print_hex_dump_debug("apreq: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ ap_msg->msg, ap_msg->len, false);
+ status = __ap_send(qid, ap_msg->psmid,
+ ap_msg->msg, ap_msg->len,
+ ap_msg->flags & AP_MSG_FLAG_SPECIAL);
+ if (status.async)
+ return AP_SM_WAIT_NONE;
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
- aq->queue_count++;
+ aq->queue_count = max_t(int, 1, aq->queue_count + 1);
if (aq->queue_count == 1)
mod_timer(&aq->timeout, jiffies + aq->request_timeout);
list_move_tail(&ap_msg->list, &aq->pendingq);
aq->requestq_count--;
aq->pendingq_count++;
- if (aq->queue_count < aq->card->queue_depth) {
- aq->state = AP_STATE_WORKING;
- return AP_WAIT_AGAIN;
+ if (aq->queue_count < aq->card->hwinfo.qd) {
+ aq->sm_state = AP_SM_STATE_WORKING;
+ return AP_SM_WAIT_AGAIN;
}
- /* fall through */
+ fallthrough;
case AP_RESPONSE_Q_FULL:
- aq->state = AP_STATE_QUEUE_FULL;
- return AP_WAIT_INTERRUPT;
+ aq->sm_state = AP_SM_STATE_QUEUE_FULL;
+ return status.irq_enabled ?
+ AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
case AP_RESPONSE_RESET_IN_PROGRESS:
- aq->state = AP_STATE_RESET_WAIT;
- return AP_WAIT_TIMEOUT;
+ aq->sm_state = AP_SM_STATE_RESET_WAIT;
+ return AP_SM_WAIT_LOW_TIMEOUT;
+ case AP_RESPONSE_INVALID_DOMAIN:
+ AP_DBF_WARN("%s RESPONSE_INVALID_DOMAIN on NQAP\n", __func__);
+ fallthrough;
case AP_RESPONSE_MESSAGE_TOO_BIG:
case AP_RESPONSE_REQ_FAC_NOT_INST:
list_del_init(&ap_msg->list);
aq->requestq_count--;
ap_msg->rc = -EINVAL;
ap_msg->receive(aq, ap_msg, NULL);
- return AP_WAIT_AGAIN;
+ return AP_SM_WAIT_AGAIN;
default:
- aq->state = AP_STATE_BORKED;
- return AP_WAIT_NONE;
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = status.response_code;
+ AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return AP_SM_WAIT_NONE;
}
}
@@ -277,38 +321,39 @@ static enum ap_wait ap_sm_write(struct ap_queue *aq)
* ap_sm_read_write(): Send and receive messages to/from an AP queue.
* @aq: pointer to the AP queue
*
- * Returns AP_WAIT_NONE, AP_WAIT_AGAIN, or AP_WAIT_INTERRUPT
+ * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
*/
-static enum ap_wait ap_sm_read_write(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
{
return min(ap_sm_read(aq), ap_sm_write(aq));
}
/**
* ap_sm_reset(): Reset an AP queue.
- * @qid: The AP queue number
+ * @aq: The AP queue
*
* Submit the Reset command to an AP queue.
*/
-static enum ap_wait ap_sm_reset(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
{
struct ap_queue_status status;
- status = ap_rapq(aq->qid);
+ status = ap_rapq(aq->qid, aq->rapq_fbit);
+ if (status.async)
+ return AP_SM_WAIT_NONE;
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_RESET_IN_PROGRESS:
- aq->state = AP_STATE_RESET_WAIT;
- aq->interrupt = AP_INTR_DISABLED;
- return AP_WAIT_TIMEOUT;
- case AP_RESPONSE_BUSY:
- return AP_WAIT_TIMEOUT;
- case AP_RESPONSE_Q_NOT_AVAIL:
- case AP_RESPONSE_DECONFIGURED:
- case AP_RESPONSE_CHECKSTOPPED:
+ aq->sm_state = AP_SM_STATE_RESET_WAIT;
+ aq->rapq_fbit = 0;
+ return AP_SM_WAIT_LOW_TIMEOUT;
default:
- aq->state = AP_STATE_BORKED;
- return AP_WAIT_NONE;
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = status.response_code;
+ AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return AP_SM_WAIT_NONE;
}
}
@@ -318,36 +363,38 @@ static enum ap_wait ap_sm_reset(struct ap_queue *aq)
*
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_LOW_TIMEOUT.
*/
-static enum ap_wait ap_sm_reset_wait(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
{
struct ap_queue_status status;
+ struct ap_tapq_hwinfo hwinfo;
void *lsi_ptr;
- if (aq->queue_count > 0 && aq->reply)
- /* Try to read a completed message and get the status */
- status = ap_sm_recv(aq);
- else
- /* Get the status with TAPQ */
- status = ap_tapq(aq->qid, NULL);
+ /* Get the status with TAPQ */
+ status = ap_test_queue(aq->qid, 1, &hwinfo);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
+ aq->se_bstate = hwinfo.bs;
lsi_ptr = ap_airq_ptr();
- if (lsi_ptr && ap_queue_enable_interruption(aq, lsi_ptr) == 0)
- aq->state = AP_STATE_SETIRQ_WAIT;
+ if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
+ aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
else
- aq->state = (aq->queue_count > 0) ?
- AP_STATE_WORKING : AP_STATE_IDLE;
- return AP_WAIT_AGAIN;
+ aq->sm_state = (aq->queue_count > 0) ?
+ AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
+ return AP_SM_WAIT_AGAIN;
case AP_RESPONSE_BUSY:
case AP_RESPONSE_RESET_IN_PROGRESS:
- return AP_WAIT_TIMEOUT;
+ return AP_SM_WAIT_LOW_TIMEOUT;
case AP_RESPONSE_Q_NOT_AVAIL:
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
default:
- aq->state = AP_STATE_BORKED;
- return AP_WAIT_NONE;
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = status.response_code;
+ AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return AP_SM_WAIT_NONE;
}
}
@@ -357,7 +404,7 @@ static enum ap_wait ap_sm_reset_wait(struct ap_queue *aq)
*
 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_LOW_TIMEOUT.
*/
-static enum ap_wait ap_sm_setirq_wait(struct ap_queue *aq)
+static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
{
struct ap_queue_status status;
@@ -370,99 +417,136 @@ static enum ap_wait ap_sm_setirq_wait(struct ap_queue *aq)
if (status.irq_enabled == 1) {
/* Irqs are now enabled */
- aq->interrupt = AP_INTR_ENABLED;
- aq->state = (aq->queue_count > 0) ?
- AP_STATE_WORKING : AP_STATE_IDLE;
+ aq->sm_state = (aq->queue_count > 0) ?
+ AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
}
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
if (aq->queue_count > 0)
- return AP_WAIT_AGAIN;
- /* fallthrough */
+ return AP_SM_WAIT_AGAIN;
+ fallthrough;
case AP_RESPONSE_NO_PENDING_REPLY:
- return AP_WAIT_TIMEOUT;
+ return AP_SM_WAIT_LOW_TIMEOUT;
default:
- aq->state = AP_STATE_BORKED;
- return AP_WAIT_NONE;
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = status.response_code;
+ AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return AP_SM_WAIT_NONE;
+ }
+}
+
+/**
+ * ap_sm_assoc_wait(): Test queue for completion of a pending
+ * association request.
+ * @aq: pointer to the AP queue
+ */
+static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
+{
+ struct ap_queue_status status;
+ struct ap_tapq_hwinfo hwinfo;
+
+ status = ap_test_queue(aq->qid, 1, &hwinfo);
+ /* handle asynchronous error on this queue */
+ if (status.async && status.response_code) {
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = status.response_code;
+ AP_DBF_WARN("%s asynch RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return AP_SM_WAIT_NONE;
+ }
+ if (status.response_code > AP_RESPONSE_BUSY) {
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = status.response_code;
+ AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return AP_SM_WAIT_NONE;
+ }
+
+ /* update queue's SE bind state */
+ aq->se_bstate = hwinfo.bs;
+
+ /* check bs bits */
+ switch (hwinfo.bs) {
+ case AP_BS_Q_USABLE:
+ /* association is complete */
+ aq->sm_state = AP_SM_STATE_IDLE;
+ pr_debug("queue 0x%02x.%04x associated with %u\n",
+ AP_QID_CARD(aq->qid),
+ AP_QID_QUEUE(aq->qid), aq->assoc_idx);
+ return AP_SM_WAIT_NONE;
+ case AP_BS_Q_USABLE_NO_SECURE_KEY:
+ /* association still pending */
+ return AP_SM_WAIT_LOW_TIMEOUT;
+ default:
+ /* reset from 'outside' happened or state is unknown */
+ aq->assoc_idx = ASSOC_IDX_INVALID;
+ aq->dev_state = AP_DEV_STATE_ERROR;
+ aq->last_err_rc = status.response_code;
+ AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
+ __func__, hwinfo.bs,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return AP_SM_WAIT_NONE;
}
}
/*
* AP state machine jump table
*/
-static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
- [AP_STATE_RESET_START] = {
- [AP_EVENT_POLL] = ap_sm_reset,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
+static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
+ [AP_SM_STATE_RESET_START] = {
+ [AP_SM_EVENT_POLL] = ap_sm_reset,
+ [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
},
- [AP_STATE_RESET_WAIT] = {
- [AP_EVENT_POLL] = ap_sm_reset_wait,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ [AP_SM_STATE_RESET_WAIT] = {
+ [AP_SM_EVENT_POLL] = ap_sm_reset_wait,
+ [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
},
- [AP_STATE_SETIRQ_WAIT] = {
- [AP_EVENT_POLL] = ap_sm_setirq_wait,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ [AP_SM_STATE_SETIRQ_WAIT] = {
+ [AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
+ [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
},
- [AP_STATE_IDLE] = {
- [AP_EVENT_POLL] = ap_sm_write,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ [AP_SM_STATE_IDLE] = {
+ [AP_SM_EVENT_POLL] = ap_sm_write,
+ [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
},
- [AP_STATE_WORKING] = {
- [AP_EVENT_POLL] = ap_sm_read_write,
- [AP_EVENT_TIMEOUT] = ap_sm_reset,
+ [AP_SM_STATE_WORKING] = {
+ [AP_SM_EVENT_POLL] = ap_sm_read_write,
+ [AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
},
- [AP_STATE_QUEUE_FULL] = {
- [AP_EVENT_POLL] = ap_sm_read,
- [AP_EVENT_TIMEOUT] = ap_sm_reset,
+ [AP_SM_STATE_QUEUE_FULL] = {
+ [AP_SM_EVENT_POLL] = ap_sm_read,
+ [AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
},
- [AP_STATE_SUSPEND_WAIT] = {
- [AP_EVENT_POLL] = ap_sm_suspend_read,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
- },
- [AP_STATE_BORKED] = {
- [AP_EVENT_POLL] = ap_sm_nop,
- [AP_EVENT_TIMEOUT] = ap_sm_nop,
+ [AP_SM_STATE_ASSOC_WAIT] = {
+ [AP_SM_EVENT_POLL] = ap_sm_assoc_wait,
+ [AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
},
};
-enum ap_wait ap_sm_event(struct ap_queue *aq, enum ap_event event)
+enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
{
- return ap_jumptable[aq->state][event](aq);
+ if (aq->config && !aq->chkstop &&
+ aq->dev_state > AP_DEV_STATE_UNINITIATED)
+ return ap_jumptable[aq->sm_state][event](aq);
+ else
+ return AP_SM_WAIT_NONE;
}
-enum ap_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_event event)
+enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
{
- enum ap_wait wait;
+ enum ap_sm_wait wait;
- while ((wait = ap_sm_event(aq, event)) == AP_WAIT_AGAIN)
+ while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
;
return wait;
}
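
The renamed jump table keeps the driver's dispatch pattern: a two-dimensional array indexed by state and event, with an event loop that re-dispatches as long as a handler returns the WAIT_AGAIN hint. A compact userspace sketch of the same table-driven technique, with states, events, and handlers invented for illustration:

#include <stdio.h>

enum state { ST_IDLE, ST_WORKING, NR_STATES };
enum event { EV_POLL, EV_TIMEOUT, NR_EVENTS };
enum wait  { WAIT_NONE, WAIT_AGAIN };

struct ctx { enum state st; int pending; };

typedef enum wait sm_func_t(struct ctx *c);

static enum wait sm_nop(struct ctx *c)
{
	(void)c;		/* nothing to do in this state */
	return WAIT_NONE;
}

static enum wait sm_work(struct ctx *c)
{
	if (c->pending-- > 1)
		return WAIT_AGAIN;	/* more to do, re-dispatch */
	c->st = ST_IDLE;
	return WAIT_NONE;
}

/* Dispatch table indexed by [state][event]. */
static sm_func_t *jumptable[NR_STATES][NR_EVENTS] = {
	[ST_IDLE]    = { [EV_POLL] = sm_nop,  [EV_TIMEOUT] = sm_nop },
	[ST_WORKING] = { [EV_POLL] = sm_work, [EV_TIMEOUT] = sm_nop },
};

/* Keep dispatching while handlers ask for another round. */
static enum wait sm_event_loop(struct ctx *c, enum event ev)
{
	enum wait w;

	while ((w = jumptable[c->st][ev](c)) == WAIT_AGAIN)
		;
	return w;
}

int main(void)
{
	struct ctx c = { .st = ST_WORKING, .pending = 3 };

	sm_event_loop(&c, EV_POLL);
	printf("final state: %d\n", c.st);
	return 0;
}

The table keeps per-state behavior declarative: adding a state or event means adding a row or column, not touching a monolithic switch.
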
/*
- * Power management for queue devices
- */
-void ap_queue_suspend(struct ap_device *ap_dev)
-{
- struct ap_queue *aq = to_ap_queue(&ap_dev->device);
-
- /* Poll on the device until all requests are finished. */
- spin_lock_bh(&aq->lock);
- aq->state = AP_STATE_SUSPEND_WAIT;
- while (ap_sm_event(aq, AP_EVENT_POLL) != AP_WAIT_NONE)
- ;
- aq->state = AP_STATE_BORKED;
- spin_unlock_bh(&aq->lock);
-}
-EXPORT_SYMBOL(ap_queue_suspend);
-
-void ap_queue_resume(struct ap_device *ap_dev)
-{
-}
-EXPORT_SYMBOL(ap_queue_resume);
-
-/*
* AP queue related attributes.
*/
static ssize_t request_count_show(struct device *dev,
@@ -470,12 +554,20 @@ static ssize_t request_count_show(struct device *dev,
char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
- unsigned int req_cnt;
+ bool valid = false;
+ u64 req_cnt;
spin_lock_bh(&aq->lock);
- req_cnt = aq->total_request_count;
+ if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
+ req_cnt = aq->total_request_count;
+ valid = true;
+ }
spin_unlock_bh(&aq->lock);
- return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
+
+ if (valid)
+ return sysfs_emit(buf, "%llu\n", req_cnt);
+ else
+ return sysfs_emit(buf, "-\n");
}
static ssize_t request_count_store(struct device *dev,
@@ -500,9 +592,10 @@ static ssize_t requestq_count_show(struct device *dev,
unsigned int reqq_cnt = 0;
spin_lock_bh(&aq->lock);
- reqq_cnt = aq->requestq_count;
+ if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
+ reqq_cnt = aq->requestq_count;
spin_unlock_bh(&aq->lock);
- return snprintf(buf, PAGE_SIZE, "%d\n", reqq_cnt);
+ return sysfs_emit(buf, "%d\n", reqq_cnt);
}
static DEVICE_ATTR_RO(requestq_count);
@@ -514,9 +607,10 @@ static ssize_t pendingq_count_show(struct device *dev,
unsigned int penq_cnt = 0;
spin_lock_bh(&aq->lock);
- penq_cnt = aq->pendingq_count;
+ if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
+ penq_cnt = aq->pendingq_count;
spin_unlock_bh(&aq->lock);
- return snprintf(buf, PAGE_SIZE, "%d\n", penq_cnt);
+ return sysfs_emit(buf, "%d\n", penq_cnt);
}
static DEVICE_ATTR_RO(pendingq_count);
@@ -528,17 +622,17 @@ static ssize_t reset_show(struct device *dev,
int rc = 0;
spin_lock_bh(&aq->lock);
- switch (aq->state) {
- case AP_STATE_RESET_START:
- case AP_STATE_RESET_WAIT:
- rc = snprintf(buf, PAGE_SIZE, "Reset in progress.\n");
+ switch (aq->sm_state) {
+ case AP_SM_STATE_RESET_START:
+ case AP_SM_STATE_RESET_WAIT:
+ rc = sysfs_emit(buf, "Reset in progress.\n");
break;
- case AP_STATE_WORKING:
- case AP_STATE_QUEUE_FULL:
- rc = snprintf(buf, PAGE_SIZE, "Reset Timer armed.\n");
+ case AP_SM_STATE_WORKING:
+ case AP_SM_STATE_QUEUE_FULL:
+ rc = sysfs_emit(buf, "Reset Timer armed.\n");
break;
default:
- rc = snprintf(buf, PAGE_SIZE, "No Reset Timer set.\n");
+ rc = sysfs_emit(buf, "No Reset Timer set.\n");
}
spin_unlock_bh(&aq->lock);
return rc;
@@ -552,12 +646,12 @@ static ssize_t reset_store(struct device *dev,
spin_lock_bh(&aq->lock);
__ap_flush_queue(aq);
- aq->state = AP_STATE_RESET_START;
- ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
+ aq->sm_state = AP_SM_STATE_RESET_START;
+ ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
spin_unlock_bh(&aq->lock);
- AP_DBF(DBF_INFO, "reset queue=%02x.%04x triggered by user\n",
- AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ AP_DBF_INFO("%s reset queue=%02x.%04x triggered by user\n",
+ __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return count;
}
@@ -568,27 +662,243 @@ static ssize_t interrupt_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
+ struct ap_queue_status status;
int rc = 0;
spin_lock_bh(&aq->lock);
- if (aq->state == AP_STATE_SETIRQ_WAIT)
- rc = snprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
- else if (aq->interrupt == AP_INTR_ENABLED)
- rc = snprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
- else
- rc = snprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
+ if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT) {
+ rc = sysfs_emit(buf, "Enable Interrupt pending.\n");
+ } else {
+ status = ap_tapq(aq->qid, NULL);
+ if (status.irq_enabled)
+ rc = sysfs_emit(buf, "Interrupts enabled.\n");
+ else
+ rc = sysfs_emit(buf, "Interrupts disabled.\n");
+ }
spin_unlock_bh(&aq->lock);
+
return rc;
}
static DEVICE_ATTR_RO(interrupt);
+static ssize_t config_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ int rc;
+
+ spin_lock_bh(&aq->lock);
+ rc = sysfs_emit(buf, "%d\n", aq->config ? 1 : 0);
+ spin_unlock_bh(&aq->lock);
+ return rc;
+}
+
+static DEVICE_ATTR_RO(config);
+
+static ssize_t chkstop_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ int rc;
+
+ spin_lock_bh(&aq->lock);
+ rc = sysfs_emit(buf, "%d\n", aq->chkstop ? 1 : 0);
+ spin_unlock_bh(&aq->lock);
+ return rc;
+}
+
+static DEVICE_ATTR_RO(chkstop);
+
+static ssize_t ap_functions_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ struct ap_queue_status status;
+ struct ap_tapq_hwinfo hwinfo;
+
+ status = ap_test_queue(aq->qid, 1, &hwinfo);
+ if (status.response_code > AP_RESPONSE_BUSY) {
+ pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
+ status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return -EIO;
+ }
+
+ return sysfs_emit(buf, "0x%08X\n", hwinfo.fac);
+}
+
+static DEVICE_ATTR_RO(ap_functions);
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ struct ap_device *ap_dev = &aq->ap_dev;
+ int rc;
+
+ device_lock(dev);
+ if (ap_dev->driver_override)
+ rc = sysfs_emit(buf, "%s\n", ap_dev->driver_override);
+ else
+ rc = sysfs_emit(buf, "\n");
+ device_unlock(dev);
+
+ return rc;
+}
+
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ struct ap_device *ap_dev = &aq->ap_dev;
+ int rc = -EINVAL;
+ bool old_value;
+
+ if (mutex_lock_interruptible(&ap_attr_mutex))
+ return -ERESTARTSYS;
+
+ /* Do not allow driver override if apmask/aqmask is in use */
+ if (ap_apmask_aqmask_in_use)
+ goto out;
+
+ old_value = ap_dev->driver_override ? true : false;
+ rc = driver_set_override(dev, &ap_dev->driver_override, buf, count);
+ if (rc)
+ goto out;
+ if (old_value && !ap_dev->driver_override)
+ --ap_driver_override_ctr;
+ else if (!old_value && ap_dev->driver_override)
+ ++ap_driver_override_ctr;
+
+ rc = count;
+
+out:
+ mutex_unlock(&ap_attr_mutex);
+ return rc;
+}
+
+static DEVICE_ATTR_RW(driver_override);
+
+#ifdef CONFIG_AP_DEBUG
+static ssize_t states_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ int rc = 0;
+
+ spin_lock_bh(&aq->lock);
+ /* queue device state */
+ switch (aq->dev_state) {
+ case AP_DEV_STATE_UNINITIATED:
+ rc = sysfs_emit(buf, "UNINITIATED\n");
+ break;
+ case AP_DEV_STATE_OPERATING:
+ rc = sysfs_emit(buf, "OPERATING");
+ break;
+ case AP_DEV_STATE_SHUTDOWN:
+ rc = sysfs_emit(buf, "SHUTDOWN");
+ break;
+ case AP_DEV_STATE_ERROR:
+ rc = sysfs_emit(buf, "ERROR");
+ break;
+ default:
+ rc = sysfs_emit(buf, "UNKNOWN");
+ }
+ /* state machine state */
+ if (aq->dev_state) {
+ switch (aq->sm_state) {
+ case AP_SM_STATE_RESET_START:
+ rc += sysfs_emit_at(buf, rc, " [RESET_START]\n");
+ break;
+ case AP_SM_STATE_RESET_WAIT:
+ rc += sysfs_emit_at(buf, rc, " [RESET_WAIT]\n");
+ break;
+ case AP_SM_STATE_SETIRQ_WAIT:
+ rc += sysfs_emit_at(buf, rc, " [SETIRQ_WAIT]\n");
+ break;
+ case AP_SM_STATE_IDLE:
+ rc += sysfs_emit_at(buf, rc, " [IDLE]\n");
+ break;
+ case AP_SM_STATE_WORKING:
+ rc += sysfs_emit_at(buf, rc, " [WORKING]\n");
+ break;
+ case AP_SM_STATE_QUEUE_FULL:
+ rc += sysfs_emit_at(buf, rc, " [FULL]\n");
+ break;
+ case AP_SM_STATE_ASSOC_WAIT:
+ rc += sysfs_emit_at(buf, rc, " [ASSOC_WAIT]\n");
+ break;
+ default:
+ rc += sysfs_emit_at(buf, rc, " [UNKNOWN]\n");
+ }
+ }
+ spin_unlock_bh(&aq->lock);
+
+ return rc;
+}
+static DEVICE_ATTR_RO(states);
+
+static ssize_t last_err_rc_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ int rc;
+
+ spin_lock_bh(&aq->lock);
+ rc = aq->last_err_rc;
+ spin_unlock_bh(&aq->lock);
+
+ switch (rc) {
+ case AP_RESPONSE_NORMAL:
+ return sysfs_emit(buf, "NORMAL\n");
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ return sysfs_emit(buf, "Q_NOT_AVAIL\n");
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ return sysfs_emit(buf, "RESET_IN_PROGRESS\n");
+ case AP_RESPONSE_DECONFIGURED:
+ return sysfs_emit(buf, "DECONFIGURED\n");
+ case AP_RESPONSE_CHECKSTOPPED:
+ return sysfs_emit(buf, "CHECKSTOPPED\n");
+ case AP_RESPONSE_BUSY:
+ return sysfs_emit(buf, "BUSY\n");
+ case AP_RESPONSE_INVALID_ADDRESS:
+ return sysfs_emit(buf, "INVALID_ADDRESS\n");
+ case AP_RESPONSE_OTHERWISE_CHANGED:
+ return sysfs_emit(buf, "OTHERWISE_CHANGED\n");
+ case AP_RESPONSE_Q_FULL:
+ return sysfs_emit(buf, "Q_FULL/NO_PENDING_REPLY\n");
+ case AP_RESPONSE_INDEX_TOO_BIG:
+ return sysfs_emit(buf, "INDEX_TOO_BIG\n");
+ case AP_RESPONSE_NO_FIRST_PART:
+ return sysfs_emit(buf, "NO_FIRST_PART\n");
+ case AP_RESPONSE_MESSAGE_TOO_BIG:
+ return sysfs_emit(buf, "MESSAGE_TOO_BIG\n");
+ case AP_RESPONSE_REQ_FAC_NOT_INST:
+ return sysfs_emit(buf, "REQ_FAC_NOT_INST\n");
+ default:
+ return sysfs_emit(buf, "response code %d\n", rc);
+ }
+}
+static DEVICE_ATTR_RO(last_err_rc);
+#endif
+
static struct attribute *ap_queue_dev_attrs[] = {
&dev_attr_request_count.attr,
&dev_attr_requestq_count.attr,
&dev_attr_pendingq_count.attr,
&dev_attr_reset.attr,
&dev_attr_interrupt.attr,
+ &dev_attr_config.attr,
+ &dev_attr_chkstop.attr,
+ &dev_attr_ap_functions.attr,
+ &dev_attr_driver_override.attr,
+#ifdef CONFIG_AP_DEBUG
+ &dev_attr_states.attr,
+ &dev_attr_last_err_rc.attr,
+#endif
NULL
};
@@ -606,33 +916,285 @@ static struct device_type ap_queue_type = {
.groups = ap_queue_dev_attr_groups,
};
-static void ap_queue_device_release(struct device *dev)
+static ssize_t se_bind_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ struct ap_queue_status status;
+ struct ap_tapq_hwinfo hwinfo;
+
+ if (!ap_q_supports_bind(aq))
+ return sysfs_emit(buf, "-\n");
+
+ status = ap_test_queue(aq->qid, 1, &hwinfo);
+ if (status.response_code > AP_RESPONSE_BUSY) {
+ pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
+ status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return -EIO;
+ }
+
+ /* update queue's SE bind state */
+ spin_lock_bh(&aq->lock);
+ aq->se_bstate = hwinfo.bs;
+ spin_unlock_bh(&aq->lock);
+
+ switch (hwinfo.bs) {
+ case AP_BS_Q_USABLE:
+ case AP_BS_Q_USABLE_NO_SECURE_KEY:
+ return sysfs_emit(buf, "bound\n");
+ default:
+ return sysfs_emit(buf, "unbound\n");
+ }
+}
+
+static ssize_t se_bind_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ struct ap_queue_status status;
+ struct ap_tapq_hwinfo hwinfo;
+ bool value;
+ int rc;
+
+ if (!ap_q_supports_bind(aq))
+ return -EINVAL;
+
+ /* only 0 (unbind) and 1 (bind) allowed */
+ rc = kstrtobool(buf, &value);
+ if (rc)
+ return rc;
+
+ if (!value) {
+ /* Unbind. Set F bit arg and trigger RAPQ */
+ spin_lock_bh(&aq->lock);
+ __ap_flush_queue(aq);
+ aq->rapq_fbit = 1;
+ _ap_queue_init_state(aq);
+ rc = count;
+ goto out;
+ }
+
+ /* Bind. Check current SE bind state */
+ status = ap_test_queue(aq->qid, 1, &hwinfo);
+ if (status.response_code) {
+ AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return -EIO;
+ }
+
+ /* Update BS state */
+ spin_lock_bh(&aq->lock);
+ aq->se_bstate = hwinfo.bs;
+ if (hwinfo.bs != AP_BS_Q_AVAIL_FOR_BINDING) {
+ AP_DBF_WARN("%s bind attempt with bs %d on queue 0x%02x.%04x\n",
+ __func__, hwinfo.bs,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Check SM state */
+ if (aq->sm_state < AP_SM_STATE_IDLE) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ /* invoke BAPQ */
+ status = ap_bapq(aq->qid);
+ if (status.response_code) {
+ AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ rc = -EIO;
+ goto out;
+ }
+ aq->assoc_idx = ASSOC_IDX_INVALID;
+
+ /* verify SE bind state */
+ status = ap_test_queue(aq->qid, 1, &hwinfo);
+ if (status.response_code) {
+ AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ rc = -EIO;
+ goto out;
+ }
+ aq->se_bstate = hwinfo.bs;
+ if (!(hwinfo.bs == AP_BS_Q_USABLE ||
+ hwinfo.bs == AP_BS_Q_USABLE_NO_SECURE_KEY)) {
+ AP_DBF_WARN("%s BAPQ success, but bs shows %d on queue 0x%02x.%04x\n",
+ __func__, hwinfo.bs,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ rc = -EIO;
+ goto out;
+ }
+
+ /* SE bind was successful */
+ AP_DBF_INFO("%s bapq(0x%02x.%04x) success\n", __func__,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ rc = count;
+
+out:
+ spin_unlock_bh(&aq->lock);
+ return rc;
+}
+
+static DEVICE_ATTR_RW(se_bind);
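
se_bind_store() follows a check-act-verify shape: TAPQ to confirm the queue is in the 'available for binding' state, BAPQ to bind, then TAPQ again to confirm the queue ended up usable. A sketch of that control flow under stand-in helpers; none of this is the real AP instruction interface:

#include <errno.h>
#include <stdio.h>

enum bs { BS_AVAIL_FOR_BINDING, BS_USABLE, BS_ERROR };

static enum bs query_bind_state(void)   { return BS_AVAIL_FOR_BINDING; }
static int issue_bind(void)             { return 0; }
static enum bs requery_bind_state(void) { return BS_USABLE; }

/* check -> act -> verify, failing early at each step */
static int bind_queue(void)
{
	if (query_bind_state() != BS_AVAIL_FOR_BINDING)
		return -EINVAL;		/* wrong starting state */
	if (issue_bind())
		return -EIO;		/* bind instruction failed */
	if (requery_bind_state() != BS_USABLE)
		return -EIO;		/* bind did not take effect */
	return 0;
}

int main(void)
{
	printf("bind rc=%d\n", bind_queue());
	return 0;
}
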
+
+static ssize_t se_associate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+ struct ap_queue_status status;
+ struct ap_tapq_hwinfo hwinfo;
+
+ if (!ap_q_supports_assoc(aq))
+ return sysfs_emit(buf, "-\n");
+
+ status = ap_test_queue(aq->qid, 1, &hwinfo);
+ if (status.response_code > AP_RESPONSE_BUSY) {
+ pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
+ status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return -EIO;
+ }
+
+ /* update queue's SE bind state */
+ spin_lock_bh(&aq->lock);
+ aq->se_bstate = hwinfo.bs;
+ spin_unlock_bh(&aq->lock);
+
+ switch (hwinfo.bs) {
+ case AP_BS_Q_USABLE:
+ if (aq->assoc_idx == ASSOC_IDX_INVALID) {
+ AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__);
+ return -EIO;
+ }
+ return sysfs_emit(buf, "associated %u\n", aq->assoc_idx);
+ case AP_BS_Q_USABLE_NO_SECURE_KEY:
+ if (aq->assoc_idx != ASSOC_IDX_INVALID)
+ return sysfs_emit(buf, "association pending\n");
+ fallthrough;
+ default:
+ return sysfs_emit(buf, "unassociated\n");
+ }
+}
+
+static ssize_t se_associate_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct ap_queue *aq = to_ap_queue(dev);
+ struct ap_queue_status status;
+ struct ap_tapq_hwinfo hwinfo;
+ unsigned int value;
+ int rc;
- if (!list_empty(&aq->list)) {
- spin_lock_bh(&ap_list_lock);
- list_del_init(&aq->list);
- spin_unlock_bh(&ap_list_lock);
+ if (!ap_q_supports_assoc(aq))
+ return -EINVAL;
+
+ /* association index needs to be >= 0 */
+ rc = kstrtouint(buf, 0, &value);
+ if (rc)
+ return rc;
+ if (value >= ASSOC_IDX_INVALID)
+ return -EINVAL;
+
+ /* check current SE bind state */
+ status = ap_test_queue(aq->qid, 1, &hwinfo);
+ if (status.response_code) {
+ AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ return -EIO;
+ }
+ spin_lock_bh(&aq->lock);
+ aq->se_bstate = hwinfo.bs;
+ if (hwinfo.bs != AP_BS_Q_USABLE_NO_SECURE_KEY) {
+ AP_DBF_WARN("%s association attempt with bs %d on queue 0x%02x.%04x\n",
+ __func__, hwinfo.bs,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* check SM state */
+ if (aq->sm_state != AP_SM_STATE_IDLE) {
+ rc = -EBUSY;
+ goto out;
}
+
+ /* trigger the asynchronous association request */
+ status = ap_aapq(aq->qid, value);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
+ aq->sm_state = AP_SM_STATE_ASSOC_WAIT;
+ aq->assoc_idx = value;
+ ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
+ break;
+ default:
+ AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ rc = -EIO;
+ goto out;
+ }
+
+ rc = count;
+
+out:
+ spin_unlock_bh(&aq->lock);
+ return rc;
+}
+
+static DEVICE_ATTR_RW(se_associate);
+
+static struct attribute *ap_queue_dev_sb_attrs[] = {
+ &dev_attr_se_bind.attr,
+ &dev_attr_se_associate.attr,
+ NULL
+};
+
+static struct attribute_group ap_queue_dev_sb_attr_group = {
+ .attrs = ap_queue_dev_sb_attrs
+};
+
+static const struct attribute_group *ap_queue_dev_sb_attr_groups[] = {
+ &ap_queue_dev_sb_attr_group,
+ NULL
+};
+
+static void ap_queue_device_release(struct device *dev)
+{
+ struct ap_queue *aq = to_ap_queue(dev);
+
+ spin_lock_bh(&ap_queues_lock);
+ hash_del(&aq->hnode);
+ spin_unlock_bh(&ap_queues_lock);
+
kfree(aq);
}
-struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
+struct ap_queue *ap_queue_create(ap_qid_t qid, struct ap_card *ac)
{
struct ap_queue *aq;
aq = kzalloc(sizeof(*aq), GFP_KERNEL);
if (!aq)
return NULL;
+ aq->card = ac;
aq->ap_dev.device.release = ap_queue_device_release;
aq->ap_dev.device.type = &ap_queue_type;
- aq->ap_dev.device_type = device_type;
+ aq->ap_dev.device_type = ac->ap_dev.device_type;
+ /* in SE environment add bind/associate attributes group */
+ if (ap_is_se_guest() && ap_q_supported_in_se(aq))
+ aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;
aq->qid = qid;
- aq->state = AP_STATE_RESET_START;
- aq->interrupt = AP_INTR_DISABLED;
spin_lock_init(&aq->lock);
- INIT_LIST_HEAD(&aq->list);
INIT_LIST_HEAD(&aq->pendingq);
INIT_LIST_HEAD(&aq->requestq);
timer_setup(&aq->timeout, ap_request_timeout, 0);
@@ -645,7 +1207,7 @@ void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
aq->reply = reply;
spin_lock_bh(&aq->lock);
- ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
+ ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
spin_unlock_bh(&aq->lock);
}
EXPORT_SYMBOL(ap_queue_init_reply);
@@ -655,26 +1217,79 @@ EXPORT_SYMBOL(ap_queue_init_reply);
* @aq: The AP device to queue the message to
* @ap_msg: The message that is to be added
*/
-void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
+int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
{
- /* For asynchronous message handling a valid receive-callback
- * is required.
- */
+ int rc = 0;
+
+ /* msg needs to have a valid receive-callback */
BUG_ON(!ap_msg->receive);
spin_lock_bh(&aq->lock);
- /* Queue the message. */
- list_add_tail(&ap_msg->list, &aq->requestq);
- aq->requestq_count++;
- aq->total_request_count++;
- atomic_inc(&aq->card->total_request_count);
+
+ /* only allow queueing new messages if device state is ok */
+ if (aq->dev_state == AP_DEV_STATE_OPERATING) {
+ list_add_tail(&ap_msg->list, &aq->requestq);
+ aq->requestq_count++;
+ aq->total_request_count++;
+ atomic64_inc(&aq->card->total_request_count);
+ } else {
+ rc = -ENODEV;
+ }
+
 /* Send/receive as many requests from the queue as possible. */
- ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL));
+ ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
+
spin_unlock_bh(&aq->lock);
+
+ return rc;
}
EXPORT_SYMBOL(ap_queue_message);
/**
+ * ap_queue_usable(): Check if queue is usable just now.
+ * @aq: The AP queue device to test for usability.
+ *
+ * This function is intended for the scheduler to query if it makes
+ * sense to enqueue a message into this AP queue device by calling
+ * ap_queue_message(). The perspective is very short-term as the
+ * state machine and device state(s) may change at any time.
+ */
+bool ap_queue_usable(struct ap_queue *aq)
+{
+ bool rc = true;
+
+ spin_lock_bh(&aq->lock);
+
+ /* check for not configured or checkstopped */
+ if (!aq->config || aq->chkstop) {
+ rc = false;
+ goto unlock_and_out;
+ }
+
+ /* device state needs to be ok */
+ if (aq->dev_state != AP_DEV_STATE_OPERATING) {
+ rc = false;
+ goto unlock_and_out;
+ }
+
+ /* SE guest's queues additionally need to be bound */
+ if (ap_is_se_guest()) {
+ if (!ap_q_supported_in_se(aq)) {
+ rc = false;
+ goto unlock_and_out;
+ }
+ if (ap_q_needs_bind(aq) &&
+ !(aq->se_bstate == AP_BS_Q_USABLE ||
+ aq->se_bstate == AP_BS_Q_USABLE_NO_SECURE_KEY))
+ rc = false;
+ }
+
+unlock_and_out:
+ spin_unlock_bh(&aq->lock);
+ return rc;
+}
+EXPORT_SYMBOL(ap_queue_usable);
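
Because ap_queue_usable() takes only a momentary snapshot, a caller is still expected to handle a failing ap_queue_message(). A hypothetical caller shape illustrating that hint-then-recheck discipline; queue_usable()/queue_message() below are simplified stand-ins:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct queue { bool operating; int depth; };

/* Momentary snapshot, comparable to ap_queue_usable(). */
static bool queue_usable(const struct queue *q)
{
	return q->operating;
}

/* The enqueue re-checks the state, as ap_queue_message() does
 * under the queue lock, and may still fail. */
static int queue_message(struct queue *q)
{
	if (!q->operating)
		return -ENODEV;
	q->depth++;
	return 0;
}

int main(void)
{
	struct queue q = { .operating = true, .depth = 0 };

	if (queue_usable(&q) && queue_message(&q) == 0)
		printf("enqueued, depth=%d\n", q.depth);
	return 0;
}
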
+
+/**
* ap_cancel_message(): Cancel a crypto request.
* @aq: The AP device that has the message queued
* @ap_msg: The message that is to be removed
@@ -725,6 +1340,7 @@ static void __ap_flush_queue(struct ap_queue *aq)
ap_msg->rc = -EAGAIN;
ap_msg->receive(aq, ap_msg, NULL);
}
+ aq->queue_count = 0;
}
void ap_flush_queue(struct ap_queue *aq)
@@ -735,24 +1351,44 @@ void ap_flush_queue(struct ap_queue *aq)
}
EXPORT_SYMBOL(ap_flush_queue);
-void ap_queue_remove(struct ap_queue *aq)
+void ap_queue_prepare_remove(struct ap_queue *aq)
{
- ap_flush_queue(aq);
- del_timer_sync(&aq->timeout);
+ spin_lock_bh(&aq->lock);
+ /* flush queue */
+ __ap_flush_queue(aq);
+ /* move queue device state to SHUTDOWN in progress */
+ aq->dev_state = AP_DEV_STATE_SHUTDOWN;
+ spin_unlock_bh(&aq->lock);
+ timer_delete_sync(&aq->timeout);
+}
- /* reset with zero, also clears irq registration */
+void ap_queue_remove(struct ap_queue *aq)
+{
+ /*
+ * All messages have been flushed and the device state
+ * is SHUTDOWN. Now reset with zero, which also clears
+ * the irq registration, and move the device state to
+ * the initial value AP_DEV_STATE_UNINITIATED.
+ */
spin_lock_bh(&aq->lock);
- ap_zapq(aq->qid);
- aq->state = AP_STATE_BORKED;
+ ap_zapq(aq->qid, 0);
+ aq->dev_state = AP_DEV_STATE_UNINITIATED;
spin_unlock_bh(&aq->lock);
}
-EXPORT_SYMBOL(ap_queue_remove);
-void ap_queue_reinit_state(struct ap_queue *aq)
+void _ap_queue_init_state(struct ap_queue *aq)
+{
+ aq->dev_state = AP_DEV_STATE_OPERATING;
+ aq->sm_state = AP_SM_STATE_RESET_START;
+ aq->last_err_rc = 0;
+ aq->assoc_idx = ASSOC_IDX_INVALID;
+ ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
+}
+
+void ap_queue_init_state(struct ap_queue *aq)
{
spin_lock_bh(&aq->lock);
- aq->state = AP_STATE_RESET_START;
- ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
+ _ap_queue_init_state(aq);
spin_unlock_bh(&aq->lock);
}
-EXPORT_SYMBOL(ap_queue_reinit_state);
+EXPORT_SYMBOL(ap_queue_init_state);
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 2f92bbed4bf6..ad1cd699f53b 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -2,1263 +2,709 @@
/*
* pkey device driver
*
- * Copyright IBM Corp. 2017
+ * Copyright IBM Corp. 2017, 2023
+ *
* Author(s): Harald Freudenberger
*/
-#define KMSG_COMPONENT "pkey"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "pkey: " fmt
-#include <linux/fs.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/slab.h>
-#include <linux/kallsyms.h>
-#include <linux/debugfs.h>
-#include <linux/random.h>
-#include <linux/cpufeature.h>
-#include <asm/zcrypt.h>
-#include <asm/cpacf.h>
-#include <asm/pkey.h>
-#include <crypto/aes.h>
#include "zcrypt_api.h"
+#include "zcrypt_ccamisc.h"
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("IBM Corporation");
-MODULE_DESCRIPTION("s390 protected key interface");
-
-/* Size of parameter block used for all cca requests/replies */
-#define PARMBSIZE 512
-
-/* Size of vardata block used for some of the cca requests/replies */
-#define VARDATASIZE 4096
-
-/* mask of available pckmo subfunctions, fetched once at module init */
-static cpacf_mask_t pckmo_functions;
+#include "pkey_base.h"
/*
- * debug feature data and functions
+ * Helper functions
*/
-
-static debug_info_t *debug_info;
-
-#define DEBUG_DBG(...) debug_sprintf_event(debug_info, 6, ##__VA_ARGS__)
-#define DEBUG_INFO(...) debug_sprintf_event(debug_info, 5, ##__VA_ARGS__)
-#define DEBUG_WARN(...) debug_sprintf_event(debug_info, 4, ##__VA_ARGS__)
-#define DEBUG_ERR(...) debug_sprintf_event(debug_info, 3, ##__VA_ARGS__)
-
-static void __init pkey_debug_init(void)
+static int key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
+ const u8 *key, size_t keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype,
+ u32 xflags)
{
- debug_info = debug_register("pkey", 1, 1, 4 * sizeof(long));
- debug_register_view(debug_info, &debug_sprintf_view);
- debug_set_level(debug_info, 3);
-}
+ int rc;
-static void __exit pkey_debug_exit(void)
-{
- debug_unregister(debug_info);
-}
+ /* try the direct way */
+ rc = pkey_handler_key_to_protkey(apqns, nr_apqns,
+ key, keylen,
+ protkey, protkeylen,
+ protkeytype, xflags);
+
+ /* if this did not work, try the slowpath way */
+ if (rc == -ENODEV) {
+ rc = pkey_handler_slowpath_key_to_protkey(apqns, nr_apqns,
+ key, keylen,
+ protkey, protkeylen,
+ protkeytype, xflags);
+ if (rc)
+ rc = -ENODEV;
+ }
-/* Key token types */
-#define TOKTYPE_NON_CCA 0x00 /* Non-CCA key token */
-#define TOKTYPE_CCA_INTERNAL 0x01 /* CCA internal key token */
-
-/* For TOKTYPE_NON_CCA: */
-#define TOKVER_PROTECTED_KEY 0x01 /* Protected key token */
-
-/* For TOKTYPE_CCA_INTERNAL: */
-#define TOKVER_CCA_AES 0x04 /* CCA AES key token */
-
-/* header part of a key token */
-struct keytoken_header {
- u8 type; /* one of the TOKTYPE values */
- u8 res0[3];
- u8 version; /* one of the TOKVER values */
- u8 res1[3];
-} __packed;
-
-/* inside view of a secure key token (only type 0x01 version 0x04) */
-struct secaeskeytoken {
- u8 type; /* 0x01 for internal key token */
- u8 res0[3];
- u8 version; /* should be 0x04 */
- u8 res1[1];
- u8 flag; /* key flags */
- u8 res2[1];
- u64 mkvp; /* master key verification pattern */
- u8 key[32]; /* key value (encrypted) */
- u8 cv[8]; /* control vector */
- u16 bitsize; /* key bit size */
- u16 keysize; /* key byte size */
- u8 tvv[4]; /* token validation value */
-} __packed;
-
-/* inside view of a protected key token (only type 0x00 version 0x01) */
-struct protaeskeytoken {
- u8 type; /* 0x00 for PAES specific key tokens */
- u8 res0[3];
- u8 version; /* should be 0x01 for protected AES key token */
- u8 res1[3];
- u32 keytype; /* key type, one of the PKEY_KEYTYPE values */
- u32 len; /* bytes actually stored in protkey[] */
- u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */
-} __packed;
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
/*
- * Simple check if the token is a valid CCA secure AES key
- * token. If keybitsize is given, the bitsize of the key is
- * also checked. Returns 0 on success or errno value on failure.
+ * In-Kernel function: Transform a key blob (of any type) into a protected key
*/
-static int check_secaeskeytoken(const u8 *token, int keybitsize)
+int pkey_key2protkey(const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype, u32 xflags)
{
- struct secaeskeytoken *t = (struct secaeskeytoken *) token;
+ int rc;
- if (t->type != TOKTYPE_CCA_INTERNAL) {
- DEBUG_ERR(
- "%s secure token check failed, type mismatch 0x%02x != 0x%02x\n",
- __func__, (int) t->type, TOKTYPE_CCA_INTERNAL);
- return -EINVAL;
- }
- if (t->version != TOKVER_CCA_AES) {
- DEBUG_ERR(
- "%s secure token check failed, version mismatch 0x%02x != 0x%02x\n",
- __func__, (int) t->version, TOKVER_CCA_AES);
- return -EINVAL;
- }
- if (keybitsize > 0 && t->bitsize != keybitsize) {
- DEBUG_ERR(
- "%s secure token check failed, bitsize mismatch %d != %d\n",
- __func__, (int) t->bitsize, keybitsize);
- return -EINVAL;
+ rc = key2protkey(NULL, 0, key, keylen,
+ protkey, protkeylen, protkeytype, xflags);
+ if (rc == -ENODEV) {
+ pkey_handler_request_modules();
+ rc = key2protkey(NULL, 0, key, keylen,
+ protkey, protkeylen, protkeytype, xflags);
}
- return 0;
+ return rc;
}
+EXPORT_SYMBOL(pkey_key2protkey);
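
pkey_key2protkey() retries exactly once: a first -ENODEV triggers pkey_handler_request_modules() and then a second conversion attempt. A small sketch of that load-on-demand retry shape, with the loader and converter simulated:

#include <errno.h>
#include <stdio.h>

static int handlers_loaded;

/* Fails with -ENODEV until a handler is available. */
static int convert(void)
{
	return handlers_loaded ? 0 : -ENODEV;
}

/* Simulates a successful on-demand module load. */
static void request_modules(void)
{
	handlers_loaded = 1;
}

/* Try once, load handlers on -ENODEV, then try exactly once more. */
static int convert_with_retry(void)
{
	int rc = convert();

	if (rc == -ENODEV) {
		request_modules();
		rc = convert();
	}
	return rc;
}

int main(void)
{
	printf("rc=%d\n", convert_with_retry());
	return 0;
}
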
/*
- * Allocate consecutive memory for request CPRB, request param
- * block, reply CPRB and reply param block and fill in values
- * for the common fields. Returns 0 on success or errno value
- * on failure.
+ * Ioctl functions
*/
-static int alloc_and_prep_cprbmem(size_t paramblen,
- u8 **pcprbmem,
- struct CPRBX **preqCPRB,
- struct CPRBX **prepCPRB)
+
+static void *_copy_key_from_user(void __user *ukey, size_t keylen)
{
- u8 *cprbmem;
- size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen;
- struct CPRBX *preqcblk, *prepcblk;
-
- /*
- * allocate consecutive memory for request CPRB, request param
- * block, reply CPRB and reply param block
- */
- cprbmem = kcalloc(2, cprbplusparamblen, GFP_KERNEL);
- if (!cprbmem)
- return -ENOMEM;
+ if (!ukey || keylen < MINKEYBLOBBUFSIZE || keylen > KEYBLOBBUFSIZE)
+ return ERR_PTR(-EINVAL);
- preqcblk = (struct CPRBX *) cprbmem;
- prepcblk = (struct CPRBX *) (cprbmem + cprbplusparamblen);
-
- /* fill request cprb struct */
- preqcblk->cprb_len = sizeof(struct CPRBX);
- preqcblk->cprb_ver_id = 0x02;
- memcpy(preqcblk->func_id, "T2", 2);
- preqcblk->rpl_msgbl = cprbplusparamblen;
- if (paramblen) {
- preqcblk->req_parmb =
- ((u8 *) preqcblk) + sizeof(struct CPRBX);
- preqcblk->rpl_parmb =
- ((u8 *) prepcblk) + sizeof(struct CPRBX);
- }
+ return memdup_user(ukey, keylen);
+}
- *pcprbmem = cprbmem;
- *preqCPRB = preqcblk;
- *prepCPRB = prepcblk;
+static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns)
+{
+ if (!uapqns || nr_apqns == 0)
+ return NULL;
- return 0;
+ return memdup_array_user(uapqns, nr_apqns, sizeof(struct pkey_apqn));
}
-/*
- * Free the cprb memory allocated with the function above.
- * If the scrub value is not zero, the memory is filled
- * with zeros before freeing (useful if there was some
- * clear key material in there).
- */
-static void free_cprbmem(void *mem, size_t paramblen, int scrub)
+static int pkey_ioctl_genseck(struct pkey_genseck __user *ugs)
{
- if (scrub)
- memzero_explicit(mem, 2 * (sizeof(struct CPRBX) + paramblen));
- kfree(mem);
+ struct pkey_genseck kgs;
+ struct pkey_apqn apqn;
+ u32 keybuflen;
+ int rc;
+
+ if (copy_from_user(&kgs, ugs, sizeof(kgs)))
+ return -EFAULT;
+
+ apqn.card = kgs.cardnr;
+ apqn.domain = kgs.domain;
+ keybuflen = sizeof(kgs.seckey.seckey);
+ rc = pkey_handler_gen_key(&apqn, 1,
+ kgs.keytype, PKEY_TYPE_CCA_DATA, 0, 0,
+ kgs.seckey.seckey, &keybuflen, NULL, 0);
+ pr_debug("gen_key()=%d\n", rc);
+ if (!rc && copy_to_user(ugs, &kgs, sizeof(kgs)))
+ rc = -EFAULT;
+ memzero_explicit(&kgs, sizeof(kgs));
+
+ return rc;
}
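
Each of these ioctl helpers shares one discipline: copy the argument struct in, do the work, copy it back only on success, and always scrub the kernel copy because it may hold key material. A userspace sketch of that scrub-on-exit pattern; the volatile-based scrub() stands in for the kernel's memzero_explicit():

#include <stddef.h>
#include <stdio.h>

struct args { unsigned char key[32]; int rc; };

/* Clearing through a volatile pointer so the compiler cannot
 * drop it; the kernel uses memzero_explicit() for this. */
static void scrub(void *p, size_t n)
{
	volatile unsigned char *v = p;

	while (n--)
		*v++ = 0;
}

static int handle_ioctl(struct args *uarg)
{
	struct args k = *uarg;	/* stands in for copy_from_user() */
	int rc = 0;		/* ... do the key work on k ... */

	if (!rc)
		*uarg = k;	/* copy back only on success */
	scrub(&k, sizeof(k));	/* always clear key material */
	return rc;
}

int main(void)
{
	struct args a = { .rc = 0 };

	printf("rc=%d\n", handle_ioctl(&a));
	return 0;
}
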
-/*
- * Helper function to prepare the xcrb struct
- */
-static inline void prep_xcrb(struct ica_xcRB *pxcrb,
- u16 cardnr,
- struct CPRBX *preqcblk,
- struct CPRBX *prepcblk)
+static int pkey_ioctl_clr2seck(struct pkey_clr2seck __user *ucs)
{
- memset(pxcrb, 0, sizeof(*pxcrb));
- pxcrb->agent_ID = 0x4341; /* 'CA' */
- pxcrb->user_defined = (cardnr == 0xFFFF ? AUTOSELECT : cardnr);
- pxcrb->request_control_blk_length =
- preqcblk->cprb_len + preqcblk->req_parml;
- pxcrb->request_control_blk_addr = (void __user *) preqcblk;
- pxcrb->reply_control_blk_length = preqcblk->rpl_msgbl;
- pxcrb->reply_control_blk_addr = (void __user *) prepcblk;
+ struct pkey_clr2seck kcs;
+ struct pkey_apqn apqn;
+ u32 keybuflen;
+ int rc;
+
+ if (copy_from_user(&kcs, ucs, sizeof(kcs)))
+ return -EFAULT;
+
+ apqn.card = kcs.cardnr;
+ apqn.domain = kcs.domain;
+ keybuflen = sizeof(kcs.seckey.seckey);
+ rc = pkey_handler_clr_to_key(&apqn, 1,
+ kcs.keytype, PKEY_TYPE_CCA_DATA, 0, 0,
+ kcs.clrkey.clrkey,
+ pkey_keytype_aes_to_size(kcs.keytype),
+ kcs.seckey.seckey, &keybuflen, NULL, 0);
+ pr_debug("clr_to_key()=%d\n", rc);
+ if (!rc && copy_to_user(ucs, &kcs, sizeof(kcs)))
+ rc = -EFAULT;
+ memzero_explicit(&kcs, sizeof(kcs));
+
+ return rc;
}
-/*
- * Helper function which calls zcrypt_send_cprb with
- * memory management segment adjusted to kernel space
- * so that the copy_from_user called within this
- * function do in fact copy from kernel space.
- */
-static inline int _zcrypt_send_cprb(struct ica_xcRB *xcrb)
+static int pkey_ioctl_sec2protk(struct pkey_sec2protk __user *usp)
{
+ struct pkey_sec2protk ksp;
+ struct pkey_apqn apqn;
int rc;
- mm_segment_t old_fs = get_fs();
- set_fs(KERNEL_DS);
- rc = zcrypt_send_cprb(xcrb);
- set_fs(old_fs);
+ if (copy_from_user(&ksp, usp, sizeof(ksp)))
+ return -EFAULT;
+
+ apqn.card = ksp.cardnr;
+ apqn.domain = ksp.domain;
+ ksp.protkey.len = sizeof(ksp.protkey.protkey);
+ rc = pkey_handler_key_to_protkey(&apqn, 1,
+ ksp.seckey.seckey,
+ sizeof(ksp.seckey.seckey),
+ ksp.protkey.protkey,
+ &ksp.protkey.len, &ksp.protkey.type,
+ 0);
+ pr_debug("key_to_protkey()=%d\n", rc);
+ if (!rc && copy_to_user(usp, &ksp, sizeof(ksp)))
+ rc = -EFAULT;
+ memzero_explicit(&ksp, sizeof(ksp));
return rc;
}
-/*
- * Generate (random) AES secure key.
- */
-int pkey_genseckey(u16 cardnr, u16 domain,
- u32 keytype, struct pkey_seckey *seckey)
+static int pkey_ioctl_clr2protk(struct pkey_clr2protk __user *ucp)
{
- int i, rc, keysize;
- int seckeysize;
- u8 *mem;
- struct CPRBX *preqcblk, *prepcblk;
- struct ica_xcRB xcrb;
- struct kgreqparm {
- u8 subfunc_code[2];
- u16 rule_array_len;
- struct lv1 {
- u16 len;
- char key_form[8];
- char key_length[8];
- char key_type1[8];
- char key_type2[8];
- } lv1;
- struct lv2 {
- u16 len;
- struct keyid {
- u16 len;
- u16 attr;
- u8 data[SECKEYBLOBSIZE];
- } keyid[6];
- } lv2;
- } *preqparm;
- struct kgrepparm {
- u8 subfunc_code[2];
- u16 rule_array_len;
- struct lv3 {
- u16 len;
- u16 keyblocklen;
- struct {
- u16 toklen;
- u16 tokattr;
- u8 tok[0];
- /* ... some more data ... */
- } keyblock;
- } lv3;
- } *prepparm;
-
- /* get already prepared memory for 2 cprbs with param block each */
- rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
- if (rc)
- return rc;
-
- /* fill request cprb struct */
- preqcblk->domain = domain;
-
- /* fill request cprb param block with KG request */
- preqparm = (struct kgreqparm *) preqcblk->req_parmb;
- memcpy(preqparm->subfunc_code, "KG", 2);
- preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
- preqparm->lv1.len = sizeof(struct lv1);
- memcpy(preqparm->lv1.key_form, "OP ", 8);
- switch (keytype) {
- case PKEY_KEYTYPE_AES_128:
- keysize = 16;
- memcpy(preqparm->lv1.key_length, "KEYLN16 ", 8);
- break;
- case PKEY_KEYTYPE_AES_192:
- keysize = 24;
- memcpy(preqparm->lv1.key_length, "KEYLN24 ", 8);
- break;
- case PKEY_KEYTYPE_AES_256:
- keysize = 32;
- memcpy(preqparm->lv1.key_length, "KEYLN32 ", 8);
- break;
- default:
- DEBUG_ERR(
- "%s unknown/unsupported keytype %d\n",
- __func__, keytype);
- rc = -EINVAL;
- goto out;
- }
- memcpy(preqparm->lv1.key_type1, "AESDATA ", 8);
- preqparm->lv2.len = sizeof(struct lv2);
- for (i = 0; i < 6; i++) {
- preqparm->lv2.keyid[i].len = sizeof(struct keyid);
- preqparm->lv2.keyid[i].attr = (i == 2 ? 0x30 : 0x10);
- }
- preqcblk->req_parml = sizeof(struct kgreqparm);
+ struct pkey_clr2protk kcp;
+ struct clearkeytoken *t;
+ u32 keylen;
+ u8 *tmpbuf;
+ int rc;
- /* fill xcrb struct */
- prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+ if (copy_from_user(&kcp, ucp, sizeof(kcp)))
+ return -EFAULT;
- /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = _zcrypt_send_cprb(&xcrb);
- if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
- __func__, (int) cardnr, (int) domain, rc);
- goto out;
+ /* build a 'clear key token' from the clear key value */
+ keylen = pkey_keytype_aes_to_size(kcp.keytype);
+ if (!keylen) {
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n",
+ __func__, kcp.keytype);
+ memzero_explicit(&kcp, sizeof(kcp));
+ return -EINVAL;
}
-
- /* check response returncode and reasoncode */
- if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR(
- "%s secure key generate failure, card response %d/%d\n",
- __func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
- rc = -EIO;
- goto out;
+ tmpbuf = kzalloc(sizeof(*t) + keylen, GFP_KERNEL);
+ if (!tmpbuf) {
+ memzero_explicit(&kcp, sizeof(kcp));
+ return -ENOMEM;
}
+ t = (struct clearkeytoken *)tmpbuf;
+ t->type = TOKTYPE_NON_CCA;
+ t->version = TOKVER_CLEAR_KEY;
+ t->keytype = (keylen - 8) >> 3;
+ t->len = keylen;
+ memcpy(t->clearkey, kcp.clrkey.clrkey, keylen);
+ kcp.protkey.len = sizeof(kcp.protkey.protkey);
- /* process response cprb param block */
- prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepparm = (struct kgrepparm *) prepcblk->rpl_parmb;
-
- /* check length of the returned secure key token */
- seckeysize = prepparm->lv3.keyblock.toklen
- - sizeof(prepparm->lv3.keyblock.toklen)
- - sizeof(prepparm->lv3.keyblock.tokattr);
- if (seckeysize != SECKEYBLOBSIZE) {
- DEBUG_ERR(
- "%s secure token size mismatch %d != %d bytes\n",
- __func__, seckeysize, SECKEYBLOBSIZE);
- rc = -EIO;
- goto out;
- }
+ rc = key2protkey(NULL, 0,
+ tmpbuf, sizeof(*t) + keylen,
+ kcp.protkey.protkey,
+ &kcp.protkey.len, &kcp.protkey.type, 0);
+ pr_debug("key2protkey()=%d\n", rc);
- /* check secure key token */
- rc = check_secaeskeytoken(prepparm->lv3.keyblock.tok, 8*keysize);
- if (rc) {
- rc = -EIO;
- goto out;
- }
+ kfree_sensitive(tmpbuf);
- /* copy the generated secure key token */
- memcpy(seckey->seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
+ if (!rc && copy_to_user(ucp, &kcp, sizeof(kcp)))
+ rc = -EFAULT;
+ memzero_explicit(&kcp, sizeof(kcp));
-out:
- free_cprbmem(mem, PARMBSIZE, 0);
return rc;
}
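
One detail worth spelling out: the keytype field of the clear key token built above is derived from the key byte length. The arithmetic lines up with the uapi constants (an observation from the values, not stated in the source):

/* t->keytype = (keylen - 8) >> 3 maps the clear key length to the
 * uapi keytype constants (PKEY_KEYTYPE_AES_128/192/256 == 1/2/3):
 *   16 bytes: (16 - 8) >> 3 = 1 -> PKEY_KEYTYPE_AES_128
 *   24 bytes: (24 - 8) >> 3 = 2 -> PKEY_KEYTYPE_AES_192
 *   32 bytes: (32 - 8) >> 3 = 3 -> PKEY_KEYTYPE_AES_256
 */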
-EXPORT_SYMBOL(pkey_genseckey);
-/*
- * Generate an AES secure key with given key value.
- */
-int pkey_clr2seckey(u16 cardnr, u16 domain, u32 keytype,
- const struct pkey_clrkey *clrkey,
- struct pkey_seckey *seckey)
+static int pkey_ioctl_findcard(struct pkey_findcard __user *ufc)
{
- int rc, keysize, seckeysize;
- u8 *mem;
- struct CPRBX *preqcblk, *prepcblk;
- struct ica_xcRB xcrb;
- struct cmreqparm {
- u8 subfunc_code[2];
- u16 rule_array_len;
- char rule_array[8];
- struct lv1 {
- u16 len;
- u8 clrkey[0];
- } lv1;
- struct lv2 {
- u16 len;
- struct keyid {
- u16 len;
- u16 attr;
- u8 data[SECKEYBLOBSIZE];
- } keyid;
- } lv2;
- } *preqparm;
- struct lv2 *plv2;
- struct cmrepparm {
- u8 subfunc_code[2];
- u16 rule_array_len;
- struct lv3 {
- u16 len;
- u16 keyblocklen;
- struct {
- u16 toklen;
- u16 tokattr;
- u8 tok[0];
- /* ... some more data ... */
- } keyblock;
- } lv3;
- } *prepparm;
-
- /* get already prepared memory for 2 cprbs with param block each */
- rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
- if (rc)
- return rc;
-
- /* fill request cprb struct */
- preqcblk->domain = domain;
-
- /* fill request cprb param block with CM request */
- preqparm = (struct cmreqparm *) preqcblk->req_parmb;
- memcpy(preqparm->subfunc_code, "CM", 2);
- memcpy(preqparm->rule_array, "AES ", 8);
- preqparm->rule_array_len =
- sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
- switch (keytype) {
- case PKEY_KEYTYPE_AES_128:
- keysize = 16;
- break;
- case PKEY_KEYTYPE_AES_192:
- keysize = 24;
- break;
- case PKEY_KEYTYPE_AES_256:
- keysize = 32;
- break;
- default:
- DEBUG_ERR(
- "%s unknown/unsupported keytype %d\n",
- __func__, keytype);
- rc = -EINVAL;
- goto out;
- }
- preqparm->lv1.len = sizeof(struct lv1) + keysize;
- memcpy(preqparm->lv1.clrkey, clrkey->clrkey, keysize);
- plv2 = (struct lv2 *) (((u8 *) &preqparm->lv2) + keysize);
- plv2->len = sizeof(struct lv2);
- plv2->keyid.len = sizeof(struct keyid);
- plv2->keyid.attr = 0x30;
- preqcblk->req_parml = sizeof(struct cmreqparm) + keysize;
-
- /* fill xcrb struct */
- prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
-
- /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = _zcrypt_send_cprb(&xcrb);
- if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
- __func__, (int) cardnr, (int) domain, rc);
- goto out;
- }
+ struct pkey_findcard kfc;
+ struct pkey_apqn *apqns;
+ size_t nr_apqns;
+ int rc;
- /* check response returncode and reasoncode */
- if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR(
- "%s clear key import failure, card response %d/%d\n",
- __func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
- rc = -EIO;
- goto out;
- }
+ if (copy_from_user(&kfc, ufc, sizeof(kfc)))
+ return -EFAULT;
- /* process response cprb param block */
- prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepparm = (struct cmrepparm *) prepcblk->rpl_parmb;
-
- /* check length of the returned secure key token */
- seckeysize = prepparm->lv3.keyblock.toklen
- - sizeof(prepparm->lv3.keyblock.toklen)
- - sizeof(prepparm->lv3.keyblock.tokattr);
- if (seckeysize != SECKEYBLOBSIZE) {
- DEBUG_ERR(
- "%s secure token size mismatch %d != %d bytes\n",
- __func__, seckeysize, SECKEYBLOBSIZE);
- rc = -EIO;
- goto out;
- }
+ nr_apqns = MAXAPQNSINLIST;
+ apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), GFP_KERNEL);
+ if (!apqns)
+ return -ENOMEM;
- /* check secure key token */
- rc = check_secaeskeytoken(prepparm->lv3.keyblock.tok, 8*keysize);
+ rc = pkey_handler_apqns_for_key(kfc.seckey.seckey,
+ sizeof(kfc.seckey.seckey),
+ PKEY_FLAGS_MATCH_CUR_MKVP,
+ apqns, &nr_apqns, 0);
+ if (rc == -ENODEV)
+ rc = pkey_handler_apqns_for_key(kfc.seckey.seckey,
+ sizeof(kfc.seckey.seckey),
+ PKEY_FLAGS_MATCH_ALT_MKVP,
+ apqns, &nr_apqns, 0);
+ pr_debug("apqns_for_key()=%d\n", rc);
if (rc) {
- rc = -EIO;
- goto out;
+ kfree(apqns);
+ return rc;
}
+ kfc.cardnr = apqns[0].card;
+ kfc.domain = apqns[0].domain;
+ kfree(apqns);
+ if (copy_to_user(ufc, &kfc, sizeof(kfc)))
+ return -EFAULT;
- /* copy the generated secure key token */
- memcpy(seckey->seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
-
-out:
- free_cprbmem(mem, PARMBSIZE, 1);
- return rc;
+ return 0;
}
-EXPORT_SYMBOL(pkey_clr2seckey);
-/*
- * Derive a protected key from the secure key blob.
- */
-int pkey_sec2protkey(u16 cardnr, u16 domain,
- const struct pkey_seckey *seckey,
- struct pkey_protkey *protkey)
+static int pkey_ioctl_skey2pkey(struct pkey_skey2pkey __user *usp)
{
+ struct pkey_skey2pkey ksp;
int rc;
- u8 *mem;
- struct CPRBX *preqcblk, *prepcblk;
- struct ica_xcRB xcrb;
- struct uskreqparm {
- u8 subfunc_code[2];
- u16 rule_array_len;
- struct lv1 {
- u16 len;
- u16 attr_len;
- u16 attr_flags;
- } lv1;
- struct lv2 {
- u16 len;
- u16 attr_len;
- u16 attr_flags;
- u8 token[0]; /* cca secure key token */
- } lv2 __packed;
- } *preqparm;
- struct uskrepparm {
- u8 subfunc_code[2];
- u16 rule_array_len;
- struct lv3 {
- u16 len;
- u16 attr_len;
- u16 attr_flags;
- struct cpacfkeyblock {
- u8 version; /* version of this struct */
- u8 flags[2];
- u8 algo;
- u8 form;
- u8 pad1[3];
- u16 keylen;
- u8 key[64]; /* the key (keylen bytes) */
- u16 keyattrlen;
- u8 keyattr[32];
- u8 pad2[1];
- u8 vptype;
- u8 vp[32]; /* verification pattern */
- } keyblock;
- } lv3 __packed;
- } *prepparm;
-
- /* get already prepared memory for 2 cprbs with param block each */
- rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
- if (rc)
- return rc;
- /* fill request cprb struct */
- preqcblk->domain = domain;
-
- /* fill request cprb param block with USK request */
- preqparm = (struct uskreqparm *) preqcblk->req_parmb;
- memcpy(preqparm->subfunc_code, "US", 2);
- preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
- preqparm->lv1.len = sizeof(struct lv1);
- preqparm->lv1.attr_len = sizeof(struct lv1) - sizeof(preqparm->lv1.len);
- preqparm->lv1.attr_flags = 0x0001;
- preqparm->lv2.len = sizeof(struct lv2) + SECKEYBLOBSIZE;
- preqparm->lv2.attr_len = sizeof(struct lv2)
- - sizeof(preqparm->lv2.len) + SECKEYBLOBSIZE;
- preqparm->lv2.attr_flags = 0x0000;
- memcpy(preqparm->lv2.token, seckey->seckey, SECKEYBLOBSIZE);
- preqcblk->req_parml = sizeof(struct uskreqparm) + SECKEYBLOBSIZE;
-
- /* fill xcrb struct */
- prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
-
- /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = _zcrypt_send_cprb(&xcrb);
- if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
- __func__, (int) cardnr, (int) domain, rc);
- goto out;
- }
+ if (copy_from_user(&ksp, usp, sizeof(ksp)))
+ return -EFAULT;
+
+ ksp.protkey.len = sizeof(ksp.protkey.protkey);
+ rc = pkey_handler_key_to_protkey(NULL, 0,
+ ksp.seckey.seckey,
+ sizeof(ksp.seckey.seckey),
+ ksp.protkey.protkey,
+ &ksp.protkey.len,
+ &ksp.protkey.type, 0);
+ pr_debug("key_to_protkey()=%d\n", rc);
+ if (!rc && copy_to_user(usp, &ksp, sizeof(ksp)))
+ rc = -EFAULT;
+ memzero_explicit(&ksp, sizeof(ksp));
- /* check response returncode and reasoncode */
- if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR(
- "%s unwrap secure key failure, card response %d/%d\n",
- __func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
- rc = -EIO;
- goto out;
- }
- if (prepcblk->ccp_rscode != 0) {
- DEBUG_WARN(
- "%s unwrap secure key warning, card response %d/%d\n",
- __func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
- }
+ return rc;
+}
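
A companion userspace sketch (same assumptions as the PKEY_GENSECK example above) that feeds a 64-byte CCA secure key token through PKEY_SKEY2PKEY:

#include <string.h>
#include <sys/ioctl.h>
#include <asm/pkey.h>

/* Convert a CCA AES secure key token into a CPACF protected key;
 * fd is an open file descriptor on /dev/pkey.
 */
static int seckey_to_protkey(int fd, const unsigned char token[64],
                             struct pkey_protkey *out)
{
        struct pkey_skey2pkey sp;

        memset(&sp, 0, sizeof(sp));
        memcpy(sp.seckey.seckey, token, sizeof(sp.seckey.seckey));
        if (ioctl(fd, PKEY_SKEY2PKEY, &sp) < 0)
                return -1;
        *out = sp.protkey;      /* type, len and wrapped key material */
        return 0;
}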
- /* process response cprb param block */
- prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepparm = (struct uskrepparm *) prepcblk->rpl_parmb;
-
- /* check the returned keyblock */
- if (prepparm->lv3.keyblock.version != 0x01) {
- DEBUG_ERR(
- "%s reply param keyblock version mismatch 0x%02x != 0x01\n",
- __func__, (int) prepparm->lv3.keyblock.version);
- rc = -EIO;
- goto out;
- }
+static int pkey_ioctl_verifykey(struct pkey_verifykey __user *uvk)
+{
+ u32 keytype, keybitsize, flags;
+ struct pkey_verifykey kvk;
+ int rc;
-	/* copy the translated protected key */
- switch (prepparm->lv3.keyblock.keylen) {
- case 16+32:
- protkey->type = PKEY_KEYTYPE_AES_128;
- break;
- case 24+32:
- protkey->type = PKEY_KEYTYPE_AES_192;
- break;
- case 32+32:
- protkey->type = PKEY_KEYTYPE_AES_256;
- break;
- default:
- DEBUG_ERR("%s unknown/unsupported keytype %d\n",
- __func__, prepparm->lv3.keyblock.keylen);
- rc = -EIO;
- goto out;
- }
- protkey->len = prepparm->lv3.keyblock.keylen;
- memcpy(protkey->protkey, prepparm->lv3.keyblock.key, protkey->len);
+ if (copy_from_user(&kvk, uvk, sizeof(kvk)))
+ return -EFAULT;
+
+ kvk.cardnr = 0xFFFF;
+ kvk.domain = 0xFFFF;
+ rc = pkey_handler_verify_key(kvk.seckey.seckey,
+ sizeof(kvk.seckey.seckey),
+ &kvk.cardnr, &kvk.domain,
+ &keytype, &keybitsize, &flags, 0);
+ pr_debug("verify_key()=%d\n", rc);
+ if (!rc && keytype != PKEY_TYPE_CCA_DATA)
+ rc = -EINVAL;
+ kvk.attributes = PKEY_VERIFY_ATTR_AES;
+ kvk.keysize = (u16)keybitsize;
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ kvk.attributes |= PKEY_VERIFY_ATTR_OLD_MKVP;
+ if (!rc && copy_to_user(uvk, &kvk, sizeof(kvk)))
+ rc = -EFAULT;
+ memzero_explicit(&kvk, sizeof(kvk));
-out:
- free_cprbmem(mem, PARMBSIZE, 0);
return rc;
}
-EXPORT_SYMBOL(pkey_sec2protkey);
-/*
- * Create a protected key from a clear key value.
- */
-int pkey_clr2protkey(u32 keytype,
- const struct pkey_clrkey *clrkey,
- struct pkey_protkey *protkey)
+static int pkey_ioctl_genprotk(struct pkey_genprotk __user *ugp)
{
- long fc;
- int keysize;
- u8 paramblock[64];
-
- switch (keytype) {
- case PKEY_KEYTYPE_AES_128:
- keysize = 16;
- fc = CPACF_PCKMO_ENC_AES_128_KEY;
- break;
- case PKEY_KEYTYPE_AES_192:
- keysize = 24;
- fc = CPACF_PCKMO_ENC_AES_192_KEY;
- break;
- case PKEY_KEYTYPE_AES_256:
- keysize = 32;
- fc = CPACF_PCKMO_ENC_AES_256_KEY;
- break;
- default:
- DEBUG_ERR("%s unknown/unsupported keytype %d\n",
- __func__, keytype);
- return -EINVAL;
- }
-
- /*
- * Check if the needed pckmo subfunction is available.
- * These subfunctions can be enabled/disabled by customers
- * in the LPAR profile or may even change on the fly.
- */
- if (!cpacf_test_func(&pckmo_functions, fc)) {
- DEBUG_ERR("%s pckmo functions not available\n", __func__);
- return -EOPNOTSUPP;
- }
-
- /* prepare param block */
- memset(paramblock, 0, sizeof(paramblock));
- memcpy(paramblock, clrkey->clrkey, keysize);
+ struct pkey_genprotk kgp;
+ int rc;
- /* call the pckmo instruction */
- cpacf_pckmo(fc, paramblock);
+ if (copy_from_user(&kgp, ugp, sizeof(kgp)))
+ return -EFAULT;
- /* copy created protected key */
- protkey->type = keytype;
- protkey->len = keysize + 32;
- memcpy(protkey->protkey, paramblock, keysize + 32);
+ kgp.protkey.len = sizeof(kgp.protkey.protkey);
+ rc = pkey_handler_gen_key(NULL, 0, kgp.keytype,
+ PKEY_TYPE_PROTKEY, 0, 0,
+ kgp.protkey.protkey, &kgp.protkey.len,
+ &kgp.protkey.type, 0);
+ pr_debug("gen_key()=%d\n", rc);
+ if (!rc && copy_to_user(ugp, &kgp, sizeof(kgp)))
+ rc = -EFAULT;
+ memzero_explicit(&kgp, sizeof(kgp));
- return 0;
+ return rc;
}
-EXPORT_SYMBOL(pkey_clr2protkey);
-/*
- * query cryptographic facility from adapter
- */
-static int query_crypto_facility(u16 cardnr, u16 domain,
- const char *keyword,
- u8 *rarray, size_t *rarraylen,
- u8 *varray, size_t *varraylen)
+static int pkey_ioctl_verifyprotk(struct pkey_verifyprotk __user *uvp)
{
+ struct pkey_verifyprotk kvp;
+ struct protaeskeytoken *t;
+ u32 keytype;
+ u8 *tmpbuf;
int rc;
- u16 len;
- u8 *mem, *ptr;
- struct CPRBX *preqcblk, *prepcblk;
- struct ica_xcRB xcrb;
- struct fqreqparm {
- u8 subfunc_code[2];
- u16 rule_array_len;
- char rule_array[8];
- struct lv1 {
- u16 len;
- u8 data[VARDATASIZE];
- } lv1;
- u16 dummylen;
- } *preqparm;
- size_t parmbsize = sizeof(struct fqreqparm);
- struct fqrepparm {
- u8 subfunc_code[2];
- u8 lvdata[0];
- } *prepparm;
-
- /* get already prepared memory for 2 cprbs with param block each */
- rc = alloc_and_prep_cprbmem(parmbsize, &mem, &preqcblk, &prepcblk);
- if (rc)
- return rc;
-
- /* fill request cprb struct */
- preqcblk->domain = domain;
-
- /* fill request cprb param block with FQ request */
- preqparm = (struct fqreqparm *) preqcblk->req_parmb;
- memcpy(preqparm->subfunc_code, "FQ", 2);
- memcpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array));
- preqparm->rule_array_len =
- sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
- preqparm->lv1.len = sizeof(preqparm->lv1);
- preqparm->dummylen = sizeof(preqparm->dummylen);
- preqcblk->req_parml = parmbsize;
- /* fill xcrb struct */
- prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+ if (copy_from_user(&kvp, uvp, sizeof(kvp)))
+ return -EFAULT;
- /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
- rc = _zcrypt_send_cprb(&xcrb);
- if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed with errno %d\n",
- __func__, (int) cardnr, (int) domain, rc);
- goto out;
+ keytype = pkey_aes_bitsize_to_keytype(8 * kvp.protkey.len);
+ if (!keytype) {
+ PKEY_DBF_ERR("%s unknown/unsupported protkey length %u\n",
+ __func__, kvp.protkey.len);
+ memzero_explicit(&kvp, sizeof(kvp));
+ return -EINVAL;
}
- /* check response returncode and reasoncode */
- if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR(
- "%s unwrap secure key failure, card response %d/%d\n",
- __func__,
- (int) prepcblk->ccp_rtcode,
- (int) prepcblk->ccp_rscode);
- rc = -EIO;
- goto out;
+ /* build a 'protected key token' from the raw protected key */
+ tmpbuf = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!tmpbuf) {
+ memzero_explicit(&kvp, sizeof(kvp));
+ return -ENOMEM;
}
+ t = (struct protaeskeytoken *)tmpbuf;
+ t->type = TOKTYPE_NON_CCA;
+ t->version = TOKVER_PROTECTED_KEY;
+ t->keytype = keytype;
+ t->len = kvp.protkey.len;
+ memcpy(t->protkey, kvp.protkey.protkey, kvp.protkey.len);
- /* process response cprb param block */
- prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
- prepparm = (struct fqrepparm *) prepcblk->rpl_parmb;
- ptr = prepparm->lvdata;
-
- /* check and possibly copy reply rule array */
- len = *((u16 *) ptr);
- if (len > sizeof(u16)) {
- ptr += sizeof(u16);
- len -= sizeof(u16);
- if (rarray && rarraylen && *rarraylen > 0) {
- *rarraylen = (len > *rarraylen ? *rarraylen : len);
- memcpy(rarray, ptr, *rarraylen);
- }
- ptr += len;
- }
-	/* check and possibly copy reply var array */
- len = *((u16 *) ptr);
- if (len > sizeof(u16)) {
- ptr += sizeof(u16);
- len -= sizeof(u16);
- if (varray && varraylen && *varraylen > 0) {
- *varraylen = (len > *varraylen ? *varraylen : len);
- memcpy(varray, ptr, *varraylen);
- }
- ptr += len;
- }
+ rc = pkey_handler_verify_key(tmpbuf, sizeof(*t),
+ NULL, NULL, NULL, NULL, NULL, 0);
+ pr_debug("verify_key()=%d\n", rc);
+
+ kfree_sensitive(tmpbuf);
+ memzero_explicit(&kvp, sizeof(kvp));
-out:
- free_cprbmem(mem, parmbsize, 0);
return rc;
}
-/*
- * Fetch the current and old mkvp values via
- * query_crypto_facility from adapter.
- */
-static int fetch_mkvp(u16 cardnr, u16 domain, u64 mkvp[2])
+static int pkey_ioctl_kblob2protk(struct pkey_kblob2pkey __user *utp)
{
- int rc, found = 0;
- size_t rlen, vlen;
- u8 *rarray, *varray, *pg;
-
- pg = (u8 *) __get_free_page(GFP_KERNEL);
- if (!pg)
- return -ENOMEM;
- rarray = pg;
- varray = pg + PAGE_SIZE/2;
- rlen = vlen = PAGE_SIZE/2;
-
- rc = query_crypto_facility(cardnr, domain, "STATICSA",
- rarray, &rlen, varray, &vlen);
- if (rc == 0 && rlen > 8*8 && vlen > 184+8) {
- if (rarray[8*8] == '2') {
- /* current master key state is valid */
- mkvp[0] = *((u64 *)(varray + 184));
- mkvp[1] = *((u64 *)(varray + 172));
- found = 1;
- }
- }
-
- free_page((unsigned long) pg);
-
- return found ? 0 : -ENOENT;
-}
-
-/* struct to hold cached mkvp info for each card/domain */
-struct mkvp_info {
- struct list_head list;
- u16 cardnr;
- u16 domain;
- u64 mkvp[2];
-};
-
-/* a list with mkvp_info entries */
-static LIST_HEAD(mkvp_list);
-static DEFINE_SPINLOCK(mkvp_list_lock);
+ struct pkey_kblob2pkey ktp;
+ u8 *kkey;
+ int rc;
-static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 mkvp[2])
-{
- int rc = -ENOENT;
- struct mkvp_info *ptr;
-
- spin_lock_bh(&mkvp_list_lock);
- list_for_each_entry(ptr, &mkvp_list, list) {
- if (ptr->cardnr == cardnr &&
- ptr->domain == domain) {
- memcpy(mkvp, ptr->mkvp, 2 * sizeof(u64));
- rc = 0;
- break;
- }
- }
- spin_unlock_bh(&mkvp_list_lock);
+ if (copy_from_user(&ktp, utp, sizeof(ktp)))
+ return -EFAULT;
+ kkey = _copy_key_from_user(ktp.key, ktp.keylen);
+ if (IS_ERR(kkey))
+ return PTR_ERR(kkey);
+ ktp.protkey.len = sizeof(ktp.protkey.protkey);
+ rc = key2protkey(NULL, 0, kkey, ktp.keylen,
+ ktp.protkey.protkey, &ktp.protkey.len,
+ &ktp.protkey.type, 0);
+ pr_debug("key2protkey()=%d\n", rc);
+ kfree_sensitive(kkey);
+ if (!rc && copy_to_user(utp, &ktp, sizeof(ktp)))
+ rc = -EFAULT;
+ memzero_explicit(&ktp, sizeof(ktp));
return rc;
}
-static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp[2])
+static int pkey_ioctl_genseck2(struct pkey_genseck2 __user *ugs)
{
- int found = 0;
- struct mkvp_info *ptr;
-
- spin_lock_bh(&mkvp_list_lock);
- list_for_each_entry(ptr, &mkvp_list, list) {
- if (ptr->cardnr == cardnr &&
- ptr->domain == domain) {
- memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64));
- found = 1;
- break;
- }
+ u32 klen = KEYBLOBBUFSIZE;
+ struct pkey_genseck2 kgs;
+ struct pkey_apqn *apqns;
+ u8 *kkey;
+ int rc;
+ u32 u;
+
+ if (copy_from_user(&kgs, ugs, sizeof(kgs)))
+ return -EFAULT;
+ u = pkey_aes_bitsize_to_keytype(kgs.size);
+ if (!u) {
+ PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, kgs.size);
+ return -EINVAL;
}
- if (!found) {
- ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC);
- if (!ptr) {
- spin_unlock_bh(&mkvp_list_lock);
- return;
- }
- ptr->cardnr = cardnr;
- ptr->domain = domain;
- memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64));
- list_add(&ptr->list, &mkvp_list);
+ apqns = _copy_apqns_from_user(kgs.apqns, kgs.apqn_entries);
+ if (IS_ERR(apqns))
+ return PTR_ERR(apqns);
+ kkey = kzalloc(klen, GFP_KERNEL);
+ if (!kkey) {
+ kfree(apqns);
+ return -ENOMEM;
}
- spin_unlock_bh(&mkvp_list_lock);
-}
-
-static void mkvp_cache_scrub(u16 cardnr, u16 domain)
-{
- struct mkvp_info *ptr;
-
- spin_lock_bh(&mkvp_list_lock);
- list_for_each_entry(ptr, &mkvp_list, list) {
- if (ptr->cardnr == cardnr &&
- ptr->domain == domain) {
- list_del(&ptr->list);
- kfree(ptr);
- break;
+ rc = pkey_handler_gen_key(apqns, kgs.apqn_entries,
+ u, kgs.type, kgs.size, kgs.keygenflags,
+ kkey, &klen, NULL, 0);
+ pr_debug("gen_key()=%d\n", rc);
+ kfree(apqns);
+ if (rc) {
+ kfree_sensitive(kkey);
+ return rc;
+ }
+ if (kgs.key) {
+ if (kgs.keylen < klen) {
+ kfree_sensitive(kkey);
+ return -EINVAL;
+ }
+ if (copy_to_user(kgs.key, kkey, klen)) {
+ kfree_sensitive(kkey);
+ return -EFAULT;
}
}
- spin_unlock_bh(&mkvp_list_lock);
-}
+ kgs.keylen = klen;
+ if (copy_to_user(ugs, &kgs, sizeof(kgs)))
+ rc = -EFAULT;
+ kfree_sensitive(kkey);
-static void __exit mkvp_cache_free(void)
-{
- struct mkvp_info *ptr, *pnext;
-
- spin_lock_bh(&mkvp_list_lock);
- list_for_each_entry_safe(ptr, pnext, &mkvp_list, list) {
- list_del(&ptr->list);
- kfree(ptr);
- }
- spin_unlock_bh(&mkvp_list_lock);
+ return rc;
}
-/*
- * Search for a matching crypto card based on the Master Key
- * Verification Pattern provided inside a secure key.
- */
-int pkey_findcard(const struct pkey_seckey *seckey,
- u16 *pcardnr, u16 *pdomain, int verify)
+static int pkey_ioctl_clr2seck2(struct pkey_clr2seck2 __user *ucs)
{
- struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
- struct zcrypt_device_status_ext *device_status;
- u16 card, dom;
- u64 mkvp[2];
- int i, rc, oi = -1;
-
- /* mkvp must not be zero */
- if (t->mkvp == 0)
+ u32 klen = KEYBLOBBUFSIZE;
+ struct pkey_clr2seck2 kcs;
+ struct pkey_apqn *apqns;
+ u8 *kkey;
+ int rc;
+ u32 u;
+
+ if (copy_from_user(&kcs, ucs, sizeof(kcs)))
+ return -EFAULT;
+ u = pkey_aes_bitsize_to_keytype(kcs.size);
+ if (!u) {
+ PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, kcs.size);
+ memzero_explicit(&kcs, sizeof(kcs));
return -EINVAL;
-
- /* fetch status of all crypto cards */
- device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT,
- sizeof(struct zcrypt_device_status_ext),
- GFP_KERNEL);
- if (!device_status)
+ }
+ apqns = _copy_apqns_from_user(kcs.apqns, kcs.apqn_entries);
+ if (IS_ERR(apqns)) {
+ memzero_explicit(&kcs, sizeof(kcs));
+ return PTR_ERR(apqns);
+ }
+ kkey = kzalloc(klen, GFP_KERNEL);
+ if (!kkey) {
+ kfree(apqns);
+ memzero_explicit(&kcs, sizeof(kcs));
return -ENOMEM;
- zcrypt_device_status_mask_ext(device_status);
-
- /* walk through all crypto cards */
- for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
- card = AP_QID_CARD(device_status[i].qid);
- dom = AP_QID_QUEUE(device_status[i].qid);
- if (device_status[i].online &&
- device_status[i].functions & 0x04) {
- /* an enabled CCA Coprocessor card */
- /* try cached mkvp */
- if (mkvp_cache_fetch(card, dom, mkvp) == 0 &&
- t->mkvp == mkvp[0]) {
- if (!verify)
- break;
- /* verify: fetch mkvp from adapter */
- if (fetch_mkvp(card, dom, mkvp) == 0) {
- mkvp_cache_update(card, dom, mkvp);
- if (t->mkvp == mkvp[0])
- break;
- }
- }
- } else {
- /* Card is offline and/or not a CCA card. */
- /* del mkvp entry from cache if it exists */
- mkvp_cache_scrub(card, dom);
- }
}
- if (i >= MAX_ZDEV_ENTRIES_EXT) {
- /* nothing found, so this time without cache */
- for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
- if (!(device_status[i].online &&
- device_status[i].functions & 0x04))
- continue;
- card = AP_QID_CARD(device_status[i].qid);
- dom = AP_QID_QUEUE(device_status[i].qid);
- /* fresh fetch mkvp from adapter */
- if (fetch_mkvp(card, dom, mkvp) == 0) {
- mkvp_cache_update(card, dom, mkvp);
- if (t->mkvp == mkvp[0])
- break;
- if (t->mkvp == mkvp[1] && oi < 0)
- oi = i;
- }
+ rc = pkey_handler_clr_to_key(apqns, kcs.apqn_entries,
+ u, kcs.type, kcs.size, kcs.keygenflags,
+ kcs.clrkey.clrkey, kcs.size / 8,
+ kkey, &klen, NULL, 0);
+ pr_debug("clr_to_key()=%d\n", rc);
+ kfree(apqns);
+ if (rc) {
+ kfree_sensitive(kkey);
+ memzero_explicit(&kcs, sizeof(kcs));
+ return rc;
+ }
+ if (kcs.key) {
+ if (kcs.keylen < klen) {
+ kfree_sensitive(kkey);
+ memzero_explicit(&kcs, sizeof(kcs));
+ return -EINVAL;
}
- if (i >= MAX_ZDEV_ENTRIES_EXT && oi >= 0) {
- /* old mkvp matched, use this card then */
- card = AP_QID_CARD(device_status[oi].qid);
- dom = AP_QID_QUEUE(device_status[oi].qid);
+ if (copy_to_user(kcs.key, kkey, klen)) {
+ kfree_sensitive(kkey);
+ memzero_explicit(&kcs, sizeof(kcs));
+ return -EFAULT;
}
}
- if (i < MAX_ZDEV_ENTRIES_EXT || oi >= 0) {
- if (pcardnr)
- *pcardnr = card;
- if (pdomain)
- *pdomain = dom;
- rc = 0;
- } else
- rc = -ENODEV;
-
- kfree(device_status);
+ kcs.keylen = klen;
+ if (copy_to_user(ucs, &kcs, sizeof(kcs)))
+ rc = -EFAULT;
+ memzero_explicit(&kcs, sizeof(kcs));
+ kfree_sensitive(kkey);
+
return rc;
}
-EXPORT_SYMBOL(pkey_findcard);
-/*
- * Find card and transform secure key into protected key.
- */
-int pkey_skey2pkey(const struct pkey_seckey *seckey,
- struct pkey_protkey *protkey)
+static int pkey_ioctl_verifykey2(struct pkey_verifykey2 __user *uvk)
{
- u16 cardnr, domain;
- int rc, verify;
-
- /*
- * The pkey_sec2protkey call may fail when a card has been
- * addressed where the master key was changed after last fetch
- * of the mkvp into the cache. So first try without verify then
- * with verify enabled (thus refreshing the mkvp for each card).
- */
- for (verify = 0; verify < 2; verify++) {
- rc = pkey_findcard(seckey, &cardnr, &domain, verify);
- if (rc)
- continue;
- rc = pkey_sec2protkey(cardnr, domain, seckey, protkey);
- if (rc == 0)
- break;
- }
+ struct pkey_verifykey2 kvk;
+ u8 *kkey;
+ int rc;
- if (rc)
- DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+ if (copy_from_user(&kvk, uvk, sizeof(kvk)))
+ return -EFAULT;
+ kkey = _copy_key_from_user(kvk.key, kvk.keylen);
+ if (IS_ERR(kkey))
+ return PTR_ERR(kkey);
+
+ rc = pkey_handler_verify_key(kkey, kvk.keylen,
+ &kvk.cardnr, &kvk.domain,
+ &kvk.type, &kvk.size, &kvk.flags, 0);
+ pr_debug("verify_key()=%d\n", rc);
+
+ kfree_sensitive(kkey);
+ if (!rc && copy_to_user(uvk, &kvk, sizeof(kvk)))
+ return -EFAULT;
return rc;
}
-EXPORT_SYMBOL(pkey_skey2pkey);
-/*
- * Verify key and give back some info about the key.
- */
-int pkey_verifykey(const struct pkey_seckey *seckey,
- u16 *pcardnr, u16 *pdomain,
- u16 *pkeysize, u32 *pattributes)
+static int pkey_ioctl_kblob2protk2(struct pkey_kblob2pkey2 __user *utp)
{
- struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
- u16 cardnr, domain;
- u64 mkvp[2];
+ struct pkey_apqn *apqns = NULL;
+ struct pkey_kblob2pkey2 ktp;
+ u8 *kkey;
int rc;
-	/* check that the secure key is a valid AES secure key */
- rc = check_secaeskeytoken((u8 *) seckey, 0);
- if (rc)
- goto out;
- if (pattributes)
- *pattributes = PKEY_VERIFY_ATTR_AES;
- if (pkeysize)
- *pkeysize = t->bitsize;
-
- /* try to find a card which can handle this key */
- rc = pkey_findcard(seckey, &cardnr, &domain, 1);
- if (rc)
- goto out;
-
- /* check mkvp for old mkvp match */
- rc = mkvp_cache_fetch(cardnr, domain, mkvp);
- if (rc)
- goto out;
- if (t->mkvp == mkvp[1]) {
- DEBUG_DBG("%s secure key has old mkvp\n", __func__);
- if (pattributes)
- *pattributes |= PKEY_VERIFY_ATTR_OLD_MKVP;
- }
-
- if (pcardnr)
- *pcardnr = cardnr;
- if (pdomain)
- *pdomain = domain;
+ if (copy_from_user(&ktp, utp, sizeof(ktp)))
+ return -EFAULT;
+ apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries);
+ if (IS_ERR(apqns))
+ return PTR_ERR(apqns);
+ kkey = _copy_key_from_user(ktp.key, ktp.keylen);
+ if (IS_ERR(kkey)) {
+ kfree(apqns);
+ return PTR_ERR(kkey);
+ }
+ ktp.protkey.len = sizeof(ktp.protkey.protkey);
+ rc = key2protkey(apqns, ktp.apqn_entries, kkey, ktp.keylen,
+ ktp.protkey.protkey, &ktp.protkey.len,
+ &ktp.protkey.type, 0);
+ pr_debug("key2protkey()=%d\n", rc);
+ kfree(apqns);
+ kfree_sensitive(kkey);
+ if (!rc && copy_to_user(utp, &ktp, sizeof(ktp)))
+ rc = -EFAULT;
+ memzero_explicit(&ktp, sizeof(ktp));
-out:
- DEBUG_DBG("%s rc=%d\n", __func__, rc);
return rc;
}
-EXPORT_SYMBOL(pkey_verifykey);
-/*
- * Generate a random protected key
- */
-int pkey_genprotkey(__u32 keytype, struct pkey_protkey *protkey)
+static int pkey_ioctl_apqns4k(struct pkey_apqns4key __user *uak)
{
- struct pkey_clrkey clrkey;
- int keysize;
+ struct pkey_apqn *apqns = NULL;
+ struct pkey_apqns4key kak;
+ size_t nr_apqns, len;
+ u8 *kkey;
int rc;
- switch (keytype) {
- case PKEY_KEYTYPE_AES_128:
- keysize = 16;
- break;
- case PKEY_KEYTYPE_AES_192:
- keysize = 24;
- break;
- case PKEY_KEYTYPE_AES_256:
- keysize = 32;
- break;
- default:
- DEBUG_ERR("%s unknown/unsupported keytype %d\n", __func__,
- keytype);
- return -EINVAL;
+ if (copy_from_user(&kak, uak, sizeof(kak)))
+ return -EFAULT;
+ nr_apqns = kak.apqn_entries;
+ if (nr_apqns) {
+ apqns = kmalloc_array(nr_apqns,
+ sizeof(struct pkey_apqn),
+ GFP_KERNEL);
+ if (!apqns)
+ return -ENOMEM;
}
-
- /* generate a dummy random clear key */
- get_random_bytes(clrkey.clrkey, keysize);
-
- /* convert it to a dummy protected key */
- rc = pkey_clr2protkey(keytype, &clrkey, protkey);
- if (rc)
+ kkey = _copy_key_from_user(kak.key, kak.keylen);
+ if (IS_ERR(kkey)) {
+ kfree(apqns);
+ return PTR_ERR(kkey);
+ }
+ rc = pkey_handler_apqns_for_key(kkey, kak.keylen, kak.flags,
+ apqns, &nr_apqns, 0);
+ pr_debug("apqns_for_key()=%d\n", rc);
+ kfree_sensitive(kkey);
+ if (rc && rc != -ENOSPC) {
+ kfree(apqns);
return rc;
-
- /* replace the key part of the protected key with random bytes */
- get_random_bytes(protkey->protkey, keysize);
-
- return 0;
-}
-EXPORT_SYMBOL(pkey_genprotkey);
-
-/*
- * Verify if a protected key is still valid
- */
-int pkey_verifyprotkey(const struct pkey_protkey *protkey)
-{
- unsigned long fc;
- struct {
- u8 iv[AES_BLOCK_SIZE];
- u8 key[MAXPROTKEYSIZE];
- } param;
- u8 null_msg[AES_BLOCK_SIZE];
- u8 dest_buf[AES_BLOCK_SIZE];
- unsigned int k;
-
- switch (protkey->type) {
- case PKEY_KEYTYPE_AES_128:
- fc = CPACF_KMC_PAES_128;
- break;
- case PKEY_KEYTYPE_AES_192:
- fc = CPACF_KMC_PAES_192;
- break;
- case PKEY_KEYTYPE_AES_256:
- fc = CPACF_KMC_PAES_256;
- break;
- default:
- DEBUG_ERR("%s unknown/unsupported keytype %d\n", __func__,
- protkey->type);
- return -EINVAL;
}
-
- memset(null_msg, 0, sizeof(null_msg));
-
- memset(param.iv, 0, sizeof(param.iv));
- memcpy(param.key, protkey->protkey, sizeof(param.key));
-
- k = cpacf_kmc(fc | CPACF_ENCRYPT, &param, null_msg, dest_buf,
- sizeof(null_msg));
- if (k != sizeof(null_msg)) {
- DEBUG_ERR("%s protected key is not valid\n", __func__);
- return -EKEYREJECTED;
+ if (!rc && kak.apqns) {
+ if (nr_apqns > kak.apqn_entries) {
+ kfree(apqns);
+ return -EINVAL;
+ }
+ len = nr_apqns * sizeof(struct pkey_apqn);
+ if (len) {
+ if (copy_to_user(kak.apqns, apqns, len)) {
+ kfree(apqns);
+ return -EFAULT;
+ }
+ }
}
+ kak.apqn_entries = nr_apqns;
+ if (copy_to_user(uak, &kak, sizeof(kak)))
+ rc = -EFAULT;
+ kfree(apqns);
- return 0;
+ return rc;
}
-EXPORT_SYMBOL(pkey_verifyprotkey);
-/*
- * Transform a non-CCA key token into a protected key
- */
-static int pkey_nonccatok2pkey(const __u8 *key, __u32 keylen,
- struct pkey_protkey *protkey)
+static int pkey_ioctl_apqns4kt(struct pkey_apqns4keytype __user *uat)
{
- struct keytoken_header *hdr = (struct keytoken_header *)key;
- struct protaeskeytoken *t;
+ struct pkey_apqn *apqns = NULL;
+ struct pkey_apqns4keytype kat;
+ size_t nr_apqns, len;
+ int rc;
- switch (hdr->version) {
- case TOKVER_PROTECTED_KEY:
- if (keylen != sizeof(struct protaeskeytoken))
+ if (copy_from_user(&kat, uat, sizeof(kat)))
+ return -EFAULT;
+ nr_apqns = kat.apqn_entries;
+ if (nr_apqns) {
+ apqns = kmalloc_array(nr_apqns,
+ sizeof(struct pkey_apqn),
+ GFP_KERNEL);
+ if (!apqns)
+ return -ENOMEM;
+ }
+ rc = pkey_handler_apqns_for_keytype(kat.type,
+ kat.cur_mkvp, kat.alt_mkvp,
+ kat.flags, apqns, &nr_apqns, 0);
+ pr_debug("apqns_for_keytype()=%d\n", rc);
+ if (rc && rc != -ENOSPC) {
+ kfree(apqns);
+ return rc;
+ }
+ if (!rc && kat.apqns) {
+ if (nr_apqns > kat.apqn_entries) {
+ kfree(apqns);
return -EINVAL;
-
- t = (struct protaeskeytoken *)key;
- protkey->len = t->len;
- protkey->type = t->keytype;
- memcpy(protkey->protkey, t->protkey,
- sizeof(protkey->protkey));
-
- return pkey_verifyprotkey(protkey);
- default:
- DEBUG_ERR("%s unknown/unsupported non-CCA token version %d\n",
- __func__, hdr->version);
- return -EINVAL;
+ }
+ len = nr_apqns * sizeof(struct pkey_apqn);
+ if (len) {
+ if (copy_to_user(kat.apqns, apqns, len)) {
+ kfree(apqns);
+ return -EFAULT;
+ }
+ }
}
+ kat.apqn_entries = nr_apqns;
+ if (copy_to_user(uat, &kat, sizeof(kat)))
+ rc = -EFAULT;
+ kfree(apqns);
+
+ return rc;
}
-/*
- * Transform a CCA internal key token into a protected key
- */
-static int pkey_ccainttok2pkey(const __u8 *key, __u32 keylen,
- struct pkey_protkey *protkey)
+static int pkey_ioctl_kblob2protk3(struct pkey_kblob2pkey3 __user *utp)
{
- struct keytoken_header *hdr = (struct keytoken_header *)key;
+ u32 protkeylen = PROTKEYBLOBBUFSIZE;
+ struct pkey_apqn *apqns = NULL;
+ struct pkey_kblob2pkey3 ktp;
+ u8 *kkey, *protkey;
+ int rc;
- switch (hdr->version) {
- case TOKVER_CCA_AES:
- if (keylen != sizeof(struct secaeskeytoken))
+ if (copy_from_user(&ktp, utp, sizeof(ktp)))
+ return -EFAULT;
+ apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries);
+ if (IS_ERR(apqns))
+ return PTR_ERR(apqns);
+ kkey = _copy_key_from_user(ktp.key, ktp.keylen);
+ if (IS_ERR(kkey)) {
+ kfree(apqns);
+ return PTR_ERR(kkey);
+ }
+ protkey = kmalloc(protkeylen, GFP_KERNEL);
+ if (!protkey) {
+ kfree(apqns);
+ kfree_sensitive(kkey);
+ return -ENOMEM;
+ }
+ rc = key2protkey(apqns, ktp.apqn_entries, kkey, ktp.keylen,
+ protkey, &protkeylen, &ktp.pkeytype, 0);
+ pr_debug("key2protkey()=%d\n", rc);
+ kfree(apqns);
+ kfree_sensitive(kkey);
+ if (rc) {
+ kfree_sensitive(protkey);
+ return rc;
+ }
+ if (ktp.pkey && ktp.pkeylen) {
+ if (protkeylen > ktp.pkeylen) {
+ kfree_sensitive(protkey);
return -EINVAL;
-
- return pkey_skey2pkey((struct pkey_seckey *)key,
- protkey);
- default:
- DEBUG_ERR("%s unknown/unsupported CCA internal token version %d\n",
- __func__, hdr->version);
- return -EINVAL;
+ }
+ if (copy_to_user(ktp.pkey, protkey, protkeylen)) {
+ kfree_sensitive(protkey);
+ return -EFAULT;
+ }
}
-}
+ kfree_sensitive(protkey);
+ ktp.pkeylen = protkeylen;
+ if (copy_to_user(utp, &ktp, sizeof(ktp)))
+ return -EFAULT;
-/*
- * Transform a key blob (of any type) into a protected key
- */
-int pkey_keyblob2pkey(const __u8 *key, __u32 keylen,
- struct pkey_protkey *protkey)
-{
- struct keytoken_header *hdr = (struct keytoken_header *)key;
-
- if (keylen < sizeof(struct keytoken_header))
- return -EINVAL;
-
- switch (hdr->type) {
- case TOKTYPE_NON_CCA:
- return pkey_nonccatok2pkey(key, keylen, protkey);
- case TOKTYPE_CCA_INTERNAL:
- return pkey_ccainttok2pkey(key, keylen, protkey);
- default:
- DEBUG_ERR("%s unknown/unsupported blob type %d\n", __func__,
- hdr->type);
- return -EINVAL;
- }
+ return 0;
}
-EXPORT_SYMBOL(pkey_keyblob2pkey);
-
-/*
- * File io functions
- */
static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
@@ -1266,164 +712,57 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
int rc;
switch (cmd) {
- case PKEY_GENSECK: {
- struct pkey_genseck __user *ugs = (void __user *) arg;
- struct pkey_genseck kgs;
-
- if (copy_from_user(&kgs, ugs, sizeof(kgs)))
- return -EFAULT;
- rc = pkey_genseckey(kgs.cardnr, kgs.domain,
- kgs.keytype, &kgs.seckey);
- DEBUG_DBG("%s pkey_genseckey()=%d\n", __func__, rc);
- if (rc)
- break;
- if (copy_to_user(ugs, &kgs, sizeof(kgs)))
- return -EFAULT;
+ case PKEY_GENSECK:
+ rc = pkey_ioctl_genseck((struct pkey_genseck __user *)arg);
break;
- }
- case PKEY_CLR2SECK: {
- struct pkey_clr2seck __user *ucs = (void __user *) arg;
- struct pkey_clr2seck kcs;
-
- if (copy_from_user(&kcs, ucs, sizeof(kcs)))
- return -EFAULT;
- rc = pkey_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype,
- &kcs.clrkey, &kcs.seckey);
- DEBUG_DBG("%s pkey_clr2seckey()=%d\n", __func__, rc);
- if (rc)
- break;
- if (copy_to_user(ucs, &kcs, sizeof(kcs)))
- return -EFAULT;
- memzero_explicit(&kcs, sizeof(kcs));
+ case PKEY_CLR2SECK:
+ rc = pkey_ioctl_clr2seck((struct pkey_clr2seck __user *)arg);
break;
- }
- case PKEY_SEC2PROTK: {
- struct pkey_sec2protk __user *usp = (void __user *) arg;
- struct pkey_sec2protk ksp;
-
- if (copy_from_user(&ksp, usp, sizeof(ksp)))
- return -EFAULT;
- rc = pkey_sec2protkey(ksp.cardnr, ksp.domain,
- &ksp.seckey, &ksp.protkey);
- DEBUG_DBG("%s pkey_sec2protkey()=%d\n", __func__, rc);
- if (rc)
- break;
- if (copy_to_user(usp, &ksp, sizeof(ksp)))
- return -EFAULT;
+ case PKEY_SEC2PROTK:
+ rc = pkey_ioctl_sec2protk((struct pkey_sec2protk __user *)arg);
break;
- }
- case PKEY_CLR2PROTK: {
- struct pkey_clr2protk __user *ucp = (void __user *) arg;
- struct pkey_clr2protk kcp;
-
- if (copy_from_user(&kcp, ucp, sizeof(kcp)))
- return -EFAULT;
- rc = pkey_clr2protkey(kcp.keytype,
- &kcp.clrkey, &kcp.protkey);
- DEBUG_DBG("%s pkey_clr2protkey()=%d\n", __func__, rc);
- if (rc)
- break;
- if (copy_to_user(ucp, &kcp, sizeof(kcp)))
- return -EFAULT;
- memzero_explicit(&kcp, sizeof(kcp));
+ case PKEY_CLR2PROTK:
+ rc = pkey_ioctl_clr2protk((struct pkey_clr2protk __user *)arg);
break;
- }
- case PKEY_FINDCARD: {
- struct pkey_findcard __user *ufc = (void __user *) arg;
- struct pkey_findcard kfc;
-
- if (copy_from_user(&kfc, ufc, sizeof(kfc)))
- return -EFAULT;
- rc = pkey_findcard(&kfc.seckey,
- &kfc.cardnr, &kfc.domain, 1);
- DEBUG_DBG("%s pkey_findcard()=%d\n", __func__, rc);
- if (rc)
- break;
- if (copy_to_user(ufc, &kfc, sizeof(kfc)))
- return -EFAULT;
+ case PKEY_FINDCARD:
+ rc = pkey_ioctl_findcard((struct pkey_findcard __user *)arg);
break;
- }
- case PKEY_SKEY2PKEY: {
- struct pkey_skey2pkey __user *usp = (void __user *) arg;
- struct pkey_skey2pkey ksp;
-
- if (copy_from_user(&ksp, usp, sizeof(ksp)))
- return -EFAULT;
- rc = pkey_skey2pkey(&ksp.seckey, &ksp.protkey);
- DEBUG_DBG("%s pkey_skey2pkey()=%d\n", __func__, rc);
- if (rc)
- break;
- if (copy_to_user(usp, &ksp, sizeof(ksp)))
- return -EFAULT;
+ case PKEY_SKEY2PKEY:
+ rc = pkey_ioctl_skey2pkey((struct pkey_skey2pkey __user *)arg);
break;
- }
- case PKEY_VERIFYKEY: {
- struct pkey_verifykey __user *uvk = (void __user *) arg;
- struct pkey_verifykey kvk;
-
- if (copy_from_user(&kvk, uvk, sizeof(kvk)))
- return -EFAULT;
- rc = pkey_verifykey(&kvk.seckey, &kvk.cardnr, &kvk.domain,
- &kvk.keysize, &kvk.attributes);
- DEBUG_DBG("%s pkey_verifykey()=%d\n", __func__, rc);
- if (rc)
- break;
- if (copy_to_user(uvk, &kvk, sizeof(kvk)))
- return -EFAULT;
+ case PKEY_VERIFYKEY:
+ rc = pkey_ioctl_verifykey((struct pkey_verifykey __user *)arg);
break;
- }
- case PKEY_GENPROTK: {
- struct pkey_genprotk __user *ugp = (void __user *) arg;
- struct pkey_genprotk kgp;
-
- if (copy_from_user(&kgp, ugp, sizeof(kgp)))
- return -EFAULT;
- rc = pkey_genprotkey(kgp.keytype, &kgp.protkey);
- DEBUG_DBG("%s pkey_genprotkey()=%d\n", __func__, rc);
- if (rc)
- break;
- if (copy_to_user(ugp, &kgp, sizeof(kgp)))
- return -EFAULT;
+ case PKEY_GENPROTK:
+ rc = pkey_ioctl_genprotk((struct pkey_genprotk __user *)arg);
break;
- }
- case PKEY_VERIFYPROTK: {
- struct pkey_verifyprotk __user *uvp = (void __user *) arg;
- struct pkey_verifyprotk kvp;
-
- if (copy_from_user(&kvp, uvp, sizeof(kvp)))
- return -EFAULT;
- rc = pkey_verifyprotkey(&kvp.protkey);
- DEBUG_DBG("%s pkey_verifyprotkey()=%d\n", __func__, rc);
+ case PKEY_VERIFYPROTK:
+ rc = pkey_ioctl_verifyprotk((struct pkey_verifyprotk __user *)arg);
break;
- }
- case PKEY_KBLOB2PROTK: {
- struct pkey_kblob2pkey __user *utp = (void __user *) arg;
- struct pkey_kblob2pkey ktp;
- __u8 __user *ukey;
- __u8 *kkey;
-
- if (copy_from_user(&ktp, utp, sizeof(ktp)))
- return -EFAULT;
- if (ktp.keylen < MINKEYBLOBSIZE ||
- ktp.keylen > MAXKEYBLOBSIZE)
- return -EINVAL;
- ukey = ktp.key;
- kkey = kmalloc(ktp.keylen, GFP_KERNEL);
- if (kkey == NULL)
- return -ENOMEM;
- if (copy_from_user(kkey, ukey, ktp.keylen)) {
- kfree(kkey);
- return -EFAULT;
- }
- rc = pkey_keyblob2pkey(kkey, ktp.keylen, &ktp.protkey);
- DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc);
- kfree(kkey);
- if (rc)
- break;
- if (copy_to_user(utp, &ktp, sizeof(ktp)))
- return -EFAULT;
+ case PKEY_KBLOB2PROTK:
+ rc = pkey_ioctl_kblob2protk((struct pkey_kblob2pkey __user *)arg);
+ break;
+ case PKEY_GENSECK2:
+ rc = pkey_ioctl_genseck2((struct pkey_genseck2 __user *)arg);
+ break;
+ case PKEY_CLR2SECK2:
+ rc = pkey_ioctl_clr2seck2((struct pkey_clr2seck2 __user *)arg);
+ break;
+ case PKEY_VERIFYKEY2:
+ rc = pkey_ioctl_verifykey2((struct pkey_verifykey2 __user *)arg);
+ break;
+ case PKEY_KBLOB2PROTK2:
+ rc = pkey_ioctl_kblob2protk2((struct pkey_kblob2pkey2 __user *)arg);
+ break;
+ case PKEY_APQNS4K:
+ rc = pkey_ioctl_apqns4k((struct pkey_apqns4key __user *)arg);
+ break;
+ case PKEY_APQNS4KT:
+ rc = pkey_ioctl_apqns4kt((struct pkey_apqns4keytype __user *)arg);
+ break;
+ case PKEY_KBLOB2PROTK3:
+ rc = pkey_ioctl_kblob2protk3((struct pkey_kblob2pkey3 __user *)arg);
break;
- }
default:
/* unknown/unsupported ioctl cmd */
return -ENOTTY;
@@ -1433,242 +772,12 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
}
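
The net effect of the refactor above: each ioctl command gets a small helper that owns its user copies and scrubbing, leaving the dispatcher as a plain switch. A distilled sketch of the common shape (pkey_example and do_work are hypothetical names):

static int pkey_ioctl_example(struct pkey_example __user *up)
{
        struct pkey_example k;
        int rc;

        if (copy_from_user(&k, up, sizeof(k)))
                return -EFAULT;
        rc = do_work(&k);                       /* call into a pkey handler */
        if (!rc && copy_to_user(up, &k, sizeof(k)))
                rc = -EFAULT;
        memzero_explicit(&k, sizeof(k));        /* scrub key material */
        return rc;
}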
/*
- * Sysfs and file io operations
- */
-
-/*
- * Sysfs attribute read function for all protected key binary attributes.
- * The implementation can not deal with partial reads, because a new random
- * protected key blob is generated with each read. In case of partial reads
- * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ * File io operations
*/
-static ssize_t pkey_protkey_aes_attr_read(u32 keytype, bool is_xts, char *buf,
- loff_t off, size_t count)
-{
- struct protaeskeytoken protkeytoken;
- struct pkey_protkey protkey;
- int rc;
-
- if (off != 0 || count < sizeof(protkeytoken))
- return -EINVAL;
- if (is_xts)
- if (count < 2 * sizeof(protkeytoken))
- return -EINVAL;
-
- memset(&protkeytoken, 0, sizeof(protkeytoken));
- protkeytoken.type = TOKTYPE_NON_CCA;
- protkeytoken.version = TOKVER_PROTECTED_KEY;
- protkeytoken.keytype = keytype;
-
- rc = pkey_genprotkey(protkeytoken.keytype, &protkey);
- if (rc)
- return rc;
-
- protkeytoken.len = protkey.len;
- memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len);
-
- memcpy(buf, &protkeytoken, sizeof(protkeytoken));
-
- if (is_xts) {
- rc = pkey_genprotkey(protkeytoken.keytype, &protkey);
- if (rc)
- return rc;
-
- protkeytoken.len = protkey.len;
- memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len);
-
- memcpy(buf + sizeof(protkeytoken), &protkeytoken,
- sizeof(protkeytoken));
-
- return 2 * sizeof(protkeytoken);
- }
-
- return sizeof(protkeytoken);
-}
-
-static ssize_t protkey_aes_128_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf,
- off, count);
-}
-
-static ssize_t protkey_aes_192_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf,
- off, count);
-}
-
-static ssize_t protkey_aes_256_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf,
- off, count);
-}
-
-static ssize_t protkey_aes_128_xts_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf,
- off, count);
-}
-
-static ssize_t protkey_aes_256_xts_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf,
- off, count);
-}
-
-static BIN_ATTR_RO(protkey_aes_128, sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_192, sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_256, sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_128_xts, 2 * sizeof(struct protaeskeytoken));
-static BIN_ATTR_RO(protkey_aes_256_xts, 2 * sizeof(struct protaeskeytoken));
-
-static struct bin_attribute *protkey_attrs[] = {
- &bin_attr_protkey_aes_128,
- &bin_attr_protkey_aes_192,
- &bin_attr_protkey_aes_256,
- &bin_attr_protkey_aes_128_xts,
- &bin_attr_protkey_aes_256_xts,
- NULL
-};
-
-static struct attribute_group protkey_attr_group = {
- .name = "protkey",
- .bin_attrs = protkey_attrs,
-};
-
-/*
- * Sysfs attribute read function for all secure key ccadata binary attributes.
- * The implementation can not deal with partial reads, because a new random
- * protected key blob is generated with each read. In case of partial reads
- * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
- */
-static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf,
- loff_t off, size_t count)
-{
- int rc;
-
- if (off != 0 || count < sizeof(struct secaeskeytoken))
- return -EINVAL;
- if (is_xts)
- if (count < 2 * sizeof(struct secaeskeytoken))
- return -EINVAL;
-
- rc = pkey_genseckey(-1, -1, keytype, (struct pkey_seckey *)buf);
- if (rc)
- return rc;
-
- if (is_xts) {
- buf += sizeof(struct pkey_seckey);
- rc = pkey_genseckey(-1, -1, keytype, (struct pkey_seckey *)buf);
- if (rc)
- return rc;
-
- return 2 * sizeof(struct secaeskeytoken);
- }
-
- return sizeof(struct secaeskeytoken);
-}
-
-static ssize_t ccadata_aes_128_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf,
- off, count);
-}
-
-static ssize_t ccadata_aes_192_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf,
- off, count);
-}
-
-static ssize_t ccadata_aes_256_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf,
- off, count);
-}
-
-static ssize_t ccadata_aes_128_xts_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf,
- off, count);
-}
-
-static ssize_t ccadata_aes_256_xts_read(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf,
- off, count);
-}
-
-static BIN_ATTR_RO(ccadata_aes_128, sizeof(struct secaeskeytoken));
-static BIN_ATTR_RO(ccadata_aes_192, sizeof(struct secaeskeytoken));
-static BIN_ATTR_RO(ccadata_aes_256, sizeof(struct secaeskeytoken));
-static BIN_ATTR_RO(ccadata_aes_128_xts, 2 * sizeof(struct secaeskeytoken));
-static BIN_ATTR_RO(ccadata_aes_256_xts, 2 * sizeof(struct secaeskeytoken));
-
-static struct bin_attribute *ccadata_attrs[] = {
- &bin_attr_ccadata_aes_128,
- &bin_attr_ccadata_aes_192,
- &bin_attr_ccadata_aes_256,
- &bin_attr_ccadata_aes_128_xts,
- &bin_attr_ccadata_aes_256_xts,
- NULL
-};
-
-static struct attribute_group ccadata_attr_group = {
- .name = "ccadata",
- .bin_attrs = ccadata_attrs,
-};
-
-static const struct attribute_group *pkey_attr_groups[] = {
- &protkey_attr_group,
- &ccadata_attr_group,
- NULL,
-};
static const struct file_operations pkey_fops = {
.owner = THIS_MODULE,
.open = nonseekable_open,
- .llseek = no_llseek,
.unlocked_ioctl = pkey_unlocked_ioctl,
};
@@ -1680,44 +789,13 @@ static struct miscdevice pkey_dev = {
.groups = pkey_attr_groups,
};
-/*
- * Module init
- */
-static int __init pkey_init(void)
+int __init pkey_api_init(void)
{
- cpacf_mask_t kmc_functions;
-
- /*
- * The pckmo instruction should be available - even if we don't
- * actually invoke it. This instruction comes with MSA 3 which
- * is also the minimum level for the kmc instructions which
- * are able to work with protected keys.
- */
- if (!cpacf_query(CPACF_PCKMO, &pckmo_functions))
- return -EOPNOTSUPP;
-
- /* check for kmc instructions available */
- if (!cpacf_query(CPACF_KMC, &kmc_functions))
- return -EOPNOTSUPP;
- if (!cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) ||
- !cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) ||
- !cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256))
- return -EOPNOTSUPP;
-
- pkey_debug_init();
-
+ /* register as a misc device */
return misc_register(&pkey_dev);
}
-/*
- * Module exit
- */
-static void __exit pkey_exit(void)
+void __exit pkey_api_exit(void)
{
misc_deregister(&pkey_dev);
- mkvp_cache_free();
- pkey_debug_exit();
}
-
-module_cpu_feature_match(MSA, pkey_init);
-module_exit(pkey_exit);
diff --git a/drivers/s390/crypto/pkey_base.c b/drivers/s390/crypto/pkey_base.c
new file mode 100644
index 000000000000..d60cd987c16d
--- /dev/null
+++ b/drivers/s390/crypto/pkey_base.c
@@ -0,0 +1,380 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pkey base: debug feature, pkey handler registry
+ *
+ * Copyright IBM Corp. 2024
+ */
+
+#define pr_fmt(fmt) "pkey: " fmt
+
+#include <linux/cpufeature.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/rculist.h>
+
+#include "pkey_base.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 protected key base and api");
+
+/*
+ * pkey debug feature
+ */
+debug_info_t *pkey_dbf_info;
+EXPORT_SYMBOL(pkey_dbf_info);
+
+/*
+ * pkey handler registry
+ */
+
+static DEFINE_SPINLOCK(handler_list_write_lock);
+static LIST_HEAD(handler_list);
+
+int pkey_handler_register(struct pkey_handler *handler)
+{
+ const struct pkey_handler *h;
+
+ if (!handler ||
+ !handler->is_supported_key ||
+ !handler->is_supported_keytype)
+ return -EINVAL;
+
+ if (!try_module_get(handler->module))
+ return -ENXIO;
+
+ spin_lock(&handler_list_write_lock);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(h, &handler_list, list) {
+ if (h == handler) {
+ rcu_read_unlock();
+ spin_unlock(&handler_list_write_lock);
+ module_put(handler->module);
+ return -EEXIST;
+ }
+ }
+ rcu_read_unlock();
+
+ list_add_rcu(&handler->list, &handler_list);
+ spin_unlock(&handler_list_write_lock);
+ synchronize_rcu();
+
+ module_put(handler->module);
+
+ PKEY_DBF_INFO("%s pkey handler '%s' registered\n", __func__,
+ handler->name ?: "<no name>");
+
+ return 0;
+}
+EXPORT_SYMBOL(pkey_handler_register);
+
+int pkey_handler_unregister(struct pkey_handler *handler)
+{
+ spin_lock(&handler_list_write_lock);
+ list_del_rcu(&handler->list);
+ INIT_LIST_HEAD_RCU(&handler->list);
+ spin_unlock(&handler_list_write_lock);
+ synchronize_rcu();
+
+ PKEY_DBF_INFO("%s pkey handler '%s' unregistered\n", __func__,
+ handler->name ?: "<no name>");
+
+ return 0;
+}
+EXPORT_SYMBOL(pkey_handler_unregister);
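
A hypothetical skeleton of a handler module built against this registry; all my_* names are illustrative and the callback signatures are inferred from the invocation helpers below:

static bool my_is_supported_key(const u8 *key, u32 keylen)
{
        return false;   /* inspect the key token header here */
}

static bool my_is_supported_keytype(enum pkey_key_type kt)
{
        return false;
}

static struct pkey_handler my_handler = {
        .module               = THIS_MODULE,
        .name                 = "my pkey handler",
        .is_supported_key     = my_is_supported_key,
        .is_supported_keytype = my_is_supported_keytype,
        /* optional ops: key_to_protkey, gen_key, clr_to_key, ... */
};

static int __init my_init(void)
{
        return pkey_handler_register(&my_handler);
}

static void __exit my_exit(void)
{
        pkey_handler_unregister(&my_handler);
}
module_init(my_init);
module_exit(my_exit);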
+
+/*
+ * Handler invocation functions.
+ */
+
+const struct pkey_handler *pkey_handler_get_keybased(const u8 *key, u32 keylen)
+{
+ const struct pkey_handler *h;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(h, &handler_list, list) {
+ if (!try_module_get(h->module))
+ continue;
+ if (h->is_supported_key(key, keylen)) {
+ rcu_read_unlock();
+ return h;
+ }
+ module_put(h->module);
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+EXPORT_SYMBOL(pkey_handler_get_keybased);
+
+const struct pkey_handler *pkey_handler_get_keytypebased(enum pkey_key_type kt)
+{
+ const struct pkey_handler *h;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(h, &handler_list, list) {
+ if (!try_module_get(h->module))
+ continue;
+ if (h->is_supported_keytype(kt)) {
+ rcu_read_unlock();
+ return h;
+ }
+ module_put(h->module);
+ }
+ rcu_read_unlock();
+
+ return NULL;
+}
+EXPORT_SYMBOL(pkey_handler_get_keytypebased);
+
+void pkey_handler_put(const struct pkey_handler *handler)
+{
+ const struct pkey_handler *h;
+
+ if (!handler)
+ return;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(h, &handler_list, list) {
+ if (h == handler) {
+ module_put(h->module);
+ break;
+ }
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(pkey_handler_put);
+
+int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype,
+ u32 xflags)
+{
+ const struct pkey_handler *h;
+ int rc = -ENODEV;
+
+ h = pkey_handler_get_keybased(key, keylen);
+ if (h && h->key_to_protkey) {
+ rc = h->key_to_protkey(apqns, nr_apqns, key, keylen,
+ protkey, protkeylen,
+ protkeytype, xflags);
+ }
+ pkey_handler_put(h);
+
+ return rc;
+}
+EXPORT_SYMBOL(pkey_handler_key_to_protkey);
+
+/*
+ * This handler invocation is special as there may be more than
+ * one handler providing support for the very same key (type).
+ * And a handler may not respond true on is_supported_key(),
+ * so simply try each handler and check the return value here.
+ */
+int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns,
+ size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen,
+ u32 *protkeytype, u32 xflags)
+{
+ const struct pkey_handler *h, *htmp[10];
+ int i, n = 0, rc = -ENODEV;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(h, &handler_list, list) {
+ if (!try_module_get(h->module))
+ continue;
+ if (h->slowpath_key_to_protkey && n < ARRAY_SIZE(htmp))
+ htmp[n++] = h;
+ else
+ module_put(h->module);
+ }
+ rcu_read_unlock();
+
+ for (i = 0; i < n; i++) {
+ h = htmp[i];
+ if (rc)
+ rc = h->slowpath_key_to_protkey(apqns, nr_apqns,
+ key, keylen,
+ protkey, protkeylen,
+ protkeytype, xflags);
+ module_put(h->module);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(pkey_handler_slowpath_key_to_protkey);
+
+int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 keysubtype,
+ u32 keybitsize, u32 flags,
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags)
+{
+ const struct pkey_handler *h;
+ int rc = -ENODEV;
+
+ h = pkey_handler_get_keytypebased(keysubtype);
+ if (h && h->gen_key) {
+ rc = h->gen_key(apqns, nr_apqns, keytype, keysubtype,
+ keybitsize, flags,
+ keybuf, keybuflen, keyinfo, xflags);
+ }
+ pkey_handler_put(h);
+
+ return rc;
+}
+EXPORT_SYMBOL(pkey_handler_gen_key);
+
+int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 keysubtype,
+ u32 keybitsize, u32 flags,
+ const u8 *clrkey, u32 clrkeylen,
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo,
+ u32 xflags)
+{
+ const struct pkey_handler *h;
+ int rc = -ENODEV;
+
+ h = pkey_handler_get_keytypebased(keysubtype);
+ if (h && h->clr_to_key) {
+ rc = h->clr_to_key(apqns, nr_apqns, keytype, keysubtype,
+ keybitsize, flags, clrkey, clrkeylen,
+ keybuf, keybuflen, keyinfo, xflags);
+ }
+ pkey_handler_put(h);
+
+ return rc;
+}
+EXPORT_SYMBOL(pkey_handler_clr_to_key);
+
+int pkey_handler_verify_key(const u8 *key, u32 keylen,
+ u16 *card, u16 *dom,
+ u32 *keytype, u32 *keybitsize, u32 *flags,
+ u32 xflags)
+{
+ const struct pkey_handler *h;
+ int rc = -ENODEV;
+
+ h = pkey_handler_get_keybased(key, keylen);
+ if (h && h->verify_key) {
+ rc = h->verify_key(key, keylen, card, dom,
+ keytype, keybitsize, flags, xflags);
+ }
+ pkey_handler_put(h);
+
+ return rc;
+}
+EXPORT_SYMBOL(pkey_handler_verify_key);
+
+int pkey_handler_apqns_for_key(const u8 *key, u32 keylen, u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns,
+ u32 xflags)
+{
+ const struct pkey_handler *h;
+ int rc = -ENODEV;
+
+ h = pkey_handler_get_keybased(key, keylen);
+ if (h && h->apqns_for_key)
+ rc = h->apqns_for_key(key, keylen, flags, apqns, nr_apqns,
+ xflags);
+ pkey_handler_put(h);
+
+ return rc;
+}
+EXPORT_SYMBOL(pkey_handler_apqns_for_key);
+
+int pkey_handler_apqns_for_keytype(enum pkey_key_type keysubtype,
+ u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns,
+ u32 xflags)
+{
+ const struct pkey_handler *h;
+ int rc = -ENODEV;
+
+ h = pkey_handler_get_keytypebased(keysubtype);
+ if (h && h->apqns_for_keytype) {
+ rc = h->apqns_for_keytype(keysubtype,
+ cur_mkvp, alt_mkvp, flags,
+ apqns, nr_apqns, xflags);
+ }
+ pkey_handler_put(h);
+
+ return rc;
+}
+EXPORT_SYMBOL(pkey_handler_apqns_for_keytype);
+
+void pkey_handler_request_modules(void)
+{
+#ifdef CONFIG_MODULES
+ static const char * const pkey_handler_modules[] = {
+#if IS_MODULE(CONFIG_PKEY_CCA)
+ "pkey_cca",
+#endif
+#if IS_MODULE(CONFIG_PKEY_EP11)
+ "pkey_ep11",
+#endif
+#if IS_MODULE(CONFIG_PKEY_PCKMO)
+ "pkey_pckmo",
+#endif
+#if IS_MODULE(CONFIG_PKEY_UV)
+ "pkey_uv",
+#endif
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pkey_handler_modules); i++) {
+ const struct pkey_handler *h;
+ bool found = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(h, &handler_list, list) {
+ if (h->module &&
+ !strcmp(h->module->name, pkey_handler_modules[i])) {
+ found = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ if (!found) {
+ pr_debug("request_module(%s)\n", pkey_handler_modules[i]);
+ request_module(pkey_handler_modules[i]);
+ }
+ }
+#endif
+}
+EXPORT_SYMBOL(pkey_handler_request_modules);
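+
+/*
+ * Callers typically pair this with an -ENODEV retry (sketch; this
+ * is exactly the pattern the pkey sysfs code uses):
+ *
+ * rc = pkey_handler_gen_key(...);
+ * if (rc == -ENODEV) {
+ * pkey_handler_request_modules();
+ * rc = pkey_handler_gen_key(...);
+ * }
+ */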
+
+/*
+ * Module init
+ */
+static int __init pkey_init(void)
+{
+ int rc;
+
+ /* init debug feature */
+ pkey_dbf_info = debug_register("pkey", 1, 1, 5 * sizeof(long));
+ debug_register_view(pkey_dbf_info, &debug_sprintf_view);
+ debug_set_level(pkey_dbf_info, 4);
+
+ /* the handler registry does not need any init */
+
+ rc = pkey_api_init();
+ if (rc)
+ debug_unregister(pkey_dbf_info);
+
+ return rc;
+}
+
+/*
+ * Module exit
+ */
+static void __exit pkey_exit(void)
+{
+ pkey_api_exit();
+}
+
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, pkey_init);
+module_exit(pkey_exit);
diff --git a/drivers/s390/crypto/pkey_base.h b/drivers/s390/crypto/pkey_base.h
new file mode 100644
index 000000000000..9cdb3e74477f
--- /dev/null
+++ b/drivers/s390/crypto/pkey_base.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright IBM Corp. 2024
+ *
+ * Pkey base: debug feature, defines and structs
+ * common to all pkey code.
+ */
+
+#ifndef _PKEY_BASE_H_
+#define _PKEY_BASE_H_
+
+#include <linux/types.h>
+#include <asm/debug.h>
+#include <asm/pkey.h>
+
+/*
+ * pkey debug feature
+ */
+
+extern debug_info_t *pkey_dbf_info;
+
+#define PKEY_DBF_INFO(...) debug_sprintf_event(pkey_dbf_info, 5, ##__VA_ARGS__)
+#define PKEY_DBF_WARN(...) debug_sprintf_event(pkey_dbf_info, 4, ##__VA_ARGS__)
+#define PKEY_DBF_ERR(...) debug_sprintf_event(pkey_dbf_info, 3, ##__VA_ARGS__)
+
+/*
+ * common defines and common structs
+ */
+
+#define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */
+#define MINKEYBLOBBUFSIZE (sizeof(struct keytoken_header))
+#define PROTKEYBLOBBUFSIZE 256 /* protected key buffer size used internally */
+#define MAXAPQNSINLIST 64 /* max 64 apqns within an apqn list */
+#define AES_WK_VP_SIZE 32 /* Size of WK VP block appended to a prot key */
+
+/* inside view of a generic protected key token */
+struct protkeytoken {
+ u8 type; /* 0x00 for PAES specific key tokens */
+ u8 res0[3];
+ u8 version; /* should be 0x01 for protected key token */
+ u8 res1[3];
+ u32 keytype; /* key type, one of the PKEY_KEYTYPE values */
+ u32 len; /* bytes actually stored in protkey[] */
+ u8 protkey[]; /* the protected key blob */
+} __packed;
+
+/* inside view of a protected AES key token */
+struct protaeskeytoken {
+ u8 type; /* 0x00 for PAES specific key tokens */
+ u8 res0[3];
+ u8 version; /* should be 0x01 for protected key token */
+ u8 res1[3];
+ u32 keytype; /* key type, one of the PKEY_KEYTYPE values */
+ u32 len; /* bytes actually stored in protkey[] */
+ u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */
+} __packed;
+
+/* inside view of a clear key token (type 0x00 version 0x02) */
+struct clearkeytoken {
+ u8 type; /* 0x00 for PAES specific key tokens */
+ u8 res0[3];
+ u8 version; /* 0x02 for clear key token */
+ u8 res1[3];
+ u32 keytype; /* key type, one of the PKEY_KEYTYPE_* values */
+ u32 len; /* bytes actually stored in clearkey[] */
+ u8 clearkey[]; /* clear key value */
+} __packed;
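+
+/*
+ * Example: a clear key token carrying an AES 256 key has type 0x00,
+ * version 0x02, keytype PKEY_KEYTYPE_AES_256 and len 32, followed
+ * by 32 bytes of clear key material.
+ */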
+
+/* helper function which translates the PKEY_KEYTYPE_AES_* to their keysize */
+static inline u32 pkey_keytype_aes_to_size(u32 keytype)
+{
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ return 16;
+ case PKEY_KEYTYPE_AES_192:
+ return 24;
+ case PKEY_KEYTYPE_AES_256:
+ return 32;
+ default:
+ return 0;
+ }
+}
+
+/* helper function which translates AES key bit size into PKEY_KEYTYPE_AES_* */
+static inline u32 pkey_aes_bitsize_to_keytype(u32 keybitsize)
+{
+ switch (keybitsize) {
+ case 128:
+ return PKEY_KEYTYPE_AES_128;
+ case 192:
+ return PKEY_KEYTYPE_AES_192;
+ case 256:
+ return PKEY_KEYTYPE_AES_256;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * helper function which translates the PKEY_KEYTYPE_*
+ * to the protected key size minus the WK VP length
+ */
+static inline u32 pkey_keytype_to_size(u32 keytype)
+{
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ return 16;
+ case PKEY_KEYTYPE_AES_192:
+ return 24;
+ case PKEY_KEYTYPE_AES_256:
+ return 32;
+ case PKEY_KEYTYPE_ECC_P256:
+ return 32;
+ case PKEY_KEYTYPE_ECC_P384:
+ return 48;
+ case PKEY_KEYTYPE_ECC_P521:
+ return 80;
+ case PKEY_KEYTYPE_ECC_ED25519:
+ return 32;
+ case PKEY_KEYTYPE_ECC_ED448:
+ return 64;
+ case PKEY_KEYTYPE_AES_XTS_128:
+ return 32;
+ case PKEY_KEYTYPE_AES_XTS_256:
+ return 64;
+ case PKEY_KEYTYPE_HMAC_512:
+ return 64;
+ case PKEY_KEYTYPE_HMAC_1024:
+ return 128;
+ default:
+ return 0;
+ }
+}
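+
+/*
+ * Example: an AES 256 protected key blob as produced by PCKMO is
+ * pkey_keytype_to_size(PKEY_KEYTYPE_AES_256) + AES_WK_VP_SIZE =
+ * 32 + 32 = 64 bytes, the wrapped key followed by the wrapping
+ * key verification pattern.
+ */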
+
+/*
+ * pkey_api.c:
+ */
+int __init pkey_api_init(void);
+void __exit pkey_api_exit(void);
+
+/*
+ * pkey_sysfs.c:
+ */
+
+extern const struct attribute_group *pkey_attr_groups[];
+
+/*
+ * pkey handler registry
+ */
+
+struct pkey_handler {
+ struct module *module;
+ const char *name;
+ /*
+ * is_supported_key() and is_supported_keytype() are called
+ * within an rcu_read_lock() scope and thus must not sleep!
+ */
+ bool (*is_supported_key)(const u8 *key, u32 keylen);
+ bool (*is_supported_keytype)(enum pkey_key_type);
+ int (*key_to_protkey)(const struct pkey_apqn *apqns, size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype,
+ u32 xflags);
+ int (*slowpath_key_to_protkey)(const struct pkey_apqn *apqns,
+ size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen,
+ u32 *protkeytype, u32 xflags);
+ int (*gen_key)(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 keysubtype,
+ u32 keybitsize, u32 flags,
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags);
+ int (*clr_to_key)(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 keysubtype,
+ u32 keybitsize, u32 flags,
+ const u8 *clrkey, u32 clrkeylen,
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags);
+ int (*verify_key)(const u8 *key, u32 keylen,
+ u16 *card, u16 *dom,
+ u32 *keytype, u32 *keybitsize, u32 *flags,
+ u32 xflags);
+ int (*apqns_for_key)(const u8 *key, u32 keylen, u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns,
+ u32 xflags);
+ int (*apqns_for_keytype)(enum pkey_key_type ktype,
+ u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns,
+ u32 xflags);
+ /* used internally by pkey base */
+ struct list_head list;
+};
+
+int pkey_handler_register(struct pkey_handler *handler);
+int pkey_handler_unregister(struct pkey_handler *handler);
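+
+/*
+ * Minimal handler module skeleton (illustrative sketch with a
+ * hypothetical my_handler; the cca, ep11 and pckmo handler
+ * modules follow this shape):
+ *
+ * static struct pkey_handler my_handler = {
+ * .module = THIS_MODULE,
+ * .name = "PKEY my handler",
+ * .is_supported_key = my_is_supported_key,
+ * .is_supported_keytype = my_is_supported_keytype,
+ * };
+ *
+ * static int __init my_init(void)
+ * {
+ * return pkey_handler_register(&my_handler);
+ * }
+ *
+ * static void __exit my_exit(void)
+ * {
+ * pkey_handler_unregister(&my_handler);
+ * }
+ */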
+
+/*
+ * invocation function for the registered pkey handlers
+ */
+
+const struct pkey_handler *pkey_handler_get_keybased(const u8 *key, u32 keylen);
+const struct pkey_handler *pkey_handler_get_keytypebased(enum pkey_key_type kt);
+void pkey_handler_put(const struct pkey_handler *handler);
+
+int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype,
+ u32 xflags);
+int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns,
+ size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen,
+ u32 *protkeytype, u32 xflags);
+int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 keysubtype,
+ u32 keybitsize, u32 flags,
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags);
+int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 keysubtype,
+ u32 keybitsize, u32 flags,
+ const u8 *clrkey, u32 clrkeylen,
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo,
+ u32 xflags);
+int pkey_handler_verify_key(const u8 *key, u32 keylen,
+ u16 *card, u16 *dom,
+ u32 *keytype, u32 *keybitsize, u32 *flags,
+ u32 xflags);
+int pkey_handler_apqns_for_key(const u8 *key, u32 keylen, u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns,
+ u32 xflags);
+int pkey_handler_apqns_for_keytype(enum pkey_key_type ktype,
+ u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns,
+ u32 xflags);
+
+/*
+ * Unconditional try to load all handler modules
+ */
+void pkey_handler_request_modules(void);
+
+#endif /* _PKEY_BASE_H_ */
diff --git a/drivers/s390/crypto/pkey_cca.c b/drivers/s390/crypto/pkey_cca.c
new file mode 100644
index 000000000000..d4550d8d8eea
--- /dev/null
+++ b/drivers/s390/crypto/pkey_cca.c
@@ -0,0 +1,625 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pkey cca specific code
+ *
+ * Copyright IBM Corp. 2024
+ */
+
+#define pr_fmt(fmt) "pkey: " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cpufeature.h>
+
+#include "zcrypt_ccamisc.h"
+#include "pkey_base.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 protected key CCA handler");
+
+#if IS_MODULE(CONFIG_PKEY_CCA)
+static struct ap_device_id pkey_cca_card_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_CEX4 },
+ { .dev_type = AP_DEVICE_TYPE_CEX5 },
+ { .dev_type = AP_DEVICE_TYPE_CEX6 },
+ { .dev_type = AP_DEVICE_TYPE_CEX7 },
+ { .dev_type = AP_DEVICE_TYPE_CEX8 },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(ap, pkey_cca_card_ids);
+#endif
+
+/*
+ * Check key blob for known and supported CCA key.
+ */
+static bool is_cca_key(const u8 *key, u32 keylen)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+ if (keylen < sizeof(*hdr))
+ return false;
+
+ switch (hdr->type) {
+ case TOKTYPE_CCA_INTERNAL:
+ switch (hdr->version) {
+ case TOKVER_CCA_AES:
+ case TOKVER_CCA_VLSC:
+ return true;
+ default:
+ return false;
+ }
+ case TOKTYPE_CCA_INTERNAL_PKA:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool is_cca_keytype(enum pkey_key_type key_type)
+{
+ switch (key_type) {
+ case PKEY_TYPE_CCA_DATA:
+ case PKEY_TYPE_CCA_CIPHER:
+ case PKEY_TYPE_CCA_ECC:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int cca_apqns4key(const u8 *key, u32 keylen, u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns, u32 pflags)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns);
+ u32 xflags;
+ int rc;
+
+ xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
+
+ if (!flags)
+ flags = PKEY_FLAGS_MATCH_CUR_MKVP | PKEY_FLAGS_MATCH_ALT_MKVP;
+
+ if (keylen < sizeof(struct keytoken_header))
+ return -EINVAL;
+
+ zcrypt_wait_api_operational();
+
+ if (hdr->type == TOKTYPE_CCA_INTERNAL) {
+ u64 cur_mkvp = 0, old_mkvp = 0;
+ int minhwtype = ZCRYPT_CEX3C;
+
+ if (hdr->version == TOKVER_CCA_AES) {
+ struct secaeskeytoken *t = (struct secaeskeytoken *)key;
+
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ cur_mkvp = t->mkvp;
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ old_mkvp = t->mkvp;
+ } else if (hdr->version == TOKVER_CCA_VLSC) {
+ struct cipherkeytoken *t = (struct cipherkeytoken *)key;
+
+ minhwtype = ZCRYPT_CEX6;
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ cur_mkvp = t->mkvp0;
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ old_mkvp = t->mkvp0;
+ } else {
+ /* unknown CCA internal token type */
+ return -EINVAL;
+ }
+ rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ minhwtype, AES_MK_SET,
+ cur_mkvp, old_mkvp, xflags);
+ if (rc)
+ goto out;
+
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
+ struct eccprivkeytoken *t = (struct eccprivkeytoken *)key;
+ u64 cur_mkvp = 0, old_mkvp = 0;
+
+ if (t->secid == 0x20) {
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ cur_mkvp = t->mkvp;
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ old_mkvp = t->mkvp;
+ } else {
+ /* unknown CCA internal 2 token type */
+ return -EINVAL;
+ }
+ rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, APKA_MK_SET,
+ cur_mkvp, old_mkvp, xflags);
+ if (rc)
+ goto out;
+
+ } else {
+ PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n",
+ __func__, hdr->type, hdr->version);
+ return -EINVAL;
+ }
+
+ if (apqns) {
+ if (*nr_apqns < _nr_apqns)
+ rc = -ENOSPC;
+ else
+ memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
+ }
+ *nr_apqns = _nr_apqns;
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int cca_apqns4type(enum pkey_key_type ktype,
+ u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns,
+ u32 pflags)
+{
+ u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns);
+ u32 xflags;
+ int rc;
+
+ xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
+
+ zcrypt_wait_api_operational();
+
+ if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) {
+ u64 mkvp_cur = 0, mkvp_old = 0;
+ int minhwtype = ZCRYPT_CEX3C;
+
+ /* local u64 copies; don't shadow the cur_mkvp/alt_mkvp
+ * buffer parameters here, the casts below read from them
+ */
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ mkvp_cur = *((u64 *)cur_mkvp);
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ mkvp_old = *((u64 *)alt_mkvp);
+ if (ktype == PKEY_TYPE_CCA_CIPHER)
+ minhwtype = ZCRYPT_CEX6;
+ rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ minhwtype, AES_MK_SET,
+ mkvp_cur, mkvp_old, xflags);
+ if (rc)
+ goto out;
+
+ } else if (ktype == PKEY_TYPE_CCA_ECC) {
+ u64 mkvp_cur = 0, mkvp_old = 0;
+
+ /* again: no shadowing of the mkvp buffer parameters */
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ mkvp_cur = *((u64 *)cur_mkvp);
+ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
+ mkvp_old = *((u64 *)alt_mkvp);
+ rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, APKA_MK_SET,
+ mkvp_cur, mkvp_old, xflags);
+ if (rc)
+ goto out;
+
+ } else {
+ PKEY_DBF_ERR("%s unknown/unsupported key type %d",
+ __func__, (int)ktype);
+ return -EINVAL;
+ }
+
+ if (apqns) {
+ if (*nr_apqns < _nr_apqns)
+ rc = -ENOSPC;
+ else
+ memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
+ }
+ *nr_apqns = _nr_apqns;
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype,
+ u32 pflags)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ struct pkey_apqn _apqns[MAXAPQNSINLIST];
+ u32 xflags;
+ int i, rc;
+
+ xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
+
+ if (keylen < sizeof(*hdr))
+ return -EINVAL;
+
+ if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_AES) {
+ /* CCA AES data key */
+ if (keylen < sizeof(struct secaeskeytoken))
+ return -EINVAL;
+ if (cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0))
+ return -EINVAL;
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_VLSC) {
+ /* CCA AES cipher key */
+ if (keylen < hdr->len)
+ return -EINVAL;
+ if (cca_check_secaescipherkey(pkey_dbf_info,
+ 3, key, 0, 1))
+ return -EINVAL;
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
+ /* CCA ECC (private) key */
+ if (keylen < sizeof(struct eccprivkeytoken))
+ return -EINVAL;
+ if (cca_check_sececckeytoken(pkey_dbf_info, 3, key, keylen, 1))
+ return -EINVAL;
+ } else {
+ PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n",
+ __func__, hdr->type, hdr->version);
+ return -EINVAL;
+ }
+
+ zcrypt_wait_api_operational();
+
+ if (!apqns || (nr_apqns == 1 &&
+ apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
+ nr_apqns = MAXAPQNSINLIST;
+ rc = cca_apqns4key(key, keylen, 0, _apqns, &nr_apqns, pflags);
+ if (rc)
+ goto out;
+ apqns = _apqns;
+ }
+
+ for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
+ if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_AES) {
+ rc = cca_sec2protkey(apqns[i].card, apqns[i].domain,
+ key, protkey,
+ protkeylen, protkeytype, xflags);
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_VLSC) {
+ rc = cca_cipher2protkey(apqns[i].card, apqns[i].domain,
+ key, protkey,
+ protkeylen, protkeytype, xflags);
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
+ rc = cca_ecc2protkey(apqns[i].card, apqns[i].domain,
+ key, protkey,
+ protkeylen, protkeytype, xflags);
+ } else {
+ rc = -EINVAL;
+ break;
+ }
+ }
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * Generate CCA secure key.
+ * As of now only CCA AES Data or Cipher secure keys are
+ * supported.
+ * keytype is one of the PKEY_KEYTYPE_* constants,
+ * subtype may be 0 or PKEY_TYPE_CCA_DATA or PKEY_TYPE_CCA_CIPHER,
+ * keybitsize is the bit size of the key (may be 0 for
+ * keytype PKEY_KEYTYPE_AES_*).
+ */
+static int cca_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 subtype,
+ u32 keybitsize, u32 flags,
+ u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags)
+{
+ struct pkey_apqn _apqns[MAXAPQNSINLIST];
+ int i, len, rc;
+ u32 xflags;
+
+ xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
+
+ /* check keytype, subtype, keybitsize */
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_192:
+ case PKEY_KEYTYPE_AES_256:
+ len = pkey_keytype_aes_to_size(keytype);
+ if (keybitsize && keybitsize != 8 * len) {
+ PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ return -EINVAL;
+ }
+ keybitsize = 8 * len;
+ switch (subtype) {
+ case PKEY_TYPE_CCA_DATA:
+ case PKEY_TYPE_CCA_CIPHER:
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n",
+ __func__, subtype);
+ return -EINVAL;
+ }
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n",
+ __func__, keytype);
+ return -EINVAL;
+ }
+
+ zcrypt_wait_api_operational();
+
+ if (!apqns || (nr_apqns == 1 &&
+ apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
+ nr_apqns = MAXAPQNSINLIST;
+ rc = cca_apqns4type(subtype, NULL, NULL, 0,
+ _apqns, &nr_apqns, pflags);
+ if (rc)
+ goto out;
+ apqns = _apqns;
+ }
+
+ for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
+ if (subtype == PKEY_TYPE_CCA_CIPHER) {
+ rc = cca_gencipherkey(apqns[i].card, apqns[i].domain,
+ keybitsize, flags,
+ keybuf, keybuflen, xflags);
+ } else {
+ /* PKEY_TYPE_CCA_DATA */
+ rc = cca_genseckey(apqns[i].card, apqns[i].domain,
+ keybitsize, keybuf, xflags);
+ *keybuflen = (rc ? 0 : SECKEYBLOBSIZE);
+ }
+ }
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * Generate CCA secure key with given clear key value.
+ * As of now only CCA AES Data or Cipher secure keys are
+ * supported.
+ * keytype is one of the PKEY_KEYTYPE_* constants,
+ * subtype may be 0 or PKEY_TYPE_CCA_DATA or PKEY_TYPE_CCA_CIPHER,
+ * keybitsize is the bit size of the key (may be 0 for
+ * keytype PKEY_KEYTYPE_AES_*).
+ */
+static int cca_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 subtype,
+ u32 keybitsize, u32 flags,
+ const u8 *clrkey, u32 clrkeylen,
+ u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags)
+{
+ struct pkey_apqn _apqns[MAXAPQNSINLIST];
+ int i, len, rc;
+ u32 xflags;
+
+ xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
+
+ /* check keytype, subtype, clrkeylen, keybitsize */
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_192:
+ case PKEY_KEYTYPE_AES_256:
+ len = pkey_keytype_aes_to_size(keytype);
+ if (keybitsize && keybitsize != 8 * len) {
+ PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ return -EINVAL;
+ }
+ keybitsize = 8 * len;
+ if (clrkeylen != len) {
+ PKEY_DBF_ERR("%s invalid clear key len %d != %d\n",
+ __func__, clrkeylen, len);
+ return -EINVAL;
+ }
+ switch (subtype) {
+ case PKEY_TYPE_CCA_DATA:
+ case PKEY_TYPE_CCA_CIPHER:
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n",
+ __func__, subtype);
+ return -EINVAL;
+ }
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n",
+ __func__, keytype);
+ return -EINVAL;
+ }
+
+ zcrypt_wait_api_operational();
+
+ if (!apqns || (nr_apqns == 1 &&
+ apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
+ nr_apqns = MAXAPQNSINLIST;
+ rc = cca_apqns4type(subtype, NULL, NULL, 0,
+ _apqns, &nr_apqns, pflags);
+ if (rc)
+ goto out;
+ apqns = _apqns;
+ }
+
+ for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
+ if (subtype == PKEY_TYPE_CCA_CIPHER) {
+ rc = cca_clr2cipherkey(apqns[i].card, apqns[i].domain,
+ keybitsize, flags, clrkey,
+ keybuf, keybuflen, xflags);
+ } else {
+ /* PKEY_TYPE_CCA_DATA */
+ rc = cca_clr2seckey(apqns[i].card, apqns[i].domain,
+ keybitsize, clrkey, keybuf, xflags);
+ *keybuflen = (rc ? 0 : SECKEYBLOBSIZE);
+ }
+ }
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int cca_verifykey(const u8 *key, u32 keylen,
+ u16 *card, u16 *dom,
+ u32 *keytype, u32 *keybitsize, u32 *flags, u32 pflags)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ u32 apqns[MAXAPQNSINLIST], nr_apqns = ARRAY_SIZE(apqns);
+ u32 xflags;
+ int rc;
+
+ xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
+
+ if (keylen < sizeof(*hdr))
+ return -EINVAL;
+
+ zcrypt_wait_api_operational();
+
+ if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_AES) {
+ struct secaeskeytoken *t = (struct secaeskeytoken *)key;
+
+ rc = cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0);
+ if (rc)
+ goto out;
+ *keytype = PKEY_TYPE_CCA_DATA;
+ *keybitsize = t->bitsize;
+ rc = cca_findcard2(apqns, &nr_apqns, *card, *dom,
+ ZCRYPT_CEX3C, AES_MK_SET,
+ t->mkvp, 0, xflags);
+ if (!rc)
+ *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+ if (rc == -ENODEV) {
+ nr_apqns = ARRAY_SIZE(apqns);
+ rc = cca_findcard2(apqns, &nr_apqns, *card, *dom,
+ ZCRYPT_CEX3C, AES_MK_SET,
+ 0, t->mkvp, xflags);
+ if (!rc)
+ *flags = PKEY_FLAGS_MATCH_ALT_MKVP;
+ }
+ if (rc)
+ goto out;
+
+ *card = ((struct pkey_apqn *)apqns)->card;
+ *dom = ((struct pkey_apqn *)apqns)->domain;
+
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL &&
+ hdr->version == TOKVER_CCA_VLSC) {
+ struct cipherkeytoken *t = (struct cipherkeytoken *)key;
+
+ rc = cca_check_secaescipherkey(pkey_dbf_info, 3, key, 0, 1);
+ if (rc)
+ goto out;
+ *keytype = PKEY_TYPE_CCA_CIPHER;
+ *keybitsize = PKEY_SIZE_UNKNOWN;
+ if (!t->plfver && t->wpllen == 512)
+ *keybitsize = PKEY_SIZE_AES_128;
+ else if (!t->plfver && t->wpllen == 576)
+ *keybitsize = PKEY_SIZE_AES_192;
+ else if (!t->plfver && t->wpllen == 640)
+ *keybitsize = PKEY_SIZE_AES_256;
+ rc = cca_findcard2(apqns, &nr_apqns, *card, *dom,
+ ZCRYPT_CEX6, AES_MK_SET,
+ t->mkvp0, 0, xflags);
+ if (!rc)
+ *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+ if (rc == -ENODEV) {
+ nr_apqns = ARRAY_SIZE(apqns);
+ rc = cca_findcard2(apqns, &nr_apqns, *card, *dom,
+ ZCRYPT_CEX6, AES_MK_SET,
+ 0, t->mkvp0, xflags);
+ if (!rc)
+ *flags = PKEY_FLAGS_MATCH_ALT_MKVP;
+ }
+ if (rc)
+ goto out;
+
+ *card = ((struct pkey_apqn *)apqns)->card;
+ *dom = ((struct pkey_apqn *)apqns)->domain;
+
+ } else {
+ /* unknown/unsupported key blob */
+ rc = -EINVAL;
+ }
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * This function provides an alternate but usually slow way
+ * to convert a 'clear key token' with AES key material into
+ * a protected key. This is done via an intermediate step
+ * which creates a CCA AES DATA secure key first and then
+ * derives the protected key from this secure key.
+ */
+static int cca_slowpath_key2protkey(const struct pkey_apqn *apqns,
+ size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen,
+ u32 *protkeytype, u32 pflags)
+{
+ const struct keytoken_header *hdr = (const struct keytoken_header *)key;
+ const struct clearkeytoken *t = (const struct clearkeytoken *)key;
+ u8 tmpbuf[SECKEYBLOBSIZE]; /* 64 bytes */
+ u32 tmplen, keysize = 0;
+ int i, rc;
+
+ if (keylen < sizeof(*hdr))
+ return -EINVAL;
+
+ if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_CLEAR_KEY)
+ keysize = pkey_keytype_aes_to_size(t->keytype);
+ if (!keysize || t->len != keysize)
+ return -EINVAL;
+
+ /* try two times in case of failure */
+ for (i = 0, rc = -ENODEV; i < 2 && rc; i++) {
+ tmplen = SECKEYBLOBSIZE;
+ rc = cca_clr2key(NULL, 0, t->keytype, PKEY_TYPE_CCA_DATA,
+ 8 * keysize, 0, t->clearkey, t->len,
+ tmpbuf, &tmplen, NULL, pflags);
+ pr_debug("cca_clr2key()=%d\n", rc);
+ if (rc)
+ continue;
+ rc = cca_key2protkey(NULL, 0, tmpbuf, tmplen,
+ protkey, protkeylen, protkeytype, pflags);
+ pr_debug("cca_key2protkey()=%d\n", rc);
+ }
+
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static struct pkey_handler cca_handler = {
+ .module = THIS_MODULE,
+ .name = "PKEY CCA handler",
+ .is_supported_key = is_cca_key,
+ .is_supported_keytype = is_cca_keytype,
+ .key_to_protkey = cca_key2protkey,
+ .slowpath_key_to_protkey = cca_slowpath_key2protkey,
+ .gen_key = cca_gen_key,
+ .clr_to_key = cca_clr2key,
+ .verify_key = cca_verifykey,
+ .apqns_for_key = cca_apqns4key,
+ .apqns_for_keytype = cca_apqns4type,
+};
+
+/*
+ * Module init
+ */
+static int __init pkey_cca_init(void)
+{
+ /* register this module as pkey handler for all the cca stuff */
+ return pkey_handler_register(&cca_handler);
+}
+
+/*
+ * Module exit
+ */
+static void __exit pkey_cca_exit(void)
+{
+ /* unregister this module as pkey handler */
+ pkey_handler_unregister(&cca_handler);
+}
+
+module_init(pkey_cca_init);
+module_exit(pkey_cca_exit);
diff --git a/drivers/s390/crypto/pkey_ep11.c b/drivers/s390/crypto/pkey_ep11.c
new file mode 100644
index 000000000000..654eed20d0d9
--- /dev/null
+++ b/drivers/s390/crypto/pkey_ep11.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pkey ep11 specific code
+ *
+ * Copyright IBM Corp. 2024
+ */
+
+#define pr_fmt(fmt) "pkey: " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cpufeature.h>
+
+#include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
+#include "pkey_base.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 protected key EP11 handler");
+
+#if IS_MODULE(CONFIG_PKEY_EP11)
+static struct ap_device_id pkey_ep11_card_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_CEX4 },
+ { .dev_type = AP_DEVICE_TYPE_CEX5 },
+ { .dev_type = AP_DEVICE_TYPE_CEX6 },
+ { .dev_type = AP_DEVICE_TYPE_CEX7 },
+ { .dev_type = AP_DEVICE_TYPE_CEX8 },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(ap, pkey_ep11_card_ids);
+#endif
+
+/*
+ * Check key blob for known and supported EP11 key.
+ */
+static bool is_ep11_key(const u8 *key, u32 keylen)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+ if (keylen < sizeof(*hdr))
+ return false;
+
+ switch (hdr->type) {
+ case TOKTYPE_NON_CCA:
+ switch (hdr->version) {
+ case TOKVER_EP11_AES:
+ case TOKVER_EP11_AES_WITH_HEADER:
+ case TOKVER_EP11_ECC_WITH_HEADER:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+static bool is_ep11_keytype(enum pkey_key_type key_type)
+{
+ switch (key_type) {
+ case PKEY_TYPE_EP11:
+ case PKEY_TYPE_EP11_AES:
+ case PKEY_TYPE_EP11_ECC:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int ep11_apqns4key(const u8 *key, u32 keylen, u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns, u32 pflags)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns);
+ u32 xflags;
+ int rc;
+
+ xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
+
+ if (!flags)
+ flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+
+ if (keylen < sizeof(struct keytoken_header))
+ return -EINVAL;
+
+ zcrypt_wait_api_operational();
+
+ if (hdr->type == TOKTYPE_NON_CCA &&
+ (hdr->version == TOKVER_EP11_AES_WITH_HEADER ||
+ hdr->version == TOKVER_EP11_ECC_WITH_HEADER) &&
+ is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+ struct ep11keyblob *kb = (struct ep11keyblob *)
+ (key + sizeof(struct ep11kblob_header));
+ int minhwtype = 0, api = 0;
+
+ if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
+ return -EINVAL;
+ if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
+ minhwtype = ZCRYPT_CEX7;
+ api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
+ }
+ rc = ep11_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ minhwtype, api, kb->wkvp, xflags);
+ if (rc)
+ goto out;
+
+ } else if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES &&
+ is_ep11_keyblob(key)) {
+ struct ep11keyblob *kb = (struct ep11keyblob *)key;
+ int minhwtype = 0, api = 0;
+
+ if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
+ return -EINVAL;
+ if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
+ minhwtype = ZCRYPT_CEX7;
+ api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
+ }
+ rc = ep11_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ minhwtype, api, kb->wkvp, xflags);
+ if (rc)
+ goto out;
+
+ } else {
+ PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n",
+ __func__, hdr->type, hdr->version);
+ return -EINVAL;
+ }
+
+ if (apqns) {
+ if (*nr_apqns < _nr_apqns)
+ rc = -ENOSPC;
+ else
+ memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
+ }
+ *nr_apqns = _nr_apqns;
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int ep11_apqns4type(enum pkey_key_type ktype,
+ u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
+ struct pkey_apqn *apqns, size_t *nr_apqns, u32 pflags)
+{
+ u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns);
+ u32 xflags;
+ int rc;
+
+ xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
+
+ zcrypt_wait_api_operational();
+
+ if (ktype == PKEY_TYPE_EP11 ||
+ ktype == PKEY_TYPE_EP11_AES ||
+ ktype == PKEY_TYPE_EP11_ECC) {
+ u8 *wkvp = NULL;
+ int api;
+
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ wkvp = cur_mkvp;
+ api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
+ rc = ep11_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, api, wkvp, xflags);
+ if (rc)
+ goto out;
+
+ } else {
+ PKEY_DBF_ERR("%s unknown/unsupported key type %d\n",
+ __func__, (int)ktype);
+ return -EINVAL;
+ }
+
+ if (apqns) {
+ if (*nr_apqns < _nr_apqns)
+ rc = -ENOSPC;
+ else
+ memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
+ }
+ *nr_apqns = _nr_apqns;
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int ep11_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype,
+ u32 pflags)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ struct pkey_apqn _apqns[MAXAPQNSINLIST];
+ u32 xflags;
+ int i, rc;
+
+ xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
+
+ if (keylen < sizeof(*hdr))
+ return -EINVAL;
+
+ if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES_WITH_HEADER &&
+ is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+ /* EP11 AES key blob with header */
+ if (ep11_check_aes_key_with_hdr(pkey_dbf_info,
+ 3, key, keylen, 1))
+ return -EINVAL;
+ } else if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_ECC_WITH_HEADER &&
+ is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+ /* EP11 ECC key blob with header */
+ if (ep11_check_ecc_key_with_hdr(pkey_dbf_info,
+ 3, key, keylen, 1))
+ return -EINVAL;
+ } else if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES &&
+ is_ep11_keyblob(key)) {
+ /* EP11 AES key blob with header in session field */
+ if (ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1))
+ return -EINVAL;
+ } else {
+ PKEY_DBF_ERR("%s unknown/unsupported blob type %d version %d\n",
+ __func__, hdr->type, hdr->version);
+ return -EINVAL;
+ }
+
+ zcrypt_wait_api_operational();
+
+ if (!apqns || (nr_apqns == 1 &&
+ apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
+ nr_apqns = MAXAPQNSINLIST;
+ rc = ep11_apqns4key(key, keylen, 0, _apqns, &nr_apqns, pflags);
+ if (rc)
+ goto out;
+ apqns = _apqns;
+ }
+
+ for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
+ if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES_WITH_HEADER &&
+ is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+ rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain,
+ key, hdr->len, protkey,
+ protkeylen, protkeytype, xflags);
+ } else if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_ECC_WITH_HEADER &&
+ is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
+ rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain,
+ key, hdr->len, protkey,
+ protkeylen, protkeytype, xflags);
+ } else if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES &&
+ is_ep11_keyblob(key)) {
+ rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain,
+ key, hdr->len, protkey,
+ protkeylen, protkeytype, xflags);
+ } else {
+ rc = -EINVAL;
+ break;
+ }
+ }
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * Generate EP11 secure key.
+ * As of now only EP11 AES secure keys are supported.
+ * keytype is one of the PKEY_KEYTYPE_* constants,
+ * subtype may be PKEY_TYPE_EP11 or PKEY_TYPE_EP11_AES
+ * or 0 (results in subtype PKEY_TYPE_EP11_AES),
+ * keybitsize is the bit size of the key (may be 0 for
+ * keytype PKEY_KEYTYPE_AES_*).
+ */
+static int ep11_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 subtype,
+ u32 keybitsize, u32 flags,
+ u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags)
+{
+ struct pkey_apqn _apqns[MAXAPQNSINLIST];
+ int i, len, rc;
+ u32 xflags;
+
+ xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
+
+ /* check keytype, subtype, keybitsize */
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_192:
+ case PKEY_KEYTYPE_AES_256:
+ len = pkey_keytype_aes_to_size(keytype);
+ if (keybitsize && keybitsize != 8 * len) {
+ PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ return -EINVAL;
+ }
+ keybitsize = 8 * len;
+ switch (subtype) {
+ case PKEY_TYPE_EP11:
+ case PKEY_TYPE_EP11_AES:
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n",
+ __func__, subtype);
+ return -EINVAL;
+ }
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n",
+ __func__, keytype);
+ return -EINVAL;
+ }
+
+ zcrypt_wait_api_operational();
+
+ if (!apqns || (nr_apqns == 1 &&
+ apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
+ nr_apqns = MAXAPQNSINLIST;
+ rc = ep11_apqns4type(subtype, NULL, NULL, 0,
+ _apqns, &nr_apqns, pflags);
+ if (rc)
+ goto out;
+ apqns = _apqns;
+ }
+
+ for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
+ rc = ep11_genaeskey(apqns[i].card, apqns[i].domain,
+ keybitsize, flags,
+ keybuf, keybuflen, subtype, xflags);
+ }
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * Generate EP11 secure key with given clear key value.
+ * As of now only EP11 AES secure keys are supported.
+ * keytype is one of the PKEY_KEYTYPE_* constants,
+ * subtype may be PKEY_TYPE_EP11 or PKEY_TYPE_EP11_AES
+ * or 0 (assumes PKEY_TYPE_EP11_AES then).
+ * keybitsize is the bit size of the key (may be 0 for
+ * keytype PKEY_KEYTYPE_AES_*).
+ */
+static int ep11_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns,
+ u32 keytype, u32 subtype,
+ u32 keybitsize, u32 flags,
+ const u8 *clrkey, u32 clrkeylen,
+ u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags)
+{
+ struct pkey_apqn _apqns[MAXAPQNSINLIST];
+ int i, len, rc;
+ u32 xflags;
+
+ xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
+
+ /* check keytype, subtype, clrkeylen, keybitsize */
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_192:
+ case PKEY_KEYTYPE_AES_256:
+ len = pkey_keytype_aes_to_size(keytype);
+ if (keybitsize && keybitsize != 8 * len) {
+ PKEY_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ return -EINVAL;
+ }
+ keybitsize = 8 * len;
+ if (clrkeylen != len) {
+ PKEY_DBF_ERR("%s invalid clear key len %d != %d\n",
+ __func__, clrkeylen, len);
+ return -EINVAL;
+ }
+ switch (subtype) {
+ case PKEY_TYPE_EP11:
+ case PKEY_TYPE_EP11_AES:
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n",
+ __func__, subtype);
+ return -EINVAL;
+ }
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n",
+ __func__, keytype);
+ return -EINVAL;
+ }
+
+ zcrypt_wait_api_operational();
+
+ if (!apqns || (nr_apqns == 1 &&
+ apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) {
+ nr_apqns = MAXAPQNSINLIST;
+ rc = ep11_apqns4type(subtype, NULL, NULL, 0,
+ _apqns, &nr_apqns, pflags);
+ if (rc)
+ goto out;
+ apqns = _apqns;
+ }
+
+ for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) {
+ rc = ep11_clr2keyblob(apqns[i].card, apqns[i].domain,
+ keybitsize, flags, clrkey,
+ keybuf, keybuflen, subtype, xflags);
+ }
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int ep11_verifykey(const u8 *key, u32 keylen,
+ u16 *card, u16 *dom,
+ u32 *keytype, u32 *keybitsize, u32 *flags, u32 pflags)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ u32 apqns[MAXAPQNSINLIST], nr_apqns = ARRAY_SIZE(apqns);
+ u32 xflags;
+ int rc;
+
+ xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0;
+
+ if (keylen < sizeof(*hdr))
+ return -EINVAL;
+
+ zcrypt_wait_api_operational();
+
+ if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES) {
+ struct ep11keyblob *kb = (struct ep11keyblob *)key;
+ int api;
+
+ rc = ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1);
+ if (rc)
+ goto out;
+ *keytype = PKEY_TYPE_EP11;
+ *keybitsize = kb->head.bitlen;
+
+ api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
+ rc = ep11_findcard2(apqns, &nr_apqns, *card, *dom,
+ ZCRYPT_CEX7, api,
+ ep11_kb_wkvp(key, keylen), xflags);
+ if (rc)
+ goto out;
+
+ *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+
+ *card = ((struct pkey_apqn *)apqns)->card;
+ *dom = ((struct pkey_apqn *)apqns)->domain;
+
+ } else if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_EP11_AES_WITH_HEADER) {
+ struct ep11kblob_header *kh = (struct ep11kblob_header *)key;
+ int api;
+
+ rc = ep11_check_aes_key_with_hdr(pkey_dbf_info,
+ 3, key, keylen, 1);
+ if (rc)
+ goto out;
+ *keytype = PKEY_TYPE_EP11_AES;
+ *keybitsize = kh->bitlen;
+
+ api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4;
+ rc = ep11_findcard2(apqns, &nr_apqns, *card, *dom,
+ ZCRYPT_CEX7, api,
+ ep11_kb_wkvp(key, keylen), xflags);
+ if (rc)
+ goto out;
+
+ *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+
+ *card = ((struct pkey_apqn *)apqns)->card;
+ *dom = ((struct pkey_apqn *)apqns)->domain;
+
+ } else {
+ /* unknown/unsupported key blob */
+ rc = -EINVAL;
+ }
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * This function provides an alternate but usually slow way
+ * to convert a 'clear key token' with AES key material into
+ * a protected key. That is done via an intermediate step
+ * which creates an EP11 AES secure key first and then derives
+ * the protected key from this secure key.
+ */
+static int ep11_slowpath_key2protkey(const struct pkey_apqn *apqns,
+ size_t nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen,
+ u32 *protkeytype, u32 pflags)
+{
+ const struct keytoken_header *hdr = (const struct keytoken_header *)key;
+ const struct clearkeytoken *t = (const struct clearkeytoken *)key;
+ u8 tmpbuf[MAXEP11AESKEYBLOBSIZE]; /* 336 bytes */
+ u32 tmplen, keysize = 0;
+ int i, rc;
+
+ if (keylen < sizeof(*hdr))
+ return -EINVAL;
+
+ if (hdr->type == TOKTYPE_NON_CCA &&
+ hdr->version == TOKVER_CLEAR_KEY)
+ keysize = pkey_keytype_aes_to_size(t->keytype);
+ if (!keysize || t->len != keysize)
+ return -EINVAL;
+
+ /* try two times in case of failure */
+ for (i = 0, rc = -ENODEV; i < 2 && rc; i++) {
+ tmplen = MAXEP11AESKEYBLOBSIZE;
+ rc = ep11_clr2key(NULL, 0, t->keytype, PKEY_TYPE_EP11,
+ 8 * keysize, 0, t->clearkey, t->len,
+ tmpbuf, &tmplen, NULL, pflags);
+ pr_debug("ep11_clr2key()=%d\n", rc);
+ if (rc)
+ continue;
+ rc = ep11_key2protkey(NULL, 0, tmpbuf, tmplen,
+ protkey, protkeylen, protkeytype, pflags);
+ pr_debug("ep11_key2protkey()=%d\n", rc);
+ }
+
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static struct pkey_handler ep11_handler = {
+ .module = THIS_MODULE,
+ .name = "PKEY EP11 handler",
+ .is_supported_key = is_ep11_key,
+ .is_supported_keytype = is_ep11_keytype,
+ .key_to_protkey = ep11_key2protkey,
+ .slowpath_key_to_protkey = ep11_slowpath_key2protkey,
+ .gen_key = ep11_gen_key,
+ .clr_to_key = ep11_clr2key,
+ .verify_key = ep11_verifykey,
+ .apqns_for_key = ep11_apqns4key,
+ .apqns_for_keytype = ep11_apqns4type,
+};
+
+/*
+ * Module init
+ */
+static int __init pkey_ep11_init(void)
+{
+ /* register this module as pkey handler for all the ep11 stuff */
+ return pkey_handler_register(&ep11_handler);
+}
+
+/*
+ * Module exit
+ */
+static void __exit pkey_ep11_exit(void)
+{
+ /* unregister this module as pkey handler */
+ pkey_handler_unregister(&ep11_handler);
+}
+
+module_init(pkey_ep11_init);
+module_exit(pkey_ep11_exit);
diff --git a/drivers/s390/crypto/pkey_pckmo.c b/drivers/s390/crypto/pkey_pckmo.c
new file mode 100644
index 000000000000..793326c4c59a
--- /dev/null
+++ b/drivers/s390/crypto/pkey_pckmo.c
@@ -0,0 +1,473 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pkey pckmo specific code
+ *
+ * Copyright IBM Corp. 2024
+ */
+
+#define pr_fmt(fmt) "pkey: " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cpufeature.h>
+#include <asm/cpacf.h>
+#include <crypto/aes.h>
+#include <linux/random.h>
+
+#include "zcrypt_ccamisc.h"
+#include "pkey_base.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 protected key PCKMO handler");
+
+/*
+ * Check key blob for known and supported PCKMO key.
+ */
+static bool is_pckmo_key(const u8 *key, u32 keylen)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ struct clearkeytoken *t = (struct clearkeytoken *)key;
+
+ if (keylen < sizeof(*hdr))
+ return false;
+
+ switch (hdr->type) {
+ case TOKTYPE_NON_CCA:
+ switch (hdr->version) {
+ case TOKVER_CLEAR_KEY:
+ if (pkey_keytype_to_size(t->keytype))
+ return true;
+ return false;
+ case TOKVER_PROTECTED_KEY:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+static bool is_pckmo_keytype(enum pkey_key_type keytype)
+{
+ switch (keytype) {
+ case PKEY_TYPE_PROTKEY:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * Create a protected key from a clear key value via PCKMO instruction.
+ */
+static int pckmo_clr2protkey(u32 keytype, const u8 *clrkey, u32 clrkeylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ /* mask of available pckmo subfunctions */
+ static cpacf_mask_t pckmo_functions;
+
+ int keysize, rc = -EINVAL;
+ u8 paramblock[160];
+ u32 pkeytype = 0;
+ unsigned int fc;
+
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ fc = CPACF_PCKMO_ENC_AES_128_KEY;
+ break;
+ case PKEY_KEYTYPE_AES_192:
+ fc = CPACF_PCKMO_ENC_AES_192_KEY;
+ break;
+ case PKEY_KEYTYPE_AES_256:
+ fc = CPACF_PCKMO_ENC_AES_256_KEY;
+ break;
+ case PKEY_KEYTYPE_ECC_P256:
+ pkeytype = PKEY_KEYTYPE_ECC;
+ fc = CPACF_PCKMO_ENC_ECC_P256_KEY;
+ break;
+ case PKEY_KEYTYPE_ECC_P384:
+ pkeytype = PKEY_KEYTYPE_ECC;
+ fc = CPACF_PCKMO_ENC_ECC_P384_KEY;
+ break;
+ case PKEY_KEYTYPE_ECC_P521:
+ pkeytype = PKEY_KEYTYPE_ECC;
+ fc = CPACF_PCKMO_ENC_ECC_P521_KEY;
+ break;
+ case PKEY_KEYTYPE_ECC_ED25519:
+ pkeytype = PKEY_KEYTYPE_ECC;
+ fc = CPACF_PCKMO_ENC_ECC_ED25519_KEY;
+ break;
+ case PKEY_KEYTYPE_ECC_ED448:
+ pkeytype = PKEY_KEYTYPE_ECC;
+ fc = CPACF_PCKMO_ENC_ECC_ED448_KEY;
+ break;
+ case PKEY_KEYTYPE_AES_XTS_128:
+ fc = CPACF_PCKMO_ENC_AES_XTS_128_DOUBLE_KEY;
+ break;
+ case PKEY_KEYTYPE_AES_XTS_256:
+ fc = CPACF_PCKMO_ENC_AES_XTS_256_DOUBLE_KEY;
+ break;
+ case PKEY_KEYTYPE_HMAC_512:
+ fc = CPACF_PCKMO_ENC_HMAC_512_KEY;
+ break;
+ case PKEY_KEYTYPE_HMAC_1024:
+ fc = CPACF_PCKMO_ENC_HMAC_1024_KEY;
+ break;
+ default:
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n",
+ __func__, keytype);
+ goto out;
+ }
+
+ keysize = pkey_keytype_to_size(keytype);
+ pkeytype = pkeytype ?: keytype;
+
+ if (clrkeylen && clrkeylen < keysize) {
+ PKEY_DBF_ERR("%s clear key size too small: %u < %d\n",
+ __func__, clrkeylen, keysize);
+ goto out;
+ }
+ if (*protkeylen < keysize + AES_WK_VP_SIZE) {
+ PKEY_DBF_ERR("%s prot key buffer size too small: %u < %d\n",
+ __func__, *protkeylen, keysize + AES_WK_VP_SIZE);
+ goto out;
+ }
+
+ /* Did we already check for PCKMO ? */
+ if (!pckmo_functions.bytes[0]) {
+ /* no, so check now */
+ if (!cpacf_query(CPACF_PCKMO, &pckmo_functions)) {
+ PKEY_DBF_ERR("%s cpacf_query() failed\n", __func__);
+ rc = -ENODEV;
+ goto out;
+ }
+ }
+ /* check for the pckmo subfunction we need now */
+ if (!cpacf_test_func(&pckmo_functions, fc)) {
+ PKEY_DBF_ERR("%s pckmo fc 0x%02x not available\n",
+ __func__, fc);
+ rc = -ENODEV;
+ goto out;
+ }
+
+ /* prepare param block */
+ memset(paramblock, 0, sizeof(paramblock));
+ memcpy(paramblock, clrkey, keysize);
+
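+ /*
+ * PCKMO transforms the param block in place: on input the first
+ * keysize bytes hold the clear key, on output they hold the
+ * wrapped key with the 32 byte wrapping key verification
+ * pattern (WK VP) appended right behind it.
+ */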
+ /* call the pckmo instruction */
+ cpacf_pckmo(fc, paramblock);
+
+ /* copy created protected key to key buffer including the wkvp block */
+ *protkeylen = keysize + AES_WK_VP_SIZE;
+ memcpy(protkey, paramblock, *protkeylen);
+ *protkeytype = pkeytype;
+
+ rc = 0;
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * Verify a raw protected key blob.
+ */
+static int pckmo_verify_protkey(const u8 *protkey, u32 protkeylen,
+ u32 protkeytype)
+{
+ u8 clrkey[16] = { 0 }, tmpkeybuf[16 + AES_WK_VP_SIZE];
+ u32 tmpkeybuflen, tmpkeytype;
+ int keysize, rc = -EINVAL;
+ u8 *wkvp;
+
+ /* check protkey type and size */
+ keysize = pkey_keytype_to_size(protkeytype);
+ if (!keysize) {
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", __func__,
+ protkeytype);
+ goto out;
+ }
+ if (protkeylen < keysize + AES_WK_VP_SIZE)
+ goto out;
+
+ /* generate a dummy AES 128 protected key */
+ tmpkeybuflen = sizeof(tmpkeybuf);
+ rc = pckmo_clr2protkey(PKEY_KEYTYPE_AES_128,
+ clrkey, sizeof(clrkey),
+ tmpkeybuf, &tmpkeybuflen, &tmpkeytype);
+ if (rc)
+ goto out;
+ memzero_explicit(tmpkeybuf, 16);
+ wkvp = tmpkeybuf + 16;
+
+ /* compare WK VP from the temp key with that of the given prot key */
+ if (memcmp(wkvp, protkey + keysize, AES_WK_VP_SIZE)) {
+ PKEY_DBF_ERR("%s protected key WK VP mismatch\n", __func__);
+ rc = -EKEYREJECTED;
+ goto out;
+ }
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int pckmo_key2protkey(const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ int rc = -EINVAL;
+
+ if (keylen < sizeof(*hdr))
+ return -EINVAL;
+ if (hdr->type != TOKTYPE_NON_CCA)
+ return -EINVAL;
+
+ switch (hdr->version) {
+ case TOKVER_PROTECTED_KEY: {
+ struct protkeytoken *t = (struct protkeytoken *)key;
+ u32 keysize;
+
+ if (keylen < sizeof(*t))
+ goto out;
+ keysize = pkey_keytype_to_size(t->keytype);
+ if (!keysize) {
+ PKEY_DBF_ERR("%s protected key token: unknown keytype %u\n",
+ __func__, t->keytype);
+ goto out;
+ }
+ switch (t->keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_192:
+ case PKEY_KEYTYPE_AES_256:
+ if (t->len != keysize + AES_WK_VP_SIZE ||
+ keylen < sizeof(struct protaeskeytoken))
+ goto out;
+ rc = pckmo_verify_protkey(t->protkey, t->len,
+ t->keytype);
+ if (rc)
+ goto out;
+ break;
+ default:
+ if (t->len != keysize + AES_WK_VP_SIZE ||
+ keylen < sizeof(*t) + keysize + AES_WK_VP_SIZE)
+ goto out;
+ break;
+ }
+ memcpy(protkey, t->protkey, t->len);
+ *protkeylen = t->len;
+ *protkeytype = t->keytype;
+ rc = 0;
+ break;
+ }
+ case TOKVER_CLEAR_KEY: {
+ struct clearkeytoken *t = (struct clearkeytoken *)key;
+ u32 keysize;
+
+ if (keylen < sizeof(*t) ||
+ keylen < sizeof(*t) + t->len)
+ goto out;
+ keysize = pkey_keytype_to_size(t->keytype);
+ if (!keysize) {
+ PKEY_DBF_ERR("%s clear key token: unknown keytype %u\n",
+ __func__, t->keytype);
+ goto out;
+ }
+ if (t->len != keysize) {
+ PKEY_DBF_ERR("%s clear key token: invalid key len %u\n",
+ __func__, t->len);
+ goto out;
+ }
+ rc = pckmo_clr2protkey(t->keytype, t->clearkey, t->len,
+ protkey, protkeylen, protkeytype);
+ break;
+ }
+ default:
+ PKEY_DBF_ERR("%s unknown non-CCA token version %d\n",
+ __func__, hdr->version);
+ break;
+ }
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * Generate a random protected key.
+ */
+static int pckmo_gen_protkey(u32 keytype, u32 subtype,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ u8 clrkey[128];
+ int keysize;
+ int rc;
+
+ keysize = pkey_keytype_to_size(keytype);
+ if (!keysize) {
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n",
+ __func__, keytype);
+ return -EINVAL;
+ }
+ if (subtype != PKEY_TYPE_PROTKEY) {
+ PKEY_DBF_ERR("%s unknown/unsupported subtype %d\n",
+ __func__, subtype);
+ return -EINVAL;
+ }
+
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_192:
+ case PKEY_KEYTYPE_AES_256:
+ case PKEY_KEYTYPE_AES_XTS_128:
+ case PKEY_KEYTYPE_AES_XTS_256:
+ case PKEY_KEYTYPE_HMAC_512:
+ case PKEY_KEYTYPE_HMAC_1024:
+ break;
+ default:
+ PKEY_DBF_ERR("%s unsupported keytype %d\n",
+ __func__, keytype);
+ return -EINVAL;
+ }
+
+ /* generate a dummy random clear key */
+ get_random_bytes(clrkey, keysize);
+
+ /* convert it to a dummy protected key */
+ rc = pckmo_clr2protkey(keytype, clrkey, keysize,
+ protkey, protkeylen, protkeytype);
+ if (rc)
+ goto out;
+
+ /* replace the key part of the protected key with random bytes */
+ get_random_bytes(protkey, keysize);
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * Verify a protected key token blob.
+ */
+static int pckmo_verify_key(const u8 *key, u32 keylen)
+{
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+ int rc = -EINVAL;
+
+ if (keylen < sizeof(*hdr))
+ return -EINVAL;
+ if (hdr->type != TOKTYPE_NON_CCA)
+ return -EINVAL;
+
+ switch (hdr->version) {
+ case TOKVER_PROTECTED_KEY: {
+ struct protkeytoken *t = (struct protkeytoken *)key;
+ u32 keysize;
+
+ if (keylen < sizeof(*t))
+ goto out;
+ keysize = pkey_keytype_to_size(t->keytype);
+ if (!keysize || t->len != keysize + AES_WK_VP_SIZE)
+ goto out;
+ switch (t->keytype) {
+ case PKEY_KEYTYPE_AES_128:
+ case PKEY_KEYTYPE_AES_192:
+ case PKEY_KEYTYPE_AES_256:
+ if (keylen < sizeof(struct protaeskeytoken))
+ goto out;
+ break;
+ default:
+ if (keylen < sizeof(*t) + keysize + AES_WK_VP_SIZE)
+ goto out;
+ break;
+ }
+ rc = pckmo_verify_protkey(t->protkey, t->len, t->keytype);
+ break;
+ }
+ default:
+ PKEY_DBF_ERR("%s unknown non-CCA token version %d\n",
+ __func__, hdr->version);
+ break;
+ }
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+/*
+ * Wrapper functions used for the pkey handler struct
+ */
+
+static int pkey_pckmo_key2protkey(const struct pkey_apqn *_apqns,
+ size_t _nr_apqns,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *keyinfo,
+ u32 _xflags __always_unused)
+{
+ return pckmo_key2protkey(key, keylen,
+ protkey, protkeylen, keyinfo);
+}
+
+static int pkey_pckmo_gen_key(const struct pkey_apqn *_apqns, size_t _nr_apqns,
+ u32 keytype, u32 keysubtype,
+ u32 _keybitsize, u32 _flags,
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo,
+ u32 _xflags __always_unused)
+{
+ return pckmo_gen_protkey(keytype, keysubtype,
+ keybuf, keybuflen, keyinfo);
+}
+
+static int pkey_pckmo_verifykey(const u8 *key, u32 keylen,
+ u16 *_card, u16 *_dom,
+ u32 *_keytype, u32 *_keybitsize,
+ u32 *_flags, u32 _xflags __always_unused)
+{
+ return pckmo_verify_key(key, keylen);
+}
+
+static struct pkey_handler pckmo_handler = {
+ .module = THIS_MODULE,
+ .name = "PKEY PCKMO handler",
+ .is_supported_key = is_pckmo_key,
+ .is_supported_keytype = is_pckmo_keytype,
+ .key_to_protkey = pkey_pckmo_key2protkey,
+ .gen_key = pkey_pckmo_gen_key,
+ .verify_key = pkey_pckmo_verifykey,
+};
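+
+/*
+ * Note: ops left unset here (e.g. apqns_for_key) stay NULL, so the
+ * pkey base wrapper functions keep their rc at -ENODEV when this
+ * handler is picked for such a request.
+ */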
+
+/*
+ * Module init
+ */
+static int __init pkey_pckmo_init(void)
+{
+ cpacf_mask_t func_mask;
+
+ /*
+ * The pckmo instruction should be available even if we don't
+ * actually invoke it here. It comes with MSA 3, which is also
+ * the minimum level for the kmc instructions that are able to
+ * work with protected keys.
+ */
+ if (!cpacf_query(CPACF_PCKMO, &func_mask))
+ return -ENODEV;
+
+ /* register this module as pkey handler for all the pckmo stuff */
+ return pkey_handler_register(&pckmo_handler);
+}
+
+/*
+ * Module exit
+ */
+static void __exit pkey_pckmo_exit(void)
+{
+ /* unregister this module as pkey handler */
+ pkey_handler_unregister(&pckmo_handler);
+}
+
+module_cpu_feature_match(S390_CPU_FEATURE_MSA, pkey_pckmo_init);
+module_exit(pkey_pckmo_exit);
diff --git a/drivers/s390/crypto/pkey_sysfs.c b/drivers/s390/crypto/pkey_sysfs.c
new file mode 100644
index 000000000000..b6b0a46cb8a8
--- /dev/null
+++ b/drivers/s390/crypto/pkey_sysfs.c
@@ -0,0 +1,646 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pkey module sysfs related functions
+ *
+ * Copyright IBM Corp. 2024
+ */
+
+#define pr_fmt(fmt) "pkey: " fmt
+
+#include <linux/sysfs.h>
+
+#include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
+
+#include "pkey_base.h"
+
+/*
+ * Wrapper around pkey_handler_gen_key() which deals with the
+ * ENODEV return code and then tries to enforce a pkey handler
+ * module load.
+ */
+static int sys_pkey_handler_gen_key(u32 keytype, u32 keysubtype,
+ u32 keybitsize, u32 flags,
+ u8 *keybuf, u32 *keybuflen, u32 *keyinfo)
+{
+ int rc;
+
+ rc = pkey_handler_gen_key(NULL, 0,
+ keytype, keysubtype,
+ keybitsize, flags,
+ keybuf, keybuflen, keyinfo, 0);
+ if (rc == -ENODEV) {
+ pkey_handler_request_modules();
+ rc = pkey_handler_gen_key(NULL, 0,
+ keytype, keysubtype,
+ keybitsize, flags,
+ keybuf, keybuflen, keyinfo, 0);
+ }
+
+ return rc;
+}
+
+/*
+ * Sysfs attribute read function for all protected key binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * protected key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ */
+static ssize_t pkey_protkey_aes_attr_read(u32 keytype, bool is_xts, char *buf,
+ loff_t off, size_t count)
+{
+ struct protaeskeytoken protkeytoken;
+ struct pkey_protkey protkey;
+ int rc;
+
+ if (off != 0 || count < sizeof(protkeytoken))
+ return -EINVAL;
+ if (is_xts && count < 2 * sizeof(protkeytoken))
+ return -EINVAL;
+
+ memset(&protkeytoken, 0, sizeof(protkeytoken));
+ protkeytoken.type = TOKTYPE_NON_CCA;
+ protkeytoken.version = TOKVER_PROTECTED_KEY;
+ protkeytoken.keytype = keytype;
+
+ protkey.len = sizeof(protkey.protkey);
+ rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0,
+ protkey.protkey, &protkey.len,
+ &protkey.type);
+ if (rc)
+ return rc;
+
+ protkeytoken.len = protkey.len;
+ memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len);
+
+ memcpy(buf, &protkeytoken, sizeof(protkeytoken));
+
+ if (is_xts) {
+ /* xts needs a second protected key, reuse protkey struct */
+ protkey.len = sizeof(protkey.protkey);
+ rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0,
+ protkey.protkey, &protkey.len,
+ &protkey.type);
+ if (rc)
+ return rc;
+
+ protkeytoken.len = protkey.len;
+ memcpy(&protkeytoken.protkey, &protkey.protkey, protkey.len);
+
+ memcpy(buf + sizeof(protkeytoken), &protkeytoken,
+ sizeof(protkeytoken));
+
+ return 2 * sizeof(protkeytoken);
+ }
+
+ return sizeof(protkeytoken);
+}
+
+/*
+ * Sysfs attribute read function for the AES XTS prot key binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * protected key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ */
+static ssize_t pkey_protkey_aes_xts_attr_read(u32 keytype, char *buf,
+ loff_t off, size_t count)
+{
+ struct protkeytoken *t = (struct protkeytoken *)buf;
+ u32 protlen, prottype;
+ int rc;
+
+ switch (keytype) {
+ case PKEY_KEYTYPE_AES_XTS_128:
+ protlen = 64;
+ break;
+ case PKEY_KEYTYPE_AES_XTS_256:
+ protlen = 96;
+ break;
+ default:
+ return -EINVAL;
+ }
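+ /* i.e. protlen = 2 * AES key size + 32 byte WK VP */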
+
+ if (off != 0 || count < sizeof(*t) + protlen)
+ return -EINVAL;
+
+ memset(t, 0, sizeof(*t) + protlen);
+ t->type = TOKTYPE_NON_CCA;
+ t->version = TOKVER_PROTECTED_KEY;
+ t->keytype = keytype;
+
+ rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0,
+ t->protkey, &protlen, &prottype);
+ if (rc)
+ return rc;
+
+ t->len = protlen;
+
+ return sizeof(*t) + protlen;
+}
+
+/*
+ * Sysfs attribute read function for the HMAC prot key binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * protected key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ */
+static ssize_t pkey_protkey_hmac_attr_read(u32 keytype, char *buf,
+ loff_t off, size_t count)
+{
+ struct protkeytoken *t = (struct protkeytoken *)buf;
+ u32 protlen, prottype;
+ int rc;
+
+ switch (keytype) {
+ case PKEY_KEYTYPE_HMAC_512:
+ protlen = 96;
+ break;
+ case PKEY_KEYTYPE_HMAC_1024:
+ protlen = 160;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (off != 0 || count < sizeof(*t) + protlen)
+ return -EINVAL;
+
+ memset(t, 0, sizeof(*t) + protlen);
+ t->type = TOKTYPE_NON_CCA;
+ t->version = TOKVER_PROTECTED_KEY;
+ t->keytype = keytype;
+
+ rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_PROTKEY, 0, 0,
+ t->protkey, &protlen, &prottype);
+ if (rc)
+ return rc;
+
+ t->len = protlen;
+
+ return sizeof(*t) + protlen;
+}
+
+static ssize_t protkey_aes_128_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf,
+ off, count);
+}
+
+static ssize_t protkey_aes_192_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf,
+ off, count);
+}
+
+static ssize_t protkey_aes_256_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf,
+ off, count);
+}
+
+static ssize_t protkey_aes_128_xts_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf,
+ off, count);
+}
+
+static ssize_t protkey_aes_256_xts_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf,
+ off, count);
+}
+
+static ssize_t protkey_aes_xts_128_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_xts_attr_read(PKEY_KEYTYPE_AES_XTS_128,
+ buf, off, count);
+}
+
+static ssize_t protkey_aes_xts_256_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_aes_xts_attr_read(PKEY_KEYTYPE_AES_XTS_256,
+ buf, off, count);
+}
+
+static ssize_t protkey_hmac_512_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_hmac_attr_read(PKEY_KEYTYPE_HMAC_512,
+ buf, off, count);
+}
+
+static ssize_t protkey_hmac_1024_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_protkey_hmac_attr_read(PKEY_KEYTYPE_HMAC_1024,
+ buf, off, count);
+}
+
+static const BIN_ATTR_RO(protkey_aes_128, sizeof(struct protaeskeytoken));
+static const BIN_ATTR_RO(protkey_aes_192, sizeof(struct protaeskeytoken));
+static const BIN_ATTR_RO(protkey_aes_256, sizeof(struct protaeskeytoken));
+static const BIN_ATTR_RO(protkey_aes_128_xts, 2 * sizeof(struct protaeskeytoken));
+static const BIN_ATTR_RO(protkey_aes_256_xts, 2 * sizeof(struct protaeskeytoken));
+static const BIN_ATTR_RO(protkey_aes_xts_128, sizeof(struct protkeytoken) + 64);
+static const BIN_ATTR_RO(protkey_aes_xts_256, sizeof(struct protkeytoken) + 96);
+static const BIN_ATTR_RO(protkey_hmac_512, sizeof(struct protkeytoken) + 96);
+static const BIN_ATTR_RO(protkey_hmac_1024, sizeof(struct protkeytoken) + 160);
+
+static const struct bin_attribute *const protkey_attrs[] = {
+ &bin_attr_protkey_aes_128,
+ &bin_attr_protkey_aes_192,
+ &bin_attr_protkey_aes_256,
+ &bin_attr_protkey_aes_128_xts,
+ &bin_attr_protkey_aes_256_xts,
+ &bin_attr_protkey_aes_xts_128,
+ &bin_attr_protkey_aes_xts_256,
+ &bin_attr_protkey_hmac_512,
+ &bin_attr_protkey_hmac_1024,
+ NULL
+};
+
+static const struct attribute_group protkey_attr_group = {
+ .name = "protkey",
+ .bin_attrs = protkey_attrs,
+};
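
As a usage illustration (not taken from this patch), userspace must consume these attributes with a single read() that covers the whole token, since the read handlers above reject partial reads. The sysfs path below is an assumption based on the pkey misc device's conventional location; a minimal sketch:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed path: the protkey group hangs off the pkey misc device. */
	const char *path = "/sys/devices/virtual/misc/pkey/protkey/protkey_aes_256";
	unsigned char blob[256];	/* >= sizeof(struct protaeskeytoken) */
	int fd = open(path, O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return 1;
	n = read(fd, blob, sizeof(blob));	/* off != 0 would yield -EINVAL */
	close(fd);
	if (n < 0)
		return 1;
	printf("read %zd byte protected key token\n", n);
	return 0;
}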
+
+/*
+ * Sysfs attribute read function for all secure key ccadata binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * secure key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ */
+static ssize_t pkey_ccadata_aes_attr_read(u32 keytype, bool is_xts, char *buf,
+ loff_t off, size_t count)
+{
+ struct pkey_seckey *seckey = (struct pkey_seckey *)buf;
+ u32 buflen;
+ int rc;
+
+ if (off != 0 || count < sizeof(struct secaeskeytoken))
+ return -EINVAL;
+ if (is_xts)
+ if (count < 2 * sizeof(struct secaeskeytoken))
+ return -EINVAL;
+
+ buflen = sizeof(seckey->seckey);
+ rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_CCA_DATA, 0, 0,
+ seckey->seckey, &buflen, NULL);
+ if (rc)
+ return rc;
+
+ if (is_xts) {
+ seckey++;
+ buflen = sizeof(seckey->seckey);
+ rc = sys_pkey_handler_gen_key(keytype, PKEY_TYPE_CCA_DATA, 0, 0,
+ seckey->seckey, &buflen, NULL);
+ if (rc)
+ return rc;
+
+ return 2 * sizeof(struct secaeskeytoken);
+ }
+
+ return sizeof(struct secaeskeytoken);
+}
+
+static ssize_t ccadata_aes_128_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, false, buf,
+ off, count);
+}
+
+static ssize_t ccadata_aes_192_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_192, false, buf,
+ off, count);
+}
+
+static ssize_t ccadata_aes_256_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, false, buf,
+ off, count);
+}
+
+static ssize_t ccadata_aes_128_xts_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_128, true, buf,
+ off, count);
+}
+
+static ssize_t ccadata_aes_256_xts_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccadata_aes_attr_read(PKEY_KEYTYPE_AES_256, true, buf,
+ off, count);
+}
+
+static const BIN_ATTR_RO(ccadata_aes_128, sizeof(struct secaeskeytoken));
+static const BIN_ATTR_RO(ccadata_aes_192, sizeof(struct secaeskeytoken));
+static const BIN_ATTR_RO(ccadata_aes_256, sizeof(struct secaeskeytoken));
+static const BIN_ATTR_RO(ccadata_aes_128_xts, 2 * sizeof(struct secaeskeytoken));
+static const BIN_ATTR_RO(ccadata_aes_256_xts, 2 * sizeof(struct secaeskeytoken));
+
+static const struct bin_attribute *const ccadata_attrs[] = {
+ &bin_attr_ccadata_aes_128,
+ &bin_attr_ccadata_aes_192,
+ &bin_attr_ccadata_aes_256,
+ &bin_attr_ccadata_aes_128_xts,
+ &bin_attr_ccadata_aes_256_xts,
+ NULL
+};
+
+static const struct attribute_group ccadata_attr_group = {
+ .name = "ccadata",
+ .bin_attrs = ccadata_attrs,
+};
+
+#define CCACIPHERTOKENSIZE (sizeof(struct cipherkeytoken) + 80)
+
+/*
+ * Sysfs attribute read function for all secure key ccacipher binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * secure key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ */
+static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
+ bool is_xts, char *buf, loff_t off,
+ size_t count)
+{
+ u32 keysize = CCACIPHERTOKENSIZE;
+ int rc;
+
+ if (off != 0 || count < CCACIPHERTOKENSIZE)
+ return -EINVAL;
+ if (is_xts)
+ if (count < 2 * CCACIPHERTOKENSIZE)
+ return -EINVAL;
+
+ memset(buf, 0, is_xts ? 2 * keysize : keysize);
+
+ rc = sys_pkey_handler_gen_key(pkey_aes_bitsize_to_keytype(keybits),
+ PKEY_TYPE_CCA_CIPHER, keybits, 0,
+ buf, &keysize, NULL);
+ if (rc)
+ return rc;
+
+ if (is_xts) {
+ keysize = CCACIPHERTOKENSIZE;
+ buf += CCACIPHERTOKENSIZE;
+ rc = sys_pkey_handler_gen_key(
+ pkey_aes_bitsize_to_keytype(keybits),
+ PKEY_TYPE_CCA_CIPHER, keybits, 0,
+ buf, &keysize, NULL);
+ if (rc)
+ return rc;
+ return 2 * CCACIPHERTOKENSIZE;
+ }
+
+ return CCACIPHERTOKENSIZE;
+}
+
+static ssize_t ccacipher_aes_128_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
+ off, count);
+}
+
+static ssize_t ccacipher_aes_192_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
+ off, count);
+}
+
+static ssize_t ccacipher_aes_256_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
+ off, count);
+}
+
+static ssize_t ccacipher_aes_128_xts_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
+ off, count);
+}
+
+static ssize_t ccacipher_aes_256_xts_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
+ off, count);
+}
+
+static const BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE);
+static const BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE);
+static const BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE);
+static const BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE);
+static const BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE);
+
+static const struct bin_attribute *const ccacipher_attrs[] = {
+ &bin_attr_ccacipher_aes_128,
+ &bin_attr_ccacipher_aes_192,
+ &bin_attr_ccacipher_aes_256,
+ &bin_attr_ccacipher_aes_128_xts,
+ &bin_attr_ccacipher_aes_256_xts,
+ NULL
+};
+
+static const struct attribute_group ccacipher_attr_group = {
+ .name = "ccacipher",
+ .bin_attrs = ccacipher_attrs,
+};
+
+/*
+ * Sysfs attribute read function for all ep11 aes key binary attributes.
+ * The implementation cannot deal with partial reads, because a new random
+ * secure key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ * This function and the sysfs attributes using it provide EP11 key blobs
+ * padded to the upper limit of MAXEP11AESKEYBLOBSIZE which is currently
+ * 336 bytes.
+ */
+static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
+ bool is_xts, char *buf, loff_t off,
+ size_t count)
+{
+ u32 keysize = MAXEP11AESKEYBLOBSIZE;
+ int rc;
+
+ if (off != 0 || count < MAXEP11AESKEYBLOBSIZE)
+ return -EINVAL;
+ if (is_xts)
+ if (count < 2 * MAXEP11AESKEYBLOBSIZE)
+ return -EINVAL;
+
+ memset(buf, 0, is_xts ? 2 * keysize : keysize);
+
+ rc = sys_pkey_handler_gen_key(pkey_aes_bitsize_to_keytype(keybits),
+ PKEY_TYPE_EP11_AES, keybits, 0,
+ buf, &keysize, NULL);
+ if (rc)
+ return rc;
+
+ if (is_xts) {
+ keysize = MAXEP11AESKEYBLOBSIZE;
+ buf += MAXEP11AESKEYBLOBSIZE;
+ rc = sys_pkey_handler_gen_key(
+ pkey_aes_bitsize_to_keytype(keybits),
+ PKEY_TYPE_EP11_AES, keybits, 0,
+ buf, &keysize, NULL);
+ if (rc)
+ return rc;
+ return 2 * MAXEP11AESKEYBLOBSIZE;
+ }
+
+ return MAXEP11AESKEYBLOBSIZE;
+}
+
+static ssize_t ep11_aes_128_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_192_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_256_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_128_xts_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_256_xts_read(struct file *filp,
+ struct kobject *kobj,
+ const struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
+ off, count);
+}
+
+static const BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE);
+static const BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE);
+static const BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE);
+static const BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+static const BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+
+static const struct bin_attribute *const ep11_attrs[] = {
+ &bin_attr_ep11_aes_128,
+ &bin_attr_ep11_aes_192,
+ &bin_attr_ep11_aes_256,
+ &bin_attr_ep11_aes_128_xts,
+ &bin_attr_ep11_aes_256_xts,
+ NULL
+};
+
+static const struct attribute_group ep11_attr_group = {
+ .name = "ep11",
+ .bin_attrs = ep11_attrs,
+};
+
+const struct attribute_group *pkey_attr_groups[] = {
+ &protkey_attr_group,
+ &ccadata_attr_group,
+ &ccacipher_attr_group,
+ &ep11_attr_group,
+ NULL,
+};
diff --git a/drivers/s390/crypto/pkey_uv.c b/drivers/s390/crypto/pkey_uv.c
new file mode 100644
index 000000000000..6cd3c49384b5
--- /dev/null
+++ b/drivers/s390/crypto/pkey_uv.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * pkey uv specific code
+ *
+ * Copyright IBM Corp. 2024
+ */
+
+#define pr_fmt(fmt) "pkey: " fmt
+
+#include <linux/cpufeature.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <asm/uv.h>
+
+#include "zcrypt_ccamisc.h"
+#include "pkey_base.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("s390 protected key UV handler");
+
+/*
+ * One pre-allocated uv_secret_list for use with uv_find_secret()
+ */
+static struct uv_secret_list *uv_list;
+static DEFINE_MUTEX(uv_list_mutex);
+
+/*
+ * UV secret token struct and defines.
+ */
+
+#define TOKVER_UV_SECRET 0x09
+
+struct uvsecrettoken {
+ u8 type; /* 0x00 = TOKTYPE_NON_CCA */
+ u8 res0[3];
+ u8 version; /* 0x09 = TOKVER_UV_SECRET */
+ u8 res1[3];
+ u16 secret_type; /* one of enum uv_secret_types from uv.h */
+ u16 secret_len; /* length in bytes of the secret */
+ u8 secret_id[UV_SECRET_ID_LEN]; /* the secret id for this secret */
+} __packed;
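
For illustration only, a caller might fill such a token as follows; the secret id is a placeholder supplied by the caller (in practice it comes from the UV secret list), and the helper name is hypothetical while the constants are the ones defined above:

/* Hypothetical helper: build a UV secret token for an AES-256 secret. */
static void example_build_uv_token(struct uvsecrettoken *t,
				   const u8 id[UV_SECRET_ID_LEN])
{
	memset(t, 0, sizeof(*t));
	t->type = TOKTYPE_NON_CCA;
	t->version = TOKVER_UV_SECRET;
	t->secret_type = UV_SECRET_AES_256;
	t->secret_len = 32;	/* AES-256 key length in bytes */
	memcpy(t->secret_id, id, UV_SECRET_ID_LEN);
}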
+
+/*
+ * Check key blob for known and supported UV key.
+ */
+static bool is_uv_key(const u8 *key, u32 keylen)
+{
+ struct uvsecrettoken *t = (struct uvsecrettoken *)key;
+
+ if (keylen < sizeof(*t))
+ return false;
+
+ switch (t->type) {
+ case TOKTYPE_NON_CCA:
+ switch (t->version) {
+ case TOKVER_UV_SECRET:
+ switch (t->secret_type) {
+ case UV_SECRET_AES_128:
+ case UV_SECRET_AES_192:
+ case UV_SECRET_AES_256:
+ case UV_SECRET_AES_XTS_128:
+ case UV_SECRET_AES_XTS_256:
+ case UV_SECRET_HMAC_SHA_256:
+ case UV_SECRET_HMAC_SHA_512:
+ case UV_SECRET_ECDSA_P256:
+ case UV_SECRET_ECDSA_P384:
+ case UV_SECRET_ECDSA_P521:
+ case UV_SECRET_ECDSA_ED25519:
+ case UV_SECRET_ECDSA_ED448:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+static bool is_uv_keytype(enum pkey_key_type keytype)
+{
+ switch (keytype) {
+ case PKEY_TYPE_UVSECRET:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN],
+ struct uv_secret_list_item_hdr *secret)
+{
+ int rc;
+
+ mutex_lock(&uv_list_mutex);
+ memset(uv_list, 0, sizeof(*uv_list));
+ rc = uv_find_secret(secret_id, uv_list, secret);
+ mutex_unlock(&uv_list_mutex);
+
+ return rc;
+}
+
+static int retrieve_secret(const u8 secret_id[UV_SECRET_ID_LEN],
+ u16 *secret_type, u8 *buf, u32 *buflen)
+{
+ struct uv_secret_list_item_hdr secret_meta_data;
+ int rc;
+
+ rc = get_secret_metadata(secret_id, &secret_meta_data);
+ if (rc)
+ return rc;
+
+ if (*buflen < secret_meta_data.length)
+ return -EINVAL;
+
+ rc = uv_retrieve_secret(secret_meta_data.index,
+ buf, secret_meta_data.length);
+ if (rc)
+ return rc;
+
+ *secret_type = secret_meta_data.type;
+ *buflen = secret_meta_data.length;
+
+ return 0;
+}
+
+static int uv_get_size_and_type(u16 secret_type, u32 *pkeysize, u32 *pkeytype)
+{
+ int rc = 0;
+
+ switch (secret_type) {
+ case UV_SECRET_AES_128:
+ *pkeysize = 16 + AES_WK_VP_SIZE;
+ *pkeytype = PKEY_KEYTYPE_AES_128;
+ break;
+ case UV_SECRET_AES_192:
+ *pkeysize = 24 + AES_WK_VP_SIZE;
+ *pkeytype = PKEY_KEYTYPE_AES_192;
+ break;
+ case UV_SECRET_AES_256:
+ *pkeysize = 32 + AES_WK_VP_SIZE;
+ *pkeytype = PKEY_KEYTYPE_AES_256;
+ break;
+ case UV_SECRET_AES_XTS_128:
+ *pkeysize = 16 + 16 + AES_WK_VP_SIZE;
+ *pkeytype = PKEY_KEYTYPE_AES_XTS_128;
+ break;
+ case UV_SECRET_AES_XTS_256:
+ *pkeysize = 32 + 32 + AES_WK_VP_SIZE;
+ *pkeytype = PKEY_KEYTYPE_AES_XTS_256;
+ break;
+ case UV_SECRET_HMAC_SHA_256:
+ *pkeysize = 64 + AES_WK_VP_SIZE;
+ *pkeytype = PKEY_KEYTYPE_HMAC_512;
+ break;
+ case UV_SECRET_HMAC_SHA_512:
+ *pkeysize = 128 + AES_WK_VP_SIZE;
+ *pkeytype = PKEY_KEYTYPE_HMAC_1024;
+ break;
+ case UV_SECRET_ECDSA_P256:
+ *pkeysize = 32 + AES_WK_VP_SIZE;
+ *pkeytype = PKEY_KEYTYPE_ECC_P256;
+ break;
+ case UV_SECRET_ECDSA_P384:
+ *pkeysize = 48 + AES_WK_VP_SIZE;
+ *pkeytype = PKEY_KEYTYPE_ECC_P384;
+ break;
+ case UV_SECRET_ECDSA_P521:
+ *pkeysize = 80 + AES_WK_VP_SIZE;
+ *pkeytype = PKEY_KEYTYPE_ECC_P521;
+ break;
+ case UV_SECRET_ECDSA_ED25519:
+ *pkeysize = 32 + AES_WK_VP_SIZE;
+ *pkeytype = PKEY_KEYTYPE_ECC_ED25519;
+ break;
+ case UV_SECRET_ECDSA_ED448:
+ *pkeysize = 64 + AES_WK_VP_SIZE;
+ *pkeytype = PKEY_KEYTYPE_ECC_ED448;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
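
As a worked example of the sizing above: assuming AES_WK_VP_SIZE is the 32-byte wrapping-key verification pattern appended to every protected key, a UV_SECRET_AES_256 secret maps to a 64-byte protected key (32 bytes of wrapped key material plus the 32-byte pattern), while a UV_SECRET_AES_XTS_256 secret needs 96 bytes (two 32-byte keys plus the pattern).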
+
+static int uv_key2protkey(const struct pkey_apqn *_apqns __always_unused,
+ size_t _nr_apqns __always_unused,
+ const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *keyinfo,
+ u32 _xflags __always_unused)
+{
+ struct uvsecrettoken *t = (struct uvsecrettoken *)key;
+ u32 pkeysize, pkeytype;
+ u16 secret_type;
+ int rc;
+
+ rc = uv_get_size_and_type(t->secret_type, &pkeysize, &pkeytype);
+ if (rc)
+ goto out;
+
+ if (*protkeylen < pkeysize) {
+ PKEY_DBF_ERR("%s prot key buffer size too small: %u < %u\n",
+ __func__, *protkeylen, pkeysize);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = retrieve_secret(t->secret_id, &secret_type, protkey, protkeylen);
+ if (rc) {
+ PKEY_DBF_ERR("%s retrieve_secret() failed with %d\n",
+ __func__, rc);
+ goto out;
+ }
+ if (secret_type != t->secret_type) {
+ PKEY_DBF_ERR("%s retrieved secret type %u != expected type %u\n",
+ __func__, secret_type, t->secret_type);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (keyinfo)
+ *keyinfo = pkeytype;
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static int uv_verifykey(const u8 *key, u32 keylen,
+ u16 *_card __always_unused,
+ u16 *_dom __always_unused,
+ u32 *keytype, u32 *keybitsize, u32 *flags,
+ u32 xflags __always_unused)
+{
+ struct uvsecrettoken *t = (struct uvsecrettoken *)key;
+ struct uv_secret_list_item_hdr secret_meta_data;
+ u32 pkeysize, pkeytype, bitsize;
+ int rc;
+
+ rc = uv_get_size_and_type(t->secret_type, &pkeysize, &pkeytype);
+ if (rc)
+ goto out;
+
+ rc = get_secret_metadata(t->secret_id, &secret_meta_data);
+ if (rc)
+ goto out;
+
+ if (secret_meta_data.type != t->secret_type) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* set keytype; derive keybitsize where known; report pkeytype via flags */
+ if (keytype)
+ *keytype = PKEY_TYPE_UVSECRET;
+ if (keybitsize) {
+ bitsize = 8 * pkey_keytype_to_size(pkeytype);
+ *keybitsize = bitsize ?: PKEY_SIZE_UNKNOWN;
+ }
+ if (flags)
+ *flags = pkeytype;
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
+}
+
+static struct pkey_handler uv_handler = {
+ .module = THIS_MODULE,
+ .name = "PKEY UV handler",
+ .is_supported_key = is_uv_key,
+ .is_supported_keytype = is_uv_keytype,
+ .key_to_protkey = uv_key2protkey,
+ .verify_key = uv_verifykey,
+};
+
+/*
+ * Module init
+ */
+static int __init pkey_uv_init(void)
+{
+ int rc;
+
+ if (!is_prot_virt_guest())
+ return -ENODEV;
+
+ if (!test_bit_inv(BIT_UVC_CMD_RETR_SECRET, uv_info.inst_calls_list))
+ return -ENODEV;
+
+ uv_list = kmalloc(sizeof(*uv_list), GFP_KERNEL);
+ if (!uv_list)
+ return -ENOMEM;
+
+ rc = pkey_handler_register(&uv_handler);
+ if (rc)
+ kfree(uv_list);
+
+ return rc;
+}
+
+/*
+ * Module exit
+ */
+static void __exit pkey_uv_exit(void)
+{
+ pkey_handler_unregister(&uv_handler);
+ mutex_lock(&uv_list_mutex);
+ kvfree(uv_list);
+ mutex_unlock(&uv_list_mutex);
+}
+
+module_cpu_feature_match(S390_CPU_FEATURE_UV, pkey_uv_init);
+module_exit(pkey_uv_exit);
diff --git a/drivers/s390/crypto/vfio_ap_debug.h b/drivers/s390/crypto/vfio_ap_debug.h
new file mode 100644
index 000000000000..180156121421
--- /dev/null
+++ b/drivers/s390/crypto/vfio_ap_debug.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 2022
+ *
+ * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
+ */
+#ifndef VFIO_AP_DEBUG_H
+#define VFIO_AP_DEBUG_H
+
+#include <asm/debug.h>
+
+#define DBF_ERR 3 /* error conditions */
+#define DBF_WARN 4 /* warning conditions */
+#define DBF_INFO 5 /* informational */
+#define DBF_DEBUG 6 /* for debugging only */
+
+#define DBF_MAX_SPRINTF_ARGS 10
+
+#define VFIO_AP_DBF(...) \
+ debug_sprintf_event(vfio_ap_dbf_info, ##__VA_ARGS__)
+#define VFIO_AP_DBF_ERR(...) \
+ debug_sprintf_event(vfio_ap_dbf_info, DBF_ERR, ##__VA_ARGS__)
+#define VFIO_AP_DBF_WARN(...) \
+ debug_sprintf_event(vfio_ap_dbf_info, DBF_WARN, ##__VA_ARGS__)
+#define VFIO_AP_DBF_INFO(...) \
+ debug_sprintf_event(vfio_ap_dbf_info, DBF_INFO, ##__VA_ARGS__)
+#define VFIO_AP_DBF_DBG(...) \
+ debug_sprintf_event(vfio_ap_dbf_info, DBF_DEBUG, ##__VA_ARGS__)
+
+extern debug_info_t *vfio_ap_dbf_info;
+
+#endif /* VFIO_AP_DEBUG_H */
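
A minimal usage sketch of these wrappers (assuming vfio_ap_dbf_info has been registered, as the driver init code does below). Events whose level is numerically above the current setting are suppressed, so with the default of DBF_WARN the INFO event below would not be recorded:

VFIO_AP_DBF_ERR("%s: device registration failed: rc=%d\n", __func__, -ENODEV);
VFIO_AP_DBF_INFO("%s: matrix device created\n", __func__);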
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index 31c6c847eaca..67a807e2e75b 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -5,6 +5,7 @@
* Copyright IBM Corp. 2018
*
* Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
+ * Pierre Morel <pmorel@linux.ibm.com>
*/
#include <linux/module.h>
@@ -13,22 +14,29 @@
#include <linux/string.h>
#include <asm/facility.h>
#include "vfio_ap_private.h"
+#include "vfio_ap_debug.h"
#define VFIO_AP_ROOT_NAME "vfio_ap"
-#define VFIO_AP_DEV_TYPE_NAME "ap_matrix"
#define VFIO_AP_DEV_NAME "matrix"
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("VFIO AP device driver, Copyright IBM Corp. 2018");
MODULE_LICENSE("GPL v2");
-static struct ap_driver vfio_ap_drv;
+struct ap_matrix_dev *matrix_dev;
+debug_info_t *vfio_ap_dbf_info;
-static struct device_type vfio_ap_dev_type = {
- .name = VFIO_AP_DEV_TYPE_NAME,
-};
+static ssize_t features_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "guest_matrix hotplug ap_config\n");
+}
+static DEVICE_ATTR_RO(features);
-struct ap_matrix_dev *matrix_dev;
+static struct attribute *matrix_dev_attrs[] = {
+ &dev_attr_features.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(matrix_dev);
/* Only type 10 adapters (CEX4 and later) are supported
* by the AP matrix device driver
@@ -40,28 +48,41 @@ static struct ap_device_id ap_queue_ids[] = {
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX6,
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX7,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX8,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ /* end of sibling */ },
};
-MODULE_DEVICE_TABLE(vfio_ap, ap_queue_ids);
-
-static int vfio_ap_queue_dev_probe(struct ap_device *apdev)
-{
- return 0;
-}
-
-static void vfio_ap_queue_dev_remove(struct ap_device *apdev)
-{
- /* Nothing to do yet */
-}
+static struct ap_driver vfio_ap_drv = {
+ .probe = vfio_ap_mdev_probe_queue,
+ .remove = vfio_ap_mdev_remove_queue,
+ .in_use = vfio_ap_mdev_resource_in_use,
+ .on_config_changed = vfio_ap_on_cfg_changed,
+ .on_scan_complete = vfio_ap_on_scan_complete,
+ .ids = ap_queue_ids,
+};
static void vfio_ap_matrix_dev_release(struct device *dev)
{
- struct ap_matrix_dev *matrix_dev = dev_get_drvdata(dev);
+ struct ap_matrix_dev *matrix_dev;
+ matrix_dev = container_of(dev, struct ap_matrix_dev, device);
kfree(matrix_dev);
}
+static const struct bus_type matrix_bus = {
+ .name = "matrix",
+};
+
+static struct device_driver matrix_driver = {
+ .name = "vfio_ap",
+ .bus = &matrix_bus,
+ .suppress_bind_attrs = true,
+ .dev_groups = matrix_dev_groups,
+};
+
static int vfio_ap_matrix_dev_create(void)
{
int ret;
@@ -71,6 +92,10 @@ static int vfio_ap_matrix_dev_create(void)
if (IS_ERR(root_device))
return PTR_ERR(root_device);
+ ret = bus_register(&matrix_bus);
+ if (ret)
+ goto bus_register_err;
+
matrix_dev = kzalloc(sizeof(*matrix_dev), GFP_KERNEL);
if (!matrix_dev) {
ret = -ENOMEM;
@@ -84,39 +109,69 @@ static int vfio_ap_matrix_dev_create(void)
goto matrix_alloc_err;
}
- mutex_init(&matrix_dev->lock);
+ mutex_init(&matrix_dev->mdevs_lock);
INIT_LIST_HEAD(&matrix_dev->mdev_list);
+ mutex_init(&matrix_dev->guests_lock);
- matrix_dev->device.type = &vfio_ap_dev_type;
dev_set_name(&matrix_dev->device, "%s", VFIO_AP_DEV_NAME);
matrix_dev->device.parent = root_device;
+ matrix_dev->device.bus = &matrix_bus;
matrix_dev->device.release = vfio_ap_matrix_dev_release;
- matrix_dev->device.driver = &vfio_ap_drv.driver;
+ matrix_dev->vfio_ap_drv = &vfio_ap_drv;
ret = device_register(&matrix_dev->device);
if (ret)
goto matrix_reg_err;
+ ret = driver_register(&matrix_driver);
+ if (ret)
+ goto matrix_drv_err;
+
return 0;
+matrix_drv_err:
+ device_del(&matrix_dev->device);
matrix_reg_err:
put_device(&matrix_dev->device);
matrix_alloc_err:
+ bus_unregister(&matrix_bus);
+bus_register_err:
root_device_unregister(root_device);
-
return ret;
}
static void vfio_ap_matrix_dev_destroy(void)
{
+ struct device *root_device = matrix_dev->device.parent;
+
+ driver_unregister(&matrix_driver);
device_unregister(&matrix_dev->device);
- root_device_unregister(matrix_dev->device.parent);
+ bus_unregister(&matrix_bus);
+ root_device_unregister(root_device);
+}
+
+static int __init vfio_ap_dbf_info_init(void)
+{
+ vfio_ap_dbf_info = debug_register("vfio_ap", 1, 1,
+ DBF_MAX_SPRINTF_ARGS * sizeof(long));
+
+ if (!vfio_ap_dbf_info)
+ return -ENOENT;
+
+ debug_register_view(vfio_ap_dbf_info, &debug_sprintf_view);
+ debug_set_level(vfio_ap_dbf_info, DBF_WARN);
+
+ return 0;
}
static int __init vfio_ap_init(void)
{
int ret;
+ ret = vfio_ap_dbf_info_init();
+ if (ret)
+ return ret;
+
/* If there are no AP instructions, there is nothing to pass through. */
if (!ap_instructions_available())
return -ENODEV;
@@ -125,11 +180,6 @@ static int __init vfio_ap_init(void)
if (ret)
return ret;
- memset(&vfio_ap_drv, 0, sizeof(vfio_ap_drv));
- vfio_ap_drv.probe = vfio_ap_queue_dev_probe;
- vfio_ap_drv.remove = vfio_ap_queue_dev_remove;
- vfio_ap_drv.ids = ap_queue_ids;
-
ret = ap_driver_register(&vfio_ap_drv, THIS_MODULE, VFIO_AP_DRV_NAME);
if (ret) {
vfio_ap_matrix_dev_destroy();
@@ -152,6 +202,7 @@ static void __exit vfio_ap_exit(void)
vfio_ap_mdev_unregister();
ap_driver_unregister(&vfio_ap_drv);
vfio_ap_matrix_dev_destroy();
+ debug_unregister(vfio_ap_dbf_info);
}
module_init(vfio_ap_init);
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 272ef427dcc0..48da32ad0493 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -16,238 +16,923 @@
#include <linux/bitops.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
+#include <linux/uuid.h>
#include <asm/kvm.h>
#include <asm/zcrypt.h>
#include "vfio_ap_private.h"
+#include "vfio_ap_debug.h"
#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"
-static void vfio_ap_matrix_init(struct ap_config_info *info,
- struct ap_matrix *matrix)
+#define AP_QUEUE_ASSIGNED "assigned"
+#define AP_QUEUE_UNASSIGNED "unassigned"
+#define AP_QUEUE_IN_USE "in use"
+
+#define AP_RESET_INTERVAL 20 /* Reset sleep interval (20ms) */
+
+static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev);
+static int vfio_ap_mdev_reset_qlist(struct list_head *qlist);
+static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
+static const struct vfio_device_ops vfio_ap_matrix_dev_ops;
+static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q);
+
+/**
+ * get_update_locks_for_kvm: Acquire the locks required to dynamically update a
+ * KVM guest's APCB in the proper order.
+ *
+ * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
+ *
+ * The proper locking order is:
+ * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
+ * guest's APCB.
+ * 2. kvm->lock: required to update a guest's APCB
+ * 3. matrix_dev->mdevs_lock: required to access data stored in a matrix_mdev
+ *
+ * Note: If @kvm is NULL, the KVM lock will not be taken.
+ */
+static inline void get_update_locks_for_kvm(struct kvm *kvm)
+{
+ mutex_lock(&matrix_dev->guests_lock);
+ if (kvm)
+ mutex_lock(&kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+}
+
+/**
+ * release_update_locks_for_kvm: Release the locks used to dynamically update a
+ * KVM guest's APCB in the proper order.
+ *
+ * @kvm: a pointer to a struct kvm object containing the KVM guest's APCB.
+ *
+ * The proper unlocking order is:
+ * 1. matrix_dev->mdevs_lock
+ * 2. kvm->lock
+ * 3. matrix_dev->guests_lock
+ *
+ * Note: If @kvm is NULL, the KVM lock will not be released.
+ */
+static inline void release_update_locks_for_kvm(struct kvm *kvm)
{
- matrix->apm_max = info->apxa ? info->Na : 63;
- matrix->aqm_max = info->apxa ? info->Nd : 15;
- matrix->adm_max = info->apxa ? info->Nd : 15;
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ if (kvm)
+ mutex_unlock(&kvm->lock);
+ mutex_unlock(&matrix_dev->guests_lock);
}
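
A hypothetical caller illustrating the paired use of these helpers (the update step itself is elided):

/* Sketch: dynamically update a guest's APCB under the proper locks. */
static void example_apcb_update(struct ap_matrix_mdev *matrix_mdev)
{
	get_update_locks_for_kvm(matrix_mdev->kvm);
	/* ... recompute matrix_mdev->shadow_apcb and push it to the guest ... */
	release_update_locks_for_kvm(matrix_mdev->kvm);
}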
-static int vfio_ap_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
+/**
+ * get_update_locks_for_mdev: Acquire the locks required to dynamically update a
+ * KVM guest's APCB in the proper order.
+ *
+ * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
+ * configuration data to use to update a KVM guest's APCB.
+ *
+ * The proper locking order is:
+ * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
+ * guest's APCB.
+ * 2. matrix_mdev->kvm->lock: required to update a guest's APCB
+ * 3. matrix_dev->mdevs_lock: required to access data stored in a matrix_mdev
+ *
+ * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
+ * lock will not be taken.
+ */
+static inline void get_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
+{
+ mutex_lock(&matrix_dev->guests_lock);
+ if (matrix_mdev && matrix_mdev->kvm)
+ mutex_lock(&matrix_mdev->kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+}
+
+/**
+ * release_update_locks_for_mdev: Release the locks used to dynamically update a
+ * KVM guest's APCB in the proper order.
+ *
+ * @matrix_mdev: a pointer to a struct ap_matrix_mdev object containing the AP
+ * configuration data to use to update a KVM guest's APCB.
+ *
+ * The proper unlocking order is:
+ * 1. matrix_dev->mdevs_lock
+ * 2. matrix_mdev->kvm->lock
+ * 3. matrix_dev->guests_lock
+ *
+ * Note: If @matrix_mdev is NULL or is not attached to a KVM guest, the KVM
+ * lock will not be released.
+ */
+static inline void release_update_locks_for_mdev(struct ap_matrix_mdev *matrix_mdev)
+{
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ if (matrix_mdev && matrix_mdev->kvm)
+ mutex_unlock(&matrix_mdev->kvm->lock);
+ mutex_unlock(&matrix_dev->guests_lock);
+}
+
+/**
+ * get_update_locks_by_apqn: Find the mdev to which an APQN is assigned and
+ * acquire the locks required to update the APCB of
+ * the KVM guest to which the mdev is attached.
+ *
+ * @apqn: the APQN of a queue device.
+ *
+ * The proper locking order is:
+ * 1. matrix_dev->guests_lock: required to use the KVM pointer to update a KVM
+ * guest's APCB.
+ * 2. matrix_mdev->kvm->lock: required to update a guest's APCB
+ * 3. matrix_dev->mdevs_lock: required to access data stored in a matrix_mdev
+ *
+ * Note: If @apqn is not assigned to a matrix_mdev, the matrix_mdev->kvm->lock
+ * will not be taken.
+ *
+ * Return: the ap_matrix_mdev object to which @apqn is assigned or NULL if @apqn
+ * is not assigned to an ap_matrix_mdev.
+ */
+static struct ap_matrix_mdev *get_update_locks_by_apqn(int apqn)
{
struct ap_matrix_mdev *matrix_mdev;
- if ((atomic_dec_if_positive(&matrix_dev->available_instances) < 0))
- return -EPERM;
+ mutex_lock(&matrix_dev->guests_lock);
- matrix_mdev = kzalloc(sizeof(*matrix_mdev), GFP_KERNEL);
- if (!matrix_mdev) {
- atomic_inc(&matrix_dev->available_instances);
- return -ENOMEM;
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ if (test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm) &&
+ test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) {
+ if (matrix_mdev->kvm)
+ mutex_lock(&matrix_mdev->kvm->lock);
+
+ mutex_lock(&matrix_dev->mdevs_lock);
+
+ return matrix_mdev;
+ }
}
- vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
- mdev_set_drvdata(mdev, matrix_mdev);
- mutex_lock(&matrix_dev->lock);
- list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
- mutex_unlock(&matrix_dev->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
- return 0;
+ return NULL;
}
-static int vfio_ap_mdev_remove(struct mdev_device *mdev)
+/**
+ * get_update_locks_for_queue: get the locks required to update the APCB of the
+ * KVM guest to which the matrix mdev linked to a
+ * vfio_ap_queue object is attached.
+ *
+ * @q: a pointer to a vfio_ap_queue object.
+ *
+ * The proper locking order is:
+ * 1. q->matrix_dev->guests_lock: required to use the KVM pointer to update a
+ * KVM guest's APCB.
+ * 2. q->matrix_mdev->kvm->lock: required to update a guest's APCB
+ * 3. matrix_dev->mdevs_lock: required to access data stored in matrix_mdev
+ *
+ * Note: if @q is not linked to an ap_matrix_mdev object, the KVM lock
+ * will not be taken.
+ */
+static inline void get_update_locks_for_queue(struct vfio_ap_queue *q)
{
- struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
-
- if (matrix_mdev->kvm)
- return -EBUSY;
+ mutex_lock(&matrix_dev->guests_lock);
+ if (q->matrix_mdev && q->matrix_mdev->kvm)
+ mutex_lock(&q->matrix_mdev->kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+}
- mutex_lock(&matrix_dev->lock);
- list_del(&matrix_mdev->node);
- mutex_unlock(&matrix_dev->lock);
+/**
+ * vfio_ap_mdev_get_queue - retrieve a queue with a specific APQN from a
+ * hash table of queues assigned to a matrix mdev
+ * @matrix_mdev: the matrix mdev
+ * @apqn: The APQN of a queue device
+ *
+ * Return: the pointer to the vfio_ap_queue struct representing the queue or
+ * NULL if the queue is not assigned to @matrix_mdev
+ */
+static struct vfio_ap_queue *vfio_ap_mdev_get_queue(
+ struct ap_matrix_mdev *matrix_mdev,
+ int apqn)
+{
+ struct vfio_ap_queue *q;
- kfree(matrix_mdev);
- mdev_set_drvdata(mdev, NULL);
- atomic_inc(&matrix_dev->available_instances);
+ hash_for_each_possible(matrix_mdev->qtable.queues, q, mdev_qnode,
+ apqn) {
+ if (q && q->apqn == apqn)
+ return q;
+ }
- return 0;
+ return NULL;
}
-static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
+/**
+ * vfio_ap_wait_for_irqclear - clears the IR bit or gives up after 5 tries
+ * @apqn: The AP Queue number
+ *
+ * Checks the IRQ bit for the status of this APQN using ap_tapq.
+ * Returns when ap_tapq succeeds and the bit is clear, or when ap_tapq
+ * fails with an invalid, deconfigured or checkstopped AP.
+ * Otherwise retries up to 5 times after waiting 20ms.
+ */
+static void vfio_ap_wait_for_irqclear(int apqn)
{
- return sprintf(buf, "%s\n", VFIO_AP_MDEV_NAME_HWVIRT);
-}
+ struct ap_queue_status status;
+ int retry = 5;
-static MDEV_TYPE_ATTR_RO(name);
+ do {
+ status = ap_tapq(apqn, NULL);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ if (!status.irq_enabled)
+ return;
+ fallthrough;
+ case AP_RESPONSE_BUSY:
+ msleep(20);
+ break;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ default:
+ WARN_ONCE(1, "%s: tapq rc %02x: %04x\n", __func__,
+ status.response_code, apqn);
+ return;
+ }
+ } while (--retry);
-static ssize_t available_instances_show(struct kobject *kobj,
- struct device *dev, char *buf)
-{
- return sprintf(buf, "%d\n",
- atomic_read(&matrix_dev->available_instances));
+ WARN_ONCE(1, "%s: tapq rc %02x: %04x could not clear IR bit\n",
+ __func__, status.response_code, apqn);
}
-static MDEV_TYPE_ATTR_RO(available_instances);
-
-static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
- char *buf)
+/**
+ * vfio_ap_free_aqic_resources - free vfio_ap_queue resources
+ * @q: The vfio_ap_queue
+ *
+ * Unregisters the ISC in the GIB when the saved ISC is not invalid.
+ * Unpins the guest's page holding the NIB when it exists.
+ * Resets the saved_iova and saved_isc to invalid values.
+ */
+static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q)
{
- return sprintf(buf, "%s\n", VFIO_DEVICE_API_AP_STRING);
+ if (!q)
+ return;
+ if (q->saved_isc != VFIO_AP_ISC_INVALID &&
+ !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) {
+ kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc);
+ q->saved_isc = VFIO_AP_ISC_INVALID;
+ }
+ if (q->saved_iova && !WARN_ON(!q->matrix_mdev)) {
+ vfio_unpin_pages(&q->matrix_mdev->vdev, q->saved_iova, 1);
+ q->saved_iova = 0;
+ }
}
-static MDEV_TYPE_ATTR_RO(device_api);
+/**
+ * vfio_ap_irq_disable - disables and clears an ap_queue interrupt
+ * @q: The vfio_ap_queue
+ *
+ * Uses ap_aqic to disable the interruption. If the response is normal or
+ * "otherwise changed", calls vfio_ap_wait_for_irqclear() to wait for the
+ * IRQ bit to clear and vfio_ap_free_aqic_resources() to free the resources
+ * associated with AP interrupt handling.
+ *
+ * If the AP is busy or a reset is in progress, retries after 20ms,
+ * up to 5 times.
+ *
+ * Returns if ap_aqic fails with an invalid, deconfigured or
+ * checkstopped AP.
+ *
+ * Return: &struct ap_queue_status
+ */
+static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q)
+{
+ union ap_qirq_ctrl aqic_gisa = { .value = 0 };
+ struct ap_queue_status status;
+ int retries = 5;
-static struct attribute *vfio_ap_mdev_type_attrs[] = {
- &mdev_type_attr_name.attr,
- &mdev_type_attr_device_api.attr,
- &mdev_type_attr_available_instances.attr,
- NULL,
-};
+ do {
+ status = ap_aqic(q->apqn, aqic_gisa, 0);
+ switch (status.response_code) {
+ case AP_RESPONSE_OTHERWISE_CHANGED:
+ case AP_RESPONSE_NORMAL:
+ vfio_ap_wait_for_irqclear(q->apqn);
+ goto end_free;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ case AP_RESPONSE_BUSY:
+ msleep(20);
+ break;
+ case AP_RESPONSE_Q_NOT_AVAIL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ case AP_RESPONSE_INVALID_ADDRESS:
+ default:
+ /* All cases in default mean AP not operational */
+ WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
+ status.response_code);
+ goto end_free;
+ }
+ } while (retries--);
-static struct attribute_group vfio_ap_mdev_hwvirt_type_group = {
- .name = VFIO_AP_MDEV_TYPE_HWVIRT,
- .attrs = vfio_ap_mdev_type_attrs,
-};
+ WARN_ONCE(1, "%s: ap_aqic status %d\n", __func__,
+ status.response_code);
+end_free:
+ vfio_ap_free_aqic_resources(q);
+ return status;
+}
-static struct attribute_group *vfio_ap_mdev_type_groups[] = {
- &vfio_ap_mdev_hwvirt_type_group,
- NULL,
-};
+/**
+ * vfio_ap_validate_nib - validate a notification indicator byte (nib) address.
+ *
+ * @vcpu: the object representing the vcpu executing the PQAP(AQIC) instruction.
+ * @nib: the location for storing the nib address.
+ *
+ * When the PQAP(AQIC) instruction is executed, general register 2 contains the
+ * address of the notification indicator byte (nib) used for IRQ notification.
+ * This function parses and validates the nib from gr2.
+ *
+ * Return: returns zero if the nib address is valid; otherwise, returns
+ * -EINVAL.
+ */
+static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, dma_addr_t *nib)
+{
+ *nib = vcpu->run->s.regs.gprs[2];
-struct vfio_ap_queue_reserved {
- unsigned long *apid;
- unsigned long *apqi;
- bool reserved;
-};
+ if (!*nib)
+ return -EINVAL;
+ if (!kvm_s390_is_gpa_in_memslot(vcpu->kvm, *nib))
+ return -EINVAL;
+
+ return 0;
+}
/**
- * vfio_ap_has_queue
+ * ensure_nib_shared() - Ensure the address of the NIB is secure and shared
+ * @addr: the physical (absolute) address of the NIB
+ *
+ * This function checks whether the NIB page, which has been pinned with
+ * vfio_pin_pages(), is a shared page belonging to a secure guest.
+ *
+ * It will call uv_pin_shared() on it; if the page was already pinned shared
+ * (i.e. if the NIB belongs to a secure guest and is shared), then 0
+ * (success) is returned. If the NIB was not shared, vfio_pin_pages() had
+ * exported it and now it does not belong to the secure guest anymore. In
+ * that case, an error is returned.
+ *
+ * Context: the NIB (at physical address @addr) has to be pinned with
+ * vfio_pin_pages() before calling this function.
*
- * @dev: an AP queue device
- * @data: a struct vfio_ap_queue_reserved reference
+ * Return: 0 in case of success, otherwise an error < 0.
+ */
+static int ensure_nib_shared(unsigned long addr)
+{
+ /*
+ * The nib has to be located in shared storage since guest and
+ * host access it. vfio_pin_pages() will do a pin shared and
+ * if that fails (possibly because it's not a shared page) it
+ * calls export. We try to do a second pin shared here so that
+ * the UV gives us an error code if we try to pin a non-shared
+ * page.
+ *
+ * If the page is already pinned shared the UV will return a success.
+ */
+ return uv_pin_shared(addr);
+}
+
+/**
+ * vfio_ap_irq_enable - Enable Interruption for a APQN
*
- * Flags whether the AP queue device (@dev) has a queue ID containing the APQN,
- * apid or apqi specified in @data:
+ * @q: the vfio_ap_queue holding AQIC parameters
+ * @isc: the guest ISC to register with the GIB interface
+ * @vcpu: the vcpu object containing the registers specifying the parameters
+ * passed to the PQAP(AQIC) instruction.
*
- * - If @data contains both an apid and apqi value, then @data will be flagged
- * as reserved if the APID and APQI fields for the AP queue device matches
+ * Pin the NIB saved in *q
+ * Register the guest ISC with the GIB interface and retrieve the
+ * host ISC to issue the host side PQAP/AQIC
*
- * - If @data contains only an apid value, @data will be flagged as
- * reserved if the APID field in the AP queue device matches
+ * status.response_code may be set to AP_RESPONSE_INVALID_ADDRESS in case
+ * vfio_pin_pages() or kvm_s390_gisc_register() fails.
*
- * - If @data contains only an apqi value, @data will be flagged as
- * reserved if the APQI field in the AP queue device matches
+ * Otherwise return the ap_queue_status returned by ap_aqic();
+ * all retry handling will be done by the guest.
*
- * Returns 0 to indicate the input to function succeeded. Returns -EINVAL if
- * @data does not contain either an apid or apqi.
+ * Return: &struct ap_queue_status
*/
-static int vfio_ap_has_queue(struct device *dev, void *data)
+static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q,
+ int isc,
+ struct kvm_vcpu *vcpu)
{
- struct vfio_ap_queue_reserved *qres = data;
- struct ap_queue *ap_queue = to_ap_queue(dev);
- ap_qid_t qid;
- unsigned long id;
+ union ap_qirq_ctrl aqic_gisa = { .value = 0 };
+ struct ap_queue_status status = {};
+ struct kvm_s390_gisa *gisa;
+ struct page *h_page;
+ int nisc;
+ struct kvm *kvm;
+ phys_addr_t h_nib;
+ dma_addr_t nib;
+ int ret;
- if (qres->apid && qres->apqi) {
- qid = AP_MKQID(*qres->apid, *qres->apqi);
- if (qid == ap_queue->qid)
- qres->reserved = true;
- } else if (qres->apid && !qres->apqi) {
- id = AP_QID_CARD(ap_queue->qid);
- if (id == *qres->apid)
- qres->reserved = true;
- } else if (!qres->apid && qres->apqi) {
- id = AP_QID_QUEUE(ap_queue->qid);
- if (id == *qres->apqi)
- qres->reserved = true;
- } else {
- return -EINVAL;
+ /* Verify that the notification indicator byte address is valid */
+ if (vfio_ap_validate_nib(vcpu, &nib)) {
+ VFIO_AP_DBF_WARN("%s: invalid NIB address: nib=%pad, apqn=%#04x\n",
+ __func__, &nib, q->apqn);
+
+ status.response_code = AP_RESPONSE_INVALID_ADDRESS;
+ return status;
}
- return 0;
+ /* The pin will probably be successful even if the NIB was not shared */
+ ret = vfio_pin_pages(&q->matrix_mdev->vdev, nib, 1,
+ IOMMU_READ | IOMMU_WRITE, &h_page);
+ switch (ret) {
+ case 1:
+ break;
+ default:
+ VFIO_AP_DBF_WARN("%s: vfio_pin_pages failed: rc=%d,"
+ "nib=%pad, apqn=%#04x\n",
+ __func__, ret, &nib, q->apqn);
+
+ status.response_code = AP_RESPONSE_INVALID_ADDRESS;
+ return status;
+ }
+
+ kvm = q->matrix_mdev->kvm;
+ gisa = kvm->arch.gisa_int.origin;
+
+ h_nib = page_to_phys(h_page) | (nib & ~PAGE_MASK);
+ aqic_gisa.gisc = isc;
+
+ /* NIB in non-shared storage is a rc 6 for PV guests */
+ if (kvm_s390_pv_cpu_is_protected(vcpu) &&
+ ensure_nib_shared(h_nib & PAGE_MASK)) {
+ vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
+ status.response_code = AP_RESPONSE_INVALID_ADDRESS;
+ return status;
+ }
+
+ nisc = kvm_s390_gisc_register(kvm, isc);
+ if (nisc < 0) {
+ VFIO_AP_DBF_WARN("%s: gisc registration failed: nisc=%d, isc=%d, apqn=%#04x\n",
+ __func__, nisc, isc, q->apqn);
+
+ vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
+ status.response_code = AP_RESPONSE_INVALID_ADDRESS;
+ return status;
+ }
+
+ aqic_gisa.isc = nisc;
+ aqic_gisa.ir = 1;
+ aqic_gisa.gisa = virt_to_phys(gisa) >> 4;
+
+ status = ap_aqic(q->apqn, aqic_gisa, h_nib);
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ /* See if we did clear older IRQ configuration */
+ vfio_ap_free_aqic_resources(q);
+ q->saved_iova = nib;
+ q->saved_isc = isc;
+ break;
+ case AP_RESPONSE_OTHERWISE_CHANGED:
+ /* We could not modify IRQ settings: clear new configuration */
+ ret = kvm_s390_gisc_unregister(kvm, isc);
+ if (ret)
+ VFIO_AP_DBF_WARN("%s: kvm_s390_gisc_unregister: rc=%d isc=%d, apqn=%#04x\n",
+ __func__, ret, isc, q->apqn);
+ vfio_unpin_pages(&q->matrix_mdev->vdev, nib, 1);
+ break;
+ default:
+ pr_warn("%s: apqn %04x: response: %02x\n", __func__, q->apqn,
+ status.response_code);
+ vfio_ap_irq_disable(q);
+ break;
+ }
+
+ if (status.response_code != AP_RESPONSE_NORMAL) {
+ VFIO_AP_DBF_WARN("%s: PQAP(AQIC) failed with status=%#02x: "
+ "zone=%#x, ir=%#x, gisc=%#x, f=%#x,"
+ "gisa=%#x, isc=%#x, apqn=%#04x\n",
+ __func__, status.response_code,
+ aqic_gisa.zone, aqic_gisa.ir, aqic_gisa.gisc,
+ aqic_gisa.gf, aqic_gisa.gisa, aqic_gisa.isc,
+ q->apqn);
+ }
+
+ return status;
+}
+
+/**
+ * vfio_ap_le_guid_to_be_uuid - convert a little endian guid array into an array
+ * of big endian elements that can be passed by
+ * value to an s390dbf sprintf event function to
+ * format a UUID string.
+ *
+ * @guid: the object containing the little endian guid
+ * @uuid: a six-element array of long values that can be passed by value as
+ * arguments for a formatting string specifying a UUID.
+ *
+ * The S390 Debug Feature (s390dbf) allows the use of "%s" in the sprintf
+ * event functions if the memory for the passed string is available as long as
+ * the debug feature exists. Since a mediated device can be removed at any
+ * time, its name cannot be used because %s passes the reference to the string
+ * in memory and the reference will go stale once the device is removed.
+ *
+ * The s390dbf string formatting function allows a maximum of 9 arguments for a
+ * message to be displayed in the 'sprintf' view. In order to use the bytes
+ * comprising the mediated device's UUID to display the mediated device name,
+ * they will have to be converted into an array whose elements can be passed by
+ * value to sprintf. For example:
+ *
+ * guid array: { 83, 78, 17, 62, bb, f1, f0, 47, 91, 4d, 32, a2, 2e, 3a, 88, 04 }
+ * mdev name: 62177883-f1bb-47f0-914d-32a22e3a8804
+ * array returned: { 62177883, f1bb, 47f0, 914d, 32a2, 2e3a8804 }
+ * formatting string: "%08lx-%04lx-%04lx-%04lx-%04lx%08lx"
+ */
+static void vfio_ap_le_guid_to_be_uuid(guid_t *guid, unsigned long *uuid)
+{
+ /*
+ * The input guid is ordered in little endian, so it needs to be
+ * reordered for displaying a UUID as a string. This specifies the
+ * guid indices in proper order.
+ */
+ uuid[0] = le32_to_cpup((__le32 *)guid);
+ uuid[1] = le16_to_cpup((__le16 *)&guid->b[4]);
+ uuid[2] = le16_to_cpup((__le16 *)&guid->b[6]);
+ uuid[3] = *((__u16 *)&guid->b[8]);
+ uuid[4] = *((__u16 *)&guid->b[10]);
+ uuid[5] = *((__u32 *)&guid->b[12]);
}
/**
- * vfio_ap_verify_queue_reserved
+ * handle_pqap - PQAP instruction callback
*
- * @matrix_dev: a mediated matrix device
- * @apid: an AP adapter ID
- * @apqi: an AP queue index
+ * @vcpu: The vcpu on which we received the PQAP instruction
*
- * Verifies that the AP queue with @apid/@apqi is reserved by the VFIO AP device
- * driver according to the following rules:
+ * Get the general register contents to initialize internal variables.
+ * REG[0]: APQN
+ * REG[1]: IR and ISC
+ * REG[2]: NIB
*
- * - If both @apid and @apqi are not NULL, then there must be an AP queue
- * device bound to the vfio_ap driver with the APQN identified by @apid and
- * @apqi
+ * Response.status may be set to one of the following response codes:
+ * - AP_RESPONSE_Q_NOT_AVAIL: if the queue is not available
+ * - AP_RESPONSE_DECONFIGURED: if the queue is not configured
+ * - AP_RESPONSE_NORMAL (0) : in case of success
+ * See vfio_ap_irq_enable() and vfio_ap_irq_disable() for other possible RCs.
+ * We take the matrix_dev lock to ensure serialization on queues and
+ * mediated device access.
*
- * - If only @apid is not NULL, then there must be an AP queue device bound
- * to the vfio_ap driver with an APQN containing @apid
+ * Return: 0 if we could handle the request inside KVM.
+ * Otherwise, returns -EOPNOTSUPP to let QEMU handle the fault.
+ */
+static int handle_pqap(struct kvm_vcpu *vcpu)
+{
+ uint64_t status;
+ uint16_t apqn;
+ unsigned long uuid[6];
+ struct vfio_ap_queue *q;
+ struct ap_queue_status qstatus = {
+ .response_code = AP_RESPONSE_Q_NOT_AVAIL, };
+ struct ap_matrix_mdev *matrix_mdev;
+
+ apqn = vcpu->run->s.regs.gprs[0] & 0xffff;
+
+ /* If we do not use the AIV facility just go to userland */
+ if (!(vcpu->arch.sie_block->eca & ECA_AIV)) {
+ VFIO_AP_DBF_WARN("%s: AIV facility not installed: apqn=0x%04x, eca=0x%04x\n",
+ __func__, apqn, vcpu->arch.sie_block->eca);
+
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&matrix_dev->mdevs_lock);
+
+ if (!vcpu->kvm->arch.crypto.pqap_hook) {
+ VFIO_AP_DBF_WARN("%s: PQAP(AQIC) hook not registered with the vfio_ap driver: apqn=0x%04x\n",
+ __func__, apqn);
+
+ goto out_unlock;
+ }
+
+ matrix_mdev = container_of(vcpu->kvm->arch.crypto.pqap_hook,
+ struct ap_matrix_mdev, pqap_hook);
+
+ /* If there is no guest using the mdev, there is nothing to do */
+ if (!matrix_mdev->kvm) {
+ vfio_ap_le_guid_to_be_uuid(&matrix_mdev->mdev->uuid, uuid);
+ VFIO_AP_DBF_WARN("%s: mdev %08lx-%04lx-%04lx-%04lx-%04lx%08lx not in use: apqn=0x%04x\n",
+ __func__, uuid[0], uuid[1], uuid[2],
+ uuid[3], uuid[4], uuid[5], apqn);
+ goto out_unlock;
+ }
+
+ q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
+ if (!q) {
+ VFIO_AP_DBF_WARN("%s: Queue %02x.%04x not bound to the vfio_ap driver\n",
+ __func__, AP_QID_CARD(apqn),
+ AP_QID_QUEUE(apqn));
+ goto out_unlock;
+ }
+
+ status = vcpu->run->s.regs.gprs[1];
+
+ /* If IR bit(16) is set we enable the interrupt */
+ if ((status >> (63 - 16)) & 0x01)
+ qstatus = vfio_ap_irq_enable(q, status & 0x07, vcpu);
+ else
+ qstatus = vfio_ap_irq_disable(q);
+
+out_unlock:
+ memcpy(&vcpu->run->s.regs.gprs[1], &qstatus, sizeof(qstatus));
+ vcpu->run->s.regs.gprs[1] >>= 32;
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ return 0;
+}
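
Not part of the patch, but a restatement of the register decoding the handler above performs, using the s390 convention that bit 0 is the leftmost (most significant) bit of the 64-bit register:

u64 gr0 = vcpu->run->s.regs.gprs[0];
u64 gr1 = vcpu->run->s.regs.gprs[1];
u16 apqn = gr0 & 0xffff;		/* bits 48-63: the APQN */
int ir	 = (gr1 >> (63 - 16)) & 0x01;	/* bit 16: interruption requested */
int isc	 = gr1 & 0x07;			/* bits 61-63: guest ISC */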
+
+static void vfio_ap_matrix_init(struct ap_config_info *info,
+ struct ap_matrix *matrix)
+{
+ matrix->apm_max = info->apxa ? info->na : 63;
+ matrix->aqm_max = info->apxa ? info->nd : 15;
+ matrix->adm_max = info->apxa ? info->nd : 15;
+}
+
+static void signal_guest_ap_cfg_changed(struct ap_matrix_mdev *matrix_mdev)
+{
+ if (matrix_mdev->cfg_chg_trigger)
+ eventfd_signal(matrix_mdev->cfg_chg_trigger);
+}
+
+static void vfio_ap_mdev_update_guest_apcb(struct ap_matrix_mdev *matrix_mdev)
+{
+ if (matrix_mdev->kvm) {
+ kvm_arch_crypto_set_masks(matrix_mdev->kvm,
+ matrix_mdev->shadow_apcb.apm,
+ matrix_mdev->shadow_apcb.aqm,
+ matrix_mdev->shadow_apcb.adm);
+
+ signal_guest_ap_cfg_changed(matrix_mdev);
+ }
+}
+
+static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev)
+{
+ DECLARE_BITMAP(prev_shadow_adm, AP_DOMAINS);
+
+ bitmap_copy(prev_shadow_adm, matrix_mdev->shadow_apcb.adm, AP_DOMAINS);
+ bitmap_and(matrix_mdev->shadow_apcb.adm, matrix_mdev->matrix.adm,
+ (unsigned long *)matrix_dev->info.adm, AP_DOMAINS);
+
+ return !bitmap_equal(prev_shadow_adm, matrix_mdev->shadow_apcb.adm,
+ AP_DOMAINS);
+}
+
+static bool _queue_passable(struct vfio_ap_queue *q)
+{
+ if (!q)
+ return false;
+
+ switch (q->reset_status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * vfio_ap_mdev_filter_matrix - filter the APQNs assigned to the matrix mdev
+ * to ensure no queue devices are passed through to
+ * the guest that are not bound to the vfio_ap
+ * device driver.
+ *
+ * @matrix_mdev: the matrix mdev whose matrix is to be filtered.
+ * @apm_filtered: a 256-bit bitmap for storing the APIDs filtered from the
+ * guest's AP configuration that are still in the host's AP
+ * configuration.
*
- * - If only @apqi is not NULL, then there must be an AP queue device bound
- * to the vfio_ap driver with an APQN containing @apqi
+ * Note: If an APQN references a queue device that is not bound to the vfio_ap
+ * driver, its APID will be filtered from the guest's APCB. The matrix
+ * structure precludes filtering an individual APQN, so the whole APID is
+ * filtered. Consequently, all queues associated with the adapter that
+ * are in the host's AP configuration must be reset. If queues are
+ * subsequently made available again to the guest, they should re-appear
+ * in a reset state.
*
- * Returns 0 if the AP queue is reserved; otherwise, returns -EADDRNOTAVAIL.
+ * Return: a boolean value indicating whether the KVM guest's APCB was changed
+ * by the filtering or not.
*/
-static int vfio_ap_verify_queue_reserved(unsigned long *apid,
- unsigned long *apqi)
+static bool vfio_ap_mdev_filter_matrix(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long *apm_filtered)
+{
+ unsigned long apid, apqi, apqn;
+ DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES);
+ DECLARE_BITMAP(prev_shadow_aqm, AP_DOMAINS);
+
+ bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES);
+ bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS);
+ vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
+ bitmap_clear(apm_filtered, 0, AP_DEVICES);
+
+ /*
+ * Copy the adapters, domains and control domains to the shadow_apcb
+ * from the matrix mdev, but only those that are assigned to the host's
+ * AP configuration.
+ */
+ bitmap_and(matrix_mdev->shadow_apcb.apm, matrix_mdev->matrix.apm,
+ (unsigned long *)matrix_dev->info.apm, AP_DEVICES);
+ bitmap_and(matrix_mdev->shadow_apcb.aqm, matrix_mdev->matrix.aqm,
+ (unsigned long *)matrix_dev->info.aqm, AP_DOMAINS);
+
+ for_each_set_bit_inv(apid, matrix_mdev->shadow_apcb.apm, AP_DEVICES) {
+ for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm,
+ AP_DOMAINS) {
+ /*
+ * If the APQN is not bound to the vfio_ap device
+ * driver, then we can't assign it to the guest's
+ * AP configuration. The AP architecture won't
+ * allow filtering of a single APQN, so let's filter
+ * the APID since an adapter represents a physical
+ * hardware device.
+ */
+ apqn = AP_MKQID(apid, apqi);
+ if (!_queue_passable(vfio_ap_mdev_get_queue(matrix_mdev, apqn))) {
+ clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
+
+ /*
+ * If the adapter was previously plugged into
+ * the guest, let's let the caller know that
+ * the APID was filtered.
+ */
+ if (test_bit_inv(apid, prev_shadow_apm))
+ set_bit_inv(apid, apm_filtered);
+
+ break;
+ }
+ }
+ }
+
+ return !bitmap_equal(prev_shadow_apm, matrix_mdev->shadow_apcb.apm,
+ AP_DEVICES) ||
+ !bitmap_equal(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm,
+ AP_DOMAINS);
+}
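/*
 * Illustrative sketch (editor's aside, not part of this patch): a minimal
 * user-space model of the cross-product filtering above, using plain 64-bit
 * masks in normal (non-inverted) bit order. queue_bound() is a hypothetical
 * stand-in for _queue_passable().
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NBITS 64	/* scaled down from AP_DEVICES/AP_DOMAINS */

static bool queue_bound(unsigned int apid, unsigned int apqi)
{
	return !(apid == 2 && apqi == 1);	/* pretend 02.0001 is unbound */
}

int main(void)
{
	uint64_t apm = (1ULL << 1) | (1ULL << 2);	/* adapters 01 and 02 */
	uint64_t aqm = (1ULL << 0) | (1ULL << 1);	/* domains 0000 and 0001 */
	unsigned int apid, apqi;

	/* a single unbound APQN filters the whole adapter (APID) */
	for (apid = 0; apid < NBITS; apid++) {
		if (!(apm & (1ULL << apid)))
			continue;
		for (apqi = 0; apqi < NBITS; apqi++) {
			if ((aqm & (1ULL << apqi)) && !queue_bound(apid, apqi)) {
				apm &= ~(1ULL << apid);
				break;
			}
		}
	}
	printf("filtered apm: 0x%016" PRIx64 "\n", apm);	/* adapter 02 dropped */
	return 0;
}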
+
+static int vfio_ap_mdev_init_dev(struct vfio_device *vdev)
+{
+ struct ap_matrix_mdev *matrix_mdev =
+ container_of(vdev, struct ap_matrix_mdev, vdev);
+
+ matrix_mdev->mdev = to_mdev_device(vdev->dev);
+ vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
+ matrix_mdev->pqap_hook = handle_pqap;
+ vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->shadow_apcb);
+ hash_init(matrix_mdev->qtable.queues);
+
+ return 0;
+}
+
+static int vfio_ap_mdev_probe(struct mdev_device *mdev)
{
+ struct ap_matrix_mdev *matrix_mdev;
int ret;
- struct vfio_ap_queue_reserved qres;
- qres.apid = apid;
- qres.apqi = apqi;
- qres.reserved = false;
+ matrix_mdev = vfio_alloc_device(ap_matrix_mdev, vdev, &mdev->dev,
+ &vfio_ap_matrix_dev_ops);
+ if (IS_ERR(matrix_mdev))
+ return PTR_ERR(matrix_mdev);
- ret = driver_for_each_device(matrix_dev->device.driver, NULL, &qres,
- vfio_ap_has_queue);
+ ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev);
if (ret)
- return ret;
+ goto err_put_vdev;
+ matrix_mdev->req_trigger = NULL;
+ matrix_mdev->cfg_chg_trigger = NULL;
+ dev_set_drvdata(&mdev->dev, matrix_mdev);
+ mutex_lock(&matrix_dev->mdevs_lock);
+ list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ return 0;
- if (qres.reserved)
- return 0;
+err_put_vdev:
+ vfio_put_device(&matrix_mdev->vdev);
+ return ret;
+}
+
+static void vfio_ap_mdev_link_queue(struct ap_matrix_mdev *matrix_mdev,
+ struct vfio_ap_queue *q)
+{
+ if (!q || vfio_ap_mdev_get_queue(matrix_mdev, q->apqn))
+ return;
- return -EADDRNOTAVAIL;
+ q->matrix_mdev = matrix_mdev;
+ hash_add(matrix_mdev->qtable.queues, &q->mdev_qnode, q->apqn);
}
-static int
-vfio_ap_mdev_verify_queues_reserved_for_apid(struct ap_matrix_mdev *matrix_mdev,
- unsigned long apid)
+static void vfio_ap_mdev_link_apqn(struct ap_matrix_mdev *matrix_mdev, int apqn)
{
- int ret;
- unsigned long apqi;
- unsigned long nbits = matrix_mdev->matrix.aqm_max + 1;
+ struct vfio_ap_queue *q;
- if (find_first_bit_inv(matrix_mdev->matrix.aqm, nbits) >= nbits)
- return vfio_ap_verify_queue_reserved(&apid, NULL);
+ q = vfio_ap_find_queue(apqn);
+ vfio_ap_mdev_link_queue(matrix_mdev, q);
+}
- for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, nbits) {
- ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
- if (ret)
- return ret;
+static void vfio_ap_unlink_queue_fr_mdev(struct vfio_ap_queue *q)
+{
+ hash_del(&q->mdev_qnode);
+}
+
+static void vfio_ap_unlink_mdev_fr_queue(struct vfio_ap_queue *q)
+{
+ q->matrix_mdev = NULL;
+}
+
+static void vfio_ap_mdev_unlink_fr_queues(struct ap_matrix_mdev *matrix_mdev)
+{
+ struct vfio_ap_queue *q;
+ unsigned long apid, apqi;
+
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
+ AP_DOMAINS) {
+ q = vfio_ap_mdev_get_queue(matrix_mdev,
+ AP_MKQID(apid, apqi));
+ if (q)
+ q->matrix_mdev = NULL;
+ }
}
+}
- return 0;
+static void vfio_ap_mdev_remove(struct mdev_device *mdev)
+{
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(&mdev->dev);
+
+ vfio_unregister_group_dev(&matrix_mdev->vdev);
+
+ mutex_lock(&matrix_dev->guests_lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+ vfio_ap_mdev_reset_queues(matrix_mdev);
+ vfio_ap_mdev_unlink_fr_queues(matrix_mdev);
+ list_del(&matrix_mdev->node);
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ mutex_unlock(&matrix_dev->guests_lock);
+ vfio_put_device(&matrix_mdev->vdev);
+}
+
+#define MDEV_SHARING_ERR "Userspace may not assign queue %02lx.%04lx to mdev: already assigned to %s"
+
+#define MDEV_IN_USE_ERR "Can not reserve queue %02lx.%04lx for host driver: in use by mdev"
+
+static void vfio_ap_mdev_log_sharing_err(struct ap_matrix_mdev *assignee,
+ struct ap_matrix_mdev *assigned_to,
+ unsigned long *apm, unsigned long *aqm)
+{
+ unsigned long apid, apqi;
+
+ for_each_set_bit_inv(apid, apm, AP_DEVICES) {
+ for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
+ dev_warn(mdev_dev(assignee->mdev), MDEV_SHARING_ERR,
+ apid, apqi, dev_name(mdev_dev(assigned_to->mdev)));
+ }
+ }
+}
+
+static void vfio_ap_mdev_log_in_use_err(struct ap_matrix_mdev *assignee,
+ unsigned long *apm, unsigned long *aqm)
+{
+ unsigned long apid, apqi;
+
+ for_each_set_bit_inv(apid, apm, AP_DEVICES) {
+ for_each_set_bit_inv(apqi, aqm, AP_DOMAINS)
+ dev_warn(mdev_dev(assignee->mdev), MDEV_IN_USE_ERR, apid, apqi);
+ }
}
/**
- * vfio_ap_mdev_verify_no_sharing
+ * vfio_ap_mdev_verify_no_sharing - verify APQNs are not shared by matrix mdevs
*
- * Verifies that the APQNs derived from the cross product of the AP adapter IDs
- * and AP queue indexes comprising the AP matrix are not configured for another
- * mediated device. AP queue sharing is not allowed.
+ * @assignee: the matrix mdev to which @mdev_apm and @mdev_aqm are being
+ * assigned; or, NULL if this function was called by the AP bus
+ * driver in_use callback to verify none of the APQNs being reserved
+ * for the host device driver are in use by a vfio_ap mediated device
+ * @mdev_apm: mask indicating the APIDs of the APQNs to be verified
+ * @mdev_aqm: mask indicating the APQIs of the APQNs to be verified
*
- * @matrix_mdev: the mediated matrix device
+ * Verifies that each APQN derived from the Cartesian product of APIDs
+ * represented by the bits set in @mdev_apm and the APQIs of the bits set in
+ * @mdev_aqm is not assigned to a mediated device other than the mdev to which
+ * the APQN is being assigned (@assignee). AP queue sharing is not allowed.
*
- * Returns 0 if the APQNs are not shared, otherwise; returns -EADDRINUSE.
+ * Return: 0 if the APQNs are not shared; otherwise return -EADDRINUSE.
*/
-static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
+static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *assignee,
+ unsigned long *mdev_apm,
+ unsigned long *mdev_aqm)
{
- struct ap_matrix_mdev *lstdev;
+ struct ap_matrix_mdev *assigned_to;
DECLARE_BITMAP(apm, AP_DEVICES);
DECLARE_BITMAP(aqm, AP_DOMAINS);
- list_for_each_entry(lstdev, &matrix_dev->mdev_list, node) {
- if (matrix_mdev == lstdev)
+ list_for_each_entry(assigned_to, &matrix_dev->mdev_list, node) {
+ /*
+ * If the mdev to which the mdev_apm and mdev_aqm are being
+ * assigned is the same as the mdev being verified, skip it.
+ */
+ if (assignee == assigned_to)
continue;
memset(apm, 0, sizeof(apm));
@@ -257,14 +942,17 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
* We work on full longs, as we can only exclude the leftover
* bits in non-inverse order. The leftover is all zeros.
*/
- if (!bitmap_and(apm, matrix_mdev->matrix.apm,
- lstdev->matrix.apm, AP_DEVICES))
+ if (!bitmap_and(apm, mdev_apm, assigned_to->matrix.apm, AP_DEVICES))
continue;
- if (!bitmap_and(aqm, matrix_mdev->matrix.aqm,
- lstdev->matrix.aqm, AP_DOMAINS))
+ if (!bitmap_and(aqm, mdev_aqm, assigned_to->matrix.aqm, AP_DOMAINS))
continue;
+ if (assignee)
+ vfio_ap_mdev_log_sharing_err(assignee, assigned_to, apm, aqm);
+ else
+ vfio_ap_mdev_log_in_use_err(assigned_to, apm, aqm);
+
return -EADDRINUSE;
}
@@ -272,7 +960,85 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
}
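/*
 * Illustrative sketch (editor's aside, not part of this patch): two mdevs
 * share at least one APQN exactly when the intersection of their APMs and
 * the intersection of their AQMs are both non-empty, which is what the pair
 * of bitmap_and() calls above tests.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool apqns_shared(uint64_t apm1, uint64_t aqm1,
			 uint64_t apm2, uint64_t aqm2)
{
	return (apm1 & apm2) && (aqm1 & aqm2);
}

int main(void)
{
	/* adapter 01 with domains 0000-0001 vs. adapter 01 with domain 0002 */
	printf("%d\n", apqns_shared(1ULL << 1, 0x3, 1ULL << 1, 1ULL << 2)); /* 0 */
	/* overlapping domain 0001 means APQN 01.0001 would be shared */
	printf("%d\n", apqns_shared(1ULL << 1, 0x3, 1ULL << 1, 1ULL << 1)); /* 1 */
	return 0;
}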
/**
- * assign_adapter_store
+ * vfio_ap_mdev_validate_masks - verify that the APQNs assigned to the mdev are
+ * not reserved for the default zcrypt driver and
+ * are not assigned to another mdev.
+ *
+ * @matrix_mdev: the mdev to which the APQNs being validated are assigned.
+ *
+ * Return: One of the following values:
+ * o the error returned from the ap_apqn_in_matrix_owned_by_def_drv() function,
+ * most likely -EBUSY indicating the ap_attr_mutex lock is already held.
+ * o -EADDRNOTAVAIL if an APQN assigned to @matrix_mdev is reserved for the
+ * zcrypt default driver.
+ * o -EADDRINUSE if an APQN assigned to @matrix_mdev is assigned to another mdev.
+ * o A zero indicating validation succeeded.
+ */
+static int vfio_ap_mdev_validate_masks(struct ap_matrix_mdev *matrix_mdev)
+{
+ if (ap_apqn_in_matrix_owned_by_def_drv(matrix_mdev->matrix.apm,
+ matrix_mdev->matrix.aqm))
+ return -EADDRNOTAVAIL;
+
+ return vfio_ap_mdev_verify_no_sharing(matrix_mdev,
+ matrix_mdev->matrix.apm,
+ matrix_mdev->matrix.aqm);
+}
+
+static void vfio_ap_mdev_link_adapter(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid)
+{
+ unsigned long apqi;
+
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS)
+ vfio_ap_mdev_link_apqn(matrix_mdev,
+ AP_MKQID(apid, apqi));
+}
+
+static void collect_queues_to_reset(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid,
+ struct list_head *qlist)
+{
+ struct vfio_ap_queue *q;
+ unsigned long apqi;
+
+ for_each_set_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS) {
+ q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
+ if (q)
+ list_add_tail(&q->reset_qnode, qlist);
+ }
+}
+
+static void reset_queues_for_apid(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid)
+{
+ struct list_head qlist;
+
+ INIT_LIST_HEAD(&qlist);
+ collect_queues_to_reset(matrix_mdev, apid, &qlist);
+ vfio_ap_mdev_reset_qlist(&qlist);
+}
+
+static int reset_queues_for_apids(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long *apm_reset)
+{
+ struct list_head qlist;
+ unsigned long apid;
+
+ if (bitmap_empty(apm_reset, AP_DEVICES))
+ return 0;
+
+ INIT_LIST_HEAD(&qlist);
+
+ for_each_set_bit_inv(apid, apm_reset, AP_DEVICES)
+ collect_queues_to_reset(matrix_mdev, apid, &qlist);
+
+ return vfio_ap_mdev_reset_qlist(&qlist);
+}
+
+/**
+ * assign_adapter_store - parses the APID from @buf and sets the
+ * corresponding bit in the mediated matrix device's APM
*
* @dev: the matrix device
* @attr: the mediated matrix device's assign_adapter attribute
@@ -280,10 +1046,7 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
* be assigned
* @count: the number of bytes in @buf
*
- * Parses the APID from @buf and sets the corresponding bit in the mediated
- * matrix device's APM.
- *
- * Returns the number of bytes processed if the APID is valid; otherwise,
+ * Return: the number of bytes processed if the APID is valid; otherwise,
* returns one of the following errors:
*
* 1. -EINVAL
@@ -302,6 +1065,10 @@ static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
* An APQN derived from the cross product of the APID being assigned
* and the APQIs previously assigned is being used by another mediated
* matrix device
+ *
+ * 5. -EAGAIN
+ * A lock required to validate the mdev's AP configuration could not
+ * be obtained.
*/
static ssize_t assign_adapter_store(struct device *dev,
struct device_attribute *attr,
@@ -309,61 +1076,142 @@ static ssize_t assign_adapter_store(struct device *dev,
{
int ret;
unsigned long apid;
- struct mdev_device *mdev = mdev_from_dev(dev);
- struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ DECLARE_BITMAP(apm_filtered, AP_DEVICES);
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- /* If the guest is running, disallow assignment of adapter */
- if (matrix_mdev->kvm)
- return -EBUSY;
+ mutex_lock(&ap_attr_mutex);
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apid);
if (ret)
- return ret;
-
- if (apid > matrix_mdev->matrix.apm_max)
- return -ENODEV;
+ goto done;
- /*
- * Set the bit in the AP mask (APM) corresponding to the AP adapter
- * number (APID). The bits in the mask, from most significant to least
- * significant bit, correspond to APIDs 0-255.
- */
- mutex_lock(&matrix_dev->lock);
+ if (apid > matrix_mdev->matrix.apm_max) {
+ ret = -ENODEV;
+ goto done;
+ }
- ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid);
- if (ret)
+ if (test_bit_inv(apid, matrix_mdev->matrix.apm)) {
+ ret = count;
goto done;
+ }
set_bit_inv(apid, matrix_mdev->matrix.apm);
- ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
- if (ret)
- goto share_err;
+ ret = vfio_ap_mdev_validate_masks(matrix_mdev);
+ if (ret) {
+ clear_bit_inv(apid, matrix_mdev->matrix.apm);
+ goto done;
+ }
- ret = count;
- goto done;
+ vfio_ap_mdev_link_adapter(matrix_mdev, apid);
+
+ if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ reset_queues_for_apids(matrix_mdev, apm_filtered);
+ }
-share_err:
- clear_bit_inv(apid, matrix_mdev->matrix.apm);
+ ret = count;
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
+ mutex_unlock(&ap_attr_mutex);
return ret;
}
static DEVICE_ATTR_WO(assign_adapter);
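/*
 * Illustrative sketch (editor's aside, not part of this patch): driving the
 * attribute from user space. The mdev UUID in the path is hypothetical; one
 * APID is accepted per write and parsed by kstrtoul(), so "4" or "0x04" both
 * work.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *attr = "/sys/devices/vfio_ap/matrix/"
			   "11111111-2222-3333-4444-555555555555/assign_adapter";
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "0x04\n", 5) != 5)	/* assign adapter 04 */
		perror("write");
	close(fd);
	return 0;
}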
+static struct vfio_ap_queue
+*vfio_ap_unlink_apqn_fr_mdev(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid, unsigned long apqi)
+{
+ struct vfio_ap_queue *q = NULL;
+
+ q = vfio_ap_mdev_get_queue(matrix_mdev, AP_MKQID(apid, apqi));
+ /* If the queue is assigned to the matrix mdev, unlink it. */
+ if (q)
+ vfio_ap_unlink_queue_fr_mdev(q);
+
+ return q;
+}
+
/**
- * unassign_adapter_store
+ * vfio_ap_mdev_unlink_adapter - unlink all queues associated with unassigned
+ * adapter from the matrix mdev to which the
+ * adapter was assigned.
+ * @matrix_mdev: the matrix mediated device to which the adapter was assigned.
+ * @apid: the APID of the unassigned adapter.
+ * @qlist: list for storing queues associated with unassigned adapter that
+ * need to be reset.
+ */
+static void vfio_ap_mdev_unlink_adapter(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid,
+ struct list_head *qlist)
+{
+ unsigned long apqi;
+ struct vfio_ap_queue *q;
+
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, AP_DOMAINS) {
+ q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);
+
+ if (q && qlist) {
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
+ list_add_tail(&q->reset_qnode, qlist);
+ }
+ }
+}
+
+static void vfio_ap_mdev_hot_unplug_adapters(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long *apids)
+{
+ struct vfio_ap_queue *q, *tmpq;
+ struct list_head qlist;
+ unsigned long apid;
+ bool apcb_update = false;
+
+ INIT_LIST_HEAD(&qlist);
+
+ for_each_set_bit_inv(apid, apids, AP_DEVICES) {
+ vfio_ap_mdev_unlink_adapter(matrix_mdev, apid, &qlist);
+
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm)) {
+ clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
+ apcb_update = true;
+ }
+ }
+
+ /* Only update apcb if needed to avoid impacting guest */
+ if (apcb_update)
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+
+ vfio_ap_mdev_reset_qlist(&qlist);
+
+ list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
+ vfio_ap_unlink_mdev_fr_queue(q);
+ list_del(&q->reset_qnode);
+ }
+}
+
+static void vfio_ap_mdev_hot_unplug_adapter(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid)
+{
+ DECLARE_BITMAP(apids, AP_DEVICES);
+
+ bitmap_zero(apids, AP_DEVICES);
+ set_bit_inv(apid, apids);
+ vfio_ap_mdev_hot_unplug_adapters(matrix_mdev, apids);
+}
+
+/**
+ * unassign_adapter_store - parses the APID from @buf and clears the
+ * corresponding bit in the mediated matrix device's APM
*
* @dev: the matrix device
* @attr: the mediated matrix device's unassign_adapter attribute
* @buf: a buffer containing the adapter number (APID) to be unassigned
* @count: the number of bytes in @buf
*
- * Parses the APID from @buf and clears the corresponding bit in the mediated
- * matrix device's APM.
- *
- * Returns the number of bytes processed if the APID is valid; otherwise,
+ * Return: the number of bytes processed if the APID is valid; otherwise,
* returns one of the following errors:
* -EINVAL if the APID is not a number
* -ENODEV if the APID exceeds the maximum value configured for the
@@ -375,50 +1223,46 @@ static ssize_t unassign_adapter_store(struct device *dev,
{
int ret;
unsigned long apid;
- struct mdev_device *mdev = mdev_from_dev(dev);
- struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- /* If the guest is running, disallow un-assignment of adapter */
- if (matrix_mdev->kvm)
- return -EBUSY;
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apid);
if (ret)
- return ret;
+ goto done;
- if (apid > matrix_mdev->matrix.apm_max)
- return -ENODEV;
+ if (apid > matrix_mdev->matrix.apm_max) {
+ ret = -ENODEV;
+ goto done;
+ }
- mutex_lock(&matrix_dev->lock);
- clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
- mutex_unlock(&matrix_dev->lock);
+ if (!test_bit_inv(apid, matrix_mdev->matrix.apm)) {
+ ret = count;
+ goto done;
+ }
- return count;
+ clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
+ vfio_ap_mdev_hot_unplug_adapter(matrix_mdev, apid);
+ ret = count;
+done:
+ release_update_locks_for_mdev(matrix_mdev);
+ return ret;
}
static DEVICE_ATTR_WO(unassign_adapter);
-static int
-vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
- unsigned long apqi)
+static void vfio_ap_mdev_link_domain(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi)
{
- int ret;
unsigned long apid;
- unsigned long nbits = matrix_mdev->matrix.apm_max + 1;
-
- if (find_first_bit_inv(matrix_mdev->matrix.apm, nbits) >= nbits)
- return vfio_ap_verify_queue_reserved(NULL, &apqi);
- for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, nbits) {
- ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
- if (ret)
- return ret;
- }
-
- return 0;
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES)
+ vfio_ap_mdev_link_apqn(matrix_mdev,
+ AP_MKQID(apid, apqi));
}
/**
- * assign_domain_store
+ * assign_domain_store - parses the APQI from @buf and sets the
+ * corresponding bit in the mediated matrix device's AQM
*
* @dev: the matrix device
* @attr: the mediated matrix device's assign_domain attribute
@@ -426,10 +1270,7 @@ vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
* be assigned
* @count: the number of bytes in @buf
*
- * Parses the APQI from @buf and sets the corresponding bit in the mediated
- * matrix device's AQM.
- *
- * Returns the number of bytes processed if the APQI is valid; otherwise returns
+ * Return: the number of bytes processed if the APQI is valid; otherwise returns
* one of the following errors:
*
* 1. -EINVAL
@@ -448,6 +1289,10 @@ vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
* An APQN derived from the cross product of the APQI being assigned
* and the APIDs previously assigned is being used by another mediated
* matrix device
+ *
+ * 5. -EAGAIN
+ * The lock required to validate the mdev's AP configuration could not
+ * be obtained.
*/
static ssize_t assign_domain_store(struct device *dev,
struct device_attribute *attr,
@@ -455,47 +1300,112 @@ static ssize_t assign_domain_store(struct device *dev,
{
int ret;
unsigned long apqi;
- struct mdev_device *mdev = mdev_from_dev(dev);
- struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
- unsigned long max_apqi = matrix_mdev->matrix.aqm_max;
+ DECLARE_BITMAP(apm_filtered, AP_DEVICES);
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- /* If the guest is running, disallow assignment of domain */
- if (matrix_mdev->kvm)
- return -EBUSY;
+ mutex_lock(&ap_attr_mutex);
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apqi);
if (ret)
- return ret;
- if (apqi > max_apqi)
- return -ENODEV;
+ goto done;
- mutex_lock(&matrix_dev->lock);
+ if (apqi > matrix_mdev->matrix.aqm_max) {
+ ret = -ENODEV;
+ goto done;
+ }
- ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi);
- if (ret)
+ if (test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
+ ret = count;
goto done;
+ }
set_bit_inv(apqi, matrix_mdev->matrix.aqm);
- ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
- if (ret)
- goto share_err;
+ ret = vfio_ap_mdev_validate_masks(matrix_mdev);
+ if (ret) {
+ clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
+ goto done;
+ }
- ret = count;
- goto done;
+ vfio_ap_mdev_link_domain(matrix_mdev, apqi);
+
+ if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ reset_queues_for_apids(matrix_mdev, apm_filtered);
+ }
-share_err:
- clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
+ ret = count;
done:
- mutex_unlock(&matrix_dev->lock);
+ release_update_locks_for_mdev(matrix_mdev);
+ mutex_unlock(&ap_attr_mutex);
return ret;
}
static DEVICE_ATTR_WO(assign_domain);
+static void vfio_ap_mdev_unlink_domain(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi,
+ struct list_head *qlist)
+{
+ unsigned long apid;
+ struct vfio_ap_queue *q;
+
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, AP_DEVICES) {
+ q = vfio_ap_unlink_apqn_fr_mdev(matrix_mdev, apid, apqi);
+
+ if (q && qlist) {
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
+ list_add_tail(&q->reset_qnode, qlist);
+ }
+ }
+}
+
+static void vfio_ap_mdev_hot_unplug_domains(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long *apqis)
+{
+ struct vfio_ap_queue *q, *tmpq;
+ struct list_head qlist;
+ unsigned long apqi;
+ bool apcb_update = false;
+
+ INIT_LIST_HEAD(&qlist);
+
+ for_each_set_bit_inv(apqi, apqis, AP_DOMAINS) {
+ vfio_ap_mdev_unlink_domain(matrix_mdev, apqi, &qlist);
+
+ if (test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
+ clear_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm);
+ apcb_update = true;
+ }
+ }
+
+ /* Only update apcb if needed to avoid impacting guest */
+ if (apcb_update)
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+
+ vfio_ap_mdev_reset_qlist(&qlist);
+
+ list_for_each_entry_safe(q, tmpq, &qlist, reset_qnode) {
+ vfio_ap_unlink_mdev_fr_queue(q);
+ list_del(&q->reset_qnode);
+ }
+}
+
+static void vfio_ap_mdev_hot_unplug_domain(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi)
+{
+ DECLARE_BITMAP(apqis, AP_DOMAINS);
+
+ bitmap_zero(apqis, AP_DOMAINS);
+ set_bit_inv(apqi, apqis);
+ vfio_ap_mdev_hot_unplug_domains(matrix_mdev, apqis);
+}
/**
- * unassign_domain_store
+ * unassign_domain_store - parses the APQI from @buf and clears the
+ * corresponding bit in the mediated matrix device's AQM
*
* @dev: the matrix device
* @attr: the mediated matrix device's unassign_domain attribute
@@ -503,10 +1413,7 @@ static DEVICE_ATTR_WO(assign_domain);
* be unassigned
* @count: the number of bytes in @buf
*
- * Parses the APQI from @buf and clears the corresponding bit in the
- * mediated matrix device's AQM.
- *
- * Returns the number of bytes processed if the APQI is valid; otherwise,
+ * Return: the number of bytes processed if the APQI is valid; otherwise,
* returns one of the following errors:
* -EINVAL if the APQI is not a number
* -ENODEV if the APQI exceeds the maximum value configured for the system
@@ -517,40 +1424,44 @@ static ssize_t unassign_domain_store(struct device *dev,
{
int ret;
unsigned long apqi;
- struct mdev_device *mdev = mdev_from_dev(dev);
- struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- /* If the guest is running, disallow un-assignment of domain */
- if (matrix_mdev->kvm)
- return -EBUSY;
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &apqi);
if (ret)
- return ret;
+ goto done;
- if (apqi > matrix_mdev->matrix.aqm_max)
- return -ENODEV;
+ if (apqi > matrix_mdev->matrix.aqm_max) {
+ ret = -ENODEV;
+ goto done;
+ }
+
+ if (!test_bit_inv(apqi, matrix_mdev->matrix.aqm)) {
+ ret = count;
+ goto done;
+ }
- mutex_lock(&matrix_dev->lock);
clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
- mutex_unlock(&matrix_dev->lock);
+ vfio_ap_mdev_hot_unplug_domain(matrix_mdev, apqi);
+ ret = count;
- return count;
+done:
+ release_update_locks_for_mdev(matrix_mdev);
+ return ret;
}
static DEVICE_ATTR_WO(unassign_domain);
/**
- * assign_control_domain_store
+ * assign_control_domain_store - parses the domain ID from @buf and sets
+ * the corresponding bit in the mediated matrix device's ADM
*
* @dev: the matrix device
* @attr: the mediated matrix device's assign_control_domain attribute
* @buf: a buffer containing the domain ID to be assigned
* @count: the number of bytes in @buf
*
- * Parses the domain ID from @buf and sets the corresponding bit in the mediated
- * matrix device's ADM.
- *
- * Returns the number of bytes processed if the domain ID is valid; otherwise,
+ * Return: the number of bytes processed if the domain ID is valid; otherwise,
* returns one of the following errors:
* -EINVAL if the ID is not a number
* -ENODEV if the ID exceeds the maximum value configured for the system
@@ -561,45 +1472,50 @@ static ssize_t assign_control_domain_store(struct device *dev,
{
int ret;
unsigned long id;
- struct mdev_device *mdev = mdev_from_dev(dev);
- struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- /* If the guest is running, disallow assignment of control domain */
- if (matrix_mdev->kvm)
- return -EBUSY;
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &id);
if (ret)
- return ret;
+ goto done;
- if (id > matrix_mdev->matrix.adm_max)
- return -ENODEV;
+ if (id > matrix_mdev->matrix.adm_max) {
+ ret = -ENODEV;
+ goto done;
+ }
+
+ if (test_bit_inv(id, matrix_mdev->matrix.adm)) {
+ ret = count;
+ goto done;
+ }
/* Set the bit in the ADM (bitmask) corresponding to the AP control
* domain number (id). The bits in the mask, from most significant to
* least significant, correspond to IDs 0 through one less than the
* number of control domains that can be assigned.
*/
- mutex_lock(&matrix_dev->lock);
set_bit_inv(id, matrix_mdev->matrix.adm);
- mutex_unlock(&matrix_dev->lock);
+ if (vfio_ap_mdev_filter_cdoms(matrix_mdev))
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
- return count;
+ ret = count;
+done:
+ release_update_locks_for_mdev(matrix_mdev);
+ return ret;
}
static DEVICE_ATTR_WO(assign_control_domain);
/**
- * unassign_control_domain_store
+ * unassign_control_domain_store - parses the domain ID from @buf and
+ * clears the corresponding bit in the mediated matrix device's ADM
*
* @dev: the matrix device
* @attr: the mediated matrix device's unassign_control_domain attribute
* @buf: a buffer containing the domain ID to be unassigned
* @count: the number of bytes in @buf
*
- * Parses the domain ID from @buf and clears the corresponding bit in the
- * mediated matrix device's ADM.
- *
- * Returns the number of bytes processed if the domain ID is valid; otherwise,
+ * Return: the number of bytes processed if the domain ID is valid; otherwise,
* returns one of the following errors:
* -EINVAL if the ID is not a number
* -ENODEV if the ID exceeds the maximum value configured for the system
@@ -610,25 +1526,35 @@ static ssize_t unassign_control_domain_store(struct device *dev,
{
int ret;
unsigned long domid;
- struct mdev_device *mdev = mdev_from_dev(dev);
- struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
- unsigned long max_domid = matrix_mdev->matrix.adm_max;
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
- /* If the guest is running, disallow un-assignment of control domain */
- if (matrix_mdev->kvm)
- return -EBUSY;
+ get_update_locks_for_mdev(matrix_mdev);
ret = kstrtoul(buf, 0, &domid);
if (ret)
- return ret;
- if (domid > max_domid)
- return -ENODEV;
+ goto done;
+
+ if (domid > matrix_mdev->matrix.adm_max) {
+ ret = -ENODEV;
+ goto done;
+ }
+
+ if (!test_bit_inv(domid, matrix_mdev->matrix.adm)) {
+ ret = count;
+ goto done;
+ }
- mutex_lock(&matrix_dev->lock);
clear_bit_inv(domid, matrix_mdev->matrix.adm);
- mutex_unlock(&matrix_dev->lock);
- return count;
+ if (test_bit_inv(domid, matrix_mdev->shadow_apcb.adm)) {
+ clear_bit_inv(domid, matrix_mdev->shadow_apcb.adm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ }
+
+ ret = count;
+done:
+ release_update_locks_for_mdev(matrix_mdev);
+ return ret;
}
static DEVICE_ATTR_WO(unassign_control_domain);
@@ -637,75 +1563,228 @@ static ssize_t control_domains_show(struct device *dev,
char *buf)
{
unsigned long id;
- int nchars = 0;
- int n;
- char *bufpos = buf;
- struct mdev_device *mdev = mdev_from_dev(dev);
- struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
unsigned long max_domid = matrix_mdev->matrix.adm_max;
+ int nchars = 0;
- mutex_lock(&matrix_dev->lock);
- for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) {
- n = sprintf(bufpos, "%04lx\n", id);
- bufpos += n;
- nchars += n;
- }
- mutex_unlock(&matrix_dev->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+ for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1)
+ nchars += sysfs_emit_at(buf, nchars, "%04lx\n", id);
+ mutex_unlock(&matrix_dev->mdevs_lock);
return nchars;
}
static DEVICE_ATTR_RO(control_domains);
-static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t vfio_ap_mdev_matrix_show(struct ap_matrix *matrix, char *buf)
{
- struct mdev_device *mdev = mdev_from_dev(dev);
- struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
- char *bufpos = buf;
unsigned long apid;
unsigned long apqi;
unsigned long apid1;
unsigned long apqi1;
- unsigned long napm_bits = matrix_mdev->matrix.apm_max + 1;
- unsigned long naqm_bits = matrix_mdev->matrix.aqm_max + 1;
+ unsigned long napm_bits = matrix->apm_max + 1;
+ unsigned long naqm_bits = matrix->aqm_max + 1;
int nchars = 0;
- int n;
- apid1 = find_first_bit_inv(matrix_mdev->matrix.apm, napm_bits);
- apqi1 = find_first_bit_inv(matrix_mdev->matrix.aqm, naqm_bits);
-
- mutex_lock(&matrix_dev->lock);
+ apid1 = find_first_bit_inv(matrix->apm, napm_bits);
+ apqi1 = find_first_bit_inv(matrix->aqm, naqm_bits);
if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
- for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
- for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
- naqm_bits) {
- n = sprintf(bufpos, "%02lx.%04lx\n", apid,
- apqi);
- bufpos += n;
- nchars += n;
- }
+ for_each_set_bit_inv(apid, matrix->apm, napm_bits) {
+ for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits)
+ nchars += sysfs_emit_at(buf, nchars, "%02lx.%04lx\n", apid, apqi);
}
} else if (apid1 < napm_bits) {
- for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
- n = sprintf(bufpos, "%02lx.\n", apid);
- bufpos += n;
- nchars += n;
- }
+ for_each_set_bit_inv(apid, matrix->apm, napm_bits)
+ nchars += sysfs_emit_at(buf, nchars, "%02lx.\n", apid);
} else if (apqi1 < naqm_bits) {
- for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, naqm_bits) {
- n = sprintf(bufpos, ".%04lx\n", apqi);
- bufpos += n;
- nchars += n;
- }
+ for_each_set_bit_inv(apqi, matrix->aqm, naqm_bits)
+ nchars += sysfs_emit_at(buf, nchars, ".%04lx\n", apqi);
}
- mutex_unlock(&matrix_dev->lock);
+ return nchars;
+}
+
+static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t nchars;
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
+
+ mutex_lock(&matrix_dev->mdevs_lock);
+ nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->matrix, buf);
+ mutex_unlock(&matrix_dev->mdevs_lock);
return nchars;
}
static DEVICE_ATTR_RO(matrix);
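/*
 * Illustrative sketch (editor's aside, not part of this patch): the three
 * output shapes the show function can emit - "xx.yyyy" lines when both masks
 * have bits set, "xx." for adapters only, and ".yyyy" for domains only.
 */
#include <stdint.h>
#include <stdio.h>

static void show_matrix(uint64_t apm, uint64_t aqm)
{
	unsigned int apid, apqi;

	if (apm && aqm) {
		for (apid = 0; apid < 64; apid++)
			for (apqi = 0; apqi < 64; apqi++)
				if ((apm & (1ULL << apid)) && (aqm & (1ULL << apqi)))
					printf("%02x.%04x\n", apid, apqi);
	} else if (apm) {
		for (apid = 0; apid < 64; apid++)
			if (apm & (1ULL << apid))
				printf("%02x.\n", apid);
	} else {
		for (apqi = 0; apqi < 64; apqi++)
			if (aqm & (1ULL << apqi))
				printf(".%04x\n", apqi);
	}
}

int main(void)
{
	show_matrix(1ULL << 4, 1ULL << 6);	/* "04.0006" */
	show_matrix(1ULL << 4, 0);		/* "04." */
	return 0;
}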
+static ssize_t guest_matrix_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t nchars;
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
+
+ mutex_lock(&matrix_dev->mdevs_lock);
+ nchars = vfio_ap_mdev_matrix_show(&matrix_mdev->shadow_apcb, buf);
+ mutex_unlock(&matrix_dev->mdevs_lock);
+
+ return nchars;
+}
+static DEVICE_ATTR_RO(guest_matrix);
+
+static ssize_t write_ap_bitmap(unsigned long *bitmap, char *buf, int offset, char sep)
+{
+ return sysfs_emit_at(buf, offset, "0x%016lx%016lx%016lx%016lx%c",
+ bitmap[0], bitmap[1], bitmap[2], bitmap[3], sep);
+}
+
+static ssize_t ap_config_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
+ int idx = 0;
+
+ idx += write_ap_bitmap(matrix_mdev->matrix.apm, buf, idx, ',');
+ idx += write_ap_bitmap(matrix_mdev->matrix.aqm, buf, idx, ',');
+ idx += write_ap_bitmap(matrix_mdev->matrix.adm, buf, idx, '\n');
+
+ return idx;
+}
+
+/*
+ * Number of characters needed for a complete hex mask representing the bits
+ * in AP_DEVICES or AP_DOMAINS, respectively: "0x", one hex digit per four
+ * bits, and a trailing separator or newline.
+ */
+#define AP_DEVICES_STRLEN (AP_DEVICES / 4 + 3)
+#define AP_DOMAINS_STRLEN (AP_DOMAINS / 4 + 3)
+#define AP_CONFIG_STRLEN (AP_DEVICES_STRLEN + 2 * AP_DOMAINS_STRLEN)
+
+static int parse_bitmap(char **strbufptr, unsigned long *bitmap, int nbits)
+{
+ char *curmask;
+
+ curmask = strsep(strbufptr, ",\n");
+ if (!curmask)
+ return -EINVAL;
+
+ bitmap_clear(bitmap, 0, nbits);
+ return ap_hex2bitmap(curmask, bitmap, nbits);
+}
+
+static int ap_matrix_overflow_check(struct ap_matrix_mdev *matrix_mdev)
+{
+ unsigned long bit;
+
+ for_each_set_bit_inv(bit, matrix_mdev->matrix.apm, AP_DEVICES) {
+ if (bit > matrix_mdev->matrix.apm_max)
+ return -ENODEV;
+ }
+
+ for_each_set_bit_inv(bit, matrix_mdev->matrix.aqm, AP_DOMAINS) {
+ if (bit > matrix_mdev->matrix.aqm_max)
+ return -ENODEV;
+ }
+
+ for_each_set_bit_inv(bit, matrix_mdev->matrix.adm, AP_DOMAINS) {
+ if (bit > matrix_mdev->matrix.adm_max)
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void ap_matrix_copy(struct ap_matrix *dst, struct ap_matrix *src)
+{
+ /* This check works around false positive gcc -Wstringop-overread */
+ if (!src)
+ return;
+
+ bitmap_copy(dst->apm, src->apm, AP_DEVICES);
+ bitmap_copy(dst->aqm, src->aqm, AP_DOMAINS);
+ bitmap_copy(dst->adm, src->adm, AP_DOMAINS);
+}
+
+static ssize_t ap_config_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ap_matrix_mdev *matrix_mdev = dev_get_drvdata(dev);
+ struct ap_matrix m_new, m_old, m_added, m_removed;
+ DECLARE_BITMAP(apm_filtered, AP_DEVICES);
+ unsigned long newbit;
+ char *newbuf, *rest;
+ int rc = count;
+ bool do_update;
+
+ newbuf = kstrndup(buf, AP_CONFIG_STRLEN, GFP_KERNEL);
+ if (!newbuf)
+ return -ENOMEM;
+ rest = newbuf;
+
+ mutex_lock(&ap_attr_mutex);
+ get_update_locks_for_mdev(matrix_mdev);
+
+ /* Save old state */
+ ap_matrix_copy(&m_old, &matrix_mdev->matrix);
+ if (parse_bitmap(&rest, m_new.apm, AP_DEVICES) ||
+ parse_bitmap(&rest, m_new.aqm, AP_DOMAINS) ||
+ parse_bitmap(&rest, m_new.adm, AP_DOMAINS)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ bitmap_andnot(m_removed.apm, m_old.apm, m_new.apm, AP_DEVICES);
+ bitmap_andnot(m_removed.aqm, m_old.aqm, m_new.aqm, AP_DOMAINS);
+ bitmap_andnot(m_added.apm, m_new.apm, m_old.apm, AP_DEVICES);
+ bitmap_andnot(m_added.aqm, m_new.aqm, m_old.aqm, AP_DOMAINS);
+
+ /* Need new bitmaps in matrix_mdev for validation */
+ ap_matrix_copy(&matrix_mdev->matrix, &m_new);
+
+ /* Ensure new state is valid, else undo new state */
+ rc = vfio_ap_mdev_validate_masks(matrix_mdev);
+ if (rc) {
+ ap_matrix_copy(&matrix_mdev->matrix, &m_old);
+ goto out;
+ }
+ rc = ap_matrix_overflow_check(matrix_mdev);
+ if (rc) {
+ ap_matrix_copy(&matrix_mdev->matrix, &m_old);
+ goto out;
+ }
+ rc = count;
+
+ /* Need old bitmaps in matrix_mdev for unplug/unlink */
+ ap_matrix_copy(&matrix_mdev->matrix, &m_old);
+
+ /* Unlink removed adapters/domains */
+ vfio_ap_mdev_hot_unplug_adapters(matrix_mdev, m_removed.apm);
+ vfio_ap_mdev_hot_unplug_domains(matrix_mdev, m_removed.aqm);
+
+ /* Need new bitmaps in matrix_mdev for linking new adapters/domains */
+ ap_matrix_copy(&matrix_mdev->matrix, &m_new);
+
+ /* Link newly added adapters */
+ for_each_set_bit_inv(newbit, m_added.apm, AP_DEVICES)
+ vfio_ap_mdev_link_adapter(matrix_mdev, newbit);
+
+ for_each_set_bit_inv(newbit, m_added.aqm, AP_DOMAINS)
+ vfio_ap_mdev_link_domain(matrix_mdev, newbit);
+
+ /* filter resources not bound to vfio-ap */
+ do_update = vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered);
+ do_update |= vfio_ap_mdev_filter_cdoms(matrix_mdev);
+
+ /* Apply changes to shadow apcb if things changed */
+ if (do_update) {
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ reset_queues_for_apids(matrix_mdev, apm_filtered);
+ }
+out:
+ release_update_locks_for_mdev(matrix_mdev);
+ mutex_unlock(&ap_attr_mutex);
+ kfree(newbuf);
+ return rc;
+}
+static DEVICE_ATTR_RW(ap_config);
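/*
 * Illustrative sketch (editor's aside, not part of this patch): composing a
 * full ap_config line in user space. Each mask is "0x" plus 256/4 = 64 hex
 * digits plus a separator, i.e. 67 bytes, matching the AP_*_STRLEN macros
 * above; a single write replaces the whole matrix atomically. The mdev path
 * is hypothetical. Note that bit 0 of an AP mask is the leftmost (most
 * significant) bit.
 */
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int emit_mask(char *dst, size_t sz, const uint64_t w[4], char sep)
{
	return snprintf(dst, sz, "0x%016" PRIx64 "%016" PRIx64 "%016" PRIx64
			"%016" PRIx64 "%c", w[0], w[1], w[2], w[3], sep);
}

int main(void)
{
	const uint64_t apm[4] = { 1ULL << 62, 0, 0, 0 };	/* adapter 01 */
	const uint64_t aqm[4] = { 1ULL << 63, 0, 0, 0 };	/* domain 0000 */
	const uint64_t adm[4] = { 1ULL << 63, 0, 0, 0 };	/* ctrl dom 0000 */
	char line[3 * 67 + 1];
	int n = 0, fd;

	n += emit_mask(line + n, sizeof(line) - n, apm, ',');
	n += emit_mask(line + n, sizeof(line) - n, aqm, ',');
	n += emit_mask(line + n, sizeof(line) - n, adm, '\n');

	fd = open("/sys/devices/vfio_ap/matrix/"
		  "11111111-2222-3333-4444-555555555555/ap_config", O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, line, n) != n)
		perror("write");
	close(fd);
	return 0;
}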
+
static struct attribute *vfio_ap_mdev_attrs[] = {
&dev_attr_assign_adapter.attr,
&dev_attr_unassign_adapter.attr,
@@ -713,8 +1792,10 @@ static struct attribute *vfio_ap_mdev_attrs[] = {
&dev_attr_unassign_domain.attr,
&dev_attr_assign_control_domain.attr,
&dev_attr_unassign_control_domain.attr,
+ &dev_attr_ap_config.attr,
&dev_attr_control_domains.attr,
&dev_attr_matrix.attr,
+ &dev_attr_guest_matrix.attr,
NULL,
};
@@ -728,15 +1809,13 @@ static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
};
/**
- * vfio_ap_mdev_set_kvm
+ * vfio_ap_mdev_set_kvm - sets all data for @matrix_mdev that are needed
+ * to manage AP resources for the guest whose state is represented by @kvm
*
* @matrix_mdev: a mediated matrix device
* @kvm: reference to KVM instance
*
- * Verifies no other mediated matrix device has @kvm and sets a reference to
- * it in @matrix_mdev->kvm.
- *
- * Return 0 if no other mediated matrix device has a reference to @kvm;
+ * Return: 0 if no other mediated matrix device has a reference to @kvm;
* otherwise, returns -EPERM.
*/
static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
@@ -744,135 +1823,279 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
{
struct ap_matrix_mdev *m;
- mutex_lock(&matrix_dev->lock);
+ if (kvm->arch.crypto.crycbd) {
+ down_write(&kvm->arch.crypto.pqap_hook_rwsem);
+ kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook;
+ up_write(&kvm->arch.crypto.pqap_hook_rwsem);
+
+ get_update_locks_for_kvm(kvm);
- list_for_each_entry(m, &matrix_dev->mdev_list, node) {
- if ((m != matrix_mdev) && (m->kvm == kvm)) {
- mutex_unlock(&matrix_dev->lock);
- return -EPERM;
+ list_for_each_entry(m, &matrix_dev->mdev_list, node) {
+ if (m != matrix_mdev && m->kvm == kvm) {
+ release_update_locks_for_kvm(kvm);
+ return -EPERM;
+ }
}
- }
- matrix_mdev->kvm = kvm;
- mutex_unlock(&matrix_dev->lock);
+ kvm_get_kvm(kvm);
+ matrix_mdev->kvm = kvm;
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+
+ release_update_locks_for_kvm(kvm);
+ }
return 0;
}
-static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
+static void unmap_iova(struct ap_matrix_mdev *matrix_mdev, u64 iova, u64 length)
{
- int ret;
- struct ap_matrix_mdev *matrix_mdev;
+ struct ap_queue_table *qtable = &matrix_mdev->qtable;
+ struct vfio_ap_queue *q;
+ int loop_cursor;
+
+ hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
+ if (q->saved_iova >= iova && q->saved_iova < iova + length)
+ vfio_ap_irq_disable(q);
+ }
+}
+
+static void vfio_ap_mdev_dma_unmap(struct vfio_device *vdev, u64 iova,
+ u64 length)
+{
+ struct ap_matrix_mdev *matrix_mdev =
+ container_of(vdev, struct ap_matrix_mdev, vdev);
+
+ mutex_lock(&matrix_dev->mdevs_lock);
+
+ unmap_iova(matrix_mdev, iova, length);
+
+ mutex_unlock(&matrix_dev->mdevs_lock);
+}
- if (action != VFIO_GROUP_NOTIFY_SET_KVM)
- return NOTIFY_OK;
+/**
+ * vfio_ap_mdev_unset_kvm - performs clean-up of resources no longer needed
+ * by @matrix_mdev.
+ *
+ * @matrix_mdev: a matrix mediated device
+ */
+static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev)
+{
+ struct kvm *kvm = matrix_mdev->kvm;
+
+ if (kvm && kvm->arch.crypto.crycbd) {
+ down_write(&kvm->arch.crypto.pqap_hook_rwsem);
+ kvm->arch.crypto.pqap_hook = NULL;
+ up_write(&kvm->arch.crypto.pqap_hook_rwsem);
- matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
+ get_update_locks_for_kvm(kvm);
- if (!data) {
+ kvm_arch_crypto_clear_masks(kvm);
+ vfio_ap_mdev_reset_queues(matrix_mdev);
+ kvm_put_kvm(kvm);
matrix_mdev->kvm = NULL;
- return NOTIFY_OK;
+
+ release_update_locks_for_kvm(kvm);
}
+}
- ret = vfio_ap_mdev_set_kvm(matrix_mdev, data);
- if (ret)
- return NOTIFY_DONE;
+static struct vfio_ap_queue *vfio_ap_find_queue(int apqn)
+{
+ struct ap_queue *queue;
+ struct vfio_ap_queue *q = NULL;
+
+ queue = ap_get_qdev(apqn);
+ if (!queue)
+ return NULL;
- /* If there is no CRYCB pointer, then we can't copy the masks */
- if (!matrix_mdev->kvm->arch.crypto.crycbd)
- return NOTIFY_DONE;
+ if (queue->ap_dev.device.driver == &matrix_dev->vfio_ap_drv->driver)
+ q = dev_get_drvdata(&queue->ap_dev.device);
- kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->matrix.apm,
- matrix_mdev->matrix.aqm,
- matrix_mdev->matrix.adm);
+ put_device(&queue->ap_dev.device);
- return NOTIFY_OK;
+ return q;
}
-static int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
- unsigned int retry)
+static int apq_status_check(int apqn, struct ap_queue_status *status)
{
- struct ap_queue_status status;
+ switch (status->response_code) {
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ return 0;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ case AP_RESPONSE_BUSY:
+ return -EBUSY;
+ case AP_RESPONSE_ASSOC_SECRET_NOT_UNIQUE:
+ case AP_RESPONSE_ASSOC_FAILED:
+ /*
+ * These asynchronous response codes indicate a PQAP(AAPQ)
+ * instruction to associate a secret with the guest failed. All
+ * subsequent AP instructions will end with the asynchronous
+ * response code until the AP queue is reset; so, let's return
+ * a value indicating a reset needs to be performed again.
+ */
+ return -EAGAIN;
+ default:
+ WARN(true,
+ "failed to verify reset of queue %02x.%04x: TAPQ rc=%u\n",
+ AP_QID_CARD(apqn), AP_QID_QUEUE(apqn),
+ status->response_code);
+ return -EIO;
+ }
+}
- do {
- status = ap_zapq(AP_MKQID(apid, apqi));
- switch (status.response_code) {
- case AP_RESPONSE_NORMAL:
- return 0;
- case AP_RESPONSE_RESET_IN_PROGRESS:
- case AP_RESPONSE_BUSY:
- msleep(20);
+#define WAIT_MSG "Waited %dms for reset of queue %02x.%04x (%u, %u, %u)"
+
+static void apq_reset_check(struct work_struct *reset_work)
+{
+ int ret = -EBUSY, elapsed = 0;
+ struct ap_queue_status status;
+ struct vfio_ap_queue *q;
+
+ q = container_of(reset_work, struct vfio_ap_queue, reset_work);
+ memcpy(&status, &q->reset_status, sizeof(status));
+ while (true) {
+ msleep(AP_RESET_INTERVAL);
+ elapsed += AP_RESET_INTERVAL;
+ status = ap_tapq(q->apqn, NULL);
+ ret = apq_status_check(q->apqn, &status);
+ if (ret == -EIO)
+ return;
+ if (ret == -EBUSY) {
+ pr_notice_ratelimited(WAIT_MSG, elapsed,
+ AP_QID_CARD(q->apqn),
+ AP_QID_QUEUE(q->apqn),
+ status.response_code,
+ status.queue_empty,
+ status.irq_enabled);
+ } else {
+ if (q->reset_status.response_code == AP_RESPONSE_RESET_IN_PROGRESS ||
+ q->reset_status.response_code == AP_RESPONSE_BUSY ||
+ q->reset_status.response_code == AP_RESPONSE_STATE_CHANGE_IN_PROGRESS ||
+ ret == -EAGAIN) {
+ status = ap_zapq(q->apqn, 0);
+ memcpy(&q->reset_status, &status, sizeof(status));
+ continue;
+ }
+ if (q->saved_isc != VFIO_AP_ISC_INVALID)
+ vfio_ap_free_aqic_resources(q);
break;
- default:
- /* things are really broken, give up */
- return -EIO;
}
- } while (retry--);
+ }
+}
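/*
 * Illustrative sketch (editor's aside, not part of this patch): the shape of
 * the verification loop above - poll at a fixed interval, keep waiting while
 * the queue is busy, and re-issue the reset when the previous attempt could
 * not complete. query_queue() and reset_queue() are hypothetical stand-ins
 * for ap_tapq() and ap_zapq().
 */
#include <stdio.h>
#include <unistd.h>

enum qstate { Q_DONE, Q_BUSY, Q_RETRY };

static enum qstate query_queue(int *polls)
{
	/* pretend: first poll needs a new reset, second is busy, then done */
	switch ((*polls)++) {
	case 0: return Q_RETRY;
	case 1: return Q_BUSY;
	default: return Q_DONE;
	}
}

int main(void)
{
	const int interval_ms = 20;	/* stands in for AP_RESET_INTERVAL */
	int polls = 0, elapsed = 0;

	for (;;) {
		usleep(interval_ms * 1000);
		elapsed += interval_ms;
		switch (query_queue(&polls)) {
		case Q_DONE:
			printf("reset verified after %dms\n", elapsed);
			return 0;
		case Q_BUSY:
			printf("still busy after %dms\n", elapsed);
			break;
		case Q_RETRY:
			puts("re-issuing reset");
			break;
		}
	}
}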
+
+static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q)
+{
+ struct ap_queue_status status;
- return -EBUSY;
+ if (!q)
+ return;
+ status = ap_zapq(q->apqn, 0);
+ memcpy(&q->reset_status, &status, sizeof(status));
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ case AP_RESPONSE_BUSY:
+ case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
+ /*
+ * Let's verify whether the ZAPQ completed successfully on a work queue.
+ */
+ queue_work(system_long_wq, &q->reset_work);
+ break;
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ vfio_ap_free_aqic_resources(q);
+ break;
+ default:
+ WARN(true,
+ "PQAP/ZAPQ for %02x.%04x failed with invalid rc=%u\n",
+ AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
+ status.response_code);
+ }
}
-static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
+static int vfio_ap_mdev_reset_queues(struct ap_matrix_mdev *matrix_mdev)
{
- int ret;
- int rc = 0;
- unsigned long apid, apqi;
- struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ int ret = 0, loop_cursor;
+ struct vfio_ap_queue *q;
- for_each_set_bit_inv(apid, matrix_mdev->matrix.apm,
- matrix_mdev->matrix.apm_max + 1) {
- for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
- matrix_mdev->matrix.aqm_max + 1) {
- ret = vfio_ap_mdev_reset_queue(apid, apqi, 1);
- /*
- * Regardless whether a queue turns out to be busy, or
- * is not operational, we need to continue resetting
- * the remaining queues.
- */
- if (ret)
- rc = ret;
- }
+ hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode)
+ vfio_ap_mdev_reset_queue(q);
+
+ hash_for_each(matrix_mdev->qtable.queues, loop_cursor, q, mdev_qnode) {
+ flush_work(&q->reset_work);
+
+ if (q->reset_status.response_code)
+ ret = -EIO;
}
- return rc;
+ return ret;
}
-static int vfio_ap_mdev_open(struct mdev_device *mdev)
+static int vfio_ap_mdev_reset_qlist(struct list_head *qlist)
{
- struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
- unsigned long events;
- int ret;
+ int ret = 0;
+ struct vfio_ap_queue *q;
+ list_for_each_entry(q, qlist, reset_qnode)
+ vfio_ap_mdev_reset_queue(q);
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
+ list_for_each_entry(q, qlist, reset_qnode) {
+ flush_work(&q->reset_work);
- matrix_mdev->group_notifier.notifier_call = vfio_ap_mdev_group_notifier;
- events = VFIO_GROUP_NOTIFY_SET_KVM;
-
- ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
- &events, &matrix_mdev->group_notifier);
- if (ret) {
- module_put(THIS_MODULE);
- return ret;
+ if (q->reset_status.response_code)
+ ret = -EIO;
}
- return 0;
+ return ret;
+}
+
+static int vfio_ap_mdev_open_device(struct vfio_device *vdev)
+{
+ struct ap_matrix_mdev *matrix_mdev =
+ container_of(vdev, struct ap_matrix_mdev, vdev);
+
+ if (!vdev->kvm)
+ return -EINVAL;
+
+ return vfio_ap_mdev_set_kvm(matrix_mdev, vdev->kvm);
}
-static void vfio_ap_mdev_release(struct mdev_device *mdev)
+static void vfio_ap_mdev_close_device(struct vfio_device *vdev)
{
- struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ struct ap_matrix_mdev *matrix_mdev =
+ container_of(vdev, struct ap_matrix_mdev, vdev);
+
+ vfio_ap_mdev_unset_kvm(matrix_mdev);
+}
+
+static void vfio_ap_mdev_request(struct vfio_device *vdev, unsigned int count)
+{
+ struct device *dev = vdev->dev;
+ struct ap_matrix_mdev *matrix_mdev;
+
+ matrix_mdev = container_of(vdev, struct ap_matrix_mdev, vdev);
- if (matrix_mdev->kvm)
+ get_update_locks_for_mdev(matrix_mdev);
+
+ if (matrix_mdev->kvm) {
kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
+ signal_guest_ap_cfg_changed(matrix_mdev);
+ }
- vfio_ap_mdev_reset_queues(mdev);
- vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
- &matrix_mdev->group_notifier);
- matrix_mdev->kvm = NULL;
- module_put(THIS_MODULE);
+ if (matrix_mdev->req_trigger) {
+ if (!(count % 10))
+ dev_notice_ratelimited(dev,
+ "Relaying device request to user (#%u)\n",
+ count);
+
+ eventfd_signal(matrix_mdev->req_trigger);
+ } else if (count == 0) {
+ dev_notice(dev,
+ "No device request registered, blocked until released by user\n");
+ }
+
+ release_update_locks_for_mdev(matrix_mdev);
}
static int vfio_ap_mdev_get_device_info(unsigned long arg)
@@ -890,50 +2113,757 @@ static int vfio_ap_mdev_get_device_info(unsigned long arg)
info.flags = VFIO_DEVICE_FLAGS_AP | VFIO_DEVICE_FLAGS_RESET;
info.num_regions = 0;
- info.num_irqs = 0;
+ info.num_irqs = VFIO_AP_NUM_IRQS;
- return copy_to_user((void __user *)arg, &info, minsz);
+ return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
}
-static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
+static ssize_t vfio_ap_get_irq_info(unsigned long arg)
+{
+ unsigned long minsz;
+ struct vfio_irq_info info;
+
+ minsz = offsetofend(struct vfio_irq_info, count);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz || info.index >= VFIO_AP_NUM_IRQS)
+ return -EINVAL;
+
+ switch (info.index) {
+ case VFIO_AP_REQ_IRQ_INDEX:
+ info.count = 1;
+ info.flags = VFIO_IRQ_INFO_EVENTFD;
+ break;
+ case VFIO_AP_CFG_CHG_IRQ_INDEX:
+ info.count = 1;
+ info.flags = VFIO_IRQ_INFO_EVENTFD;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
+}
+
+static int vfio_ap_irq_set_init(struct vfio_irq_set *irq_set, unsigned long arg)
+{
+ int ret;
+ size_t data_size;
+ unsigned long minsz;
+
+ minsz = offsetofend(struct vfio_irq_set, count);
+
+ if (copy_from_user(irq_set, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ ret = vfio_set_irqs_validate_and_prepare(irq_set, 1, VFIO_AP_NUM_IRQS,
+ &data_size);
+ if (ret)
+ return ret;
+
+ if (!(irq_set->flags & VFIO_IRQ_SET_ACTION_TRIGGER))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vfio_ap_set_request_irq(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long arg)
+{
+ s32 fd;
+ void __user *data;
+ unsigned long minsz;
+ struct eventfd_ctx *req_trigger;
+
+ minsz = offsetofend(struct vfio_irq_set, count);
+ data = (void __user *)(arg + minsz);
+
+ if (get_user(fd, (s32 __user *)data))
+ return -EFAULT;
+
+ if (fd == -1) {
+ if (matrix_mdev->req_trigger)
+ eventfd_ctx_put(matrix_mdev->req_trigger);
+ matrix_mdev->req_trigger = NULL;
+ } else if (fd >= 0) {
+ req_trigger = eventfd_ctx_fdget(fd);
+ if (IS_ERR(req_trigger))
+ return PTR_ERR(req_trigger);
+
+ if (matrix_mdev->req_trigger)
+ eventfd_ctx_put(matrix_mdev->req_trigger);
+
+ matrix_mdev->req_trigger = req_trigger;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vfio_ap_set_cfg_change_irq(struct ap_matrix_mdev *matrix_mdev, unsigned long arg)
+{
+ s32 fd;
+ void __user *data;
+ unsigned long minsz;
+ struct eventfd_ctx *cfg_chg_trigger;
+
+ minsz = offsetofend(struct vfio_irq_set, count);
+ data = (void __user *)(arg + minsz);
+
+ if (get_user(fd, (s32 __user *)data))
+ return -EFAULT;
+
+ if (fd == -1) {
+ if (matrix_mdev->cfg_chg_trigger)
+ eventfd_ctx_put(matrix_mdev->cfg_chg_trigger);
+ matrix_mdev->cfg_chg_trigger = NULL;
+ } else if (fd >= 0) {
+ cfg_chg_trigger = eventfd_ctx_fdget(fd);
+ if (IS_ERR(cfg_chg_trigger))
+ return PTR_ERR(cfg_chg_trigger);
+
+ if (matrix_mdev->cfg_chg_trigger)
+ eventfd_ctx_put(matrix_mdev->cfg_chg_trigger);
+
+ matrix_mdev->cfg_chg_trigger = cfg_chg_trigger;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vfio_ap_set_irqs(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long arg)
+{
+ int ret;
+ struct vfio_irq_set irq_set;
+
+ ret = vfio_ap_irq_set_init(&irq_set, arg);
+ if (ret)
+ return ret;
+
+ switch (irq_set.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
+ case VFIO_IRQ_SET_DATA_EVENTFD:
+ switch (irq_set.index) {
+ case VFIO_AP_REQ_IRQ_INDEX:
+ return vfio_ap_set_request_irq(matrix_mdev, arg);
+ case VFIO_AP_CFG_CHG_IRQ_INDEX:
+ return vfio_ap_set_cfg_change_irq(matrix_mdev, arg);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
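/*
 * Illustrative sketch (editor's aside, not part of this patch): registering
 * an eventfd for the request IRQ from user space via VFIO_DEVICE_SET_IRQS.
 * device_fd is assumed to be an already-open VFIO device file descriptor;
 * sending the same payload with fd -1 unregisters the trigger again.
 */
#include <linux/vfio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>

static int register_req_trigger(int device_fd)
{
	size_t sz = sizeof(struct vfio_irq_set) + sizeof(__s32);
	struct vfio_irq_set *set = calloc(1, sz);
	__s32 efd;
	int rc;

	if (!set)
		return -1;
	efd = eventfd(0, EFD_CLOEXEC);
	if (efd < 0) {
		free(set);
		return -1;
	}
	set->argsz = sz;
	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	set->index = VFIO_AP_REQ_IRQ_INDEX;
	set->start = 0;
	set->count = 1;
	memcpy(set->data, &efd, sizeof(efd));
	rc = ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set);
	free(set);
	return rc ? -1 : efd;	/* poll/read efd to observe relayed requests */
}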
+
+static ssize_t vfio_ap_mdev_ioctl(struct vfio_device *vdev,
unsigned int cmd, unsigned long arg)
{
+ struct ap_matrix_mdev *matrix_mdev =
+ container_of(vdev, struct ap_matrix_mdev, vdev);
int ret;
+ mutex_lock(&matrix_dev->mdevs_lock);
switch (cmd) {
case VFIO_DEVICE_GET_INFO:
ret = vfio_ap_mdev_get_device_info(arg);
break;
case VFIO_DEVICE_RESET:
- ret = vfio_ap_mdev_reset_queues(mdev);
+ ret = vfio_ap_mdev_reset_queues(matrix_mdev);
+ break;
+ case VFIO_DEVICE_GET_IRQ_INFO:
+ ret = vfio_ap_get_irq_info(arg);
+ break;
+ case VFIO_DEVICE_SET_IRQS:
+ ret = vfio_ap_set_irqs(matrix_mdev, arg);
break;
default:
ret = -EOPNOTSUPP;
break;
}
+ mutex_unlock(&matrix_dev->mdevs_lock);
return ret;
}
-static const struct mdev_parent_ops vfio_ap_matrix_ops = {
- .owner = THIS_MODULE,
- .supported_type_groups = vfio_ap_mdev_type_groups,
- .mdev_attr_groups = vfio_ap_mdev_attr_groups,
- .create = vfio_ap_mdev_create,
- .remove = vfio_ap_mdev_remove,
- .open = vfio_ap_mdev_open,
- .release = vfio_ap_mdev_release,
- .ioctl = vfio_ap_mdev_ioctl,
+static struct ap_matrix_mdev *vfio_ap_mdev_for_queue(struct vfio_ap_queue *q)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+ unsigned long apid = AP_QID_CARD(q->apqn);
+ unsigned long apqi = AP_QID_QUEUE(q->apqn);
+
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ if (test_bit_inv(apid, matrix_mdev->matrix.apm) &&
+ test_bit_inv(apqi, matrix_mdev->matrix.aqm))
+ return matrix_mdev;
+ }
+
+ return NULL;
+}
+
+static ssize_t status_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t nchars = 0;
+ struct vfio_ap_queue *q;
+ unsigned long apid, apqi;
+ struct ap_matrix_mdev *matrix_mdev;
+ struct ap_device *apdev = to_ap_dev(dev);
+
+ mutex_lock(&matrix_dev->mdevs_lock);
+ q = dev_get_drvdata(&apdev->device);
+ matrix_mdev = vfio_ap_mdev_for_queue(q);
+
+ /* If the queue is assigned to the matrix mediated device, then
+ * determine whether it is passed through to a guest; otherwise,
+ * indicate that it is unassigned.
+ */
+ if (matrix_mdev) {
+ apid = AP_QID_CARD(q->apqn);
+ apqi = AP_QID_QUEUE(q->apqn);
+ /*
+ * If the queue is passed through to the guest, then indicate
+ * that it is in use; otherwise, indicate that it is
+ * merely assigned to a matrix mediated device.
+ */
+ if (matrix_mdev->kvm &&
+ test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm))
+ nchars = sysfs_emit(buf, "%s\n", AP_QUEUE_IN_USE);
+ else
+ nchars = sysfs_emit(buf, "%s\n", AP_QUEUE_ASSIGNED);
+ } else {
+ nchars = sysfs_emit(buf, "%s\n", AP_QUEUE_UNASSIGNED);
+ }
+
+ mutex_unlock(&matrix_dev->mdevs_lock);
+
+ return nchars;
+}
+
+static DEVICE_ATTR_RO(status);
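/*
 * Illustrative sketch (editor's aside, not part of this patch): reading a
 * queue's status attribute from user space; the APQN in the path is
 * hypothetical.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char st[32];
	ssize_t n;
	int fd = open("/sys/bus/ap/devices/04.0006/status", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	n = read(fd, st, sizeof(st) - 1);
	if (n > 0) {
		st[n] = '\0';
		/* AP_QUEUE_UNASSIGNED, AP_QUEUE_ASSIGNED or AP_QUEUE_IN_USE */
		printf("status: %s", st);
	}
	close(fd);
	return 0;
}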
+
+static struct attribute *vfio_queue_attrs[] = {
+ &dev_attr_status.attr,
+ NULL,
+};
+
+static const struct attribute_group vfio_queue_attr_group = {
+ .attrs = vfio_queue_attrs,
+};
+
+static const struct vfio_device_ops vfio_ap_matrix_dev_ops = {
+ .init = vfio_ap_mdev_init_dev,
+ .open_device = vfio_ap_mdev_open_device,
+ .close_device = vfio_ap_mdev_close_device,
+ .ioctl = vfio_ap_mdev_ioctl,
+ .dma_unmap = vfio_ap_mdev_dma_unmap,
+ .bind_iommufd = vfio_iommufd_emulated_bind,
+ .unbind_iommufd = vfio_iommufd_emulated_unbind,
+ .attach_ioas = vfio_iommufd_emulated_attach_ioas,
+ .detach_ioas = vfio_iommufd_emulated_detach_ioas,
+ .request = vfio_ap_mdev_request
+};
+
+static struct mdev_driver vfio_ap_matrix_driver = {
+ .device_api = VFIO_DEVICE_API_AP_STRING,
+ .max_instances = MAX_ZDEV_ENTRIES_EXT,
+ .driver = {
+ .name = "vfio_ap_mdev",
+ .owner = THIS_MODULE,
+ .mod_name = KBUILD_MODNAME,
+ .dev_groups = vfio_ap_mdev_attr_groups,
+ },
+ .probe = vfio_ap_mdev_probe,
+ .remove = vfio_ap_mdev_remove,
};
int vfio_ap_mdev_register(void)
{
- atomic_set(&matrix_dev->available_instances, MAX_ZDEV_ENTRIES_EXT);
+ int ret;
+
+ ret = mdev_register_driver(&vfio_ap_matrix_driver);
+ if (ret)
+ return ret;
- return mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_ops);
+ matrix_dev->mdev_type.sysfs_name = VFIO_AP_MDEV_TYPE_HWVIRT;
+ matrix_dev->mdev_type.pretty_name = VFIO_AP_MDEV_NAME_HWVIRT;
+ matrix_dev->mdev_types = &matrix_dev->mdev_type;
+ ret = mdev_register_parent(&matrix_dev->parent, &matrix_dev->device,
+ &vfio_ap_matrix_driver,
+ &matrix_dev->mdev_types, 1);
+ if (ret)
+ goto err_driver;
+ return 0;
+
+err_driver:
+ mdev_unregister_driver(&vfio_ap_matrix_driver);
+ return ret;
}
void vfio_ap_mdev_unregister(void)
{
- mdev_unregister_device(&matrix_dev->device);
+ mdev_unregister_parent(&matrix_dev->parent);
+ mdev_unregister_driver(&vfio_ap_matrix_driver);
+}
+
+int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
+{
+ int ret;
+ struct vfio_ap_queue *q;
+ DECLARE_BITMAP(apm_filtered, AP_DEVICES);
+ struct ap_matrix_mdev *matrix_mdev;
+
+ ret = sysfs_create_group(&apdev->device.kobj, &vfio_queue_attr_group);
+ if (ret)
+ return ret;
+
+ q = kzalloc(sizeof(*q), GFP_KERNEL);
+ if (!q) {
+ ret = -ENOMEM;
+ goto err_remove_group;
+ }
+
+ q->apqn = to_ap_queue(&apdev->device)->qid;
+ q->saved_isc = VFIO_AP_ISC_INVALID;
+ memset(&q->reset_status, 0, sizeof(q->reset_status));
+ INIT_WORK(&q->reset_work, apq_reset_check);
+ matrix_mdev = get_update_locks_by_apqn(q->apqn);
+
+ if (matrix_mdev) {
+ vfio_ap_mdev_link_queue(matrix_mdev, q);
+
+ /*
+ * If we're in the process of handling the adding of adapters or
+ * domains to the host's AP configuration, then let the
+ * vfio_ap device driver's on_scan_complete callback filter the
+ * matrix and update the guest's AP configuration after all of
+ * the new queue devices are probed.
+ */
+ if (!bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) ||
+ !bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS))
+ goto done;
+
+ if (vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered)) {
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ reset_queues_for_apids(matrix_mdev, apm_filtered);
+ }
+ }
+
+done:
+ dev_set_drvdata(&apdev->device, q);
+ release_update_locks_for_mdev(matrix_mdev);
+
+ return ret;
+
+err_remove_group:
+ sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
+ return ret;
+}
+
+void vfio_ap_mdev_remove_queue(struct ap_device *apdev)
+{
+ unsigned long apid, apqi;
+ struct vfio_ap_queue *q;
+ struct ap_matrix_mdev *matrix_mdev;
+
+ sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
+ q = dev_get_drvdata(&apdev->device);
+ get_update_locks_for_queue(q);
+ matrix_mdev = q->matrix_mdev;
+ apid = AP_QID_CARD(q->apqn);
+ apqi = AP_QID_QUEUE(q->apqn);
+
+ if (matrix_mdev) {
+ /* If the queue is assigned to the guest's AP configuration */
+ if (test_bit_inv(apid, matrix_mdev->shadow_apcb.apm) &&
+ test_bit_inv(apqi, matrix_mdev->shadow_apcb.aqm)) {
+ /*
+ * Since the queues are defined via a matrix of adapters
+ * and domains, it is not possible to hot unplug a
+ * single queue; so, let's unplug the adapter.
+ */
+ clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+ reset_queues_for_apid(matrix_mdev, apid);
+ goto done;
+ }
+ }
+
+ /*
+ * If the queue is not in the host's AP configuration, then resetting
+ * it will fail with response code 01 (APQN not valid); so, let's make
+ * sure it is in the host's config.
+ */
+ if (test_bit_inv(apid, (unsigned long *)matrix_dev->info.apm) &&
+ test_bit_inv(apqi, (unsigned long *)matrix_dev->info.aqm)) {
+ vfio_ap_mdev_reset_queue(q);
+ flush_work(&q->reset_work);
+ }
+
+done:
+ if (matrix_mdev)
+ vfio_ap_unlink_queue_fr_mdev(q);
+
+ dev_set_drvdata(&apdev->device, NULL);
+ kfree(q);
+ release_update_locks_for_mdev(matrix_mdev);
+}
+
+/**
+ * vfio_ap_mdev_resource_in_use: check whether any of a set of APQNs is
+ * assigned to a mediated device under the control
+ * of the vfio_ap device driver.
+ *
+ * @apm: a bitmap specifying a set of APIDs comprising the APQNs to check.
+ * @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check.
+ *
+ * Return:
+ * * -EADDRINUSE if one or more of the APQNs specified via @apm/@aqm are
+ * assigned to a mediated device under the control of the vfio_ap
+ * device driver.
+ * * Otherwise, return 0.
+ */
+int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm)
+{
+ int ret;
+
+ mutex_lock(&matrix_dev->guests_lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+ ret = vfio_ap_mdev_verify_no_sharing(NULL, apm, aqm);
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ mutex_unlock(&matrix_dev->guests_lock);
+
+ return ret;
+}
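
The check takes guests_lock before mdevs_lock, following the driver's
documented lock order. A sketch of the kind of bus-level caller this export
is meant for; the caller itself is hypothetical, only the in-use check is
from this patch:

	/* Hypothetical caller: veto a host mask update while an mdev holds an APQN. */
	static int may_reassign_apqns(unsigned long *apm, unsigned long *aqm)
	{
		int rc = vfio_ap_mdev_resource_in_use(apm, aqm);

		if (rc == -EADDRINUSE)
			pr_warn("APQN(s) still in use by a vfio_ap mdev\n");

		return rc;	/* 0 means the APQNs are free to reassign */
	}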
+
+/**
+ * vfio_ap_mdev_hot_unplug_cfg - hot unplug the adapters, domains and control
+ * domains that have been removed from the host's
+ * AP configuration from a guest.
+ *
+ * @matrix_mdev: an ap_matrix_mdev object attached to a KVM guest.
+ * @aprem: the adapters that have been removed from the host's AP configuration
+ * @aqrem: the domains that have been removed from the host's AP configuration
+ * @cdrem: the control domains that have been removed from the host's AP
+ * configuration.
+ */
+static void vfio_ap_mdev_hot_unplug_cfg(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long *aprem,
+ unsigned long *aqrem,
+ unsigned long *cdrem)
+{
+ int do_hotplug = 0;
+
+ if (!bitmap_empty(aprem, AP_DEVICES)) {
+ do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.apm,
+ matrix_mdev->shadow_apcb.apm,
+ aprem, AP_DEVICES);
+ }
+
+ if (!bitmap_empty(aqrem, AP_DOMAINS)) {
+ do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.aqm,
+ matrix_mdev->shadow_apcb.aqm,
+ aqrem, AP_DOMAINS);
+ }
+
+ if (!bitmap_empty(cdrem, AP_DOMAINS))
+ do_hotplug |= bitmap_andnot(matrix_mdev->shadow_apcb.adm,
+ matrix_mdev->shadow_apcb.adm,
+ cdrem, AP_DOMAINS);
+
+ if (do_hotplug)
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+}
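
A detail worth noting: bitmap_andnot() here, and bitmap_and() in the caller
below, return true when the destination bitmap ends up non-empty, which is
what drives the do_hotplug/do_remove accumulators. A freestanding sketch of
the same idiom, using the generic bit helpers rather than the s390 *_inv
variants the driver itself uses (the bit numbers are made up):

	#include <linux/bitmap.h>

	#define NBITS 256

	/* Returns true when at least one assigned adapter was removed. */
	static bool removed_assigned_adapters(void)
	{
		DECLARE_BITMAP(assigned, NBITS);	/* e.g. the mdev's APM  */
		DECLARE_BITMAP(removed, NBITS);		/* gone from the host   */
		DECLARE_BITMAP(hit, NBITS);

		bitmap_zero(assigned, NBITS);
		bitmap_zero(removed, NBITS);
		set_bit(4, assigned);
		set_bit(4, removed);
		set_bit(7, removed);

		/* hit = assigned & removed; true iff hit is non-empty */
		return bitmap_and(hit, assigned, removed, NBITS);
	}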
+
+/**
+ * vfio_ap_mdev_cfg_remove - determines which guests are using the adapters,
+ * domains and control domains that have been removed
+ * from the host AP configuration and unplugs them
+ * from those guests.
+ *
+ * @ap_remove: bitmap specifying which adapters have been removed from the host
+ * config.
+ * @aq_remove: bitmap specifying which domains have been removed from the host
+ * config.
+ * @cd_remove: bitmap specifying which control domains have been removed from
+ * the host config.
+ */
+static void vfio_ap_mdev_cfg_remove(unsigned long *ap_remove,
+ unsigned long *aq_remove,
+ unsigned long *cd_remove)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+ DECLARE_BITMAP(aprem, AP_DEVICES);
+ DECLARE_BITMAP(aqrem, AP_DOMAINS);
+ DECLARE_BITMAP(cdrem, AP_DOMAINS);
+ int do_remove = 0;
+
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ mutex_lock(&matrix_mdev->kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+
+ do_remove |= bitmap_and(aprem, ap_remove,
+ matrix_mdev->matrix.apm,
+ AP_DEVICES);
+ do_remove |= bitmap_and(aqrem, aq_remove,
+ matrix_mdev->matrix.aqm,
+ AP_DOMAINS);
+ do_remove |= bitmap_and(cdrem, cd_remove,
+ matrix_mdev->matrix.adm,
+ AP_DOMAINS);
+
+ if (do_remove)
+ vfio_ap_mdev_hot_unplug_cfg(matrix_mdev, aprem, aqrem,
+ cdrem);
+
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ mutex_unlock(&matrix_mdev->kvm->lock);
+ }
+}
+
+/**
+ * vfio_ap_mdev_on_cfg_remove - responds to the removal of adapters, domains and
+ * control domains from the host AP configuration
+ * by unplugging them from the guests that are
+ * using them.
+ * @cur_config_info: the current host AP configuration information
+ * @prev_config_info: the previous host AP configuration information
+ */
+static void vfio_ap_mdev_on_cfg_remove(struct ap_config_info *cur_config_info,
+ struct ap_config_info *prev_config_info)
+{
+ int do_remove;
+ DECLARE_BITMAP(aprem, AP_DEVICES);
+ DECLARE_BITMAP(aqrem, AP_DOMAINS);
+ DECLARE_BITMAP(cdrem, AP_DOMAINS);
+
+ do_remove = bitmap_andnot(aprem,
+ (unsigned long *)prev_config_info->apm,
+ (unsigned long *)cur_config_info->apm,
+ AP_DEVICES);
+ do_remove |= bitmap_andnot(aqrem,
+ (unsigned long *)prev_config_info->aqm,
+ (unsigned long *)cur_config_info->aqm,
+ AP_DOMAINS);
+ do_remove |= bitmap_andnot(cdrem,
+ (unsigned long *)prev_config_info->adm,
+ (unsigned long *)cur_config_info->adm,
+ AP_DOMAINS);
+
+ if (do_remove)
+ vfio_ap_mdev_cfg_remove(aprem, aqrem, cdrem);
+}
+
+/**
+ * vfio_ap_filter_apid_by_qtype: filter APIDs from an AP mask for adapters that
+ * are older than AP type 10 (CEX4).
+ * @apm: a bitmap of the APIDs to examine
+ * @aqm: a bitmap of the APQIs of the queues to query for the AP type.
+ */
+static void vfio_ap_filter_apid_by_qtype(unsigned long *apm, unsigned long *aqm)
+{
+ bool apid_cleared;
+ struct ap_queue_status status;
+ unsigned long apid, apqi;
+ struct ap_tapq_hwinfo info;
+
+ for_each_set_bit_inv(apid, apm, AP_DEVICES) {
+ apid_cleared = false;
+
+ for_each_set_bit_inv(apqi, aqm, AP_DOMAINS) {
+ status = ap_test_queue(AP_MKQID(apid, apqi), 1, &info);
+ switch (status.response_code) {
+ /*
+ * According to the architecture, in each case
+ * below the queue's info is filled in.
+ */
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ case AP_RESPONSE_BUSY:
+ /*
+ * The vfio_ap device driver only
+ * supports CEX4 and newer adapters, so
+ * remove the APID if the adapter is
+ * older than a CEX4.
+ */
+ if (info.at < AP_DEVICE_TYPE_CEX4) {
+ clear_bit_inv(apid, apm);
+ apid_cleared = true;
+ }
+
+ break;
+
+ default:
+ /*
+ * If we don't know the adapter type,
+ * clear its APID since it can't be
+ * determined whether the vfio_ap
+ * device driver supports it.
+ */
+ clear_bit_inv(apid, apm);
+ apid_cleared = true;
+ break;
+ }
+
+ /*
+ * If we've already cleared the APID from the apm, there
+ * is no need to continue examining the remaining AP
+ * queues to determine the type of the adapter; move on
+ * to the next APID.
+ */
+ if (apid_cleared)
+ break;
+ }
+ }
+}
+
+/**
+ * vfio_ap_mdev_cfg_add - store bitmaps specifying the adapters, domains and
+ * control domains that have been added to the host's
+ * AP configuration for each matrix mdev to which they
+ * are assigned.
+ *
+ * @apm_add: a bitmap specifying the adapters that have been added to the AP
+ * configuration.
+ * @aqm_add: a bitmap specifying the domains that have been added to the AP
+ * configuration.
+ * @adm_add: a bitmap specifying the control domains that have been added to the
+ * AP configuration.
+ */
+static void vfio_ap_mdev_cfg_add(unsigned long *apm_add, unsigned long *aqm_add,
+ unsigned long *adm_add)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+
+ if (list_empty(&matrix_dev->mdev_list))
+ return;
+
+ vfio_ap_filter_apid_by_qtype(apm_add, aqm_add);
+
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ bitmap_and(matrix_mdev->apm_add,
+ matrix_mdev->matrix.apm, apm_add, AP_DEVICES);
+ bitmap_and(matrix_mdev->aqm_add,
+ matrix_mdev->matrix.aqm, aqm_add, AP_DOMAINS);
+ bitmap_and(matrix_mdev->adm_add,
+ matrix_mdev->matrix.adm, adm_add, AP_DOMAINS);
+ }
+}
+
+/**
+ * vfio_ap_mdev_on_cfg_add - responds to the addition of adapters, domains and
+ * control domains to the host AP configuration
+ * by updating the bitmaps that specify what adapters,
+ * domains and control domains have been added so they
+ * can be hot plugged into the guest when the AP bus
+ * scan completes (see the vfio_ap_on_scan_complete
+ * function).
+ * @cur_config_info: the current AP configuration information
+ * @prev_config_info: the previous AP configuration information
+ */
+static void vfio_ap_mdev_on_cfg_add(struct ap_config_info *cur_config_info,
+ struct ap_config_info *prev_config_info)
+{
+ bool do_add;
+ DECLARE_BITMAP(apm_add, AP_DEVICES);
+ DECLARE_BITMAP(aqm_add, AP_DOMAINS);
+ DECLARE_BITMAP(adm_add, AP_DOMAINS);
+
+ do_add = bitmap_andnot(apm_add,
+ (unsigned long *)cur_config_info->apm,
+ (unsigned long *)prev_config_info->apm,
+ AP_DEVICES);
+ do_add |= bitmap_andnot(aqm_add,
+ (unsigned long *)cur_config_info->aqm,
+ (unsigned long *)prev_config_info->aqm,
+ AP_DOMAINS);
+ do_add |= bitmap_andnot(adm_add,
+ (unsigned long *)cur_config_info->adm,
+ (unsigned long *)prev_config_info->adm,
+ AP_DOMAINS);
+
+ if (do_add)
+ vfio_ap_mdev_cfg_add(apm_add, aqm_add, adm_add);
+}
+
+/**
+ * vfio_ap_on_cfg_changed - handles notification of changes to the host AP
+ * configuration.
+ *
+ * @cur_cfg_info: the current host AP configuration
+ * @prev_cfg_info: the previous host AP configuration
+ */
+void vfio_ap_on_cfg_changed(struct ap_config_info *cur_cfg_info,
+ struct ap_config_info *prev_cfg_info)
+{
+ if (!cur_cfg_info || !prev_cfg_info)
+ return;
+
+ mutex_lock(&matrix_dev->guests_lock);
+
+ vfio_ap_mdev_on_cfg_remove(cur_cfg_info, prev_cfg_info);
+ vfio_ap_mdev_on_cfg_add(cur_cfg_info, prev_cfg_info);
+ memcpy(&matrix_dev->info, cur_cfg_info, sizeof(*cur_cfg_info));
+
+ mutex_unlock(&matrix_dev->guests_lock);
+}
+
+static void vfio_ap_mdev_hot_plug_cfg(struct ap_matrix_mdev *matrix_mdev)
+{
+ DECLARE_BITMAP(apm_filtered, AP_DEVICES);
+ bool filter_domains, filter_adapters, filter_cdoms, do_hotplug = false;
+
+ mutex_lock(&matrix_mdev->kvm->lock);
+ mutex_lock(&matrix_dev->mdevs_lock);
+
+ filter_adapters = bitmap_intersects(matrix_mdev->matrix.apm,
+ matrix_mdev->apm_add, AP_DEVICES);
+ filter_domains = bitmap_intersects(matrix_mdev->matrix.aqm,
+ matrix_mdev->aqm_add, AP_DOMAINS);
+ filter_cdoms = bitmap_intersects(matrix_mdev->matrix.adm,
+ matrix_mdev->adm_add, AP_DOMAINS);
+
+ if (filter_adapters || filter_domains)
+ do_hotplug = vfio_ap_mdev_filter_matrix(matrix_mdev, apm_filtered);
+
+ if (filter_cdoms)
+ do_hotplug |= vfio_ap_mdev_filter_cdoms(matrix_mdev);
+
+ if (do_hotplug)
+ vfio_ap_mdev_update_guest_apcb(matrix_mdev);
+
+ reset_queues_for_apids(matrix_mdev, apm_filtered);
+
+ mutex_unlock(&matrix_dev->mdevs_lock);
+ mutex_unlock(&matrix_mdev->kvm->lock);
+}
+
+void vfio_ap_on_scan_complete(struct ap_config_info *new_config_info,
+ struct ap_config_info *old_config_info)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+
+ mutex_lock(&matrix_dev->guests_lock);
+
+ list_for_each_entry(matrix_mdev, &matrix_dev->mdev_list, node) {
+ if (bitmap_empty(matrix_mdev->apm_add, AP_DEVICES) &&
+ bitmap_empty(matrix_mdev->aqm_add, AP_DOMAINS) &&
+ bitmap_empty(matrix_mdev->adm_add, AP_DOMAINS))
+ continue;
+
+ vfio_ap_mdev_hot_plug_cfg(matrix_mdev);
+ bitmap_clear(matrix_mdev->apm_add, 0, AP_DEVICES);
+ bitmap_clear(matrix_mdev->aqm_add, 0, AP_DOMAINS);
+ bitmap_clear(matrix_mdev->adm_add, 0, AP_DOMAINS);
+ }
+
+ mutex_unlock(&matrix_dev->guests_lock);
}
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
index 5675492233c7..9bff666b0b35 100644
--- a/drivers/s390/crypto/vfio_ap_private.h
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -4,6 +4,7 @@
*
* Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
* Halil Pasic <pasic@linux.ibm.com>
+ * Pierre Morel <pmorel@linux.ibm.com>
*
* Copyright IBM Corp. 2018
*/
@@ -12,10 +13,13 @@
#define _VFIO_AP_PRIVATE_H_
#include <linux/types.h>
-#include <linux/device.h>
#include <linux/mdev.h>
#include <linux/delay.h>
+#include <linux/eventfd.h>
#include <linux/mutex.h>
+#include <linux/kvm_host.h>
+#include <linux/vfio.h>
+#include <linux/hashtable.h>
#include "ap_bus.h"
@@ -23,66 +27,142 @@
#define VFIO_AP_DRV_NAME "vfio_ap"
/**
- * ap_matrix_dev - the AP matrix device structure
+ * struct ap_matrix_dev - Contains the data for the matrix device.
+ *
* @device: generic device structure associated with the AP matrix device
- * @available_instances: number of mediated matrix devices that can be created
* @info: the struct containing the output from the PQAP(QCI) instruction
- * mdev_list: the list of mediated matrix devices created
- * lock: mutex for locking the AP matrix device. This lock will be
+ * @mdev_list: the list of mediated matrix devices created
+ * @mdevs_lock: mutex for locking the AP matrix device. This lock will be
* taken every time we fiddle with state managed by the vfio_ap
* driver, be it using @mdev_list or writing the state of a
* single ap_matrix_mdev device. It's quite coarse but we don't
* expect much contention.
+ * @vfio_ap_drv: the vfio_ap device driver
+ * @guests_lock: mutex for controlling access to a guest that is using AP
+ * devices passed through by the vfio_ap device driver. This lock
+ * will be taken when the AP devices are plugged into or unplugged
+ * from a guest, and when an ap_matrix_mdev device is added to or
+ * removed from @mdev_list or the list is iterated.
*/
struct ap_matrix_dev {
struct device device;
- atomic_t available_instances;
struct ap_config_info info;
struct list_head mdev_list;
- struct mutex lock;
+ struct mutex mdevs_lock; /* serializes access to each ap_matrix_mdev */
+ struct ap_driver *vfio_ap_drv;
+ struct mutex guests_lock; /* serializes access to each KVM guest */
+ struct mdev_parent parent;
+ struct mdev_type mdev_type;
+ struct mdev_type *mdev_types;
};
extern struct ap_matrix_dev *matrix_dev;
/**
- * The AP matrix is comprised of three bit masks identifying the adapters,
- * queues (domains) and control domains that belong to an AP matrix. The bits i
- * each mask, from least significant to most significant bit, correspond to IDs
- * 0 to 255. When a bit is set, the corresponding ID belongs to the matrix.
+ * struct ap_matrix - matrix of adapters, domains and control domains
*
* @apm_max: max adapter number in @apm
- * @apm identifies the AP adapters in the matrix
+ * @apm: identifies the AP adapters in the matrix
* @aqm_max: max domain number in @aqm
- * @aqm identifies the AP queues (domains) in the matrix
+ * @aqm: identifies the AP queues (domains) in the matrix
* @adm_max: max domain number in @adm
- * @adm identifies the AP control domains in the matrix
+ * @adm: identifies the AP control domains in the matrix
+ *
+ * The AP matrix is comprised of three bit masks identifying the adapters,
+ * queues (domains) and control domains that belong to an AP matrix. The bits in
+ * each mask, from left to right, correspond to IDs 0 to 255. When a bit is
+ * set, the corresponding ID belongs to the matrix.
*/
struct ap_matrix {
unsigned long apm_max;
- DECLARE_BITMAP(apm, 256);
+ DECLARE_BITMAP(apm, AP_DEVICES);
unsigned long aqm_max;
- DECLARE_BITMAP(aqm, 256);
+ DECLARE_BITMAP(aqm, AP_DOMAINS);
unsigned long adm_max;
- DECLARE_BITMAP(adm, 256);
+ DECLARE_BITMAP(adm, AP_DOMAINS);
+};
+
+/**
+ * struct ap_queue_table - a table of queue objects.
+ *
+ * @queues: a hashtable of queues (struct vfio_ap_queue).
+ */
+struct ap_queue_table {
+ DECLARE_HASHTABLE(queues, 8);
};
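
The 8 here sizes the table at 2^8 buckets. A plausible link/lookup pair,
assuming queues are keyed by their APQN via the mdev_qnode member (the
helper names are made up; the table must have been set up with hash_init()
before first use):

	#include <linux/hashtable.h>

	/* Link a queue into an mdev's queue table, keyed by its APQN. */
	static void qtable_link(struct ap_queue_table *qtable,
				struct vfio_ap_queue *q)
	{
		hash_add(qtable->queues, &q->mdev_qnode, q->apqn);
	}

	/* Look a queue up again by APQN. */
	static struct vfio_ap_queue *qtable_find(struct ap_queue_table *qtable,
						 int apqn)
	{
		struct vfio_ap_queue *q;

		hash_for_each_possible(qtable->queues, q, mdev_qnode, apqn)
			if (q->apqn == apqn)
				return q;

		return NULL;
	}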
/**
- * struct ap_matrix_mdev - the mediated matrix device structure
- * @list: allows the ap_matrix_mdev struct to be added to a list
+ * struct ap_matrix_mdev - Contains the data associated with a matrix mediated
+ * device.
+ * @vdev: the vfio device
+ * @node: allows the ap_matrix_mdev struct to be added to a list
* @matrix: the adapters, usage domains and control domains assigned to the
* mediated matrix device.
- * @group_notifier: notifier block used for specifying callback function for
- * handling the VFIO_GROUP_NOTIFY_SET_KVM event
+ * @shadow_apcb: the shadow copy of the APCB field of the KVM guest's CRYCB
* @kvm: the struct holding guest's state
+ * @pqap_hook: the function pointer to the interception handler for the
+ * PQAP(AQIC) instruction.
+ * @mdev: the mediated device
+ * @qtable: table of queues (struct vfio_ap_queue) assigned to the mdev
+ * @req_trigger: eventfd ctx for signaling userspace to return a device
+ * @cfg_chg_trigger: eventfd ctx to signal AP config changed to userspace
+ * @apm_add: bitmap of APIDs added to the host's AP configuration
+ * @aqm_add: bitmap of APQIs added to the host's AP configuration
+ * @adm_add: bitmap of control domain numbers added to the host's AP
+ * configuration
*/
struct ap_matrix_mdev {
+ struct vfio_device vdev;
struct list_head node;
struct ap_matrix matrix;
- struct notifier_block group_notifier;
+ struct ap_matrix shadow_apcb;
struct kvm *kvm;
+ crypto_hook pqap_hook;
+ struct mdev_device *mdev;
+ struct ap_queue_table qtable;
+ struct eventfd_ctx *req_trigger;
+ struct eventfd_ctx *cfg_chg_trigger;
+ DECLARE_BITMAP(apm_add, AP_DEVICES);
+ DECLARE_BITMAP(aqm_add, AP_DOMAINS);
+ DECLARE_BITMAP(adm_add, AP_DOMAINS);
+};
+
+/**
+ * struct vfio_ap_queue - contains the data associated with a queue bound to the
+ * vfio_ap device driver
+ * @matrix_mdev: the matrix mediated device
+ * @saved_iova: the notification indicator byte (nib) address
+ * @apqn: the APQN of the AP queue device
+ * @saved_isc: the guest ISC registered with the GIB interface
+ * @mdev_qnode: allows the vfio_ap_queue struct to be added to a hashtable
+ * @reset_qnode: allows the vfio_ap_queue struct to be added to a list of queues
+ * that need to be reset
+ * @reset_status: the status from the last reset of the queue
+ * @reset_work: work to wait for queue reset to complete
+ */
+struct vfio_ap_queue {
+ struct ap_matrix_mdev *matrix_mdev;
+ dma_addr_t saved_iova;
+ int apqn;
+#define VFIO_AP_ISC_INVALID 0xff
+ unsigned char saved_isc;
+ struct hlist_node mdev_qnode;
+ struct list_head reset_qnode;
+ struct ap_queue_status reset_status;
+ struct work_struct reset_work;
};
-extern int vfio_ap_mdev_register(void);
-extern void vfio_ap_mdev_unregister(void);
+int vfio_ap_mdev_register(void);
+void vfio_ap_mdev_unregister(void);
+
+int vfio_ap_mdev_probe_queue(struct ap_device *queue);
+void vfio_ap_mdev_remove_queue(struct ap_device *queue);
+
+int vfio_ap_mdev_resource_in_use(unsigned long *apm, unsigned long *aqm);
+
+void vfio_ap_on_cfg_changed(struct ap_config_info *new_config_info,
+ struct ap_config_info *old_config_info);
+void vfio_ap_on_scan_complete(struct ap_config_info *new_config_info,
+ struct ap_config_info *old_config_info);
#endif /* _VFIO_AP_PRIVATE_H_ */
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index eb93c2d27d0a..7a3b99f065f2 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -12,12 +12,14 @@
* Multiple device nodes: Harald Freudenberger <freude@linux.ibm.com>
*/
+#define pr_fmt(fmt) "zcrypt: " fmt
+
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
-#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
@@ -25,6 +27,7 @@
#include <linux/debugfs.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
+#include <linux/capability.h>
#include <asm/debug.h>
#define CREATE_TRACE_POINTS
@@ -35,6 +38,8 @@
#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"
+#include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
/*
* Module description.
@@ -44,47 +49,37 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
"Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
+unsigned int zcrypt_mempool_threshold = 5;
+module_param_named(mempool_threshold, zcrypt_mempool_threshold, uint, 0440);
+MODULE_PARM_DESC(mempool_threshold, "CCA and EP11 request/reply mempool minimal items (min: 1)");
+
/*
* zcrypt tracepoint functions
*/
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);
-static int zcrypt_hwrng_seed = 1;
-module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, 0440);
-MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");
-
DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);
-int zcrypt_device_count;
static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
-static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);
-
-atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
-EXPORT_SYMBOL(zcrypt_rescan_req);
static LIST_HEAD(zcrypt_ops_list);
/* Zcrypt related debug feature stuff. */
debug_info_t *zcrypt_dbf_info;
-/**
+/*
* Process a rescan of the transport layer.
- *
- * Returns 1, if the rescan has been processed, otherwise 0.
+ * Runs a synchronous AP bus rescan.
+ * Returns true if something has changed (for example the
+ * bus scan has found and built up new devices) and a retry
+ * is worthwhile. Otherwise false is returned, meaning
+ * there were no changes at the AP bus level.
*/
-static inline int zcrypt_process_rescan(void)
+static inline bool zcrypt_process_rescan(void)
{
- if (atomic_read(&zcrypt_rescan_req)) {
- atomic_set(&zcrypt_rescan_req, 0);
- atomic_inc(&zcrypt_rescan_count);
- ap_bus_force_rescan();
- ZCRYPT_DBF(DBF_INFO, "rescan count=%07d\n",
- atomic_inc_return(&zcrypt_rescan_count));
- return 1;
- }
- return 0;
+ return ap_bus_force_rescan();
}
void zcrypt_msgtype_register(struct zcrypt_ops *zops)
@@ -102,7 +97,7 @@ struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
struct zcrypt_ops *zops;
list_for_each_entry(zops, &zcrypt_ops_list, list)
- if ((zops->variant == variant) &&
+ if (zops->variant == variant &&
(!strncmp(zops->name, name, sizeof(zops->name))))
return zops;
return NULL;
@@ -113,11 +108,13 @@ EXPORT_SYMBOL(zcrypt_msgtype);
* Multi device nodes extension functions.
*/
-#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
-
struct zcdn_device;
-static struct class *zcrypt_class;
+static void zcdn_device_release(struct device *dev);
+static const struct class zcrypt_class = {
+ .name = ZCRYPT_NAME,
+ .dev_release = zcdn_device_release,
+};
static dev_t zcrypt_devt;
static struct cdev zcrypt_cdev;
@@ -133,18 +130,6 @@ struct zcdn_device {
static int zcdn_create(const char *name);
static int zcdn_destroy(const char *name);
-/* helper function, matches the name for find_zcdndev_by_name() */
-static int __match_zcdn_name(struct device *dev, const void *data)
-{
- return strcmp(dev_name(dev), (const char *)data) == 0;
-}
-
-/* helper function, matches the devt value for find_zcdndev_by_devt() */
-static int __match_zcdn_devt(struct device *dev, const void *data)
-{
- return dev->devt == *((dev_t *) data);
-}
-
/*
* Find zcdn device by name.
* Returns reference to the zcdn device which needs to be released
@@ -152,10 +137,7 @@ static int __match_zcdn_devt(struct device *dev, const void *data)
*/
static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
{
- struct device *dev =
- class_find_device(zcrypt_class, NULL,
- (void *) name,
- __match_zcdn_name);
+ struct device *dev = class_find_device_by_name(&zcrypt_class, name);
return dev ? to_zcdn_dev(dev) : NULL;
}
@@ -167,10 +149,7 @@ static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
*/
static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt)
{
- struct device *dev =
- class_find_device(zcrypt_class, NULL,
- (void *) &devt,
- __match_zcdn_devt);
+ struct device *dev = class_find_device_by_devt(&zcrypt_class, devt);
return dev ? to_zcdn_dev(dev) : NULL;
}
@@ -179,25 +158,20 @@ static ssize_t ioctlmask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- int i, rc;
struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+ int i, n;
- if (mutex_lock_interruptible(&ap_perms_mutex))
+ if (mutex_lock_interruptible(&ap_attr_mutex))
return -ERESTARTSYS;
- buf[0] = '0';
- buf[1] = 'x';
+ n = sysfs_emit(buf, "0x");
for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++)
- snprintf(buf + 2 + 2 * i * sizeof(long),
- PAGE_SIZE - 2 - 2 * i * sizeof(long),
- "%016lx", zcdndev->perms.ioctlm[i]);
- buf[2 + 2 * i * sizeof(long)] = '\n';
- buf[2 + 2 * i * sizeof(long) + 1] = '\0';
- rc = 2 + 2 * i * sizeof(long) + 1;
+ n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.ioctlm[i]);
+ n += sysfs_emit_at(buf, n, "\n");
- mutex_unlock(&ap_perms_mutex);
+ mutex_unlock(&ap_attr_mutex);
- return rc;
+ return n;
}
static ssize_t ioctlmask_store(struct device *dev,
@@ -208,7 +182,7 @@ static ssize_t ioctlmask_store(struct device *dev,
struct zcdn_device *zcdndev = to_zcdn_dev(dev);
rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm,
- AP_IOCTLS, &ap_perms_mutex);
+ AP_IOCTLS, &ap_attr_mutex);
if (rc)
return rc;
@@ -221,25 +195,20 @@ static ssize_t apmask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- int i, rc;
struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+ int i, n;
- if (mutex_lock_interruptible(&ap_perms_mutex))
+ if (mutex_lock_interruptible(&ap_attr_mutex))
return -ERESTARTSYS;
- buf[0] = '0';
- buf[1] = 'x';
+ n = sysfs_emit(buf, "0x");
for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++)
- snprintf(buf + 2 + 2 * i * sizeof(long),
- PAGE_SIZE - 2 - 2 * i * sizeof(long),
- "%016lx", zcdndev->perms.apm[i]);
- buf[2 + 2 * i * sizeof(long)] = '\n';
- buf[2 + 2 * i * sizeof(long) + 1] = '\0';
- rc = 2 + 2 * i * sizeof(long) + 1;
+ n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.apm[i]);
+ n += sysfs_emit_at(buf, n, "\n");
- mutex_unlock(&ap_perms_mutex);
+ mutex_unlock(&ap_attr_mutex);
- return rc;
+ return n;
}
static ssize_t apmask_store(struct device *dev,
@@ -250,7 +219,7 @@ static ssize_t apmask_store(struct device *dev,
struct zcdn_device *zcdndev = to_zcdn_dev(dev);
rc = ap_parse_mask_str(buf, zcdndev->perms.apm,
- AP_DEVICES, &ap_perms_mutex);
+ AP_DEVICES, &ap_attr_mutex);
if (rc)
return rc;
@@ -263,25 +232,20 @@ static ssize_t aqmask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- int i, rc;
struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+ int i, n;
- if (mutex_lock_interruptible(&ap_perms_mutex))
+ if (mutex_lock_interruptible(&ap_attr_mutex))
return -ERESTARTSYS;
- buf[0] = '0';
- buf[1] = 'x';
+ n = sysfs_emit(buf, "0x");
for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++)
- snprintf(buf + 2 + 2 * i * sizeof(long),
- PAGE_SIZE - 2 - 2 * i * sizeof(long),
- "%016lx", zcdndev->perms.aqm[i]);
- buf[2 + 2 * i * sizeof(long)] = '\n';
- buf[2 + 2 * i * sizeof(long) + 1] = '\0';
- rc = 2 + 2 * i * sizeof(long) + 1;
+ n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.aqm[i]);
+ n += sysfs_emit_at(buf, n, "\n");
- mutex_unlock(&ap_perms_mutex);
+ mutex_unlock(&ap_attr_mutex);
- return rc;
+ return n;
}
static ssize_t aqmask_store(struct device *dev,
@@ -292,7 +256,7 @@ static ssize_t aqmask_store(struct device *dev,
struct zcdn_device *zcdndev = to_zcdn_dev(dev);
rc = ap_parse_mask_str(buf, zcdndev->perms.aqm,
- AP_DOMAINS, &ap_perms_mutex);
+ AP_DOMAINS, &ap_attr_mutex);
if (rc)
return rc;
@@ -301,10 +265,48 @@ static ssize_t aqmask_store(struct device *dev,
static DEVICE_ATTR_RW(aqmask);
+static ssize_t admask_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+ int i, n;
+
+ if (mutex_lock_interruptible(&ap_attr_mutex))
+ return -ERESTARTSYS;
+
+ n = sysfs_emit(buf, "0x");
+ for (i = 0; i < sizeof(zcdndev->perms.adm) / sizeof(long); i++)
+ n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.adm[i]);
+ n += sysfs_emit_at(buf, n, "\n");
+
+ mutex_unlock(&ap_attr_mutex);
+
+ return n;
+}
+
+static ssize_t admask_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+ struct zcdn_device *zcdndev = to_zcdn_dev(dev);
+
+ rc = ap_parse_mask_str(buf, zcdndev->perms.adm,
+ AP_DOMAINS, &ap_attr_mutex);
+ if (rc)
+ return rc;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(admask);
+
static struct attribute *zcdn_dev_attrs[] = {
&dev_attr_ioctlmask.attr,
&dev_attr_apmask.attr,
&dev_attr_aqmask.attr,
+ &dev_attr_admask.attr,
NULL
};
@@ -317,15 +319,14 @@ static const struct attribute_group *zcdn_dev_attr_groups[] = {
NULL
};
-static ssize_t zcdn_create_store(struct class *class,
- struct class_attribute *attr,
+static ssize_t zcdn_create_store(const struct class *class,
+ const struct class_attribute *attr,
const char *buf, size_t count)
{
int rc;
char name[ZCDN_MAX_NAME];
- strncpy(name, skip_spaces(buf), sizeof(name));
- name[sizeof(name) - 1] = '\0';
+ strscpy(name, skip_spaces(buf), sizeof(name));
rc = zcdn_create(strim(name));
@@ -335,15 +336,14 @@ static ssize_t zcdn_create_store(struct class *class,
static const struct class_attribute class_attr_zcdn_create =
__ATTR(create, 0600, NULL, zcdn_create_store);
-static ssize_t zcdn_destroy_store(struct class *class,
- struct class_attribute *attr,
+static ssize_t zcdn_destroy_store(const struct class *class,
+ const struct class_attribute *attr,
const char *buf, size_t count)
{
int rc;
char name[ZCDN_MAX_NAME];
- strncpy(name, skip_spaces(buf), sizeof(name));
- name[sizeof(name) - 1] = '\0';
+ strscpy(name, skip_spaces(buf), sizeof(name));
rc = zcdn_destroy(strim(name));
@@ -357,8 +357,8 @@ static void zcdn_device_release(struct device *dev)
{
struct zcdn_device *zcdndev = to_zcdn_dev(dev);
- ZCRYPT_DBF(DBF_INFO, "releasing zcdn device %d:%d\n",
- MAJOR(dev->devt), MINOR(dev->devt));
+ ZCRYPT_DBF_INFO("%s releasing zcdn device %d:%d\n",
+ __func__, MAJOR(dev->devt), MINOR(dev->devt));
kfree(zcdndev);
}
@@ -367,10 +367,9 @@ static int zcdn_create(const char *name)
{
dev_t devt;
int i, rc = 0;
- char nodename[ZCDN_MAX_NAME];
struct zcdn_device *zcdndev;
- if (mutex_lock_interruptible(&ap_perms_mutex))
+ if (mutex_lock_interruptible(&ap_attr_mutex))
return -ERESTARTSYS;
/* check if device node with this name already exists */
@@ -404,17 +403,15 @@ static int zcdn_create(const char *name)
goto unlockout;
}
zcdndev->device.release = zcdn_device_release;
- zcdndev->device.class = zcrypt_class;
+ zcdndev->device.class = &zcrypt_class;
zcdndev->device.devt = devt;
zcdndev->device.groups = zcdn_dev_attr_groups;
if (name[0])
- strncpy(nodename, name, sizeof(nodename));
+ rc = dev_set_name(&zcdndev->device, "%s", name);
else
- snprintf(nodename, sizeof(nodename),
- ZCRYPT_NAME "_%d", (int) MINOR(devt));
- nodename[sizeof(nodename)-1] = '\0';
- if (dev_set_name(&zcdndev->device, nodename)) {
- rc = -EINVAL;
+ rc = dev_set_name(&zcdndev->device, ZCRYPT_NAME "_%d", (int)MINOR(devt));
+ if (rc) {
+ kfree(zcdndev);
goto unlockout;
}
rc = device_register(&zcdndev->device);
@@ -423,11 +420,11 @@ static int zcdn_create(const char *name)
goto unlockout;
}
- ZCRYPT_DBF(DBF_INFO, "created zcdn device %d:%d\n",
- MAJOR(devt), MINOR(devt));
+ ZCRYPT_DBF_INFO("%s created zcdn device %d:%d\n",
+ __func__, MAJOR(devt), MINOR(devt));
unlockout:
- mutex_unlock(&ap_perms_mutex);
+ mutex_unlock(&ap_attr_mutex);
return rc;
}
@@ -436,7 +433,7 @@ static int zcdn_destroy(const char *name)
int rc = 0;
struct zcdn_device *zcdndev;
- if (mutex_lock_interruptible(&ap_perms_mutex))
+ if (mutex_lock_interruptible(&ap_attr_mutex))
return -ERESTARTSYS;
/* try to find this zcdn device */
@@ -454,7 +451,7 @@ static int zcdn_destroy(const char *name)
device_unregister(&zcdndev->device);
unlockout:
- mutex_unlock(&ap_perms_mutex);
+ mutex_unlock(&ap_attr_mutex);
return rc;
}
@@ -464,7 +461,7 @@ static void zcdn_destroy_all(void)
dev_t devt;
struct zcdn_device *zcdndev;
- mutex_lock(&ap_perms_mutex);
+ mutex_lock(&ap_attr_mutex);
for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
zcdndev = find_zcdndev_by_devt(devt);
@@ -473,12 +470,10 @@ static void zcdn_destroy_all(void)
device_unregister(&zcdndev->device);
}
}
- mutex_unlock(&ap_perms_mutex);
+ mutex_unlock(&ap_attr_mutex);
}
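
These class attributes back the /sys/class/zcrypt/create and
/sys/class/zcrypt/destroy nodes. A minimal userspace sketch of creating a
zcdn node (root-only, given the 0600 attribute mode; the node name is
arbitrary):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/class/zcrypt/create", "w");

		if (!f)
			return 1;
		/* creates /dev/my_zcdn with its own ioctlmask/apmask/aqmask/admask */
		fputs("my_zcdn", f);
		return fclose(f) ? 1 : 0;
	}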
-#endif
-
-/**
+/*
* zcrypt_read (): Not supported beyond zcrypt 1.3.1.
*
* This function is not supported beyond zcrypt 1.3.1.
@@ -489,10 +484,10 @@ static ssize_t zcrypt_read(struct file *filp, char __user *buf,
return -EPERM;
}
-/**
+/*
* zcrypt_write(): Not allowed.
*
- * Write is is not allowed
+ * Write is not allowed
*/
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
size_t count, loff_t *f_pos)
@@ -500,7 +495,7 @@ static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
return -EPERM;
}
-/**
+/*
* zcrypt_open(): Count number of users.
*
* Device open function to count number of users.
@@ -509,47 +504,42 @@ static int zcrypt_open(struct inode *inode, struct file *filp)
{
struct ap_perms *perms = &ap_perms;
-#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
if (filp->f_inode->i_cdev == &zcrypt_cdev) {
struct zcdn_device *zcdndev;
- if (mutex_lock_interruptible(&ap_perms_mutex))
+ if (mutex_lock_interruptible(&ap_attr_mutex))
return -ERESTARTSYS;
zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
/* find returns a reference, no get_device() needed */
- mutex_unlock(&ap_perms_mutex);
+ mutex_unlock(&ap_attr_mutex);
if (zcdndev)
perms = &zcdndev->perms;
}
-#endif
- filp->private_data = (void *) perms;
+ filp->private_data = (void *)perms;
atomic_inc(&zcrypt_open_count);
- return nonseekable_open(inode, filp);
+ return stream_open(inode, filp);
}
-/**
+/*
* zcrypt_release(): Count number of users.
*
* Device close function to count number of users.
*/
static int zcrypt_release(struct inode *inode, struct file *filp)
{
-#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
if (filp->f_inode->i_cdev == &zcrypt_cdev) {
struct zcdn_device *zcdndev;
- if (mutex_lock_interruptible(&ap_perms_mutex))
- return -ERESTARTSYS;
+ mutex_lock(&ap_attr_mutex);
zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
- mutex_unlock(&ap_perms_mutex);
+ mutex_unlock(&ap_attr_mutex);
if (zcdndev) {
/* 2 puts here: one for find, one for open */
put_device(&zcdndev->device);
put_device(&zcdndev->device);
}
}
-#endif
atomic_dec(&zcrypt_open_count);
return 0;
@@ -567,9 +557,8 @@ static inline int zcrypt_check_ioctl(struct ap_perms *perms,
}
if (rc)
- ZCRYPT_DBF(DBF_WARN,
- "ioctl check failed: ioctlnr=0x%04x rc=%d\n",
- ioctlnr, rc);
+ ZCRYPT_DBF_WARN("%s ioctl check failed: ioctlnr=0x%04x rc=%d\n",
+ __func__, ioctlnr, rc);
return rc;
}
@@ -586,29 +575,32 @@ static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
struct zcrypt_queue *zq,
+ struct module **pmod,
unsigned int weight)
{
- if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
+ if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner))
return NULL;
+ zcrypt_card_get(zc);
zcrypt_queue_get(zq);
get_device(&zq->queue->ap_dev.device);
atomic_add(weight, &zc->load);
atomic_add(weight, &zq->load);
zq->request_count++;
+ *pmod = zq->queue->ap_dev.device.driver->owner;
return zq;
}
static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
struct zcrypt_queue *zq,
+ struct module *mod,
unsigned int weight)
{
- struct module *mod = zq->queue->ap_dev.drv->driver.owner;
-
zq->request_count--;
atomic_sub(weight, &zc->load);
atomic_sub(weight, &zq->load);
put_device(&zq->queue->ap_dev.device);
zcrypt_queue_put(zq);
+ zcrypt_card_put(zc);
module_put(mod);
}
@@ -618,13 +610,13 @@ static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
unsigned int pref_weight)
{
if (!pref_zc)
- return false;
+ return true;
weight += atomic_read(&zc->load);
pref_weight += atomic_read(&pref_zc->load);
if (weight == pref_weight)
- return atomic_read(&zc->card->total_request_count) >
- atomic_read(&pref_zc->card->total_request_count);
- return weight > pref_weight;
+ return atomic64_read(&zc->card->total_request_count) <
+ atomic64_read(&pref_zc->card->total_request_count);
+ return weight < pref_weight;
}
static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
@@ -633,29 +625,36 @@ static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
unsigned int pref_weight)
{
if (!pref_zq)
- return false;
+ return true;
weight += atomic_read(&zq->load);
pref_weight += atomic_read(&pref_zq->load);
if (weight == pref_weight)
- return zq->queue->total_request_count >
+ return zq->queue->total_request_count <
pref_zq->queue->total_request_count;
- return weight > pref_weight;
+ return weight < pref_weight;
}
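
Both compare helpers are inverted relative to the old code: they now answer
"should the candidate replace the current pick?", so an empty pick always
loses, a lower load-adjusted weight wins, and the total request counter
breaks ties toward the less-used device. The generic shape as a freestanding
sketch (the names are illustrative):

	#include <linux/types.h>

	/* true means "take the candidate over the current pick" */
	static bool prefer_candidate(bool have_pick,
				     unsigned int cand_wgt, u64 cand_count,
				     unsigned int pick_wgt, u64 pick_count)
	{
		if (!have_pick)
			return true;			/* anything beats no pick */
		if (cand_wgt == pick_wgt)
			return cand_count < pick_count;	/* tie-break: least used */
		return cand_wgt < pick_wgt;		/* lighter load wins */
	}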
/*
* zcrypt ioctls.
*/
static long zcrypt_rsa_modexpo(struct ap_perms *perms,
+ struct zcrypt_track *tr,
struct ica_rsa_modexpo *mex)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
- unsigned int weight, pref_weight;
- unsigned int func_code;
- int qid = 0, rc = -ENODEV;
+ struct ap_message ap_msg;
+ unsigned int wgt = 0, pref_wgt = 0;
+ unsigned int func_code = 0;
+ int cpen, qpen, qid = 0, rc;
+ struct module *mod;
trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
+ rc = ap_init_apmsg(&ap_msg, 0);
+ if (rc)
+ goto out;
+
if (mex->outputdatalength < mex->inputdatalength) {
rc = -EINVAL;
goto out;
@@ -676,8 +675,9 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
- /* Check for online accelarator and CCA cards */
- if (!zc->online || !(zc->card->functions & 0x18000000))
+ /* Check for usable accelerator or CCA card */
+ if (!zc->online || !zc->card->config || zc->card->chkstop ||
+ !(zc->card->hwinfo.accel || zc->card->hwinfo.cca))
continue;
/* Check for size limits */
if (zc->min_mod_size > mex->inputdatalength ||
@@ -687,57 +687,80 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
if (!zcrypt_check_card(perms, zc->card->id))
continue;
/* get weight index of the card device */
- weight = zc->speed_rating[func_code];
- if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+ wgt = zc->speed_rating[func_code];
+ /* penalty if this msg was previously sent via this card */
+ cpen = (tr && tr->again_counter && tr->last_qid &&
+ AP_QID_CARD(tr->last_qid) == zc->card->id) ?
+ TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
+ if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
continue;
for_each_zcrypt_queue(zq, zc) {
- /* check if device is online and eligible */
- if (!zq->online || !zq->ops->rsa_modexpo)
+ /* check if device is usable and eligible */
+ if (!zq->online || !zq->ops->rsa_modexpo ||
+ !ap_queue_usable(zq->queue))
continue;
/* check if device node has admission for this queue */
if (!zcrypt_check_queue(perms,
AP_QID_QUEUE(zq->queue->qid)))
continue;
- if (zcrypt_queue_compare(zq, pref_zq,
- weight, pref_weight))
+ /* penalty if the msg was previously sent at this qid */
+ qpen = (tr && tr->again_counter && tr->last_qid &&
+ tr->last_qid == zq->queue->qid) ?
+ TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
+ if (!zcrypt_queue_compare(zq, pref_zq,
+ wgt + cpen + qpen, pref_wgt))
continue;
pref_zc = zc;
pref_zq = zq;
- pref_weight = weight;
+ pref_wgt = wgt + cpen + qpen;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
+ pr_debug("no matching queue found => ENODEV\n");
rc = -ENODEV;
goto out;
}
qid = pref_zq->queue->qid;
- rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);
+ rc = pref_zq->ops->rsa_modexpo(pref_zq, mex, &ap_msg);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
spin_unlock(&zcrypt_list_lock);
out:
+ ap_release_apmsg(&ap_msg);
+ if (tr) {
+ tr->last_rc = rc;
+ tr->last_qid = qid;
+ }
trace_s390_zcrypt_rep(mex, func_code, rc,
- AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid),
+ ap_msg.psmid);
return rc;
}
static long zcrypt_rsa_crt(struct ap_perms *perms,
+ struct zcrypt_track *tr,
struct ica_rsa_modexpo_crt *crt)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
- unsigned int weight, pref_weight;
- unsigned int func_code;
- int qid = 0, rc = -ENODEV;
+ struct ap_message ap_msg;
+ unsigned int wgt = 0, pref_wgt = 0;
+ unsigned int func_code = 0;
+ int cpen, qpen, qid = 0, rc;
+ struct module *mod;
trace_s390_zcrypt_req(crt, TP_ICARSACRT);
+ rc = ap_init_apmsg(&ap_msg, 0);
+ if (rc)
+ goto out;
+
if (crt->outputdatalength < crt->inputdatalength) {
rc = -EINVAL;
goto out;
@@ -758,8 +781,9 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
- /* Check for online accelarator and CCA cards */
- if (!zc->online || !(zc->card->functions & 0x18000000))
+ /* Check for usable accelerator or CCA card */
+ if (!zc->online || !zc->card->config || zc->card->chkstop ||
+ !(zc->card->hwinfo.accel || zc->card->hwinfo.cca))
continue;
/* Check for size limits */
if (zc->min_mod_size > crt->inputdatalength ||
@@ -769,131 +793,220 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
if (!zcrypt_check_card(perms, zc->card->id))
continue;
/* get weight index of the card device */
- weight = zc->speed_rating[func_code];
- if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+ wgt = zc->speed_rating[func_code];
+ /* penalty if this msg was previously sent via this card */
+ cpen = (tr && tr->again_counter && tr->last_qid &&
+ AP_QID_CARD(tr->last_qid) == zc->card->id) ?
+ TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
+ if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
continue;
for_each_zcrypt_queue(zq, zc) {
- /* check if device is online and eligible */
- if (!zq->online || !zq->ops->rsa_modexpo_crt)
+ /* check if device is usable and eligible */
+ if (!zq->online || !zq->ops->rsa_modexpo_crt ||
+ !ap_queue_usable(zq->queue))
continue;
/* check if device node has admission for this queue */
if (!zcrypt_check_queue(perms,
AP_QID_QUEUE(zq->queue->qid)))
continue;
- if (zcrypt_queue_compare(zq, pref_zq,
- weight, pref_weight))
+ /* penalty if the msg was previously sent at this qid */
+ qpen = (tr && tr->again_counter && tr->last_qid &&
+ tr->last_qid == zq->queue->qid) ?
+ TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
+ if (!zcrypt_queue_compare(zq, pref_zq,
+ wgt + cpen + qpen, pref_wgt))
continue;
pref_zc = zc;
pref_zq = zq;
- pref_weight = weight;
+ pref_wgt = wgt + cpen + qpen;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
+ pr_debug("no matching queue found => ENODEV\n");
rc = -ENODEV;
goto out;
}
qid = pref_zq->queue->qid;
- rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);
+ rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt, &ap_msg);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
spin_unlock(&zcrypt_list_lock);
out:
+ ap_release_apmsg(&ap_msg);
+ if (tr) {
+ tr->last_rc = rc;
+ tr->last_qid = qid;
+ }
trace_s390_zcrypt_rep(crt, func_code, rc,
- AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid),
+ ap_msg.psmid);
return rc;
}
-static long _zcrypt_send_cprb(struct ap_perms *perms,
- struct ica_xcRB *xcRB)
+static long _zcrypt_send_cprb(u32 xflags, struct ap_perms *perms,
+ struct zcrypt_track *tr,
+ struct ica_xcRB *xcrb)
{
+ bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE;
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
struct ap_message ap_msg;
- unsigned int weight, pref_weight;
- unsigned int func_code;
- unsigned short *domain;
- int qid = 0, rc = -ENODEV;
+ unsigned int wgt = 0, pref_wgt = 0;
+ unsigned int func_code = 0;
+ unsigned short *domain, tdom;
+ int cpen, qpen, qid = 0, rc;
+ struct module *mod;
+
+ trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB);
- trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
+ xcrb->status = 0;
- xcRB->status = 0;
- ap_init_message(&ap_msg);
- rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain);
+ rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ?
+ AP_MSG_FLAG_MEMPOOL : 0);
if (rc)
goto out;
+ rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
+ if (rc)
+ goto out;
+ print_hex_dump_debug("ccareq: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ ap_msg.msg, ap_msg.len, false);
+
+ tdom = *domain;
+ if (perms != &ap_perms && tdom < AP_DOMAINS) {
+ if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
+ if (!test_bit_inv(tdom, perms->adm)) {
+ rc = -ENODEV;
+ goto out;
+ }
+ } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ }
+ /*
+ * If a valid target domain is set and this domain is NOT a usage
+ * domain but a control only domain, autoselect target domain.
+ */
+ if (tdom < AP_DOMAINS &&
+ !ap_test_config_usage_domain(tdom) &&
+ ap_test_config_ctrl_domain(tdom))
+ tdom = AUTOSEL_DOM;
+
pref_zc = NULL;
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
- /* Check for online CCA cards */
- if (!zc->online || !(zc->card->functions & 0x10000000))
+ /* Check for usable CCA card */
+ if (!zc->online || !zc->card->config || zc->card->chkstop ||
+ !zc->card->hwinfo.cca)
continue;
/* Check for user selected CCA card */
- if (xcRB->user_defined != AUTOSELECT &&
- xcRB->user_defined != zc->card->id)
+ if (xcrb->user_defined != AUTOSELECT &&
+ xcrb->user_defined != zc->card->id)
+ continue;
+ /* check if request size exceeds card max msg size */
+ if (ap_msg.len > zc->card->maxmsgsize)
continue;
/* check if device node has admission for this card */
if (!zcrypt_check_card(perms, zc->card->id))
continue;
/* get weight index of the card device */
- weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
- if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+ wgt = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
+ /* penalty if this msg was previously sent via this card */
+ cpen = (tr && tr->again_counter && tr->last_qid &&
+ AP_QID_CARD(tr->last_qid) == zc->card->id) ?
+ TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
+ if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
continue;
for_each_zcrypt_queue(zq, zc) {
- /* check if device is online and eligible */
- if (!zq->online ||
- !zq->ops->send_cprb ||
- ((*domain != (unsigned short) AUTOSELECT) &&
- (*domain != AP_QID_QUEUE(zq->queue->qid))))
+ /* check for device usable and eligible */
+ if (!zq->online || !zq->ops->send_cprb ||
+ !ap_queue_usable(zq->queue) ||
+ (tdom != AUTOSEL_DOM &&
+ tdom != AP_QID_QUEUE(zq->queue->qid)))
continue;
/* check if device node has admission for this queue */
if (!zcrypt_check_queue(perms,
AP_QID_QUEUE(zq->queue->qid)))
continue;
- if (zcrypt_queue_compare(zq, pref_zq,
- weight, pref_weight))
+ /* penalty if the msg was previously sent at this qid */
+ qpen = (tr && tr->again_counter && tr->last_qid &&
+ tr->last_qid == zq->queue->qid) ?
+ TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
+ if (!zcrypt_queue_compare(zq, pref_zq,
+ wgt + cpen + qpen, pref_wgt))
continue;
pref_zc = zc;
pref_zq = zq;
- pref_weight = weight;
+ pref_wgt = wgt + cpen + qpen;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
+ pr_debug("no match for address %02x.%04x => ENODEV\n",
+ xcrb->user_defined, *domain);
rc = -ENODEV;
goto out;
}
/* in case of auto select, provide the correct domain */
qid = pref_zq->queue->qid;
- if (*domain == (unsigned short) AUTOSELECT)
+ if (*domain == AUTOSEL_DOM)
*domain = AP_QID_QUEUE(qid);
- rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
+ rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg);
+ if (!rc) {
+ print_hex_dump_debug("ccarpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ ap_msg.msg, ap_msg.len, false);
+ }
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
spin_unlock(&zcrypt_list_lock);
out:
- ap_release_message(&ap_msg);
- trace_s390_zcrypt_rep(xcRB, func_code, rc,
- AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+ ap_release_apmsg(&ap_msg);
+ if (tr) {
+ tr->last_rc = rc;
+ tr->last_qid = qid;
+ }
+ trace_s390_zcrypt_rep(xcrb, func_code, rc,
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid),
+ ap_msg.psmid);
return rc;
}
-long zcrypt_send_cprb(struct ica_xcRB *xcRB)
+long zcrypt_send_cprb(struct ica_xcRB *xcrb, u32 xflags)
{
- return _zcrypt_send_cprb(&ap_perms, xcRB);
+ struct zcrypt_track tr;
+ int rc;
+
+ memset(&tr, 0, sizeof(tr));
+
+ do {
+ rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
+ do {
+ rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
+ if (rc)
+ pr_debug("rc=%d\n", rc);
+
+ return rc;
}
EXPORT_SYMBOL(zcrypt_send_cprb);
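
For in-kernel callers the new xflags argument carries two orthogonal hints:
ZCRYPT_XFLAG_USERSPACE marks the CPRB buffers as user addresses, and
ZCRYPT_XFLAG_NOMEMALLOC makes ap_init_apmsg() draw from the mempool instead
of allocating. A hedged sketch of a kernel caller, assuming the CPRB was
prepared elsewhere and the zcrypt API header is included:

	/* Send a kernel-resident CPRB; avoid allocations when on a reclaim path. */
	static long send_prepared_cprb(struct ica_xcRB *xcrb, bool reclaim_path)
	{
		u32 xflags = reclaim_path ? ZCRYPT_XFLAG_NOMEMALLOC : 0;

		/* buffers are kernel memory, so ZCRYPT_XFLAG_USERSPACE stays clear */
		return zcrypt_send_cprb(xcrb, xflags);
	}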
@@ -902,7 +1015,7 @@ static bool is_desired_ep11_card(unsigned int dev_id,
struct ep11_target_dev *targets)
{
while (target_num-- > 0) {
- if (dev_id == targets->ap_id)
+ if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP)
return true;
targets++;
}
@@ -913,77 +1026,106 @@ static bool is_desired_ep11_queue(unsigned int dev_qid,
unsigned short target_num,
struct ep11_target_dev *targets)
{
+ int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid);
+
while (target_num-- > 0) {
- if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid)
+ if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) &&
+ (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM))
return true;
targets++;
}
return false;
}
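
With AUTOSEL_AP and AUTOSEL_DOM now honored as wildcards inside a target
list, a single entry can select "any adapter, one fixed domain" (or the
reverse) instead of naming every APQN. For illustration (the domain number
is made up):

	/* EP11 target: any adapter, usage domain 5 only */
	static const struct ep11_target_dev any_card_dom5 = {
		.ap_id  = AUTOSEL_AP,
		.dom_id = 5,
	};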
-static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
- struct ep11_urb *xcrb)
+static long _zcrypt_send_ep11_cprb(u32 xflags, struct ap_perms *perms,
+ struct zcrypt_track *tr,
+ struct ep11_urb *xcrb)
{
+ bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE;
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
- struct ep11_target_dev *targets;
+ struct ep11_target_dev *targets = NULL;
unsigned short target_num;
- unsigned int weight, pref_weight;
- unsigned int func_code;
+ unsigned int wgt = 0, pref_wgt = 0;
+ unsigned int func_code = 0, domain;
struct ap_message ap_msg;
- int qid = 0, rc = -ENODEV;
+ int cpen, qpen, qid = 0, rc;
+ struct module *mod;
trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
- ap_init_message(&ap_msg);
+ rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ?
+ AP_MSG_FLAG_MEMPOOL : 0);
+ if (rc)
+ goto out;
- target_num = (unsigned short) xcrb->targets_num;
+ target_num = (unsigned short)xcrb->targets_num;
/* empty list indicates autoselect (all available targets) */
- targets = NULL;
+ rc = -ENOMEM;
if (target_num != 0) {
- struct ep11_target_dev __user *uptr;
-
- targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
- if (!targets) {
- rc = -ENOMEM;
- goto out;
- }
-
- uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
- if (copy_from_user(targets, uptr,
- target_num * sizeof(*targets))) {
- rc = -EFAULT;
- goto out_free;
+ if (userspace) {
+ targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
+ if (!targets)
+ goto out;
+ if (copy_from_user(targets, xcrb->targets,
+ target_num * sizeof(*targets))) {
+ rc = -EFAULT;
+ goto out;
+ }
+ } else {
+ targets = (struct ep11_target_dev __force __kernel *)xcrb->targets;
}
}
- rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code);
+ rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
if (rc)
- goto out_free;
+ goto out;
+ print_hex_dump_debug("ep11req: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ ap_msg.msg, ap_msg.len, false);
+
+ if (perms != &ap_perms && domain < AUTOSEL_DOM) {
+ if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
+ if (!test_bit_inv(domain, perms->adm)) {
+ rc = -ENODEV;
+ goto out;
+ }
+ } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+ }
pref_zc = NULL;
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
- /* Check for online EP11 cards */
- if (!zc->online || !(zc->card->functions & 0x04000000))
+ /* Check for usable EP11 card */
+ if (!zc->online || !zc->card->config || zc->card->chkstop ||
+ !zc->card->hwinfo.ep11)
continue;
/* Check for user selected EP11 card */
if (targets &&
!is_desired_ep11_card(zc->card->id, target_num, targets))
continue;
+ /* check if request size exceeds card max msg size */
+ if (ap_msg.len > zc->card->maxmsgsize)
+ continue;
/* check if device node has admission for this card */
if (!zcrypt_check_card(perms, zc->card->id))
continue;
/* get weight index of the card device */
- weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
- if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+ wgt = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
+ /* penalty if this msg was previously sent via this card */
+ cpen = (tr && tr->again_counter && tr->last_qid &&
+ AP_QID_CARD(tr->last_qid) == zc->card->id) ?
+ TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
+ if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
continue;
for_each_zcrypt_queue(zq, zc) {
- /* check if device is online and eligible */
- if (!zq->online ||
- !zq->ops->send_ep11_cprb ||
+ /* check if device is usable and eligible */
+ if (!zq->online || !zq->ops->send_ep11_cprb ||
+ !ap_queue_usable(zq->queue) ||
(targets &&
!is_desired_ep11_queue(zq->queue->qid,
target_num, targets)))
@@ -992,52 +1134,102 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
if (!zcrypt_check_queue(perms,
AP_QID_QUEUE(zq->queue->qid)))
continue;
- if (zcrypt_queue_compare(zq, pref_zq,
- weight, pref_weight))
+ /* penalty if the msg was previously sent at this qid */
+ qpen = (tr && tr->again_counter && tr->last_qid &&
+ tr->last_qid == zq->queue->qid) ?
+ TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
+ if (!zcrypt_queue_compare(zq, pref_zq,
+ wgt + cpen + qpen, pref_wgt))
continue;
pref_zc = zc;
pref_zq = zq;
- pref_weight = weight;
+ pref_wgt = wgt + cpen + qpen;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
+ if (targets && target_num == 1) {
+ pr_debug("no match for address %02x.%04x => ENODEV\n",
+ (int)targets->ap_id, (int)targets->dom_id);
+ } else if (targets) {
+ pr_debug("no match for %d target addrs => ENODEV\n",
+ (int)target_num);
+ } else {
+ pr_debug("no match for address ff.ffff => ENODEV\n");
+ }
rc = -ENODEV;
- goto out_free;
+ goto out;
}
qid = pref_zq->queue->qid;
- rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);
+ rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg);
+ if (!rc) {
+ print_hex_dump_debug("ep11rpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ ap_msg.msg, ap_msg.len, false);
+ }
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
spin_unlock(&zcrypt_list_lock);
-out_free:
- kfree(targets);
out:
- ap_release_message(&ap_msg);
+ if (userspace)
+ kfree(targets);
+ ap_release_apmsg(&ap_msg);
+ if (tr) {
+ tr->last_rc = rc;
+ tr->last_qid = qid;
+ }
trace_s390_zcrypt_rep(xcrb, func_code, rc,
- AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid),
+ ap_msg.psmid);
return rc;
}
+long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb, u32 xflags)
+{
+ struct zcrypt_track tr;
+ int rc;
+
+ memset(&tr, 0, sizeof(tr));
+
+ do {
+ rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
+ do {
+ rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
+ if (rc)
+ pr_debug("rc=%d\n", rc);
+
+ return rc;
+}
+EXPORT_SYMBOL(zcrypt_send_ep11_cprb);
+
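For context, a minimal sketch of an in-kernel caller of this newly exported wrapper. This is illustrative only and not part of the commit; it assumes the uapi struct ep11_urb layout (targets_num, targets, req/req_len, resp/resp_len fields) and that xflags 0 means kernel-space pointers with normal kmalloc allocation allowed:

/* Hypothetical in-kernel EP11 send: no target list, so the api
 * picks any usable EP11 APQN; req/resp are kernel pointers.
 */
static long send_ep11_example(u8 *req, u64 reqlen, u8 *rep, u64 replen)
{
	struct ep11_urb urb = {};

	urb.targets_num = 0;	/* no explicit APQN targets */
	urb.req = (u64)(unsigned long)req;
	urb.req_len = reqlen;
	urb.resp = (u64)(unsigned long)rep;
	urb.resp_len = replen;

	return zcrypt_send_ep11_cprb(&urb, 0);
}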
static long zcrypt_rng(char *buffer)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
- unsigned int weight, pref_weight;
- unsigned int func_code;
+ unsigned int wgt = 0, pref_wgt = 0;
+ unsigned int func_code = 0;
struct ap_message ap_msg;
unsigned int domain;
int qid = 0, rc = -ENODEV;
+ struct module *mod;
trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
- ap_init_message(&ap_msg);
- rc = get_rng_fc(&ap_msg, &func_code, &domain);
+ rc = ap_init_apmsg(&ap_msg, 0);
+ if (rc)
+ goto out;
+ rc = prep_rng_ap_msg(&ap_msg, &func_code, &domain);
if (rc)
goto out;
@@ -1045,29 +1237,31 @@ static long zcrypt_rng(char *buffer)
pref_zq = NULL;
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
- /* Check for online CCA cards */
- if (!zc->online || !(zc->card->functions & 0x10000000))
+ /* Check for usable CCA card */
+ if (!zc->online || !zc->card->config || zc->card->chkstop ||
+ !zc->card->hwinfo.cca)
continue;
/* get weight index of the card device */
- weight = zc->speed_rating[func_code];
- if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
+ wgt = zc->speed_rating[func_code];
+ if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt))
continue;
for_each_zcrypt_queue(zq, zc) {
- /* check if device is online and eligible */
- if (!zq->online || !zq->ops->rng)
+ /* check if device is usable and eligible */
+ if (!zq->online || !zq->ops->rng ||
+ !ap_queue_usable(zq->queue))
continue;
- if (zcrypt_queue_compare(zq, pref_zq,
- weight, pref_weight))
+ if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt))
continue;
pref_zc = zc;
pref_zq = zq;
- pref_weight = weight;
+ pref_wgt = wgt;
}
}
- pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+ pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
+ pr_debug("no matching queue found => ENODEV\n");
rc = -ENODEV;
goto out;
}
@@ -1076,13 +1270,14 @@ static long zcrypt_rng(char *buffer)
rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
spin_lock(&zcrypt_list_lock);
- zcrypt_drop_queue(pref_zc, pref_zq, weight);
+ zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
spin_unlock(&zcrypt_list_lock);
out:
- ap_release_message(&ap_msg);
+ ap_release_apmsg(&ap_msg);
trace_s390_zcrypt_rep(buffer, func_code, rc,
- AP_QID_CARD(qid), AP_QID_QUEUE(qid));
+ AP_QID_CARD(qid), AP_QID_QUEUE(qid),
+ ap_msg.psmid);
return rc;
}
@@ -1105,7 +1300,7 @@ static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
queue = AP_QID_QUEUE(zq->queue->qid);
stat = &devstatus[card * AP_DOMAINS + queue];
stat->hwtype = zc->card->ap_dev.device_type;
- stat->functions = zc->card->functions >> 26;
+ stat->functions = zc->card->hwinfo.fac >> 26;
stat->qid = zq->queue->qid;
stat->online = zq->online ? 0x01 : 0x00;
}
@@ -1113,24 +1308,27 @@ static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
spin_unlock(&zcrypt_list_lock);
}
-void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
+void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus,
+ int maxcard, int maxqueue)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
struct zcrypt_device_status_ext *stat;
int card, queue;
- memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT
- * sizeof(struct zcrypt_device_status_ext));
+ maxcard = min_t(int, maxcard, MAX_ZDEV_CARDIDS_EXT);
+ maxqueue = min_t(int, maxqueue, MAX_ZDEV_DOMAINS_EXT);
spin_lock(&zcrypt_list_lock);
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
card = AP_QID_CARD(zq->queue->qid);
queue = AP_QID_QUEUE(zq->queue->qid);
- stat = &devstatus[card * AP_DOMAINS + queue];
+ if (card >= maxcard || queue >= maxqueue)
+ continue;
+ stat = &devstatus[card * maxqueue + queue];
stat->hwtype = zc->card->ap_dev.device_type;
- stat->functions = zc->card->functions >> 26;
+ stat->functions = zc->card->hwinfo.fac >> 26;
stat->qid = zq->queue->qid;
stat->online = zq->online ? 0x01 : 0x00;
}
@@ -1139,6 +1337,34 @@ void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
}
EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
+int zcrypt_device_status_ext(int card, int queue,
+ struct zcrypt_device_status_ext *devstat)
+{
+ struct zcrypt_card *zc;
+ struct zcrypt_queue *zq;
+
+ memset(devstat, 0, sizeof(*devstat));
+
+ spin_lock(&zcrypt_list_lock);
+ for_each_zcrypt_card(zc) {
+ for_each_zcrypt_queue(zq, zc) {
+ if (card == AP_QID_CARD(zq->queue->qid) &&
+ queue == AP_QID_QUEUE(zq->queue->qid)) {
+ devstat->hwtype = zc->card->ap_dev.device_type;
+ devstat->functions = zc->card->hwinfo.fac >> 26;
+ devstat->qid = zq->queue->qid;
+ devstat->online = zq->online ? 0x01 : 0x00;
+ spin_unlock(&zcrypt_list_lock);
+ return 0;
+ }
+ }
+ }
+ spin_unlock(&zcrypt_list_lock);
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(zcrypt_device_status_ext);
+
static void zcrypt_status_mask(char status[], size_t max_adapters)
{
struct zcrypt_card *zc;
@@ -1150,8 +1376,8 @@ static void zcrypt_status_mask(char status[], size_t max_adapters)
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
card = AP_QID_CARD(zq->queue->qid);
- if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
- || card >= max_adapters)
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
+ card >= max_adapters)
continue;
status[card] = zc->online ? zc->user_space_type : 0x0d;
}
@@ -1171,8 +1397,8 @@ static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
card = AP_QID_CARD(zq->queue->qid);
- if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
- || card >= max_adapters)
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
+ card >= max_adapters)
continue;
spin_lock(&zq->queue->lock);
qdepth[card] =
@@ -1185,11 +1411,12 @@ static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
spin_unlock(&zcrypt_list_lock);
}
-static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
+static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
int card;
+ u64 cnt;
memset(reqcnt, 0, sizeof(int) * max_adapters);
spin_lock(&zcrypt_list_lock);
@@ -1197,12 +1424,13 @@ static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
for_each_zcrypt_card(zc) {
for_each_zcrypt_queue(zq, zc) {
card = AP_QID_CARD(zq->queue->qid);
- if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
- || card >= max_adapters)
+ if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
+ card >= max_adapters)
continue;
spin_lock(&zq->queue->lock);
- reqcnt[card] = zq->queue->total_request_count;
+ cnt = zq->queue->total_request_count;
spin_unlock(&zq->queue->lock);
+ reqcnt[card] = (cnt < UINT_MAX) ? (u32)cnt : UINT_MAX;
}
}
local_bh_enable();
@@ -1255,119 +1483,169 @@ static int zcrypt_requestq_count(void)
return requestq_count;
}
-static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
+static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
{
int rc;
- struct ap_perms *perms =
- (struct ap_perms *) filp->private_data;
+ struct zcrypt_track tr;
+ struct ica_rsa_modexpo mex;
+ struct ica_rsa_modexpo __user *umex = (void __user *)arg;
- rc = zcrypt_check_ioctl(perms, cmd);
- if (rc)
- return rc;
+ memset(&tr, 0, sizeof(tr));
+ if (copy_from_user(&mex, umex, sizeof(mex)))
+ return -EFAULT;
- switch (cmd) {
- case ICARSAMODEXPO: {
- struct ica_rsa_modexpo __user *umex = (void __user *) arg;
- struct ica_rsa_modexpo mex;
+ do {
+ rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
- if (copy_from_user(&mex, umex, sizeof(mex)))
- return -EFAULT;
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
do {
- rc = zcrypt_rsa_modexpo(perms, &mex);
- } while (rc == -EAGAIN);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
- do {
- rc = zcrypt_rsa_modexpo(perms, &mex);
- } while (rc == -EAGAIN);
- if (rc) {
- ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
- return rc;
- }
- return put_user(mex.outputdatalength, &umex->outputdatalength);
+ rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
+ if (rc) {
+ pr_debug("ioctl ICARSAMODEXPO rc=%d\n", rc);
+ return rc;
}
- case ICARSACRT: {
- struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
- struct ica_rsa_modexpo_crt crt;
+ return put_user(mex.outputdatalength, &umex->outputdatalength);
+}
- if (copy_from_user(&crt, ucrt, sizeof(crt)))
- return -EFAULT;
- do {
- rc = zcrypt_rsa_crt(perms, &crt);
- } while (rc == -EAGAIN);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
- do {
- rc = zcrypt_rsa_crt(perms, &crt);
- } while (rc == -EAGAIN);
- if (rc) {
- ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
- return rc;
- }
- return put_user(crt.outputdatalength, &ucrt->outputdatalength);
- }
- case ZSECSENDCPRB: {
- struct ica_xcRB __user *uxcRB = (void __user *) arg;
- struct ica_xcRB xcRB;
+static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
+{
+ int rc;
+ struct zcrypt_track tr;
+ struct ica_rsa_modexpo_crt crt;
+ struct ica_rsa_modexpo_crt __user *ucrt = (void __user *)arg;
- if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
- return -EFAULT;
+ memset(&tr, 0, sizeof(tr));
+ if (copy_from_user(&crt, ucrt, sizeof(crt)))
+ return -EFAULT;
+
+ do {
+ rc = zcrypt_rsa_crt(perms, &tr, &crt);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
do {
- rc = _zcrypt_send_cprb(perms, &xcRB);
- } while (rc == -EAGAIN);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
- do {
- rc = _zcrypt_send_cprb(perms, &xcRB);
- } while (rc == -EAGAIN);
- if (rc)
- ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d status=0x%x\n",
- rc, xcRB.status);
- if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
- return -EFAULT;
+ rc = zcrypt_rsa_crt(perms, &tr, &crt);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
+ if (rc) {
+ pr_debug("ioctl ICARSACRT rc=%d\n", rc);
return rc;
}
- case ZSENDEP11CPRB: {
- struct ep11_urb __user *uxcrb = (void __user *)arg;
- struct ep11_urb xcrb;
+ return put_user(crt.outputdatalength, &ucrt->outputdatalength);
+}
- if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
- return -EFAULT;
+static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
+{
+ int rc;
+ struct ica_xcRB xcrb;
+ struct zcrypt_track tr;
+ u32 xflags = ZCRYPT_XFLAG_USERSPACE;
+ struct ica_xcRB __user *uxcrb = (void __user *)arg;
+
+ memset(&tr, 0, sizeof(tr));
+ if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
+ return -EFAULT;
+
+ do {
+ rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
do {
- rc = zcrypt_send_ep11_cprb(perms, &xcrb);
- } while (rc == -EAGAIN);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
- do {
- rc = zcrypt_send_ep11_cprb(perms, &xcrb);
- } while (rc == -EAGAIN);
- if (rc)
- ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
- if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
- return -EFAULT;
+ rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
+ if (rc)
+ pr_debug("ioctl ZSENDCPRB rc=%d status=0x%x\n",
+ rc, xcrb.status);
+ if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
+ return -EFAULT;
+ return rc;
+}
+
+static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
+{
+ int rc;
+ struct ep11_urb xcrb;
+ struct zcrypt_track tr;
+ u32 xflags = ZCRYPT_XFLAG_USERSPACE;
+ struct ep11_urb __user *uxcrb = (void __user *)arg;
+
+ memset(&tr, 0, sizeof(tr));
+ if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
+ return -EFAULT;
+
+ do {
+ rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
+ do {
+ rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
+ if (rc)
+ pr_debug("ioctl ZSENDEP11CPRB rc=%d\n", rc);
+ if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
+ return -EFAULT;
+ return rc;
+}
+
+static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ int rc;
+ struct ap_perms *perms =
+ (struct ap_perms *)filp->private_data;
+
+ rc = zcrypt_check_ioctl(perms, cmd);
+ if (rc)
return rc;
- }
+
+ switch (cmd) {
+ case ICARSAMODEXPO:
+ return icarsamodexpo_ioctl(perms, arg);
+ case ICARSACRT:
+ return icarsacrt_ioctl(perms, arg);
+ case ZSECSENDCPRB:
+ return zsecsendcprb_ioctl(perms, arg);
+ case ZSENDEP11CPRB:
+ return zsendep11cprb_ioctl(perms, arg);
case ZCRYPT_DEVICE_STATUS: {
struct zcrypt_device_status_ext *device_status;
size_t total_size = MAX_ZDEV_ENTRIES_EXT
* sizeof(struct zcrypt_device_status_ext);
- device_status = kzalloc(total_size, GFP_KERNEL);
+ device_status = kvcalloc(MAX_ZDEV_ENTRIES_EXT,
+ sizeof(struct zcrypt_device_status_ext),
+ GFP_KERNEL);
if (!device_status)
return -ENOMEM;
- zcrypt_device_status_mask_ext(device_status);
- if (copy_to_user((char __user *) arg, device_status,
+ zcrypt_device_status_mask_ext(device_status,
+ MAX_ZDEV_CARDIDS_EXT,
+ MAX_ZDEV_DOMAINS_EXT);
+ if (copy_to_user((char __user *)arg, device_status,
total_size))
rc = -EFAULT;
- kfree(device_status);
+ kvfree(device_status);
return rc;
}
case ZCRYPT_STATUS_MASK: {
char status[AP_DEVICES];
zcrypt_status_mask(status, AP_DEVICES);
- if (copy_to_user((char __user *) arg, status, sizeof(status)))
+ if (copy_to_user((char __user *)arg, status, sizeof(status)))
return -EFAULT;
return 0;
}
@@ -1375,31 +1653,32 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
char qdepth[AP_DEVICES];
zcrypt_qdepth_mask(qdepth, AP_DEVICES);
- if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
+ if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
return -EFAULT;
return 0;
}
case ZCRYPT_PERDEV_REQCNT: {
- int *reqcnt;
+ u32 *reqcnt;
- reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL);
+ reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
if (!reqcnt)
return -ENOMEM;
zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
- if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
+ if (copy_to_user((int __user *)arg, reqcnt,
+ sizeof(u32) * AP_DEVICES))
rc = -EFAULT;
kfree(reqcnt);
return rc;
}
case Z90STAT_REQUESTQ_COUNT:
- return put_user(zcrypt_requestq_count(), (int __user *) arg);
+ return put_user(zcrypt_requestq_count(), (int __user *)arg);
case Z90STAT_PENDINGQ_COUNT:
- return put_user(zcrypt_pendingq_count(), (int __user *) arg);
+ return put_user(zcrypt_pendingq_count(), (int __user *)arg);
case Z90STAT_TOTALOPEN_COUNT:
return put_user(atomic_read(&zcrypt_open_count),
- (int __user *) arg);
+ (int __user *)arg);
case Z90STAT_DOMAIN_INDEX:
- return put_user(ap_domain_index, (int __user *) arg);
+ return put_user(ap_domain_index, (int __user *)arg);
/*
* Deprecated ioctls
*/
@@ -1413,7 +1692,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (!device_status)
return -ENOMEM;
zcrypt_device_status_mask(device_status);
- if (copy_to_user((char __user *) arg, device_status,
+ if (copy_to_user((char __user *)arg, device_status,
total_size))
rc = -EFAULT;
kfree(device_status);
@@ -1424,7 +1703,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
char status[MAX_ZDEV_CARDIDS];
zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
- if (copy_to_user((char __user *) arg, status, sizeof(status)))
+ if (copy_to_user((char __user *)arg, status, sizeof(status)))
return -EFAULT;
return 0;
}
@@ -1433,201 +1712,26 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
char qdepth[MAX_ZDEV_CARDIDS];
zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
- if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
+ if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
return -EFAULT;
return 0;
}
case Z90STAT_PERDEV_REQCNT: {
/* the old ioctl supports only 64 adapters */
- int reqcnt[MAX_ZDEV_CARDIDS];
+ u32 reqcnt[MAX_ZDEV_CARDIDS];
zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
- if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
+ if (copy_to_user((int __user *)arg, reqcnt, sizeof(reqcnt)))
return -EFAULT;
return 0;
}
/* unknown ioctl number */
default:
- ZCRYPT_DBF(DBF_DEBUG, "unknown ioctl 0x%08x\n", cmd);
+ pr_debug("unknown ioctl 0x%08x\n", cmd);
return -ENOIOCTLCMD;
}
}
-#ifdef CONFIG_COMPAT
-/*
- * ioctl32 conversion routines
- */
-struct compat_ica_rsa_modexpo {
- compat_uptr_t inputdata;
- unsigned int inputdatalength;
- compat_uptr_t outputdata;
- unsigned int outputdatalength;
- compat_uptr_t b_key;
- compat_uptr_t n_modulus;
-};
-
-static long trans_modexpo32(struct ap_perms *perms, struct file *filp,
- unsigned int cmd, unsigned long arg)
-{
- struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
- struct compat_ica_rsa_modexpo mex32;
- struct ica_rsa_modexpo mex64;
- long rc;
-
- if (copy_from_user(&mex32, umex32, sizeof(mex32)))
- return -EFAULT;
- mex64.inputdata = compat_ptr(mex32.inputdata);
- mex64.inputdatalength = mex32.inputdatalength;
- mex64.outputdata = compat_ptr(mex32.outputdata);
- mex64.outputdatalength = mex32.outputdatalength;
- mex64.b_key = compat_ptr(mex32.b_key);
- mex64.n_modulus = compat_ptr(mex32.n_modulus);
- do {
- rc = zcrypt_rsa_modexpo(perms, &mex64);
- } while (rc == -EAGAIN);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
- do {
- rc = zcrypt_rsa_modexpo(perms, &mex64);
- } while (rc == -EAGAIN);
- if (rc)
- return rc;
- return put_user(mex64.outputdatalength,
- &umex32->outputdatalength);
-}
-
-struct compat_ica_rsa_modexpo_crt {
- compat_uptr_t inputdata;
- unsigned int inputdatalength;
- compat_uptr_t outputdata;
- unsigned int outputdatalength;
- compat_uptr_t bp_key;
- compat_uptr_t bq_key;
- compat_uptr_t np_prime;
- compat_uptr_t nq_prime;
- compat_uptr_t u_mult_inv;
-};
-
-static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
- unsigned int cmd, unsigned long arg)
-{
- struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
- struct compat_ica_rsa_modexpo_crt crt32;
- struct ica_rsa_modexpo_crt crt64;
- long rc;
-
- if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
- return -EFAULT;
- crt64.inputdata = compat_ptr(crt32.inputdata);
- crt64.inputdatalength = crt32.inputdatalength;
- crt64.outputdata = compat_ptr(crt32.outputdata);
- crt64.outputdatalength = crt32.outputdatalength;
- crt64.bp_key = compat_ptr(crt32.bp_key);
- crt64.bq_key = compat_ptr(crt32.bq_key);
- crt64.np_prime = compat_ptr(crt32.np_prime);
- crt64.nq_prime = compat_ptr(crt32.nq_prime);
- crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
- do {
- rc = zcrypt_rsa_crt(perms, &crt64);
- } while (rc == -EAGAIN);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
- do {
- rc = zcrypt_rsa_crt(perms, &crt64);
- } while (rc == -EAGAIN);
- if (rc)
- return rc;
- return put_user(crt64.outputdatalength,
- &ucrt32->outputdatalength);
-}
-
-struct compat_ica_xcRB {
- unsigned short agent_ID;
- unsigned int user_defined;
- unsigned short request_ID;
- unsigned int request_control_blk_length;
- unsigned char padding1[16 - sizeof(compat_uptr_t)];
- compat_uptr_t request_control_blk_addr;
- unsigned int request_data_length;
- char padding2[16 - sizeof(compat_uptr_t)];
- compat_uptr_t request_data_address;
- unsigned int reply_control_blk_length;
- char padding3[16 - sizeof(compat_uptr_t)];
- compat_uptr_t reply_control_blk_addr;
- unsigned int reply_data_length;
- char padding4[16 - sizeof(compat_uptr_t)];
- compat_uptr_t reply_data_addr;
- unsigned short priority_window;
- unsigned int status;
-} __packed;
-
-static long trans_xcRB32(struct ap_perms *perms, struct file *filp,
- unsigned int cmd, unsigned long arg)
-{
- struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
- struct compat_ica_xcRB xcRB32;
- struct ica_xcRB xcRB64;
- long rc;
-
- if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
- return -EFAULT;
- xcRB64.agent_ID = xcRB32.agent_ID;
- xcRB64.user_defined = xcRB32.user_defined;
- xcRB64.request_ID = xcRB32.request_ID;
- xcRB64.request_control_blk_length =
- xcRB32.request_control_blk_length;
- xcRB64.request_control_blk_addr =
- compat_ptr(xcRB32.request_control_blk_addr);
- xcRB64.request_data_length =
- xcRB32.request_data_length;
- xcRB64.request_data_address =
- compat_ptr(xcRB32.request_data_address);
- xcRB64.reply_control_blk_length =
- xcRB32.reply_control_blk_length;
- xcRB64.reply_control_blk_addr =
- compat_ptr(xcRB32.reply_control_blk_addr);
- xcRB64.reply_data_length = xcRB32.reply_data_length;
- xcRB64.reply_data_addr =
- compat_ptr(xcRB32.reply_data_addr);
- xcRB64.priority_window = xcRB32.priority_window;
- xcRB64.status = xcRB32.status;
- do {
- rc = _zcrypt_send_cprb(perms, &xcRB64);
- } while (rc == -EAGAIN);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
- do {
- rc = _zcrypt_send_cprb(perms, &xcRB64);
- } while (rc == -EAGAIN);
- xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
- xcRB32.reply_data_length = xcRB64.reply_data_length;
- xcRB32.status = xcRB64.status;
- if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
- return -EFAULT;
- return rc;
-}
-
-static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- int rc;
- struct ap_perms *perms =
- (struct ap_perms *) filp->private_data;
-
- rc = zcrypt_check_ioctl(perms, cmd);
- if (rc)
- return rc;
-
- if (cmd == ICARSAMODEXPO)
- return trans_modexpo32(perms, filp, cmd, arg);
- if (cmd == ICARSACRT)
- return trans_modexpo_crt32(perms, filp, cmd, arg);
- if (cmd == ZSECSENDCPRB)
- return trans_xcRB32(perms, filp, cmd, arg);
- return zcrypt_unlocked_ioctl(filp, cmd, arg);
-}
-#endif
-
/*
* Misc device file operations.
*/
@@ -1636,12 +1740,8 @@ static const struct file_operations zcrypt_fops = {
.read = zcrypt_read,
.write = zcrypt_write,
.unlocked_ioctl = zcrypt_unlocked_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = zcrypt_compat_ioctl,
-#endif
.open = zcrypt_open,
.release = zcrypt_release,
- .llseek = no_llseek,
};
/*
@@ -1667,10 +1767,10 @@ static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
* read method calls.
*/
if (zcrypt_rng_buffer_index == 0) {
- rc = zcrypt_rng((char *) zcrypt_rng_buffer);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
- rc = zcrypt_rng((char *) zcrypt_rng_buffer);
+ rc = zcrypt_rng((char *)zcrypt_rng_buffer);
+ /* on ENODEV failure: retry once again after an AP bus rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
+ rc = zcrypt_rng((char *)zcrypt_rng_buffer);
if (rc < 0)
return -EIO;
zcrypt_rng_buffer_index = rc / sizeof(*data);
@@ -1691,25 +1791,24 @@ int zcrypt_rng_device_add(void)
mutex_lock(&zcrypt_rng_mutex);
if (zcrypt_rng_device_count == 0) {
- zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
+ zcrypt_rng_buffer = (u32 *)get_zeroed_page(GFP_KERNEL);
if (!zcrypt_rng_buffer) {
rc = -ENOMEM;
goto out;
}
zcrypt_rng_buffer_index = 0;
- if (!zcrypt_hwrng_seed)
- zcrypt_rng_dev.quality = 0;
rc = hwrng_register(&zcrypt_rng_dev);
if (rc)
goto out_free;
zcrypt_rng_device_count = 1;
- } else
+ } else {
zcrypt_rng_device_count++;
+ }
mutex_unlock(&zcrypt_rng_mutex);
return 0;
out_free:
- free_page((unsigned long) zcrypt_rng_buffer);
+ free_page((unsigned long)zcrypt_rng_buffer);
out:
mutex_unlock(&zcrypt_rng_mutex);
return rc;
@@ -1721,15 +1820,78 @@ void zcrypt_rng_device_remove(void)
zcrypt_rng_device_count--;
if (zcrypt_rng_device_count == 0) {
hwrng_unregister(&zcrypt_rng_dev);
- free_page((unsigned long) zcrypt_rng_buffer);
+ free_page((unsigned long)zcrypt_rng_buffer);
}
mutex_unlock(&zcrypt_rng_mutex);
}
+/*
+ * Wait until the zcrypt api is operational.
+ * The AP bus scan and the binding of ap devices to device drivers are
+ * asynchronous jobs. This function waits until these initial jobs
+ * are done and thus the zcrypt api should be ready to serve crypto
+ * requests - if there are resources available. The function uses an
+ * internal timeout of 30s. The very first caller either waits until
+ * the ap bus bindings are complete or until the timeout strikes. This
+ * outcome is remembered for further callers, which are only blocked
+ * until a decision is made (timeout or bindings complete).
+ * Returns -ETIME on timeout and 0 on success.
+ */
+int zcrypt_wait_api_operational(void)
+{
+ static DEFINE_MUTEX(zcrypt_wait_api_lock);
+ static int zcrypt_wait_api_state;
+ int rc;
+
+ rc = mutex_lock_interruptible(&zcrypt_wait_api_lock);
+ if (rc)
+ return rc;
+
+ switch (zcrypt_wait_api_state) {
+ case 0:
+		/* initial state, wait for the ap bus bindings to complete */
+ rc = ap_wait_apqn_bindings_complete(
+ msecs_to_jiffies(ZCRYPT_WAIT_BINDINGS_COMPLETE_MS));
+ switch (rc) {
+ case 0:
+ /* ap bus bindings are complete */
+ zcrypt_wait_api_state = 1;
+ break;
+ case -EINTR:
+ /* interrupted, go back to caller */
+ break;
+ case -ETIME:
+ /* timeout */
+			ZCRYPT_DBF_WARN("%s ap_wait_apqn_bindings_complete()=ETIME\n",
+ __func__);
+ zcrypt_wait_api_state = -ETIME;
+ break;
+ default:
+ /* other failure */
+			pr_debug("ap_wait_apqn_bindings_complete()=%d\n", rc);
+ break;
+ }
+ break;
+ case 1:
+ /* a previous caller already found ap bus bindings complete */
+ rc = 0;
+ break;
+ default:
+ /* a previous caller had timeout or other failure */
+ rc = zcrypt_wait_api_state;
+ break;
+ }
+
+ mutex_unlock(&zcrypt_wait_api_lock);
+
+ return rc;
+}
+EXPORT_SYMBOL(zcrypt_wait_api_operational);
+
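A hypothetical consumer of this call, gating the very first crypto request on the initial AP bus scan/bind (sketch only, not part of the commit):

/* Hypothetical first-use gate in a consumer module */
static int crypto_consumer_first_use(void)
{
	int rc;

	rc = zcrypt_wait_api_operational();
	if (rc)	/* -ETIME on timeout, -EINTR if interrupted */
		return rc;

	/* ... zcrypt api should be operational, send requests ... */
	return 0;
}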
int __init zcrypt_debug_init(void)
{
- zcrypt_dbf_info = debug_register("zcrypt", 1, 1,
- DBF_MAX_SPRINTF_ARGS * sizeof(long));
+ zcrypt_dbf_info = debug_register("zcrypt", 2, 1,
+ ZCRYPT_DBF_MAX_SPRINTF_ARGS * sizeof(long));
debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
debug_set_level(zcrypt_dbf_info, DBF_ERR);
@@ -1741,19 +1903,14 @@ void zcrypt_debug_exit(void)
debug_unregister(zcrypt_dbf_info);
}
-#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
-
static int __init zcdn_init(void)
{
int rc;
/* create a new class 'zcrypt' */
- zcrypt_class = class_create(THIS_MODULE, ZCRYPT_NAME);
- if (IS_ERR(zcrypt_class)) {
- rc = PTR_ERR(zcrypt_class);
- goto out_class_create_failed;
- }
- zcrypt_class->dev_release = zcdn_device_release;
+ rc = class_register(&zcrypt_class);
+ if (rc)
+ goto out_class_register_failed;
/* alloc device minor range */
rc = alloc_chrdev_region(&zcrypt_devt,
@@ -1769,40 +1926,38 @@ static int __init zcdn_init(void)
goto out_cdev_add_failed;
/* need some class specific sysfs attributes */
- rc = class_create_file(zcrypt_class, &class_attr_zcdn_create);
+ rc = class_create_file(&zcrypt_class, &class_attr_zcdn_create);
if (rc)
goto out_class_create_file_1_failed;
- rc = class_create_file(zcrypt_class, &class_attr_zcdn_destroy);
+ rc = class_create_file(&zcrypt_class, &class_attr_zcdn_destroy);
if (rc)
goto out_class_create_file_2_failed;
return 0;
out_class_create_file_2_failed:
- class_remove_file(zcrypt_class, &class_attr_zcdn_create);
+ class_remove_file(&zcrypt_class, &class_attr_zcdn_create);
out_class_create_file_1_failed:
cdev_del(&zcrypt_cdev);
out_cdev_add_failed:
unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
out_alloc_chrdev_failed:
- class_destroy(zcrypt_class);
-out_class_create_failed:
+ class_unregister(&zcrypt_class);
+out_class_register_failed:
return rc;
}
static void zcdn_exit(void)
{
- class_remove_file(zcrypt_class, &class_attr_zcdn_create);
- class_remove_file(zcrypt_class, &class_attr_zcdn_destroy);
+ class_remove_file(&zcrypt_class, &class_attr_zcdn_create);
+ class_remove_file(&zcrypt_class, &class_attr_zcdn_destroy);
zcdn_destroy_all();
cdev_del(&zcrypt_cdev);
unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
- class_destroy(zcrypt_class);
+ class_unregister(&zcrypt_class);
}
-#endif
-
-/**
+/*
* zcrypt_api_init(): Module initialization.
*
* The module initialization code.
@@ -1811,15 +1966,27 @@ int __init zcrypt_api_init(void)
{
int rc;
+ /* make sure the mempool threshold is >= 1 */
+ if (zcrypt_mempool_threshold < 1) {
+ rc = -EINVAL;
+ goto out;
+ }
+
rc = zcrypt_debug_init();
if (rc)
goto out;
-#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
rc = zcdn_init();
if (rc)
- goto out;
-#endif
+ goto out_zcdn_init_failed;
+
+ rc = zcrypt_ccamisc_init();
+ if (rc)
+ goto out_ccamisc_init_failed;
+
+ rc = zcrypt_ep11misc_init();
+ if (rc)
+ goto out_ep11misc_init_failed;
/* Register the request sprayer. */
rc = misc_register(&zcrypt_misc_device);
@@ -1832,27 +1999,30 @@ int __init zcrypt_api_init(void)
return 0;
out_misc_register_failed:
-#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
+ zcrypt_ep11misc_exit();
+out_ep11misc_init_failed:
+ zcrypt_ccamisc_exit();
+out_ccamisc_init_failed:
zcdn_exit();
-#endif
+out_zcdn_init_failed:
zcrypt_debug_exit();
out:
return rc;
}
-/**
+/*
* zcrypt_api_exit(): Module termination.
*
* The module termination code.
*/
void __exit zcrypt_api_exit(void)
{
-#ifdef CONFIG_ZCRYPT_MULTIDEVNODES
zcdn_exit();
-#endif
misc_deregister(&zcrypt_misc_device);
zcrypt_msgtype6_exit();
zcrypt_msgtype50_exit();
+ zcrypt_ccamisc_exit();
+ zcrypt_ep11misc_exit();
zcrypt_debug_exit();
}
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index af67a768a3fc..6ef8850a42df 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * Copyright IBM Corp. 2001, 2018
+ * Copyright IBM Corp. 2001, 2019
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
* Cornelia Huck <cornelia.huck@de.ibm.com>
@@ -29,6 +29,7 @@
#define ZCRYPT_CEX4 10
#define ZCRYPT_CEX5 11
#define ZCRYPT_CEX6 12
+#define ZCRYPT_CEX7 13
/**
* Large random numbers are pulled in 4096 byte chunks from the crypto cards
@@ -37,6 +38,15 @@
*/
#define ZCRYPT_RNG_BUFFER_SIZE 4096
+/**
+ * The zcrypt_wait_api_operational() function waits this
+ * many milliseconds for ap_wait_apqn_bindings_complete().
+ * Also on a cprb send failure with ENODEV the send functions
+ * trigger an ap bus rescan and wait this time in milliseconds
+ * for ap_wait_apqn_bindings_complete() before resending.
+ */
+#define ZCRYPT_WAIT_BINDINGS_COMPLETE_MS 30000
+
/*
* Identifier for Crypto Request Performance Index
*/
@@ -54,13 +64,34 @@ enum crypto_ops {
struct zcrypt_queue;
+/* struct to hold tracking information for a userspace request/response */
+struct zcrypt_track {
+ int again_counter; /* retry attempts counter */
+ int last_qid; /* last qid used */
+ int last_rc; /* last return code */
+};
+
+/* defines related to message tracking */
+#define TRACK_AGAIN_MAX 10
+#define TRACK_AGAIN_CARD_WEIGHT_PENALTY 1000
+#define TRACK_AGAIN_QUEUE_WEIGHT_PENALTY 10000
+
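Worked example of the penalty scheme above: if a retried request (tr->again_counter > 0) last ran on queue 04.0006, then when card 04 is considered again it competes with an effective weight of wgt + 1000 at card selection, and queue 0006 on that card competes with wgt + 1000 + 10000 at queue selection. Any other usable card or queue whose weight is within those penalties of the failing APQN therefore wins the comparison, so retries are steered away from the device that just returned EAGAIN.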
+/*
+ * xflags - to be used with zcrypt_send_cprb() and
+ * zcrypt_send_ep11_cprb() for the xflags parameter.
+ */
+#define ZCRYPT_XFLAG_USERSPACE 0x0001 /* data ptrs address userspace */
+#define ZCRYPT_XFLAG_NOMEMALLOC 0x0002 /* do not allocate memory via kmalloc */
+
struct zcrypt_ops {
- long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *);
+ long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *,
+ struct ap_message *);
long (*rsa_modexpo_crt)(struct zcrypt_queue *,
- struct ica_rsa_modexpo_crt *);
- long (*send_cprb)(struct zcrypt_queue *, struct ica_xcRB *,
+ struct ica_rsa_modexpo_crt *,
+ struct ap_message *);
+ long (*send_cprb)(bool userspace, struct zcrypt_queue *, struct ica_xcRB *,
struct ap_message *);
- long (*send_ep11_cprb)(struct zcrypt_queue *, struct ep11_urb *,
+ long (*send_ep11_cprb)(bool userspace, struct zcrypt_queue *, struct ep11_urb *,
struct ap_message *);
long (*rng)(struct zcrypt_queue *, char *, struct ap_message *);
struct list_head list; /* zcrypt ops list. */
@@ -81,7 +112,7 @@ struct zcrypt_card {
int min_mod_size; /* Min number of bits. */
int max_mod_size; /* Max number of bits. */
int max_exp_bit_length;
- int speed_rating[NUM_OPS]; /* Speed idx of crypto ops. */
+ const int *speed_rating; /* Speed idx of crypto ops. */
atomic_t load; /* Utilization of the crypto device */
int request_count; /* # current requests. */
@@ -106,9 +137,10 @@ struct zcrypt_queue {
extern atomic_t zcrypt_rescan_req;
extern spinlock_t zcrypt_list_lock;
-extern int zcrypt_device_count;
extern struct list_head zcrypt_card_list;
+extern unsigned int zcrypt_mempool_threshold;
+
#define for_each_zcrypt_card(_zc) \
list_for_each_entry(_zc, &zcrypt_card_list, list)
@@ -121,9 +153,6 @@ void zcrypt_card_get(struct zcrypt_card *);
int zcrypt_card_put(struct zcrypt_card *);
int zcrypt_card_register(struct zcrypt_card *);
void zcrypt_card_unregister(struct zcrypt_card *);
-struct zcrypt_card *zcrypt_card_get_best(unsigned int *,
- unsigned int, unsigned int);
-void zcrypt_card_put_best(struct zcrypt_card *, unsigned int);
struct zcrypt_queue *zcrypt_queue_alloc(size_t);
void zcrypt_queue_free(struct zcrypt_queue *);
@@ -131,9 +160,7 @@ void zcrypt_queue_get(struct zcrypt_queue *);
int zcrypt_queue_put(struct zcrypt_queue *);
int zcrypt_queue_register(struct zcrypt_queue *);
void zcrypt_queue_unregister(struct zcrypt_queue *);
-void zcrypt_queue_force_online(struct zcrypt_queue *, int);
-struct zcrypt_queue *zcrypt_queue_get_best(unsigned int, unsigned int);
-void zcrypt_queue_put_best(struct zcrypt_queue *, unsigned int);
+bool zcrypt_queue_force_online(struct zcrypt_queue *zq, int online);
int zcrypt_rng_device_add(void);
void zcrypt_rng_device_remove(void);
@@ -143,7 +170,35 @@ void zcrypt_msgtype_unregister(struct zcrypt_ops *);
struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
int zcrypt_api_init(void);
void zcrypt_api_exit(void);
-long zcrypt_send_cprb(struct ica_xcRB *xcRB);
-void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus);
+long zcrypt_send_cprb(struct ica_xcRB *xcRB, u32 xflags);
+long zcrypt_send_ep11_cprb(struct ep11_urb *urb, u32 xflags);
+void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus,
+ int maxcard, int maxqueue);
+int zcrypt_device_status_ext(int card, int queue,
+ struct zcrypt_device_status_ext *devstatus);
+
+int zcrypt_wait_api_operational(void);
+
+static inline unsigned long z_copy_from_user(bool userspace,
+ void *to,
+ const void __user *from,
+ unsigned long n)
+{
+ if (likely(userspace))
+ return copy_from_user(to, from, n);
+ memcpy(to, (void __force *)from, n);
+ return 0;
+}
+
+static inline unsigned long z_copy_to_user(bool userspace,
+ void __user *to,
+ const void *from,
+ unsigned long n)
+{
+ if (likely(userspace))
+ return copy_to_user(to, from, n);
+ memcpy((void __force *)to, from, n);
+ return 0;
+}
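A small sketch of the intended use of these helpers (hypothetical function; the real users are the msgtype send paths that now take a userspace flag):

/* Hypothetical copy-in step of a send path that serves both the
 * ioctl entry (userspace == true, real __user pointers) and
 * in-kernel callers (userspace == false, kernel pointers).
 */
static long fetch_request_example(bool userspace, void *kbuf,
				  const void __user *src, unsigned long len)
{
	if (z_copy_from_user(userspace, kbuf, src, len))
		return -EFAULT;
	return 0;
}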
#endif /* _ZCRYPT_API_H_ */
diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c
index d4f35a183c15..6dea702a5cac 100644
--- a/drivers/s390/crypto/zcrypt_card.c
+++ b/drivers/s390/crypto/zcrypt_card.c
@@ -11,6 +11,7 @@
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -18,7 +19,6 @@
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
@@ -39,9 +39,9 @@
static ssize_t type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct zcrypt_card *zc = to_ap_card(dev)->private;
+ struct zcrypt_card *zc = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", zc->type_string);
+ return sysfs_emit(buf, "%s\n", zc->type_string);
}
static DEVICE_ATTR_RO(type);
@@ -50,31 +50,63 @@ static ssize_t online_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct zcrypt_card *zc = to_ap_card(dev)->private;
+ struct zcrypt_card *zc = dev_get_drvdata(dev);
+ struct ap_card *ac = to_ap_card(dev);
+ int online = ac->config && !ac->chkstop && zc->online ? 1 : 0;
- return snprintf(buf, PAGE_SIZE, "%d\n", zc->online);
+ return sysfs_emit(buf, "%d\n", online);
}
static ssize_t online_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct zcrypt_card *zc = to_ap_card(dev)->private;
+ struct zcrypt_card *zc = dev_get_drvdata(dev);
+ struct ap_card *ac = to_ap_card(dev);
struct zcrypt_queue *zq;
- int online, id;
+ int online, id, i = 0, maxzqs = 0;
+ struct zcrypt_queue **zq_uelist = NULL;
if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
return -EINVAL;
+ if (online && (!ac->config || ac->chkstop))
+ return -ENODEV;
+
zc->online = online;
id = zc->card->id;
- ZCRYPT_DBF(DBF_INFO, "card=%02x online=%d\n", id, online);
+ ZCRYPT_DBF_INFO("%s card=%02x online=%d\n", __func__, id, online);
+
+ ap_send_online_uevent(&ac->ap_dev, online);
spin_lock(&zcrypt_list_lock);
+ /*
+ * As we are in atomic context here, directly sending uevents
+ * does not work. So collect the zqueues in a dynamic array
+	 * and process them after the zcrypt_list_lock release. By getting
+	 * and putting the zqueue objects we make sure they still exist
+	 * after the lock has been released.
+ */
+ list_for_each_entry(zq, &zc->zqueues, list)
+ maxzqs++;
+ if (maxzqs > 0)
+ zq_uelist = kcalloc(maxzqs + 1, sizeof(*zq_uelist), GFP_ATOMIC);
list_for_each_entry(zq, &zc->zqueues, list)
- zcrypt_queue_force_online(zq, online);
+ if (zcrypt_queue_force_online(zq, online))
+ if (zq_uelist) {
+ zcrypt_queue_get(zq);
+ zq_uelist[i++] = zq;
+ }
spin_unlock(&zcrypt_list_lock);
+ if (zq_uelist) {
+ for (i = 0; zq_uelist[i]; i++) {
+ zq = zq_uelist[i];
+ ap_send_online_uevent(&zq->queue->ap_dev, online);
+ zcrypt_queue_put(zq);
+ }
+ kfree(zq_uelist);
+ }
+
return count;
}
@@ -84,9 +116,9 @@ static ssize_t load_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct zcrypt_card *zc = to_ap_card(dev)->private;
+ struct zcrypt_card *zc = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zc->load));
+ return sysfs_emit(buf, "%d\n", atomic_read(&zc->load));
}
static DEVICE_ATTR_RO(load);
@@ -106,7 +138,7 @@ struct zcrypt_card *zcrypt_card_alloc(void)
{
struct zcrypt_card *zc;
- zc = kzalloc(sizeof(struct zcrypt_card), GFP_KERNEL);
+ zc = kzalloc(sizeof(*zc), GFP_KERNEL);
if (!zc)
return NULL;
INIT_LIST_HEAD(&zc->list);
@@ -151,18 +183,22 @@ int zcrypt_card_register(struct zcrypt_card *zc)
{
int rc;
- rc = sysfs_create_group(&zc->card->ap_dev.device.kobj,
- &zcrypt_card_attr_group);
- if (rc)
- return rc;
-
spin_lock(&zcrypt_list_lock);
list_add_tail(&zc->list, &zcrypt_card_list);
spin_unlock(&zcrypt_list_lock);
zc->online = 1;
- ZCRYPT_DBF(DBF_INFO, "card=%02x register online=1\n", zc->card->id);
+ ZCRYPT_DBF_INFO("%s card=%02x register online=1\n",
+ __func__, zc->card->id);
+
+ rc = sysfs_create_group(&zc->card->ap_dev.device.kobj,
+ &zcrypt_card_attr_group);
+ if (rc) {
+ spin_lock(&zcrypt_list_lock);
+ list_del_init(&zc->list);
+ spin_unlock(&zcrypt_list_lock);
+ }
return rc;
}
@@ -176,12 +212,14 @@ EXPORT_SYMBOL(zcrypt_card_register);
*/
void zcrypt_card_unregister(struct zcrypt_card *zc)
{
- ZCRYPT_DBF(DBF_INFO, "card=%02x unregister\n", zc->card->id);
+ ZCRYPT_DBF_INFO("%s card=%02x unregister\n",
+ __func__, zc->card->id);
spin_lock(&zcrypt_list_lock);
list_del_init(&zc->list);
spin_unlock(&zcrypt_list_lock);
sysfs_remove_group(&zc->card->ap_dev.device.kobj,
&zcrypt_card_attr_group);
+ zcrypt_card_put(zc);
}
EXPORT_SYMBOL(zcrypt_card_unregister);
diff --git a/drivers/s390/crypto/zcrypt_cca_key.h b/drivers/s390/crypto/zcrypt_cca_key.h
index f09bb850763b..f5907b67db29 100644
--- a/drivers/s390/crypto/zcrypt_cca_key.h
+++ b/drivers/s390/crypto/zcrypt_cca_key.h
@@ -11,7 +11,7 @@
#ifndef _ZCRYPT_CCA_KEY_H_
#define _ZCRYPT_CCA_KEY_H_
-struct T6_keyBlock_hdr {
+struct t6_keyblock_hdr {
unsigned short blen;
unsigned short ulen;
unsigned short flags;
@@ -63,7 +63,7 @@ struct cca_public_sec {
* complement of the residue modulo 8 of the sum of
* (p_len + q_len + dp_len + dq_len + u_len).
*/
-struct cca_pvt_ext_CRT_sec {
+struct cca_pvt_ext_crt_sec {
unsigned char section_identifier;
unsigned char version;
unsigned short section_length;
@@ -89,10 +89,7 @@ struct cca_pvt_ext_CRT_sec {
#define CCA_PVT_EXT_CRT_SEC_FMT_CL 0x40
/**
- * Set up private key fields of a type6 MEX message. The _pad variant
- * strips leading zeroes from the b_key.
- * Note that all numerics in the key token are big-endian,
- * while the entries in the key block header are little-endian.
+ * Set up private key fields of a type6 MEX message.
*
* @mex: pointer to user input data
* @p: pointer to memory area for the key
@@ -108,13 +105,12 @@ static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p)
.section_identifier = 0x04,
};
struct {
- struct T6_keyBlock_hdr t6_hdr;
- struct cca_token_hdr pubHdr;
- struct cca_public_sec pubSec;
- char exponent[0];
+ struct t6_keyblock_hdr t6_hdr;
+ struct cca_token_hdr pubhdr;
+ struct cca_public_sec pubsec;
+ char exponent[];
} __packed *key = p;
- unsigned char *temp;
- int i;
+ unsigned char *ptr;
/*
* The inputdatalength was a selection criteria in the dispatching
@@ -127,41 +123,33 @@ static inline int zcrypt_type6_mex_key_en(struct ica_rsa_modexpo *mex, void *p)
memset(key, 0, sizeof(*key));
- key->pubHdr = static_pub_hdr;
- key->pubSec = static_pub_sec;
+ key->pubhdr = static_pub_hdr;
+ key->pubsec = static_pub_sec;
/* key parameter block */
- temp = key->exponent;
- if (copy_from_user(temp, mex->b_key, mex->inputdatalength))
+ ptr = key->exponent;
+ if (copy_from_user(ptr, mex->b_key, mex->inputdatalength))
return -EFAULT;
- /* Strip leading zeroes from b_key. */
- for (i = 0; i < mex->inputdatalength; i++)
- if (temp[i])
- break;
- if (i >= mex->inputdatalength)
- return -EINVAL;
- memmove(temp, temp + i, mex->inputdatalength - i);
- temp += mex->inputdatalength - i;
+ ptr += mex->inputdatalength;
/* modulus */
- if (copy_from_user(temp, mex->n_modulus, mex->inputdatalength))
+ if (copy_from_user(ptr, mex->n_modulus, mex->inputdatalength))
return -EFAULT;
- key->pubSec.modulus_bit_len = 8 * mex->inputdatalength;
- key->pubSec.modulus_byte_len = mex->inputdatalength;
- key->pubSec.exponent_len = mex->inputdatalength - i;
- key->pubSec.section_length = sizeof(key->pubSec) +
- 2*mex->inputdatalength - i;
- key->pubHdr.token_length =
- key->pubSec.section_length + sizeof(key->pubHdr);
- key->t6_hdr.ulen = key->pubHdr.token_length + 4;
- key->t6_hdr.blen = key->pubHdr.token_length + 6;
- return sizeof(*key) + 2*mex->inputdatalength - i;
+ key->pubsec.modulus_bit_len = 8 * mex->inputdatalength;
+ key->pubsec.modulus_byte_len = mex->inputdatalength;
+ key->pubsec.exponent_len = mex->inputdatalength;
+ key->pubsec.section_length = sizeof(key->pubsec) +
+ 2 * mex->inputdatalength;
+ key->pubhdr.token_length =
+ key->pubsec.section_length + sizeof(key->pubhdr);
+ key->t6_hdr.ulen = key->pubhdr.token_length + 4;
+ key->t6_hdr.blen = key->pubhdr.token_length + 6;
+
+ return sizeof(*key) + 2 * mex->inputdatalength;
}
/**
* Set up private key fields of a type6 CRT message.
- * Note that all numerics in the key token are big-endian,
- * while the entries in the key block header are little-endian.
*
* @mex: pointer to user input data
* @p: pointer to memory area for the key
@@ -177,10 +165,10 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p)
};
static char pk_exponent[3] = { 0x01, 0x00, 0x01 };
struct {
- struct T6_keyBlock_hdr t6_hdr;
+ struct t6_keyblock_hdr t6_hdr;
struct cca_token_hdr token;
- struct cca_pvt_ext_CRT_sec pvt;
- char key_parts[0];
+ struct cca_pvt_ext_crt_sec pvt;
+ char key_parts[];
} __packed *key = p;
struct cca_public_sec *pub;
int short_len, long_len, pad_len, key_len, size;
@@ -198,8 +186,8 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p)
short_len = (crt->inputdatalength + 1) / 2;
long_len = short_len + 8;
- pad_len = -(3*long_len + 2*short_len) & 7;
- key_len = 3*long_len + 2*short_len + pad_len + crt->inputdatalength;
+ pad_len = -(3 * long_len + 2 * short_len) & 7;
+ key_len = 3 * long_len + 2 * short_len + pad_len + crt->inputdatalength;
size = sizeof(*key) + key_len + sizeof(*pub) + 3;
/* parameter block.key block */
@@ -223,15 +211,15 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p)
/* key parts */
if (copy_from_user(key->key_parts, crt->np_prime, long_len) ||
copy_from_user(key->key_parts + long_len,
- crt->nq_prime, short_len) ||
+ crt->nq_prime, short_len) ||
copy_from_user(key->key_parts + long_len + short_len,
- crt->bp_key, long_len) ||
- copy_from_user(key->key_parts + 2*long_len + short_len,
- crt->bq_key, short_len) ||
- copy_from_user(key->key_parts + 2*long_len + 2*short_len,
- crt->u_mult_inv, long_len))
+ crt->bp_key, long_len) ||
+ copy_from_user(key->key_parts + 2 * long_len + short_len,
+ crt->bq_key, short_len) ||
+ copy_from_user(key->key_parts + 2 * long_len + 2 * short_len,
+ crt->u_mult_inv, long_len))
return -EFAULT;
- memset(key->key_parts + 3*long_len + 2*short_len + pad_len,
+ memset(key->key_parts + 3 * long_len + 2 * short_len + pad_len,
0xff, crt->inputdatalength);
pub = (struct cca_public_sec *)(key->key_parts + key_len);
*pub = static_cca_pub_sec;
@@ -241,7 +229,8 @@ static inline int zcrypt_type6_crt_key(struct ica_rsa_modexpo_crt *crt, void *p)
* section. So, an arbitrary public exponent of 0x010001 will be
* used.
*/
- memcpy((char *) (pub + 1), pk_exponent, 3);
+ memcpy((char *)(pub + 1), pk_exponent, 3);
+
return size;
}
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
new file mode 100644
index 000000000000..573bad1d6d86
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -0,0 +1,1813 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright IBM Corp. 2019
+ * Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ * Ingo Franzki <ifranzki@linux.ibm.com>
+ *
+ * Collection of CCA misc functions used by zcrypt and pkey
+ */
+
+#define pr_fmt(fmt) "zcrypt: " fmt
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_debug.h"
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_ccamisc.h"
+
+/* Size of parameter block used for all cca requests/replies */
+#define PARMBSIZE 512
+
+/* Size of vardata block used for some of the cca requests/replies */
+#define VARDATASIZE 4096
+
+/*
+ * Cprb memory pool held for urgent cases where no memory
+ * can be allocated via kmalloc. This pool is only used
+ * when alloc_and_prep_cprbmem() is called with the xflag
+ * ZCRYPT_XFLAG_NOMEMALLOC. The cprb memory needs to hold
+ * space for request AND reply!
+ */
+#define CPRB_MEMPOOL_ITEM_SIZE (16 * 1024)
+static mempool_t *cprb_mempool;
+
+/*
+ * This is pre-allocated memory for the device status array
+ * used within the findcard() functions. It is currently
+ * 128 * 128 * 4 bytes = 64 KB in size. Usage of this memory is
+ * controlled via dev_status_mem_mutex. Needs adaptation if more
+ * than 128 cards or domains are to be supported.
+ */
+#define ZCRYPT_DEV_STATUS_CARD_MAX 128
+#define ZCRYPT_DEV_STATUS_QUEUE_MAX 128
+#define ZCRYPT_DEV_STATUS_ENTRIES (ZCRYPT_DEV_STATUS_CARD_MAX * \
+ ZCRYPT_DEV_STATUS_QUEUE_MAX)
+#define ZCRYPT_DEV_STATUS_EXT_SIZE (ZCRYPT_DEV_STATUS_ENTRIES * \
+ sizeof(struct zcrypt_device_status_ext))
+static void *dev_status_mem;
+static DEFINE_MUTEX(dev_status_mem_mutex);
+
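The intended access pattern for this pre-allocated array looks roughly like this (sketch; assumes dev_status_mem has already been allocated by a first findcard invocation, and note that zcrypt_device_status_mask_ext() no longer zeroes the array itself):

/* Sketch: serialized use of the shared device status memory */
static void scan_dev_status_example(void)
{
	struct zcrypt_device_status_ext *stats;

	mutex_lock(&dev_status_mem_mutex);
	stats = (struct zcrypt_device_status_ext *)dev_status_mem;
	memset(stats, 0, ZCRYPT_DEV_STATUS_EXT_SIZE);
	zcrypt_device_status_mask_ext(stats, ZCRYPT_DEV_STATUS_CARD_MAX,
				      ZCRYPT_DEV_STATUS_QUEUE_MAX);
	/* ... walk the CARD_MAX * QUEUE_MAX entries ... */
	mutex_unlock(&dev_status_mem_mutex);
}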
+/*
+ * Simple check if the token is a valid CCA secure AES data key
+ * token. If keybitsize is given, the bitsize of the key is
+ * also checked. Returns 0 on success or errno value on failure.
+ */
+int cca_check_secaeskeytoken(debug_info_t *dbg, int dbflvl,
+ const u8 *token, int keybitsize)
+{
+ struct secaeskeytoken *t = (struct secaeskeytoken *)token;
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+ if (t->type != TOKTYPE_CCA_INTERNAL) {
+ if (dbg)
+ DBF("%s token check failed, type 0x%02x != 0x%02x\n",
+ __func__, (int)t->type, TOKTYPE_CCA_INTERNAL);
+ return -EINVAL;
+ }
+ if (t->version != TOKVER_CCA_AES) {
+ if (dbg)
+ DBF("%s token check failed, version 0x%02x != 0x%02x\n",
+ __func__, (int)t->version, TOKVER_CCA_AES);
+ return -EINVAL;
+ }
+ if (keybitsize > 0 && t->bitsize != keybitsize) {
+ if (dbg)
+ DBF("%s token check failed, bitsize %d != %d\n",
+ __func__, (int)t->bitsize, keybitsize);
+ return -EINVAL;
+ }
+
+#undef DBF
+
+ return 0;
+}
+EXPORT_SYMBOL(cca_check_secaeskeytoken);
+
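Illustrative use of the checker (hypothetical caller; zcrypt_dbf_info and DBF_ERR are the debug facility already used throughout this file):

/* Hypothetical validation of a 256-bit AES DATA secure key blob */
static bool seckey256_is_valid(const u8 *seckey)
{
	return cca_check_secaeskeytoken(zcrypt_dbf_info, DBF_ERR,
					seckey, 256) == 0;
}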
+/*
+ * Simple check if the token is a valid CCA secure AES cipher key
+ * token. If keybitsize is given, the bitsize of the key is
+ * also checked. If checkcpacfexport is enabled, the key is also
+ * checked for the export flag to allow CPACF export.
+ * Returns 0 on success or errno value on failure.
+ */
+int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl,
+ const u8 *token, int keybitsize,
+ int checkcpacfexport)
+{
+ struct cipherkeytoken *t = (struct cipherkeytoken *)token;
+ bool keybitsizeok = true;
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+ if (t->type != TOKTYPE_CCA_INTERNAL) {
+ if (dbg)
+ DBF("%s token check failed, type 0x%02x != 0x%02x\n",
+ __func__, (int)t->type, TOKTYPE_CCA_INTERNAL);
+ return -EINVAL;
+ }
+ if (t->version != TOKVER_CCA_VLSC) {
+ if (dbg)
+ DBF("%s token check failed, version 0x%02x != 0x%02x\n",
+ __func__, (int)t->version, TOKVER_CCA_VLSC);
+ return -EINVAL;
+ }
+ if (t->algtype != 0x02) {
+ if (dbg)
+ DBF("%s token check failed, algtype 0x%02x != 0x02\n",
+ __func__, (int)t->algtype);
+ return -EINVAL;
+ }
+ if (t->keytype != 0x0001) {
+ if (dbg)
+ DBF("%s token check failed, keytype 0x%04x != 0x0001\n",
+ __func__, (int)t->keytype);
+ return -EINVAL;
+ }
+ if (t->plfver != 0x00 && t->plfver != 0x01) {
+ if (dbg)
+ DBF("%s token check failed, unknown plfver 0x%02x\n",
+ __func__, (int)t->plfver);
+ return -EINVAL;
+ }
+ if (t->wpllen != 512 && t->wpllen != 576 && t->wpllen != 640) {
+ if (dbg)
+ DBF("%s token check failed, unknown wpllen %d\n",
+ __func__, (int)t->wpllen);
+ return -EINVAL;
+ }
+ if (keybitsize > 0) {
+ switch (keybitsize) {
+ case 128:
+ if (t->wpllen != (t->plfver ? 640 : 512))
+ keybitsizeok = false;
+ break;
+ case 192:
+ if (t->wpllen != (t->plfver ? 640 : 576))
+ keybitsizeok = false;
+ break;
+ case 256:
+ if (t->wpllen != 640)
+ keybitsizeok = false;
+ break;
+ default:
+ keybitsizeok = false;
+ break;
+ }
+ if (!keybitsizeok) {
+ if (dbg)
+ DBF("%s token check failed, bitsize %d\n",
+ __func__, keybitsize);
+ return -EINVAL;
+ }
+ }
+ if (checkcpacfexport && !(t->kmf1 & KMF1_XPRT_CPAC)) {
+ if (dbg)
+ DBF("%s token check failed, XPRT_CPAC bit is 0\n",
+ __func__);
+ return -EINVAL;
+ }
+
+#undef DBF
+
+ return 0;
+}
+EXPORT_SYMBOL(cca_check_secaescipherkey);
+
+/*
+ * Simple check if the token is a valid CCA secure ECC private
+ * key token. Returns 0 on success or errno value on failure.
+ */
+int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl,
+ const u8 *token, u32 keysize,
+ int checkcpacfexport)
+{
+ struct eccprivkeytoken *t = (struct eccprivkeytoken *)token;
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+ if (t->type != TOKTYPE_CCA_INTERNAL_PKA) {
+ if (dbg)
+ DBF("%s token check failed, type 0x%02x != 0x%02x\n",
+ __func__, (int)t->type, TOKTYPE_CCA_INTERNAL_PKA);
+ return -EINVAL;
+ }
+ if (t->len > keysize) {
+ if (dbg)
+ DBF("%s token check failed, len %d > keysize %u\n",
+ __func__, (int)t->len, keysize);
+ return -EINVAL;
+ }
+ if (t->secid != 0x20) {
+ if (dbg)
+ DBF("%s token check failed, secid 0x%02x != 0x20\n",
+ __func__, (int)t->secid);
+ return -EINVAL;
+ }
+ if (checkcpacfexport && !(t->kutc & 0x01)) {
+ if (dbg)
+ DBF("%s token check failed, XPRTCPAC bit is 0\n",
+ __func__);
+ return -EINVAL;
+ }
+
+#undef DBF
+
+ return 0;
+}
+EXPORT_SYMBOL(cca_check_sececckeytoken);
+
+/*
+ * Allocate consecutive memory for request CPRB, request param
+ * block, reply CPRB and reply param block and fill in values
+ * for the common fields. Returns 0 on success or errno value
+ * on failure.
+ */
+static int alloc_and_prep_cprbmem(size_t paramblen,
+ u8 **p_cprb_mem,
+ struct CPRBX **p_req_cprb,
+ struct CPRBX **p_rep_cprb,
+ u32 xflags)
+{
+ u8 *cprbmem = NULL;
+ size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen;
+ size_t len = 2 * cprbplusparamblen;
+ struct CPRBX *preqcblk, *prepcblk;
+
+ /*
+ * allocate consecutive memory for request CPRB, request param
+ * block, reply CPRB and reply param block
+ */
+ if (xflags & ZCRYPT_XFLAG_NOMEMALLOC) {
+ if (len <= CPRB_MEMPOOL_ITEM_SIZE)
+ cprbmem = mempool_alloc_preallocated(cprb_mempool);
+ } else {
+ cprbmem = kmalloc(len, GFP_KERNEL);
+ }
+ if (!cprbmem)
+ return -ENOMEM;
+ memset(cprbmem, 0, len);
+
+ preqcblk = (struct CPRBX *)cprbmem;
+ prepcblk = (struct CPRBX *)(cprbmem + cprbplusparamblen);
+
+ /* fill request cprb struct */
+ preqcblk->cprb_len = sizeof(struct CPRBX);
+ preqcblk->cprb_ver_id = 0x02;
+ memcpy(preqcblk->func_id, "T2", 2);
+ preqcblk->rpl_msgbl = cprbplusparamblen;
+ if (paramblen) {
+ preqcblk->req_parmb =
+ ((u8 __user *)preqcblk) + sizeof(struct CPRBX);
+ preqcblk->rpl_parmb =
+ ((u8 __user *)prepcblk) + sizeof(struct CPRBX);
+ }
+
+ *p_cprb_mem = cprbmem;
+ *p_req_cprb = preqcblk;
+ *p_rep_cprb = prepcblk;
+
+ return 0;
+}
+
+/*
+ * Free the cprb memory allocated with the function above.
+ * If scrub is true, the memory is filled
+ * with zeros before freeing (useful if there was some
+ * clear key material in there).
+ */
+static void free_cprbmem(void *mem, size_t paramblen, bool scrub, u32 xflags)
+{
+ if (mem && scrub)
+ memzero_explicit(mem, 2 * (sizeof(struct CPRBX) + paramblen));
+
+ if (xflags & ZCRYPT_XFLAG_NOMEMALLOC)
+ mempool_free(mem, cprb_mempool);
+ else
+ kfree(mem);
+}
+
+/*
+ * Helper function to prepare the xcrb struct
+ */
+static inline void prep_xcrb(struct ica_xcRB *pxcrb,
+ u16 cardnr,
+ struct CPRBX *preqcblk,
+ struct CPRBX *prepcblk)
+{
+ memset(pxcrb, 0, sizeof(*pxcrb));
+ pxcrb->agent_ID = 0x4341; /* 'CA' */
+ pxcrb->user_defined = (cardnr == 0xFFFF ? AUTOSELECT : cardnr);
+ pxcrb->request_control_blk_length =
+ preqcblk->cprb_len + preqcblk->req_parml;
+ pxcrb->request_control_blk_addr = (void __user *)preqcblk;
+ pxcrb->reply_control_blk_length = preqcblk->rpl_msgbl;
+ pxcrb->reply_control_blk_addr = (void __user *)prepcblk;
+}
+
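Putting the three helpers together, the request lifecycle in the cca_* functions below follows this pattern (condensed sketch of what cca_genseckey() does; the param block filling is elided):

/* Condensed request/reply round trip using the helpers above */
static int cprb_roundtrip_example(u16 cardnr, u16 domain, u32 xflags)
{
	struct CPRBX *preqcblk, *prepcblk;
	struct ica_xcRB xcrb;
	u8 *mem;
	int rc;

	rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem,
				    &preqcblk, &prepcblk, xflags);
	if (rc)
		return rc;

	preqcblk->domain = domain;
	/* ... fill the request param block, set preqcblk->req_parml ... */

	prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
	rc = zcrypt_send_cprb(&xcrb, xflags);

	/* scrub: pass true when clear key material was in the blocks */
	free_cprbmem(mem, PARMBSIZE, false, xflags);
	return rc;
}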
+/*
+ * Generate (random) CCA AES DATA secure key.
+ */
+int cca_genseckey(u16 cardnr, u16 domain,
+ u32 keybitsize, u8 *seckey, u32 xflags)
+{
+ int i, rc, keysize;
+ int seckeysize;
+ u8 *mem, *ptr;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct kgreqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct lv1 {
+ u16 len;
+ char key_form[8];
+ char key_length[8];
+ char key_type1[8];
+ char key_type2[8];
+ } lv1;
+ struct lv2 {
+ u16 len;
+ struct keyid {
+ u16 len;
+ u16 attr;
+ u8 data[SECKEYBLOBSIZE];
+ } keyid[6];
+ } lv2;
+ } __packed * preqparm;
+ struct kgrepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct lv3 {
+ u16 len;
+ u16 keyblocklen;
+ struct {
+ u16 toklen;
+ u16 tokattr;
+ u8 tok[];
+ /* ... some more data ... */
+ } keyblock;
+ } lv3;
+ } __packed * prepparm;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem,
+ &preqcblk, &prepcblk, xflags);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with KG request */
+ preqparm = (struct kgreqparm __force *)preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "KG", 2);
+ preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
+ preqparm->lv1.len = sizeof(struct lv1);
+ memcpy(preqparm->lv1.key_form, "OP ", 8);
+ switch (keybitsize) {
+ case PKEY_SIZE_AES_128:
+ case PKEY_KEYTYPE_AES_128: /* older ioctls used this */
+ keysize = 16;
+ memcpy(preqparm->lv1.key_length, "KEYLN16 ", 8);
+ break;
+ case PKEY_SIZE_AES_192:
+ case PKEY_KEYTYPE_AES_192: /* older ioctls used this */
+ keysize = 24;
+ memcpy(preqparm->lv1.key_length, "KEYLN24 ", 8);
+ break;
+ case PKEY_SIZE_AES_256:
+ case PKEY_KEYTYPE_AES_256: /* older ioctls used this */
+ keysize = 32;
+ memcpy(preqparm->lv1.key_length, "KEYLN32 ", 8);
+ break;
+ default:
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ rc = -EINVAL;
+ goto out;
+ }
+ memcpy(preqparm->lv1.key_type1, "AESDATA ", 8);
+ preqparm->lv2.len = sizeof(struct lv2);
+ for (i = 0; i < 6; i++) {
+ preqparm->lv2.keyid[i].len = sizeof(struct keyid);
+ preqparm->lv2.keyid[i].attr = (i == 2 ? 0x30 : 0x10);
+ }
+ preqcblk->req_parml = sizeof(struct kgreqparm);
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = zcrypt_send_cprb(&xcrb, xflags);
+ if (rc) {
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n",
+ __func__, (int)cardnr, (int)domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ ZCRYPT_DBF_ERR("%s secure key generate failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* process response cprb param block */
+ ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *)ptr;
+ prepparm = (struct kgrepparm *)ptr;
+
+ /* check length of the returned secure key token */
+ seckeysize = prepparm->lv3.keyblock.toklen
+ - sizeof(prepparm->lv3.keyblock.toklen)
+ - sizeof(prepparm->lv3.keyblock.tokattr);
+ if (seckeysize != SECKEYBLOBSIZE) {
+ ZCRYPT_DBF_ERR("%s secure token size mismatch %d != %d bytes\n",
+ __func__, seckeysize, SECKEYBLOBSIZE);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* check secure key token */
+ rc = cca_check_secaeskeytoken(zcrypt_dbf_info, DBF_ERR,
+ prepparm->lv3.keyblock.tok, 8 * keysize);
+ if (rc) {
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the generated secure key token */
+ memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
+
+out:
+ free_cprbmem(mem, PARMBSIZE, false, xflags);
+ return rc;
+}
+EXPORT_SYMBOL(cca_genseckey);
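+
+/*
+ * Illustrative caller sketch, not part of this patch: generate a
+ * random AES 256 DATA secure key on a given APQN. The seckey buffer
+ * must hold SECKEYBLOBSIZE bytes; passing xflags 0 (no special
+ * flags) is an assumption here.
+ */
+#if 0
+static int example_gen_aes256_seckey(u16 card, u16 dom, u8 *seckey)
+{
+	/* seckey points to SECKEYBLOBSIZE bytes of buffer */
+	return cca_genseckey(card, dom, PKEY_SIZE_AES_256, seckey, 0);
+}
+#endif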
+
+/*
+ * Generate a CCA AES DATA secure key with a given clear key value.
+ */
+int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
+ const u8 *clrkey, u8 *seckey, u32 xflags)
+{
+ int rc, keysize, seckeysize;
+ u8 *mem, *ptr;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct cmreqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ char rule_array[8];
+ struct lv1 {
+ u16 len;
+ u8 clrkey[];
+ } lv1;
+ /* followed by struct lv2 */
+ } __packed * preqparm;
+ struct lv2 {
+ u16 len;
+ struct keyid {
+ u16 len;
+ u16 attr;
+ u8 data[SECKEYBLOBSIZE];
+ } keyid;
+ } __packed * plv2;
+ struct cmrepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct lv3 {
+ u16 len;
+ u16 keyblocklen;
+ struct {
+ u16 toklen;
+ u16 tokattr;
+ u8 tok[];
+ /* ... some more data ... */
+ } keyblock;
+ } lv3;
+ } __packed * prepparm;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem,
+ &preqcblk, &prepcblk, xflags);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with CM request */
+ preqparm = (struct cmreqparm __force *)preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "CM", 2);
+ memcpy(preqparm->rule_array, "AES ", 8);
+ preqparm->rule_array_len =
+ sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
+ switch (keybitsize) {
+ case PKEY_SIZE_AES_128:
+ case PKEY_KEYTYPE_AES_128: /* older ioctls used this */
+ keysize = 16;
+ break;
+ case PKEY_SIZE_AES_192:
+ case PKEY_KEYTYPE_AES_192: /* older ioctls used this */
+ keysize = 24;
+ break;
+ case PKEY_SIZE_AES_256:
+ case PKEY_KEYTYPE_AES_256: /* older ioctls used this */
+ keysize = 32;
+ break;
+ default:
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ rc = -EINVAL;
+ goto out;
+ }
+ preqparm->lv1.len = sizeof(struct lv1) + keysize;
+ memcpy(preqparm->lv1.clrkey, clrkey, keysize);
+ plv2 = (struct lv2 *)(((u8 *)preqparm) + sizeof(*preqparm) + keysize);
+ plv2->len = sizeof(struct lv2);
+ plv2->keyid.len = sizeof(struct keyid);
+ plv2->keyid.attr = 0x30;
+ preqcblk->req_parml = sizeof(*preqparm) + keysize + sizeof(*plv2);
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = zcrypt_send_cprb(&xcrb, xflags);
+ if (rc) {
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ ZCRYPT_DBF_ERR("%s clear key import failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* process response cprb param block */
+ ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *)ptr;
+ prepparm = (struct cmrepparm *)ptr;
+
+ /* check length of the returned secure key token */
+ seckeysize = prepparm->lv3.keyblock.toklen
+ - sizeof(prepparm->lv3.keyblock.toklen)
+ - sizeof(prepparm->lv3.keyblock.tokattr);
+ if (seckeysize != SECKEYBLOBSIZE) {
+ ZCRYPT_DBF_ERR("%s secure token size mismatch %d != %d bytes\n",
+ __func__, seckeysize, SECKEYBLOBSIZE);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* check secure key token */
+ rc = cca_check_secaeskeytoken(zcrypt_dbf_info, DBF_ERR,
+ prepparm->lv3.keyblock.tok, 8 * keysize);
+ if (rc) {
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the generated secure key token */
+ if (seckey)
+ memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
+
+out:
+ free_cprbmem(mem, PARMBSIZE, true, xflags);
+ return rc;
+}
+EXPORT_SYMBOL(cca_clr2seckey);
+
+/*
+ * Derive a protected key from a CCA AES DATA secure key.
+ */
+int cca_sec2protkey(u16 cardnr, u16 domain,
+ const u8 *seckey, u8 *protkey, u32 *protkeylen,
+ u32 *protkeytype, u32 xflags)
+{
+ int rc;
+ u8 *mem, *ptr;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct uskreqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct lv1 {
+ u16 len;
+ u16 attr_len;
+ u16 attr_flags;
+ } lv1;
+ struct lv2 {
+ u16 len;
+ u16 attr_len;
+ u16 attr_flags;
+ u8 token[]; /* cca secure key token */
+ } lv2;
+ } __packed * preqparm;
+ struct uskrepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct lv3 {
+ u16 len;
+ u16 attr_len;
+ u16 attr_flags;
+ struct cpacfkeyblock {
+ u8 version; /* version of this struct */
+ u8 flags[2];
+ u8 algo;
+ u8 form;
+ u8 pad1[3];
+ u16 len;
+ u8 key[64]; /* the key (len bytes) */
+ u16 keyattrlen;
+ u8 keyattr[32];
+ u8 pad2[1];
+ u8 vptype;
+ u8 vp[32]; /* verification pattern */
+ } ckb;
+ } lv3;
+ } __packed * prepparm;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem,
+ &preqcblk, &prepcblk, xflags);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with USK request */
+ preqparm = (struct uskreqparm __force *)preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "US", 2);
+ preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
+ preqparm->lv1.len = sizeof(struct lv1);
+ preqparm->lv1.attr_len = sizeof(struct lv1) - sizeof(preqparm->lv1.len);
+ preqparm->lv1.attr_flags = 0x0001;
+ preqparm->lv2.len = sizeof(struct lv2) + SECKEYBLOBSIZE;
+ preqparm->lv2.attr_len = sizeof(struct lv2)
+ - sizeof(preqparm->lv2.len) + SECKEYBLOBSIZE;
+ preqparm->lv2.attr_flags = 0x0000;
+ memcpy(preqparm->lv2.token, seckey, SECKEYBLOBSIZE);
+ preqcblk->req_parml = sizeof(struct uskreqparm) + SECKEYBLOBSIZE;
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = zcrypt_send_cprb(&xcrb, xflags);
+ if (rc) {
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ ZCRYPT_DBF_ERR("%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
+ if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
+ rc = -EBUSY;
+ else
+ rc = -EIO;
+ goto out;
+ }
+ if (prepcblk->ccp_rscode != 0) {
+ ZCRYPT_DBF_WARN("%s unwrap secure key warning, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
+ }
+
+ /* process response cprb param block */
+ ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *)ptr;
+ prepparm = (struct uskrepparm *)ptr;
+
+ /* check the returned keyblock */
+ if (prepparm->lv3.ckb.version != 0x01 &&
+ prepparm->lv3.ckb.version != 0x02) {
+ ZCRYPT_DBF_ERR("%s reply param keyblock version mismatch 0x%02x\n",
+ __func__, (int)prepparm->lv3.ckb.version);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the translated protected key */
+ switch (prepparm->lv3.ckb.len) {
+ case 16 + 32:
+ /* AES 128 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_128;
+ break;
+ case 24 + 32:
+ /* AES 192 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_192;
+ break;
+ case 32 + 32:
+ /* AES 256 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_256;
+ break;
+ default:
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keylen %d\n",
+ __func__, prepparm->lv3.ckb.len);
+ rc = -EIO;
+ goto out;
+ }
+ memcpy(protkey, prepparm->lv3.ckb.key, prepparm->lv3.ckb.len);
+ if (protkeylen)
+ *protkeylen = prepparm->lv3.ckb.len;
+
+out:
+ free_cprbmem(mem, PARMBSIZE, true, xflags);
+ return rc;
+}
+EXPORT_SYMBOL(cca_sec2protkey);
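+
+/*
+ * Illustrative note, not part of this patch: the 16+32/24+32/32+32
+ * length checks above reflect the protected key layout: the
+ * effective AES key followed by a 32 byte wrapping key verification
+ * pattern. The 64 byte caller buffer below is an assumption that
+ * covers all three AES variants.
+ */
+#if 0
+static int example_sec2prot(u16 card, u16 dom, const u8 *seckey)
+{
+	u8 protkey[32 + 32];	/* big enough for AES 256 key + vp */
+	u32 protkeylen, protkeytype;
+
+	return cca_sec2protkey(card, dom, seckey, protkey,
+			       &protkeylen, &protkeytype, 0);
+}
+#endif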
+
+/*
+ * AES cipher key skeleton created with CSNBKTB2 with these flags:
+ * INTERNAL, NO-KEY, AES, CIPHER, ANY-MODE, NOEX-SYM, NOEXAASY,
+ * NOEXUASY, XPRTCPAC, NOEX-RAW, NOEX-DES, NOEX-AES, NOEX-RSA
+ * used by cca_gencipherkey() and cca_clr2cipherkey().
+ */
+static const u8 aes_cipher_key_skeleton[] = {
+ 0x01, 0x00, 0x00, 0x38, 0x05, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
+ 0x00, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x02, 0x00, 0x01, 0x02, 0xc0, 0x00, 0xff,
+ 0x00, 0x03, 0x08, 0xc8, 0x00, 0x00, 0x00, 0x00 };
+#define SIZEOF_SKELETON (sizeof(aes_cipher_key_skeleton))
+
+/*
+ * Generate (random) CCA AES CIPHER secure key.
+ */
+int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, u32 *keybufsize, u32 xflags)
+{
+ int rc;
+ u8 *mem, *ptr;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct gkreqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ char rule_array[2 * 8];
+ struct {
+ u16 len;
+ u8 key_type_1[8];
+ u8 key_type_2[8];
+ u16 clear_key_bit_len;
+ u16 key_name_1_len;
+ u16 key_name_2_len;
+ u16 user_data_1_len;
+ u16 user_data_2_len;
+ /* u8 key_name_1[]; */
+ /* u8 key_name_2[]; */
+ /* u8 user_data_1[]; */
+ /* u8 user_data_2[]; */
+ } vud;
+ struct {
+ u16 len;
+ struct {
+ u16 len;
+ u16 flag;
+ /* u8 kek_id_1[]; */
+ } tlv1;
+ struct {
+ u16 len;
+ u16 flag;
+ /* u8 kek_id_2[]; */
+ } tlv2;
+ struct {
+ u16 len;
+ u16 flag;
+ u8 gen_key_id_1[SIZEOF_SKELETON];
+ } tlv3;
+ struct {
+ u16 len;
+ u16 flag;
+ /* u8 gen_key_id_1_label[]; */
+ } tlv4;
+ struct {
+ u16 len;
+ u16 flag;
+ /* u8 gen_key_id_2[]; */
+ } tlv5;
+ struct {
+ u16 len;
+ u16 flag;
+ /* u8 gen_key_id_2_label[]; */
+ } tlv6;
+ } kb;
+ } __packed * preqparm;
+ struct gkrepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct {
+ u16 len;
+ } vud;
+ struct {
+ u16 len;
+ struct {
+ u16 len;
+ u16 flag;
+ u8 gen_key[]; /* 120-136 bytes */
+ } tlv1;
+ } kb;
+ } __packed * prepparm;
+ struct cipherkeytoken *t;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem,
+ &preqcblk, &prepcblk, xflags);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+ preqcblk->req_parml = sizeof(struct gkreqparm);
+
+ /* prepare request param block with GK request */
+ preqparm = (struct gkreqparm __force *)preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "GK", 2);
+ preqparm->rule_array_len = sizeof(uint16_t) + 2 * 8;
+ memcpy(preqparm->rule_array, "AES OP ", 2 * 8);
+
+ /* prepare vud block */
+ preqparm->vud.len = sizeof(preqparm->vud);
+ switch (keybitsize) {
+ case 128:
+ case 192:
+ case 256:
+ break;
+ default:
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ rc = -EINVAL;
+ goto out;
+ }
+ preqparm->vud.clear_key_bit_len = keybitsize;
+ memcpy(preqparm->vud.key_type_1, "TOKEN ", 8);
+ memset(preqparm->vud.key_type_2, ' ', sizeof(preqparm->vud.key_type_2));
+
+ /* prepare kb block */
+ preqparm->kb.len = sizeof(preqparm->kb);
+ preqparm->kb.tlv1.len = sizeof(preqparm->kb.tlv1);
+ preqparm->kb.tlv1.flag = 0x0030;
+ preqparm->kb.tlv2.len = sizeof(preqparm->kb.tlv2);
+ preqparm->kb.tlv2.flag = 0x0030;
+ preqparm->kb.tlv3.len = sizeof(preqparm->kb.tlv3);
+ preqparm->kb.tlv3.flag = 0x0030;
+ memcpy(preqparm->kb.tlv3.gen_key_id_1,
+ aes_cipher_key_skeleton, SIZEOF_SKELETON);
+ preqparm->kb.tlv4.len = sizeof(preqparm->kb.tlv4);
+ preqparm->kb.tlv4.flag = 0x0030;
+ preqparm->kb.tlv5.len = sizeof(preqparm->kb.tlv5);
+ preqparm->kb.tlv5.flag = 0x0030;
+ preqparm->kb.tlv6.len = sizeof(preqparm->kb.tlv6);
+ preqparm->kb.tlv6.flag = 0x0030;
+
+ /* patch the skeleton key token export flags inside the kb block */
+ if (keygenflags) {
+ t = (struct cipherkeytoken *)preqparm->kb.tlv3.gen_key_id_1;
+ t->kmf1 |= (u16)(keygenflags & 0x0000FF00);
+ t->kmf1 &= (u16)~(keygenflags & 0x000000FF);
+ }
+
+ /* prepare xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = zcrypt_send_cprb(&xcrb, xflags);
+ if (rc) {
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ ZCRYPT_DBF_ERR("%s cipher key generate failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* process response cprb param block */
+ ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *)ptr;
+ prepparm = (struct gkrepparm *)ptr;
+
+ /* do some plausibility checks on the key block */
+ if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) ||
+ prepparm->kb.len > 136 + 5 * sizeof(uint16_t)) {
+ ZCRYPT_DBF_ERR("%s reply with invalid or unknown key block\n",
+ __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* and some checks on the generated key */
+ rc = cca_check_secaescipherkey(zcrypt_dbf_info, DBF_ERR,
+ prepparm->kb.tlv1.gen_key,
+ keybitsize, 1);
+ if (rc) {
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the generated vlsc key token */
+ t = (struct cipherkeytoken *)prepparm->kb.tlv1.gen_key;
+ if (keybuf) {
+ if (*keybufsize >= t->len)
+ memcpy(keybuf, t, t->len);
+ else
+ rc = -EINVAL;
+ }
+ *keybufsize = t->len;
+
+out:
+ free_cprbmem(mem, PARMBSIZE, false, xflags);
+ return rc;
+}
+EXPORT_SYMBOL(cca_gencipherkey);
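+
+/*
+ * Illustrative caller sketch, not part of this patch: the
+ * keygenflags argument carries KMF1 bits (see the KMF1_* defines in
+ * zcrypt_ccamisc.h); per the patch logic above, the high byte is
+ * OR-ed into and the low byte is cleared from the skeleton's kmf1
+ * field.
+ */
+#if 0
+static int example_gen_cipherkey(u16 card, u16 dom, u8 *buf, u32 *buflen)
+{
+	/* request a CPACF exportable 256 bit AES cipher key */
+	return cca_gencipherkey(card, dom, 256, KMF1_XPRT_CPAC,
+				buf, buflen, 0);
+}
+#endif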
+
+/*
+ * Helper function that builds and sends the CSNBKPI2 CPRB.
+ */
+static int _ip_cprb_helper(u16 cardnr, u16 domain,
+ const char *rule_array_1,
+ const char *rule_array_2,
+ const char *rule_array_3,
+ const u8 *clr_key_value,
+ int clr_key_bit_size,
+ u8 *key_token,
+ int *key_token_size,
+ u32 xflags)
+{
+ int rc, n;
+ u8 *mem, *ptr;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct rule_array_block {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ char rule_array[];
+ } __packed * preq_ra_block;
+ struct vud_block {
+ u16 len;
+ struct {
+ u16 len;
+ u16 flag; /* 0x0064 */
+ u16 clr_key_bit_len;
+ } tlv1;
+ struct {
+ u16 len;
+ u16 flag; /* 0x0063 */
+ u8 clr_key[]; /* clear key value bytes */
+ } tlv2;
+ } __packed * preq_vud_block;
+ struct key_block {
+ u16 len;
+ struct {
+ u16 len;
+ u16 flag; /* 0x0030 */
+ u8 key_token[]; /* key skeleton */
+ } tlv1;
+ } __packed * preq_key_block;
+ struct iprepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct {
+ u16 len;
+ } vud;
+ struct {
+ u16 len;
+ struct {
+ u16 len;
+ u16 flag; /* 0x0030 */
+ u8 key_token[]; /* key token */
+ } tlv1;
+ } kb;
+ } __packed * prepparm;
+ struct cipherkeytoken *t;
+ int complete = strncmp(rule_array_2, "COMPLETE", 8) ? 0 : 1;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem,
+ &preqcblk, &prepcblk, xflags);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+ preqcblk->req_parml = 0;
+
+ /* prepare request param block with IP request */
+ preq_ra_block = (struct rule_array_block __force *)preqcblk->req_parmb;
+ memcpy(preq_ra_block->subfunc_code, "IP", 2);
+ preq_ra_block->rule_array_len = sizeof(uint16_t) + 2 * 8;
+ memcpy(preq_ra_block->rule_array, rule_array_1, 8);
+ memcpy(preq_ra_block->rule_array + 8, rule_array_2, 8);
+ preqcblk->req_parml = sizeof(struct rule_array_block) + 2 * 8;
+ if (rule_array_3) {
+ preq_ra_block->rule_array_len += 8;
+ memcpy(preq_ra_block->rule_array + 16, rule_array_3, 8);
+ preqcblk->req_parml += 8;
+ }
+
+ /* prepare vud block */
+ preq_vud_block = (struct vud_block __force *)
+ (preqcblk->req_parmb + preqcblk->req_parml);
+ n = complete ? 0 : (clr_key_bit_size + 7) / 8;
+ preq_vud_block->len = sizeof(struct vud_block) + n;
+ preq_vud_block->tlv1.len = sizeof(preq_vud_block->tlv1);
+ preq_vud_block->tlv1.flag = 0x0064;
+ preq_vud_block->tlv1.clr_key_bit_len = complete ? 0 : clr_key_bit_size;
+ preq_vud_block->tlv2.len = sizeof(preq_vud_block->tlv2) + n;
+ preq_vud_block->tlv2.flag = 0x0063;
+ if (!complete)
+ memcpy(preq_vud_block->tlv2.clr_key, clr_key_value, n);
+ preqcblk->req_parml += preq_vud_block->len;
+
+ /* prepare key block */
+ preq_key_block = (struct key_block __force *)
+ (preqcblk->req_parmb + preqcblk->req_parml);
+ n = *key_token_size;
+ preq_key_block->len = sizeof(struct key_block) + n;
+ preq_key_block->tlv1.len = sizeof(preq_key_block->tlv1) + n;
+ preq_key_block->tlv1.flag = 0x0030;
+ memcpy(preq_key_block->tlv1.key_token, key_token, *key_token_size);
+ preqcblk->req_parml += preq_key_block->len;
+
+ /* prepare xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = zcrypt_send_cprb(&xcrb, xflags);
+ if (rc) {
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ ZCRYPT_DBF_ERR("%s CSNBKPI2 failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* process response cprb param block */
+ ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *)ptr;
+ prepparm = (struct iprepparm *)ptr;
+
+ /* do some plausibility checks on the key block */
+ if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) ||
+ prepparm->kb.len > 136 + 3 * sizeof(uint16_t)) {
+ ZCRYPT_DBF_ERR("%s reply with invalid or unknown key block\n",
+ __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* do not check the key here, it may be incomplete */
+
+ /* copy the vlsc key token back */
+ t = (struct cipherkeytoken *)prepparm->kb.tlv1.key_token;
+ memcpy(key_token, t, t->len);
+ *key_token_size = t->len;
+
+out:
+ free_cprbmem(mem, PARMBSIZE, false, xflags);
+ return rc;
+}
+
+/*
+ * Build CCA AES CIPHER secure key with a given clear key value.
+ */
+int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags,
+ const u8 *clrkey, u8 *keybuf, u32 *keybufsize, u32 xflags)
+{
+ int rc;
+ void *mem;
+ int tokensize;
+ u8 *token, exorbuf[32];
+ struct cipherkeytoken *t;
+
+ /* fill exorbuf with random data */
+ get_random_bytes(exorbuf, sizeof(exorbuf));
+
+ /*
+ * Allocate space for the key token to build.
+	 * As only up to MAXCCAVLSCTOKENSIZE bytes are needed for this,
+	 * the already existing cprb mempool is used to satisfy this
+	 * short term memory requirement.
+ */
+ mem = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ?
+ mempool_alloc_preallocated(cprb_mempool) :
+ mempool_alloc(cprb_mempool, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+ /* prepare the token with the key skeleton */
+ token = (u8 *)mem;
+ tokensize = SIZEOF_SKELETON;
+ memcpy(token, aes_cipher_key_skeleton, tokensize);
+
+ /* patch the skeleton key token export flags */
+ if (keygenflags) {
+ t = (struct cipherkeytoken *)token;
+ t->kmf1 |= (u16)(keygenflags & 0x0000FF00);
+ t->kmf1 &= (u16)~(keygenflags & 0x000000FF);
+ }
+
+ /*
+ * Do the key import with the clear key value in 4 steps:
+ * 1/4 FIRST import with only random data
+ * 2/4 EXOR the clear key
+ * 3/4 EXOR the very same random data again
+ * 4/4 COMPLETE the secure cipher key import
+ */
+ rc = _ip_cprb_helper(card, dom, "AES ", "FIRST ", "MIN3PART",
+ exorbuf, keybitsize, token, &tokensize, xflags);
+ if (rc) {
+ ZCRYPT_DBF_ERR("%s clear key import 1/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+ rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL,
+ clrkey, keybitsize, token, &tokensize, xflags);
+ if (rc) {
+ ZCRYPT_DBF_ERR("%s clear key import 2/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+ rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL,
+ exorbuf, keybitsize, token, &tokensize, xflags);
+ if (rc) {
+ ZCRYPT_DBF_ERR("%s clear key import 3/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+ rc = _ip_cprb_helper(card, dom, "AES ", "COMPLETE", NULL,
+ NULL, keybitsize, token, &tokensize, xflags);
+ if (rc) {
+ ZCRYPT_DBF_ERR("%s clear key import 4/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+
+ /* copy the generated key token */
+ if (keybuf) {
+ if (tokensize > *keybufsize)
+ rc = -EINVAL;
+ else
+ memcpy(keybuf, token, tokensize);
+ }
+ *keybufsize = tokensize;
+
+out:
+ mempool_free(mem, cprb_mempool);
+ return rc;
+}
+EXPORT_SYMBOL(cca_clr2cipherkey);
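+
+/*
+ * Illustrative note, not part of this patch: the 4-step import above
+ * relies on the EXOR identity r ^ k ^ r == k. The card accumulates
+ * the imported parts by EXOR, so the random data cancels out and the
+ * clear key is never transferred as one single plain part. Minimal
+ * demonstration of the identity:
+ */
+#if 0
+static void example_exor_identity(const u8 *clrkey, const u8 *rnd,
+				  u8 *out, size_t len)
+{
+	size_t i;
+
+	for (i = 0; i < len; i++)
+		out[i] = rnd[i] ^ clrkey[i] ^ rnd[i];	/* == clrkey[i] */
+}
+#endif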
+
+/*
+ * Derive a protected key from a CCA AES cipher secure key.
+ */
+int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype,
+ u32 xflags)
+{
+ int rc;
+ u8 *mem, *ptr;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct aureqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ u8 rule_array[8];
+ struct {
+ u16 len;
+ u16 tk_blob_len;
+ u16 tk_blob_tag;
+ u8 tk_blob[66];
+ } vud;
+ struct {
+ u16 len;
+ u16 cca_key_token_len;
+ u16 cca_key_token_flags;
+ u8 cca_key_token[]; /* 64 or more */
+ } kb;
+ } __packed * preqparm;
+ struct aurepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct {
+ u16 len;
+ u16 sublen;
+ u16 tag;
+ struct cpacfkeyblock {
+ u8 version; /* version of this struct */
+ u8 flags[2];
+ u8 algo;
+ u8 form;
+ u8 pad1[3];
+ u16 keylen;
+ u8 key[64]; /* the key (keylen bytes) */
+ u16 keyattrlen;
+ u8 keyattr[32];
+ u8 pad2[1];
+ u8 vptype;
+ u8 vp[32]; /* verification pattern */
+ } ckb;
+ } vud;
+ struct {
+ u16 len;
+ } kb;
+ } __packed * prepparm;
+ int keytoklen = ((struct cipherkeytoken *)ckey)->len;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem,
+ &preqcblk, &prepcblk, xflags);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with AU request */
+ preqparm = (struct aureqparm __force *)preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "AU", 2);
+ preqparm->rule_array_len =
+ sizeof(preqparm->rule_array_len)
+ + sizeof(preqparm->rule_array);
+ memcpy(preqparm->rule_array, "EXPT-SK ", 8);
+ /* vud, tk blob */
+ preqparm->vud.len = sizeof(preqparm->vud);
+ preqparm->vud.tk_blob_len = sizeof(preqparm->vud.tk_blob)
+ + 2 * sizeof(uint16_t);
+ preqparm->vud.tk_blob_tag = 0x00C2;
+ /* kb, cca token */
+ preqparm->kb.len = keytoklen + 3 * sizeof(uint16_t);
+ preqparm->kb.cca_key_token_len = keytoklen + 2 * sizeof(uint16_t);
+ memcpy(preqparm->kb.cca_key_token, ckey, keytoklen);
+ /* now fill length of param block into cprb */
+ preqcblk->req_parml = sizeof(struct aureqparm) + keytoklen;
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = zcrypt_send_cprb(&xcrb, xflags);
+ if (rc) {
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ ZCRYPT_DBF_ERR("%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
+ if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
+ rc = -EBUSY;
+ else
+ rc = -EIO;
+ goto out;
+ }
+ if (prepcblk->ccp_rscode != 0) {
+ ZCRYPT_DBF_WARN("%s unwrap secure key warning, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
+ }
+
+ /* process response cprb param block */
+ ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *)ptr;
+ prepparm = (struct aurepparm *)ptr;
+
+ /* check the returned keyblock */
+ if (prepparm->vud.ckb.version != 0x01 &&
+ prepparm->vud.ckb.version != 0x02) {
+ ZCRYPT_DBF_ERR("%s reply param keyblock version mismatch 0x%02x\n",
+ __func__, (int)prepparm->vud.ckb.version);
+ rc = -EIO;
+ goto out;
+ }
+ if (prepparm->vud.ckb.algo != 0x02) {
+ ZCRYPT_DBF_ERR("%s reply param keyblock algo mismatch 0x%02x != 0x02\n",
+ __func__, (int)prepparm->vud.ckb.algo);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the translated protected key */
+ switch (prepparm->vud.ckb.keylen) {
+ case 16 + 32:
+ /* AES 128 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_128;
+ break;
+ case 24 + 32:
+ /* AES 192 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_192;
+ break;
+ case 32 + 32:
+ /* AES 256 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_256;
+ break;
+ default:
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keylen %d\n",
+ __func__, prepparm->vud.ckb.keylen);
+ rc = -EIO;
+ goto out;
+ }
+ memcpy(protkey, prepparm->vud.ckb.key, prepparm->vud.ckb.keylen);
+ if (protkeylen)
+ *protkeylen = prepparm->vud.ckb.keylen;
+
+out:
+ free_cprbmem(mem, PARMBSIZE, true, xflags);
+ return rc;
+}
+EXPORT_SYMBOL(cca_cipher2protkey);
+
+/*
+ * Derive protected key from CCA ECC secure private key.
+ */
+int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype, u32 xflags)
+{
+ int rc;
+ u8 *mem, *ptr;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct aureqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ u8 rule_array[8];
+ struct {
+ u16 len;
+ u16 tk_blob_len;
+ u16 tk_blob_tag;
+ u8 tk_blob[66];
+ } vud;
+ struct {
+ u16 len;
+ u16 cca_key_token_len;
+ u16 cca_key_token_flags;
+ u8 cca_key_token[];
+ } kb;
+ } __packed * preqparm;
+ struct aurepparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ struct {
+ u16 len;
+ u16 sublen;
+ u16 tag;
+ struct cpacfkeyblock {
+ u8 version; /* version of this struct */
+ u8 flags[2];
+ u8 algo;
+ u8 form;
+ u8 pad1[3];
+ u16 keylen;
+ u8 key[]; /* the key (keylen bytes) */
+ /* u16 keyattrlen; */
+ /* u8 keyattr[32]; */
+ /* u8 pad2[1]; */
+ /* u8 vptype; */
+ /* u8 vp[32]; verification pattern */
+ } ckb;
+ } vud;
+ /* followed by a key block */
+ } __packed * prepparm;
+ int keylen = ((struct eccprivkeytoken *)key)->len;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem,
+ &preqcblk, &prepcblk, xflags);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with AU request */
+ preqparm = (struct aureqparm __force *)preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "AU", 2);
+ preqparm->rule_array_len =
+ sizeof(preqparm->rule_array_len)
+ + sizeof(preqparm->rule_array);
+ memcpy(preqparm->rule_array, "EXPT-SK ", 8);
+ /* vud, tk blob */
+ preqparm->vud.len = sizeof(preqparm->vud);
+ preqparm->vud.tk_blob_len = sizeof(preqparm->vud.tk_blob)
+ + 2 * sizeof(uint16_t);
+ preqparm->vud.tk_blob_tag = 0x00C2;
+ /* kb, cca token */
+ preqparm->kb.len = keylen + 3 * sizeof(uint16_t);
+ preqparm->kb.cca_key_token_len = keylen + 2 * sizeof(uint16_t);
+ memcpy(preqparm->kb.cca_key_token, key, keylen);
+ /* now fill length of param block into cprb */
+ preqcblk->req_parml = sizeof(struct aureqparm) + keylen;
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = zcrypt_send_cprb(&xcrb, xflags);
+ if (rc) {
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ ZCRYPT_DBF_ERR("%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
+ if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
+ rc = -EBUSY;
+ else
+ rc = -EIO;
+ goto out;
+ }
+ if (prepcblk->ccp_rscode != 0) {
+ ZCRYPT_DBF_WARN("%s unwrap secure key warning, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
+ }
+
+ /* process response cprb param block */
+ ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *)ptr;
+ prepparm = (struct aurepparm *)ptr;
+
+ /* check the returned keyblock */
+ if (prepparm->vud.ckb.version != 0x02) {
+ ZCRYPT_DBF_ERR("%s reply param keyblock version mismatch 0x%02x != 0x02\n",
+ __func__, (int)prepparm->vud.ckb.version);
+ rc = -EIO;
+ goto out;
+ }
+ if (prepparm->vud.ckb.algo != 0x81) {
+ ZCRYPT_DBF_ERR("%s reply param keyblock algo mismatch 0x%02x != 0x81\n",
+ __func__, (int)prepparm->vud.ckb.algo);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the translated protected key */
+ if (prepparm->vud.ckb.keylen > *protkeylen) {
+ ZCRYPT_DBF_ERR("%s prot keylen mismatch %d > buffersize %u\n",
+ __func__, prepparm->vud.ckb.keylen, *protkeylen);
+ rc = -EIO;
+ goto out;
+ }
+ memcpy(protkey, prepparm->vud.ckb.key, prepparm->vud.ckb.keylen);
+ *protkeylen = prepparm->vud.ckb.keylen;
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_ECC;
+
+out:
+ free_cprbmem(mem, PARMBSIZE, true, xflags);
+ return rc;
+}
+EXPORT_SYMBOL(cca_ecc2protkey);
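+
+/*
+ * Illustrative note, not part of this patch: unlike the fixed-size
+ * AES cases, the ECC protected key length varies, so *protkeylen is
+ * used in/out here: the caller passes the buffer size in and gets
+ * the actual protected key length back.
+ */
+#if 0
+static int example_ecc2prot(u16 card, u16 dom, const u8 *key,
+			    u8 *pkbuf, u32 pkbufsize)
+{
+	u32 protkeylen = pkbufsize, protkeytype;
+
+	return cca_ecc2protkey(card, dom, key, pkbuf,
+			       &protkeylen, &protkeytype, 0);
+}
+#endif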
+
+/*
+ * Query cryptographic facility from CCA adapter
+ */
+int cca_query_crypto_facility(u16 cardnr, u16 domain,
+ const char *keyword,
+ u8 *rarray, size_t *rarraylen,
+ u8 *varray, size_t *varraylen,
+ u32 xflags)
+{
+ int rc;
+ u16 len;
+ u8 *mem, *ptr;
+ struct CPRBX *preqcblk, *prepcblk;
+ struct ica_xcRB xcrb;
+ struct fqreqparm {
+ u8 subfunc_code[2];
+ u16 rule_array_len;
+ char rule_array[8];
+ struct lv1 {
+ u16 len;
+ u8 data[VARDATASIZE];
+ } lv1;
+ u16 dummylen;
+ } __packed * preqparm;
+ size_t parmbsize = sizeof(struct fqreqparm);
+ struct fqrepparm {
+ u8 subfunc_code[2];
+ u8 lvdata[];
+ } __packed * prepparm;
+
+ /* get already prepared memory for 2 cprbs with param block each */
+ rc = alloc_and_prep_cprbmem(parmbsize, &mem,
+ &preqcblk, &prepcblk, xflags);
+ if (rc)
+ return rc;
+
+ /* fill request cprb struct */
+ preqcblk->domain = domain;
+
+ /* fill request cprb param block with FQ request */
+ preqparm = (struct fqreqparm __force *)preqcblk->req_parmb;
+ memcpy(preqparm->subfunc_code, "FQ", 2);
+ memcpy(preqparm->rule_array, keyword, sizeof(preqparm->rule_array));
+ preqparm->rule_array_len =
+ sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
+ preqparm->lv1.len = sizeof(preqparm->lv1);
+ preqparm->dummylen = sizeof(preqparm->dummylen);
+ preqcblk->req_parml = parmbsize;
+
+ /* fill xcrb struct */
+ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
+
+ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
+ rc = zcrypt_send_cprb(&xcrb, xflags);
+ if (rc) {
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
+ goto out;
+ }
+
+ /* check response returncode and reasoncode */
+ if (prepcblk->ccp_rtcode != 0) {
+ ZCRYPT_DBF_ERR("%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* process response cprb param block */
+ ptr = ((u8 *)prepcblk) + sizeof(struct CPRBX);
+ prepcblk->rpl_parmb = (u8 __user *)ptr;
+ prepparm = (struct fqrepparm *)ptr;
+ ptr = prepparm->lvdata;
+
+ /* check and possibly copy reply rule array */
+ len = *((u16 *)ptr);
+ if (len > sizeof(u16)) {
+ ptr += sizeof(u16);
+ len -= sizeof(u16);
+ if (rarray && rarraylen && *rarraylen > 0) {
+ *rarraylen = (len > *rarraylen ? *rarraylen : len);
+ memcpy(rarray, ptr, *rarraylen);
+ }
+ ptr += len;
+ }
+	/* check and possibly copy reply var array */
+ len = *((u16 *)ptr);
+ if (len > sizeof(u16)) {
+ ptr += sizeof(u16);
+ len -= sizeof(u16);
+ if (varray && varraylen && *varraylen > 0) {
+ *varraylen = (len > *varraylen ? *varraylen : len);
+ memcpy(varray, ptr, *varraylen);
+ }
+ ptr += len;
+ }
+
+out:
+ free_cprbmem(mem, parmbsize, false, xflags);
+ return rc;
+}
+EXPORT_SYMBOL(cca_query_crypto_facility);
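+
+/*
+ * Illustrative caller sketch, not part of this patch: query the
+ * STATICSA facility data of an APQN. rarraylen/varraylen are in/out:
+ * they carry the buffer sizes in and the number of copied bytes
+ * (capped at the buffer size) back. The buffer sizes below are
+ * assumptions.
+ */
+#if 0
+static int example_query_staticsa(u16 card, u16 dom)
+{
+	u8 rarray[256], varray[256];
+	size_t rlen = sizeof(rarray), vlen = sizeof(varray);
+
+	return cca_query_crypto_facility(card, dom, "STATICSA",
+					 rarray, &rlen, varray, &vlen, 0);
+}
+#endif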
+
+/*
+ * Fetch cca_info values about a CCA queue via
+ * query_crypto_facility from the adapter.
+ */
+int cca_get_info(u16 cardnr, u16 domain, struct cca_info *ci, u32 xflags)
+{
+ void *mem;
+ int rc, found = 0;
+ size_t rlen, vlen;
+ u8 *rarray, *varray;
+ struct zcrypt_device_status_ext devstat;
+
+ memset(ci, 0, sizeof(*ci));
+
+ /* get first info from zcrypt device driver about this apqn */
+ rc = zcrypt_device_status_ext(cardnr, domain, &devstat);
+ if (rc)
+ return rc;
+ ci->hwtype = devstat.hwtype;
+
+ /*
+ * Prep memory for rule array and var array use.
+ * Use the cprb mempool for this.
+ */
+ mem = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ?
+ mempool_alloc_preallocated(cprb_mempool) :
+ mempool_alloc(cprb_mempool, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+ rarray = (u8 *)mem;
+ varray = (u8 *)mem + PAGE_SIZE / 2;
+ rlen = vlen = PAGE_SIZE / 2;
+
+ /* QF for this card/domain */
+ rc = cca_query_crypto_facility(cardnr, domain, "STATICSA",
+ rarray, &rlen, varray, &vlen, xflags);
+ if (rc == 0 && rlen >= 10 * 8 && vlen >= 204) {
+ memcpy(ci->serial, rarray, 8);
+ ci->new_asym_mk_state = (char)rarray[4 * 8];
+ ci->cur_asym_mk_state = (char)rarray[5 * 8];
+ ci->old_asym_mk_state = (char)rarray[6 * 8];
+ if (ci->old_asym_mk_state == '2')
+ memcpy(ci->old_asym_mkvp, varray + 64, 16);
+ if (ci->cur_asym_mk_state == '2')
+ memcpy(ci->cur_asym_mkvp, varray + 84, 16);
+ if (ci->new_asym_mk_state == '3')
+ memcpy(ci->new_asym_mkvp, varray + 104, 16);
+ ci->new_aes_mk_state = (char)rarray[7 * 8];
+ ci->cur_aes_mk_state = (char)rarray[8 * 8];
+ ci->old_aes_mk_state = (char)rarray[9 * 8];
+ if (ci->old_aes_mk_state == '2')
+ memcpy(&ci->old_aes_mkvp, varray + 172, 8);
+ if (ci->cur_aes_mk_state == '2')
+ memcpy(&ci->cur_aes_mkvp, varray + 184, 8);
+ if (ci->new_aes_mk_state == '3')
+ memcpy(&ci->new_aes_mkvp, varray + 196, 8);
+ found++;
+ }
+ if (!found)
+ goto out;
+ rlen = vlen = PAGE_SIZE / 2;
+ rc = cca_query_crypto_facility(cardnr, domain, "STATICSB",
+ rarray, &rlen, varray, &vlen, xflags);
+ if (rc == 0 && rlen >= 13 * 8 && vlen >= 240) {
+ ci->new_apka_mk_state = (char)rarray[10 * 8];
+ ci->cur_apka_mk_state = (char)rarray[11 * 8];
+ ci->old_apka_mk_state = (char)rarray[12 * 8];
+ if (ci->old_apka_mk_state == '2')
+ memcpy(&ci->old_apka_mkvp, varray + 208, 8);
+ if (ci->cur_apka_mk_state == '2')
+ memcpy(&ci->cur_apka_mkvp, varray + 220, 8);
+ if (ci->new_apka_mk_state == '3')
+ memcpy(&ci->new_apka_mkvp, varray + 232, 8);
+ found++;
+ }
+
+out:
+ mempool_free(mem, cprb_mempool);
+ return found == 2 ? 0 : -ENOENT;
+}
+EXPORT_SYMBOL(cca_get_info);
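+
+/*
+ * Illustrative caller sketch, not part of this patch: fetch the cca
+ * info of an APQN and only trust cur_aes_mkvp when the current AES
+ * master key state is valid ('2').
+ */
+#if 0
+static bool example_aes_mk_valid(u16 card, u16 dom)
+{
+	struct cca_info ci;
+
+	if (cca_get_info(card, dom, &ci, 0))
+		return false;
+	return ci.cur_aes_mk_state == '2';
+}
+#endif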
+
+int cca_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+ int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp,
+ u32 xflags)
+{
+ struct zcrypt_device_status_ext *device_status;
+ int i, card, dom, curmatch, oldmatch;
+ struct cca_info ci;
+ u32 _nr_apqns = 0;
+
+ /* occupy the device status memory */
+ mutex_lock(&dev_status_mem_mutex);
+ memset(dev_status_mem, 0, ZCRYPT_DEV_STATUS_EXT_SIZE);
+ device_status = (struct zcrypt_device_status_ext *)dev_status_mem;
+
+ /* fetch crypto device status into this struct */
+ zcrypt_device_status_mask_ext(device_status,
+ ZCRYPT_DEV_STATUS_CARD_MAX,
+ ZCRYPT_DEV_STATUS_QUEUE_MAX);
+
+	/* walk through all the crypto apqns */
+ for (i = 0; i < ZCRYPT_DEV_STATUS_ENTRIES; i++) {
+ card = AP_QID_CARD(device_status[i].qid);
+ dom = AP_QID_QUEUE(device_status[i].qid);
+ /* check online state */
+ if (!device_status[i].online)
+ continue;
+ /* check for cca functions */
+ if (!(device_status[i].functions & 0x04))
+ continue;
+ /* check cardnr */
+ if (cardnr != 0xFFFF && card != cardnr)
+ continue;
+ /* check domain */
+ if (domain != 0xFFFF && dom != domain)
+ continue;
+ /* get cca info on this apqn */
+ if (cca_get_info(card, dom, &ci, xflags))
+ continue;
+ /* current master key needs to be valid */
+ if (mktype == AES_MK_SET && ci.cur_aes_mk_state != '2')
+ continue;
+ if (mktype == APKA_MK_SET && ci.cur_apka_mk_state != '2')
+ continue;
+ /* check min hardware type */
+ if (minhwtype > 0 && minhwtype > ci.hwtype)
+ continue;
+ if (cur_mkvp || old_mkvp) {
+ /* check mkvps */
+ curmatch = oldmatch = 0;
+ if (mktype == AES_MK_SET) {
+ if (cur_mkvp && cur_mkvp == ci.cur_aes_mkvp)
+ curmatch = 1;
+ if (old_mkvp && ci.old_aes_mk_state == '2' &&
+ old_mkvp == ci.old_aes_mkvp)
+ oldmatch = 1;
+ } else {
+ if (cur_mkvp && cur_mkvp == ci.cur_apka_mkvp)
+ curmatch = 1;
+ if (old_mkvp && ci.old_apka_mk_state == '2' &&
+ old_mkvp == ci.old_apka_mkvp)
+ oldmatch = 1;
+ }
+ if (curmatch + oldmatch < 1)
+ continue;
+ }
+		/* apqn passed all filtering criteria, add to the array */
+ if (_nr_apqns < *nr_apqns)
+ apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom);
+ }
+
+ *nr_apqns = _nr_apqns;
+
+ /* release the device status memory */
+ mutex_unlock(&dev_status_mem_mutex);
+
+ return _nr_apqns ? 0 : -ENODEV;
+}
+EXPORT_SYMBOL(cca_findcard2);
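+
+/*
+ * Illustrative caller sketch, not part of this patch: collect up to
+ * 16 online CCA APQNs (any card, any domain) whose current AES
+ * master key matches the given mkvp. Each returned entry encodes the
+ * card in the upper and the domain in the lower 16 bits.
+ */
+#if 0
+static int example_find_apqns(u64 cur_mkvp, u32 *apqns, u32 *nr_apqns)
+{
+	*nr_apqns = 16;
+	return cca_findcard2(apqns, nr_apqns, 0xFFFF, 0xFFFF,
+			     0, AES_MK_SET, cur_mkvp, 0, 0);
+}
+#endif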
+
+int __init zcrypt_ccamisc_init(void)
+{
+ /* Pre-allocate a small memory pool for cca cprbs. */
+ cprb_mempool = mempool_create_kmalloc_pool(zcrypt_mempool_threshold,
+ CPRB_MEMPOOL_ITEM_SIZE);
+ if (!cprb_mempool)
+ return -ENOMEM;
+
+ /* Pre-allocate one crypto status card struct used in findcard() */
+ dev_status_mem = kvmalloc(ZCRYPT_DEV_STATUS_EXT_SIZE, GFP_KERNEL);
+ if (!dev_status_mem) {
+ mempool_destroy(cprb_mempool);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void zcrypt_ccamisc_exit(void)
+{
+ mutex_lock(&dev_status_mem_mutex);
+ kvfree(dev_status_mem);
+ mutex_unlock(&dev_status_mem_mutex);
+ mempool_destroy(cprb_mempool);
+}
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h
new file mode 100644
index 000000000000..1ecc4e37e9ad
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_ccamisc.h
@@ -0,0 +1,268 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright IBM Corp. 2019
+ * Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ * Ingo Franzki <ifranzki@linux.ibm.com>
+ *
+ * Collection of CCA misc functions used by zcrypt and pkey
+ */
+
+#ifndef _ZCRYPT_CCAMISC_H_
+#define _ZCRYPT_CCAMISC_H_
+
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+#include "zcrypt_api.h"
+
+/* Key token types */
+#define TOKTYPE_NON_CCA 0x00 /* Non-CCA key token */
+#define TOKTYPE_CCA_INTERNAL 0x01 /* CCA internal sym key token */
+#define TOKTYPE_CCA_INTERNAL_PKA 0x1f /* CCA internal asym key token */
+
+/* For TOKTYPE_NON_CCA: */
+#define TOKVER_PROTECTED_KEY 0x01 /* Protected key token */
+#define TOKVER_CLEAR_KEY 0x02 /* Clear key token */
+
+/* For TOKTYPE_CCA_INTERNAL: */
+#define TOKVER_CCA_AES 0x04 /* CCA AES key token */
+#define TOKVER_CCA_VLSC 0x05 /* var length sym cipher key token */
+
+/* Max size of a cca variable length cipher key token */
+#define MAXCCAVLSCTOKENSIZE 725
+
+/* header part of a CCA key token */
+struct keytoken_header {
+ u8 type; /* one of the TOKTYPE values */
+ u8 res0[1];
+ u16 len; /* vlsc token: total length in bytes */
+ u8 version; /* one of the TOKVER values */
+ u8 res1[3];
+} __packed;
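+
+/*
+ * Illustrative sketch, not part of this patch: callers typically map
+ * a raw key blob onto struct keytoken_header first and then dispatch
+ * on the type/version pair, e.g.:
+ */
+#if 0
+static inline bool is_cca_aes_data_token(const u8 *buf)
+{
+	const struct keytoken_header *hdr =
+		(const struct keytoken_header *)buf;
+
+	return hdr->type == TOKTYPE_CCA_INTERNAL &&
+	       hdr->version == TOKVER_CCA_AES;
+}
+#endif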
+
+/* inside view of a CCA secure key token (only type 0x01 version 0x04) */
+struct secaeskeytoken {
+ u8 type; /* 0x01 for internal key token */
+ u8 res0[3];
+ u8 version; /* should be 0x04 */
+ u8 res1[1];
+ u8 flag; /* key flags */
+ u8 res2[1];
+ u64 mkvp; /* master key verification pattern */
+ u8 key[32]; /* key value (encrypted) */
+ u8 cv[8]; /* control vector */
+ u16 bitsize; /* key bit size */
+ u16 keysize; /* key byte size */
+ u8 tvv[4]; /* token validation value */
+} __packed;
+
+/* inside view of a variable length symmetric cipher AES key token */
+struct cipherkeytoken {
+ u8 type; /* 0x01 for internal key token */
+ u8 res0[1];
+ u16 len; /* total key token length in bytes */
+ u8 version; /* should be 0x05 */
+ u8 res1[3];
+ u8 kms; /* key material state, 0x03 means wrapped with MK */
+ u8 kvpt; /* key verification pattern type, should be 0x01 */
+ u64 mkvp0; /* master key verification pattern, lo part */
+ u64 mkvp1; /* master key verification pattern, hi part (unused) */
+ u8 eskwm; /* encrypted section key wrapping method */
+	u8 hashalg; /* hash algorithm used for wrapping key */
+	u8 plfver; /* payload format version */
+ u8 res2[1];
+ u8 adsver; /* associated data section version */
+ u8 res3[1];
+ u16 adslen; /* associated data section length */
+ u8 kllen; /* optional key label length */
+ u8 ieaslen; /* optional extended associated data length */
+ u8 uadlen; /* optional user definable associated data length */
+ u8 res4[1];
+ u16 wpllen; /* wrapped payload length in bits: */
+ /* plfver 0x00 0x01 */
+ /* AES-128 512 640 */
+ /* AES-192 576 640 */
+ /* AES-256 640 640 */
+ u8 res5[1];
+ u8 algtype; /* 0x02 for AES cipher */
+ u16 keytype; /* 0x0001 for 'cipher' */
+ u8 kufc; /* key usage field count */
+ u16 kuf1; /* key usage field 1 */
+ u16 kuf2; /* key usage field 2 */
+ u8 kmfc; /* key management field count */
+ u16 kmf1; /* key management field 1 */
+ u16 kmf2; /* key management field 2 */
+ u16 kmf3; /* key management field 3 */
+ u8 vdata[]; /* variable part data follows */
+} __packed;
+
+/* inside view of a CCA secure ECC private key */
+struct eccprivkeytoken {
+ u8 type; /* 0x1f for internal asym key token */
+ u8 version; /* should be 0x00 */
+ u16 len; /* total key token length in bytes */
+ u8 res1[4];
+ u8 secid; /* 0x20 for ECC priv key section marker */
+ u8 secver; /* section version */
+ u16 seclen; /* section length */
+ u8 wtype; /* wrapping method, 0x00 clear, 0x01 AES */
+ u8 htype; /* hash method, 0x02 for SHA-256 */
+ u8 res2[2];
+ u8 kutc; /* key usage and translation control */
+ u8 ctype; /* curve type */
+ u8 kfs; /* key format and security */
+ u8 ksrc; /* key source */
+ u16 pbitlen; /* length of prime p in bits */
+ u16 ibmadlen; /* IBM associated data length in bytes */
+ u64 mkvp; /* master key verification pattern */
+ u8 opk[48]; /* encrypted object protection key data */
+ u16 adatalen; /* associated data length in bytes */
+ u16 fseclen; /* formatted section length in bytes */
+ u8 more_data[]; /* more data follows */
+} __packed;
+
+/* Some defines for the CCA AES cipherkeytoken kmf1 field */
+#define KMF1_XPRT_SYM 0x8000
+#define KMF1_XPRT_UASY 0x4000
+#define KMF1_XPRT_AASY 0x2000
+#define KMF1_XPRT_RAW 0x1000
+#define KMF1_XPRT_CPAC 0x0800
+#define KMF1_XPRT_DES 0x0080
+#define KMF1_XPRT_AES 0x0040
+#define KMF1_XPRT_RSA 0x0008
+
+/*
+ * Simple check if the token is a valid CCA secure AES data key
+ * token. If keybitsize is given, the bitsize of the key is
+ * also checked. Returns 0 on success or errno value on failure.
+ */
+int cca_check_secaeskeytoken(debug_info_t *dbg, int dbflvl,
+ const u8 *token, int keybitsize);
+
+/*
+ * Simple check if the token is a valid CCA secure AES cipher key
+ * token. If keybitsize is given, the bitsize of the key is
+ * also checked. If checkcpacfexport is enabled, the key is also
+ * checked for the export flag to allow CPACF export.
+ * Returns 0 on success or errno value on failure.
+ */
+int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl,
+ const u8 *token, int keybitsize,
+ int checkcpacfexport);
+
+/*
+ * Simple check if the token is a valid CCA secure ECC private
+ * key token. Returns 0 on success or errno value on failure.
+ */
+int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl,
+ const u8 *token, u32 keysize,
+ int checkcpacfexport);
+
+/*
+ * Generate (random) CCA AES DATA secure key.
+ */
+int cca_genseckey(u16 cardnr, u16 domain, u32 keybitsize, u8 *seckey,
+ u32 xflags);
+
+/*
+ * Generate CCA AES DATA secure key with given clear key value.
+ */
+int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
+ const u8 *clrkey, u8 *seckey, u32 xflags);
+
+/*
+ * Derive a protected key from a CCA AES DATA secure key.
+ */
+int cca_sec2protkey(u16 cardnr, u16 domain,
+ const u8 *seckey, u8 *protkey, u32 *protkeylen,
+ u32 *protkeytype, u32 xflags);
+
+/*
+ * Generate (random) CCA AES CIPHER secure key.
+ */
+int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, u32 *keybufsize, u32 xflags);
+
+/*
+ * Derive a protected key from a CCA AES cipher secure key.
+ */
+int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype,
+ u32 xflags);
+
+/*
+ * Build CCA AES CIPHER secure key with a given clear key value.
+ */
+int cca_clr2cipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
+ const u8 *clrkey, u8 *keybuf, u32 *keybufsize,
+ u32 xflags);
+
+/*
+ * Derive a protected key from a CCA ECC secure private key.
+ */
+int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype, u32 xflags);
+
+/*
+ * Query cryptographic facility from CCA adapter
+ */
+int cca_query_crypto_facility(u16 cardnr, u16 domain,
+ const char *keyword,
+ u8 *rarray, size_t *rarraylen,
+ u8 *varray, size_t *varraylen,
+ u32 xflags);
+
+/*
+ * Build a list of cca apqns meeting the following constraints:
+ * - apqn is online and is in fact a CCA apqn
+ * - if cardnr is not FFFF only apqns with this cardnr
+ * - if domain is not FFFF only apqns with this domainnr
+ * - if minhwtype > 0 only apqns with hwtype >= minhwtype
+ * - if cur_mkvp != 0 only apqns where cur_mkvp == mkvp
+ * - if old_mkvp != 0 only apqns where old_mkvp == mkvp
+ * The mktype determines which set of master keys to use:
+ * 0 = AES_MK_SET - AES MK set, 1 = APKA_MK_SET - APKA MK set
+ * The caller should set *nr_apqns to the nr of elements available in *apqns.
+ * On return *nr_apqns is then updated with the nr of apqns filled into *apqns.
+ * The return value is either 0 for success or a negative errno value.
+ * If no apqn meeting the criteria is found, -ENODEV is returned.
+ */
+int cca_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+ int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp,
+ u32 xflags);
+
+#define AES_MK_SET 0
+#define APKA_MK_SET 1
+
+/* struct to hold info for each CCA queue */
+struct cca_info {
+ int hwtype; /* one of the defined AP_DEVICE_TYPE_* */
+ char new_aes_mk_state; /* '1' empty, '2' partially full, '3' full */
+ char cur_aes_mk_state; /* '1' invalid, '2' valid */
+ char old_aes_mk_state; /* '1' invalid, '2' valid */
+ char new_apka_mk_state; /* '1' empty, '2' partially full, '3' full */
+ char cur_apka_mk_state; /* '1' invalid, '2' valid */
+ char old_apka_mk_state; /* '1' invalid, '2' valid */
+ char new_asym_mk_state; /* '1' empty, '2' partially full, '3' full */
+ char cur_asym_mk_state; /* '1' invalid, '2' valid */
+ char old_asym_mk_state; /* '1' invalid, '2' valid */
+ u64 new_aes_mkvp; /* truncated sha256 of new aes master key */
+ u64 cur_aes_mkvp; /* truncated sha256 of current aes master key */
+ u64 old_aes_mkvp; /* truncated sha256 of old aes master key */
+ u64 new_apka_mkvp; /* truncated sha256 of new apka master key */
+ u64 cur_apka_mkvp; /* truncated sha256 of current apka mk */
+ u64 old_apka_mkvp; /* truncated sha256 of old apka mk */
+ u8 new_asym_mkvp[16]; /* verify pattern of new asym master key */
+ u8 cur_asym_mkvp[16]; /* verify pattern of current asym master key */
+ u8 old_asym_mkvp[16]; /* verify pattern of old asym master key */
+ char serial[9]; /* serial number (8 ascii numbers + 0x00) */
+};
+
+/*
+ * Fetch cca information about a CCA queue.
+ */
+int cca_get_info(u16 card, u16 dom, struct cca_info *ci, u32 xflags);
+
+int zcrypt_ccamisc_init(void);
+void zcrypt_ccamisc_exit(void);
+
+#endif /* _ZCRYPT_CCAMISC_H_ */
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index c50f3e86cc74..e69de29bb2d1 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -1,236 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright IBM Corp. 2001, 2012
- * Author(s): Robert Burroughs
- * Eric Rossman (edrossma@us.ibm.com)
- *
- * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
- * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- * Ralph Wuerthner <rwuerthn@de.ibm.com>
- * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/atomic.h>
-#include <linux/uaccess.h>
-#include <linux/mod_devicetable.h>
-
-#include "ap_bus.h"
-#include "zcrypt_api.h"
-#include "zcrypt_error.h"
-#include "zcrypt_cex2a.h"
-#include "zcrypt_msgtype50.h"
-
-#define CEX2A_MIN_MOD_SIZE 1 /* 8 bits */
-#define CEX2A_MAX_MOD_SIZE 256 /* 2048 bits */
-#define CEX3A_MIN_MOD_SIZE CEX2A_MIN_MOD_SIZE
-#define CEX3A_MAX_MOD_SIZE 512 /* 4096 bits */
-
-#define CEX2A_MAX_MESSAGE_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */
-#define CEX2A_MAX_RESPONSE_SIZE 0x110 /* max outputdatalength + type80_hdr */
-
-#define CEX3A_MAX_RESPONSE_SIZE 0x210 /* 512 bit modulus
- * (max outputdatalength) +
- * type80_hdr*/
-#define CEX3A_MAX_MESSAGE_SIZE sizeof(struct type50_crb3_msg)
-
-#define CEX2A_CLEANUP_TIME (15*HZ)
-#define CEX3A_CLEANUP_TIME CEX2A_CLEANUP_TIME
-
-MODULE_AUTHOR("IBM Corporation");
-MODULE_DESCRIPTION("CEX2A/CEX3A Cryptographic Coprocessor device driver, " \
- "Copyright IBM Corp. 2001, 2018");
-MODULE_LICENSE("GPL");
-
-static struct ap_device_id zcrypt_cex2a_card_ids[] = {
- { .dev_type = AP_DEVICE_TYPE_CEX2A,
- .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
- { .dev_type = AP_DEVICE_TYPE_CEX3A,
- .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
- { /* end of list */ },
-};
-
-MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_card_ids);
-
-static struct ap_device_id zcrypt_cex2a_queue_ids[] = {
- { .dev_type = AP_DEVICE_TYPE_CEX2A,
- .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
- { .dev_type = AP_DEVICE_TYPE_CEX3A,
- .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
- { /* end of list */ },
-};
-
-MODULE_DEVICE_TABLE(ap, zcrypt_cex2a_queue_ids);
-
-/**
- * Probe function for CEX2A card devices. It always accepts the AP device
- * since the bus_match already checked the card type.
- * @ap_dev: pointer to the AP device.
- */
-static int zcrypt_cex2a_card_probe(struct ap_device *ap_dev)
-{
- /*
- * Normalized speed ratings per crypto adapter
- * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
- */
- static const int CEX2A_SPEED_IDX[] = {
- 800, 1000, 2000, 900, 1200, 2400, 0, 0};
- static const int CEX3A_SPEED_IDX[] = {
- 400, 500, 1000, 450, 550, 1200, 0, 0};
-
- struct ap_card *ac = to_ap_card(&ap_dev->device);
- struct zcrypt_card *zc;
- int rc = 0;
-
- zc = zcrypt_card_alloc();
- if (!zc)
- return -ENOMEM;
- zc->card = ac;
- ac->private = zc;
-
- if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A) {
- zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
- zc->max_mod_size = CEX2A_MAX_MOD_SIZE;
- memcpy(zc->speed_rating, CEX2A_SPEED_IDX,
- sizeof(CEX2A_SPEED_IDX));
- zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
- zc->type_string = "CEX2A";
- zc->user_space_type = ZCRYPT_CEX2A;
- } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX3A) {
- zc->min_mod_size = CEX2A_MIN_MOD_SIZE;
- zc->max_mod_size = CEX2A_MAX_MOD_SIZE;
- zc->max_exp_bit_length = CEX2A_MAX_MOD_SIZE;
- if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
- ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) {
- zc->max_mod_size = CEX3A_MAX_MOD_SIZE;
- zc->max_exp_bit_length = CEX3A_MAX_MOD_SIZE;
- }
- memcpy(zc->speed_rating, CEX3A_SPEED_IDX,
- sizeof(CEX3A_SPEED_IDX));
- zc->type_string = "CEX3A";
- zc->user_space_type = ZCRYPT_CEX3A;
- } else {
- zcrypt_card_free(zc);
- return -ENODEV;
- }
- zc->online = 1;
-
- rc = zcrypt_card_register(zc);
- if (rc) {
- ac->private = NULL;
- zcrypt_card_free(zc);
- }
-
- return rc;
-}
-
-/**
- * This is called to remove the CEX2A card driver information
- * if an AP card device is removed.
- */
-static void zcrypt_cex2a_card_remove(struct ap_device *ap_dev)
-{
- struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
-
- if (zc)
- zcrypt_card_unregister(zc);
-}
-
-static struct ap_driver zcrypt_cex2a_card_driver = {
- .probe = zcrypt_cex2a_card_probe,
- .remove = zcrypt_cex2a_card_remove,
- .ids = zcrypt_cex2a_card_ids,
- .flags = AP_DRIVER_FLAG_DEFAULT,
-};
-
-/**
- * Probe function for CEX2A queue devices. It always accepts the AP device
- * since the bus_match already checked the queue type.
- * @ap_dev: pointer to the AP device.
- */
-static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
-{
- struct ap_queue *aq = to_ap_queue(&ap_dev->device);
- struct zcrypt_queue *zq = NULL;
- int rc;
-
- switch (ap_dev->device_type) {
- case AP_DEVICE_TYPE_CEX2A:
- zq = zcrypt_queue_alloc(CEX2A_MAX_RESPONSE_SIZE);
- if (!zq)
- return -ENOMEM;
- break;
- case AP_DEVICE_TYPE_CEX3A:
- zq = zcrypt_queue_alloc(CEX3A_MAX_RESPONSE_SIZE);
- if (!zq)
- return -ENOMEM;
- break;
- }
- if (!zq)
- return -ENODEV;
- zq->ops = zcrypt_msgtype(MSGTYPE50_NAME, MSGTYPE50_VARIANT_DEFAULT);
- zq->queue = aq;
- zq->online = 1;
- atomic_set(&zq->load, 0);
- ap_queue_init_reply(aq, &zq->reply);
- aq->request_timeout = CEX2A_CLEANUP_TIME,
- aq->private = zq;
- rc = zcrypt_queue_register(zq);
- if (rc) {
- aq->private = NULL;
- zcrypt_queue_free(zq);
- }
-
- return rc;
-}
-
-/**
- * This is called to remove the CEX2A queue driver information
- * if an AP queue device is removed.
- */
-static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev)
-{
- struct ap_queue *aq = to_ap_queue(&ap_dev->device);
- struct zcrypt_queue *zq = aq->private;
-
- if (zq)
- zcrypt_queue_unregister(zq);
-}
-
-static struct ap_driver zcrypt_cex2a_queue_driver = {
- .probe = zcrypt_cex2a_queue_probe,
- .remove = zcrypt_cex2a_queue_remove,
- .suspend = ap_queue_suspend,
- .resume = ap_queue_resume,
- .ids = zcrypt_cex2a_queue_ids,
- .flags = AP_DRIVER_FLAG_DEFAULT,
-};
-
-int __init zcrypt_cex2a_init(void)
-{
- int rc;
-
- rc = ap_driver_register(&zcrypt_cex2a_card_driver,
- THIS_MODULE, "cex2acard");
- if (rc)
- return rc;
-
- rc = ap_driver_register(&zcrypt_cex2a_queue_driver,
- THIS_MODULE, "cex2aqueue");
- if (rc)
- ap_driver_unregister(&zcrypt_cex2a_card_driver);
-
- return rc;
-}
-
-void __exit zcrypt_cex2a_exit(void)
-{
- ap_driver_unregister(&zcrypt_cex2a_queue_driver);
- ap_driver_unregister(&zcrypt_cex2a_card_driver);
-}
-
-module_init(zcrypt_cex2a_init);
-module_exit(zcrypt_cex2a_exit);
diff --git a/drivers/s390/crypto/zcrypt_cex2a.h b/drivers/s390/crypto/zcrypt_cex2a.h
index 7842214d9d09..e69de29bb2d1 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.h
+++ b/drivers/s390/crypto/zcrypt_cex2a.h
@@ -1,134 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright IBM Corp. 2001, 2006
- * Author(s): Robert Burroughs
- * Eric Rossman (edrossma@us.ibm.com)
- *
- * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
- * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- */
-
-#ifndef _ZCRYPT_CEX2A_H_
-#define _ZCRYPT_CEX2A_H_
-
-/**
- * The type 50 message family is associated with CEXxA cards.
- *
- * The members of the family are described below.
- *
- * Note that all unsigned char arrays are right-justified and left-padded
- * with zeroes.
- *
- * Note that all reserved fields must be zeroes.
- */
-struct type50_hdr {
- unsigned char reserved1;
- unsigned char msg_type_code; /* 0x50 */
- unsigned short msg_len;
- unsigned char reserved2;
- unsigned char ignored;
- unsigned short reserved3;
-} __packed;
-
-#define TYPE50_TYPE_CODE 0x50
-
-#define TYPE50_MEB1_FMT 0x0001
-#define TYPE50_MEB2_FMT 0x0002
-#define TYPE50_MEB3_FMT 0x0003
-#define TYPE50_CRB1_FMT 0x0011
-#define TYPE50_CRB2_FMT 0x0012
-#define TYPE50_CRB3_FMT 0x0013
-
-/* Mod-Exp, with a small modulus */
-struct type50_meb1_msg {
- struct type50_hdr header;
- unsigned short keyblock_type; /* 0x0001 */
- unsigned char reserved[6];
- unsigned char exponent[128];
- unsigned char modulus[128];
- unsigned char message[128];
-} __packed;
-
-/* Mod-Exp, with a large modulus */
-struct type50_meb2_msg {
- struct type50_hdr header;
- unsigned short keyblock_type; /* 0x0002 */
- unsigned char reserved[6];
- unsigned char exponent[256];
- unsigned char modulus[256];
- unsigned char message[256];
-} __packed;
-
-/* Mod-Exp, with a larger modulus */
-struct type50_meb3_msg {
- struct type50_hdr header;
- unsigned short keyblock_type; /* 0x0003 */
- unsigned char reserved[6];
- unsigned char exponent[512];
- unsigned char modulus[512];
- unsigned char message[512];
-} __packed;
-
-/* CRT, with a small modulus */
-struct type50_crb1_msg {
- struct type50_hdr header;
- unsigned short keyblock_type; /* 0x0011 */
- unsigned char reserved[6];
- unsigned char p[64];
- unsigned char q[64];
- unsigned char dp[64];
- unsigned char dq[64];
- unsigned char u[64];
- unsigned char message[128];
-} __packed;
-
-/* CRT, with a large modulus */
-struct type50_crb2_msg {
- struct type50_hdr header;
- unsigned short keyblock_type; /* 0x0012 */
- unsigned char reserved[6];
- unsigned char p[128];
- unsigned char q[128];
- unsigned char dp[128];
- unsigned char dq[128];
- unsigned char u[128];
- unsigned char message[256];
-} __packed;
-
-/* CRT, with a larger modulus */
-struct type50_crb3_msg {
- struct type50_hdr header;
- unsigned short keyblock_type; /* 0x0013 */
- unsigned char reserved[6];
- unsigned char p[256];
- unsigned char q[256];
- unsigned char dp[256];
- unsigned char dq[256];
- unsigned char u[256];
- unsigned char message[512];
-} __packed;
-
-/**
- * The type 80 response family is associated with CEXxA cards.
- *
- * Note that all unsigned char arrays are right-justified and left-padded
- * with zeroes.
- *
- * Note that all reserved fields must be zeroes.
- */
-
-#define TYPE80_RSP_CODE 0x80
-
-struct type80_hdr {
- unsigned char reserved1;
- unsigned char type; /* 0x80 */
- unsigned short len;
- unsigned char code; /* 0x00 */
- unsigned char reserved2[3];
- unsigned char reserved3[8];
-} __packed;
-
-int zcrypt_cex2a_init(void);
-void zcrypt_cex2a_exit(void);
-
-#endif /* _ZCRYPT_CEX2A_H_ */
diff --git a/drivers/s390/crypto/zcrypt_cex2c.c b/drivers/s390/crypto/zcrypt_cex2c.c
index 35c7c6672713..e69de29bb2d1 100644
--- a/drivers/s390/crypto/zcrypt_cex2c.c
+++ b/drivers/s390/crypto/zcrypt_cex2c.c
@@ -1,291 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright IBM Corp. 2001, 2018
- * Author(s): Robert Burroughs
- * Eric Rossman (edrossma@us.ibm.com)
- *
- * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
- * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- * Ralph Wuerthner <rwuerthn@de.ibm.com>
- * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/atomic.h>
-#include <linux/uaccess.h>
-#include <linux/mod_devicetable.h>
-
-#include "ap_bus.h"
-#include "zcrypt_api.h"
-#include "zcrypt_error.h"
-#include "zcrypt_msgtype6.h"
-#include "zcrypt_cex2c.h"
-#include "zcrypt_cca_key.h"
-
-#define CEX2C_MIN_MOD_SIZE 16 /* 128 bits */
-#define CEX2C_MAX_MOD_SIZE 256 /* 2048 bits */
-#define CEX3C_MIN_MOD_SIZE 16 /* 128 bits */
-#define CEX3C_MAX_MOD_SIZE 512 /* 4096 bits */
-#define CEX2C_MAX_XCRB_MESSAGE_SIZE (12*1024)
-#define CEX2C_CLEANUP_TIME (15*HZ)
-
-MODULE_AUTHOR("IBM Corporation");
-MODULE_DESCRIPTION("CEX2C/CEX3C Cryptographic Coprocessor device driver, " \
- "Copyright IBM Corp. 2001, 2018");
-MODULE_LICENSE("GPL");
-
-static struct ap_device_id zcrypt_cex2c_card_ids[] = {
- { .dev_type = AP_DEVICE_TYPE_CEX2C,
- .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
- { .dev_type = AP_DEVICE_TYPE_CEX3C,
- .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
- { /* end of list */ },
-};
-
-MODULE_DEVICE_TABLE(ap, zcrypt_cex2c_card_ids);
-
-static struct ap_device_id zcrypt_cex2c_queue_ids[] = {
- { .dev_type = AP_DEVICE_TYPE_CEX2C,
- .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
- { .dev_type = AP_DEVICE_TYPE_CEX3C,
- .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
- { /* end of list */ },
-};
-
-MODULE_DEVICE_TABLE(ap, zcrypt_cex2c_queue_ids);
-
-/**
- * Large random number detection function. It sends a message to a CEX2C/CEX3C
- * card to find out if large random numbers are supported.
- * @aq: pointer to the AP queue.
- *
- * Returns 1 if large random numbers are supported, 0 if not and < 0 on error.
- */
-static int zcrypt_cex2c_rng_supported(struct ap_queue *aq)
-{
- struct ap_message ap_msg;
- unsigned long long psmid;
- unsigned int domain;
- struct {
- struct type86_hdr hdr;
- struct type86_fmt2_ext fmt2;
- struct CPRBX cprbx;
- } __packed *reply;
- struct {
- struct type6_hdr hdr;
- struct CPRBX cprbx;
- char function_code[2];
- short int rule_length;
- char rule[8];
- short int verb_length;
- short int key_length;
- } __packed *msg;
- int rc, i;
-
- ap_init_message(&ap_msg);
- ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
- if (!ap_msg.message)
- return -ENOMEM;
-
- rng_type6CPRB_msgX(&ap_msg, 4, &domain);
-
- msg = ap_msg.message;
- msg->cprbx.domain = AP_QID_QUEUE(aq->qid);
-
- rc = ap_send(aq->qid, 0x0102030405060708ULL, ap_msg.message,
- ap_msg.length);
- if (rc)
- goto out_free;
-
- /* Wait for the test message to complete. */
- for (i = 0; i < 2 * HZ; i++) {
- msleep(1000 / HZ);
- rc = ap_recv(aq->qid, &psmid, ap_msg.message, 4096);
- if (rc == 0 && psmid == 0x0102030405060708ULL)
- break;
- }
-
- if (i >= 2 * HZ) {
- /* Got no answer. */
- rc = -ENODEV;
- goto out_free;
- }
-
- reply = ap_msg.message;
- if (reply->cprbx.ccp_rtcode == 0 && reply->cprbx.ccp_rscode == 0)
- rc = 1;
- else
- rc = 0;
-out_free:
- free_page((unsigned long) ap_msg.message);
- return rc;
-}
-
-/**
- * Probe function for CEX2C/CEX3C card devices. It always accepts the
- * AP device since the bus_match already checked the hardware type.
- * @ap_dev: pointer to the AP card device.
- */
-static int zcrypt_cex2c_card_probe(struct ap_device *ap_dev)
-{
- /*
- * Normalized speed ratings per crypto adapter
- * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
- */
- static const int CEX2C_SPEED_IDX[] = {
- 1000, 1400, 2400, 1100, 1500, 2600, 100, 12};
- static const int CEX3C_SPEED_IDX[] = {
- 500, 700, 1400, 550, 800, 1500, 80, 10};
-
- struct ap_card *ac = to_ap_card(&ap_dev->device);
- struct zcrypt_card *zc;
- int rc = 0;
-
- zc = zcrypt_card_alloc();
- if (!zc)
- return -ENOMEM;
- zc->card = ac;
- ac->private = zc;
- switch (ac->ap_dev.device_type) {
- case AP_DEVICE_TYPE_CEX2C:
- zc->user_space_type = ZCRYPT_CEX2C;
- zc->type_string = "CEX2C";
- memcpy(zc->speed_rating, CEX2C_SPEED_IDX,
- sizeof(CEX2C_SPEED_IDX));
- zc->min_mod_size = CEX2C_MIN_MOD_SIZE;
- zc->max_mod_size = CEX2C_MAX_MOD_SIZE;
- zc->max_exp_bit_length = CEX2C_MAX_MOD_SIZE;
- break;
- case AP_DEVICE_TYPE_CEX3C:
- zc->user_space_type = ZCRYPT_CEX3C;
- zc->type_string = "CEX3C";
- memcpy(zc->speed_rating, CEX3C_SPEED_IDX,
- sizeof(CEX3C_SPEED_IDX));
- zc->min_mod_size = CEX3C_MIN_MOD_SIZE;
- zc->max_mod_size = CEX3C_MAX_MOD_SIZE;
- zc->max_exp_bit_length = CEX3C_MAX_MOD_SIZE;
- break;
- default:
- zcrypt_card_free(zc);
- return -ENODEV;
- }
- zc->online = 1;
-
- rc = zcrypt_card_register(zc);
- if (rc) {
- ac->private = NULL;
- zcrypt_card_free(zc);
- }
-
- return rc;
-}
-
-/**
- * This is called to remove the CEX2C/CEX3C card driver information
- * if an AP card device is removed.
- */
-static void zcrypt_cex2c_card_remove(struct ap_device *ap_dev)
-{
- struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
-
- if (zc)
- zcrypt_card_unregister(zc);
-}
-
-static struct ap_driver zcrypt_cex2c_card_driver = {
- .probe = zcrypt_cex2c_card_probe,
- .remove = zcrypt_cex2c_card_remove,
- .ids = zcrypt_cex2c_card_ids,
- .flags = AP_DRIVER_FLAG_DEFAULT,
-};
-
-/**
- * Probe function for CEX2C/CEX3C queue devices. It always accepts the
- * AP device since the bus_match already checked the hardware type.
- * @ap_dev: pointer to the AP queue device.
- */
-static int zcrypt_cex2c_queue_probe(struct ap_device *ap_dev)
-{
- struct ap_queue *aq = to_ap_queue(&ap_dev->device);
- struct zcrypt_queue *zq;
- int rc;
-
- zq = zcrypt_queue_alloc(CEX2C_MAX_XCRB_MESSAGE_SIZE);
- if (!zq)
- return -ENOMEM;
- zq->queue = aq;
- zq->online = 1;
- atomic_set(&zq->load, 0);
- rc = zcrypt_cex2c_rng_supported(aq);
- if (rc < 0) {
- zcrypt_queue_free(zq);
- return rc;
- }
- if (rc)
- zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
- MSGTYPE06_VARIANT_DEFAULT);
- else
- zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
- MSGTYPE06_VARIANT_NORNG);
- ap_queue_init_reply(aq, &zq->reply);
- aq->request_timeout = CEX2C_CLEANUP_TIME;
- aq->private = zq;
- rc = zcrypt_queue_register(zq);
- if (rc) {
- aq->private = NULL;
- zcrypt_queue_free(zq);
- }
- return rc;
-}
-
-/**
- * This is called to remove the CEX2C/CEX3C queue driver information
- * if an AP queue device is removed.
- */
-static void zcrypt_cex2c_queue_remove(struct ap_device *ap_dev)
-{
- struct ap_queue *aq = to_ap_queue(&ap_dev->device);
- struct zcrypt_queue *zq = aq->private;
-
- if (zq)
- zcrypt_queue_unregister(zq);
-}
-
-static struct ap_driver zcrypt_cex2c_queue_driver = {
- .probe = zcrypt_cex2c_queue_probe,
- .remove = zcrypt_cex2c_queue_remove,
- .suspend = ap_queue_suspend,
- .resume = ap_queue_resume,
- .ids = zcrypt_cex2c_queue_ids,
- .flags = AP_DRIVER_FLAG_DEFAULT,
-};
-
-int __init zcrypt_cex2c_init(void)
-{
- int rc;
-
- rc = ap_driver_register(&zcrypt_cex2c_card_driver,
- THIS_MODULE, "cex2card");
- if (rc)
- return rc;
-
- rc = ap_driver_register(&zcrypt_cex2c_queue_driver,
- THIS_MODULE, "cex2cqueue");
- if (rc)
- ap_driver_unregister(&zcrypt_cex2c_card_driver);
-
- return rc;
-}
-
-void zcrypt_cex2c_exit(void)
-{
- ap_driver_unregister(&zcrypt_cex2c_queue_driver);
- ap_driver_unregister(&zcrypt_cex2c_card_driver);
-}
-
-module_init(zcrypt_cex2c_init);
-module_exit(zcrypt_cex2c_exit);
diff --git a/drivers/s390/crypto/zcrypt_cex2c.h b/drivers/s390/crypto/zcrypt_cex2c.h
index 6ec405c2bec2..e69de29bb2d1 100644
--- a/drivers/s390/crypto/zcrypt_cex2c.h
+++ b/drivers/s390/crypto/zcrypt_cex2c.h
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright IBM Corp. 2001, 2018
- * Author(s): Robert Burroughs
- * Eric Rossman (edrossma@us.ibm.com)
- *
- * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
- * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- */
-
-#ifndef _ZCRYPT_CEX2C_H_
-#define _ZCRYPT_CEX2C_H_
-
-int zcrypt_cex2c_init(void);
-void zcrypt_cex2c_exit(void);
-
-#endif /* _ZCRYPT_CEX2C_H_ */
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 582ffa7e0f18..6ba7fbddd3f7 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright IBM Corp. 2012
+ * Copyright IBM Corp. 2012, 2022
* Author(s): Holger Dengler <hd@linux.vnet.ibm.com>
*/
@@ -18,6 +18,8 @@
#include "zcrypt_msgtype50.h"
#include "zcrypt_error.h"
#include "zcrypt_cex4.h"
+#include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
#define CEX4A_MIN_MOD_SIZE 1 /* 8 bits */
#define CEX4A_MAX_MOD_SIZE_2K 256 /* 2048 bits */
@@ -26,19 +28,16 @@
#define CEX4C_MIN_MOD_SIZE 16 /* 256 bits */
#define CEX4C_MAX_MOD_SIZE 512 /* 4096 bits */
-#define CEX4A_MAX_MESSAGE_SIZE MSGTYPE50_CRB3_MAX_MSG_SIZE
-#define CEX4C_MAX_MESSAGE_SIZE MSGTYPE06_MAX_MSG_SIZE
-
/* Waiting time for requests to be processed.
* Currently there are some types of request which are not deterministic.
* But the maximum time limit managed by the stomper code is set to 60sec.
* Hence we have to wait at least that time period.
*/
-#define CEX4_CLEANUP_TIME (900*HZ)
+#define CEX4_CLEANUP_TIME (900 * HZ)
MODULE_AUTHOR("IBM Corporation");
-MODULE_DESCRIPTION("CEX4/CEX5/CEX6 Cryptographic Card device driver, " \
- "Copyright IBM Corp. 2018");
+MODULE_DESCRIPTION("CEX[45678] Cryptographic Card device driver, " \
+ "Copyright IBM Corp. 2022");
MODULE_LICENSE("GPL");
static struct ap_device_id zcrypt_cex4_card_ids[] = {
@@ -48,6 +47,10 @@ static struct ap_device_id zcrypt_cex4_card_ids[] = {
.match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX6,
.match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX7,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX8,
+ .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE },
{ /* end of list */ },
};
@@ -60,13 +63,363 @@ static struct ap_device_id zcrypt_cex4_queue_ids[] = {
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ .dev_type = AP_DEVICE_TYPE_CEX6,
.match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX7,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX8,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ap, zcrypt_cex4_queue_ids);
-/**
- * Probe function for CEX4/CEX5/CEX6 card device. It always
+/*
+ * CCA card additional device attributes
+ */
+static ssize_t cca_serialnr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+ struct cca_info ci;
+
+ memset(&ci, 0, sizeof(ci));
+
+ if (ap_domain_index >= 0)
+ cca_get_info(ac->id, ap_domain_index, &ci, 0);
+
+ return sysfs_emit(buf, "%s\n", ci.serial);
+}
+
+static struct device_attribute dev_attr_cca_serialnr =
+ __ATTR(serialnr, 0444, cca_serialnr_show, NULL);
+
+static struct attribute *cca_card_attrs[] = {
+ &dev_attr_cca_serialnr.attr,
+ NULL,
+};
+
+static const struct attribute_group cca_card_attr_grp = {
+ .attrs = cca_card_attrs,
+};
+
+/*
+ * CCA queue additional device attributes
+ */
+static ssize_t cca_mkvps_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ static const char * const new_state[] = { "empty", "partial", "full" };
+ static const char * const cao_state[] = { "invalid", "valid" };
+ struct zcrypt_queue *zq = dev_get_drvdata(dev);
+ struct cca_info ci;
+ int n = 0;
+
+ memset(&ci, 0, sizeof(ci));
+
+ cca_get_info(AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ &ci, 0);
+
+ if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3')
+ n += sysfs_emit_at(buf, n, "AES NEW: %s 0x%016llx\n",
+ new_state[ci.new_aes_mk_state - '1'],
+ ci.new_aes_mkvp);
+ else
+ n += sysfs_emit_at(buf, n, "AES NEW: - -\n");
+
+ if (ci.cur_aes_mk_state >= '1' && ci.cur_aes_mk_state <= '2')
+ n += sysfs_emit_at(buf, n, "AES CUR: %s 0x%016llx\n",
+ cao_state[ci.cur_aes_mk_state - '1'],
+ ci.cur_aes_mkvp);
+ else
+ n += sysfs_emit_at(buf, n, "AES CUR: - -\n");
+
+ if (ci.old_aes_mk_state >= '1' && ci.old_aes_mk_state <= '2')
+ n += sysfs_emit_at(buf, n, "AES OLD: %s 0x%016llx\n",
+ cao_state[ci.old_aes_mk_state - '1'],
+ ci.old_aes_mkvp);
+ else
+ n += sysfs_emit_at(buf, n, "AES OLD: - -\n");
+
+ if (ci.new_apka_mk_state >= '1' && ci.new_apka_mk_state <= '3')
+ n += sysfs_emit_at(buf, n, "APKA NEW: %s 0x%016llx\n",
+ new_state[ci.new_apka_mk_state - '1'],
+ ci.new_apka_mkvp);
+ else
+ n += sysfs_emit_at(buf, n, "APKA NEW: - -\n");
+
+ if (ci.cur_apka_mk_state >= '1' && ci.cur_apka_mk_state <= '2')
+ n += sysfs_emit_at(buf, n, "APKA CUR: %s 0x%016llx\n",
+ cao_state[ci.cur_apka_mk_state - '1'],
+ ci.cur_apka_mkvp);
+ else
+ n += sysfs_emit_at(buf, n, "APKA CUR: - -\n");
+
+ if (ci.old_apka_mk_state >= '1' && ci.old_apka_mk_state <= '2')
+ n += sysfs_emit_at(buf, n, "APKA OLD: %s 0x%016llx\n",
+ cao_state[ci.old_apka_mk_state - '1'],
+ ci.old_apka_mkvp);
+ else
+ n += sysfs_emit_at(buf, n, "APKA OLD: - -\n");
+
+ if (ci.new_asym_mk_state >= '1' && ci.new_asym_mk_state <= '3')
+ n += sysfs_emit_at(buf, n, "ASYM NEW: %s 0x%016llx%016llx\n",
+ new_state[ci.new_asym_mk_state - '1'],
+ *((u64 *)(ci.new_asym_mkvp)),
+ *((u64 *)(ci.new_asym_mkvp + sizeof(u64))));
+ else
+ n += sysfs_emit_at(buf, n, "ASYM NEW: - -\n");
+
+ if (ci.cur_asym_mk_state >= '1' && ci.cur_asym_mk_state <= '2')
+ n += sysfs_emit_at(buf, n, "ASYM CUR: %s 0x%016llx%016llx\n",
+ cao_state[ci.cur_asym_mk_state - '1'],
+ *((u64 *)(ci.cur_asym_mkvp)),
+ *((u64 *)(ci.cur_asym_mkvp + sizeof(u64))));
+ else
+ n += sysfs_emit_at(buf, n, "ASYM CUR: - -\n");
+
+ if (ci.old_asym_mk_state >= '1' && ci.old_asym_mk_state <= '2')
+ n += sysfs_emit_at(buf, n, "ASYM OLD: %s 0x%016llx%016llx\n",
+ cao_state[ci.old_asym_mk_state - '1'],
+ *((u64 *)(ci.old_asym_mkvp)),
+ *((u64 *)(ci.old_asym_mkvp + sizeof(u64))));
+ else
+ n += sysfs_emit_at(buf, n, "ASYM OLD: - -\n");
+
+ return n;
+}
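+
+/*
+ * Illustrative mkvps output (made-up values, not from a real card);
+ * each master key register prints its state and verification pattern:
+ *
+ *   AES NEW: empty 0x0000000000000000
+ *   AES CUR: valid 0xb072bc5c245aac8a
+ *   AES OLD: - -
+ *
+ * The APKA and ASYM registers follow the same pattern, with the ASYM
+ * verification patterns printed as 128 bit (32 hex digit) values.
+ */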
+
+static struct device_attribute dev_attr_cca_mkvps =
+ __ATTR(mkvps, 0444, cca_mkvps_show, NULL);
+
+static struct attribute *cca_queue_attrs[] = {
+ &dev_attr_cca_mkvps.attr,
+ NULL,
+};
+
+static const struct attribute_group cca_queue_attr_grp = {
+ .attrs = cca_queue_attrs,
+};
+
+/*
+ * EP11 card additional device attributes
+ */
+static ssize_t ep11_api_ordinalnr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+ struct ep11_card_info ci;
+
+ memset(&ci, 0, sizeof(ci));
+
+ ep11_get_card_info(ac->id, &ci, 0);
+
+ if (ci.API_ord_nr > 0)
+ return sysfs_emit(buf, "%u\n", ci.API_ord_nr);
+ else
+ return sysfs_emit(buf, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_api_ordinalnr =
+ __ATTR(API_ordinalnr, 0444, ep11_api_ordinalnr_show, NULL);
+
+static ssize_t ep11_fw_version_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+ struct ep11_card_info ci;
+
+ memset(&ci, 0, sizeof(ci));
+
+ ep11_get_card_info(ac->id, &ci, 0);
+
+ if (ci.FW_version > 0)
+ return sysfs_emit(buf, "%d.%d\n",
+ (int)(ci.FW_version >> 8),
+ (int)(ci.FW_version & 0xFF));
+ else
+ return sysfs_emit(buf, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_fw_version =
+ __ATTR(FW_version, 0444, ep11_fw_version_show, NULL);
+
+static ssize_t ep11_serialnr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+ struct ep11_card_info ci;
+
+ memset(&ci, 0, sizeof(ci));
+
+ ep11_get_card_info(ac->id, &ci, 0);
+
+ if (ci.serial[0])
+ return sysfs_emit(buf, "%16.16s\n", ci.serial);
+ else
+ return sysfs_emit(buf, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_serialnr =
+ __ATTR(serialnr, 0444, ep11_serialnr_show, NULL);
+
+static const struct {
+ int mode_bit;
+ const char *mode_txt;
+} ep11_op_modes[] = {
+ { 0, "FIPS2009" },
+ { 1, "BSI2009" },
+ { 2, "FIPS2011" },
+ { 3, "BSI2011" },
+ { 4, "SIGG-IMPORT" },
+ { 5, "SIGG" },
+ { 6, "BSICC2017" },
+ { 7, "FIPS2021" },
+ { 8, "FIPS2024" },
+ { 0, NULL }
+};
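+
+/*
+ * Worked example (hypothetical value): an op_mode word of 0x4c has
+ * bits 2, 3 and 6 set, so the op_modes show functions below emit
+ * "FIPS2011 BSI2011 BSICC2017".
+ */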
+
+static ssize_t ep11_card_op_modes_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ap_card *ac = to_ap_card(dev);
+ struct ep11_card_info ci;
+ int i, n = 0;
+
+ memset(&ci, 0, sizeof(ci));
+
+ ep11_get_card_info(ac->id, &ci, 0);
+
+ for (i = 0; ep11_op_modes[i].mode_txt; i++) {
+ if (ci.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) {
+ if (n > 0)
+ buf[n++] = ' ';
+ n += sysfs_emit_at(buf, n, "%s",
+ ep11_op_modes[i].mode_txt);
+ }
+ }
+ n += sysfs_emit_at(buf, n, "\n");
+
+ return n;
+}
+
+static struct device_attribute dev_attr_ep11_card_op_modes =
+ __ATTR(op_modes, 0444, ep11_card_op_modes_show, NULL);
+
+static struct attribute *ep11_card_attrs[] = {
+ &dev_attr_ep11_api_ordinalnr.attr,
+ &dev_attr_ep11_fw_version.attr,
+ &dev_attr_ep11_serialnr.attr,
+ &dev_attr_ep11_card_op_modes.attr,
+ NULL,
+};
+
+static const struct attribute_group ep11_card_attr_grp = {
+ .attrs = ep11_card_attrs,
+};
+
+/*
+ * EP11 queue additional device attributes
+ */
+
+static ssize_t ep11_mkvps_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct zcrypt_queue *zq = dev_get_drvdata(dev);
+ int n = 0;
+ struct ep11_domain_info di;
+ static const char * const cwk_state[] = { "invalid", "valid" };
+ static const char * const nwk_state[] = { "empty", "uncommitted",
+ "committed" };
+
+ memset(&di, 0, sizeof(di));
+
+ if (zq->online)
+ ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ &di, 0);
+
+ if (di.cur_wk_state == '0') {
+ n = sysfs_emit(buf, "WK CUR: %s -\n",
+ cwk_state[di.cur_wk_state - '0']);
+ } else if (di.cur_wk_state == '1') {
+ n = sysfs_emit(buf, "WK CUR: %s 0x",
+ cwk_state[di.cur_wk_state - '0']);
+ bin2hex(buf + n, di.cur_wkvp, sizeof(di.cur_wkvp));
+ n += 2 * sizeof(di.cur_wkvp);
+ n += sysfs_emit_at(buf, n, "\n");
+ } else {
+ n = sysfs_emit(buf, "WK CUR: - -\n");
+ }
+
+ if (di.new_wk_state == '0') {
+ n += sysfs_emit_at(buf, n, "WK NEW: %s -\n",
+ nwk_state[di.new_wk_state - '0']);
+ } else if (di.new_wk_state >= '1' && di.new_wk_state <= '2') {
+ n += sysfs_emit_at(buf, n, "WK NEW: %s 0x",
+ nwk_state[di.new_wk_state - '0']);
+ bin2hex(buf + n, di.new_wkvp, sizeof(di.new_wkvp));
+ n += 2 * sizeof(di.new_wkvp);
+ n += sysfs_emit_at(buf, n, "\n");
+ } else {
+ n += sysfs_emit_at(buf, n, "WK NEW: - -\n");
+ }
+
+ return n;
+}
+
+static struct device_attribute dev_attr_ep11_mkvps =
+ __ATTR(mkvps, 0444, ep11_mkvps_show, NULL);
+
+static ssize_t ep11_queue_op_modes_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct zcrypt_queue *zq = dev_get_drvdata(dev);
+ int i, n = 0;
+ struct ep11_domain_info di;
+
+ memset(&di, 0, sizeof(di));
+
+ if (zq->online)
+ ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ &di, 0);
+
+ for (i = 0; ep11_op_modes[i].mode_txt; i++) {
+ if (di.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) {
+ if (n > 0)
+ buf[n++] = ' ';
+ n += sysfs_emit_at(buf, n, "%s",
+ ep11_op_modes[i].mode_txt);
+ }
+ }
+ n += sysfs_emit_at(buf, n, "\n");
+
+ return n;
+}
+
+static struct device_attribute dev_attr_ep11_queue_op_modes =
+ __ATTR(op_modes, 0444, ep11_queue_op_modes_show, NULL);
+
+static struct attribute *ep11_queue_attrs[] = {
+ &dev_attr_ep11_mkvps.attr,
+ &dev_attr_ep11_queue_op_modes.attr,
+ NULL,
+};
+
+static const struct attribute_group ep11_queue_attr_grp = {
+ .attrs = ep11_queue_attrs,
+};
+
+/*
+ * Probe function for CEX[45678] card device. It always
* accepts the AP device since the bus_match already checked
* the hardware type.
* @ap_dev: pointer to the AP device.
@@ -77,26 +430,38 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
* Normalized speed ratings per crypto adapter
* MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY
*/
- static const int CEX4A_SPEED_IDX[] = {
- 14, 19, 249, 42, 228, 1458, 0, 0};
- static const int CEX5A_SPEED_IDX[] = {
- 8, 9, 20, 18, 66, 458, 0, 0};
- static const int CEX6A_SPEED_IDX[] = {
- 6, 9, 20, 17, 65, 438, 0, 0};
-
- static const int CEX4C_SPEED_IDX[] = {
+ static const int CEX4A_SPEED_IDX[NUM_OPS] = {
+ 14, 19, 249, 42, 228, 1458, 0, 0};
+ static const int CEX5A_SPEED_IDX[NUM_OPS] = {
+ 8, 9, 20, 18, 66, 458, 0, 0};
+ static const int CEX6A_SPEED_IDX[NUM_OPS] = {
+ 6, 9, 20, 17, 65, 438, 0, 0};
+ static const int CEX7A_SPEED_IDX[NUM_OPS] = {
+ 6, 8, 17, 15, 54, 362, 0, 0};
+ static const int CEX8A_SPEED_IDX[NUM_OPS] = {
+ 6, 8, 17, 15, 54, 362, 0, 0};
+
+ static const int CEX4C_SPEED_IDX[NUM_OPS] = {
59, 69, 308, 83, 278, 2204, 209, 40};
static const int CEX5C_SPEED_IDX[] = {
- 24, 31, 50, 37, 90, 479, 27, 10};
- static const int CEX6C_SPEED_IDX[] = {
- 16, 20, 32, 27, 77, 455, 23, 9};
-
- static const int CEX4P_SPEED_IDX[] = {
- 224, 313, 3560, 359, 605, 2827, 0, 50};
- static const int CEX5P_SPEED_IDX[] = {
- 63, 84, 156, 83, 142, 533, 0, 10};
- static const int CEX6P_SPEED_IDX[] = {
- 55, 70, 121, 73, 129, 522, 0, 9};
+ 24, 31, 50, 37, 90, 479, 27, 10};
+ static const int CEX6C_SPEED_IDX[NUM_OPS] = {
+ 16, 20, 32, 27, 77, 455, 24, 9};
+ static const int CEX7C_SPEED_IDX[NUM_OPS] = {
+ 14, 16, 26, 23, 64, 376, 23, 8};
+ static const int CEX8C_SPEED_IDX[NUM_OPS] = {
+ 14, 16, 26, 23, 64, 376, 23, 8};
+
+ static const int CEX4P_SPEED_IDX[NUM_OPS] = {
+ 0, 0, 0, 0, 0, 0, 0, 50};
+ static const int CEX5P_SPEED_IDX[NUM_OPS] = {
+ 0, 0, 0, 0, 0, 0, 0, 10};
+ static const int CEX6P_SPEED_IDX[NUM_OPS] = {
+ 0, 0, 0, 0, 0, 0, 0, 9};
+ static const int CEX7P_SPEED_IDX[NUM_OPS] = {
+ 0, 0, 0, 0, 0, 0, 0, 8};
+ static const int CEX8P_SPEED_IDX[NUM_OPS] = {
+ 0, 0, 0, 0, 0, 0, 0, 8};
struct ap_card *ac = to_ap_card(&ap_dev->device);
struct zcrypt_card *zc;
@@ -106,27 +471,37 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
if (!zc)
return -ENOMEM;
zc->card = ac;
- ac->private = zc;
- if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL)) {
+ dev_set_drvdata(&ap_dev->device, zc);
+ if (ac->hwinfo.accel) {
if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
zc->type_string = "CEX4A";
zc->user_space_type = ZCRYPT_CEX4;
- memcpy(zc->speed_rating, CEX4A_SPEED_IDX,
- sizeof(CEX4A_SPEED_IDX));
+ zc->speed_rating = CEX4A_SPEED_IDX;
} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
zc->type_string = "CEX5A";
zc->user_space_type = ZCRYPT_CEX5;
- memcpy(zc->speed_rating, CEX5A_SPEED_IDX,
- sizeof(CEX5A_SPEED_IDX));
- } else {
+ zc->speed_rating = CEX5A_SPEED_IDX;
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
zc->type_string = "CEX6A";
zc->user_space_type = ZCRYPT_CEX6;
- memcpy(zc->speed_rating, CEX6A_SPEED_IDX,
- sizeof(CEX6A_SPEED_IDX));
+ zc->speed_rating = CEX6A_SPEED_IDX;
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX7) {
+ zc->type_string = "CEX7A";
+ zc->speed_rating = CEX7A_SPEED_IDX;
+ /* wrong user space type, just for compatibility
+ * with the ZCRYPT_STATUS_MASK ioctl.
+ */
+ zc->user_space_type = ZCRYPT_CEX6;
+ } else {
+ zc->type_string = "CEX8A";
+ zc->speed_rating = CEX8A_SPEED_IDX;
+ /* wrong user space type, just for compatibility
+ * with the ZCRYPT_STATUS_MASK ioctl.
+ */
+ zc->user_space_type = ZCRYPT_CEX6;
}
zc->min_mod_size = CEX4A_MIN_MOD_SIZE;
- if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) &&
- ap_test_bit(&ac->functions, AP_FUNC_CRT4K)) {
+ if (ac->hwinfo.mex4k && ac->hwinfo.crt4k) {
zc->max_mod_size = CEX4A_MAX_MOD_SIZE_4K;
zc->max_exp_bit_length =
CEX4A_MAX_MOD_SIZE_4K;
@@ -135,51 +510,73 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
zc->max_exp_bit_length =
CEX4A_MAX_MOD_SIZE_2K;
}
- } else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
+ } else if (ac->hwinfo.cca) {
if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
zc->type_string = "CEX4C";
- /* wrong user space type, must be CEX4
+ zc->speed_rating = CEX4C_SPEED_IDX;
+ /* wrong user space type, must be CEX3C
* just keep it for cca compatibility
*/
zc->user_space_type = ZCRYPT_CEX3C;
- memcpy(zc->speed_rating, CEX4C_SPEED_IDX,
- sizeof(CEX4C_SPEED_IDX));
} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
zc->type_string = "CEX5C";
- /* wrong user space type, must be CEX5
+ zc->speed_rating = CEX5C_SPEED_IDX;
+ /* wrong user space type, must be CEX3C
* just keep it for cca compatibility
*/
zc->user_space_type = ZCRYPT_CEX3C;
- memcpy(zc->speed_rating, CEX5C_SPEED_IDX,
- sizeof(CEX5C_SPEED_IDX));
- } else {
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
zc->type_string = "CEX6C";
- /* wrong user space type, must be CEX6
+ zc->speed_rating = CEX6C_SPEED_IDX;
+ /* wrong user space type, must be CEX3C
+ * just keep it for cca compatibility
+ */
+ zc->user_space_type = ZCRYPT_CEX3C;
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX7) {
+ zc->type_string = "CEX7C";
+ zc->speed_rating = CEX7C_SPEED_IDX;
+ /* wrong user space type, must be CEX3C
+ * just keep it for cca compatibility
+ */
+ zc->user_space_type = ZCRYPT_CEX3C;
+ } else {
+ zc->type_string = "CEX8C";
+ zc->speed_rating = CEX8C_SPEED_IDX;
+ /* wrong user space type, must be CEX3C
* just keep it for cca compatibility
*/
zc->user_space_type = ZCRYPT_CEX3C;
- memcpy(zc->speed_rating, CEX6C_SPEED_IDX,
- sizeof(CEX6C_SPEED_IDX));
}
zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
zc->max_exp_bit_length = CEX4C_MAX_MOD_SIZE;
- } else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
+ } else if (ac->hwinfo.ep11) {
if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX4) {
zc->type_string = "CEX4P";
zc->user_space_type = ZCRYPT_CEX4;
- memcpy(zc->speed_rating, CEX4P_SPEED_IDX,
- sizeof(CEX4P_SPEED_IDX));
+ zc->speed_rating = CEX4P_SPEED_IDX;
} else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) {
zc->type_string = "CEX5P";
zc->user_space_type = ZCRYPT_CEX5;
- memcpy(zc->speed_rating, CEX5P_SPEED_IDX,
- sizeof(CEX5P_SPEED_IDX));
- } else {
+ zc->speed_rating = CEX5P_SPEED_IDX;
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX6) {
zc->type_string = "CEX6P";
zc->user_space_type = ZCRYPT_CEX6;
- memcpy(zc->speed_rating, CEX6P_SPEED_IDX,
- sizeof(CEX6P_SPEED_IDX));
+ zc->speed_rating = CEX6P_SPEED_IDX;
+ } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX7) {
+ zc->type_string = "CEX7P";
+ zc->speed_rating = CEX7P_SPEED_IDX;
+ /* wrong user space type, just for compatibility
+ * with the ZCRYPT_STATUS_MASK ioctl.
+ */
+ zc->user_space_type = ZCRYPT_CEX6;
+ } else {
+ zc->type_string = "CEX8P";
+ zc->speed_rating = CEX8P_SPEED_IDX;
+ /* wrong user space type, just for compatibility
+ * with the ZCRYPT_STATUS_MASK ioctl.
+ */
+ zc->user_space_type = ZCRYPT_CEX6;
}
zc->min_mod_size = CEX4C_MIN_MOD_SIZE;
zc->max_mod_size = CEX4C_MAX_MOD_SIZE;
@@ -192,23 +589,44 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
rc = zcrypt_card_register(zc);
if (rc) {
- ac->private = NULL;
zcrypt_card_free(zc);
+ return rc;
+ }
+
+ if (ac->hwinfo.cca) {
+ rc = sysfs_create_group(&ap_dev->device.kobj,
+ &cca_card_attr_grp);
+ if (rc) {
+ zcrypt_card_unregister(zc);
+ zcrypt_card_free(zc);
+ }
+ } else if (ac->hwinfo.ep11) {
+ rc = sysfs_create_group(&ap_dev->device.kobj,
+ &ep11_card_attr_grp);
+ if (rc) {
+ zcrypt_card_unregister(zc);
+ zcrypt_card_free(zc);
+ }
}
return rc;
}
-/**
- * This is called to remove the CEX4/CEX5/CEX6 card driver information
- * if an AP card device is removed.
+/*
+ * This is called to remove the CEX[45678] card driver
+ * information if an AP card device is removed.
*/
static void zcrypt_cex4_card_remove(struct ap_device *ap_dev)
{
- struct zcrypt_card *zc = to_ap_card(&ap_dev->device)->private;
+ struct zcrypt_card *zc = dev_get_drvdata(&ap_dev->device);
+ struct ap_card *ac = to_ap_card(&ap_dev->device);
+
+ if (ac->hwinfo.cca)
+ sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
+ else if (ac->hwinfo.ep11)
+ sysfs_remove_group(&ap_dev->device.kobj, &ep11_card_attr_grp);
- if (zc)
- zcrypt_card_unregister(zc);
+ zcrypt_card_unregister(zc);
}
static struct ap_driver zcrypt_cex4_card_driver = {
@@ -218,8 +636,8 @@ static struct ap_driver zcrypt_cex4_card_driver = {
.flags = AP_DRIVER_FLAG_DEFAULT,
};
-/**
- * Probe function for CEX4/CEX5/CEX6 queue device. It always
+/*
+ * Probe function for CEX[45678] queue device. It always
* accepts the AP device since the bus_match already checked
* the hardware type.
* @ap_dev: pointer to the AP device.
@@ -230,20 +648,20 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
struct zcrypt_queue *zq;
int rc;
- if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL)) {
- zq = zcrypt_queue_alloc(CEX4A_MAX_MESSAGE_SIZE);
+ if (aq->card->hwinfo.accel) {
+ zq = zcrypt_queue_alloc(aq->card->maxmsgsize);
if (!zq)
return -ENOMEM;
zq->ops = zcrypt_msgtype(MSGTYPE50_NAME,
MSGTYPE50_VARIANT_DEFAULT);
- } else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
- zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE);
+ } else if (aq->card->hwinfo.cca) {
+ zq = zcrypt_queue_alloc(aq->card->maxmsgsize);
if (!zq)
return -ENOMEM;
zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
MSGTYPE06_VARIANT_DEFAULT);
- } else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
- zq = zcrypt_queue_alloc(CEX4C_MAX_MESSAGE_SIZE);
+ } else if (aq->card->hwinfo.ep11) {
+ zq = zcrypt_queue_alloc(aq->card->maxmsgsize);
if (!zq)
return -ENOMEM;
zq->ops = zcrypt_msgtype(MSGTYPE06_NAME,
@@ -251,39 +669,59 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
} else {
return -ENODEV;
}
+
zq->queue = aq;
zq->online = 1;
atomic_set(&zq->load, 0);
+ ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
- aq->request_timeout = CEX4_CLEANUP_TIME,
- aq->private = zq;
+ aq->request_timeout = CEX4_CLEANUP_TIME;
+ dev_set_drvdata(&ap_dev->device, zq);
rc = zcrypt_queue_register(zq);
if (rc) {
- aq->private = NULL;
zcrypt_queue_free(zq);
+ return rc;
+ }
+
+ if (aq->card->hwinfo.cca) {
+ rc = sysfs_create_group(&ap_dev->device.kobj,
+ &cca_queue_attr_grp);
+ if (rc) {
+ zcrypt_queue_unregister(zq);
+ zcrypt_queue_free(zq);
+ }
+ } else if (aq->card->hwinfo.ep11) {
+ rc = sysfs_create_group(&ap_dev->device.kobj,
+ &ep11_queue_attr_grp);
+ if (rc) {
+ zcrypt_queue_unregister(zq);
+ zcrypt_queue_free(zq);
+ }
}
return rc;
}
-/**
- * This is called to remove the CEX4/CEX5/CEX6 queue driver
+/*
+ * This is called to remove the CEX[45678] queue driver
* information if an AP queue device is removed.
*/
static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
{
+ struct zcrypt_queue *zq = dev_get_drvdata(&ap_dev->device);
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
- struct zcrypt_queue *zq = aq->private;
- if (zq)
- zcrypt_queue_unregister(zq);
+ if (aq->card->hwinfo.cca)
+ sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
+ else if (aq->card->hwinfo.ep11)
+ sysfs_remove_group(&ap_dev->device.kobj, &ep11_queue_attr_grp);
+
+ zcrypt_queue_unregister(zq);
}
static struct ap_driver zcrypt_cex4_queue_driver = {
.probe = zcrypt_cex4_queue_probe,
.remove = zcrypt_cex4_queue_remove,
- .suspend = ap_queue_suspend,
- .resume = ap_queue_resume,
.ids = zcrypt_cex4_queue_ids,
.flags = AP_DRIVER_FLAG_DEFAULT,
};
diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h
index 241dbb5f75bf..9a208dc4c200 100644
--- a/drivers/s390/crypto/zcrypt_debug.h
+++ b/drivers/s390/crypto/zcrypt_debug.h
@@ -17,10 +17,16 @@
#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
-#define DBF_MAX_SPRINTF_ARGS 5
+#define ZCRYPT_DBF_MAX_SPRINTF_ARGS 6
#define ZCRYPT_DBF(...) \
debug_sprintf_event(zcrypt_dbf_info, ##__VA_ARGS__)
+#define ZCRYPT_DBF_ERR(...) \
+ debug_sprintf_event(zcrypt_dbf_info, DBF_ERR, ##__VA_ARGS__)
+#define ZCRYPT_DBF_WARN(...) \
+ debug_sprintf_event(zcrypt_dbf_info, DBF_WARN, ##__VA_ARGS__)
+#define ZCRYPT_DBF_INFO(...) \
+ debug_sprintf_event(zcrypt_dbf_info, DBF_INFO, ##__VA_ARGS__)
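+
+/*
+ * Typical usage of these level-specific wrappers (illustrative only):
+ *
+ *   ZCRYPT_DBF_WARN("%s card=%02x queue=%04x marked offline, rc=%d\n",
+ *                   __func__, card, queue, rc);
+ *
+ * Note that ZCRYPT_DBF_MAX_SPRINTF_ARGS above limits such calls to
+ * at most 6 format arguments.
+ */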
extern debug_info_t *zcrypt_dbf_info;
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
new file mode 100644
index 000000000000..3dda9589f2b9
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -0,0 +1,1636 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright IBM Corp. 2019
+ * Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ *
+ * Collection of EP11 misc functions used by zcrypt and pkey
+ */
+
+#define pr_fmt(fmt) "zcrypt: " fmt
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+#include <crypto/aes.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_debug.h"
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_ep11misc.h"
+#include "zcrypt_ccamisc.h"
+
+#define EP11_PINBLOB_V1_BYTES 56
+
+/* default iv used here */
+static const u8 def_iv[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
+ 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
+
+/*
+ * Cprb memory pool held for urgent cases where no memory
+ * can be allocated via kmalloc. This pool is only used when
+ * alloc_cprbmem() is called with the xflag ZCRYPT_XFLAG_NOMEMALLOC.
+ */
+#define CPRB_MEMPOOL_ITEM_SIZE (8 * 1024)
+static mempool_t *cprb_mempool;
+
+/*
+ * This is pre-allocated memory for the device status array
+ * used within the ep11_findcard2() function. It is currently
+ * 128 * 128 * 4 bytes = 64 KB big. Usage of this memory is
+ * controlled via dev_status_mem_mutex. Needs adaptation if more
+ * than 128 cards or domains are to be supported.
+ */
+#define ZCRYPT_DEV_STATUS_CARD_MAX 128
+#define ZCRYPT_DEV_STATUS_QUEUE_MAX 128
+#define ZCRYPT_DEV_STATUS_ENTRIES (ZCRYPT_DEV_STATUS_CARD_MAX * \
+ ZCRYPT_DEV_STATUS_QUEUE_MAX)
+#define ZCRYPT_DEV_STATUS_EXT_SIZE (ZCRYPT_DEV_STATUS_ENTRIES * \
+ sizeof(struct zcrypt_device_status_ext))
+static void *dev_status_mem;
+static DEFINE_MUTEX(dev_status_mem_mutex);
+
+static int ep11_kb_split(const u8 *kb, size_t kblen, u32 kbver,
+ struct ep11kblob_header **kbhdr, size_t *kbhdrsize,
+ u8 **kbpl, size_t *kbplsize)
+{
+ struct ep11kblob_header *hdr = NULL;
+ size_t hdrsize, plsize = 0;
+ int rc = -EINVAL;
+ u8 *pl = NULL;
+
+ if (kblen < sizeof(struct ep11kblob_header))
+ goto out;
+ hdr = (struct ep11kblob_header *)kb;
+
+ switch (kbver) {
+ case TOKVER_EP11_AES:
+ /* header overlays the payload */
+ hdrsize = 0;
+ break;
+ case TOKVER_EP11_ECC_WITH_HEADER:
+ case TOKVER_EP11_AES_WITH_HEADER:
+ /* payload starts after the header */
+ hdrsize = sizeof(struct ep11kblob_header);
+ break;
+ default:
+ goto out;
+ }
+
+ plsize = kblen - hdrsize;
+ pl = (u8 *)kb + hdrsize;
+
+ if (kbhdr)
+ *kbhdr = hdr;
+ if (kbhdrsize)
+ *kbhdrsize = hdrsize;
+ if (kbpl)
+ *kbpl = pl;
+ if (kbplsize)
+ *kbplsize = plsize;
+
+ rc = 0;
+out:
+ return rc;
+}
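+
+/*
+ * Illustration of the two keyblob layouts split above:
+ *
+ *   TOKVER_EP11_AES (hdrsize = 0, the header overlays the payload,
+ *   living in the session field of the ep11keyblob):
+ *
+ *     kb:  [ ep11keyblob, starts with the overlaid header ]
+ *
+ *   TOKVER_EP11_AES/ECC_WITH_HEADER (hdrsize = sizeof(header)):
+ *
+ *     kb:  [ ep11kblob_header ][ payload (ep11keyblob) ]
+ */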
+
+static int ep11_kb_decode(const u8 *kb, size_t kblen,
+ struct ep11kblob_header **kbhdr, size_t *kbhdrsize,
+ struct ep11keyblob **kbpl, size_t *kbplsize)
+{
+ struct ep11kblob_header *tmph, *hdr = NULL;
+ size_t hdrsize = 0, plsize = 0;
+ struct ep11keyblob *pl = NULL;
+ int rc = -EINVAL;
+ u8 *tmpp;
+
+ if (kblen < sizeof(struct ep11kblob_header))
+ goto out;
+ tmph = (struct ep11kblob_header *)kb;
+
+ if (tmph->type != TOKTYPE_NON_CCA &&
+ tmph->len > kblen)
+ goto out;
+
+ if (ep11_kb_split(kb, kblen, tmph->version,
+ &hdr, &hdrsize, &tmpp, &plsize))
+ goto out;
+
+ if (plsize < sizeof(struct ep11keyblob))
+ goto out;
+
+ if (!is_ep11_keyblob(tmpp))
+ goto out;
+
+ pl = (struct ep11keyblob *)tmpp;
+ plsize = hdr->len - hdrsize;
+
+ if (kbhdr)
+ *kbhdr = hdr;
+ if (kbhdrsize)
+ *kbhdrsize = hdrsize;
+ if (kbpl)
+ *kbpl = pl;
+ if (kbplsize)
+ *kbplsize = plsize;
+
+ rc = 0;
+out:
+ return rc;
+}
+
+/*
+ * For valid ep11 keyblobs, returns a reference to the wrapping key verification
+ * pattern. Otherwise NULL.
+ */
+const u8 *ep11_kb_wkvp(const u8 *keyblob, u32 keybloblen)
+{
+ struct ep11keyblob *kb;
+
+ if (ep11_kb_decode(keyblob, keybloblen, NULL, NULL, &kb, NULL))
+ return NULL;
+ return kb->wkvp;
+}
+EXPORT_SYMBOL(ep11_kb_wkvp);
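+
+/*
+ * Usage sketch (illustrative, not part of the driver): given a domain
+ * info struct di filled by ep11_get_domain_info(), a caller may check
+ * whether a blob is wrapped by the domain's current wrapping key:
+ *
+ *   const u8 *wkvp = ep11_kb_wkvp(keyblob, keybloblen);
+ *
+ *   if (wkvp && !memcmp(wkvp, di.cur_wkvp, 16))
+ *           ... blob matches the current wk of this domain ...
+ *
+ * The 16 byte compare length is the size of the wkvp field within
+ * the ep11keyblob struct.
+ */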
+
+/*
+ * Simple check if the key blob is a valid EP11 AES key blob with header.
+ */
+int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
+ const u8 *key, u32 keylen, int checkcpacfexp)
+{
+ struct ep11kblob_header *hdr = (struct ep11kblob_header *)key;
+ struct ep11keyblob *kb = (struct ep11keyblob *)(key + sizeof(*hdr));
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+ if (keylen < sizeof(*hdr) + sizeof(*kb)) {
+		if (dbg)
+			DBF("%s key check failed, keylen %u < %zu\n",
+			    __func__, keylen, sizeof(*hdr) + sizeof(*kb));
+ return -EINVAL;
+ }
+
+ if (hdr->type != TOKTYPE_NON_CCA) {
+ if (dbg)
+ DBF("%s key check failed, type 0x%02x != 0x%02x\n",
+ __func__, (int)hdr->type, TOKTYPE_NON_CCA);
+ return -EINVAL;
+ }
+ if (hdr->hver != 0x00) {
+ if (dbg)
+ DBF("%s key check failed, header version 0x%02x != 0x00\n",
+ __func__, (int)hdr->hver);
+ return -EINVAL;
+ }
+ if (hdr->version != TOKVER_EP11_AES_WITH_HEADER) {
+ if (dbg)
+ DBF("%s key check failed, version 0x%02x != 0x%02x\n",
+ __func__, (int)hdr->version, TOKVER_EP11_AES_WITH_HEADER);
+ return -EINVAL;
+ }
+ if (hdr->len > keylen) {
+ if (dbg)
+ DBF("%s key check failed, header len %d keylen %u mismatch\n",
+ __func__, (int)hdr->len, keylen);
+ return -EINVAL;
+ }
+ if (hdr->len < sizeof(*hdr) + sizeof(*kb)) {
+ if (dbg)
+ DBF("%s key check failed, header len %d < %zu\n",
+ __func__, (int)hdr->len, sizeof(*hdr) + sizeof(*kb));
+ return -EINVAL;
+ }
+
+ if (kb->version != EP11_STRUCT_MAGIC) {
+ if (dbg)
+ DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n",
+ __func__, (int)kb->version, EP11_STRUCT_MAGIC);
+ return -EINVAL;
+ }
+ if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
+ if (dbg)
+ DBF("%s key check failed, PKEY_EXTRACTABLE is off\n",
+ __func__);
+ return -EINVAL;
+ }
+
+#undef DBF
+
+ return 0;
+}
+EXPORT_SYMBOL(ep11_check_aes_key_with_hdr);
+
+/*
+ * Simple check if the key blob is a valid EP11 ECC key blob with header.
+ */
+int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
+ const u8 *key, u32 keylen, int checkcpacfexp)
+{
+ struct ep11kblob_header *hdr = (struct ep11kblob_header *)key;
+ struct ep11keyblob *kb = (struct ep11keyblob *)(key + sizeof(*hdr));
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+ if (keylen < sizeof(*hdr) + sizeof(*kb)) {
+		if (dbg)
+			DBF("%s key check failed, keylen %u < %zu\n",
+			    __func__, keylen, sizeof(*hdr) + sizeof(*kb));
+ return -EINVAL;
+ }
+
+ if (hdr->type != TOKTYPE_NON_CCA) {
+ if (dbg)
+ DBF("%s key check failed, type 0x%02x != 0x%02x\n",
+ __func__, (int)hdr->type, TOKTYPE_NON_CCA);
+ return -EINVAL;
+ }
+ if (hdr->hver != 0x00) {
+ if (dbg)
+ DBF("%s key check failed, header version 0x%02x != 0x00\n",
+ __func__, (int)hdr->hver);
+ return -EINVAL;
+ }
+ if (hdr->version != TOKVER_EP11_ECC_WITH_HEADER) {
+ if (dbg)
+ DBF("%s key check failed, version 0x%02x != 0x%02x\n",
+ __func__, (int)hdr->version, TOKVER_EP11_ECC_WITH_HEADER);
+ return -EINVAL;
+ }
+ if (hdr->len > keylen) {
+ if (dbg)
+ DBF("%s key check failed, header len %d keylen %u mismatch\n",
+ __func__, (int)hdr->len, keylen);
+ return -EINVAL;
+ }
+ if (hdr->len < sizeof(*hdr) + sizeof(*kb)) {
+ if (dbg)
+ DBF("%s key check failed, header len %d < %zu\n",
+ __func__, (int)hdr->len, sizeof(*hdr) + sizeof(*kb));
+ return -EINVAL;
+ }
+
+ if (kb->version != EP11_STRUCT_MAGIC) {
+ if (dbg)
+ DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n",
+ __func__, (int)kb->version, EP11_STRUCT_MAGIC);
+ return -EINVAL;
+ }
+ if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
+ if (dbg)
+ DBF("%s key check failed, PKEY_EXTRACTABLE is off\n",
+ __func__);
+ return -EINVAL;
+ }
+
+#undef DBF
+
+ return 0;
+}
+EXPORT_SYMBOL(ep11_check_ecc_key_with_hdr);
+
+/*
+ * Simple check if the key blob is a valid EP11 AES key blob with
+ * the header in the session field (old style EP11 AES key).
+ */
+int ep11_check_aes_key(debug_info_t *dbg, int dbflvl,
+ const u8 *key, u32 keylen, int checkcpacfexp)
+{
+ struct ep11keyblob *kb = (struct ep11keyblob *)key;
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+ if (keylen < sizeof(*kb)) {
+		if (dbg)
+			DBF("%s key check failed, keylen %u < %zu\n",
+			    __func__, keylen, sizeof(*kb));
+ return -EINVAL;
+ }
+
+ if (kb->head.type != TOKTYPE_NON_CCA) {
+ if (dbg)
+ DBF("%s key check failed, type 0x%02x != 0x%02x\n",
+ __func__, (int)kb->head.type, TOKTYPE_NON_CCA);
+ return -EINVAL;
+ }
+ if (kb->head.version != TOKVER_EP11_AES) {
+ if (dbg)
+ DBF("%s key check failed, version 0x%02x != 0x%02x\n",
+ __func__, (int)kb->head.version, TOKVER_EP11_AES);
+ return -EINVAL;
+ }
+ if (kb->head.len > keylen) {
+ if (dbg)
+ DBF("%s key check failed, header len %d keylen %u mismatch\n",
+ __func__, (int)kb->head.len, keylen);
+ return -EINVAL;
+ }
+ if (kb->head.len < sizeof(*kb)) {
+ if (dbg)
+ DBF("%s key check failed, header len %d < %zu\n",
+ __func__, (int)kb->head.len, sizeof(*kb));
+ return -EINVAL;
+ }
+
+ if (kb->version != EP11_STRUCT_MAGIC) {
+ if (dbg)
+ DBF("%s key check failed, blob magic 0x%04x != 0x%04x\n",
+ __func__, (int)kb->version, EP11_STRUCT_MAGIC);
+ return -EINVAL;
+ }
+ if (checkcpacfexp && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
+ if (dbg)
+ DBF("%s key check failed, PKEY_EXTRACTABLE is off\n",
+ __func__);
+ return -EINVAL;
+ }
+
+#undef DBF
+
+ return 0;
+}
+EXPORT_SYMBOL(ep11_check_aes_key);
+
+/*
+ * Allocate and prepare ep11 cprb plus additional payload.
+ */
+static void *alloc_cprbmem(size_t payload_len, u32 xflags)
+{
+ size_t len = sizeof(struct ep11_cprb) + payload_len;
+ struct ep11_cprb *cprb = NULL;
+
+ if (xflags & ZCRYPT_XFLAG_NOMEMALLOC) {
+ if (len <= CPRB_MEMPOOL_ITEM_SIZE)
+ cprb = mempool_alloc_preallocated(cprb_mempool);
+ } else {
+ cprb = kmalloc(len, GFP_KERNEL);
+ }
+ if (!cprb)
+ return NULL;
+ memset(cprb, 0, len);
+
+ cprb->cprb_len = sizeof(struct ep11_cprb);
+ cprb->cprb_ver_id = 0x04;
+ memcpy(cprb->func_id, "T4", 2);
+ cprb->ret_code = 0xFFFFFFFF;
+ cprb->payload_len = payload_len;
+
+ return cprb;
+}
+
+/*
+ * Free ep11 cprb buffer space.
+ */
+static void free_cprbmem(void *mem, size_t payload_len, bool scrub, u32 xflags)
+{
+ if (mem && scrub)
+ memzero_explicit(mem, sizeof(struct ep11_cprb) + payload_len);
+
+ if (xflags & ZCRYPT_XFLAG_NOMEMALLOC)
+ mempool_free(mem, cprb_mempool);
+ else
+ kfree(mem);
+}
+
+/*
+ * Some helper functions related to ASN1 encoding.
+ * Limited to length info <= 2 byte.
+ */
+
+#define ASN1TAGLEN(x) (2 + (x) + ((x) > 127 ? 1 : 0) + ((x) > 255 ? 1 : 0))
+
+static int asn1tag_write(u8 *ptr, u8 tag, const u8 *pvalue, u16 valuelen)
+{
+ ptr[0] = tag;
+ if (valuelen > 255) {
+ ptr[1] = 0x82;
+ *((u16 *)(ptr + 2)) = valuelen;
+ memcpy(ptr + 4, pvalue, valuelen);
+ return 4 + valuelen;
+ }
+ if (valuelen > 127) {
+ ptr[1] = 0x81;
+ ptr[2] = (u8)valuelen;
+ memcpy(ptr + 3, pvalue, valuelen);
+ return 3 + valuelen;
+ }
+ ptr[1] = (u8)valuelen;
+ memcpy(ptr + 2, pvalue, valuelen);
+ return 2 + valuelen;
+}
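+
+/*
+ * Worked examples of the length encodings written above (big endian,
+ * as on s390):
+ *
+ *   valuelen  42 -> tag 0x2a <42 value bytes>            (2 + 42 bytes)
+ *   valuelen 200 -> tag 0x81 0xc8 <200 value bytes>      (3 + 200 bytes)
+ *   valuelen 600 -> tag 0x82 0x02 0x58 <600 value bytes> (4 + 600 bytes)
+ *
+ * matching ASN1TAGLEN(): 2 bytes for tag and length, plus one extra
+ * byte for each of the > 127 and > 255 thresholds crossed.
+ */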
+
+/* EP11 payload > 127 bytes starts with this struct */
+struct pl_head {
+ u8 tag;
+ u8 lenfmt;
+ u16 len;
+ u8 func_tag;
+ u8 func_len;
+ u32 func;
+ u8 dom_tag;
+ u8 dom_len;
+ u32 dom;
+} __packed;
+
+/* prep ep11 payload head helper function */
+static inline void prep_head(struct pl_head *h,
+ size_t pl_size, int api, int func)
+{
+ h->tag = 0x30;
+ h->lenfmt = 0x82;
+ h->len = pl_size - 4;
+ h->func_tag = 0x04;
+ h->func_len = sizeof(u32);
+ h->func = (api << 16) + func;
+ h->dom_tag = 0x04;
+ h->dom_len = sizeof(u32);
+}
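+
+/*
+ * Example of the function word packing done by prep_head(), with the
+ * values used by ep11_query_info() below: api 1 and func 38 yield
+ * h->func = (1 << 16) + 38 = 0x00010026, i.e. API version in the
+ * upper and function ordinal in the lower 16 bits. h->len excludes
+ * the 4 leading bytes (tag, lenfmt and the 16 bit length itself).
+ */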
+
+/* prep urb helper function */
+static inline void prep_urb(struct ep11_urb *u,
+ struct ep11_target_dev *t, int nt,
+ struct ep11_cprb *req, size_t req_len,
+ struct ep11_cprb *rep, size_t rep_len)
+{
+ memset(u, 0, sizeof(*u));
+ u->targets = (u8 __user *)t;
+ u->targets_num = nt;
+ u->req = (u8 __user *)req;
+ u->req_len = req_len;
+ u->resp = (u8 __user *)rep;
+ u->resp_len = rep_len;
+}
+
+/* Check ep11 reply payload, return 0 or suggested errno value. */
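+/*
+ * Expected reply payload layout, checked field by field below
+ * (all values big endian):
+ *
+ *   0x30 <lenfmt [len]>      sequence start tag + payload length
+ *   0x04 0x04 <4 byte func>  function tag, length and value
+ *   0x04 0x04 <4 byte dom>   domain tag, length and value
+ *   0x04 0x04 <4 byte ret>   return value tag, length and value
+ *
+ * The three 6 byte fields explain the len < 3 * 6 sanity check.
+ */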
+static int check_reply_pl(const u8 *pl, const char *func)
+{
+ int len;
+ u32 ret;
+
+ /* start tag */
+ if (*pl++ != 0x30) {
+ ZCRYPT_DBF_ERR("%s reply start tag mismatch\n", func);
+ return -EIO;
+ }
+
+ /* payload length format */
+	if (*pl <= 127) {
+ len = *pl;
+ pl++;
+ } else if (*pl == 0x81) {
+ pl++;
+ len = *pl;
+ pl++;
+ } else if (*pl == 0x82) {
+ pl++;
+ len = *((u16 *)pl);
+ pl += 2;
+ } else {
+ ZCRYPT_DBF_ERR("%s reply start tag lenfmt mismatch 0x%02hhx\n",
+ func, *pl);
+ return -EIO;
+ }
+
+ /* len should cover at least 3 fields with 32 bit value each */
+ if (len < 3 * 6) {
+ ZCRYPT_DBF_ERR("%s reply length %d too small\n", func, len);
+ return -EIO;
+ }
+
+ /* function tag, length and value */
+ if (pl[0] != 0x04 || pl[1] != 0x04) {
+ ZCRYPT_DBF_ERR("%s function tag or length mismatch\n", func);
+ return -EIO;
+ }
+ pl += 6;
+
+ /* dom tag, length and value */
+ if (pl[0] != 0x04 || pl[1] != 0x04) {
+ ZCRYPT_DBF_ERR("%s dom tag or length mismatch\n", func);
+ return -EIO;
+ }
+ pl += 6;
+
+ /* return value tag, length and value */
+ if (pl[0] != 0x04 || pl[1] != 0x04) {
+ ZCRYPT_DBF_ERR("%s return value tag or length mismatch\n",
+ func);
+ return -EIO;
+ }
+ pl += 2;
+ ret = *((u32 *)pl);
+ if (ret != 0) {
+ ZCRYPT_DBF_ERR("%s return value 0x%08x != 0\n", func, ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* Check ep11 reply cprb, return 0 or suggested errno value. */
+static int check_reply_cprb(const struct ep11_cprb *rep, const char *func)
+{
+ /* check ep11 reply return code field */
+ if (rep->ret_code) {
+		ZCRYPT_DBF_ERR("%s ep11 reply ret_code=0x%08x\n", func,
+			       rep->ret_code);
+ if (rep->ret_code == 0x000c0003)
+ return -EBUSY;
+ else
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * Helper function which does an ep11 query with given query type.
+ */
+static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
+ size_t buflen, u8 *buf, u32 xflags)
+{
+ struct ep11_info_req_pl {
+ struct pl_head head;
+ u8 query_type_tag;
+ u8 query_type_len;
+ u32 query_type;
+ u8 query_subtype_tag;
+ u8 query_subtype_len;
+ u32 query_subtype;
+ } __packed * req_pl;
+ struct ep11_info_rep_pl {
+ struct pl_head head;
+ u8 rc_tag;
+ u8 rc_len;
+ u32 rc;
+ u8 data_tag;
+ u8 data_lenfmt;
+ u16 data_len;
+ } __packed * rep_pl;
+ struct ep11_cprb *req = NULL, *rep = NULL;
+ struct ep11_target_dev target;
+ struct ep11_urb urb;
+ int api = EP11_API_V1, rc = -ENOMEM;
+
+ /* request cprb and payload */
+ req = alloc_cprbmem(sizeof(struct ep11_info_req_pl), xflags);
+ if (!req)
+ goto out;
+ req_pl = (struct ep11_info_req_pl *)(((u8 *)req) + sizeof(*req));
+ prep_head(&req_pl->head, sizeof(*req_pl), api, 38); /* get xcp info */
+ req_pl->query_type_tag = 0x04;
+ req_pl->query_type_len = sizeof(u32);
+ req_pl->query_type = query_type;
+ req_pl->query_subtype_tag = 0x04;
+ req_pl->query_subtype_len = sizeof(u32);
+
+ /* reply cprb and payload */
+ rep = alloc_cprbmem(sizeof(struct ep11_info_rep_pl) + buflen, xflags);
+ if (!rep)
+ goto out;
+ rep_pl = (struct ep11_info_rep_pl *)(((u8 *)rep) + sizeof(*rep));
+
+ /* urb and target */
+ target.ap_id = cardnr;
+ target.dom_id = domain;
+ prep_urb(&urb, &target, 1,
+ req, sizeof(*req) + sizeof(*req_pl),
+ rep, sizeof(*rep) + sizeof(*rep_pl) + buflen);
+
+ rc = zcrypt_send_ep11_cprb(&urb, xflags);
+ if (rc) {
+ ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
+ goto out;
+ }
+
+ /* check ep11 reply cprb */
+ rc = check_reply_cprb(rep, __func__);
+ if (rc)
+ goto out;
+
+ /* check payload */
+ rc = check_reply_pl((u8 *)rep_pl, __func__);
+ if (rc)
+ goto out;
+ if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+ ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__);
+ rc = -EIO;
+ goto out;
+ }
+ if (rep_pl->data_len > buflen) {
+ ZCRYPT_DBF_ERR("%s mismatch between reply data len and buffer len\n",
+ __func__);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ memcpy(buf, ((u8 *)rep_pl) + sizeof(*rep_pl), rep_pl->data_len);
+
+out:
+ free_cprbmem(req, 0, false, xflags);
+ free_cprbmem(rep, 0, false, xflags);
+ return rc;
+}
+
+/*
+ * Provide information about an EP11 card.
+ */
+int ep11_get_card_info(u16 card, struct ep11_card_info *info, u32 xflags)
+{
+ int rc;
+ struct ep11_module_query_info {
+ u32 API_ord_nr;
+ u32 firmware_id;
+ u8 FW_major_vers;
+ u8 FW_minor_vers;
+ u8 CSP_major_vers;
+ u8 CSP_minor_vers;
+ u8 fwid[32];
+ u8 xcp_config_hash[32];
+ u8 CSP_config_hash[32];
+ u8 serial[16];
+ u8 module_date_time[16];
+ u64 op_mode;
+ u32 PKCS11_flags;
+ u32 ext_flags;
+ u32 domains;
+ u32 sym_state_bytes;
+ u32 digest_state_bytes;
+ u32 pin_blob_bytes;
+ u32 SPKI_bytes;
+ u32 priv_key_blob_bytes;
+ u32 sym_blob_bytes;
+ u32 max_payload_bytes;
+ u32 CP_profile_bytes;
+ u32 max_CP_index;
+ } __packed * pmqi = NULL;
+
+ /* use the cprb mempool to satisfy this short term mem alloc */
+ pmqi = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ?
+ mempool_alloc_preallocated(cprb_mempool) :
+ mempool_alloc(cprb_mempool, GFP_KERNEL);
+ if (!pmqi)
+ return -ENOMEM;
+ rc = ep11_query_info(card, AUTOSEL_DOM,
+ 0x01 /* module info query */,
+ sizeof(*pmqi), (u8 *)pmqi, xflags);
+ if (rc)
+ goto out;
+
+ memset(info, 0, sizeof(*info));
+ info->API_ord_nr = pmqi->API_ord_nr;
+ info->FW_version = (pmqi->FW_major_vers << 8) + pmqi->FW_minor_vers;
+ memcpy(info->serial, pmqi->serial, sizeof(info->serial));
+ info->op_mode = pmqi->op_mode;
+
+out:
+ mempool_free(pmqi, cprb_mempool);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_get_card_info);
+
+/*
+ * Provide information about a domain within an EP11 card.
+ */
+int ep11_get_domain_info(u16 card, u16 domain,
+ struct ep11_domain_info *info, u32 xflags)
+{
+ int rc;
+ struct ep11_domain_query_info {
+ u32 dom_index;
+ u8 cur_WK_VP[32];
+ u8 new_WK_VP[32];
+ u32 dom_flags;
+ u64 op_mode;
+ } __packed dom_query_info;
+
+ rc = ep11_query_info(card, domain, 0x03 /* domain info query */,
+ sizeof(dom_query_info), (u8 *)&dom_query_info,
+ xflags);
+ if (rc)
+ goto out;
+
+ memset(info, 0, sizeof(*info));
+ info->cur_wk_state = '0';
+ info->new_wk_state = '0';
+ if (dom_query_info.dom_flags & 0x10 /* left imprint mode */) {
+ if (dom_query_info.dom_flags & 0x02 /* cur wk valid */) {
+ info->cur_wk_state = '1';
+ memcpy(info->cur_wkvp, dom_query_info.cur_WK_VP, 32);
+ }
+ if (dom_query_info.dom_flags & 0x04 || /* new wk present */
+ dom_query_info.dom_flags & 0x08 /* new wk committed */) {
+ info->new_wk_state =
+ dom_query_info.dom_flags & 0x08 ? '2' : '1';
+ memcpy(info->new_wkvp, dom_query_info.new_WK_VP, 32);
+ }
+ }
+ info->op_mode = dom_query_info.op_mode;
+
+out:
+ return rc;
+}
+EXPORT_SYMBOL(ep11_get_domain_info);
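+
+/*
+ * Worked example for the dom_flags decoding above (hypothetical
+ * value): dom_flags = 0x16 has the left imprint mode (0x10), cur wk
+ * valid (0x02) and new wk present (0x04) bits set, so cur_wk_state
+ * becomes '1' and new_wk_state '1' (uncommitted); with the committed
+ * bit (0x08) set as well, new_wk_state would be '2'.
+ */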
+
+/*
+ * Default EP11 AES key generate attributes, used when no keygenflags given:
+ * XCP_BLOB_ENCRYPT | XCP_BLOB_DECRYPT | XCP_BLOB_PROTKEY_EXTRACTABLE
+ */
+#define KEY_ATTR_DEFAULTS 0x00200c00
+
+static int _ep11_genaeskey(u16 card, u16 domain,
+ u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, size_t *keybufsize, u32 xflags)
+{
+ struct keygen_req_pl {
+ struct pl_head head;
+ u8 var_tag;
+ u8 var_len;
+ u32 var;
+ u8 keybytes_tag;
+ u8 keybytes_len;
+ u32 keybytes;
+ u8 mech_tag;
+ u8 mech_len;
+ u32 mech;
+ u8 attr_tag;
+ u8 attr_len;
+ u32 attr_header;
+ u32 attr_bool_mask;
+ u32 attr_bool_bits;
+ u32 attr_val_len_type;
+ u32 attr_val_len_value;
+ /* followed by empty pin tag or empty pinblob tag */
+ } __packed * req_pl;
+ struct keygen_rep_pl {
+ struct pl_head head;
+ u8 rc_tag;
+ u8 rc_len;
+ u32 rc;
+ u8 data_tag;
+ u8 data_lenfmt;
+ u16 data_len;
+ u8 data[512];
+ } __packed * rep_pl;
+ struct ep11_cprb *req = NULL, *rep = NULL;
+ size_t req_pl_size, pinblob_size = 0;
+ struct ep11_target_dev target;
+ struct ep11_urb urb;
+ int api, rc = -ENOMEM;
+ u8 *p;
+
+ switch (keybitsize) {
+ case 128:
+ case 192:
+ case 256:
+ break;
+ default:
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* request cprb and payload */
+ api = (!keygenflags || keygenflags & 0x00200000) ?
+ EP11_API_V4 : EP11_API_V1;
+ if (ap_is_se_guest()) {
+ /*
+ * genkey within SE environment requires API ordinal 6
+ * with empty pinblob
+ */
+ api = EP11_API_V6;
+ pinblob_size = EP11_PINBLOB_V1_BYTES;
+ }
+ req_pl_size = sizeof(struct keygen_req_pl) + ASN1TAGLEN(pinblob_size);
+ req = alloc_cprbmem(req_pl_size, xflags);
+ if (!req)
+ goto out;
+ req_pl = (struct keygen_req_pl *)(((u8 *)req) + sizeof(*req));
+ prep_head(&req_pl->head, req_pl_size, api, 21); /* GenerateKey */
+ req_pl->var_tag = 0x04;
+ req_pl->var_len = sizeof(u32);
+ req_pl->keybytes_tag = 0x04;
+ req_pl->keybytes_len = sizeof(u32);
+ req_pl->keybytes = keybitsize / 8;
+ req_pl->mech_tag = 0x04;
+ req_pl->mech_len = sizeof(u32);
+ req_pl->mech = 0x00001080; /* CKM_AES_KEY_GEN */
+ req_pl->attr_tag = 0x04;
+ req_pl->attr_len = 5 * sizeof(u32);
+ req_pl->attr_header = 0x10010000;
+ req_pl->attr_bool_mask = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+ req_pl->attr_bool_bits = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+ req_pl->attr_val_len_type = 0x00000161; /* CKA_VALUE_LEN */
+ req_pl->attr_val_len_value = keybitsize / 8;
+ p = ((u8 *)req_pl) + sizeof(*req_pl);
+ /* pin tag */
+ *p++ = 0x04;
+ *p++ = pinblob_size;
+
+ /* reply cprb and payload */
+ rep = alloc_cprbmem(sizeof(struct keygen_rep_pl), xflags);
+ if (!rep)
+ goto out;
+ rep_pl = (struct keygen_rep_pl *)(((u8 *)rep) + sizeof(*rep));
+
+ /* urb and target */
+ target.ap_id = card;
+ target.dom_id = domain;
+ prep_urb(&urb, &target, 1,
+ req, sizeof(*req) + req_pl_size,
+ rep, sizeof(*rep) + sizeof(*rep_pl));
+
+ rc = zcrypt_send_ep11_cprb(&urb, xflags);
+ if (rc) {
+ ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int)card, (int)domain, rc);
+ goto out;
+ }
+
+ /* check ep11 reply cprb */
+ rc = check_reply_cprb(rep, __func__);
+ if (rc)
+ goto out;
+
+ /* check payload */
+ rc = check_reply_pl((u8 *)rep_pl, __func__);
+ if (rc)
+ goto out;
+ if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+ ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__);
+ rc = -EIO;
+ goto out;
+ }
+ if (rep_pl->data_len > *keybufsize) {
+ ZCRYPT_DBF_ERR("%s mismatch reply data len / key buffer len\n",
+ __func__);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ /* copy key blob */
+ memcpy(keybuf, rep_pl->data, rep_pl->data_len);
+ *keybufsize = rep_pl->data_len;
+
+out:
+ free_cprbmem(req, 0, false, xflags);
+ free_cprbmem(rep, sizeof(struct keygen_rep_pl), true, xflags);
+ return rc;
+}
+
+int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, u32 *keybufsize, u32 keybufver, u32 xflags)
+{
+ struct ep11kblob_header *hdr;
+ size_t hdr_size, pl_size;
+ u8 *pl;
+ int rc;
+
+ switch (keybufver) {
+ case TOKVER_EP11_AES:
+ case TOKVER_EP11_AES_WITH_HEADER:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ rc = ep11_kb_split(keybuf, *keybufsize, keybufver,
+ &hdr, &hdr_size, &pl, &pl_size);
+ if (rc)
+ return rc;
+
+ rc = _ep11_genaeskey(card, domain, keybitsize, keygenflags,
+ pl, &pl_size, xflags);
+ if (rc)
+ return rc;
+
+ *keybufsize = hdr_size + pl_size;
+
+ /* update header information */
+ hdr->type = TOKTYPE_NON_CCA;
+ hdr->len = *keybufsize;
+ hdr->version = keybufver;
+ hdr->bitlen = keybitsize;
+
+ return 0;
+}
+EXPORT_SYMBOL(ep11_genaeskey);
+
+static int ep11_cryptsingle(u16 card, u16 domain,
+ u16 mode, u32 mech, const u8 *iv,
+ const u8 *key, size_t keysize,
+ const u8 *inbuf, size_t inbufsize,
+ u8 *outbuf, size_t *outbufsize,
+ u32 xflags)
+{
+ struct crypt_req_pl {
+ struct pl_head head;
+ u8 var_tag;
+ u8 var_len;
+ u32 var;
+ u8 mech_tag;
+ u8 mech_len;
+ u32 mech;
+ /*
+ * maybe followed by iv data
+ * followed by key tag + key blob
+ * followed by plaintext tag + plaintext
+ */
+ } __packed * req_pl;
+ struct crypt_rep_pl {
+ struct pl_head head;
+ u8 rc_tag;
+ u8 rc_len;
+ u32 rc;
+ u8 data_tag;
+ u8 data_lenfmt;
+ /* data follows */
+ } __packed * rep_pl;
+ struct ep11_cprb *req = NULL, *rep = NULL;
+ struct ep11_target_dev target;
+ struct ep11_urb urb;
+ size_t req_pl_size, rep_pl_size = 0;
+ int n, api = EP11_API_V1, rc = -ENOMEM;
+ u8 *p;
+
+ /* the simple asn1 coding used has length limits */
+ if (keysize > 0xFFFF || inbufsize > 0xFFFF)
+ return -EINVAL;
+
+ /* request cprb and payload */
+ req_pl_size = sizeof(struct crypt_req_pl) + (iv ? 16 : 0)
+ + ASN1TAGLEN(keysize) + ASN1TAGLEN(inbufsize);
+ req = alloc_cprbmem(req_pl_size, xflags);
+ if (!req)
+ goto out;
+ req_pl = (struct crypt_req_pl *)(((u8 *)req) + sizeof(*req));
+ prep_head(&req_pl->head, req_pl_size, api, (mode ? 20 : 19));
+ req_pl->var_tag = 0x04;
+ req_pl->var_len = sizeof(u32);
+ /* mech is mech + mech params (iv here) */
+ req_pl->mech_tag = 0x04;
+ req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+ req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */
+ p = ((u8 *)req_pl) + sizeof(*req_pl);
+ if (iv) {
+ memcpy(p, iv, 16);
+ p += 16;
+ }
+ /* key and input data */
+ p += asn1tag_write(p, 0x04, key, keysize);
+ p += asn1tag_write(p, 0x04, inbuf, inbufsize);
+
+ /* reply cprb and payload, assume out data size <= in data size + 32 */
+ rep_pl_size = sizeof(struct crypt_rep_pl) + ASN1TAGLEN(inbufsize + 32);
+ rep = alloc_cprbmem(rep_pl_size, xflags);
+ if (!rep)
+ goto out;
+ rep_pl = (struct crypt_rep_pl *)(((u8 *)rep) + sizeof(*rep));
+
+ /* urb and target */
+ target.ap_id = card;
+ target.dom_id = domain;
+ prep_urb(&urb, &target, 1,
+ req, sizeof(*req) + req_pl_size,
+ rep, sizeof(*rep) + rep_pl_size);
+
+ rc = zcrypt_send_ep11_cprb(&urb, xflags);
+ if (rc) {
+ ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int)card, (int)domain, rc);
+ goto out;
+ }
+
+ /* check ep11 reply cprb */
+ rc = check_reply_cprb(rep, __func__);
+ if (rc)
+ goto out;
+
+ /* check payload */
+ rc = check_reply_pl((u8 *)rep_pl, __func__);
+ if (rc)
+ goto out;
+ if (rep_pl->data_tag != 0x04) {
+ ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__);
+ rc = -EIO;
+ goto out;
+ }
+ p = ((u8 *)rep_pl) + sizeof(*rep_pl);
+ if (rep_pl->data_lenfmt <= 127) {
+ n = rep_pl->data_lenfmt;
+ } else if (rep_pl->data_lenfmt == 0x81) {
+ n = *p++;
+ } else if (rep_pl->data_lenfmt == 0x82) {
+ n = *((u16 *)p);
+ p += 2;
+ } else {
+ ZCRYPT_DBF_ERR("%s unknown reply data length format 0x%02hhx\n",
+ __func__, rep_pl->data_lenfmt);
+ rc = -EIO;
+ goto out;
+ }
+ if (n > *outbufsize) {
+ ZCRYPT_DBF_ERR("%s mismatch reply data len %d / output buffer %zu\n",
+ __func__, n, *outbufsize);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ memcpy(outbuf, p, n);
+ *outbufsize = n;
+
+out:
+ free_cprbmem(req, req_pl_size, true, xflags);
+ free_cprbmem(rep, rep_pl_size, true, xflags);
+ return rc;
+}
+
+static int _ep11_unwrapkey(u16 card, u16 domain,
+ const u8 *kek, size_t keksize,
+ const u8 *enckey, size_t enckeysize,
+ u32 mech, const u8 *iv,
+ u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, size_t *keybufsize, u32 xflags)
+{
+ struct uw_req_pl {
+ struct pl_head head;
+ u8 attr_tag;
+ u8 attr_len;
+ u32 attr_header;
+ u32 attr_bool_mask;
+ u32 attr_bool_bits;
+ u32 attr_key_type;
+ u32 attr_key_type_value;
+ u32 attr_val_len;
+ u32 attr_val_len_value;
+ u8 mech_tag;
+ u8 mech_len;
+ u32 mech;
+ /*
+ * maybe followed by iv data
+ * followed by kek tag + kek blob
+ * followed by empty mac tag
+ * followed by empty pin tag or empty pinblob tag
+	 * followed by encrypted key tag + bytes
+ */
+ } __packed * req_pl;
+ struct uw_rep_pl {
+ struct pl_head head;
+ u8 rc_tag;
+ u8 rc_len;
+ u32 rc;
+ u8 data_tag;
+ u8 data_lenfmt;
+ u16 data_len;
+ u8 data[512];
+ } __packed * rep_pl;
+ struct ep11_cprb *req = NULL, *rep = NULL;
+ size_t req_pl_size, pinblob_size = 0;
+ struct ep11_target_dev target;
+ struct ep11_urb urb;
+ int api, rc = -ENOMEM;
+ u8 *p;
+
+ /* request cprb and payload */
+ api = (!keygenflags || keygenflags & 0x00200000) ?
+ EP11_API_V4 : EP11_API_V1;
+ if (ap_is_se_guest()) {
+ /*
+ * unwrap within SE environment requires API ordinal 6
+ * with empty pinblob
+ */
+ api = EP11_API_V6;
+ pinblob_size = EP11_PINBLOB_V1_BYTES;
+ }
+ req_pl_size = sizeof(struct uw_req_pl) + (iv ? 16 : 0)
+ + ASN1TAGLEN(keksize) + ASN1TAGLEN(0)
+ + ASN1TAGLEN(pinblob_size) + ASN1TAGLEN(enckeysize);
+ req = alloc_cprbmem(req_pl_size, xflags);
+ if (!req)
+ goto out;
+ req_pl = (struct uw_req_pl *)(((u8 *)req) + sizeof(*req));
+ prep_head(&req_pl->head, req_pl_size, api, 34); /* UnwrapKey */
+ req_pl->attr_tag = 0x04;
+ req_pl->attr_len = 7 * sizeof(u32);
+ req_pl->attr_header = 0x10020000;
+ req_pl->attr_bool_mask = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+ req_pl->attr_bool_bits = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+ req_pl->attr_key_type = 0x00000100; /* CKA_KEY_TYPE */
+ req_pl->attr_key_type_value = 0x0000001f; /* CKK_AES */
+ req_pl->attr_val_len = 0x00000161; /* CKA_VALUE_LEN */
+ req_pl->attr_val_len_value = keybitsize / 8;
+ /* mech is mech + mech params (iv here) */
+ req_pl->mech_tag = 0x04;
+ req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+ req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */
+ p = ((u8 *)req_pl) + sizeof(*req_pl);
+ if (iv) {
+ memcpy(p, iv, 16);
+ p += 16;
+ }
+ /* kek */
+ p += asn1tag_write(p, 0x04, kek, keksize);
+ /* empty mac key tag */
+ *p++ = 0x04;
+ *p++ = 0;
+ /* pin tag */
+ *p++ = 0x04;
+ *p++ = pinblob_size;
+ p += pinblob_size;
+ /* encrypted key value tag and bytes */
+ p += asn1tag_write(p, 0x04, enckey, enckeysize);
+
+ /* reply cprb and payload */
+ rep = alloc_cprbmem(sizeof(struct uw_rep_pl), xflags);
+ if (!rep)
+ goto out;
+ rep_pl = (struct uw_rep_pl *)(((u8 *)rep) + sizeof(*rep));
+
+ /* urb and target */
+ target.ap_id = card;
+ target.dom_id = domain;
+ prep_urb(&urb, &target, 1,
+ req, sizeof(*req) + req_pl_size,
+ rep, sizeof(*rep) + sizeof(*rep_pl));
+
+ rc = zcrypt_send_ep11_cprb(&urb, xflags);
+ if (rc) {
+ ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int)card, (int)domain, rc);
+ goto out;
+ }
+
+ /* check ep11 reply cprb */
+ rc = check_reply_cprb(rep, __func__);
+ if (rc)
+ goto out;
+
+ /* check payload */
+ rc = check_reply_pl((u8 *)rep_pl, __func__);
+ if (rc)
+ goto out;
+ if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+ ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__);
+ rc = -EIO;
+ goto out;
+ }
+ if (rep_pl->data_len > *keybufsize) {
+ ZCRYPT_DBF_ERR("%s mismatch reply data len / key buffer len\n",
+ __func__);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ /* copy key blob */
+ memcpy(keybuf, rep_pl->data, rep_pl->data_len);
+ *keybufsize = rep_pl->data_len;
+
+out:
+ free_cprbmem(req, req_pl_size, true, xflags);
+ free_cprbmem(rep, sizeof(struct uw_rep_pl), true, xflags);
+ return rc;
+}
+
+static int ep11_unwrapkey(u16 card, u16 domain,
+ const u8 *kek, size_t keksize,
+ const u8 *enckey, size_t enckeysize,
+ u32 mech, const u8 *iv,
+ u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, u32 *keybufsize,
+ u8 keybufver, u32 xflags)
+{
+ struct ep11kblob_header *hdr;
+ size_t hdr_size, pl_size;
+ u8 *pl;
+ int rc;
+
+ rc = ep11_kb_split(keybuf, *keybufsize, keybufver,
+ &hdr, &hdr_size, &pl, &pl_size);
+ if (rc)
+ return rc;
+
+ rc = _ep11_unwrapkey(card, domain, kek, keksize, enckey, enckeysize,
+ mech, iv, keybitsize, keygenflags,
+ pl, &pl_size, xflags);
+ if (rc)
+ return rc;
+
+ *keybufsize = hdr_size + pl_size;
+
+ /* update header information */
+ hdr = (struct ep11kblob_header *)keybuf;
+ hdr->type = TOKTYPE_NON_CCA;
+ hdr->len = *keybufsize;
+ hdr->version = keybufver;
+ hdr->bitlen = keybitsize;
+
+ return 0;
+}
+
+static int _ep11_wrapkey(u16 card, u16 domain,
+ const u8 *key, size_t keysize,
+ u32 mech, const u8 *iv,
+ u8 *databuf, size_t *datasize, u32 xflags)
+{
+ struct wk_req_pl {
+ struct pl_head head;
+ u8 var_tag;
+ u8 var_len;
+ u32 var;
+ u8 mech_tag;
+ u8 mech_len;
+ u32 mech;
+ /*
+ * followed by iv data
+ * followed by key tag + key blob
+ * followed by dummy kek param
+ * followed by dummy mac param
+ */
+ } __packed * req_pl;
+ struct wk_rep_pl {
+ struct pl_head head;
+ u8 rc_tag;
+ u8 rc_len;
+ u32 rc;
+ u8 data_tag;
+ u8 data_lenfmt;
+ u16 data_len;
+ u8 data[1024];
+ } __packed * rep_pl;
+ struct ep11_cprb *req = NULL, *rep = NULL;
+ struct ep11_target_dev target;
+ struct ep11_urb urb;
+ size_t req_pl_size;
+ int api, rc = -ENOMEM;
+ u8 *p;
+
+ /* request cprb and payload */
+ req_pl_size = sizeof(struct wk_req_pl) + (iv ? 16 : 0)
+ + ASN1TAGLEN(keysize) + 4;
+ req = alloc_cprbmem(req_pl_size, xflags);
+ if (!req)
+ goto out;
+ if (!mech || mech == 0x80060001)
+ req->flags |= 0x20; /* CPACF_WRAP needs special bit */
+ req_pl = (struct wk_req_pl *)(((u8 *)req) + sizeof(*req));
+ api = (!mech || mech == 0x80060001) ? /* CKM_IBM_CPACF_WRAP */
+ EP11_API_V4 : EP11_API_V1;
+ prep_head(&req_pl->head, req_pl_size, api, 33); /* WrapKey */
+ req_pl->var_tag = 0x04;
+ req_pl->var_len = sizeof(u32);
+ /* mech is mech + mech params (iv here) */
+ req_pl->mech_tag = 0x04;
+ req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+ req_pl->mech = (mech ? mech : 0x80060001); /* CKM_IBM_CPACF_WRAP */
+ p = ((u8 *)req_pl) + sizeof(*req_pl);
+ if (iv) {
+ memcpy(p, iv, 16);
+ p += 16;
+ }
+ /* key blob */
+ p += asn1tag_write(p, 0x04, key, keysize);
+ /* empty kek tag */
+ *p++ = 0x04;
+ *p++ = 0;
+ /* empty mac tag */
+ *p++ = 0x04;
+ *p++ = 0;
+
+ /* reply cprb and payload */
+ rep = alloc_cprbmem(sizeof(struct wk_rep_pl), xflags);
+ if (!rep)
+ goto out;
+ rep_pl = (struct wk_rep_pl *)(((u8 *)rep) + sizeof(*rep));
+
+ /* urb and target */
+ target.ap_id = card;
+ target.dom_id = domain;
+ prep_urb(&urb, &target, 1,
+ req, sizeof(*req) + req_pl_size,
+ rep, sizeof(*rep) + sizeof(*rep_pl));
+
+ rc = zcrypt_send_ep11_cprb(&urb, xflags);
+ if (rc) {
+ ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int)card, (int)domain, rc);
+ goto out;
+ }
+
+ /* check ep11 reply cprb */
+ rc = check_reply_cprb(rep, __func__);
+ if (rc)
+ goto out;
+
+ /* check payload */
+ rc = check_reply_pl((u8 *)rep_pl, __func__);
+ if (rc)
+ goto out;
+ if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+ ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__);
+ rc = -EIO;
+ goto out;
+ }
+ if (rep_pl->data_len > *datasize) {
+ ZCRYPT_DBF_ERR("%s mismatch reply data len / data buffer len\n",
+ __func__);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ /* copy the data from the cprb to the data buffer */
+ memcpy(databuf, rep_pl->data, rep_pl->data_len);
+ *datasize = rep_pl->data_len;
+
+out:
+ free_cprbmem(req, req_pl_size, true, xflags);
+ free_cprbmem(rep, sizeof(struct wk_rep_pl), true, xflags);
+ return rc;
+}
+
+int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+ const u8 *clrkey, u8 *keybuf, u32 *keybufsize,
+ u32 keytype, u32 xflags)
+{
+ int rc;
+ void *mem;
+ u8 encbuf[64], *kek;
+ size_t clrkeylen, keklen, encbuflen = sizeof(encbuf);
+
+ if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256) {
+ clrkeylen = keybitsize / 8;
+ } else {
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ return -EINVAL;
+ }
+
+ /*
+	 * Allocate space for the temporary kek. As no more than
+	 * MAXEP11AESKEYBLOBSIZE bytes are needed for this, use the
+	 * already existing cprb mempool to satisfy this short term
+	 * memory requirement.
+ */
+ mem = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ?
+ mempool_alloc_preallocated(cprb_mempool) :
+ mempool_alloc(cprb_mempool, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+ kek = (u8 *)mem;
+ keklen = MAXEP11AESKEYBLOBSIZE;
+
+ /* Step 1: generate AES 256 bit random kek key */
+ rc = _ep11_genaeskey(card, domain, 256,
+ 0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */
+ kek, &keklen, xflags);
+ if (rc) {
+ ZCRYPT_DBF_ERR("%s generate kek key failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+
+ /* Step 2: encrypt clear key value with the kek key */
+ rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen,
+ clrkey, clrkeylen, encbuf, &encbuflen, xflags);
+ if (rc) {
+ ZCRYPT_DBF_ERR("%s encrypting key value with kek key failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+
+ /* Step 3: import the encrypted key value as a new key */
+ rc = ep11_unwrapkey(card, domain, kek, keklen,
+ encbuf, encbuflen, 0, def_iv,
+ keybitsize, keygenflags,
+ keybuf, keybufsize,
+ keytype, xflags);
+ if (rc) {
+ ZCRYPT_DBF_ERR("%s importing key value as new key failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+
+out:
+ mempool_free(mem, cprb_mempool);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_clr2keyblob);
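+
+/*
+ * Illustrative sketch only, not part of the driver: importing a clear
+ * AES key as an old style (TOKVER_EP11_AES) EP11 secure key blob via
+ * ep11_clr2keyblob(). The card/domain values, the zero clear key and
+ * the function name are assumptions made up for this example.
+ */
+static int __maybe_unused example_clr2keyblob(void)
+{
+	u8 clrkey[32] = { 0 };	/* 256 bit clear key value (example only) */
+	u8 keybuf[MAXEP11AESKEYBLOBSIZE];
+	u32 keybufsize = sizeof(keybuf);
+
+	return ep11_clr2keyblob(0x02, 0x0011, 256, 0, clrkey,
+				keybuf, &keybufsize, TOKVER_EP11_AES, 0);
+}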
+
+int ep11_kblob2protkey(u16 card, u16 dom,
+ const u8 *keyblob, u32 keybloblen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype,
+ u32 xflags)
+{
+ struct ep11kblob_header *hdr;
+ struct ep11keyblob *key;
+ size_t wkbuflen, keylen;
+ struct wk_info {
+ u16 version;
+ u8 res1[16];
+ u32 pkeytype;
+ u32 pkeybitsize;
+ u64 pkeysize;
+ u8 res2[8];
+ u8 pkey[];
+ } __packed * wki;
+ u8 *wkbuf = NULL;
+ int rc = -EIO;
+
+ if (ep11_kb_decode((u8 *)keyblob, keybloblen, &hdr, NULL, &key, &keylen))
+ return -EINVAL;
+
+ if (hdr->version == TOKVER_EP11_AES) {
+		/* wipe the overlaid header */
+ memset(hdr, 0, sizeof(*hdr));
+ }
+ /* !!! hdr is no longer a valid header !!! */
+
+ /* need a temp working buffer */
+ wkbuflen = (keylen + AES_BLOCK_SIZE) & (~(AES_BLOCK_SIZE - 1));
+ if (wkbuflen > CPRB_MEMPOOL_ITEM_SIZE) {
+ /* this should never happen */
+ rc = -ENOMEM;
+ ZCRYPT_DBF_WARN("%s wkbuflen %d > cprb mempool item size %d, rc=%d\n",
+ __func__, (int)wkbuflen, CPRB_MEMPOOL_ITEM_SIZE, rc);
+ return rc;
+ }
+ /* use the cprb mempool to satisfy this short term mem allocation */
+ wkbuf = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ?
+ mempool_alloc_preallocated(cprb_mempool) :
+ mempool_alloc(cprb_mempool, GFP_ATOMIC);
+ if (!wkbuf) {
+ rc = -ENOMEM;
+ ZCRYPT_DBF_WARN("%s allocating tmp buffer via cprb mempool failed, rc=%d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ /* ep11 secure key -> protected key + info */
+ rc = _ep11_wrapkey(card, dom, (u8 *)key, keylen,
+ 0, def_iv, wkbuf, &wkbuflen, xflags);
+ if (rc) {
+ ZCRYPT_DBF_ERR("%s rewrapping ep11 key to pkey failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+ wki = (struct wk_info *)wkbuf;
+
+ /* check struct version and pkey type */
+ if (wki->version != 1 || wki->pkeytype < 1 || wki->pkeytype > 5) {
+ ZCRYPT_DBF_ERR("%s wk info version %d or pkeytype %d mismatch.\n",
+ __func__, (int)wki->version, (int)wki->pkeytype);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* check protected key type field */
+ switch (wki->pkeytype) {
+ case 1: /* AES */
+ switch (wki->pkeysize) {
+ case 16 + 32:
+ /* AES 128 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_128;
+ break;
+ case 24 + 32:
+ /* AES 192 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_192;
+ break;
+ case 32 + 32:
+ /* AES 256 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_256;
+ break;
+ default:
+ ZCRYPT_DBF_ERR("%s unknown/unsupported AES pkeysize %d\n",
+ __func__, (int)wki->pkeysize);
+ rc = -EIO;
+ goto out;
+ }
+ break;
+ case 3: /* EC-P */
+ case 4: /* EC-ED */
+ case 5: /* EC-BP */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_ECC;
+ break;
+ case 2: /* TDES */
+ default:
+ ZCRYPT_DBF_ERR("%s unknown/unsupported key type %d\n",
+ __func__, (int)wki->pkeytype);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the translated protected key */
+ if (wki->pkeysize > *protkeylen) {
+ ZCRYPT_DBF_ERR("%s wk info pkeysize %llu > protkeysize %u\n",
+ __func__, wki->pkeysize, *protkeylen);
+ rc = -EINVAL;
+ goto out;
+ }
+ memcpy(protkey, wki->pkey, wki->pkeysize);
+ *protkeylen = wki->pkeysize;
+
+out:
+ mempool_free(wkbuf, cprb_mempool);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_kblob2protkey);
+
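+/*
+ * Illustrative sketch only, not part of the driver: deriving a CPACF
+ * protected key from an EP11 secure key blob. The 112 byte buffer size
+ * (assumed to cover all AES and ECC protected key types) and the
+ * function name are assumptions made up for this example.
+ */
+static int __maybe_unused example_kblob2protkey(u16 card, u16 dom,
+						const u8 *blob, u32 bloblen)
+{
+	u8 protkey[112];
+	u32 protkeylen = sizeof(protkey), protkeytype = 0;
+
+	return ep11_kblob2protkey(card, dom, blob, bloblen,
+				  protkey, &protkeylen, &protkeytype, 0);
+}
+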
+int ep11_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+ int minhwtype, int minapi, const u8 *wkvp, u32 xflags)
+{
+ struct zcrypt_device_status_ext *device_status;
+ struct ep11_domain_info edi;
+ struct ep11_card_info eci;
+ u32 _nr_apqns = 0;
+ int i, card, dom;
+
+ /* occupy the device status memory */
+ mutex_lock(&dev_status_mem_mutex);
+ memset(dev_status_mem, 0, ZCRYPT_DEV_STATUS_EXT_SIZE);
+ device_status = (struct zcrypt_device_status_ext *)dev_status_mem;
+
+ /* fetch crypto device status into this struct */
+ zcrypt_device_status_mask_ext(device_status,
+ ZCRYPT_DEV_STATUS_CARD_MAX,
+ ZCRYPT_DEV_STATUS_QUEUE_MAX);
+
+	/* walk through all the crypto apqns */
+ for (i = 0; i < ZCRYPT_DEV_STATUS_ENTRIES; i++) {
+ card = AP_QID_CARD(device_status[i].qid);
+ dom = AP_QID_QUEUE(device_status[i].qid);
+ /* check online state */
+ if (!device_status[i].online)
+ continue;
+ /* check for ep11 functions */
+ if (!(device_status[i].functions & 0x01))
+ continue;
+ /* check cardnr */
+ if (cardnr != 0xFFFF && card != cardnr)
+ continue;
+ /* check domain */
+ if (domain != 0xFFFF && dom != domain)
+ continue;
+ /* check min hardware type */
+ if (minhwtype && device_status[i].hwtype < minhwtype)
+ continue;
+ /* check min api version if given */
+ if (minapi > 0) {
+ if (ep11_get_card_info(card, &eci, xflags))
+ continue;
+ if (minapi > eci.API_ord_nr)
+ continue;
+ }
+ /* check wkvp if given */
+ if (wkvp) {
+ if (ep11_get_domain_info(card, dom, &edi, xflags))
+ continue;
+ if (edi.cur_wk_state != '1')
+ continue;
+ if (memcmp(wkvp, edi.cur_wkvp, 16))
+ continue;
+ }
+		/* apqn passed all filtering criteria, add to the array */
+ if (_nr_apqns < *nr_apqns)
+ apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom);
+ }
+
+ *nr_apqns = _nr_apqns;
+
+ mutex_unlock(&dev_status_mem_mutex);
+
+ return _nr_apqns ? 0 : -ENODEV;
+}
+EXPORT_SYMBOL(ep11_findcard2);
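+
+/*
+ * Illustrative sketch only, not part of the driver: typical use of
+ * ep11_findcard2() - collect all online EP11 apqns and decode the
+ * card/domain pair packed into each 32 bit entry. The function name
+ * is made up for this example.
+ */
+static void __maybe_unused example_findcard2(void)
+{
+	u32 apqns[16], nr_apqns = ARRAY_SIZE(apqns);
+	u32 i;
+
+	if (ep11_findcard2(apqns, &nr_apqns, 0xFFFF, 0xFFFF, 0, 0, NULL, 0))
+		return;
+	for (i = 0; i < nr_apqns; i++)
+		pr_debug("apqn %02x.%04x\n",
+			 (apqns[i] >> 16) & 0xFFFF, apqns[i] & 0xFFFF);
+}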
+
+int __init zcrypt_ep11misc_init(void)
+{
+ /* Pre-allocate a small memory pool for ep11 cprbs. */
+ cprb_mempool = mempool_create_kmalloc_pool(2 * zcrypt_mempool_threshold,
+ CPRB_MEMPOOL_ITEM_SIZE);
+ if (!cprb_mempool)
+ return -ENOMEM;
+
+ /* Pre-allocate one crypto status card struct used in ep11_findcard2() */
+ dev_status_mem = kvmalloc(ZCRYPT_DEV_STATUS_EXT_SIZE, GFP_KERNEL);
+ if (!dev_status_mem) {
+ mempool_destroy(cprb_mempool);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void zcrypt_ep11misc_exit(void)
+{
+ mutex_lock(&dev_status_mem_mutex);
+ kvfree(dev_status_mem);
+ mutex_unlock(&dev_status_mem_mutex);
+ mempool_destroy(cprb_mempool);
+}
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h
new file mode 100644
index 000000000000..b5e6fd861815
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_ep11misc.h
@@ -0,0 +1,158 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright IBM Corp. 2019
+ * Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ *
+ * Collection of EP11 misc functions used by zcrypt and pkey
+ */
+
+#ifndef _ZCRYPT_EP11MISC_H_
+#define _ZCRYPT_EP11MISC_H_
+
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+
+#define EP11_API_V1 1 /* min EP11 API, default if no higher api required */
+#define EP11_API_V4 4 /* supported EP11 API for the ep11misc cprbs */
+#define EP11_API_V6 6 /* min EP11 API for some cprbs in SE environment */
+#define EP11_STRUCT_MAGIC 0x1234
+#define EP11_BLOB_PKEY_EXTRACTABLE 0x00200000
+
+/*
+ * Internally used values for the version field of the key header.
+ * Should match the enum pkey_key_type in pkey.h.
+ */
+#define TOKVER_EP11_AES 0x03 /* EP11 AES key blob (old style) */
+#define TOKVER_EP11_AES_WITH_HEADER 0x06 /* EP11 AES key blob with header */
+#define TOKVER_EP11_ECC_WITH_HEADER 0x07 /* EP11 ECC key blob with header */
+
+/* inside view of an EP11 secure key blob */
+struct ep11keyblob {
+ union {
+ u8 session[32];
+ /* only used for PKEY_TYPE_EP11: */
+ struct ep11kblob_header head;
+ };
+ u8 wkvp[16]; /* wrapping key verification pattern */
+ u64 attr; /* boolean key attributes */
+ u64 mode; /* mode bits */
+ u16 version; /* 0x1234, EP11_STRUCT_MAGIC */
+ u8 iv[14];
+ u8 encrypted_key_data[144];
+ u8 mac[32];
+} __packed;
+
+/* check ep11 key magic to find out if this is an ep11 key blob */
+static inline bool is_ep11_keyblob(const u8 *key)
+{
+ struct ep11keyblob *kb = (struct ep11keyblob *)key;
+
+ return (kb->version == EP11_STRUCT_MAGIC);
+}
+
+/*
+ * For valid ep11 keyblobs, returns a reference to the wrapping key
+ * verification pattern. Otherwise NULL.
+ */
+const u8 *ep11_kb_wkvp(const u8 *kblob, u32 kbloblen);
+
+/*
+ * Simple check if the key blob is a valid EP11 AES key blob with header.
+ * If checkcpacfexport is enabled, the key is also checked for the
+ * attributes needed to export this key for CPACF use.
+ * Returns 0 on success or errno value on failure.
+ */
+int ep11_check_aes_key_with_hdr(debug_info_t *dbg, int dbflvl,
+ const u8 *key, u32 keylen, int checkcpacfexp);
+
+/*
+ * Simple check if the key blob is a valid EP11 ECC key blob with header.
+ * If checkcpacfexport is enabled, the key is also checked for the
+ * attributes needed to export this key for CPACF use.
+ * Returns 0 on success or errno value on failure.
+ */
+int ep11_check_ecc_key_with_hdr(debug_info_t *dbg, int dbflvl,
+ const u8 *key, u32 keylen, int checkcpacfexp);
+
+/*
+ * Simple check if the key blob is a valid EP11 AES key blob with
+ * the header in the session field (old style EP11 AES key).
+ * If checkcpacfexport is enabled, the key is also checked for the
+ * attributes needed to export this key for CPACF use.
+ * Returns 0 on success or errno value on failure.
+ */
+int ep11_check_aes_key(debug_info_t *dbg, int dbflvl,
+ const u8 *key, u32 keylen, int checkcpacfexp);
+
+/* EP11 card info struct */
+struct ep11_card_info {
+ u32 API_ord_nr; /* API ordinal number */
+ u16 FW_version; /* Firmware major and minor version */
+ char serial[16]; /* serial number string (16 ascii, no 0x00 !) */
+ u64 op_mode; /* card operational mode(s) */
+};
+
+/* EP11 domain info struct */
+struct ep11_domain_info {
+ char cur_wk_state; /* '0' invalid, '1' valid */
+ char new_wk_state; /* '0' empty, '1' uncommitted, '2' committed */
+ u8 cur_wkvp[32]; /* current wrapping key verification pattern */
+ u8 new_wkvp[32]; /* new wrapping key verification pattern */
+ u64 op_mode; /* domain operational mode(s) */
+};
+
+/*
+ * Provide information about an EP11 card.
+ */
+int ep11_get_card_info(u16 card, struct ep11_card_info *info, u32 xflags);
+
+/*
+ * Provide information about a domain within an EP11 card.
+ */
+int ep11_get_domain_info(u16 card, u16 domain,
+ struct ep11_domain_info *info, u32 xflags);
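+
+/*
+ * Illustrative sketch only, not part of this header: querying a domain
+ * and checking whether a valid current wrapping key is set. The helper
+ * name is made up for this example.
+ */
+static inline bool example_domain_has_wk(u16 card, u16 dom)
+{
+	struct ep11_domain_info di;
+
+	if (ep11_get_domain_info(card, dom, &di, 0))
+		return false;
+	return di.cur_wk_state == '1';
+}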
+
+/*
+ * Generate (random) EP11 AES secure key.
+ */
+int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, u32 *keybufsize, u32 keybufver, u32 xflags);
+
+/*
+ * Generate EP11 AES secure key with given clear key value.
+ */
+int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
+ const u8 *clrkey, u8 *keybuf, u32 *keybufsize,
+ u32 keytype, u32 xflags);
+
+/*
+ * Build a list of ep11 apqns meeting the following constraints:
+ * - apqn is online and is in fact an EP11 apqn
+ * - if cardnr is not FFFF only apqns with this cardnr
+ * - if domain is not FFFF only apqns with this domainnr
+ * - if minhwtype > 0 only apqns with hwtype >= minhwtype
+ * - if minapi > 0 only apqns with API_ord_nr >= minapi
+ * - if wkvp != NULL only apqns where the given wkvp (EP11_WKVPLEN bytes)
+ *   matches the first EP11_WKVPLEN bytes of the wkvp of the current
+ *   wrapping key for this domain. When a wkvp is given, the domain info
+ *   is always re-fetched for each potential apqn - so this triggers a
+ *   request/reply to each eligible apqn.
+ * The caller should set *nr_apqns to the nr of elements available in *apqns.
+ * On return *nr_apqns is then updated with the nr of apqns filled into *apqns.
+ * The return value is either 0 for success or a negative errno value.
+ * If no apqn meeting the criteria is found, -ENODEV is returned.
+ */
+int ep11_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+ int minhwtype, int minapi, const u8 *wkvp, u32 xflags);
+
+/*
+ * Derive protected key from EP11 key blob (AES and ECC keys).
+ */
+int ep11_kblob2protkey(u16 card, u16 dom, const u8 *key, u32 keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype,
+ u32 xflags);
+
+int zcrypt_ep11misc_init(void);
+void zcrypt_ep11misc_exit(void);
+
+#endif /* _ZCRYPT_EP11MISC_H_ */
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index f34ee41cbed8..46e27b43a8af 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -52,7 +52,6 @@ struct error_hdr {
#define REP82_ERROR_INVALID_COMMAND 0x30
#define REP82_ERROR_MALFORMED_MSG 0x40
#define REP82_ERROR_INVALID_SPECIAL_CMD 0x41
-#define REP82_ERROR_INVALID_DOMAIN_PRECHECK 0x42
#define REP82_ERROR_RESERVED_FIELDO 0x50 /* old value */
#define REP82_ERROR_WORD_ALIGNMENT 0x60
#define REP82_ERROR_MESSAGE_LENGTH 0x80
@@ -61,12 +60,12 @@ struct error_hdr {
#define REP82_ERROR_EVEN_MOD_IN_OPND 0x85
#define REP82_ERROR_RESERVED_FIELD 0x88
#define REP82_ERROR_INVALID_DOMAIN_PENDING 0x8A
+#define REP82_ERROR_FILTERED_BY_HYPERVISOR 0x8B
#define REP82_ERROR_TRANSPORT_FAIL 0x90
#define REP82_ERROR_PACKET_TRUNCATED 0xA0
#define REP82_ERROR_ZERO_BUFFER_LEN 0xB0
#define REP88_ERROR_MODULE_FAILURE 0x10
-
#define REP88_ERROR_MESSAGE_TYPE 0x20
#define REP88_ERROR_MESSAGE_MALFORMD 0x22
#define REP88_ERROR_MESSAGE_LENGTH 0x23
@@ -79,82 +78,73 @@ struct error_hdr {
static inline int convert_error(struct zcrypt_queue *zq,
struct ap_message *reply)
{
- struct error_hdr *ehdr = reply->message;
+ struct error_hdr *ehdr = reply->msg;
int card = AP_QID_CARD(zq->queue->qid);
int queue = AP_QID_QUEUE(zq->queue->qid);
switch (ehdr->reply_code) {
- case REP82_ERROR_OPERAND_INVALID:
- case REP82_ERROR_OPERAND_SIZE:
- case REP82_ERROR_EVEN_MOD_IN_OPND:
- case REP88_ERROR_MESSAGE_MALFORMD:
- case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
- case REP82_ERROR_INVALID_DOMAIN_PENDING:
- case REP82_ERROR_INVALID_SPECIAL_CMD:
- // REP88_ERROR_INVALID_KEY // '82' CEX2A
- // REP88_ERROR_OPERAND // '84' CEX2A
- // REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A
- /* Invalid input data. */
- ZCRYPT_DBF(DBF_WARN,
- "device=%02x.%04x reply=0x%02x => rc=EINVAL\n",
- card, queue, ehdr->reply_code);
+ case REP82_ERROR_INVALID_MSG_LEN: /* 0x23 */
+ case REP82_ERROR_RESERVD_FIELD: /* 0x24 */
+ case REP82_ERROR_FORMAT_FIELD: /* 0x29 */
+ case REP82_ERROR_MALFORMED_MSG: /* 0x40 */
+ case REP82_ERROR_INVALID_SPECIAL_CMD: /* 0x41 */
+ case REP82_ERROR_MESSAGE_LENGTH: /* 0x80 */
+ case REP82_ERROR_OPERAND_INVALID: /* 0x82 */
+ case REP82_ERROR_OPERAND_SIZE: /* 0x84 */
+ case REP82_ERROR_EVEN_MOD_IN_OPND: /* 0x85 */
+ case REP82_ERROR_INVALID_DOMAIN_PENDING: /* 0x8A */
+ case REP82_ERROR_FILTERED_BY_HYPERVISOR: /* 0x8B */
+ case REP82_ERROR_PACKET_TRUNCATED: /* 0xA0 */
+ case REP88_ERROR_MESSAGE_MALFORMD: /* 0x22 */
+ case REP88_ERROR_KEY_TYPE: /* 0x34 */
+ /* RY indicates malformed request */
+ if (ehdr->reply_code == REP82_ERROR_FILTERED_BY_HYPERVISOR &&
+ ehdr->type == TYPE86_RSP_CODE) {
+ struct {
+ struct type86_hdr hdr;
+ struct type86_fmt2_ext fmt2;
+ } __packed * head = reply->msg;
+ unsigned int apfs = *((u32 *)head->fmt2.apfs);
+
+ ZCRYPT_DBF_WARN("%s dev=%02x.%04x RY=0x%02x apfs=0x%x => rc=EINVAL\n",
+ __func__, card, queue,
+ ehdr->reply_code, apfs);
+ } else {
+ ZCRYPT_DBF_WARN("%s dev=%02x.%04x RY=0x%02x => rc=EINVAL\n",
+ __func__, card, queue,
+ ehdr->reply_code);
+ }
return -EINVAL;
- case REP82_ERROR_MESSAGE_TYPE:
- // REP88_ERROR_MESSAGE_TYPE // '20' CEX2A
+ case REP82_ERROR_MACHINE_FAILURE: /* 0x10 */
+ case REP82_ERROR_MESSAGE_TYPE: /* 0x20 */
+ case REP82_ERROR_TRANSPORT_FAIL: /* 0x90 */
/*
- * To sent a message of the wrong type is a bug in the
- * device driver. Send error msg, disable the device
- * and then repeat the request.
+		 * Msg of wrong type, or card/infrastructure failure. Return
+		 * EAGAIN so the upper layer may retry the request.
*/
- atomic_set(&zcrypt_rescan_req, 1);
- zq->online = 0;
- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
- card, queue);
- ZCRYPT_DBF(DBF_ERR,
- "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
- card, queue, ehdr->reply_code);
- return -EAGAIN;
- case REP82_ERROR_TRANSPORT_FAIL:
- /* Card or infrastructure failure, disable card */
- atomic_set(&zcrypt_rescan_req, 1);
- zq->online = 0;
- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
- card, queue);
/* For type 86 response show the apfs value (failure reason) */
- if (ehdr->type == TYPE86_RSP_CODE) {
+ if (ehdr->reply_code == REP82_ERROR_TRANSPORT_FAIL &&
+ ehdr->type == TYPE86_RSP_CODE) {
struct {
struct type86_hdr hdr;
struct type86_fmt2_ext fmt2;
- } __packed * head = reply->message;
+ } __packed * head = reply->msg;
unsigned int apfs = *((u32 *)head->fmt2.apfs);
- ZCRYPT_DBF(DBF_ERR,
- "device=%02x.%04x reply=0x%02x apfs=0x%x => online=0 rc=EAGAIN\n",
- card, queue, apfs, ehdr->reply_code);
- } else
- ZCRYPT_DBF(DBF_ERR,
- "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
- card, queue, ehdr->reply_code);
- return -EAGAIN;
- case REP82_ERROR_MACHINE_FAILURE:
- // REP88_ERROR_MODULE_FAILURE // '10' CEX2A
- /* If a card fails disable it and repeat the request. */
- atomic_set(&zcrypt_rescan_req, 1);
- zq->online = 0;
- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
- card, queue);
- ZCRYPT_DBF(DBF_ERR,
- "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
- card, queue, ehdr->reply_code);
+ ZCRYPT_DBF_WARN(
+ "%s dev=%02x.%04x RY=0x%02x apfs=0x%x => bus rescan, rc=EAGAIN\n",
+ __func__, card, queue, ehdr->reply_code, apfs);
+ } else {
+ ZCRYPT_DBF_WARN("%s dev=%02x.%04x RY=0x%02x => bus rescan, rc=EAGAIN\n",
+ __func__, card, queue,
+ ehdr->reply_code);
+ }
return -EAGAIN;
default:
- zq->online = 0;
- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
- card, queue);
- ZCRYPT_DBF(DBF_ERR,
- "device=%02x.%04x reply=0x%02x => online=0 rc=EAGAIN\n",
- card, queue, ehdr->reply_code);
- return -EAGAIN; /* repeat the request on a different device. */
+ /* Assume request is valid and a retry will be worth it */
+ ZCRYPT_DBF_WARN("%s dev=%02x.%04x RY=0x%02x => rc=EAGAIN\n",
+ __func__, card, queue, ehdr->reply_code);
+ return -EAGAIN;
}
}
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index fc4295b3d801..d6fc2d8e7fad 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright IBM Corp. 2001, 2012
+ * Copyright IBM Corp. 2001, 2023
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
@@ -10,8 +10,7 @@
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
*/
-#define KMSG_COMPONENT "zcrypt"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "zcrypt: " fmt
#include <linux/module.h>
#include <linux/slab.h>
@@ -28,18 +27,15 @@
/* >= CEX3A: 4096 bits */
#define CEX3A_MAX_MOD_SIZE 512
-/* CEX2A: max outputdatalength + type80_hdr */
-#define CEX2A_MAX_RESPONSE_SIZE 0x110
-
/* >= CEX3A: 512 bit modulus, (max outputdatalength) + type80_hdr */
#define CEX3A_MAX_RESPONSE_SIZE 0x210
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Accelerator (message type 50), " \
- "Copyright IBM Corp. 2001, 2012");
+ "Copyright IBM Corp. 2001, 2023");
MODULE_LICENSE("GPL");
-/**
+/*
 * The type 50 message family is associated with CEXxA cards.
*
* The four members of the family are described below.
@@ -136,7 +132,7 @@ struct type50_crb3_msg {
unsigned char message[512];
} __packed;
-/**
+/*
 * The type 80 response family is associated with CEXxA cards.
*
* Note that all unsigned char arrays are right-justified and left-padded
@@ -156,9 +152,8 @@ struct type80_hdr {
unsigned char reserved3[8];
} __packed;
-unsigned int get_rsa_modex_fc(struct ica_rsa_modexpo *mex, int *fcode)
+int get_rsa_modex_fc(struct ica_rsa_modexpo *mex, int *fcode)
{
-
if (!mex->inputdatalength)
return -EINVAL;
@@ -172,9 +167,8 @@ unsigned int get_rsa_modex_fc(struct ica_rsa_modexpo *mex, int *fcode)
return 0;
}
-unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fcode)
+int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fcode)
{
-
if (!crt->inputdatalength)
return -EINVAL;
@@ -188,7 +182,7 @@ unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fcode)
return 0;
}
-/**
+/*
* Convert a ICAMEX message to a type50 MEX message.
*
* @zq: crypto queue pointer
@@ -207,10 +201,10 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
mod_len = mex->inputdatalength;
if (mod_len <= 128) {
- struct type50_meb1_msg *meb1 = ap_msg->message;
+ struct type50_meb1_msg *meb1 = ap_msg->msg;
memset(meb1, 0, sizeof(*meb1));
- ap_msg->length = sizeof(*meb1);
+ ap_msg->len = sizeof(*meb1);
meb1->header.msg_type_code = TYPE50_TYPE_CODE;
meb1->header.msg_len = sizeof(*meb1);
meb1->keyblock_type = TYPE50_MEB1_FMT;
@@ -218,10 +212,10 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
exp = meb1->exponent + sizeof(meb1->exponent) - mod_len;
inp = meb1->message + sizeof(meb1->message) - mod_len;
} else if (mod_len <= 256) {
- struct type50_meb2_msg *meb2 = ap_msg->message;
+ struct type50_meb2_msg *meb2 = ap_msg->msg;
memset(meb2, 0, sizeof(*meb2));
- ap_msg->length = sizeof(*meb2);
+ ap_msg->len = sizeof(*meb2);
meb2->header.msg_type_code = TYPE50_TYPE_CODE;
meb2->header.msg_len = sizeof(*meb2);
meb2->keyblock_type = TYPE50_MEB2_FMT;
@@ -229,27 +223,29 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq,
exp = meb2->exponent + sizeof(meb2->exponent) - mod_len;
inp = meb2->message + sizeof(meb2->message) - mod_len;
} else if (mod_len <= 512) {
- struct type50_meb3_msg *meb3 = ap_msg->message;
+ struct type50_meb3_msg *meb3 = ap_msg->msg;
memset(meb3, 0, sizeof(*meb3));
- ap_msg->length = sizeof(*meb3);
+ ap_msg->len = sizeof(*meb3);
meb3->header.msg_type_code = TYPE50_TYPE_CODE;
meb3->header.msg_len = sizeof(*meb3);
meb3->keyblock_type = TYPE50_MEB3_FMT;
mod = meb3->modulus + sizeof(meb3->modulus) - mod_len;
exp = meb3->exponent + sizeof(meb3->exponent) - mod_len;
inp = meb3->message + sizeof(meb3->message) - mod_len;
- } else
+ } else {
return -EINVAL;
+ }
if (copy_from_user(mod, mex->n_modulus, mod_len) ||
copy_from_user(exp, mex->b_key, mod_len) ||
copy_from_user(inp, mex->inputdata, mod_len))
return -EFAULT;
+
return 0;
}
-/**
+/*
* Convert a ICACRT message to a type50 CRT message.
*
* @zq: crypto queue pointer
@@ -275,10 +271,10 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
* 512 byte modulus (4k keys).
*/
if (mod_len <= 128) { /* up to 1024 bit key size */
- struct type50_crb1_msg *crb1 = ap_msg->message;
+ struct type50_crb1_msg *crb1 = ap_msg->msg;
memset(crb1, 0, sizeof(*crb1));
- ap_msg->length = sizeof(*crb1);
+ ap_msg->len = sizeof(*crb1);
crb1->header.msg_type_code = TYPE50_TYPE_CODE;
crb1->header.msg_len = sizeof(*crb1);
crb1->keyblock_type = TYPE50_CRB1_FMT;
@@ -289,10 +285,10 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
u = crb1->u + sizeof(crb1->u) - short_len;
inp = crb1->message + sizeof(crb1->message) - mod_len;
} else if (mod_len <= 256) { /* up to 2048 bit key size */
- struct type50_crb2_msg *crb2 = ap_msg->message;
+ struct type50_crb2_msg *crb2 = ap_msg->msg;
memset(crb2, 0, sizeof(*crb2));
- ap_msg->length = sizeof(*crb2);
+ ap_msg->len = sizeof(*crb2);
crb2->header.msg_type_code = TYPE50_TYPE_CODE;
crb2->header.msg_len = sizeof(*crb2);
crb2->keyblock_type = TYPE50_CRB2_FMT;
@@ -304,10 +300,10 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
inp = crb2->message + sizeof(crb2->message) - mod_len;
} else if ((mod_len <= 512) && /* up to 4096 bit key size */
(zq->zcard->max_mod_size == CEX3A_MAX_MOD_SIZE)) {
- struct type50_crb3_msg *crb3 = ap_msg->message;
+ struct type50_crb3_msg *crb3 = ap_msg->msg;
memset(crb3, 0, sizeof(*crb3));
- ap_msg->length = sizeof(*crb3);
+ ap_msg->len = sizeof(*crb3);
crb3->header.msg_type_code = TYPE50_TYPE_CODE;
crb3->header.msg_len = sizeof(*crb3);
crb3->keyblock_type = TYPE50_CRB3_FMT;
@@ -317,8 +313,9 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
dq = crb3->dq + sizeof(crb3->dq) - short_len;
u = crb3->u + sizeof(crb3->u) - short_len;
inp = crb3->message + sizeof(crb3->message) - mod_len;
- } else
+ } else {
return -EINVAL;
+ }
/*
* correct the offset of p, bp and mult_inv according zcrypt.h
@@ -335,7 +332,7 @@ static int ICACRT_msg_to_type50CRT_msg(struct zcrypt_queue *zq,
return 0;
}
-/**
+/*
* Copy results from a type 80 reply message back to user space.
*
* @zq: crypto device pointer
@@ -350,27 +347,23 @@ static int convert_type80(struct zcrypt_queue *zq,
char __user *outputdata,
unsigned int outputdatalength)
{
- struct type80_hdr *t80h = reply->message;
+ struct type80_hdr *t80h = reply->msg;
unsigned char *data;
if (t80h->len < sizeof(*t80h) + outputdatalength) {
/* The result is too short, the CEXxA card may not do that.. */
zq->online = 0;
- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ pr_err("Crypto dev=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid));
- ZCRYPT_DBF(DBF_ERR,
- "device=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
- AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid),
- t80h->code);
- return -EAGAIN; /* repeat the request on a different device. */
+ AP_QID_QUEUE(zq->queue->qid), t80h->code);
+ ZCRYPT_DBF_ERR("%s dev=%02x.%04x code=0x%02x => online=0 rc=EAGAIN\n",
+ __func__, AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid), t80h->code);
+ ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
+ return -EAGAIN;
}
- if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
- BUG_ON(t80h->len > CEX2A_MAX_RESPONSE_SIZE);
- else
- BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
- data = reply->message + t80h->len - outputdatalength;
+ BUG_ON(t80h->len > CEX3A_MAX_RESPONSE_SIZE);
+ data = reply->msg + t80h->len - outputdatalength;
if (copy_to_user(outputdata, data, outputdatalength))
return -EFAULT;
return 0;
@@ -382,7 +375,7 @@ static int convert_response(struct zcrypt_queue *zq,
unsigned int outputdatalength)
{
/* Response type byte is the second byte in the response. */
- unsigned char rtype = ((unsigned char *) reply->message)[1];
+ unsigned char rtype = ((unsigned char *)reply->msg)[1];
switch (rtype) {
case TYPE82_RSP_CODE:
@@ -393,19 +386,20 @@ static int convert_response(struct zcrypt_queue *zq,
outputdata, outputdatalength);
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid));
- ZCRYPT_DBF(DBF_ERR,
- "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
- AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid),
- (unsigned int) rtype);
- return -EAGAIN; /* repeat the request on a different device. */
+ AP_QID_QUEUE(zq->queue->qid),
+ (int)rtype);
+ ZCRYPT_DBF_ERR(
+ "%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ __func__, AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid), (int)rtype);
+ ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
+ return -EAGAIN;
}
}
-/**
+/*
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
@@ -413,135 +407,145 @@ static int convert_response(struct zcrypt_queue *zq,
* @msg: pointer to the AP message
* @reply: pointer to the AP reply message
*/
-static void zcrypt_cex2a_receive(struct ap_queue *aq,
- struct ap_message *msg,
- struct ap_message *reply)
+static void zcrypt_msgtype50_receive(struct ap_queue *aq,
+ struct ap_message *msg,
+ struct ap_message *reply)
{
static struct error_hdr error_reply = {
.type = TYPE82_RSP_CODE,
.reply_code = REP82_ERROR_MACHINE_FAILURE,
};
struct type80_hdr *t80h;
- int length;
+ int len;
/* Copy the reply message to the request message buffer. */
if (!reply)
goto out; /* ap_msg->rc indicates the error */
- t80h = reply->message;
+ t80h = reply->msg;
if (t80h->type == TYPE80_RSP_CODE) {
- if (aq->ap_dev.device_type == AP_DEVICE_TYPE_CEX2A)
- length = min_t(int,
- CEX2A_MAX_RESPONSE_SIZE, t80h->len);
- else
- length = min_t(int,
- CEX3A_MAX_RESPONSE_SIZE, t80h->len);
- memcpy(msg->message, reply->message, length);
- } else
- memcpy(msg->message, reply->message, sizeof(error_reply));
+ len = t80h->len;
+ if (len > reply->bufsize || len > msg->bufsize ||
+ len != reply->len) {
+ pr_debug("len mismatch => EMSGSIZE\n");
+ msg->rc = -EMSGSIZE;
+ goto out;
+ }
+ memcpy(msg->msg, reply->msg, len);
+ msg->len = len;
+ } else {
+ memcpy(msg->msg, reply->msg, sizeof(error_reply));
+ msg->len = sizeof(error_reply);
+ }
out:
- complete((struct completion *) msg->private);
+ complete(&msg->response.work);
}
static atomic_t zcrypt_step = ATOMIC_INIT(0);
-/**
+/*
* The request distributor calls this function if it picked the CEXxA
* device to handle a modexpo request.
* @zq: pointer to zcrypt_queue structure that identifies the
* CEXxA device to the request distributor
* @mex: pointer to the modexpo request buffer
+ * This function assumes that ap_msg has been initialized with
+ * ap_init_apmsg() and thus a valid buffer of size ap_msg->bufsize
+ * is available within ap_msg. Also the caller has to make sure that
+ * ap_release_apmsg() is always called, even on failure.
*/
-static long zcrypt_cex2a_modexpo(struct zcrypt_queue *zq,
- struct ica_rsa_modexpo *mex)
+static long zcrypt_msgtype50_modexpo(struct zcrypt_queue *zq,
+ struct ica_rsa_modexpo *mex,
+ struct ap_message *ap_msg)
{
- struct ap_message ap_msg;
- struct completion work;
int rc;
- ap_init_message(&ap_msg);
- if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
- ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
- GFP_KERNEL);
- else
- ap_msg.message = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE,
- GFP_KERNEL);
- if (!ap_msg.message)
- return -ENOMEM;
- ap_msg.receive = zcrypt_cex2a_receive;
- ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
- atomic_inc_return(&zcrypt_step);
- ap_msg.private = &work;
- rc = ICAMEX_msg_to_type50MEX_msg(zq, &ap_msg, mex);
+ if (ap_msg->bufsize < MSGTYPE50_CRB3_MAX_MSG_SIZE)
+ return -EMSGSIZE;
+ ap_msg->receive = zcrypt_msgtype50_receive;
+ ap_msg->psmid = (((unsigned long)current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ rc = ICAMEX_msg_to_type50MEX_msg(zq, ap_msg, mex);
+ if (rc)
+ goto out;
+ init_completion(&ap_msg->response.work);
+ rc = ap_queue_message(zq->queue, ap_msg);
if (rc)
- goto out_free;
- init_completion(&work);
- ap_queue_message(zq->queue, &ap_msg);
- rc = wait_for_completion_interruptible(&work);
+ goto out;
+ rc = wait_for_completion_interruptible(&ap_msg->response.work);
if (rc == 0) {
- rc = ap_msg.rc;
+ rc = ap_msg->rc;
if (rc == 0)
- rc = convert_response(zq, &ap_msg, mex->outputdata,
+ rc = convert_response(zq, ap_msg,
+ mex->outputdata,
mex->outputdatalength);
- } else
+ } else {
/* Signal pending. */
- ap_cancel_message(zq->queue, &ap_msg);
-out_free:
- kfree(ap_msg.message);
+ ap_cancel_message(zq->queue, ap_msg);
+ }
+
+out:
+ if (rc)
+ pr_debug("send me cprb at dev=%02x.%04x rc=%d\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
-/**
+/*
* The request distributor calls this function if it picked the CEXxA
* device to handle a modexpo_crt request.
* @zq: pointer to zcrypt_queue structure that identifies the
* CEXxA device to the request distributor
 * @crt: pointer to the modexpo_crt request buffer
+ * This function assumes that ap_msg has been initialized with
+ * ap_init_apmsg() and thus a valid buffer of size ap_msg->bufsize
+ * is available within ap_msg. Also the caller has to make sure that
+ * ap_release_apmsg() is always called, even on failure.
*/
-static long zcrypt_cex2a_modexpo_crt(struct zcrypt_queue *zq,
- struct ica_rsa_modexpo_crt *crt)
+static long zcrypt_msgtype50_modexpo_crt(struct zcrypt_queue *zq,
+ struct ica_rsa_modexpo_crt *crt,
+ struct ap_message *ap_msg)
{
- struct ap_message ap_msg;
- struct completion work;
int rc;
- ap_init_message(&ap_msg);
- if (zq->zcard->user_space_type == ZCRYPT_CEX2A)
- ap_msg.message = kmalloc(MSGTYPE50_CRB2_MAX_MSG_SIZE,
- GFP_KERNEL);
- else
- ap_msg.message = kmalloc(MSGTYPE50_CRB3_MAX_MSG_SIZE,
- GFP_KERNEL);
- if (!ap_msg.message)
- return -ENOMEM;
- ap_msg.receive = zcrypt_cex2a_receive;
- ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
- atomic_inc_return(&zcrypt_step);
- ap_msg.private = &work;
- rc = ICACRT_msg_to_type50CRT_msg(zq, &ap_msg, crt);
+ if (ap_msg->bufsize < MSGTYPE50_CRB3_MAX_MSG_SIZE)
+ return -EMSGSIZE;
+ ap_msg->receive = zcrypt_msgtype50_receive;
+ ap_msg->psmid = (((unsigned long)current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ rc = ICACRT_msg_to_type50CRT_msg(zq, ap_msg, crt);
if (rc)
- goto out_free;
- init_completion(&work);
- ap_queue_message(zq->queue, &ap_msg);
- rc = wait_for_completion_interruptible(&work);
+ goto out;
+ init_completion(&ap_msg->response.work);
+ rc = ap_queue_message(zq->queue, ap_msg);
+ if (rc)
+ goto out;
+ rc = wait_for_completion_interruptible(&ap_msg->response.work);
if (rc == 0) {
- rc = ap_msg.rc;
+ rc = ap_msg->rc;
if (rc == 0)
- rc = convert_response(zq, &ap_msg, crt->outputdata,
+ rc = convert_response(zq, ap_msg,
+ crt->outputdata,
crt->outputdatalength);
- } else
+ } else {
/* Signal pending. */
- ap_cancel_message(zq->queue, &ap_msg);
-out_free:
- kfree(ap_msg.message);
+ ap_cancel_message(zq->queue, ap_msg);
+ }
+
+out:
+ if (rc)
+ pr_debug("send crt cprb at dev=%02x.%04x rc=%d\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
-/**
+/*
* The crypto operations for message type 50.
*/
static struct zcrypt_ops zcrypt_msgtype50_ops = {
- .rsa_modexpo = zcrypt_cex2a_modexpo,
- .rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt,
+ .rsa_modexpo = zcrypt_msgtype50_modexpo,
+ .rsa_modexpo_crt = zcrypt_msgtype50_modexpo_crt,
.owner = THIS_MODULE,
.name = MSGTYPE50_NAME,
.variant = MSGTYPE50_VARIANT_DEFAULT,
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.h b/drivers/s390/crypto/zcrypt_msgtype50.h
index 66bec4f45c56..323e93b90b12 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.h
+++ b/drivers/s390/crypto/zcrypt_msgtype50.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * Copyright IBM Corp. 2001, 2012
+ * Copyright IBM Corp. 2001, 2023
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
@@ -15,13 +15,12 @@
#define MSGTYPE50_NAME "zcrypt_msgtype50"
#define MSGTYPE50_VARIANT_DEFAULT 0
-#define MSGTYPE50_CRB2_MAX_MSG_SIZE 0x390 /* sizeof(struct type50_crb2_msg) */
#define MSGTYPE50_CRB3_MAX_MSG_SIZE 0x710 /* sizeof(struct type50_crb3_msg) */
#define MSGTYPE_ADJUSTMENT 0x08 /* type04 extension (not needed in type50) */
-unsigned int get_rsa_modex_fc(struct ica_rsa_modexpo *, int *);
-unsigned int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *, int *);
+int get_rsa_modex_fc(struct ica_rsa_modexpo *mex, int *fc);
+int get_rsa_crt_fc(struct ica_rsa_modexpo_crt *crt, int *fc);
void zcrypt_msgtype50_init(void);
void zcrypt_msgtype50_exit(void);
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 0cbcc238ef98..a0dcab5dc4f2 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright IBM Corp. 2001, 2012
+ * Copyright IBM Corp. 2001, 2023
* Author(s): Robert Burroughs
* Eric Rossman (edrossma@us.ibm.com)
*
@@ -10,8 +10,7 @@
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
*/
-#define KMSG_COMPONENT "zcrypt"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "zcrypt: " fmt
#include <linux/module.h>
#include <linux/init.h>
@@ -29,85 +28,24 @@
#define CEXXC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
-#define CEIL4(x) ((((x)+3)/4)*4)
+#define CEIL4(x) ((((x) + 3) / 4) * 4)
-struct response_type {
- struct completion work;
- int type;
-};
#define CEXXC_RESPONSE_TYPE_ICA 0
#define CEXXC_RESPONSE_TYPE_XCRB 1
#define CEXXC_RESPONSE_TYPE_EP11 2
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor (message type 6), " \
- "Copyright IBM Corp. 2001, 2012");
+ "Copyright IBM Corp. 2001, 2023");
MODULE_LICENSE("GPL");
-/**
- * CPRB
- * Note that all shorts, ints and longs are little-endian.
- * All pointer fields are 32-bits long, and mean nothing
- *
- * A request CPRB is followed by a request_parameter_block.
- *
- * The request (or reply) parameter block is organized thus:
- * function code
- * VUD block
- * key block
- */
-struct CPRB {
- unsigned short cprb_len; /* CPRB length */
- unsigned char cprb_ver_id; /* CPRB version id. */
- unsigned char pad_000; /* Alignment pad byte. */
- unsigned char srpi_rtcode[4]; /* SRPI return code LELONG */
- unsigned char srpi_verb; /* SRPI verb type */
- unsigned char flags; /* flags */
- unsigned char func_id[2]; /* function id */
- unsigned char checkpoint_flag; /* */
- unsigned char resv2; /* reserved */
- unsigned short req_parml; /* request parameter buffer */
- /* length 16-bit little endian */
- unsigned char req_parmp[4]; /* request parameter buffer *
- * pointer (means nothing: the *
- * parameter buffer follows *
- * the CPRB). */
- unsigned char req_datal[4]; /* request data buffer */
- /* length ULELONG */
- unsigned char req_datap[4]; /* request data buffer */
- /* pointer */
- unsigned short rpl_parml; /* reply parameter buffer */
- /* length 16-bit little endian */
- unsigned char pad_001[2]; /* Alignment pad bytes. ULESHORT */
- unsigned char rpl_parmp[4]; /* reply parameter buffer *
- * pointer (means nothing: the *
- * parameter buffer follows *
- * the CPRB). */
- unsigned char rpl_datal[4]; /* reply data buffer len ULELONG */
- unsigned char rpl_datap[4]; /* reply data buffer */
- /* pointer */
- unsigned short ccp_rscode; /* server reason code ULESHORT */
- unsigned short ccp_rtcode; /* server return code ULESHORT */
- unsigned char repd_parml[2]; /* replied parameter len ULESHORT*/
- unsigned char mac_data_len[2]; /* Mac Data Length ULESHORT */
- unsigned char repd_datal[4]; /* replied data length ULELONG */
- unsigned char req_pc[2]; /* PC identifier */
- unsigned char res_origin[8]; /* resource origin */
- unsigned char mac_value[8]; /* Mac Value */
- unsigned char logon_id[8]; /* Logon Identifier */
- unsigned char usage_domain[2]; /* cdx */
- unsigned char resv3[18]; /* reserved for requestor */
- unsigned short svr_namel; /* server name length ULESHORT */
- unsigned char svr_name[8]; /* server name */
-} __packed;
-
struct function_and_rules_block {
unsigned char function_code[2];
unsigned short ulen;
unsigned char only_rule[8];
} __packed;
-/**
+/*
* The following is used to initialize the CPRBX passed to the CEXxC/CEXxP
* card in a type6 message. The 3 fields that must be filled in at execution
* time are req_parml, rpl_parml and usage_domain.
@@ -235,8 +173,7 @@ int speed_idx_ep11(int req_type)
}
}
-
-/**
+/*
* Convert a ICAMEX message to a type6 MEX message.
*
* @zq: crypto device pointer
@@ -245,7 +182,7 @@ int speed_idx_ep11(int req_type)
*
* Returns 0 on success or negative errno value.
*/
-static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
+static int icamex_msg_to_type6mex_msgx(struct zcrypt_queue *zq,
struct ap_message *ap_msg,
struct ica_rsa_modexpo *mex)
{
@@ -265,8 +202,8 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
struct CPRBX cprbx;
struct function_and_rules_block fr;
unsigned short length;
- char text[0];
- } __packed * msg = ap_msg->message;
+ char text[];
+ } __packed * msg = ap_msg->msg;
int size;
/*
@@ -283,29 +220,29 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
return -EFAULT;
/* Set up key which is located after the variable length text. */
- size = zcrypt_type6_mex_key_en(mex, msg->text+mex->inputdatalength);
+ size = zcrypt_type6_mex_key_en(mex, msg->text + mex->inputdatalength);
if (size < 0)
return size;
size += sizeof(*msg) + mex->inputdatalength;
/* message header, cprbx and f&r */
msg->hdr = static_type6_hdrX;
- msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
- msg->hdr.FromCardLen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
+ msg->hdr.tocardlen1 = size - sizeof(msg->hdr);
+ msg->hdr.fromcardlen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
msg->cprbx = static_cprbx;
msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
- msg->cprbx.rpl_msgbl = msg->hdr.FromCardLen1;
+ msg->cprbx.rpl_msgbl = msg->hdr.fromcardlen1;
msg->fr = static_pke_fnr;
msg->cprbx.req_parml = size - sizeof(msg->hdr) - sizeof(msg->cprbx);
- ap_msg->length = size;
+ ap_msg->len = size;
return 0;
}
-/**
+/*
* Convert a ICACRT message to a type6 CRT message.
*
* @zq: crypto device pointer
@@ -314,7 +251,7 @@ static int ICAMEX_msg_to_type6MEX_msgX(struct zcrypt_queue *zq,
*
* Returns 0 on success or negative errno value.
*/
-static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
+static int icacrt_msg_to_type6crt_msgx(struct zcrypt_queue *zq,
struct ap_message *ap_msg,
struct ica_rsa_modexpo_crt *crt)
{
@@ -335,8 +272,8 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
struct CPRBX cprbx;
struct function_and_rules_block fr;
unsigned short length;
- char text[0];
- } __packed * msg = ap_msg->message;
+ char text[];
+ } __packed * msg = ap_msg->msg;
int size;
/*
@@ -360,8 +297,8 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
/* message header, cprbx and f&r */
msg->hdr = static_type6_hdrX;
- msg->hdr.ToCardLen1 = size - sizeof(msg->hdr);
- msg->hdr.FromCardLen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
+ msg->hdr.tocardlen1 = size - sizeof(msg->hdr);
+ msg->hdr.fromcardlen1 = CEXXC_MAX_ICA_RESPONSE_SIZE - sizeof(msg->hdr);
msg->cprbx = static_cprbx;
msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
@@ -370,11 +307,11 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_queue *zq,
msg->fr = static_pkd_fnr;
- ap_msg->length = size;
+ ap_msg->len = size;
return 0;
}
-/**
+/*
 * Convert an XCRB message to a type6 CPRB message.
*
* @zq: crypto device pointer
@@ -388,8 +325,8 @@ struct type86_fmt2_msg {
struct type86_fmt2_ext fmt2;
} __packed;
-static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg,
- struct ica_xcRB *xcRB,
+static int xcrb_msg_to_type6cprb_msgx(bool userspace, struct ap_message *ap_msg,
+ struct ica_xcRB *xcrb,
unsigned int *fcode,
unsigned short **dom)
{
@@ -399,77 +336,76 @@ static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg,
};
struct {
struct type6_hdr hdr;
- struct CPRBX cprbx;
- } __packed * msg = ap_msg->message;
-
- int rcblen = CEIL4(xcRB->request_control_blk_length);
- int replylen, req_sumlen, resp_sumlen;
- char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen;
+ union {
+ struct CPRBX cprbx;
+ DECLARE_FLEX_ARRAY(u8, userdata);
+ };
+ } __packed * msg = ap_msg->msg;
+
+ int rcblen = CEIL4(xcrb->request_control_blk_length);
+ int req_sumlen, resp_sumlen;
+ char *req_data = ap_msg->msg + sizeof(struct type6_hdr) + rcblen;
char *function_code;
- if (CEIL4(xcRB->request_control_blk_length) <
- xcRB->request_control_blk_length)
+ if (CEIL4(xcrb->request_control_blk_length) <
+ xcrb->request_control_blk_length)
		return -EINVAL; /* overflow after alignment */
/* length checks */
- ap_msg->length = sizeof(struct type6_hdr) +
- CEIL4(xcRB->request_control_blk_length) +
- xcRB->request_data_length;
- if (ap_msg->length > MSGTYPE06_MAX_MSG_SIZE)
+ ap_msg->len = sizeof(struct type6_hdr) +
+ CEIL4(xcrb->request_control_blk_length) +
+ xcrb->request_data_length;
+ if (ap_msg->len > ap_msg->bufsize)
return -EINVAL;
/*
* Overflow check
	 * the sum must be greater than (or equal to) the largest operand
*/
- req_sumlen = CEIL4(xcRB->request_control_blk_length) +
- xcRB->request_data_length;
- if ((CEIL4(xcRB->request_control_blk_length) <=
- xcRB->request_data_length) ?
- (req_sumlen < xcRB->request_data_length) :
- (req_sumlen < CEIL4(xcRB->request_control_blk_length))) {
+ req_sumlen = CEIL4(xcrb->request_control_blk_length) +
+ xcrb->request_data_length;
+ if ((CEIL4(xcrb->request_control_blk_length) <=
+ xcrb->request_data_length) ?
+ req_sumlen < xcrb->request_data_length :
+ req_sumlen < CEIL4(xcrb->request_control_blk_length)) {
return -EINVAL;
}
- if (CEIL4(xcRB->reply_control_blk_length) <
- xcRB->reply_control_blk_length)
+ if (CEIL4(xcrb->reply_control_blk_length) <
+ xcrb->reply_control_blk_length)
		return -EINVAL; /* overflow after alignment */
- replylen = sizeof(struct type86_fmt2_msg) +
- CEIL4(xcRB->reply_control_blk_length) +
- xcRB->reply_data_length;
- if (replylen > MSGTYPE06_MAX_MSG_SIZE)
- return -EINVAL;
-
/*
* Overflow check
	 * the sum must be greater than (or equal to) the largest operand
*/
- resp_sumlen = CEIL4(xcRB->reply_control_blk_length) +
- xcRB->reply_data_length;
- if ((CEIL4(xcRB->reply_control_blk_length) <= xcRB->reply_data_length) ?
- (resp_sumlen < xcRB->reply_data_length) :
- (resp_sumlen < CEIL4(xcRB->reply_control_blk_length))) {
+ resp_sumlen = CEIL4(xcrb->reply_control_blk_length) +
+ xcrb->reply_data_length;
+ if ((CEIL4(xcrb->reply_control_blk_length) <=
+ xcrb->reply_data_length) ?
+ resp_sumlen < xcrb->reply_data_length :
+ resp_sumlen < CEIL4(xcrb->reply_control_blk_length)) {
return -EINVAL;
}
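
The two guards above rely on the unsigned-addition rule: a + b wraps exactly when the truncated sum ends up smaller than the larger operand. The idiom in isolation (helper name illustrative):

	#include <stdbool.h>

	/* True if a + b wraps in unsigned arithmetic: the truncated sum is
	 * then smaller than the larger (in fact, than either) operand. */
	static bool add_overflows(unsigned int a, unsigned int b)
	{
		unsigned int sum = a + b;

		return sum < (a > b ? a : b);
	}
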
/* prepare type6 header */
msg->hdr = static_type6_hdrX;
- memcpy(msg->hdr.agent_id, &(xcRB->agent_ID), sizeof(xcRB->agent_ID));
- msg->hdr.ToCardLen1 = xcRB->request_control_blk_length;
- if (xcRB->request_data_length) {
+ memcpy(msg->hdr.agent_id, &xcrb->agent_ID, sizeof(xcrb->agent_ID));
+ msg->hdr.tocardlen1 = xcrb->request_control_blk_length;
+ if (xcrb->request_data_length) {
msg->hdr.offset2 = msg->hdr.offset1 + rcblen;
- msg->hdr.ToCardLen2 = xcRB->request_data_length;
+ msg->hdr.tocardlen2 = xcrb->request_data_length;
}
- msg->hdr.FromCardLen1 = xcRB->reply_control_blk_length;
- msg->hdr.FromCardLen2 = xcRB->reply_data_length;
+ msg->hdr.fromcardlen1 = xcrb->reply_control_blk_length;
+ msg->hdr.fromcardlen2 = xcrb->reply_data_length;
/* prepare CPRB */
- if (copy_from_user(&(msg->cprbx), xcRB->request_control_blk_addr,
- xcRB->request_control_blk_length))
+ if (z_copy_from_user(userspace, msg->userdata,
+ xcrb->request_control_blk_addr,
+ xcrb->request_control_blk_length))
return -EFAULT;
if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
- xcRB->request_control_blk_length)
+ xcrb->request_control_blk_length)
return -EINVAL;
function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
memcpy(msg->hdr.function_code, function_code,
@@ -478,24 +414,40 @@ static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg,
*fcode = (msg->hdr.function_code[0] << 8) | msg->hdr.function_code[1];
*dom = (unsigned short *)&msg->cprbx.domain;
- if (memcmp(function_code, "US", 2) == 0
- || memcmp(function_code, "AU", 2) == 0)
- ap_msg->special = 1;
- else
- ap_msg->special = 0;
+ /* check subfunction, US and AU need special flag with NQAP */
+ if (memcmp(function_code, "US", 2) == 0 ||
+ memcmp(function_code, "AU", 2) == 0)
+ ap_msg->flags |= AP_MSG_FLAG_SPECIAL;
+
+ /* check CPRB minor version, set info bits in ap_message flag field */
+ switch (*(unsigned short *)(&msg->cprbx.func_id[0])) {
+ case 0x5432: /* "T2" */
+ ap_msg->flags |= AP_MSG_FLAG_USAGE;
+ break;
+ case 0x5433: /* "T3" */
+ case 0x5435: /* "T5" */
+ case 0x5436: /* "T6" */
+ case 0x5437: /* "T7" */
+ ap_msg->flags |= AP_MSG_FLAG_ADMIN;
+ break;
+ default:
+ pr_debug("unknown CPRB minor version '%c%c'\n",
+ msg->cprbx.func_id[0], msg->cprbx.func_id[1]);
+ }
/* copy data block */
- if (xcRB->request_data_length &&
- copy_from_user(req_data, xcRB->request_data_address,
- xcRB->request_data_length))
+ if (xcrb->request_data_length &&
+ z_copy_from_user(userspace, req_data, xcrb->request_data_address,
+ xcrb->request_data_length))
return -EFAULT;
return 0;
}
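
The minor-version switch above reads the two ASCII func_id bytes as one 16-bit value, which on big-endian s390 makes 'T','2' equal 0x5432. A portable sketch of the same classification (flag values illustrative, not the driver's AP_MSG_FLAG_* bits):

	#include <stdint.h>

	#define DEMO_FLAG_USAGE 0x01
	#define DEMO_FLAG_ADMIN 0x02

	/* Map a CPRB func_id pair such as "T2".."T7" to an info flag. */
	static unsigned int classify_func_id(const unsigned char id[2])
	{
		uint16_t v = ((uint16_t)id[0] << 8) | id[1]; /* big-endian pair */

		switch (v) {
		case 0x5432:			/* "T2": usage CPRB */
			return DEMO_FLAG_USAGE;
		case 0x5433:			/* "T3" */
		case 0x5435:			/* "T5" */
		case 0x5436:			/* "T6" */
		case 0x5437:			/* "T7": admin CPRBs */
			return DEMO_FLAG_ADMIN;
		default:
			return 0;		/* unknown minor version */
		}
	}
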
-static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg,
- struct ep11_urb *xcRB,
- unsigned int *fcode)
+static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap_msg,
+ struct ep11_urb *xcrb,
+ unsigned int *fcode,
+ unsigned int *domain)
{
unsigned int lfmt;
static struct type6_hdr static_type6_ep11_hdr = {
@@ -509,10 +461,15 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg,
struct {
struct type6_hdr hdr;
- struct ep11_cprb cprbx;
- unsigned char pld_tag; /* fixed value 0x30 */
- unsigned char pld_lenfmt; /* payload length format */
- } __packed * msg = ap_msg->message;
+ union {
+ struct {
+ struct ep11_cprb cprbx;
+ unsigned char pld_tag; /* fixed value 0x30 */
+ unsigned char pld_lenfmt; /* length format */
+ } __packed;
+ DECLARE_FLEX_ARRAY(u8, userdata);
+ };
+ } __packed * msg = ap_msg->msg;
struct pld_hdr {
unsigned char func_tag; /* fixed value 0x4 */
@@ -523,30 +480,25 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg,
unsigned int dom_val; /* domain id */
} __packed * payload_hdr = NULL;
- if (CEIL4(xcRB->req_len) < xcRB->req_len)
+ if (CEIL4(xcrb->req_len) < xcrb->req_len)
		return -EINVAL; /* overflow after alignment */
/* length checks */
- ap_msg->length = sizeof(struct type6_hdr) + xcRB->req_len;
- if (CEIL4(xcRB->req_len) > MSGTYPE06_MAX_MSG_SIZE -
- (sizeof(struct type6_hdr)))
+ ap_msg->len = sizeof(struct type6_hdr) + CEIL4(xcrb->req_len);
+ if (ap_msg->len > ap_msg->bufsize)
return -EINVAL;
- if (CEIL4(xcRB->resp_len) < xcRB->resp_len)
+ if (CEIL4(xcrb->resp_len) < xcrb->resp_len)
		return -EINVAL; /* overflow after alignment */
- if (CEIL4(xcRB->resp_len) > MSGTYPE06_MAX_MSG_SIZE -
- (sizeof(struct type86_fmt2_msg)))
- return -EINVAL;
-
/* prepare type6 header */
msg->hdr = static_type6_ep11_hdr;
- msg->hdr.ToCardLen1 = xcRB->req_len;
- msg->hdr.FromCardLen1 = xcRB->resp_len;
+ msg->hdr.tocardlen1 = xcrb->req_len;
+ msg->hdr.fromcardlen1 = xcrb->resp_len;
/* Import CPRB data from the ioctl input parameter */
- if (copy_from_user(&(msg->cprbx.cprb_len),
- (char __force __user *)xcRB->req, xcRB->req_len)) {
+ if (z_copy_from_user(userspace, msg->userdata,
+ (char __force __user *)xcrb->req, xcrb->req_len)) {
return -EFAULT;
}
@@ -564,13 +516,25 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(struct ap_message *ap_msg,
} else {
lfmt = 1; /* length format #1 */
}
- payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
+ payload_hdr = (struct pld_hdr *)((&msg->pld_lenfmt) + lfmt);
*fcode = payload_hdr->func_val & 0xFFFF;
+ /* enable special processing based on the cprbs flags special bit */
+ if (msg->cprbx.flags & 0x20)
+ ap_msg->flags |= AP_MSG_FLAG_SPECIAL;
+
+ /* set info bits in ap_message flag field */
+ if (msg->cprbx.flags & 0x80)
+ ap_msg->flags |= AP_MSG_FLAG_ADMIN;
+ else
+ ap_msg->flags |= AP_MSG_FLAG_USAGE;
+
+ *domain = msg->cprbx.target_id;
+
return 0;
}
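
The pld_tag/pld_lenfmt pair parsed above is ASN.1 BER framing: 0x30 is the SEQUENCE tag, and the following byte either holds the length itself (short form) or, with bit 0x80 set, the count of length octets that follow. A sketch of computing how many bytes to skip to reach the payload header (simplified to the forms the driver handles):

	#include <stddef.h>

	/* Size in bytes of the BER length field that follows the 0x30 tag:
	 * 1 for short form, 1 + N for long form, 0 if unsupported. */
	static size_t ber_len_octets(unsigned char lenfmt)
	{
		if (!(lenfmt & 0x80))
			return 1;		/* short form: length is in this byte */
		switch (lenfmt & 0x7f) {	/* long form: N length octets follow */
		case 1:
			return 2;
		case 2:
			return 3;
		default:
			return 0;		/* longer forms not expected here */
		}
	}
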
-/**
+/*
* Copy results from a type 86 ICA reply message back to user space.
*
* @zq: crypto device pointer
@@ -585,8 +549,8 @@ struct type86x_reply {
struct type86_fmt2_ext fmt2;
struct CPRBX cprbx;
unsigned char pad[4]; /* 4 byte function code/rules block ? */
- unsigned short length;
- char text[0];
+ unsigned short length; /* data length, including this length field itself */
+ char data[];
} __packed;
struct type86_ep11_reply {
@@ -596,49 +560,13 @@ struct type86_ep11_reply {
} __packed;
static int convert_type86_ica(struct zcrypt_queue *zq,
- struct ap_message *reply,
- char __user *outputdata,
- unsigned int outputdatalength)
+ struct ap_message *reply,
+ char __user *outputdata,
+ unsigned int outputdatalength)
{
- static unsigned char static_pad[] = {
- 0x00, 0x02,
- 0x1B, 0x7B, 0x5D, 0xB5, 0x75, 0x01, 0x3D, 0xFD,
- 0x8D, 0xD1, 0xC7, 0x03, 0x2D, 0x09, 0x23, 0x57,
- 0x89, 0x49, 0xB9, 0x3F, 0xBB, 0x99, 0x41, 0x5B,
- 0x75, 0x21, 0x7B, 0x9D, 0x3B, 0x6B, 0x51, 0x39,
- 0xBB, 0x0D, 0x35, 0xB9, 0x89, 0x0F, 0x93, 0xA5,
- 0x0B, 0x47, 0xF1, 0xD3, 0xBB, 0xCB, 0xF1, 0x9D,
- 0x23, 0x73, 0x71, 0xFF, 0xF3, 0xF5, 0x45, 0xFB,
- 0x61, 0x29, 0x23, 0xFD, 0xF1, 0x29, 0x3F, 0x7F,
- 0x17, 0xB7, 0x1B, 0xA9, 0x19, 0xBD, 0x57, 0xA9,
- 0xD7, 0x95, 0xA3, 0xCB, 0xED, 0x1D, 0xDB, 0x45,
- 0x7D, 0x11, 0xD1, 0x51, 0x1B, 0xED, 0x71, 0xE9,
- 0xB1, 0xD1, 0xAB, 0xAB, 0x21, 0x2B, 0x1B, 0x9F,
- 0x3B, 0x9F, 0xF7, 0xF7, 0xBD, 0x63, 0xEB, 0xAD,
- 0xDF, 0xB3, 0x6F, 0x5B, 0xDB, 0x8D, 0xA9, 0x5D,
- 0xE3, 0x7D, 0x77, 0x49, 0x47, 0xF5, 0xA7, 0xFD,
- 0xAB, 0x2F, 0x27, 0x35, 0x77, 0xD3, 0x49, 0xC9,
- 0x09, 0xEB, 0xB1, 0xF9, 0xBF, 0x4B, 0xCB, 0x2B,
- 0xEB, 0xEB, 0x05, 0xFF, 0x7D, 0xC7, 0x91, 0x8B,
- 0x09, 0x83, 0xB9, 0xB9, 0x69, 0x33, 0x39, 0x6B,
- 0x79, 0x75, 0x19, 0xBF, 0xBB, 0x07, 0x1D, 0xBD,
- 0x29, 0xBF, 0x39, 0x95, 0x93, 0x1D, 0x35, 0xC7,
- 0xC9, 0x4D, 0xE5, 0x97, 0x0B, 0x43, 0x9B, 0xF1,
- 0x16, 0x93, 0x03, 0x1F, 0xA5, 0xFB, 0xDB, 0xF3,
- 0x27, 0x4F, 0x27, 0x61, 0x05, 0x1F, 0xB9, 0x23,
- 0x2F, 0xC3, 0x81, 0xA9, 0x23, 0x71, 0x55, 0x55,
- 0xEB, 0xED, 0x41, 0xE5, 0xF3, 0x11, 0xF1, 0x43,
- 0x69, 0x03, 0xBD, 0x0B, 0x37, 0x0F, 0x51, 0x8F,
- 0x0B, 0xB5, 0x89, 0x5B, 0x67, 0xA9, 0xD9, 0x4F,
- 0x01, 0xF9, 0x21, 0x77, 0x37, 0x73, 0x79, 0xC5,
- 0x7F, 0x51, 0xC1, 0xCF, 0x97, 0xA1, 0x75, 0xAD,
- 0x35, 0x9D, 0xD3, 0xD3, 0xA7, 0x9D, 0x5D, 0x41,
- 0x6F, 0x65, 0x1B, 0xCF, 0xA9, 0x87, 0x91, 0x09
- };
- struct type86x_reply *msg = reply->message;
+ struct type86x_reply *msg = reply->msg;
unsigned short service_rc, service_rs;
- unsigned int reply_len, pad_len;
- char *data;
+ unsigned int data_len;
service_rc = msg->cprbx.ccp_rtcode;
if (unlikely(service_rc != 0)) {
@@ -648,122 +576,117 @@ static int convert_type86_ica(struct zcrypt_queue *zq,
(service_rc == 8 && service_rs == 72) ||
(service_rc == 8 && service_rs == 770) ||
(service_rc == 12 && service_rs == 769)) {
- ZCRYPT_DBF(DBF_DEBUG,
- "device=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n",
- AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid),
- (int) service_rc, (int) service_rs);
+ ZCRYPT_DBF_WARN("%s dev=%02x.%04x rc/rs=%d/%d => rc=EINVAL\n",
+ __func__, AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int)service_rc, (int)service_rs);
return -EINVAL;
}
zq->online = 0;
- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ pr_err("Crypto dev=%02x.%04x rc/rs=%d/%d online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid));
- ZCRYPT_DBF(DBF_ERR,
- "device=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n",
- AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid),
- (int) service_rc, (int) service_rs);
- return -EAGAIN; /* repeat the request on a different device. */
- }
- data = msg->text;
- reply_len = msg->length - 2;
- if (reply_len > outputdatalength)
- return -EINVAL;
- /*
- * For all encipher requests, the length of the ciphertext (reply_len)
- * will always equal the modulus length. For MEX decipher requests
- * the output needs to get padded. Minimum pad size is 10.
- *
- * Currently, the cases where padding will be added is for:
- * - PCIXCC_MCL2 using a CRT form token (since PKD didn't support
- * ZERO-PAD and CRT is only supported for PKD requests)
- * - PCICC, always
- */
- pad_len = outputdatalength - reply_len;
- if (pad_len > 0) {
- if (pad_len < 10)
- return -EINVAL;
- /* 'restore' padding left in the CEXXC card. */
- if (copy_to_user(outputdata, static_pad, pad_len - 1))
- return -EFAULT;
- if (put_user(0, outputdata + pad_len - 1))
- return -EFAULT;
+ AP_QID_QUEUE(zq->queue->qid),
+ (int)service_rc, (int)service_rs);
+ ZCRYPT_DBF_ERR("%s dev=%02x.%04x rc/rs=%d/%d => online=0 rc=EAGAIN\n",
+ __func__, AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ (int)service_rc, (int)service_rs);
+ ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
+ return -EAGAIN;
}
+ data_len = msg->length - sizeof(msg->length);
+ if (data_len > outputdatalength)
+ return -EMSGSIZE;
+
/* Copy the crypto response to user space. */
- if (copy_to_user(outputdata + pad_len, data, reply_len))
+ if (copy_to_user(outputdata, msg->data, data_len))
return -EFAULT;
return 0;
}
-/**
+/*
* Copy results from a type 86 XCRB reply message back to user space.
*
* @zq: crypto device pointer
* @reply: reply AP message.
- * @xcRB: pointer to XCRB
+ * @xcrb: pointer to XCRB
*
* Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
*/
-static int convert_type86_xcrb(struct zcrypt_queue *zq,
+static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq,
struct ap_message *reply,
- struct ica_xcRB *xcRB)
+ struct ica_xcRB *xcrb)
{
- struct type86_fmt2_msg *msg = reply->message;
- char *data = reply->message;
+ struct type86_fmt2_msg *msg = reply->msg;
+ char *data = reply->msg;
/* Copy CPRB to user */
- if (copy_to_user(xcRB->reply_control_blk_addr,
- data + msg->fmt2.offset1, msg->fmt2.count1))
+ if (xcrb->reply_control_blk_length < msg->fmt2.count1) {
+ pr_debug("reply_control_blk_length %u < required %u => EMSGSIZE\n",
+ xcrb->reply_control_blk_length, msg->fmt2.count1);
+ return -EMSGSIZE;
+ }
+ if (z_copy_to_user(userspace, xcrb->reply_control_blk_addr,
+ data + msg->fmt2.offset1, msg->fmt2.count1))
return -EFAULT;
- xcRB->reply_control_blk_length = msg->fmt2.count1;
+ xcrb->reply_control_blk_length = msg->fmt2.count1;
/* Copy data buffer to user */
- if (msg->fmt2.count2)
- if (copy_to_user(xcRB->reply_data_addr,
- data + msg->fmt2.offset2, msg->fmt2.count2))
+ if (msg->fmt2.count2) {
+ if (xcrb->reply_data_length < msg->fmt2.count2) {
+ pr_debug("reply_data_length %u < required %u => EMSGSIZE\n",
+ xcrb->reply_data_length, msg->fmt2.count2);
+ return -EMSGSIZE;
+ }
+ if (z_copy_to_user(userspace, xcrb->reply_data_addr,
+ data + msg->fmt2.offset2, msg->fmt2.count2))
return -EFAULT;
- xcRB->reply_data_length = msg->fmt2.count2;
+ }
+ xcrb->reply_data_length = msg->fmt2.count2;
+
return 0;
}
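
The new userspace flag threaded through these converters lets one code path serve both the ioctl interface and in-kernel callers. A sketch of the wrapper pattern behind z_copy_to_user()/z_copy_from_user() (simplified; the real helpers live in zcrypt_api.h):

	#include <linux/types.h>
	#include <linux/uaccess.h>
	#include <linux/string.h>

	/* 'to' is a genuine user pointer only when userspace is true; kernel
	 * callers pass kernel addresses force-cast to __user so that both
	 * paths can share the common prototype. */
	static inline unsigned long demo_copy_to_user(bool userspace,
						      void __user *to,
						      const void *from,
						      unsigned long n)
	{
		if (userspace)
			return copy_to_user(to, from, n);
		memcpy((void __force *)to, from, n);
		return 0;
	}
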
-/**
+/*
* Copy results from a type 86 EP11 XCRB reply message back to user space.
*
* @zq: crypto device pointer
* @reply: reply AP message.
- * @xcRB: pointer to EP11 user request block
+ * @xcrb: pointer to EP11 user request block
*
* Returns 0 on success or -EINVAL, -EFAULT, -EAGAIN in case of an error.
*/
-static int convert_type86_ep11_xcrb(struct zcrypt_queue *zq,
+static int convert_type86_ep11_xcrb(bool userspace, struct zcrypt_queue *zq,
struct ap_message *reply,
- struct ep11_urb *xcRB)
+ struct ep11_urb *xcrb)
{
- struct type86_fmt2_msg *msg = reply->message;
- char *data = reply->message;
+ struct type86_fmt2_msg *msg = reply->msg;
+ char *data = reply->msg;
- if (xcRB->resp_len < msg->fmt2.count1)
- return -EINVAL;
+ if (xcrb->resp_len < msg->fmt2.count1) {
+ pr_debug("resp_len %u < required %u => EMSGSIZE\n",
+ (unsigned int)xcrb->resp_len, msg->fmt2.count1);
+ return -EMSGSIZE;
+ }
/* Copy response CPRB to user */
- if (copy_to_user((char __force __user *)xcRB->resp,
- data + msg->fmt2.offset1, msg->fmt2.count1))
+ if (z_copy_to_user(userspace, (char __force __user *)xcrb->resp,
+ data + msg->fmt2.offset1, msg->fmt2.count1))
return -EFAULT;
- xcRB->resp_len = msg->fmt2.count1;
+ xcrb->resp_len = msg->fmt2.count1;
return 0;
}
static int convert_type86_rng(struct zcrypt_queue *zq,
- struct ap_message *reply,
- char *buffer)
+ struct ap_message *reply,
+ char *buffer)
{
struct {
struct type86_hdr hdr;
struct type86_fmt2_ext fmt2;
struct CPRBX cprbx;
- } __packed * msg = reply->message;
- char *data = reply->message;
+ } __packed * msg = reply->msg;
+ char *data = reply->msg;
if (msg->cprbx.ccp_rtcode != 0 || msg->cprbx.ccp_rscode != 0)
return -EINVAL;
@@ -772,11 +695,11 @@ static int convert_type86_rng(struct zcrypt_queue *zq,
}
static int convert_response_ica(struct zcrypt_queue *zq,
- struct ap_message *reply,
- char __user *outputdata,
- unsigned int outputdatalength)
+ struct ap_message *reply,
+ char __user *outputdata,
+ unsigned int outputdatalength)
{
- struct type86x_reply *msg = reply->message;
+ struct type86x_reply *msg = reply->msg;
switch (msg->hdr.type) {
case TYPE82_RSP_CODE:
@@ -784,78 +707,76 @@ static int convert_response_ica(struct zcrypt_queue *zq,
return convert_error(zq, reply);
case TYPE86_RSP_CODE:
if (msg->cprbx.ccp_rtcode &&
- (msg->cprbx.ccp_rscode == 0x14f) &&
- (outputdatalength > 256)) {
+ msg->cprbx.ccp_rscode == 0x14f &&
+ outputdatalength > 256) {
if (zq->zcard->max_exp_bit_length <= 17) {
zq->zcard->max_exp_bit_length = 17;
return -EAGAIN;
- } else
+ } else {
return -EINVAL;
+ }
}
if (msg->hdr.reply_code)
return convert_error(zq, reply);
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_ica(zq, reply,
outputdata, outputdatalength);
- /*
- * Fall through, no break, incorrect cprb version is an unknown
- * response
- */
- default: /* Unknown response type, this should NEVER EVER happen */
+ fallthrough; /* wrong cprb version is an unknown response */
+ default:
+ /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid));
- ZCRYPT_DBF(DBF_ERR,
- "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
- AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid),
- (int) msg->hdr.type);
- return -EAGAIN; /* repeat the request on a different device. */
+ AP_QID_QUEUE(zq->queue->qid),
+ (int)msg->hdr.type);
+ ZCRYPT_DBF_ERR(
+ "%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ __func__, AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type);
+ ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
+ return -EAGAIN;
}
}
-static int convert_response_xcrb(struct zcrypt_queue *zq,
- struct ap_message *reply,
- struct ica_xcRB *xcRB)
+static int convert_response_xcrb(bool userspace, struct zcrypt_queue *zq,
+ struct ap_message *reply,
+ struct ica_xcRB *xcrb)
{
- struct type86x_reply *msg = reply->message;
+ struct type86x_reply *msg = reply->msg;
switch (msg->hdr.type) {
case TYPE82_RSP_CODE:
case TYPE88_RSP_CODE:
- xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
+ xcrb->status = 0x0008044DL; /* HDD_InvalidParm */
return convert_error(zq, reply);
case TYPE86_RSP_CODE:
if (msg->hdr.reply_code) {
- memcpy(&(xcRB->status), msg->fmt2.apfs, sizeof(u32));
+ memcpy(&xcrb->status, msg->fmt2.apfs, sizeof(u32));
return convert_error(zq, reply);
}
if (msg->cprbx.cprb_ver_id == 0x02)
- return convert_type86_xcrb(zq, reply, xcRB);
- /*
- * Fall through, no break, incorrect cprb version is an unknown
- * response
- */
+ return convert_type86_xcrb(userspace, zq, reply, xcrb);
+ fallthrough; /* wrong cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
- xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
+ xcrb->status = 0x0008044DL; /* HDD_InvalidParm */
zq->online = 0;
- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid));
- ZCRYPT_DBF(DBF_ERR,
- "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
- AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid),
- (int) msg->hdr.type);
- return -EAGAIN; /* repeat the request on a different device. */
+ AP_QID_QUEUE(zq->queue->qid),
+ (int)msg->hdr.type);
+ ZCRYPT_DBF_ERR(
+ "%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ __func__, AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type);
+ ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
+ return -EAGAIN;
}
}
-static int convert_response_ep11_xcrb(struct zcrypt_queue *zq,
- struct ap_message *reply, struct ep11_urb *xcRB)
+static int convert_response_ep11_xcrb(bool userspace, struct zcrypt_queue *zq,
+ struct ap_message *reply, struct ep11_urb *xcrb)
{
- struct type86_ep11_reply *msg = reply->message;
+ struct type86_ep11_reply *msg = reply->msg;
switch (msg->hdr.type) {
case TYPE82_RSP_CODE:
@@ -865,27 +786,28 @@ static int convert_response_ep11_xcrb(struct zcrypt_queue *zq,
if (msg->hdr.reply_code)
return convert_error(zq, reply);
if (msg->cprbx.cprb_ver_id == 0x04)
- return convert_type86_ep11_xcrb(zq, reply, xcRB);
- /* Fall through, no break, incorrect cprb version is an unknown resp.*/
+ return convert_type86_ep11_xcrb(userspace, zq, reply, xcrb);
+ fallthrough; /* wrong cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid));
- ZCRYPT_DBF(DBF_ERR,
- "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
- AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid),
- (int) msg->hdr.type);
- return -EAGAIN; /* repeat the request on a different device. */
+ AP_QID_QUEUE(zq->queue->qid),
+ (int)msg->hdr.type);
+ ZCRYPT_DBF_ERR(
+ "%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ __func__, AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type);
+ ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
+ return -EAGAIN;
}
}
static int convert_response_rng(struct zcrypt_queue *zq,
- struct ap_message *reply,
- char *data)
+ struct ap_message *reply,
+ char *data)
{
- struct type86x_reply *msg = reply->message;
+ struct type86x_reply *msg = reply->msg;
switch (msg->hdr.type) {
case TYPE82_RSP_CODE:
@@ -896,25 +818,23 @@ static int convert_response_rng(struct zcrypt_queue *zq,
return -EINVAL;
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_rng(zq, reply, data);
- /*
- * Fall through, no break, incorrect cprb version is an unknown
- * response
- */
+ fallthrough; /* wrong cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
- pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
+ pr_err("Crypto dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid));
- ZCRYPT_DBF(DBF_ERR,
- "device=%02x.%04x rtype=0x%02x => online=0 rc=EAGAIN\n",
- AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid),
- (int) msg->hdr.type);
- return -EAGAIN; /* repeat the request on a different device. */
+ AP_QID_QUEUE(zq->queue->qid),
+ (int)msg->hdr.type);
+ ZCRYPT_DBF_ERR(
+ "%s dev=%02x.%04x unknown response type 0x%02x => online=0 rc=EAGAIN\n",
+ __func__, AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid), (int)msg->hdr.type);
+ ap_send_online_uevent(&zq->queue->ap_dev, zq->online);
+ return -EAGAIN;
}
}
-/**
+/*
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
@@ -923,47 +843,62 @@ static int convert_response_rng(struct zcrypt_queue *zq,
* @reply: pointer to the AP reply message
*/
static void zcrypt_msgtype6_receive(struct ap_queue *aq,
- struct ap_message *msg,
- struct ap_message *reply)
+ struct ap_message *msg,
+ struct ap_message *reply)
{
static struct error_hdr error_reply = {
.type = TYPE82_RSP_CODE,
.reply_code = REP82_ERROR_MACHINE_FAILURE,
};
- struct response_type *resp_type =
- (struct response_type *) msg->private;
+ struct ap_response_type *resp_type = &msg->response;
struct type86x_reply *t86r;
- int length;
+ int len;
/* Copy the reply message to the request message buffer. */
if (!reply)
goto out; /* ap_msg->rc indicates the error */
- t86r = reply->message;
+ t86r = reply->msg;
if (t86r->hdr.type == TYPE86_RSP_CODE &&
- t86r->cprbx.cprb_ver_id == 0x02) {
+ t86r->cprbx.cprb_ver_id == 0x02) {
switch (resp_type->type) {
case CEXXC_RESPONSE_TYPE_ICA:
- length = sizeof(struct type86x_reply)
- + t86r->length - 2;
- length = min(CEXXC_MAX_ICA_RESPONSE_SIZE, length);
- memcpy(msg->message, reply->message, length);
+ len = sizeof(struct type86x_reply) + t86r->length;
+ if (len > reply->bufsize || len > msg->bufsize ||
+ len != reply->len) {
+ pr_debug("len mismatch => EMSGSIZE\n");
+ msg->rc = -EMSGSIZE;
+ goto out;
+ }
+ memcpy(msg->msg, reply->msg, len);
+ msg->len = len;
break;
case CEXXC_RESPONSE_TYPE_XCRB:
- length = t86r->fmt2.offset2 + t86r->fmt2.count2;
- length = min(MSGTYPE06_MAX_MSG_SIZE, length);
- memcpy(msg->message, reply->message, length);
+ if (t86r->fmt2.count2)
+ len = t86r->fmt2.offset2 + t86r->fmt2.count2;
+ else
+ len = t86r->fmt2.offset1 + t86r->fmt2.count1;
+ if (len > reply->bufsize || len > msg->bufsize ||
+ len != reply->len) {
+ pr_debug("len mismatch => EMSGSIZE\n");
+ msg->rc = -EMSGSIZE;
+ goto out;
+ }
+ memcpy(msg->msg, reply->msg, len);
+ msg->len = len;
break;
default:
- memcpy(msg->message, &error_reply,
- sizeof(error_reply));
+ memcpy(msg->msg, &error_reply, sizeof(error_reply));
+ msg->len = sizeof(error_reply);
}
- } else
- memcpy(msg->message, reply->message, sizeof(error_reply));
+ } else {
+ memcpy(msg->msg, reply->msg, sizeof(error_reply));
+ msg->len = sizeof(error_reply);
+ }
out:
- complete(&(resp_type->work));
+ complete(&resp_type->work);
}
-/**
+/*
* This function is called from the AP bus code after a crypto request
* "msg" has finished with the reply message "reply".
* It is called from tasklet context.
@@ -979,36 +914,43 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
.type = TYPE82_RSP_CODE,
.reply_code = REP82_ERROR_MACHINE_FAILURE,
};
- struct response_type *resp_type =
- (struct response_type *)msg->private;
+ struct ap_response_type *resp_type = &msg->response;
struct type86_ep11_reply *t86r;
- int length;
+ int len;
/* Copy the reply message to the request message buffer. */
if (!reply)
goto out; /* ap_msg->rc indicates the error */
- t86r = reply->message;
+ t86r = reply->msg;
if (t86r->hdr.type == TYPE86_RSP_CODE &&
t86r->cprbx.cprb_ver_id == 0x04) {
switch (resp_type->type) {
case CEXXC_RESPONSE_TYPE_EP11:
- length = t86r->fmt2.offset1 + t86r->fmt2.count1;
- length = min(MSGTYPE06_MAX_MSG_SIZE, length);
- memcpy(msg->message, reply->message, length);
+ len = t86r->fmt2.offset1 + t86r->fmt2.count1;
+ if (len > reply->bufsize || len > msg->bufsize ||
+ len != reply->len) {
+ pr_debug("len mismatch => EMSGSIZE\n");
+ msg->rc = -EMSGSIZE;
+ goto out;
+ }
+ memcpy(msg->msg, reply->msg, len);
+ msg->len = len;
break;
default:
- memcpy(msg->message, &error_reply, sizeof(error_reply));
+ memcpy(msg->msg, &error_reply, sizeof(error_reply));
+ msg->len = sizeof(error_reply);
}
} else {
- memcpy(msg->message, reply->message, sizeof(error_reply));
+ memcpy(msg->msg, reply->msg, sizeof(error_reply));
+ msg->len = sizeof(error_reply);
}
out:
- complete(&(resp_type->work));
+ complete(&resp_type->work);
}
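
Both receive callbacks run in tasklet context and end by signalling the completion embedded in the request, on which the submitting thread sleeps. A minimal sketch of that handshake (names illustrative):

	#include <linux/completion.h>
	#include <linux/errno.h>

	struct demo_msg {
		struct completion work;	/* signalled by the receive callback */
		int rc;			/* result, set before completion */
	};

	/* tasklet side: record the outcome and wake the submitter */
	static void demo_receive(struct demo_msg *msg, int result)
	{
		msg->rc = result;
		complete(&msg->work);
	}

	/* submitting thread: init, queue, sleep, honour pending signals */
	static int demo_submit(struct demo_msg *msg)
	{
		init_completion(&msg->work);
		/* ... hand the message to the AP queue here ... */
		if (wait_for_completion_interruptible(&msg->work))
			return -ERESTARTSYS;	/* caller then cancels the message */
		return msg->rc;
	}
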
static atomic_t zcrypt_step = ATOMIC_INIT(0);
-/**
+/*
* The request distributor calls this function if it picked the CEXxC
* device to handle a modexpo request.
* @zq: pointer to zcrypt_queue structure that identifies the
@@ -1016,43 +958,46 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0);
* @mex: pointer to the modexpo request buffer
*/
static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq,
- struct ica_rsa_modexpo *mex)
+ struct ica_rsa_modexpo *mex,
+ struct ap_message *ap_msg)
{
- struct ap_message ap_msg;
- struct response_type resp_type = {
- .type = CEXXC_RESPONSE_TYPE_ICA,
- };
+ struct ap_response_type *resp_type = &ap_msg->response;
int rc;
- ap_init_message(&ap_msg);
- ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
- if (!ap_msg.message)
+ ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!ap_msg->msg)
return -ENOMEM;
- ap_msg.receive = zcrypt_msgtype6_receive;
- ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
- atomic_inc_return(&zcrypt_step);
- ap_msg.private = &resp_type;
- rc = ICAMEX_msg_to_type6MEX_msgX(zq, &ap_msg, mex);
+ ap_msg->bufsize = PAGE_SIZE;
+ ap_msg->receive = zcrypt_msgtype6_receive;
+ ap_msg->psmid = (((unsigned long)current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ rc = icamex_msg_to_type6mex_msgx(zq, ap_msg, mex);
+ if (rc)
+ goto out_free;
+ resp_type->type = CEXXC_RESPONSE_TYPE_ICA;
+ init_completion(&resp_type->work);
+ rc = ap_queue_message(zq->queue, ap_msg);
if (rc)
goto out_free;
- init_completion(&resp_type.work);
- ap_queue_message(zq->queue, &ap_msg);
- rc = wait_for_completion_interruptible(&resp_type.work);
+ rc = wait_for_completion_interruptible(&resp_type->work);
if (rc == 0) {
- rc = ap_msg.rc;
+ rc = ap_msg->rc;
if (rc == 0)
- rc = convert_response_ica(zq, &ap_msg,
+ rc = convert_response_ica(zq, ap_msg,
mex->outputdata,
mex->outputdatalength);
- } else
+ } else {
/* Signal pending. */
- ap_cancel_message(zq->queue, &ap_msg);
+ ap_cancel_message(zq->queue, ap_msg);
+ }
+
out_free:
- free_page((unsigned long) ap_msg.message);
+ free_page((unsigned long)ap_msg->msg);
+ ap_msg->msg = NULL;
return rc;
}
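
The psmid assigned above is what matches replies to requests: the caller's pid fills the high 32 bits and a global sequence counter the low 32, keeping ids unique across concurrent submitters. As a standalone sketch:

	#include <linux/atomic.h>
	#include <linux/sched.h>

	static atomic_t demo_step = ATOMIC_INIT(0);

	/* pid in bits 63..32, monotonically increasing counter in bits 31..0 */
	static unsigned long demo_make_psmid(void)
	{
		return (((unsigned long)current->pid) << 32) +
		       atomic_inc_return(&demo_step);
	}
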
-/**
+/*
* The request distributor calls this function if it picked the CEXxC
* device to handle a modexpo_crt request.
* @zq: pointer to zcrypt_queue structure that identifies the
@@ -1060,145 +1005,179 @@ out_free:
* @crt: pointer to the modexpoc_crt request buffer
*/
static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq,
- struct ica_rsa_modexpo_crt *crt)
+ struct ica_rsa_modexpo_crt *crt,
+ struct ap_message *ap_msg)
{
- struct ap_message ap_msg;
- struct response_type resp_type = {
- .type = CEXXC_RESPONSE_TYPE_ICA,
- };
+ struct ap_response_type *resp_type = &ap_msg->response;
int rc;
- ap_init_message(&ap_msg);
- ap_msg.message = (void *) get_zeroed_page(GFP_KERNEL);
- if (!ap_msg.message)
+ ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!ap_msg->msg)
return -ENOMEM;
- ap_msg.receive = zcrypt_msgtype6_receive;
- ap_msg.psmid = (((unsigned long long) current->pid) << 32) +
- atomic_inc_return(&zcrypt_step);
- ap_msg.private = &resp_type;
- rc = ICACRT_msg_to_type6CRT_msgX(zq, &ap_msg, crt);
+ ap_msg->bufsize = PAGE_SIZE;
+ ap_msg->receive = zcrypt_msgtype6_receive;
+ ap_msg->psmid = (((unsigned long)current->pid) << 32) +
+ atomic_inc_return(&zcrypt_step);
+ rc = icacrt_msg_to_type6crt_msgx(zq, ap_msg, crt);
+ if (rc)
+ goto out_free;
+ resp_type->type = CEXXC_RESPONSE_TYPE_ICA;
+ init_completion(&resp_type->work);
+ rc = ap_queue_message(zq->queue, ap_msg);
if (rc)
goto out_free;
- init_completion(&resp_type.work);
- ap_queue_message(zq->queue, &ap_msg);
- rc = wait_for_completion_interruptible(&resp_type.work);
+ rc = wait_for_completion_interruptible(&resp_type->work);
if (rc == 0) {
- rc = ap_msg.rc;
+ rc = ap_msg->rc;
if (rc == 0)
- rc = convert_response_ica(zq, &ap_msg,
+ rc = convert_response_ica(zq, ap_msg,
crt->outputdata,
crt->outputdatalength);
} else {
/* Signal pending. */
- ap_cancel_message(zq->queue, &ap_msg);
+ ap_cancel_message(zq->queue, ap_msg);
}
+
out_free:
- free_page((unsigned long) ap_msg.message);
+ free_page((unsigned long)ap_msg->msg);
+ ap_msg->msg = NULL;
return rc;
}
-/**
- * Fetch function code from cprb.
- * Extracting the fc requires to copy the cprb from userspace.
- * So this function allocates memory and needs an ap_msg prepared
- * by the caller with ap_init_message(). Also the caller has to
- * make sure ap_release_message() is always called even on failure.
+/*
+ * Prepare a CCA AP msg request: fetch the required data from
+ * userspace, build the AP msg, fill in some fields of the ap_message
+ * struct, and extract some data from the CPRB to give back to the caller.
+ * This function assumes that ap_msg has been initialized with
+ * ap_init_apmsg() and thus a valid buffer with the size of
+ * ap_msg->bufsize is available within ap_msg. Also the caller has
+ * to make sure ap_release_apmsg() is always called even on failure.
*/
-unsigned int get_cprb_fc(struct ica_xcRB *xcRB,
- struct ap_message *ap_msg,
- unsigned int *func_code, unsigned short **dom)
+int prep_cca_ap_msg(bool userspace, struct ica_xcRB *xcrb,
+ struct ap_message *ap_msg,
+ unsigned int *func_code, unsigned short **dom)
{
- struct response_type resp_type = {
- .type = CEXXC_RESPONSE_TYPE_XCRB,
- };
+ struct ap_response_type *resp_type = &ap_msg->response;
- ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
- if (!ap_msg->message)
- return -ENOMEM;
ap_msg->receive = zcrypt_msgtype6_receive;
- ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
- ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
- if (!ap_msg->private)
- return -ENOMEM;
- return XCRB_msg_to_type6CPRB_msgX(ap_msg, xcRB, func_code, dom);
+ resp_type->type = CEXXC_RESPONSE_TYPE_XCRB;
+ return xcrb_msg_to_type6cprb_msgx(userspace, ap_msg, xcrb, func_code, dom);
}
-/**
+/*
* The request distributor calls this function if it picked the CEXxC
* device to handle a send_cprb request.
* @zq: pointer to zcrypt_queue structure that identifies the
* CEXxC device to the request distributor
- * @xcRB: pointer to the send_cprb request buffer
+ * @xcrb: pointer to the send_cprb request buffer
*/
-static long zcrypt_msgtype6_send_cprb(struct zcrypt_queue *zq,
- struct ica_xcRB *xcRB,
- struct ap_message *ap_msg)
+static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq,
+ struct ica_xcRB *xcrb,
+ struct ap_message *ap_msg)
{
- int rc;
- struct response_type *rtype = (struct response_type *)(ap_msg->private);
+ struct ap_response_type *resp_type = &ap_msg->response;
+ struct {
+ struct type6_hdr hdr;
+ struct CPRBX cprbx;
+ /* ... more data blocks ... */
+ } __packed * msg = ap_msg->msg;
+ unsigned int max_payload_size;
+ int rc, delta;
+
+ /* calculate maximum payload for this card and msg type */
+ max_payload_size = zq->reply.bufsize - sizeof(struct type86_fmt2_msg);
+
+ /* limit each of the two fromcard length fields to the maximum payload size */
+ msg->hdr.fromcardlen1 = min(msg->hdr.fromcardlen1, max_payload_size);
+ msg->hdr.fromcardlen2 = min(msg->hdr.fromcardlen2, max_payload_size);
+
+ /* calculate delta if the sum of both exceeds max payload size */
+ delta = msg->hdr.fromcardlen1 + msg->hdr.fromcardlen2
+ - max_payload_size;
+ if (delta > 0) {
+ /*
+ * Sum exceeds maximum payload size, prune fromcardlen1
+ * (always trust fromcardlen2)
+ */
+ if (delta > msg->hdr.fromcardlen1) {
+ rc = -EINVAL;
+ goto out;
+ }
+ msg->hdr.fromcardlen1 -= delta;
+ }
- init_completion(&rtype->work);
- ap_queue_message(zq->queue, ap_msg);
- rc = wait_for_completion_interruptible(&rtype->work);
+ init_completion(&resp_type->work);
+ rc = ap_queue_message(zq->queue, ap_msg);
+ if (rc)
+ goto out;
+ rc = wait_for_completion_interruptible(&resp_type->work);
if (rc == 0) {
rc = ap_msg->rc;
if (rc == 0)
- rc = convert_response_xcrb(zq, ap_msg, xcRB);
- } else
+ rc = convert_response_xcrb(userspace, zq, ap_msg, xcrb);
+ } else {
/* Signal pending. */
ap_cancel_message(zq->queue, ap_msg);
+ }
+
+ if (rc == -EAGAIN && ap_msg->flags & AP_MSG_FLAG_ADMIN)
+ rc = -EIO; /* do not retry administrative requests */
+out:
+ if (rc)
+ pr_debug("send cprb at dev=%02x.%04x rc=%d\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
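
The reply-size negotiation above reduces to a small policy: cap each requested length at the payload limit, then shave any remaining excess off the first field only. The same logic as a self-contained helper (names illustrative):

	#include <linux/errno.h>
	#include <linux/minmax.h>

	/* Clamp two requested reply lengths so their sum fits max_payload;
	 * excess beyond the individual caps is pruned from *len1 only, since
	 * *len2 is trusted as given. The -EINVAL branch is defensive: after
	 * the caps the delta can never exceed *len1. */
	static int clamp_reply_lens(unsigned int *len1, unsigned int *len2,
				    unsigned int max_payload)
	{
		int delta;

		*len1 = min(*len1, max_payload);
		*len2 = min(*len2, max_payload);

		delta = *len1 + *len2 - max_payload;
		if (delta > 0) {
			if (delta > (int)*len1)
				return -EINVAL;
			*len1 -= delta;
		}
		return 0;
	}
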
-/**
- * Fetch function code from ep11 cprb.
- * Extracting the fc requires to copy the ep11 cprb from userspace.
- * So this function allocates memory and needs an ap_msg prepared
- * by the caller with ap_init_message(). Also the caller has to
- * make sure ap_release_message() is always called even on failure.
+/*
+ * Prepare an EP11 AP msg request: fetch the required data from
+ * userspace, build the AP msg, fill in some fields of the ap_message
+ * struct, and extract some data from the CPRB to give back to the caller.
+ * This function assumes that ap_msg has been initialized with
+ * ap_init_apmsg() and thus a valid buffer with the size of
+ * ap_msg->bufsize is available within ap_msg. Also the caller has
+ * to make sure ap_release_apmsg() is always called even on failure.
*/
-unsigned int get_ep11cprb_fc(struct ep11_urb *xcrb,
- struct ap_message *ap_msg,
- unsigned int *func_code)
+int prep_ep11_ap_msg(bool userspace, struct ep11_urb *xcrb,
+ struct ap_message *ap_msg,
+ unsigned int *func_code, unsigned int *domain)
{
- struct response_type resp_type = {
- .type = CEXXC_RESPONSE_TYPE_EP11,
- };
+ struct ap_response_type *resp_type = &ap_msg->response;
- ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
- if (!ap_msg->message)
- return -ENOMEM;
ap_msg->receive = zcrypt_msgtype6_receive_ep11;
- ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
- ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
- if (!ap_msg->private)
- return -ENOMEM;
- return xcrb_msg_to_type6_ep11cprb_msgx(ap_msg, xcrb, func_code);
+ resp_type->type = CEXXC_RESPONSE_TYPE_EP11;
+ return xcrb_msg_to_type6_ep11cprb_msgx(userspace, ap_msg, xcrb,
+ func_code, domain);
}
-/**
+/*
* The request distributor calls this function if it picked the CEX4P
* device to handle a send_ep11_cprb request.
* @zq: pointer to zcrypt_queue structure that identifies the
* CEX4P device to the request distributor
- * @xcRB: pointer to the ep11 user request block
+ * @xcrb: pointer to the ep11 user request block
*/
-static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_queue *zq,
+static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *zq,
struct ep11_urb *xcrb,
struct ap_message *ap_msg)
{
int rc;
unsigned int lfmt;
- struct response_type *rtype = (struct response_type *)(ap_msg->private);
+ struct ap_response_type *resp_type = &ap_msg->response;
struct {
struct type6_hdr hdr;
struct ep11_cprb cprbx;
unsigned char pld_tag; /* fixed value 0x30 */
unsigned char pld_lenfmt; /* payload length format */
- } __packed * msg = ap_msg->message;
+ } __packed * msg = ap_msg->msg;
struct pld_hdr {
unsigned char func_tag; /* fixed value 0x4 */
unsigned char func_len; /* fixed value 0x4 */
@@ -1208,8 +1187,7 @@ static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_queue *zq,
unsigned int dom_val; /* domain id */
} __packed * payload_hdr = NULL;
-
- /**
+ /*
* The target domain field within the cprb body/payload block will be
* replaced by the usage domain for non-management commands only.
* Therefore we check the first bit of the 'flags' parameter for
@@ -1235,49 +1213,70 @@ static long zcrypt_msgtype6_send_ep11_cprb(struct zcrypt_queue *zq,
} else {
lfmt = 1; /* length format #1 */
}
- payload_hdr = (struct pld_hdr *)((&(msg->pld_lenfmt))+lfmt);
+ payload_hdr = (struct pld_hdr *)((&msg->pld_lenfmt) + lfmt);
payload_hdr->dom_val = (unsigned int)
AP_QID_QUEUE(zq->queue->qid);
}
- init_completion(&rtype->work);
- ap_queue_message(zq->queue, ap_msg);
- rc = wait_for_completion_interruptible(&rtype->work);
+ /*
+ * Set the queue's reply buffer length minus the two prepend headers
+ * as reply limit for the card firmware.
+ */
+ msg->hdr.fromcardlen1 = zq->reply.bufsize -
+ sizeof(struct type86_hdr) - sizeof(struct type86_fmt2_ext);
+
+ init_completion(&resp_type->work);
+ rc = ap_queue_message(zq->queue, ap_msg);
+ if (rc)
+ goto out;
+ rc = wait_for_completion_interruptible(&resp_type->work);
if (rc == 0) {
rc = ap_msg->rc;
if (rc == 0)
- rc = convert_response_ep11_xcrb(zq, ap_msg, xcrb);
- } else
+ rc = convert_response_ep11_xcrb(userspace, zq, ap_msg, xcrb);
+ } else {
/* Signal pending. */
ap_cancel_message(zq->queue, ap_msg);
+ }
+
+ if (rc == -EAGAIN && ap_msg->flags & AP_MSG_FLAG_ADMIN)
+ rc = -EIO; /* do not retry administrative requests */
+out:
+ if (rc)
+ pr_debug("send cprb at dev=%02x.%04x rc=%d\n",
+ AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
-unsigned int get_rng_fc(struct ap_message *ap_msg, int *func_code,
- unsigned int *domain)
+/*
+ * Prepare a CEXxC get-random request AP message.
+ * This function assumes that ap_msg has been initialized with
+ * ap_init_apmsg() and thus a valid buffer with the size of
+ * ap_max_msg_size is available within ap_msg. Also the caller has
+ * to make sure ap_release_apmsg() is always called even on failure.
+ */
+int prep_rng_ap_msg(struct ap_message *ap_msg, int *func_code,
+ unsigned int *domain)
{
- struct response_type resp_type = {
- .type = CEXXC_RESPONSE_TYPE_XCRB,
- };
+ struct ap_response_type *resp_type = &ap_msg->response;
- ap_msg->message = kmalloc(MSGTYPE06_MAX_MSG_SIZE, GFP_KERNEL);
- if (!ap_msg->message)
- return -ENOMEM;
+ if (ap_msg->bufsize < AP_DEFAULT_MAX_MSG_SIZE)
+ return -EMSGSIZE;
ap_msg->receive = zcrypt_msgtype6_receive;
- ap_msg->psmid = (((unsigned long long) current->pid) << 32) +
+ ap_msg->psmid = (((unsigned long)current->pid) << 32) +
atomic_inc_return(&zcrypt_step);
- ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL);
- if (!ap_msg->private)
- return -ENOMEM;
- rng_type6CPRB_msgX(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain);
+ resp_type->type = CEXXC_RESPONSE_TYPE_XCRB;
+
+ rng_type6cprb_msgx(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain);
*func_code = HWRNG;
return 0;
}
-/**
+/*
* The request distributor calls this function if it picked the CEXxC
* device to generate random data.
* @zq: pointer to zcrypt_queue structure that identifies the
@@ -1295,37 +1294,32 @@ static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq,
char rule[8];
short int verb_length;
short int key_length;
- } __packed * msg = ap_msg->message;
- struct response_type *rtype = (struct response_type *)(ap_msg->private);
+ } __packed * msg = ap_msg->msg;
+ struct ap_response_type *resp_type = &ap_msg->response;
int rc;
msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid);
- init_completion(&rtype->work);
- ap_queue_message(zq->queue, ap_msg);
- rc = wait_for_completion_interruptible(&rtype->work);
+ init_completion(&resp_type->work);
+ rc = ap_queue_message(zq->queue, ap_msg);
+ if (rc)
+ goto out;
+ rc = wait_for_completion_interruptible(&resp_type->work);
if (rc == 0) {
rc = ap_msg->rc;
if (rc == 0)
rc = convert_response_rng(zq, ap_msg, buffer);
- } else
+ } else {
/* Signal pending. */
ap_cancel_message(zq->queue, ap_msg);
-
+ }
+out:
return rc;
}
-/**
+/*
* The crypto operations for a CEXxC card.
*/
-static struct zcrypt_ops zcrypt_msgtype6_norng_ops = {
- .owner = THIS_MODULE,
- .name = MSGTYPE06_NAME,
- .variant = MSGTYPE06_VARIANT_NORNG,
- .rsa_modexpo = zcrypt_msgtype6_modexpo,
- .rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
- .send_cprb = zcrypt_msgtype6_send_cprb,
-};
static struct zcrypt_ops zcrypt_msgtype6_ops = {
.owner = THIS_MODULE,
@@ -1348,14 +1342,12 @@ static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = {
void __init zcrypt_msgtype6_init(void)
{
- zcrypt_msgtype_register(&zcrypt_msgtype6_norng_ops);
zcrypt_msgtype_register(&zcrypt_msgtype6_ops);
zcrypt_msgtype_register(&zcrypt_msgtype6_ep11_ops);
}
void __exit zcrypt_msgtype6_exit(void)
{
- zcrypt_msgtype_unregister(&zcrypt_msgtype6_norng_ops);
zcrypt_msgtype_unregister(&zcrypt_msgtype6_ops);
zcrypt_msgtype_unregister(&zcrypt_msgtype6_ep11_ops);
}
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.h b/drivers/s390/crypto/zcrypt_msgtype6.h
index 41a0df5f070f..6f5ced8d6cda 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.h
+++ b/drivers/s390/crypto/zcrypt_msgtype6.h
@@ -19,8 +19,6 @@
#define MSGTYPE06_VARIANT_NORNG 1
#define MSGTYPE06_VARIANT_EP11 2
-#define MSGTYPE06_MAX_MSG_SIZE (12*1024)
-
/**
* The type 6 message family is associated with CEXxC/CEXxP cards.
*
@@ -47,14 +45,14 @@ struct type6_hdr {
unsigned char reserved5[2]; /* 0x0000 */
unsigned char function_code[2]; /* for PKD, 0x5044 (ascii 'PD') */
unsigned char reserved6[2]; /* 0x0000 */
- unsigned int ToCardLen1; /* (request CPRB len + 3) & -4 */
- unsigned int ToCardLen2; /* db len 0x00000000 for PKD */
- unsigned int ToCardLen3; /* 0x00000000 */
- unsigned int ToCardLen4; /* 0x00000000 */
- unsigned int FromCardLen1; /* response buffer length */
- unsigned int FromCardLen2; /* db len 0x00000000 for PKD */
- unsigned int FromCardLen3; /* 0x00000000 */
- unsigned int FromCardLen4; /* 0x00000000 */
+ unsigned int tocardlen1; /* (request CPRB len + 3) & -4 */
+ unsigned int tocardlen2; /* db len 0x00000000 for PKD */
+ unsigned int tocardlen3; /* 0x00000000 */
+ unsigned int tocardlen4; /* 0x00000000 */
+ unsigned int fromcardlen1; /* response buffer length */
+ unsigned int fromcardlen2; /* db len 0x00000000 for PKD */
+ unsigned int fromcardlen3; /* 0x00000000 */
+ unsigned int fromcardlen4; /* 0x00000000 */
} __packed;
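
The "(request CPRB len + 3) & -4" note on tocardlen1 is the 4-byte round-up written as bit arithmetic: -4 is ~3 in two's complement, so adding 3 and masking clears the low two bits. Two compile-time checks of the identity:

	/* (x + 3) & -4 rounds x up to the next multiple of 4 (-4 == ~3) */
	_Static_assert(((61 + 3) & -4) == 64, "61 rounds up to 64");
	_Static_assert(((64 + 3) & -4) == 64, "64 is already aligned");
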
/**
@@ -96,11 +94,14 @@ struct type86_fmt2_ext {
unsigned int offset4; /* 0x00000000 */
} __packed;
-unsigned int get_cprb_fc(struct ica_xcRB *, struct ap_message *,
- unsigned int *, unsigned short **);
-unsigned int get_ep11cprb_fc(struct ep11_urb *, struct ap_message *,
- unsigned int *);
-unsigned int get_rng_fc(struct ap_message *, int *, unsigned int *);
+int prep_cca_ap_msg(bool userspace, struct ica_xcRB *xcrb,
+ struct ap_message *ap_msg,
+ unsigned int *fc, unsigned short **dom);
+int prep_ep11_ap_msg(bool userspace, struct ep11_urb *xcrb,
+ struct ap_message *ap_msg,
+ unsigned int *fc, unsigned int *dom);
+int prep_rng_ap_msg(struct ap_message *ap_msg,
+ int *fc, unsigned int *dom);
#define LOW 10
#define MEDIUM 100
@@ -115,7 +116,7 @@ int speed_idx_ep11(int);
* @ap_dev: AP device pointer
* @ap_msg: pointer to AP message
*/
-static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg,
+static inline void rng_type6cprb_msgx(struct ap_message *ap_msg,
unsigned int random_number_length,
unsigned int *domain)
{
@@ -127,14 +128,14 @@ static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg,
char rule[8];
short int verb_length;
short int key_length;
- } __packed * msg = ap_msg->message;
+ } __packed * msg = ap_msg->msg;
static struct type6_hdr static_type6_hdrX = {
.type = 0x06,
.offset1 = 0x00000058,
.agent_id = {'C', 'A'},
.function_code = {'R', 'L'},
- .ToCardLen1 = sizeof(*msg) - sizeof(msg->hdr),
- .FromCardLen1 = sizeof(*msg) - sizeof(msg->hdr),
+ .tocardlen1 = sizeof(*msg) - sizeof(msg->hdr),
+ .fromcardlen1 = sizeof(*msg) - sizeof(msg->hdr),
};
static struct CPRBX local_cprbx = {
.cprb_len = 0x00dc,
@@ -146,15 +147,15 @@ static inline void rng_type6CPRB_msgX(struct ap_message *ap_msg,
};
msg->hdr = static_type6_hdrX;
- msg->hdr.FromCardLen2 = random_number_length,
+ msg->hdr.fromcardlen2 = random_number_length;
msg->cprbx = local_cprbx;
- msg->cprbx.rpl_datal = random_number_length,
+ msg->cprbx.rpl_datal = random_number_length;
memcpy(msg->function_code, msg->hdr.function_code, 0x02);
msg->rule_length = 0x0a;
memcpy(msg->rule, "RANDOM ", 8);
msg->verb_length = 0x02;
msg->key_length = 0x02;
- ap_msg->length = sizeof(*msg);
+ ap_msg->len = sizeof(*msg);
*domain = (unsigned short)msg->cprbx.domain;
}
diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c
index 522c4bc69a08..a173d32eb6e8 100644
--- a/drivers/s390/crypto/zcrypt_queue.c
+++ b/drivers/s390/crypto/zcrypt_queue.c
@@ -11,6 +11,7 @@
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -18,7 +19,6 @@
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
@@ -40,30 +40,37 @@ static ssize_t online_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+ struct zcrypt_queue *zq = dev_get_drvdata(dev);
+ struct ap_queue *aq = to_ap_queue(dev);
+ int online = aq->config && !aq->chkstop && zq->online ? 1 : 0;
- return snprintf(buf, PAGE_SIZE, "%d\n", zq->online);
+ return sysfs_emit(buf, "%d\n", online);
}
static ssize_t online_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+ struct zcrypt_queue *zq = dev_get_drvdata(dev);
+ struct ap_queue *aq = to_ap_queue(dev);
struct zcrypt_card *zc = zq->zcard;
int online;
if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
return -EINVAL;
+ if (online && (!aq->config || !aq->card->config ||
+ aq->chkstop || aq->card->chkstop))
+ return -ENODEV;
if (online && !zc->online)
return -EINVAL;
zq->online = online;
- ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x online=%d\n",
- AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid),
- online);
+ ZCRYPT_DBF_INFO("%s queue=%02x.%04x online=%d\n",
+ __func__, AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid), online);
+
+ ap_send_online_uevent(&aq->ap_dev, online);
if (!online)
ap_flush_queue(zq->queue);
@@ -76,9 +83,9 @@ static ssize_t load_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+ struct zcrypt_queue *zq = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&zq->load));
+ return sysfs_emit(buf, "%d\n", atomic_read(&zq->load));
}
static DEVICE_ATTR_RO(load);
@@ -93,24 +100,28 @@ static const struct attribute_group zcrypt_queue_attr_group = {
.attrs = zcrypt_queue_attrs,
};
-void zcrypt_queue_force_online(struct zcrypt_queue *zq, int online)
+bool zcrypt_queue_force_online(struct zcrypt_queue *zq, int online)
{
- zq->online = online;
- if (!online)
- ap_flush_queue(zq->queue);
+ if (!!zq->online != !!online) {
+ zq->online = online;
+ if (!online)
+ ap_flush_queue(zq->queue);
+ return true;
+ }
+ return false;
}
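
The double negation above collapses both values to 0 or 1 before comparing, so any non-zero request counts as "online" and the return value reports whether the state actually flipped. The pattern in isolation:

	#include <linux/types.h>

	/* !!x normalizes any non-zero value to 1, turning the comparison
	 * into a pure "did the boolean state change" test */
	static bool demo_state_changed(int cur, int want)
	{
		return !!cur != !!want;
	}
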
-struct zcrypt_queue *zcrypt_queue_alloc(size_t max_response_size)
+struct zcrypt_queue *zcrypt_queue_alloc(size_t reply_buf_size)
{
struct zcrypt_queue *zq;
- zq = kzalloc(sizeof(struct zcrypt_queue), GFP_KERNEL);
+ zq = kzalloc(sizeof(*zq), GFP_KERNEL);
if (!zq)
return NULL;
- zq->reply.message = kmalloc(max_response_size, GFP_KERNEL);
- if (!zq->reply.message)
+ zq->reply.msg = kmalloc(reply_buf_size, GFP_KERNEL);
+ if (!zq->reply.msg)
goto out_free;
- zq->reply.length = max_response_size;
+ zq->reply.bufsize = reply_buf_size;
INIT_LIST_HEAD(&zq->list);
kref_init(&zq->refcount);
return zq;
@@ -123,7 +134,7 @@ EXPORT_SYMBOL(zcrypt_queue_alloc);
void zcrypt_queue_free(struct zcrypt_queue *zq)
{
- kfree(zq->reply.message);
+ kfree(zq->reply.msg);
kfree(zq);
}
EXPORT_SYMBOL(zcrypt_queue_free);
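
For orientation, alloc and register pair up per AP queue roughly as below in an adapter driver's probe path (a sketch under assumptions: error handling trimmed, and the msgtype lookup and field names taken from this file's API, not a verbatim CEX4 driver excerpt):

	/* assumes zcrypt_api.h / zcrypt_msgtype6.h context */
	static int demo_queue_probe(struct ap_queue *aq, size_t reply_buf_size)
	{
		struct zcrypt_queue *zq;
		int rc;

		zq = zcrypt_queue_alloc(reply_buf_size);	/* reply buffer per card type */
		if (!zq)
			return -ENOMEM;
		zq->queue = aq;					/* bind to the AP queue device */
		zq->ops = zcrypt_msgtype(MSGTYPE06_NAME, MSGTYPE06_VARIANT_DEFAULT);
		rc = zcrypt_queue_register(zq);			/* sysfs group + card ref */
		if (rc)
			zcrypt_queue_free(zq);
		return rc;
	}

Removal is the mirror image: zcrypt_queue_unregister() drops the sysfs group, the card reference and the queue reference, so no explicit free is needed on that path.
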
@@ -159,23 +170,22 @@ int zcrypt_queue_register(struct zcrypt_queue *zq)
int rc;
spin_lock(&zcrypt_list_lock);
- zc = zq->queue->card->private;
+ zc = dev_get_drvdata(&zq->queue->card->ap_dev.device);
zcrypt_card_get(zc);
zq->zcard = zc;
zq->online = 1; /* New devices are online by default. */
- ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x register online=1\n",
- AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid));
+ ZCRYPT_DBF_INFO("%s queue=%02x.%04x register online=1\n",
+ __func__, AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid));
list_add_tail(&zq->list, &zc->zqueues);
- zcrypt_device_count++;
spin_unlock(&zcrypt_list_lock);
rc = sysfs_create_group(&zq->queue->ap_dev.device.kobj,
&zcrypt_queue_attr_group);
if (rc)
goto out;
- get_device(&zq->queue->ap_dev.device);
if (zq->ops->rng) {
rc = zcrypt_rng_device_add();
@@ -187,7 +197,6 @@ int zcrypt_queue_register(struct zcrypt_queue *zq)
out_unregister:
sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
&zcrypt_queue_attr_group);
- put_device(&zq->queue->ap_dev.device);
out:
spin_lock(&zcrypt_list_lock);
list_del_init(&zq->list);
@@ -207,20 +216,19 @@ void zcrypt_queue_unregister(struct zcrypt_queue *zq)
{
struct zcrypt_card *zc;
- ZCRYPT_DBF(DBF_INFO, "queue=%02x.%04x unregister\n",
- AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid));
+ ZCRYPT_DBF_INFO("%s queue=%02x.%04x unregister\n",
+ __func__, AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid));
zc = zq->zcard;
spin_lock(&zcrypt_list_lock);
list_del_init(&zq->list);
- zcrypt_device_count--;
spin_unlock(&zcrypt_list_lock);
- zcrypt_card_put(zc);
if (zq->ops->rng)
zcrypt_rng_device_remove();
sysfs_remove_group(&zq->queue->ap_dev.device.kobj,
&zcrypt_queue_attr_group);
- put_device(&zq->queue->ap_dev.device);
+ zcrypt_card_put(zc);
zcrypt_queue_put(zq);
}
EXPORT_SYMBOL(zcrypt_queue_unregister);