Diffstat (limited to 'drivers/s390/crypto/ap_queue.c')
| -rw-r--r-- | drivers/s390/crypto/ap_queue.c | 452 |
1 file changed, 314 insertions(+), 138 deletions(-)
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 30df83735adf..4a32c1e19a1e 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -1,36 +1,51 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright IBM Corp. 2016
+ * Copyright IBM Corp. 2016, 2023
  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  *
  * Adjunct processor bus, queue related code.
  */
 
-#define KMSG_COMPONENT "ap"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "ap: " fmt
 
+#include <linux/export.h>
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <asm/facility.h>
 
+#define CREATE_TRACE_POINTS
+#include <asm/trace/ap.h>
+
 #include "ap_bus.h"
 #include "ap_debug.h"
 
+EXPORT_TRACEPOINT_SYMBOL(s390_ap_nqap);
+EXPORT_TRACEPOINT_SYMBOL(s390_ap_dqap);
+
 static void __ap_flush_queue(struct ap_queue *aq);
 
 /*
  * some AP queue helper functions
  */
 
+static inline bool ap_q_supported_in_se(struct ap_queue *aq)
+{
+	return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel;
+}
+
 static inline bool ap_q_supports_bind(struct ap_queue *aq)
 {
-	return ap_test_bit(&aq->card->functions, AP_FUNC_EP11) ||
-	       ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL);
+	return aq->card->hwinfo.ep11 || aq->card->hwinfo.accel;
 }
 
 static inline bool ap_q_supports_assoc(struct ap_queue *aq)
 {
-	return ap_test_bit(&aq->card->functions, AP_FUNC_EP11);
+	return aq->card->hwinfo.ep11;
+}
+
+static inline bool ap_q_needs_bind(struct ap_queue *aq)
+{
+	return ap_q_supports_bind(aq) && ap_sb_available();
 }
 
 /**
@@ -88,55 +103,18 @@ static inline struct ap_queue_status
 __ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,
	   int special)
 {
+	struct ap_queue_status status;
+
 	if (special)
 		qid |= 0x400000UL;
-	return ap_nqap(qid, psmid, msg, msglen);
-}
 
-int ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen)
-{
-	struct ap_queue_status status;
-
-	status = __ap_send(qid, psmid, msg, msglen, 0);
-	if (status.async)
-		return -EPERM;
-	switch (status.response_code) {
-	case AP_RESPONSE_NORMAL:
-		return 0;
-	case AP_RESPONSE_Q_FULL:
-	case AP_RESPONSE_RESET_IN_PROGRESS:
-		return -EBUSY;
-	case AP_RESPONSE_REQ_FAC_NOT_INST:
-		return -EINVAL;
-	default:	/* Device is gone. */
-		return -ENODEV;
-	}
-}
-EXPORT_SYMBOL(ap_send);
+	status = ap_nqap(qid, psmid, msg, msglen);
 
-int ap_recv(ap_qid_t qid, unsigned long *psmid, void *msg, size_t msglen)
-{
-	struct ap_queue_status status;
+	trace_s390_ap_nqap(AP_QID_CARD(qid), AP_QID_QUEUE(qid),
+			   status.value, psmid);
 
-	if (!msg)
-		return -EINVAL;
-	status = ap_dqap(qid, psmid, msg, msglen, NULL, NULL, NULL);
-	if (status.async)
-		return -EPERM;
-	switch (status.response_code) {
-	case AP_RESPONSE_NORMAL:
-		return 0;
-	case AP_RESPONSE_NO_PENDING_REPLY:
-		if (status.queue_empty)
-			return -ENOENT;
-		return -EBUSY;
-	case AP_RESPONSE_RESET_IN_PROGRESS:
-		return -EBUSY;
-	default:
-		return -ENODEV;
-	}
+	return status;
 }
-EXPORT_SYMBOL(ap_recv);
 
 /* State machine definitions and helpers */
 
@@ -175,8 +153,13 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
 			parts++;
 	} while (status.response_code == 0xFF && resgr0 != 0);
 
+	trace_s390_ap_dqap(AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid),
+			   status.value, aq->reply->psmid);
+
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
+		print_hex_dump_debug("aprpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
+				     aq->reply->msg, aq->reply->len, false);
 		aq->queue_count = max_t(int, 0, aq->queue_count - 1);
 		if (!status.queue_empty && !aq->queue_count)
 			aq->queue_count++;
@@ -210,6 +193,9 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
 		aq->queue_count = 0;
 		list_splice_init(&aq->pendingq, &aq->requestq);
 		aq->requestq_count += aq->pendingq_count;
+		pr_debug("queue 0x%02x.%04x rescheduled %d reqs (new req %d)\n",
+			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid),
+			 aq->pendingq_count, aq->requestq_count);
 		aq->pendingq_count = 0;
 		break;
 	default:
@@ -240,13 +226,13 @@ static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
 			return AP_SM_WAIT_AGAIN;
 		}
 		aq->sm_state = AP_SM_STATE_IDLE;
-		return AP_SM_WAIT_NONE;
+		break;
 	case AP_RESPONSE_NO_PENDING_REPLY:
 		if (aq->queue_count > 0)
-			return aq->interrupt ?
+			return status.irq_enabled ?
 				AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
 		aq->sm_state = AP_SM_STATE_IDLE;
-		return AP_SM_WAIT_NONE;
+		break;
 	default:
 		aq->dev_state = AP_DEV_STATE_ERROR;
 		aq->last_err_rc = status.response_code;
@@ -255,6 +241,16 @@ static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
 			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
 		return AP_SM_WAIT_NONE;
 	}
+	/* Check and maybe enable irq support (again) on this queue */
+	if (!status.irq_enabled && status.queue_empty) {
+		void *lsi_ptr = ap_airq_ptr();
+
+		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0) {
+			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
+			return AP_SM_WAIT_AGAIN;
+		}
+	}
+	return AP_SM_WAIT_NONE;
 }
 
 /**
@@ -274,6 +270,8 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
 
 	/* Start the next request on the queue. */
 	ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
+	print_hex_dump_debug("apreq: ", DUMP_PREFIX_ADDRESS, 16, 1,
+			     ap_msg->msg, ap_msg->len, false);
 	status = __ap_send(qid, ap_msg->psmid, ap_msg->msg, ap_msg->len,
 			   ap_msg->flags & AP_MSG_FLAG_SPECIAL);
@@ -287,14 +285,14 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
 		list_move_tail(&ap_msg->list, &aq->pendingq);
 		aq->requestq_count--;
 		aq->pendingq_count++;
-		if (aq->queue_count < aq->card->queue_depth) {
+		if (aq->queue_count < aq->card->hwinfo.qd) {
 			aq->sm_state = AP_SM_STATE_WORKING;
 			return AP_SM_WAIT_AGAIN;
 		}
 		fallthrough;
 	case AP_RESPONSE_Q_FULL:
 		aq->sm_state = AP_SM_STATE_QUEUE_FULL;
-		return aq->interrupt ?
+		return status.irq_enabled ?
 			AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
 	case AP_RESPONSE_RESET_IN_PROGRESS:
 		aq->sm_state = AP_SM_STATE_RESET_WAIT;
@@ -347,7 +345,6 @@ static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
 	case AP_RESPONSE_NORMAL:
 	case AP_RESPONSE_RESET_IN_PROGRESS:
 		aq->sm_state = AP_SM_STATE_RESET_WAIT;
-		aq->interrupt = false;
 		aq->rapq_fbit = 0;
 		return AP_SM_WAIT_LOW_TIMEOUT;
 	default:
@@ -369,17 +366,15 @@ static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
 {
 	struct ap_queue_status status;
+	struct ap_tapq_hwinfo hwinfo;
 	void *lsi_ptr;
 
-	if (aq->queue_count > 0 && aq->reply)
-		/* Try to read a completed message and get the status */
-		status = ap_sm_recv(aq);
-	else
-		/* Get the status with TAPQ */
-		status = ap_tapq(aq->qid, NULL);
+	/* Get the status with TAPQ */
+	status = ap_test_queue(aq->qid, 1, &hwinfo);
 
 	switch (status.response_code) {
 	case AP_RESPONSE_NORMAL:
+		aq->se_bstate = hwinfo.bs;
 		lsi_ptr = ap_airq_ptr();
 		if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
 			aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
@@ -422,7 +417,6 @@ static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
 
 	if (status.irq_enabled == 1) {
 		/* Irqs are now enabled */
-		aq->interrupt = true;
 		aq->sm_state = (aq->queue_count > 0) ?
 			AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
 	}
@@ -452,9 +446,9 @@ static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
 static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
 {
 	struct ap_queue_status status;
-	struct ap_tapq_gr2 info;
+	struct ap_tapq_hwinfo hwinfo;
 
-	status = ap_test_queue(aq->qid, 1, &info);
+	status = ap_test_queue(aq->qid, 1, &hwinfo);
 	/* handle asynchronous error on this queue */
 	if (status.async && status.response_code) {
 		aq->dev_state = AP_DEV_STATE_ERROR;
@@ -473,14 +467,17 @@ static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
 		return AP_SM_WAIT_NONE;
 	}
 
+	/* update queue's SE bind state */
+	aq->se_bstate = hwinfo.bs;
+
 	/* check bs bits */
-	switch (info.bs) {
+	switch (hwinfo.bs) {
 	case AP_BS_Q_USABLE:
 		/* association is through */
 		aq->sm_state = AP_SM_STATE_IDLE;
-		AP_DBF_DBG("%s queue 0x%02x.%04x associated with %u\n",
-			   __func__, AP_QID_CARD(aq->qid),
-			   AP_QID_QUEUE(aq->qid), aq->assoc_idx);
+		pr_debug("queue 0x%02x.%04x associated with %u\n",
+			 AP_QID_CARD(aq->qid),
+			 AP_QID_QUEUE(aq->qid), aq->assoc_idx);
 		return AP_SM_WAIT_NONE;
 	case AP_BS_Q_USABLE_NO_SECURE_KEY:
 		/* association still pending */
@@ -491,7 +488,7 @@ static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
 		aq->dev_state = AP_DEV_STATE_ERROR;
 		aq->last_err_rc = status.response_code;
 		AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
-			    __func__, info.bs,
+			    __func__, hwinfo.bs,
 			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
 		return AP_SM_WAIT_NONE;
 	}
@@ -665,16 +662,21 @@ static ssize_t interrupt_show(struct device *dev,
 			      struct device_attribute *attr, char *buf)
 {
 	struct ap_queue *aq = to_ap_queue(dev);
+	struct ap_queue_status status;
 	int rc = 0;
 
 	spin_lock_bh(&aq->lock);
-	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
+	if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT) {
 		rc = sysfs_emit(buf, "Enable Interrupt pending.\n");
-	else if (aq->interrupt)
-		rc = sysfs_emit(buf, "Interrupts enabled.\n");
-	else
-		rc = sysfs_emit(buf, "Interrupts disabled.\n");
+	} else {
+		status = ap_tapq(aq->qid, NULL);
+		if (status.irq_enabled)
+			rc = sysfs_emit(buf, "Interrupts enabled.\n");
+		else
+			rc = sysfs_emit(buf, "Interrupts disabled.\n");
+	}
 	spin_unlock_bh(&aq->lock);
+
 	return rc;
 }
 
@@ -713,22 +715,74 @@ static ssize_t ap_functions_show(struct device *dev,
 {
 	struct ap_queue *aq = to_ap_queue(dev);
 	struct ap_queue_status status;
-	struct ap_tapq_gr2 info;
+	struct ap_tapq_hwinfo hwinfo;
 
-	status = ap_test_queue(aq->qid, 1, &info);
+	status = ap_test_queue(aq->qid, 1, &hwinfo);
 	if (status.response_code > AP_RESPONSE_BUSY) {
-		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
-			   __func__, status.response_code,
-			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
+			 status.response_code,
+			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
 		return -EIO;
 	}
-	return sysfs_emit(buf, "0x%08X\n", info.fac);
+	return sysfs_emit(buf, "0x%08X\n", hwinfo.fac);
 }
 
 static DEVICE_ATTR_RO(ap_functions);
 
-#ifdef CONFIG_ZCRYPT_DEBUG
+static ssize_t driver_override_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+	struct ap_device *ap_dev = &aq->ap_dev;
+	int rc;
+
+	device_lock(dev);
+	if (ap_dev->driver_override)
+		rc = sysfs_emit(buf, "%s\n", ap_dev->driver_override);
+	else
+		rc = sysfs_emit(buf, "\n");
+	device_unlock(dev);
+
+	return rc;
+}
+
+static ssize_t driver_override_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t count)
+{
+	struct ap_queue *aq = to_ap_queue(dev);
+	struct ap_device *ap_dev = &aq->ap_dev;
+	int rc = -EINVAL;
+	bool old_value;
+
+	if (mutex_lock_interruptible(&ap_attr_mutex))
+		return -ERESTARTSYS;
+
+	/* Do not allow driver override if apmask/aqmask is in use */
+	if (ap_apmask_aqmask_in_use)
+		goto out;
+
+	old_value = ap_dev->driver_override ? true : false;
+	rc = driver_set_override(dev, &ap_dev->driver_override, buf, count);
+	if (rc)
+		goto out;
+	if (old_value && !ap_dev->driver_override)
+		--ap_driver_override_ctr;
+	else if (!old_value && ap_dev->driver_override)
+		++ap_driver_override_ctr;
+
+	rc = count;
+
+out:
+	mutex_unlock(&ap_attr_mutex);
+	return rc;
+}
+
+static DEVICE_ATTR_RW(driver_override);
+
+#ifdef CONFIG_AP_DEBUG
 static ssize_t states_show(struct device *dev,
 			   struct device_attribute *attr, char *buf)
 {
@@ -840,7 +894,8 @@ static struct attribute *ap_queue_dev_attrs[] = {
 	&dev_attr_config.attr,
 	&dev_attr_chkstop.attr,
 	&dev_attr_ap_functions.attr,
-#ifdef CONFIG_ZCRYPT_DEBUG
+	&dev_attr_driver_override.attr,
+#ifdef CONFIG_AP_DEBUG
 	&dev_attr_states.attr,
 	&dev_attr_last_err_rc.attr,
 #endif
@@ -866,19 +921,25 @@ static ssize_t se_bind_show(struct device *dev,
 {
 	struct ap_queue *aq = to_ap_queue(dev);
 	struct ap_queue_status status;
-	struct ap_tapq_gr2 info;
+	struct ap_tapq_hwinfo hwinfo;
 
 	if (!ap_q_supports_bind(aq))
 		return sysfs_emit(buf, "-\n");
-	status = ap_test_queue(aq->qid, 1, &info);
+	status = ap_test_queue(aq->qid, 1, &hwinfo);
 	if (status.response_code > AP_RESPONSE_BUSY) {
-		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
-			   __func__, status.response_code,
-			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
+			 status.response_code,
+			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
 		return -EIO;
 	}
-	switch (info.bs) {
+
+	/* update queue's SE bind state */
+	spin_lock_bh(&aq->lock);
+	aq->se_bstate = hwinfo.bs;
+	spin_unlock_bh(&aq->lock);
+
+	switch (hwinfo.bs) {
 	case AP_BS_Q_USABLE:
 	case AP_BS_Q_USABLE_NO_SECURE_KEY:
 		return sysfs_emit(buf, "bound\n");
@@ -893,6 +954,7 @@ static ssize_t se_bind_store(struct device *dev,
 {
 	struct ap_queue *aq = to_ap_queue(dev);
 	struct ap_queue_status status;
+	struct ap_tapq_hwinfo hwinfo;
 	bool value;
 	int rc;
 
@@ -904,34 +966,80 @@ static ssize_t se_bind_store(struct device *dev,
 	if (rc)
 		return rc;
 
-	if (value) {
-		/* bind, do BAPQ */
-		spin_lock_bh(&aq->lock);
-		if (aq->sm_state < AP_SM_STATE_IDLE) {
-			spin_unlock_bh(&aq->lock);
-			return -EBUSY;
-		}
-		status = ap_bapq(aq->qid);
-		spin_unlock_bh(&aq->lock);
-		if (status.response_code) {
-			AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
-				    __func__, status.response_code,
-				    AP_QID_CARD(aq->qid),
-				    AP_QID_QUEUE(aq->qid));
-			return -EIO;
-		}
-	} else {
-		/* unbind, set F bit arg and trigger RAPQ */
+	if (!value) {
+		/* Unbind. Set F bit arg and trigger RAPQ */
 		spin_lock_bh(&aq->lock);
 		__ap_flush_queue(aq);
 		aq->rapq_fbit = 1;
-		aq->assoc_idx = ASSOC_IDX_INVALID;
-		aq->sm_state = AP_SM_STATE_RESET_START;
-		ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
-		spin_unlock_bh(&aq->lock);
+		_ap_queue_init_state(aq);
+		rc = count;
+		goto out;
 	}
 
-	return count;
+	/* Bind. Check current SE bind state */
+	status = ap_test_queue(aq->qid, 1, &hwinfo);
+	if (status.response_code) {
+		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+			    __func__, status.response_code,
+			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		return -EIO;
+	}
+
+	/* Update BS state */
+	spin_lock_bh(&aq->lock);
+	aq->se_bstate = hwinfo.bs;
+	if (hwinfo.bs != AP_BS_Q_AVAIL_FOR_BINDING) {
+		AP_DBF_WARN("%s bind attempt with bs %d on queue 0x%02x.%04x\n",
+			    __func__, hwinfo.bs,
+			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* Check SM state */
+	if (aq->sm_state < AP_SM_STATE_IDLE) {
+		rc = -EBUSY;
+		goto out;
+	}
+
+	/* invoke BAPQ */
+	status = ap_bapq(aq->qid);
+	if (status.response_code) {
+		AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
+			    __func__, status.response_code,
+			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		rc = -EIO;
+		goto out;
+	}
+	aq->assoc_idx = ASSOC_IDX_INVALID;
+
+	/* verify SE bind state */
+	status = ap_test_queue(aq->qid, 1, &hwinfo);
+	if (status.response_code) {
+		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+			    __func__, status.response_code,
+			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		rc = -EIO;
+		goto out;
+	}
+	aq->se_bstate = hwinfo.bs;
+	if (!(hwinfo.bs == AP_BS_Q_USABLE ||
+	      hwinfo.bs == AP_BS_Q_USABLE_NO_SECURE_KEY)) {
+		AP_DBF_WARN("%s BAPQ success, but bs shows %d on queue 0x%02x.%04x\n",
+			    __func__, hwinfo.bs,
+			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		rc = -EIO;
+		goto out;
+	}
+
+	/* SE bind was successful */
+	AP_DBF_INFO("%s bapq(0x%02x.%04x) success\n", __func__,
+		    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+	rc = count;
+
+out:
+	spin_unlock_bh(&aq->lock);
+	return rc;
 }
 
 static DEVICE_ATTR_RW(se_bind);
@@ -941,20 +1049,25 @@ static ssize_t se_associate_show(struct device *dev,
 {
 	struct ap_queue *aq = to_ap_queue(dev);
 	struct ap_queue_status status;
-	struct ap_tapq_gr2 info;
+	struct ap_tapq_hwinfo hwinfo;
 
 	if (!ap_q_supports_assoc(aq))
 		return sysfs_emit(buf, "-\n");
-	status = ap_test_queue(aq->qid, 1, &info);
+	status = ap_test_queue(aq->qid, 1, &hwinfo);
 	if (status.response_code > AP_RESPONSE_BUSY) {
-		AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
-			   __func__, status.response_code,
-			   AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		pr_debug("RC 0x%02x on tapq(0x%02x.%04x)\n",
+			 status.response_code,
+			 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
 		return -EIO;
 	}
-	switch (info.bs) {
+	/* update queue's SE bind state */
+	spin_lock_bh(&aq->lock);
+	aq->se_bstate = hwinfo.bs;
+	spin_unlock_bh(&aq->lock);
+
+	switch (hwinfo.bs) {
 	case AP_BS_Q_USABLE:
 		if (aq->assoc_idx == ASSOC_IDX_INVALID) {
 			AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__);
@@ -976,6 +1089,7 @@ static ssize_t se_associate_store(struct device *dev,
 {
 	struct ap_queue *aq = to_ap_queue(dev);
 	struct ap_queue_status status;
+	struct ap_tapq_hwinfo hwinfo;
 	unsigned int value;
 	int rc;
 
@@ -989,18 +1103,28 @@ static ssize_t se_associate_store(struct device *dev,
 	if (value >= ASSOC_IDX_INVALID)
 		return -EINVAL;
 
+	/* check current SE bind state */
+	status = ap_test_queue(aq->qid, 1, &hwinfo);
+	if (status.response_code) {
+		AP_DBF_WARN("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+			    __func__, status.response_code,
+			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		return -EIO;
+	}
+
 	spin_lock_bh(&aq->lock);
-
-	/* sm should be in idle state */
-	if (aq->sm_state != AP_SM_STATE_IDLE) {
-		spin_unlock_bh(&aq->lock);
-		return -EBUSY;
+	aq->se_bstate = hwinfo.bs;
+	if (hwinfo.bs != AP_BS_Q_USABLE_NO_SECURE_KEY) {
+		AP_DBF_WARN("%s association attempt with bs %d on queue 0x%02x.%04x\n",
+			    __func__, hwinfo.bs,
+			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+		rc = -EINVAL;
+		goto out;
 	}
 
-	/* already associated or association pending ? */
-	if (aq->assoc_idx != ASSOC_IDX_INVALID) {
-		spin_unlock_bh(&aq->lock);
-		return -EINVAL;
+	/* check SM state */
+	if (aq->sm_state != AP_SM_STATE_IDLE) {
+		rc = -EBUSY;
+		goto out;
 	}
 
 	/* trigger the asynchronous association request */
@@ -1011,17 +1135,20 @@ static ssize_t se_associate_store(struct device *dev,
 		aq->sm_state = AP_SM_STATE_ASSOC_WAIT;
 		aq->assoc_idx = value;
 		ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
-		spin_unlock_bh(&aq->lock);
 		break;
 	default:
-		spin_unlock_bh(&aq->lock);
 		AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n",
 			    __func__, status.response_code,
 			    AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
-		return -EIO;
+		rc = -EIO;
+		goto out;
 	}
 
-	return count;
+	rc = count;
+
+out:
+	spin_unlock_bh(&aq->lock);
+	return rc;
 }
 
 static DEVICE_ATTR_RW(se_associate);
@@ -1052,21 +1179,21 @@ static void ap_queue_device_release(struct device *dev)
 	kfree(aq);
 }
 
-struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
+struct ap_queue *ap_queue_create(ap_qid_t qid, struct ap_card *ac)
 {
 	struct ap_queue *aq;
 
 	aq = kzalloc(sizeof(*aq), GFP_KERNEL);
 	if (!aq)
 		return NULL;
+	aq->card = ac;
 	aq->ap_dev.device.release = ap_queue_device_release;
 	aq->ap_dev.device.type = &ap_queue_type;
-	aq->ap_dev.device_type = device_type;
-	// add optional SE secure binding attributes group
-	if (ap_sb_available() && is_prot_virt_guest())
+	aq->ap_dev.device_type = ac->ap_dev.device_type;
+	/* in SE environment add bind/associate attributes group */
+	if (ap_is_se_guest() && ap_q_supported_in_se(aq))
 		aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;
 	aq->qid = qid;
-	aq->interrupt = false;
 	spin_lock_init(&aq->lock);
 	INIT_LIST_HEAD(&aq->pendingq);
 	INIT_LIST_HEAD(&aq->requestq);
@@ -1119,6 +1246,50 @@ int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
 EXPORT_SYMBOL(ap_queue_message);
 
 /**
+ * ap_queue_usable(): Check if queue is usable just now.
+ * @aq: The AP queue device to test for usability.
+ * This function is intended for the scheduler to query if it makes
+ * sense to enqueue a message into this AP queue device by calling
+ * ap_queue_message(). The perspective is very short-term as the
+ * state machine and device state(s) may change at any time.
+ */
+bool ap_queue_usable(struct ap_queue *aq)
+{
+	bool rc = true;
+
+	spin_lock_bh(&aq->lock);
+
+	/* check for not configured or checkstopped */
+	if (!aq->config || aq->chkstop) {
+		rc = false;
+		goto unlock_and_out;
+	}
+
+	/* device state needs to be ok */
+	if (aq->dev_state != AP_DEV_STATE_OPERATING) {
+		rc = false;
+		goto unlock_and_out;
+	}
+
+	/* SE guest's queues additionally need to be bound */
+	if (ap_is_se_guest()) {
+		if (!ap_q_supported_in_se(aq)) {
+			rc = false;
+			goto unlock_and_out;
+		}
+		if (ap_q_needs_bind(aq) &&
+		    !(aq->se_bstate == AP_BS_Q_USABLE ||
+		      aq->se_bstate == AP_BS_Q_USABLE_NO_SECURE_KEY))
+			rc = false;
+	}
+
+unlock_and_out:
+	spin_unlock_bh(&aq->lock);
+	return rc;
+}
+EXPORT_SYMBOL(ap_queue_usable);
+
+/**
  * ap_cancel_message(): Cancel a crypto request.
  * @aq: The AP device that has the message queued
  * @ap_msg: The message that is to be removed
@@ -1188,7 +1359,7 @@ void ap_queue_prepare_remove(struct ap_queue *aq)
 	/* move queue device state to SHUTDOWN in progress */
 	aq->dev_state = AP_DEV_STATE_SHUTDOWN;
 	spin_unlock_bh(&aq->lock);
-	del_timer_sync(&aq->timeout);
+	timer_delete_sync(&aq->timeout);
 }
 
 void ap_queue_remove(struct ap_queue *aq)
@@ -1205,14 +1376,19 @@ void ap_queue_remove(struct ap_queue *aq)
 	spin_unlock_bh(&aq->lock);
 }
 
-void ap_queue_init_state(struct ap_queue *aq)
+void _ap_queue_init_state(struct ap_queue *aq)
 {
-	spin_lock_bh(&aq->lock);
 	aq->dev_state = AP_DEV_STATE_OPERATING;
 	aq->sm_state = AP_SM_STATE_RESET_START;
 	aq->last_err_rc = 0;
 	aq->assoc_idx = ASSOC_IDX_INVALID;
	ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
+}
+
+void ap_queue_init_state(struct ap_queue *aq)
+{
+	spin_lock_bh(&aq->lock);
+	_ap_queue_init_state(aq);
 	spin_unlock_bh(&aq->lock);
 }
 EXPORT_SYMBOL(ap_queue_init_state);
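The newly exported ap_queue_usable() is meant as a quick pre-check before enqueueing work with ap_queue_message(). A minimal caller sketch under that assumption follows; the wrapper function and its -ENODEV return value are illustrative only, while ap_queue_usable() and ap_queue_message() are the interfaces from the diff above (declared in "ap_bus.h").

/*
 * Hypothetical caller sketch: only ap_queue_usable() and ap_queue_message()
 * come from the code above; the wrapper and its error code are illustrative.
 */
#include "ap_bus.h"

static int submit_if_usable(struct ap_queue *aq, struct ap_message *ap_msg)
{
	/*
	 * Short-term check only: queue and device state may change right
	 * after this call returns, so the enqueue below can still fail.
	 */
	if (!ap_queue_usable(aq))
		return -ENODEV;

	/* Enqueue; the AP state machine sends it once the queue has room. */
	return ap_queue_message(aq, ap_msg);
}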
