Diffstat (limited to 'drivers/s390/crypto/ap_bus.c'):
 drivers/s390/crypto/ap_bus.c | 921 ++++++++++++++++++++++++++-----------
 1 file changed, 635 insertions(+), 286 deletions(-)
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 339812efe822..a445494fd2be 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -11,11 +11,11 @@
* Adjunct processor bus.
*/
-#define KMSG_COMPONENT "ap"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "ap: " fmt
#include <linux/kernel_stat.h>
#include <linux/moduleparam.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/err.h>
@@ -26,6 +26,7 @@
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
+#include <asm/machine.h>
#include <asm/airq.h>
#include <asm/tpi.h>
#include <linux/atomic.h>
@@ -38,13 +39,17 @@
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/module.h>
+#include <asm/uv.h>
+#include <asm/chsc.h>
+#include <linux/mempool.h>
#include "ap_bus.h"
#include "ap_debug.h"
-/*
- * Module parameters; note though this file itself isn't modular.
- */
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("Adjunct Processor Bus driver");
+MODULE_LICENSE("GPL");
+
int ap_domain_index = -1; /* Adjunct Processor Domain Index */
static DEFINE_SPINLOCK(ap_domain_lock);
module_param_named(domain, ap_domain_index, int, 0440);
@@ -80,20 +85,27 @@ DEFINE_SPINLOCK(ap_queues_lock);
/* Default permissions (ioctl, card and domain masking) */
struct ap_perms ap_perms;
EXPORT_SYMBOL(ap_perms);
-DEFINE_MUTEX(ap_perms_mutex);
-EXPORT_SYMBOL(ap_perms_mutex);
-
-/* # of bus scans since init */
-static atomic64_t ap_scan_bus_count;
+/* true if apmask and/or aqmask are NOT default */
+bool ap_apmask_aqmask_in_use;
+/* counter for how many driver_overrides are currently active */
+int ap_driver_override_ctr;
+/*
+ * Mutex for consistent read and write of the ap_perms struct,
+ * ap_apmask_aqmask_in_use, ap_driver_override_ctr
+ * and the ap bus sysfs attributes apmask and aqmask.
+ */
+DEFINE_MUTEX(ap_attr_mutex);
+EXPORT_SYMBOL(ap_attr_mutex);
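For illustration only (not part of the patch): a minimal sketch of how an external module could read the exported ap_perms masks consistently under the new ap_attr_mutex. The example_apqn_in_default_masks() helper is hypothetical; the locking pattern mirrors the one used further down in this patch.

static bool example_apqn_in_default_masks(int card, int queue)
{
	bool ok;

	mutex_lock(&ap_attr_mutex);
	ok = test_bit_inv(card, ap_perms.apm) &&
	     test_bit_inv(queue, ap_perms.aqm);
	mutex_unlock(&ap_attr_mutex);

	return ok;
}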
/* # of bindings complete since init */
static atomic64_t ap_bindings_complete_count = ATOMIC64_INIT(0);
-/* completion for initial APQN bindings complete */
-static DECLARE_COMPLETION(ap_init_apqn_bindings_complete);
+/* completion for APQN bindings complete */
+static DECLARE_COMPLETION(ap_apqn_bindings_complete);
-static struct ap_config_info *ap_qci_info;
-static struct ap_config_info *ap_qci_info_old;
+static struct ap_config_info qci[2];
+static struct ap_config_info *const ap_qci_info = &qci[0];
+static struct ap_config_info *const ap_qci_info_old = &qci[1];
/*
* AP bus related debug feature things.
@@ -101,12 +113,38 @@ static struct ap_config_info *ap_qci_info_old;
debug_info_t *ap_dbf_info;
/*
- * Workqueue timer for bus rescan.
+ * There is a need for a do-not-allocate-memory path through the AP bus
+ * layer. The pkey layer may be triggered via the in-kernel interface from
+ * a protected key crypto algorithm (namely PAES) to convert a secure key
+ * into a protected key. This happens in a workqueue context, so sleeping
+ * is allowed but memory allocations causing IO operations are not permitted.
+ * To accomplish this, an AP message memory pool with pre-allocated space
+ * is established. When ap_init_apmsg() with use_mempool set to true is
+ * called, instead of kmalloc() the ap message buffer is allocated from
+ * the ap_msg_pool. This pool only holds a limited number of buffers:
+ * ap_msg_pool_min_items with the item size AP_DEFAULT_MAX_MSG_SIZE and
+ * exactly one of these items (if available) is returned if ap_init_apmsg()
+ * with the use_mempool arg set to true is called. When this pool is exhausted
+ * and use_mempool is set true, ap_init_apmsg() returns -ENOMEM without
+ * any attempt to allocate memory and the caller has to deal with that.
+ */
+static mempool_t *ap_msg_pool;
+static unsigned int ap_msg_pool_min_items = 8;
+module_param_named(msgpool_min_items, ap_msg_pool_min_items, uint, 0440);
+MODULE_PARM_DESC(msgpool_min_items, "AP message pool minimal items");
+
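Illustrative sketch (not part of the patch): how a caller on the do-not-allocate path described above might use the new ap_init_apmsg()/ap_release_apmsg() pair. The example_convert_key() wrapper is hypothetical; only the helpers and AP_MSG_FLAG_MEMPOOL come from this patch.

static int example_convert_key(void)
{
	struct ap_message ap_msg;
	int rc;

	/* draw one pre-allocated buffer from ap_msg_pool, no memory IO */
	rc = ap_init_apmsg(&ap_msg, AP_MSG_FLAG_MEMPOOL);
	if (rc)
		return rc;	/* -ENOMEM when the pool is exhausted */

	/* ... build and send the request via ap_msg.msg / ap_msg.bufsize ... */

	/* wipe the buffer and give it back to the pool */
	ap_release_apmsg(&ap_msg);
	return 0;
}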
+/*
+ * AP bus rescan related things.
*/
-static struct timer_list ap_config_timer;
-static int ap_config_time = AP_CONFIG_TIME;
-static void ap_scan_bus(struct work_struct *);
-static DECLARE_WORK(ap_scan_work, ap_scan_bus);
+static bool ap_scan_bus(void);
+static bool ap_scan_bus_result; /* result of last ap_scan_bus() */
+static DEFINE_MUTEX(ap_scan_bus_mutex); /* mutex ap_scan_bus() invocations */
+static struct task_struct *ap_scan_bus_task; /* thread holding the scan mutex */
+static atomic64_t ap_scan_bus_count; /* counter ap_scan_bus() invocations */
+static int ap_scan_bus_time = AP_CONFIG_TIME;
+static struct timer_list ap_scan_bus_timer;
+static void ap_scan_bus_wq_callback(struct work_struct *);
+static DECLARE_WORK(ap_scan_bus_work, ap_scan_bus_wq_callback);
/*
* Tasklet & timer for AP request polling and interrupts
@@ -135,7 +173,7 @@ static int ap_max_domain_id = 15;
/* Maximum adapter id, if not given via qci */
static int ap_max_adapter_id = 63;
-static struct bus_type ap_bus_type;
+static const struct bus_type ap_bus_type;
/* Adapter interrupt definitions */
static void ap_interrupt_handler(struct airq_struct *airq,
@@ -201,9 +239,7 @@ static int ap_apft_available(void)
*/
static inline int ap_qact_available(void)
{
- if (ap_qci_info)
- return ap_qci_info->qact;
- return 0;
+ return ap_qci_info->qact;
}
/*
@@ -213,9 +249,7 @@ static inline int ap_qact_available(void)
*/
int ap_sb_available(void)
{
- if (ap_qci_info)
- return ap_qci_info->apsb;
- return 0;
+ return ap_qci_info->apsb;
}
/*
@@ -227,23 +261,6 @@ bool ap_is_se_guest(void)
}
EXPORT_SYMBOL(ap_is_se_guest);
-/*
- * ap_fetch_qci_info(): Fetch cryptographic config info
- *
- * Returns the ap configuration info fetched via PQAP(QCI).
- * On success 0 is returned, on failure a negative errno
- * is returned, e.g. if the PQAP(QCI) instruction is not
- * available, the return value will be -EOPNOTSUPP.
- */
-static inline int ap_fetch_qci_info(struct ap_config_info *info)
-{
- if (!ap_qci_available())
- return -EOPNOTSUPP;
- if (!info)
- return -EINVAL;
- return ap_qci(info);
-}
-
/**
* ap_init_qci_info(): Allocate and query qci config info.
* Does also update the static variables ap_max_domain_id
@@ -251,27 +268,12 @@ static inline int ap_fetch_qci_info(struct ap_config_info *info)
*/
static void __init ap_init_qci_info(void)
{
- if (!ap_qci_available()) {
+ if (!ap_qci_available() ||
+ ap_qci(ap_qci_info)) {
AP_DBF_INFO("%s QCI not supported\n", __func__);
return;
}
-
- ap_qci_info = kzalloc(sizeof(*ap_qci_info), GFP_KERNEL);
- if (!ap_qci_info)
- return;
- ap_qci_info_old = kzalloc(sizeof(*ap_qci_info_old), GFP_KERNEL);
- if (!ap_qci_info_old) {
- kfree(ap_qci_info);
- ap_qci_info = NULL;
- return;
- }
- if (ap_fetch_qci_info(ap_qci_info) != 0) {
- kfree(ap_qci_info);
- kfree(ap_qci_info_old);
- ap_qci_info = NULL;
- ap_qci_info_old = NULL;
- return;
- }
+ memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
AP_DBF_INFO("%s successful fetched initial qci info\n", __func__);
if (ap_qci_info->apxa) {
@@ -286,8 +288,6 @@ static void __init ap_init_qci_info(void)
__func__, ap_max_domain_id);
}
}
-
- memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
}
/*
@@ -310,7 +310,7 @@ static inline int ap_test_config_card_id(unsigned int id)
{
if (id > ap_max_adapter_id)
return 0;
- if (ap_qci_info)
+ if (ap_qci_info->flags)
return ap_test_config(ap_qci_info->apm, id);
return 1;
}
@@ -327,7 +327,7 @@ int ap_test_config_usage_domain(unsigned int domain)
{
if (domain > ap_max_domain_id)
return 0;
- if (ap_qci_info)
+ if (ap_qci_info->flags)
return ap_test_config(ap_qci_info->aqm, domain);
return 1;
}
@@ -352,18 +352,17 @@ EXPORT_SYMBOL(ap_test_config_ctrl_domain);
/*
* ap_queue_info(): Check and get AP queue info.
* Returns: 1 if APQN exists and info is filled,
- * 0 if APQN seems to exit but there is no info
+ * 0 if APQN seems to exist but there is no info
* available (eg. caused by an asynch pending error)
* -1 invalid APQN, TAPQ error or AP queue status which
* indicates there is no APQN.
*/
-static int ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac,
- int *q_depth, int *q_ml, bool *q_decfg, bool *q_cstop)
+static int ap_queue_info(ap_qid_t qid, struct ap_tapq_hwinfo *hwinfo,
+ bool *decfg, bool *cstop)
{
struct ap_queue_status status;
- struct ap_tapq_gr2 tapq_info;
- tapq_info.value = 0;
+ hwinfo->value = 0;
/* make sure we don't run into a specification exception */
if (AP_QID_CARD(qid) > ap_max_adapter_id ||
@@ -371,11 +370,7 @@ static int ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac,
return -1;
/* call TAPQ on this APQN */
- status = ap_test_queue(qid, ap_apft_available(), &tapq_info);
-
- /* handle pending async error with return 'no info available' */
- if (status.async)
- return 0;
+ status = ap_test_queue(qid, ap_apft_available(), hwinfo);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
@@ -383,26 +378,23 @@ static int ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac,
case AP_RESPONSE_DECONFIGURED:
case AP_RESPONSE_CHECKSTOPPED:
case AP_RESPONSE_BUSY:
- /*
- * According to the architecture in all these cases the
- * info should be filled. All bits 0 is not possible as
- * there is at least one of the mode bits set.
- */
- if (WARN_ON_ONCE(!tapq_info.value))
- return 0;
- *q_type = tapq_info.at;
- *q_fac = tapq_info.fac;
- *q_depth = tapq_info.qd;
- *q_ml = tapq_info.ml;
- *q_decfg = status.response_code == AP_RESPONSE_DECONFIGURED;
- *q_cstop = status.response_code == AP_RESPONSE_CHECKSTOPPED;
- return 1;
+ /* For all these RCs the tapq info should be available */
+ break;
default:
- /*
- * A response code which indicates, there is no info available.
- */
- return -1;
+ /* On a pending async error the info should be available */
+ if (!status.async)
+ return -1;
+ break;
}
+
+ /* There should be at least one of the mode bits set */
+ if (WARN_ON_ONCE(!hwinfo->value))
+ return 0;
+
+ *decfg = status.response_code == AP_RESPONSE_DECONFIGURED;
+ *cstop = status.response_code == AP_RESPONSE_CHECKSTOPPED;
+
+ return 1;
}
void ap_wait(enum ap_sm_wait wait)
@@ -445,7 +437,7 @@ void ap_wait(enum ap_sm_wait wait)
*/
void ap_request_timeout(struct timer_list *t)
{
- struct ap_queue *aq = from_timer(aq, t, timeout);
+ struct ap_queue *aq = timer_container_of(aq, t, timeout);
spin_lock_bh(&aq->lock);
ap_wait(ap_sm_event(aq, AP_SM_EVENT_TIMEOUT));
@@ -493,7 +485,7 @@ static void ap_tasklet_fn(unsigned long dummy)
* important that no requests on any AP get lost.
*/
if (ap_irq_flag)
- xchg(ap_airq.lsi_ptr, 0);
+ WRITE_ONCE(*ap_airq.lsi_ptr, 0);
spin_lock_bh(&ap_queues_lock);
hash_for_each(ap_queues, bkt, aq, hnode) {
@@ -586,6 +578,48 @@ static void ap_poll_thread_stop(void)
#define is_card_dev(x) ((x)->parent == ap_root_device)
#define is_queue_dev(x) ((x)->parent != ap_root_device)
+/*
+ * ap_init_apmsg() - Initialize ap_message.
+ */
+int ap_init_apmsg(struct ap_message *ap_msg, u32 flags)
+{
+ unsigned int maxmsgsize;
+
+ memset(ap_msg, 0, sizeof(*ap_msg));
+ ap_msg->flags = flags;
+
+ if (flags & AP_MSG_FLAG_MEMPOOL) {
+ ap_msg->msg = mempool_alloc_preallocated(ap_msg_pool);
+ if (!ap_msg->msg)
+ return -ENOMEM;
+ ap_msg->bufsize = AP_DEFAULT_MAX_MSG_SIZE;
+ return 0;
+ }
+
+ maxmsgsize = atomic_read(&ap_max_msg_size);
+ ap_msg->msg = kmalloc(maxmsgsize, GFP_KERNEL);
+ if (!ap_msg->msg)
+ return -ENOMEM;
+ ap_msg->bufsize = maxmsgsize;
+
+ return 0;
+}
+EXPORT_SYMBOL(ap_init_apmsg);
+
+/*
+ * ap_release_apmsg() - Release ap_message.
+ */
+void ap_release_apmsg(struct ap_message *ap_msg)
+{
+ if (ap_msg->flags & AP_MSG_FLAG_MEMPOOL) {
+ memzero_explicit(ap_msg->msg, ap_msg->bufsize);
+ mempool_free(ap_msg->msg, ap_msg_pool);
+ } else {
+ kfree_sensitive(ap_msg->msg);
+ }
+}
+EXPORT_SYMBOL(ap_release_apmsg);
+
/**
* ap_bus_match()
* @dev: Pointer to device
@@ -593,9 +627,9 @@ static void ap_poll_thread_stop(void)
*
* AP bus driver registration/unregistration.
*/
-static int ap_bus_match(struct device *dev, struct device_driver *drv)
+static int ap_bus_match(struct device *dev, const struct device_driver *drv)
{
- struct ap_driver *ap_drv = to_ap_drv(drv);
+ const struct ap_driver *ap_drv = to_ap_drv(drv);
struct ap_device_id *id;
/*
@@ -645,11 +679,11 @@ static int ap_uevent(const struct device *dev, struct kobj_uevent_env *env)
return rc;
/* Add MODE=<accel|cca|ep11> */
- if (ap_test_bit(&ac->functions, AP_FUNC_ACCEL))
+ if (ac->hwinfo.accel)
rc = add_uevent_var(env, "MODE=accel");
- else if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
+ else if (ac->hwinfo.cca)
rc = add_uevent_var(env, "MODE=cca");
- else if (ap_test_bit(&ac->functions, AP_FUNC_EP11))
+ else if (ac->hwinfo.ep11)
rc = add_uevent_var(env, "MODE=ep11");
if (rc)
return rc;
@@ -657,11 +691,11 @@ static int ap_uevent(const struct device *dev, struct kobj_uevent_env *env)
struct ap_queue *aq = to_ap_queue(&ap_dev->device);
/* Add MODE=<accel|cca|ep11> */
- if (ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL))
+ if (aq->card->hwinfo.accel)
rc = add_uevent_var(env, "MODE=accel");
- else if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
+ else if (aq->card->hwinfo.cca)
rc = add_uevent_var(env, "MODE=cca");
- else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11))
+ else if (aq->card->hwinfo.ep11)
rc = add_uevent_var(env, "MODE=ep11");
if (rc)
return rc;
@@ -761,7 +795,7 @@ static void ap_calc_bound_apqns(unsigned int *apqns, unsigned int *bound)
}
/*
- * After initial ap bus scan do check if all existing APQNs are
+ * After ap bus scan do check if all existing APQNs are
* bound to device drivers.
*/
static void ap_check_bindings_complete(void)
@@ -771,11 +805,11 @@ static void ap_check_bindings_complete(void)
if (atomic64_read(&ap_scan_bus_count) >= 1) {
ap_calc_bound_apqns(&apqns, &bound);
if (bound == apqns) {
- if (!completion_done(&ap_init_apqn_bindings_complete)) {
- complete_all(&ap_init_apqn_bindings_complete);
- AP_DBF_INFO("%s complete\n", __func__);
+ if (!completion_done(&ap_apqn_bindings_complete)) {
+ complete_all(&ap_apqn_bindings_complete);
+ ap_send_bindings_complete_uevent();
+ pr_debug("all apqn bindings complete\n");
}
- ap_send_bindings_complete_uevent();
}
}
}
@@ -790,27 +824,29 @@ static void ap_check_bindings_complete(void)
* -ETIME is returned. On failures negative return values are
* returned to the caller.
*/
-int ap_wait_init_apqn_bindings_complete(unsigned long timeout)
+int ap_wait_apqn_bindings_complete(unsigned long timeout)
{
+ int rc = 0;
long l;
- if (completion_done(&ap_init_apqn_bindings_complete))
+ if (completion_done(&ap_apqn_bindings_complete))
return 0;
if (timeout)
l = wait_for_completion_interruptible_timeout(
- &ap_init_apqn_bindings_complete, timeout);
+ &ap_apqn_bindings_complete, timeout);
else
l = wait_for_completion_interruptible(
- &ap_init_apqn_bindings_complete);
+ &ap_apqn_bindings_complete);
if (l < 0)
- return l == -ERESTARTSYS ? -EINTR : l;
+ rc = l == -ERESTARTSYS ? -EINTR : l;
else if (l == 0 && timeout)
- return -ETIME;
+ rc = -ETIME;
- return 0;
+ pr_debug("rc=%d\n", rc);
+ return rc;
}
-EXPORT_SYMBOL(ap_wait_init_apqn_bindings_complete);
+EXPORT_SYMBOL(ap_wait_apqn_bindings_complete);
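Illustrative sketch (not part of the patch): a hypothetical in-kernel user of the renamed ap_wait_apqn_bindings_complete(), waiting up to 60 seconds (timeout given in jiffies) before sending its first request.

static int example_wait_for_bindings(void)
{
	int rc;

	rc = ap_wait_apqn_bindings_complete(msecs_to_jiffies(60000));
	if (rc == -ETIME)
		pr_warn("APQN bindings not complete after 60s\n");

	return rc;	/* 0, -ETIME, or e.g. -EINTR when interrupted */
}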
static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
{
@@ -825,21 +861,38 @@ static int __ap_revise_reserved(struct device *dev, void *dummy)
int rc, card, queue, devres, drvres;
if (is_queue_dev(dev)) {
- card = AP_QID_CARD(to_ap_queue(dev)->qid);
- queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
- mutex_lock(&ap_perms_mutex);
- devres = test_bit_inv(card, ap_perms.apm) &&
- test_bit_inv(queue, ap_perms.aqm);
- mutex_unlock(&ap_perms_mutex);
- drvres = to_ap_drv(dev->driver)->flags
- & AP_DRIVER_FLAG_DEFAULT;
- if (!!devres != !!drvres) {
- AP_DBF_DBG("%s reprobing queue=%02x.%04x\n",
- __func__, card, queue);
- rc = device_reprobe(dev);
- if (rc)
- AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n",
- __func__, card, queue);
+ struct ap_driver *ap_drv = to_ap_drv(dev->driver);
+ struct ap_queue *aq = to_ap_queue(dev);
+ struct ap_device *ap_dev = &aq->ap_dev;
+
+ card = AP_QID_CARD(aq->qid);
+ queue = AP_QID_QUEUE(aq->qid);
+
+ if (ap_dev->driver_override) {
+ if (strcmp(ap_dev->driver_override,
+ ap_drv->driver.name)) {
+ pr_debug("reprobing queue=%02x.%04x\n", card, queue);
+ rc = device_reprobe(dev);
+ if (rc) {
+ AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n",
+ __func__, card, queue);
+ }
+ }
+ } else {
+ mutex_lock(&ap_attr_mutex);
+ devres = test_bit_inv(card, ap_perms.apm) &&
+ test_bit_inv(queue, ap_perms.aqm);
+ mutex_unlock(&ap_attr_mutex);
+ drvres = to_ap_drv(dev->driver)->flags
+ & AP_DRIVER_FLAG_DEFAULT;
+ if (!!devres != !!drvres) {
+ pr_debug("reprobing queue=%02x.%04x\n", card, queue);
+ rc = device_reprobe(dev);
+ if (rc) {
+ AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n",
+ __func__, card, queue);
+ }
+ }
}
}
@@ -857,22 +910,37 @@ static void ap_bus_revise_bindings(void)
* @card: the APID of the adapter card to check
* @queue: the APQI of the queue to check
*
- * Note: the ap_perms_mutex must be locked by the caller of this function.
+ * Note: the ap_attr_mutex must be locked by the caller of this function.
*
* Return: an int specifying whether the AP adapter is reserved for the host (1)
* or not (0).
*/
int ap_owned_by_def_drv(int card, int queue)
{
+ struct ap_queue *aq;
int rc = 0;
if (card < 0 || card >= AP_DEVICES || queue < 0 || queue >= AP_DOMAINS)
return -EINVAL;
+ aq = ap_get_qdev(AP_MKQID(card, queue));
+ if (aq) {
+ const struct device_driver *drv = aq->ap_dev.device.driver;
+ const struct ap_driver *ap_drv = to_ap_drv(drv);
+ bool override = !!aq->ap_dev.driver_override;
+
+ if (override && drv && ap_drv->flags & AP_DRIVER_FLAG_DEFAULT)
+ rc = 1;
+ put_device(&aq->ap_dev.device);
+ if (override)
+ goto out;
+ }
+
if (test_bit_inv(card, ap_perms.apm) &&
test_bit_inv(queue, ap_perms.aqm))
rc = 1;
+out:
return rc;
}
EXPORT_SYMBOL(ap_owned_by_def_drv);
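Illustrative sketch (not part of the patch): a hypothetical caller of ap_owned_by_def_drv(); as the kernel-doc above notes, ap_attr_mutex must be held across the call.

static bool example_apqn_is_host_reserved(int card, int queue)
{
	int rc;

	mutex_lock(&ap_attr_mutex);
	rc = ap_owned_by_def_drv(card, queue);
	mutex_unlock(&ap_attr_mutex);

	return rc == 1;	/* 1 = reserved for the host/default drivers */
}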
@@ -884,7 +952,7 @@ EXPORT_SYMBOL(ap_owned_by_def_drv);
* @apm: a bitmap specifying a set of APIDs comprising the APQNs to check
* @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check
*
- * Note: the ap_perms_mutex must be locked by the caller of this function.
+ * Note: the ap_attr_mutex must be locked by the caller of this function.
*
* Return: an int specifying whether each APQN is reserved for the host (1) or
* not (0)
@@ -895,12 +963,10 @@ int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
int card, queue, rc = 0;
for (card = 0; !rc && card < AP_DEVICES; card++)
- if (test_bit_inv(card, apm) &&
- test_bit_inv(card, ap_perms.apm))
+ if (test_bit_inv(card, apm))
for (queue = 0; !rc && queue < AP_DOMAINS; queue++)
- if (test_bit_inv(queue, aqm) &&
- test_bit_inv(queue, ap_perms.aqm))
- rc = 1;
+ if (test_bit_inv(queue, aqm))
+ rc = ap_owned_by_def_drv(card, queue);
return rc;
}
@@ -924,15 +990,27 @@ static int ap_device_probe(struct device *dev)
*/
card = AP_QID_CARD(to_ap_queue(dev)->qid);
queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
- mutex_lock(&ap_perms_mutex);
- devres = test_bit_inv(card, ap_perms.apm) &&
- test_bit_inv(queue, ap_perms.aqm);
- mutex_unlock(&ap_perms_mutex);
- drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
- if (!!devres != !!drvres)
- goto out;
+ if (ap_dev->driver_override) {
+ if (strcmp(ap_dev->driver_override,
+ ap_drv->driver.name))
+ goto out;
+ } else {
+ mutex_lock(&ap_attr_mutex);
+ devres = test_bit_inv(card, ap_perms.apm) &&
+ test_bit_inv(queue, ap_perms.aqm);
+ mutex_unlock(&ap_attr_mutex);
+ drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
+ if (!!devres != !!drvres)
+ goto out;
+ }
}
+ /*
+ * Rearm the bindings complete completion to trigger
+ * bindings complete when all devices are bound again
+ */
+ reinit_completion(&ap_apqn_bindings_complete);
+
/* Add queue/card to list of active queues/cards */
spin_lock_bh(&ap_queues_lock);
if (is_queue_dev(dev))
@@ -947,13 +1025,20 @@ static int ap_device_probe(struct device *dev)
if (is_queue_dev(dev))
hash_del(&to_ap_queue(dev)->hnode);
spin_unlock_bh(&ap_queues_lock);
- } else {
- ap_check_bindings_complete();
}
out:
- if (rc)
+ if (rc) {
put_device(dev);
+ } else {
+ if (is_queue_dev(dev)) {
+ pr_debug("queue=%02x.%04x new driver=%s\n",
+ card, queue, ap_drv->driver.name);
+ } else {
+ pr_debug("card=%02x new driver=%s\n",
+ to_ap_card(dev)->id, ap_drv->driver.name);
+ }
+ }
return rc;
}
@@ -1006,11 +1091,16 @@ int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
char *name)
{
struct device_driver *drv = &ap_drv->driver;
+ int rc;
drv->bus = &ap_bus_type;
drv->owner = owner;
drv->name = name;
- return driver_register(drv);
+ rc = driver_register(drv);
+
+ ap_check_bindings_complete();
+
+ return rc;
}
EXPORT_SYMBOL(ap_driver_register);
@@ -1020,34 +1110,85 @@ void ap_driver_unregister(struct ap_driver *ap_drv)
}
EXPORT_SYMBOL(ap_driver_unregister);
-void ap_bus_force_rescan(void)
+/*
+ * Enforce a synchronous AP bus rescan.
+ * Returns true if the bus scan finds a change in the AP configuration
+ * and AP devices have been added or deleted when this function returns.
+ */
+bool ap_bus_force_rescan(void)
{
- /* processing a asynchronous bus rescan */
- del_timer(&ap_config_timer);
- queue_work(system_long_wq, &ap_scan_work);
- flush_work(&ap_scan_work);
+ unsigned long scan_counter = atomic64_read(&ap_scan_bus_count);
+ bool rc = false;
+
+ pr_debug("> scan counter=%lu\n", scan_counter);
+
+ /* Only trigger AP bus scans after the initial scan is done */
+ if (scan_counter <= 0)
+ goto out;
+
+ /*
+ * There is one unlikely but nevertheless valid scenario where the
+ * thread holding the mutex may try to send some crypto load but
+ * all cards are offline so a rescan is triggered which causes
+ * a recursive call of ap_bus_force_rescan(). A simple return if
+ * the mutex is already locked by this thread solves this.
+ */
+ if (mutex_is_locked(&ap_scan_bus_mutex)) {
+ if (ap_scan_bus_task == current)
+ goto out;
+ }
+
+ /* Try to acquire the AP scan bus mutex */
+ if (mutex_trylock(&ap_scan_bus_mutex)) {
+ /* mutex acquired, run the AP bus scan */
+ ap_scan_bus_task = current;
+ ap_scan_bus_result = ap_scan_bus();
+ rc = ap_scan_bus_result;
+ ap_scan_bus_task = NULL;
+ mutex_unlock(&ap_scan_bus_mutex);
+ goto out;
+ }
+
+ /*
+ * Mutex acquire failed. So there is currently another task
+ * already running the AP bus scan. Then let's simple wait
+ * for the lock which means the other task has finished and
+ * stored the result in ap_scan_bus_result.
+ */
+ if (mutex_lock_interruptible(&ap_scan_bus_mutex)) {
+ /* some error occurred, ignore and go out */
+ goto out;
+ }
+ rc = ap_scan_bus_result;
+ mutex_unlock(&ap_scan_bus_mutex);
+
+out:
+ pr_debug("rc=%d\n", rc);
+ return rc;
}
EXPORT_SYMBOL(ap_bus_force_rescan);
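Illustrative sketch (not part of the patch): how an in-kernel user that finds no usable APQN might exploit the new boolean return value of ap_bus_force_rescan(); example_handle_no_devices() is hypothetical.

static int example_handle_no_devices(void)
{
	/* synchronous rescan; true means AP devices were added or removed */
	if (!ap_bus_force_rescan())
		return -ENODEV;	/* AP configuration unchanged, give up */

	/* ... re-walk the AP queues and retry the request ... */
	return 0;
}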
/*
* A config change has happened, force an ap bus rescan.
*/
-void ap_bus_cfg_chg(void)
+static int ap_bus_cfg_chg(struct notifier_block *nb,
+ unsigned long action, void *data)
{
- AP_DBF_DBG("%s config change, forcing bus rescan\n", __func__);
+ if (action != CHSC_NOTIFY_AP_CFG)
+ return NOTIFY_DONE;
+
+ pr_debug("config change, forcing bus rescan\n");
ap_bus_force_rescan();
+
+ return NOTIFY_OK;
}
-/*
- * hex2bitmap() - parse hex mask string and set bitmap.
- * Valid strings are "0x012345678" with at least one valid hex number.
- * Rest of the bitmap to the right is padded with 0. No spaces allowed
- * within the string, the leading 0x may be omitted.
- * Returns the bitmask with exactly the bits set as given by the hex
- * string (both in big endian order).
- */
-static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
+static struct notifier_block ap_bus_nb = {
+ .notifier_call = ap_bus_cfg_chg,
+};
+
+int ap_hex2bitmap(const char *str, unsigned long *bitmap, int bits)
{
int i, n, b;
@@ -1074,6 +1215,7 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
return -EINVAL;
return 0;
}
+EXPORT_SYMBOL(ap_hex2bitmap);
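Illustrative sketch (not part of the patch): parsing an adapter mask with the newly exported ap_hex2bitmap(). The mask string uses the same big endian convention as the apmask/aqmask sysfs attributes, with the remainder of the bitmap padded with zeros.

static int example_parse_adapter_mask(void)
{
	DECLARE_BITMAP(apm, AP_DEVICES);

	/* "0x80..." sets bit 0 only, i.e. adapter 0 */
	return ap_hex2bitmap("0x8000000000000000", apm, AP_DEVICES);
}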
/*
* modify_bitmap() - parse bitmask argument and modify an existing
@@ -1094,7 +1236,7 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
*/
static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
{
- int a, i, z;
+ unsigned long a, i, z;
char *np, sign;
/* bits needs to be a multiple of 8 */
@@ -1139,7 +1281,7 @@ static int ap_parse_bitmap_str(const char *str, unsigned long *bitmap, int bits,
rc = modify_bitmap(str, newmap, bits);
} else {
memset(newmap, 0, size);
- rc = hex2bitmap(str, newmap, bits);
+ rc = ap_hex2bitmap(str, newmap, bits);
}
return rc;
}
@@ -1205,7 +1347,7 @@ static BUS_ATTR_RW(ap_domain);
static ssize_t ap_control_domain_mask_show(const struct bus_type *bus, char *buf)
{
- if (!ap_qci_info) /* QCI not supported */
+ if (!ap_qci_info->flags) /* QCI not supported */
return sysfs_emit(buf, "not supported\n");
return sysfs_emit(buf, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
@@ -1219,7 +1361,7 @@ static BUS_ATTR_RO(ap_control_domain_mask);
static ssize_t ap_usage_domain_mask_show(const struct bus_type *bus, char *buf)
{
- if (!ap_qci_info) /* QCI not supported */
+ if (!ap_qci_info->flags) /* QCI not supported */
return sysfs_emit(buf, "not supported\n");
return sysfs_emit(buf, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
@@ -1233,7 +1375,7 @@ static BUS_ATTR_RO(ap_usage_domain_mask);
static ssize_t ap_adapter_mask_show(const struct bus_type *bus, char *buf)
{
- if (!ap_qci_info) /* QCI not supported */
+ if (!ap_qci_info->flags) /* QCI not supported */
return sysfs_emit(buf, "not supported\n");
return sysfs_emit(buf, "0x%08x%08x%08x%08x%08x%08x%08x%08x\n",
@@ -1254,7 +1396,7 @@ static BUS_ATTR_RO(ap_interrupts);
static ssize_t config_time_show(const struct bus_type *bus, char *buf)
{
- return sysfs_emit(buf, "%d\n", ap_config_time);
+ return sysfs_emit(buf, "%d\n", ap_scan_bus_time);
}
static ssize_t config_time_store(const struct bus_type *bus,
@@ -1264,8 +1406,8 @@ static ssize_t config_time_store(const struct bus_type *bus,
if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
return -EINVAL;
- ap_config_time = time;
- mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
+ ap_scan_bus_time = time;
+ mod_timer(&ap_scan_bus_timer, jiffies + ap_scan_bus_time * HZ);
return count;
}
@@ -1349,12 +1491,12 @@ static ssize_t apmask_show(const struct bus_type *bus, char *buf)
{
int rc;
- if (mutex_lock_interruptible(&ap_perms_mutex))
+ if (mutex_lock_interruptible(&ap_attr_mutex))
return -ERESTARTSYS;
rc = sysfs_emit(buf, "0x%016lx%016lx%016lx%016lx\n",
ap_perms.apm[0], ap_perms.apm[1],
ap_perms.apm[2], ap_perms.apm[3]);
- mutex_unlock(&ap_perms_mutex);
+ mutex_unlock(&ap_attr_mutex);
return rc;
}
@@ -1364,6 +1506,7 @@ static int __verify_card_reservations(struct device_driver *drv, void *data)
int rc = 0;
struct ap_driver *ap_drv = to_ap_drv(drv);
unsigned long *newapm = (unsigned long *)data;
+ unsigned long aqm_any[BITS_TO_LONGS(AP_DOMAINS)];
/*
* increase the driver's module refcounter to be sure it is not
@@ -1373,7 +1516,8 @@ static int __verify_card_reservations(struct device_driver *drv, void *data)
return 0;
if (ap_drv->in_use) {
- rc = ap_drv->in_use(newapm, ap_perms.aqm);
+ bitmap_fill(aqm_any, AP_DOMAINS);
+ rc = ap_drv->in_use(newapm, aqm_any);
if (rc)
rc = -EBUSY;
}
@@ -1402,18 +1546,31 @@ static int apmask_commit(unsigned long *newapm)
memcpy(ap_perms.apm, newapm, APMASKSIZE);
+ /*
+ * Update ap_apmask_aqmask_in_use. Note that the
+ * ap_attr_mutex has to be obtained here.
+ */
+ ap_apmask_aqmask_in_use =
+ bitmap_full(ap_perms.apm, AP_DEVICES) &&
+ bitmap_full(ap_perms.aqm, AP_DOMAINS) ?
+ false : true;
+
return 0;
}
static ssize_t apmask_store(const struct bus_type *bus, const char *buf,
size_t count)
{
- int rc, changes = 0;
DECLARE_BITMAP(newapm, AP_DEVICES);
+ int rc = -EINVAL, changes = 0;
- if (mutex_lock_interruptible(&ap_perms_mutex))
+ if (mutex_lock_interruptible(&ap_attr_mutex))
return -ERESTARTSYS;
+ /* Do not allow apmask/aqmask if driver override is active */
+ if (ap_driver_override_ctr)
+ goto done;
+
rc = ap_parse_bitmap_str(buf, ap_perms.apm, AP_DEVICES, newapm);
if (rc)
goto done;
@@ -1423,7 +1580,7 @@ static ssize_t apmask_store(const struct bus_type *bus, const char *buf,
rc = apmask_commit(newapm);
done:
- mutex_unlock(&ap_perms_mutex);
+ mutex_unlock(&ap_attr_mutex);
if (rc)
return rc;
@@ -1441,12 +1598,12 @@ static ssize_t aqmask_show(const struct bus_type *bus, char *buf)
{
int rc;
- if (mutex_lock_interruptible(&ap_perms_mutex))
+ if (mutex_lock_interruptible(&ap_attr_mutex))
return -ERESTARTSYS;
rc = sysfs_emit(buf, "0x%016lx%016lx%016lx%016lx\n",
ap_perms.aqm[0], ap_perms.aqm[1],
ap_perms.aqm[2], ap_perms.aqm[3]);
- mutex_unlock(&ap_perms_mutex);
+ mutex_unlock(&ap_attr_mutex);
return rc;
}
@@ -1456,6 +1613,7 @@ static int __verify_queue_reservations(struct device_driver *drv, void *data)
int rc = 0;
struct ap_driver *ap_drv = to_ap_drv(drv);
unsigned long *newaqm = (unsigned long *)data;
+ unsigned long apm_any[BITS_TO_LONGS(AP_DEVICES)];
/*
* increase the driver's module refcounter to be sure it is not
@@ -1465,7 +1623,8 @@ static int __verify_queue_reservations(struct device_driver *drv, void *data)
return 0;
if (ap_drv->in_use) {
- rc = ap_drv->in_use(ap_perms.apm, newaqm);
+ bitmap_fill(apm_any, AP_DEVICES);
+ rc = ap_drv->in_use(apm_any, newaqm);
if (rc)
rc = -EBUSY;
}
@@ -1494,18 +1653,31 @@ static int aqmask_commit(unsigned long *newaqm)
memcpy(ap_perms.aqm, newaqm, AQMASKSIZE);
+ /*
+ * Update ap_apmask_aqmask_in_use. Note that the
+ * ap_attr_mutex has to be obtained here.
+ */
+ ap_apmask_aqmask_in_use =
+ bitmap_full(ap_perms.apm, AP_DEVICES) &&
+ bitmap_full(ap_perms.aqm, AP_DOMAINS) ?
+ false : true;
+
return 0;
}
static ssize_t aqmask_store(const struct bus_type *bus, const char *buf,
size_t count)
{
- int rc, changes = 0;
DECLARE_BITMAP(newaqm, AP_DOMAINS);
+ int rc = -EINVAL, changes = 0;
- if (mutex_lock_interruptible(&ap_perms_mutex))
+ if (mutex_lock_interruptible(&ap_attr_mutex))
return -ERESTARTSYS;
+ /* Do not allow apmask/aqmask if driver override is active */
+ if (ap_driver_override_ctr)
+ goto done;
+
rc = ap_parse_bitmap_str(buf, ap_perms.aqm, AP_DOMAINS, newaqm);
if (rc)
goto done;
@@ -1515,7 +1687,7 @@ static ssize_t aqmask_store(const struct bus_type *bus, const char *buf,
rc = aqmask_commit(newaqm);
done:
- mutex_unlock(&ap_perms_mutex);
+ mutex_unlock(&ap_attr_mutex);
if (rc)
return rc;
@@ -1562,11 +1734,20 @@ static ssize_t bindings_show(const struct bus_type *bus, char *buf)
static BUS_ATTR_RO(bindings);
+static ssize_t bindings_complete_count_show(const struct bus_type *bus,
+ char *buf)
+{
+ return sysfs_emit(buf, "%llu\n",
+ atomic64_read(&ap_bindings_complete_count));
+}
+
+static BUS_ATTR_RO(bindings_complete_count);
+
static ssize_t features_show(const struct bus_type *bus, char *buf)
{
int n = 0;
- if (!ap_qci_info) /* QCI not supported */
+ if (!ap_qci_info->flags) /* QCI not supported */
return sysfs_emit(buf, "-\n");
if (ap_qci_info->apsc)
@@ -1602,12 +1783,13 @@ static struct attribute *ap_bus_attrs[] = {
&bus_attr_aqmask.attr,
&bus_attr_scans.attr,
&bus_attr_bindings.attr,
+ &bus_attr_bindings_complete_count.attr,
&bus_attr_features.attr,
NULL,
};
ATTRIBUTE_GROUPS(ap_bus);
-static struct bus_type ap_bus_type = {
+static const struct bus_type ap_bus_type = {
.name = "ap",
.bus_groups = ap_bus_groups,
.match = &ap_bus_match,
@@ -1798,12 +1980,12 @@ static inline void ap_scan_rm_card_dev_and_queue_devs(struct ap_card *ac)
*/
static inline void ap_scan_domains(struct ap_card *ac)
{
- int rc, dom, depth, type, ml;
+ struct ap_tapq_hwinfo hwinfo;
bool decfg, chkstop;
struct ap_queue *aq;
struct device *dev;
- unsigned int func;
ap_qid_t qid;
+ int rc, dom;
/*
* Go through the configuration for the domains and compare them
@@ -1826,8 +2008,7 @@ static inline void ap_scan_domains(struct ap_card *ac)
goto put_dev_and_continue;
}
/* domain is valid, get info from this APQN */
- rc = ap_queue_info(qid, &type, &func, &depth,
- &ml, &decfg, &chkstop);
+ rc = ap_queue_info(qid, &hwinfo, &decfg, &chkstop);
switch (rc) {
case -1:
if (dev) {
@@ -1843,15 +2024,15 @@ static inline void ap_scan_domains(struct ap_card *ac)
}
/* if no queue device exists, create a new one */
if (!aq) {
- aq = ap_queue_create(qid, ac->ap_dev.device_type);
+ aq = ap_queue_create(qid, ac);
if (!aq) {
AP_DBF_WARN("%s(%d,%d) ap_queue_create() failed\n",
__func__, ac->id, dom);
continue;
}
- aq->card = ac;
aq->config = !decfg;
aq->chkstop = chkstop;
+ aq->se_bstate = hwinfo.bs;
dev = &aq->ap_dev.device;
dev->bus = &ap_bus_type;
dev->parent = &ac->ap_dev.device;
@@ -1865,19 +2046,24 @@ static inline void ap_scan_domains(struct ap_card *ac)
}
/* get it and thus adjust reference counter */
get_device(dev);
- if (decfg)
+ if (decfg) {
AP_DBF_INFO("%s(%d,%d) new (decfg) queue dev created\n",
__func__, ac->id, dom);
- else if (chkstop)
+ } else if (chkstop) {
AP_DBF_INFO("%s(%d,%d) new (chkstop) queue dev created\n",
__func__, ac->id, dom);
- else
+ } else {
+ /* nudge the queue's state machine */
+ ap_queue_init_state(aq);
AP_DBF_INFO("%s(%d,%d) new queue dev created\n",
__func__, ac->id, dom);
+ }
goto put_dev_and_continue;
}
/* handle state changes on already existing queue device */
spin_lock_bh(&aq->lock);
+ /* SE bind state */
+ aq->se_bstate = hwinfo.bs;
/* checkstop state */
if (chkstop && !aq->chkstop) {
/* checkstop on */
@@ -1887,21 +2073,19 @@ static inline void ap_scan_domains(struct ap_card *ac)
aq->last_err_rc = AP_RESPONSE_CHECKSTOPPED;
}
spin_unlock_bh(&aq->lock);
- AP_DBF_DBG("%s(%d,%d) queue dev checkstop on\n",
- __func__, ac->id, dom);
+ pr_debug("(%d,%d) queue dev checkstop on\n",
+ ac->id, dom);
/* 'receive' pending messages with -EAGAIN */
ap_flush_queue(aq);
goto put_dev_and_continue;
} else if (!chkstop && aq->chkstop) {
/* checkstop off */
aq->chkstop = false;
- if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
- aq->dev_state = AP_DEV_STATE_OPERATING;
- aq->sm_state = AP_SM_STATE_RESET_START;
- }
+ if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
+ _ap_queue_init_state(aq);
spin_unlock_bh(&aq->lock);
- AP_DBF_DBG("%s(%d,%d) queue dev checkstop off\n",
- __func__, ac->id, dom);
+ pr_debug("(%d,%d) queue dev checkstop off\n",
+ ac->id, dom);
goto put_dev_and_continue;
}
/* config state change */
@@ -1913,8 +2097,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
aq->last_err_rc = AP_RESPONSE_DECONFIGURED;
}
spin_unlock_bh(&aq->lock);
- AP_DBF_DBG("%s(%d,%d) queue dev config off\n",
- __func__, ac->id, dom);
+ pr_debug("(%d,%d) queue dev config off\n",
+ ac->id, dom);
ap_send_config_uevent(&aq->ap_dev, aq->config);
/* 'receive' pending messages with -EAGAIN */
ap_flush_queue(aq);
@@ -1922,13 +2106,11 @@ static inline void ap_scan_domains(struct ap_card *ac)
} else if (!decfg && !aq->config) {
/* config on this queue device */
aq->config = true;
- if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
- aq->dev_state = AP_DEV_STATE_OPERATING;
- aq->sm_state = AP_SM_STATE_RESET_START;
- }
+ if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
+ _ap_queue_init_state(aq);
spin_unlock_bh(&aq->lock);
- AP_DBF_DBG("%s(%d,%d) queue dev config on\n",
- __func__, ac->id, dom);
+ pr_debug("(%d,%d) queue dev config on\n",
+ ac->id, dom);
ap_send_config_uevent(&aq->ap_dev, aq->config);
goto put_dev_and_continue;
}
@@ -1955,11 +2137,11 @@ put_dev_and_continue:
*/
static inline void ap_scan_adapter(int ap)
{
- int rc, dom, depth, type, comp_type, ml;
+ struct ap_tapq_hwinfo hwinfo;
+ int rc, dom, comp_type;
bool decfg, chkstop;
struct ap_card *ac;
struct device *dev;
- unsigned int func;
ap_qid_t qid;
/* Is there currently a card device for this adapter ? */
@@ -1989,8 +2171,7 @@ static inline void ap_scan_adapter(int ap)
for (dom = 0; dom <= ap_max_domain_id; dom++)
if (ap_test_config_usage_domain(dom)) {
qid = AP_MKQID(ap, dom);
- if (ap_queue_info(qid, &type, &func, &depth,
- &ml, &decfg, &chkstop) > 0)
+ if (ap_queue_info(qid, &hwinfo, &decfg, &chkstop) > 0)
break;
}
if (dom > ap_max_domain_id) {
@@ -2001,12 +2182,12 @@ static inline void ap_scan_adapter(int ap)
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
} else {
- AP_DBF_DBG("%s(%d) no type info (no APQN found), ignored\n",
- __func__, ap);
+ pr_debug("(%d) no type info (no APQN found), ignored\n",
+ ap);
}
return;
}
- if (!type) {
+ if (!hwinfo.at) {
/* No adapter type info available, an unusable adapter */
if (ac) {
AP_DBF_INFO("%s(%d) no valid type (0) info, rm card and queue devs\n",
@@ -2014,23 +2195,22 @@ static inline void ap_scan_adapter(int ap)
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
} else {
- AP_DBF_DBG("%s(%d) no valid type (0) info, ignored\n",
- __func__, ap);
+ pr_debug("(%d) no valid type (0) info, ignored\n", ap);
}
return;
}
+ hwinfo.value &= TAPQ_CARD_HWINFO_MASK; /* filter card specific hwinfo */
if (ac) {
/* Check APQN against existing card device for changes */
- if (ac->raw_hwtype != type) {
+ if (ac->hwinfo.at != hwinfo.at) {
AP_DBF_INFO("%s(%d) hwtype %d changed, rm card and queue devs\n",
- __func__, ap, type);
+ __func__, ap, hwinfo.at);
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
ac = NULL;
- } else if ((ac->functions & TAPQ_CARD_FUNC_CMP_MASK) !=
- (func & TAPQ_CARD_FUNC_CMP_MASK)) {
+ } else if (ac->hwinfo.fac != hwinfo.fac) {
AP_DBF_INFO("%s(%d) functions 0x%08x changed, rm card and queue devs\n",
- __func__, ap, func);
+ __func__, ap, hwinfo.fac);
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
ac = NULL;
@@ -2064,13 +2244,13 @@ static inline void ap_scan_adapter(int ap)
if (!ac) {
/* Build a new card device */
- comp_type = ap_get_compatible_type(qid, type, func);
+ comp_type = ap_get_compatible_type(qid, hwinfo.at, hwinfo.fac);
if (!comp_type) {
AP_DBF_WARN("%s(%d) type %d, can't get compatibility type\n",
- __func__, ap, type);
+ __func__, ap, hwinfo.at);
return;
}
- ac = ap_card_create(ap, depth, type, comp_type, func, ml);
+ ac = ap_card_create(ap, hwinfo, comp_type);
if (!ac) {
AP_DBF_WARN("%s(%d) ap_card_create() failed\n",
__func__, ap);
@@ -2101,13 +2281,13 @@ static inline void ap_scan_adapter(int ap)
get_device(dev);
if (decfg)
AP_DBF_INFO("%s(%d) new (decfg) card dev type=%d func=0x%08x created\n",
- __func__, ap, type, func);
+ __func__, ap, hwinfo.at, hwinfo.fac);
else if (chkstop)
AP_DBF_INFO("%s(%d) new (chkstop) card dev type=%d func=0x%08x created\n",
- __func__, ap, type, func);
+ __func__, ap, hwinfo.at, hwinfo.fac);
else
AP_DBF_INFO("%s(%d) new card dev type=%d func=0x%08x created\n",
- __func__, ap, type, func);
+ __func__, ap, hwinfo.at, hwinfo.fac);
}
/* Verify the domains and the queue devices for this card */
@@ -2129,33 +2309,90 @@ static inline void ap_scan_adapter(int ap)
*/
static bool ap_get_configuration(void)
{
- if (!ap_qci_info) /* QCI not supported */
+ if (!ap_qci_info->flags) /* QCI not supported */
return false;
memcpy(ap_qci_info_old, ap_qci_info, sizeof(*ap_qci_info));
- ap_fetch_qci_info(ap_qci_info);
+ ap_qci(ap_qci_info);
return memcmp(ap_qci_info, ap_qci_info_old,
sizeof(struct ap_config_info)) != 0;
}
+/*
+ * ap_config_has_new_aps - Check current against old qci info if
+ * new adapters have appeared. Returns true if at least one new
+ * adapter in the apm mask is showing up. Existing adapters or
+ * receding adapters are not counted.
+ */
+static bool ap_config_has_new_aps(void)
+{
+
+ unsigned long m[BITS_TO_LONGS(AP_DEVICES)];
+
+ if (!ap_qci_info->flags)
+ return false;
+
+ bitmap_andnot(m, (unsigned long *)ap_qci_info->apm,
+ (unsigned long *)ap_qci_info_old->apm, AP_DEVICES);
+ if (!bitmap_empty(m, AP_DEVICES))
+ return true;
+
+ return false;
+}
+
+/*
+ * ap_config_has_new_doms - Check current against old qci info if
+ * new (usage) domains have appeared. Returns true if at least one
+ * new domain in the aqm mask is showing up. Existing domains or
+ * receding domains are not counted.
+ */
+static bool ap_config_has_new_doms(void)
+{
+ unsigned long m[BITS_TO_LONGS(AP_DOMAINS)];
+
+ if (!ap_qci_info->flags)
+ return false;
+
+ bitmap_andnot(m, (unsigned long *)ap_qci_info->aqm,
+ (unsigned long *)ap_qci_info_old->aqm, AP_DOMAINS);
+ if (!bitmap_empty(m, AP_DOMAINS))
+ return true;
+
+ return false;
+}
+
/**
* ap_scan_bus(): Scan the AP bus for new devices
- * Runs periodically, workqueue timer (ap_config_time)
- * @unused: Unused pointer.
+ * Always run under mutex ap_scan_bus_mutex protection
+ * which needs to get locked/unlocked by the caller!
+ * Returns true if any config change has been detected
+ * during the scan, otherwise false.
*/
-static void ap_scan_bus(struct work_struct *unused)
+static bool ap_scan_bus(void)
{
- int ap, config_changed = 0;
+ bool config_changed;
+ int ap;
+
+ pr_debug(">\n");
- /* config change notify */
+ /* (re-)fetch configuration via QCI */
config_changed = ap_get_configuration();
- if (config_changed)
+ if (config_changed) {
+ if (ap_config_has_new_aps() || ap_config_has_new_doms()) {
+ /*
+ * Appearance of new adapters and/or domains needs to
+ * build new ap devices which need to get bound to a
+ * device driver. Thus reset the APQN bindings complete
+ * completion.
+ */
+ reinit_completion(&ap_apqn_bindings_complete);
+ }
+ /* post a config change notify */
notify_config_changed();
+ }
ap_select_domain();
- AP_DBF_DBG("%s running\n", __func__);
-
/* loop over all possible adapters */
for (ap = 0; ap <= ap_max_adapter_id; ap++)
ap_scan_adapter(ap);
@@ -2178,23 +2415,132 @@ static void ap_scan_bus(struct work_struct *unused)
}
if (atomic64_inc_return(&ap_scan_bus_count) == 1) {
- AP_DBF_DBG("%s init scan complete\n", __func__);
+ pr_debug("init scan complete\n");
ap_send_init_scan_done_uevent();
- ap_check_bindings_complete();
}
- mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
+ ap_check_bindings_complete();
+
+ mod_timer(&ap_scan_bus_timer, jiffies + ap_scan_bus_time * HZ);
+
+ pr_debug("< config_changed=%d\n", config_changed);
+
+ return config_changed;
+}
+
+/*
+ * Callback for the ap_scan_bus_timer
+ * Runs periodically, workqueue timer (ap_scan_bus_time)
+ */
+static void ap_scan_bus_timer_callback(struct timer_list *unused)
+{
+ /*
+ * schedule work into the system long wq which when
+ * the work is finally executed, calls the AP bus scan.
+ */
+ queue_work(system_long_wq, &ap_scan_bus_work);
+}
+
+/*
+ * Callback for the ap_scan_bus_work
+ */
+static void ap_scan_bus_wq_callback(struct work_struct *unused)
+{
+ /*
+ * Try to invoke an ap_scan_bus(). If the mutex acquisition
+ * fails there is currently another task already running the
+ * AP bus scan and there is no need to wait and re-trigger the
+ * scan again. Please note at the end of the scan bus function
+ * the AP scan bus timer is re-armed which then triggers the
+ * ap_scan_bus_timer_callback which enqueues a work into the
+ * system_long_wq which invokes this function here again.
+ */
+ if (mutex_trylock(&ap_scan_bus_mutex)) {
+ ap_scan_bus_task = current;
+ ap_scan_bus_result = ap_scan_bus();
+ ap_scan_bus_task = NULL;
+ mutex_unlock(&ap_scan_bus_mutex);
+ }
+}
+
+static inline void __exit ap_async_exit(void)
+{
+ if (ap_thread_flag)
+ ap_poll_thread_stop();
+ chsc_notifier_unregister(&ap_bus_nb);
+ cancel_work(&ap_scan_bus_work);
+ hrtimer_cancel(&ap_poll_timer);
+ timer_delete(&ap_scan_bus_timer);
+}
+
+static inline int __init ap_async_init(void)
+{
+ int rc;
+
+ /* Setup the AP bus rescan timer. */
+ timer_setup(&ap_scan_bus_timer, ap_scan_bus_timer_callback, 0);
+
+ /*
+ * Setup the high resolution poll timer.
+ * If we are running under z/VM adjust polling to z/VM polling rate.
+ */
+ if (machine_is_vm())
+ poll_high_timeout = 1500000;
+ hrtimer_setup(&ap_poll_timer, ap_poll_timeout, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+
+ queue_work(system_long_wq, &ap_scan_bus_work);
+
+ rc = chsc_notifier_register(&ap_bus_nb);
+ if (rc)
+ goto out;
+
+ /* Start the low priority AP bus poll thread. */
+ if (!ap_thread_flag)
+ return 0;
+
+ rc = ap_poll_thread_start();
+ if (rc)
+ goto out_notifier;
+
+ return 0;
+
+out_notifier:
+ chsc_notifier_unregister(&ap_bus_nb);
+out:
+ cancel_work(&ap_scan_bus_work);
+ hrtimer_cancel(&ap_poll_timer);
+ timer_delete(&ap_scan_bus_timer);
+ return rc;
+}
+
+static inline void ap_irq_exit(void)
+{
+ if (ap_irq_flag)
+ unregister_adapter_interrupt(&ap_airq);
}
-static void ap_config_timeout(struct timer_list *unused)
+static inline int __init ap_irq_init(void)
{
- queue_work(system_long_wq, &ap_scan_work);
+ int rc;
+
+ if (!ap_interrupts_available() || !ap_useirq)
+ return 0;
+
+ rc = register_adapter_interrupt(&ap_airq);
+ ap_irq_flag = (rc == 0);
+
+ return rc;
}
-static int __init ap_debug_init(void)
+static inline void ap_debug_exit(void)
+{
+ debug_unregister(ap_dbf_info);
+}
+
+static inline int __init ap_debug_init(void)
{
ap_dbf_info = debug_register("ap", 2, 1,
- DBF_MAX_SPRINTF_ARGS * sizeof(long));
+ AP_DBF_MAX_SPRINTF_ARGS * sizeof(long));
debug_register_view(ap_dbf_info, &debug_sprintf_view);
debug_set_level(ap_dbf_info, DBF_ERR);
@@ -2212,14 +2558,14 @@ static void __init ap_perms_init(void)
if (apm_str) {
memset(&ap_perms.apm, 0, sizeof(ap_perms.apm));
ap_parse_mask_str(apm_str, ap_perms.apm, AP_DEVICES,
- &ap_perms_mutex);
+ &ap_attr_mutex);
}
/* aqm kernel parameter string */
if (aqm_str) {
memset(&ap_perms.aqm, 0, sizeof(ap_perms.aqm));
ap_parse_mask_str(aqm_str, ap_perms.aqm, AP_DOMAINS,
- &ap_perms_mutex);
+ &ap_attr_mutex);
}
}
@@ -2232,18 +2578,26 @@ static int __init ap_module_init(void)
{
int rc;
- rc = ap_debug_init();
- if (rc)
- return rc;
-
if (!ap_instructions_available()) {
pr_warn("The hardware system does not support AP instructions\n");
return -ENODEV;
}
+ rc = ap_debug_init();
+ if (rc)
+ return rc;
+
/* init ap_queue hashtable */
hash_init(ap_queues);
+ /* create ap msg buffer memory pool */
+ ap_msg_pool = mempool_create_kmalloc_pool(ap_msg_pool_min_items,
+ AP_DEFAULT_MAX_MSG_SIZE);
+ if (!ap_msg_pool) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
/* set up the AP permissions (ioctls, ap and aq masks) */
ap_perms_init();
@@ -2259,12 +2613,6 @@ static int __init ap_module_init(void)
ap_domain_index = -1;
}
- /* enable interrupts if available */
- if (ap_interrupts_available() && ap_useirq) {
- rc = register_adapter_interrupt(&ap_airq);
- ap_irq_flag = (rc == 0);
- }
-
/* Create /sys/bus/ap. */
rc = bus_register(&ap_bus_type);
if (rc)
@@ -2277,38 +2625,39 @@ static int __init ap_module_init(void)
goto out_bus;
ap_root_device->bus = &ap_bus_type;
- /* Setup the AP bus rescan timer. */
- timer_setup(&ap_config_timer, ap_config_timeout, 0);
-
- /*
- * Setup the high resolution poll timer.
- * If we are running under z/VM adjust polling to z/VM polling rate.
- */
- if (MACHINE_IS_VM)
- poll_high_timeout = 1500000;
- hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- ap_poll_timer.function = ap_poll_timeout;
-
- /* Start the low priority AP bus poll thread. */
- if (ap_thread_flag) {
- rc = ap_poll_thread_start();
- if (rc)
- goto out_work;
- }
+ /* enable interrupts if available */
+ rc = ap_irq_init();
+ if (rc)
+ goto out_device;
- queue_work(system_long_wq, &ap_scan_work);
+ /* Setup asynchronous work (timers, workqueue, etc). */
+ rc = ap_async_init();
+ if (rc)
+ goto out_irq;
return 0;
-out_work:
- hrtimer_cancel(&ap_poll_timer);
+out_irq:
+ ap_irq_exit();
+out_device:
root_device_unregister(ap_root_device);
out_bus:
bus_unregister(&ap_bus_type);
out:
- if (ap_irq_flag)
- unregister_adapter_interrupt(&ap_airq);
- kfree(ap_qci_info);
+ mempool_destroy(ap_msg_pool);
+ ap_debug_exit();
return rc;
}
-device_initcall(ap_module_init);
+
+static void __exit ap_module_exit(void)
+{
+ ap_async_exit();
+ ap_irq_exit();
+ root_device_unregister(ap_root_device);
+ bus_unregister(&ap_bus_type);
+ mempool_destroy(ap_msg_pool);
+ ap_debug_exit();
+}
+
+module_init(ap_module_init);
+module_exit(ap_module_exit);