summary | refs | log | tree | commit | diff
path: root/drivers/s390/crypto/ap_bus.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/s390/crypto/ap_bus.c')
-rw-r--r--  drivers/s390/crypto/ap_bus.c  | 352
1 files changed, 270 insertions, 82 deletions
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 898865be0dad..a445494fd2be 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -11,11 +11,11 @@
* Adjunct processor bus.
*/
-#define KMSG_COMPONENT "ap"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "ap: " fmt
#include <linux/kernel_stat.h>
#include <linux/moduleparam.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/err.h>
@@ -26,6 +26,7 @@
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
+#include <asm/machine.h>
#include <asm/airq.h>
#include <asm/tpi.h>
#include <linux/atomic.h>
@@ -40,6 +41,7 @@
#include <linux/module.h>
#include <asm/uv.h>
#include <asm/chsc.h>
+#include <linux/mempool.h>
#include "ap_bus.h"
#include "ap_debug.h"
@@ -83,8 +85,17 @@ DEFINE_SPINLOCK(ap_queues_lock);
/* Default permissions (ioctl, card and domain masking) */
struct ap_perms ap_perms;
EXPORT_SYMBOL(ap_perms);
-DEFINE_MUTEX(ap_perms_mutex);
-EXPORT_SYMBOL(ap_perms_mutex);
+/* true if apmask and/or aqmask are NOT default */
+bool ap_apmask_aqmask_in_use;
+/* counter for how many driver_overrides are currently active */
+int ap_driver_override_ctr;
+/*
+ * Mutex for consistent read and write of the ap_perms struct,
+ * ap_apmask_aqmask_in_use, ap_driver_override_ctr
+ * and the ap bus sysfs attributes apmask and aqmask.
+ */
+DEFINE_MUTEX(ap_attr_mutex);
+EXPORT_SYMBOL(ap_attr_mutex);
/* # of bindings complete since init */
static atomic64_t ap_bindings_complete_count = ATOMIC64_INIT(0);
@@ -102,11 +113,33 @@ static struct ap_config_info *const ap_qci_info_old = &qci[1];
debug_info_t *ap_dbf_info;
/*
+ * There is a need for a do-not-allocate-memory path through the AP bus
+ * layer. The pkey layer may be triggered via the in-kernel interface from
+ * a protected key crypto algorithm (namely PAES) to convert a secure key
+ * into a protected key. This happens in a workqueue context, so sleeping
+ * is allowed but memory allocations causing IO operations are not permitted.
+ * To accomplish this, an AP message memory pool with pre-allocated space
+ * is established. When ap_init_apmsg() with use_mempool set to true is
+ * called, instead of kmalloc() the ap message buffer is allocated from
+ * the ap_msg_pool. This pool only holds a limited amount of buffers:
+ * ap_msg_pool_min_items with the item size AP_DEFAULT_MAX_MSG_SIZE and
+ * exactly one of these items (if available) is returned if ap_init_apmsg()
+ * with the use_mempool arg set to true is called. When this pool is exhausted
+ * and use_mempool is set true, ap_init_apmsg() returns -ENOMEM without
+ * any attempt to allocate memory and the caller has to deal with that.
+ */
+static mempool_t *ap_msg_pool;
+static unsigned int ap_msg_pool_min_items = 8;
+module_param_named(msgpool_min_items, ap_msg_pool_min_items, uint, 0440);
+MODULE_PARM_DESC(msgpool_min_items, "AP message pool minimal items");
+
+/*
* AP bus rescan related things.
*/
static bool ap_scan_bus(void);
static bool ap_scan_bus_result; /* result of last ap_scan_bus() */
static DEFINE_MUTEX(ap_scan_bus_mutex); /* mutex ap_scan_bus() invocations */
+static struct task_struct *ap_scan_bus_task; /* thread holding the scan mutex */
static atomic64_t ap_scan_bus_count; /* counter ap_scan_bus() invocations */
static int ap_scan_bus_time = AP_CONFIG_TIME;
static struct timer_list ap_scan_bus_timer;
@@ -404,7 +437,7 @@ void ap_wait(enum ap_sm_wait wait)
*/
void ap_request_timeout(struct timer_list *t)
{
- struct ap_queue *aq = from_timer(aq, t, timeout);
+ struct ap_queue *aq = timer_container_of(aq, t, timeout);
spin_lock_bh(&aq->lock);
ap_wait(ap_sm_event(aq, AP_SM_EVENT_TIMEOUT));
@@ -452,7 +485,7 @@ static void ap_tasklet_fn(unsigned long dummy)
* important that no requests on any AP get lost.
*/
if (ap_irq_flag)
- xchg(ap_airq.lsi_ptr, 0);
+ WRITE_ONCE(*ap_airq.lsi_ptr, 0);
spin_lock_bh(&ap_queues_lock);
hash_for_each(ap_queues, bkt, aq, hnode) {
@@ -545,6 +578,48 @@ static void ap_poll_thread_stop(void)
#define is_card_dev(x) ((x)->parent == ap_root_device)
#define is_queue_dev(x) ((x)->parent != ap_root_device)
+/*
+ * ap_init_apmsg() - Initialize ap_message.
+ */
+int ap_init_apmsg(struct ap_message *ap_msg, u32 flags)
+{
+ unsigned int maxmsgsize;
+
+ memset(ap_msg, 0, sizeof(*ap_msg));
+ ap_msg->flags = flags;
+
+ if (flags & AP_MSG_FLAG_MEMPOOL) {
+ ap_msg->msg = mempool_alloc_preallocated(ap_msg_pool);
+ if (!ap_msg->msg)
+ return -ENOMEM;
+ ap_msg->bufsize = AP_DEFAULT_MAX_MSG_SIZE;
+ return 0;
+ }
+
+ maxmsgsize = atomic_read(&ap_max_msg_size);
+ ap_msg->msg = kmalloc(maxmsgsize, GFP_KERNEL);
+ if (!ap_msg->msg)
+ return -ENOMEM;
+ ap_msg->bufsize = maxmsgsize;
+
+ return 0;
+}
+EXPORT_SYMBOL(ap_init_apmsg);
+
+/*
+ * ap_release_apmsg() - Release ap_message.
+ */
+void ap_release_apmsg(struct ap_message *ap_msg)
+{
+ if (ap_msg->flags & AP_MSG_FLAG_MEMPOOL) {
+ memzero_explicit(ap_msg->msg, ap_msg->bufsize);
+ mempool_free(ap_msg->msg, ap_msg_pool);
+ } else {
+ kfree_sensitive(ap_msg->msg);
+ }
+}
+EXPORT_SYMBOL(ap_release_apmsg);
+
/**
* ap_bus_match()
* @dev: Pointer to device
@@ -552,9 +627,9 @@ static void ap_poll_thread_stop(void)
*
* AP bus driver registration/unregistration.
*/
-static int ap_bus_match(struct device *dev, struct device_driver *drv)
+static int ap_bus_match(struct device *dev, const struct device_driver *drv)
{
- struct ap_driver *ap_drv = to_ap_drv(drv);
+ const struct ap_driver *ap_drv = to_ap_drv(drv);
struct ap_device_id *id;
/*
@@ -733,7 +808,7 @@ static void ap_check_bindings_complete(void)
if (!completion_done(&ap_apqn_bindings_complete)) {
complete_all(&ap_apqn_bindings_complete);
ap_send_bindings_complete_uevent();
- pr_debug("%s all apqn bindings complete\n", __func__);
+ pr_debug("all apqn bindings complete\n");
}
}
}
@@ -768,7 +843,7 @@ int ap_wait_apqn_bindings_complete(unsigned long timeout)
else if (l == 0 && timeout)
rc = -ETIME;
- pr_debug("%s rc=%d\n", __func__, rc);
+ pr_debug("rc=%d\n", rc);
return rc;
}
EXPORT_SYMBOL(ap_wait_apqn_bindings_complete);
@@ -786,21 +861,38 @@ static int __ap_revise_reserved(struct device *dev, void *dummy)
int rc, card, queue, devres, drvres;
if (is_queue_dev(dev)) {
- card = AP_QID_CARD(to_ap_queue(dev)->qid);
- queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
- mutex_lock(&ap_perms_mutex);
- devres = test_bit_inv(card, ap_perms.apm) &&
- test_bit_inv(queue, ap_perms.aqm);
- mutex_unlock(&ap_perms_mutex);
- drvres = to_ap_drv(dev->driver)->flags
- & AP_DRIVER_FLAG_DEFAULT;
- if (!!devres != !!drvres) {
- pr_debug("%s reprobing queue=%02x.%04x\n",
- __func__, card, queue);
- rc = device_reprobe(dev);
- if (rc)
- AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n",
- __func__, card, queue);
+ struct ap_driver *ap_drv = to_ap_drv(dev->driver);
+ struct ap_queue *aq = to_ap_queue(dev);
+ struct ap_device *ap_dev = &aq->ap_dev;
+
+ card = AP_QID_CARD(aq->qid);
+ queue = AP_QID_QUEUE(aq->qid);
+
+ if (ap_dev->driver_override) {
+ if (strcmp(ap_dev->driver_override,
+ ap_drv->driver.name)) {
+ pr_debug("reprobing queue=%02x.%04x\n", card, queue);
+ rc = device_reprobe(dev);
+ if (rc) {
+ AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n",
+ __func__, card, queue);
+ }
+ }
+ } else {
+ mutex_lock(&ap_attr_mutex);
+ devres = test_bit_inv(card, ap_perms.apm) &&
+ test_bit_inv(queue, ap_perms.aqm);
+ mutex_unlock(&ap_attr_mutex);
+ drvres = to_ap_drv(dev->driver)->flags
+ & AP_DRIVER_FLAG_DEFAULT;
+ if (!!devres != !!drvres) {
+ pr_debug("reprobing queue=%02x.%04x\n", card, queue);
+ rc = device_reprobe(dev);
+ if (rc) {
+ AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n",
+ __func__, card, queue);
+ }
+ }
}
}
@@ -818,22 +910,37 @@ static void ap_bus_revise_bindings(void)
* @card: the APID of the adapter card to check
* @queue: the APQI of the queue to check
*
- * Note: the ap_perms_mutex must be locked by the caller of this function.
+ * Note: the ap_attr_mutex must be locked by the caller of this function.
*
* Return: an int specifying whether the AP adapter is reserved for the host (1)
* or not (0).
*/
int ap_owned_by_def_drv(int card, int queue)
{
+ struct ap_queue *aq;
int rc = 0;
if (card < 0 || card >= AP_DEVICES || queue < 0 || queue >= AP_DOMAINS)
return -EINVAL;
+ aq = ap_get_qdev(AP_MKQID(card, queue));
+ if (aq) {
+ const struct device_driver *drv = aq->ap_dev.device.driver;
+ const struct ap_driver *ap_drv = to_ap_drv(drv);
+ bool override = !!aq->ap_dev.driver_override;
+
+ if (override && drv && ap_drv->flags & AP_DRIVER_FLAG_DEFAULT)
+ rc = 1;
+ put_device(&aq->ap_dev.device);
+ if (override)
+ goto out;
+ }
+
if (test_bit_inv(card, ap_perms.apm) &&
test_bit_inv(queue, ap_perms.aqm))
rc = 1;
+out:
return rc;
}
EXPORT_SYMBOL(ap_owned_by_def_drv);
@@ -845,7 +952,7 @@ EXPORT_SYMBOL(ap_owned_by_def_drv);
* @apm: a bitmap specifying a set of APIDs comprising the APQNs to check
* @aqm: a bitmap specifying a set of APQIs comprising the APQNs to check
*
- * Note: the ap_perms_mutex must be locked by the caller of this function.
+ * Note: the ap_attr_mutex must be locked by the caller of this function.
*
* Return: an int specifying whether each APQN is reserved for the host (1) or
* not (0)
@@ -856,12 +963,10 @@ int ap_apqn_in_matrix_owned_by_def_drv(unsigned long *apm,
int card, queue, rc = 0;
for (card = 0; !rc && card < AP_DEVICES; card++)
- if (test_bit_inv(card, apm) &&
- test_bit_inv(card, ap_perms.apm))
+ if (test_bit_inv(card, apm))
for (queue = 0; !rc && queue < AP_DOMAINS; queue++)
- if (test_bit_inv(queue, aqm) &&
- test_bit_inv(queue, ap_perms.aqm))
- rc = 1;
+ if (test_bit_inv(queue, aqm))
+ rc = ap_owned_by_def_drv(card, queue);
return rc;
}
@@ -885,13 +990,19 @@ static int ap_device_probe(struct device *dev)
*/
card = AP_QID_CARD(to_ap_queue(dev)->qid);
queue = AP_QID_QUEUE(to_ap_queue(dev)->qid);
- mutex_lock(&ap_perms_mutex);
- devres = test_bit_inv(card, ap_perms.apm) &&
- test_bit_inv(queue, ap_perms.aqm);
- mutex_unlock(&ap_perms_mutex);
- drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
- if (!!devres != !!drvres)
- goto out;
+ if (ap_dev->driver_override) {
+ if (strcmp(ap_dev->driver_override,
+ ap_drv->driver.name))
+ goto out;
+ } else {
+ mutex_lock(&ap_attr_mutex);
+ devres = test_bit_inv(card, ap_perms.apm) &&
+ test_bit_inv(queue, ap_perms.aqm);
+ mutex_unlock(&ap_attr_mutex);
+ drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
+ if (!!devres != !!drvres)
+ goto out;
+ }
}
/*
@@ -917,8 +1028,17 @@ static int ap_device_probe(struct device *dev)
}
out:
- if (rc)
+ if (rc) {
put_device(dev);
+ } else {
+ if (is_queue_dev(dev)) {
+ pr_debug("queue=%02x.%04x new driver=%s\n",
+ card, queue, ap_drv->driver.name);
+ } else {
+ pr_debug("card=%02x new driver=%s\n",
+ to_ap_card(dev)->id, ap_drv->driver.name);
+ }
+ }
return rc;
}
@@ -971,11 +1091,16 @@ int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
char *name)
{
struct device_driver *drv = &ap_drv->driver;
+ int rc;
drv->bus = &ap_bus_type;
drv->owner = owner;
drv->name = name;
- return driver_register(drv);
+ rc = driver_register(drv);
+
+ ap_check_bindings_complete();
+
+ return rc;
}
EXPORT_SYMBOL(ap_driver_register);
@@ -995,17 +1120,31 @@ bool ap_bus_force_rescan(void)
unsigned long scan_counter = atomic64_read(&ap_scan_bus_count);
bool rc = false;
- pr_debug(">%s scan counter=%lu\n", __func__, scan_counter);
+ pr_debug("> scan counter=%lu\n", scan_counter);
/* Only trigger AP bus scans after the initial scan is done */
if (scan_counter <= 0)
goto out;
+ /*
+ * There is one unlikely but nevertheless valid scenario where the
+ * thread holding the mutex may try to send some crypto load but
+ * all cards are offline so a rescan is triggered which causes
+ * a recursive call of ap_bus_force_rescan(). A simple return if
+ * the mutex is already locked by this thread solves this.
+ */
+ if (mutex_is_locked(&ap_scan_bus_mutex)) {
+ if (ap_scan_bus_task == current)
+ goto out;
+ }
+
/* Try to acquire the AP scan bus mutex */
if (mutex_trylock(&ap_scan_bus_mutex)) {
/* mutex acquired, run the AP bus scan */
+ ap_scan_bus_task = current;
ap_scan_bus_result = ap_scan_bus();
rc = ap_scan_bus_result;
+ ap_scan_bus_task = NULL;
mutex_unlock(&ap_scan_bus_mutex);
goto out;
}
@@ -1024,7 +1163,7 @@ bool ap_bus_force_rescan(void)
mutex_unlock(&ap_scan_bus_mutex);
out:
- pr_debug("%s rc=%d\n", __func__, rc);
+ pr_debug("rc=%d\n", rc);
return rc;
}
EXPORT_SYMBOL(ap_bus_force_rescan);
@@ -1038,7 +1177,7 @@ static int ap_bus_cfg_chg(struct notifier_block *nb,
if (action != CHSC_NOTIFY_AP_CFG)
return NOTIFY_DONE;
- pr_debug("%s config change, forcing bus rescan\n", __func__);
+ pr_debug("config change, forcing bus rescan\n");
ap_bus_force_rescan();
@@ -1352,12 +1491,12 @@ static ssize_t apmask_show(const struct bus_type *bus, char *buf)
{
int rc;
- if (mutex_lock_interruptible(&ap_perms_mutex))
+ if (mutex_lock_interruptible(&ap_attr_mutex))
return -ERESTARTSYS;
rc = sysfs_emit(buf, "0x%016lx%016lx%016lx%016lx\n",
ap_perms.apm[0], ap_perms.apm[1],
ap_perms.apm[2], ap_perms.apm[3]);
- mutex_unlock(&ap_perms_mutex);
+ mutex_unlock(&ap_attr_mutex);
return rc;
}
@@ -1367,6 +1506,7 @@ static int __verify_card_reservations(struct device_driver *drv, void *data)
int rc = 0;
struct ap_driver *ap_drv = to_ap_drv(drv);
unsigned long *newapm = (unsigned long *)data;
+ unsigned long aqm_any[BITS_TO_LONGS(AP_DOMAINS)];
/*
* increase the driver's module refcounter to be sure it is not
@@ -1376,7 +1516,8 @@ static int __verify_card_reservations(struct device_driver *drv, void *data)
return 0;
if (ap_drv->in_use) {
- rc = ap_drv->in_use(newapm, ap_perms.aqm);
+ bitmap_fill(aqm_any, AP_DOMAINS);
+ rc = ap_drv->in_use(newapm, aqm_any);
if (rc)
rc = -EBUSY;
}
@@ -1405,18 +1546,31 @@ static int apmask_commit(unsigned long *newapm)
memcpy(ap_perms.apm, newapm, APMASKSIZE);
+ /*
+ * Update ap_apmask_aqmask_in_use. Note that the
+ * ap_attr_mutex has to be obtained here.
+ */
+ ap_apmask_aqmask_in_use =
+ bitmap_full(ap_perms.apm, AP_DEVICES) &&
+ bitmap_full(ap_perms.aqm, AP_DOMAINS) ?
+ false : true;
+
return 0;
}
static ssize_t apmask_store(const struct bus_type *bus, const char *buf,
size_t count)
{
- int rc, changes = 0;
DECLARE_BITMAP(newapm, AP_DEVICES);
+ int rc = -EINVAL, changes = 0;
- if (mutex_lock_interruptible(&ap_perms_mutex))
+ if (mutex_lock_interruptible(&ap_attr_mutex))
return -ERESTARTSYS;
+ /* Do not allow apmask/aqmask if driver override is active */
+ if (ap_driver_override_ctr)
+ goto done;
+
rc = ap_parse_bitmap_str(buf, ap_perms.apm, AP_DEVICES, newapm);
if (rc)
goto done;
@@ -1426,7 +1580,7 @@ static ssize_t apmask_store(const struct bus_type *bus, const char *buf,
rc = apmask_commit(newapm);
done:
- mutex_unlock(&ap_perms_mutex);
+ mutex_unlock(&ap_attr_mutex);
if (rc)
return rc;
@@ -1444,12 +1598,12 @@ static ssize_t aqmask_show(const struct bus_type *bus, char *buf)
{
int rc;
- if (mutex_lock_interruptible(&ap_perms_mutex))
+ if (mutex_lock_interruptible(&ap_attr_mutex))
return -ERESTARTSYS;
rc = sysfs_emit(buf, "0x%016lx%016lx%016lx%016lx\n",
ap_perms.aqm[0], ap_perms.aqm[1],
ap_perms.aqm[2], ap_perms.aqm[3]);
- mutex_unlock(&ap_perms_mutex);
+ mutex_unlock(&ap_attr_mutex);
return rc;
}
@@ -1459,6 +1613,7 @@ static int __verify_queue_reservations(struct device_driver *drv, void *data)
int rc = 0;
struct ap_driver *ap_drv = to_ap_drv(drv);
unsigned long *newaqm = (unsigned long *)data;
+ unsigned long apm_any[BITS_TO_LONGS(AP_DEVICES)];
/*
* increase the driver's module refcounter to be sure it is not
@@ -1468,7 +1623,8 @@ static int __verify_queue_reservations(struct device_driver *drv, void *data)
return 0;
if (ap_drv->in_use) {
- rc = ap_drv->in_use(ap_perms.apm, newaqm);
+ bitmap_fill(apm_any, AP_DEVICES);
+ rc = ap_drv->in_use(apm_any, newaqm);
if (rc)
rc = -EBUSY;
}
@@ -1497,18 +1653,31 @@ static int aqmask_commit(unsigned long *newaqm)
memcpy(ap_perms.aqm, newaqm, AQMASKSIZE);
+ /*
+ * Update ap_apmask_aqmask_in_use. Note that the
+ * ap_attr_mutex has to be obtained here.
+ */
+ ap_apmask_aqmask_in_use =
+ bitmap_full(ap_perms.apm, AP_DEVICES) &&
+ bitmap_full(ap_perms.aqm, AP_DOMAINS) ?
+ false : true;
+
return 0;
}
static ssize_t aqmask_store(const struct bus_type *bus, const char *buf,
size_t count)
{
- int rc, changes = 0;
DECLARE_BITMAP(newaqm, AP_DOMAINS);
+ int rc = -EINVAL, changes = 0;
- if (mutex_lock_interruptible(&ap_perms_mutex))
+ if (mutex_lock_interruptible(&ap_attr_mutex))
return -ERESTARTSYS;
+ /* Do not allow apmask/aqmask if driver override is active */
+ if (ap_driver_override_ctr)
+ goto done;
+
rc = ap_parse_bitmap_str(buf, ap_perms.aqm, AP_DOMAINS, newaqm);
if (rc)
goto done;
@@ -1518,7 +1687,7 @@ static ssize_t aqmask_store(const struct bus_type *bus, const char *buf,
rc = aqmask_commit(newaqm);
done:
- mutex_unlock(&ap_perms_mutex);
+ mutex_unlock(&ap_attr_mutex);
if (rc)
return rc;
@@ -1565,6 +1734,15 @@ static ssize_t bindings_show(const struct bus_type *bus, char *buf)
static BUS_ATTR_RO(bindings);
+static ssize_t bindings_complete_count_show(const struct bus_type *bus,
+ char *buf)
+{
+ return sysfs_emit(buf, "%llu\n",
+ atomic64_read(&ap_bindings_complete_count));
+}
+
+static BUS_ATTR_RO(bindings_complete_count);
+
static ssize_t features_show(const struct bus_type *bus, char *buf)
{
int n = 0;
@@ -1605,6 +1783,7 @@ static struct attribute *ap_bus_attrs[] = {
&bus_attr_aqmask.attr,
&bus_attr_scans.attr,
&bus_attr_bindings.attr,
+ &bus_attr_bindings_complete_count.attr,
&bus_attr_features.attr,
NULL,
};
@@ -1845,13 +2024,12 @@ static inline void ap_scan_domains(struct ap_card *ac)
}
/* if no queue device exists, create a new one */
if (!aq) {
- aq = ap_queue_create(qid, ac->ap_dev.device_type);
+ aq = ap_queue_create(qid, ac);
if (!aq) {
AP_DBF_WARN("%s(%d,%d) ap_queue_create() failed\n",
__func__, ac->id, dom);
continue;
}
- aq->card = ac;
aq->config = !decfg;
aq->chkstop = chkstop;
aq->se_bstate = hwinfo.bs;
@@ -1895,8 +2073,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
aq->last_err_rc = AP_RESPONSE_CHECKSTOPPED;
}
spin_unlock_bh(&aq->lock);
- pr_debug("%s(%d,%d) queue dev checkstop on\n",
- __func__, ac->id, dom);
+ pr_debug("(%d,%d) queue dev checkstop on\n",
+ ac->id, dom);
/* 'receive' pending messages with -EAGAIN */
ap_flush_queue(aq);
goto put_dev_and_continue;
@@ -1906,8 +2084,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
_ap_queue_init_state(aq);
spin_unlock_bh(&aq->lock);
- pr_debug("%s(%d,%d) queue dev checkstop off\n",
- __func__, ac->id, dom);
+ pr_debug("(%d,%d) queue dev checkstop off\n",
+ ac->id, dom);
goto put_dev_and_continue;
}
/* config state change */
@@ -1919,8 +2097,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
aq->last_err_rc = AP_RESPONSE_DECONFIGURED;
}
spin_unlock_bh(&aq->lock);
- pr_debug("%s(%d,%d) queue dev config off\n",
- __func__, ac->id, dom);
+ pr_debug("(%d,%d) queue dev config off\n",
+ ac->id, dom);
ap_send_config_uevent(&aq->ap_dev, aq->config);
/* 'receive' pending messages with -EAGAIN */
ap_flush_queue(aq);
@@ -1931,8 +2109,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
_ap_queue_init_state(aq);
spin_unlock_bh(&aq->lock);
- pr_debug("%s(%d,%d) queue dev config on\n",
- __func__, ac->id, dom);
+ pr_debug("(%d,%d) queue dev config on\n",
+ ac->id, dom);
ap_send_config_uevent(&aq->ap_dev, aq->config);
goto put_dev_and_continue;
}
@@ -2004,8 +2182,8 @@ static inline void ap_scan_adapter(int ap)
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
} else {
- pr_debug("%s(%d) no type info (no APQN found), ignored\n",
- __func__, ap);
+ pr_debug("(%d) no type info (no APQN found), ignored\n",
+ ap);
}
return;
}
@@ -2017,8 +2195,7 @@ static inline void ap_scan_adapter(int ap)
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
} else {
- pr_debug("%s(%d) no valid type (0) info, ignored\n",
- __func__, ap);
+ pr_debug("(%d) no valid type (0) info, ignored\n", ap);
}
return;
}
@@ -2197,7 +2374,7 @@ static bool ap_scan_bus(void)
bool config_changed;
int ap;
- pr_debug(">%s\n", __func__);
+ pr_debug(">\n");
/* (re-)fetch configuration via QCI */
config_changed = ap_get_configuration();
@@ -2238,7 +2415,7 @@ static bool ap_scan_bus(void)
}
if (atomic64_inc_return(&ap_scan_bus_count) == 1) {
- pr_debug("%s init scan complete\n", __func__);
+ pr_debug("init scan complete\n");
ap_send_init_scan_done_uevent();
}
@@ -2246,7 +2423,7 @@ static bool ap_scan_bus(void)
mod_timer(&ap_scan_bus_timer, jiffies + ap_scan_bus_time * HZ);
- pr_debug("<%s config_changed=%d\n", __func__, config_changed);
+ pr_debug("< config_changed=%d\n", config_changed);
return config_changed;
}
@@ -2279,7 +2456,9 @@ static void ap_scan_bus_wq_callback(struct work_struct *unused)
* system_long_wq which invokes this function here again.
*/
if (mutex_trylock(&ap_scan_bus_mutex)) {
+ ap_scan_bus_task = current;
ap_scan_bus_result = ap_scan_bus();
+ ap_scan_bus_task = NULL;
mutex_unlock(&ap_scan_bus_mutex);
}
}
@@ -2305,10 +2484,9 @@ static inline int __init ap_async_init(void)
* Setup the high resolution poll timer.
* If we are running under z/VM adjust polling to z/VM polling rate.
*/
- if (MACHINE_IS_VM)
+ if (machine_is_vm())
poll_high_timeout = 1500000;
- hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- ap_poll_timer.function = ap_poll_timeout;
+ hrtimer_setup(&ap_poll_timer, ap_poll_timeout, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
queue_work(system_long_wq, &ap_scan_bus_work);
@@ -2380,14 +2558,14 @@ static void __init ap_perms_init(void)
if (apm_str) {
memset(&ap_perms.apm, 0, sizeof(ap_perms.apm));
ap_parse_mask_str(apm_str, ap_perms.apm, AP_DEVICES,
- &ap_perms_mutex);
+ &ap_attr_mutex);
}
/* aqm kernel parameter string */
if (aqm_str) {
memset(&ap_perms.aqm, 0, sizeof(ap_perms.aqm));
ap_parse_mask_str(aqm_str, ap_perms.aqm, AP_DOMAINS,
- &ap_perms_mutex);
+ &ap_attr_mutex);
}
}
@@ -2400,18 +2578,26 @@ static int __init ap_module_init(void)
{
int rc;
- rc = ap_debug_init();
- if (rc)
- return rc;
-
if (!ap_instructions_available()) {
pr_warn("The hardware system does not support AP instructions\n");
return -ENODEV;
}
+ rc = ap_debug_init();
+ if (rc)
+ return rc;
+
/* init ap_queue hashtable */
hash_init(ap_queues);
+ /* create ap msg buffer memory pool */
+ ap_msg_pool = mempool_create_kmalloc_pool(ap_msg_pool_min_items,
+ AP_DEFAULT_MAX_MSG_SIZE);
+ if (!ap_msg_pool) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
/* set up the AP permissions (ioctls, ap and aq masks) */
ap_perms_init();
@@ -2458,6 +2644,7 @@ out_device:
out_bus:
bus_unregister(&ap_bus_type);
out:
+ mempool_destroy(ap_msg_pool);
ap_debug_exit();
return rc;
}
@@ -2468,6 +2655,7 @@ static void __exit ap_module_exit(void)
ap_irq_exit();
root_device_unregister(ap_root_device);
bus_unregister(&ap_bus_type);
+ mempool_destroy(ap_msg_pool);
ap_debug_exit();
}