Diffstat (limited to 'net/iucv/iucv.c')
-rw-r--r--  net/iucv/iucv.c  764
1 file changed, 296 insertions(+), 468 deletions(-)
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 4fe76ff214c2..da2af413c89d 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* IUCV base infrastructure.
*
@@ -17,26 +18,12 @@
* Documentation used:
* The original source
* CP Programming Service, IBM document # SC24-5760
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#define KMSG_COMPONENT "iucv"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "iucv: " fmt
#include <linux/kernel_stat.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/spinlock.h>
@@ -52,6 +39,7 @@
#include <linux/reboot.h>
#include <net/iucv/iucv.h>
#include <linux/atomic.h>
+#include <asm/machine.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -75,42 +63,55 @@
#define IUCV_IPNORPY 0x10
#define IUCV_IPALL 0x80
-static int iucv_bus_match(struct device *dev, struct device_driver *drv)
+static int iucv_bus_match(struct device *dev, const struct device_driver *drv)
{
return 0;
}
-enum iucv_pm_states {
- IUCV_PM_INITIAL = 0,
- IUCV_PM_FREEZING = 1,
- IUCV_PM_THAWING = 2,
- IUCV_PM_RESTORING = 3,
-};
-static enum iucv_pm_states iucv_pm_state;
-
-static int iucv_pm_prepare(struct device *);
-static void iucv_pm_complete(struct device *);
-static int iucv_pm_freeze(struct device *);
-static int iucv_pm_thaw(struct device *);
-static int iucv_pm_restore(struct device *);
-
-static const struct dev_pm_ops iucv_pm_ops = {
- .prepare = iucv_pm_prepare,
- .complete = iucv_pm_complete,
- .freeze = iucv_pm_freeze,
- .thaw = iucv_pm_thaw,
- .restore = iucv_pm_restore,
-};
-
-struct bus_type iucv_bus = {
+const struct bus_type iucv_bus = {
.name = "iucv",
.match = iucv_bus_match,
- .pm = &iucv_pm_ops,
};
EXPORT_SYMBOL(iucv_bus);
-struct device *iucv_root;
-EXPORT_SYMBOL(iucv_root);
+static struct device *iucv_root;
+
+static void iucv_release_device(struct device *device)
+{
+ kfree(device);
+}
+
+struct device *iucv_alloc_device(const struct attribute_group **attrs,
+ struct device_driver *driver,
+ void *priv, const char *fmt, ...)
+{
+ struct device *dev;
+ va_list vargs;
+ char buf[20];
+ int rc;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ goto out_error;
+ va_start(vargs, fmt);
+ vscnprintf(buf, sizeof(buf), fmt, vargs);
+ rc = dev_set_name(dev, "%s", buf);
+ va_end(vargs);
+ if (rc)
+ goto out_error;
+ dev->bus = &iucv_bus;
+ dev->parent = iucv_root;
+ dev->driver = driver;
+ dev->groups = attrs;
+ dev->release = iucv_release_device;
+ dev_set_drvdata(dev, priv);
+ return dev;
+
+out_error:
+ kfree(dev);
+ return NULL;
+}
+EXPORT_SYMBOL(iucv_alloc_device);
static int iucv_available;
@@ -119,7 +120,7 @@ struct iucv_irq_data {
u16 ippathid;
u8 ipflags1;
u8 iptype;
- u32 res2[8];
+ u32 res2[9];
};
struct iucv_irq_list {
@@ -141,7 +142,7 @@ static LIST_HEAD(iucv_task_queue);
* The tasklet for fast delivery of iucv interrupts.
*/
static void iucv_tasklet_fn(unsigned long);
-static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_fn,0);
+static DECLARE_TASKLET_OLD(iucv_tasklet, iucv_tasklet_fn);
/*
* Queue of interrupt buffers for delivery via a work queue
@@ -192,7 +193,7 @@ static char iucv_error_pathid[16] = "INVALID PATHID";
static LIST_HEAD(iucv_handler_list);
/*
- * iucv_path_table: an array of iucv_path structures.
+ * iucv_path_table: array of pointers to iucv_path structures.
*/
static struct iucv_path **iucv_path_table;
static unsigned long iucv_max_pathid;
@@ -246,7 +247,7 @@ struct iucv_cmd_dpl {
u8 iprmmsg[8];
u32 ipsrccls;
u32 ipmsgtag;
- u32 ipbfadr2;
+ dma32_t ipbfadr2;
u32 ipbfln2f;
u32 res;
} __attribute__ ((packed,aligned(8)));
@@ -262,11 +263,11 @@ struct iucv_cmd_db {
u8 iprcode;
u32 ipmsgid;
u32 iptrgcls;
- u32 ipbfadr1;
+ dma32_t ipbfadr1;
u32 ipbfln1f;
u32 ipsrccls;
u32 ipmsgtag;
- u32 ipbfadr2;
+ dma32_t ipbfadr2;
u32 ipbfln2f;
u32 res;
} __attribute__ ((packed,aligned(8)));
@@ -312,32 +313,41 @@ static union iucv_param *iucv_param[NR_CPUS];
static union iucv_param *iucv_param_irq[NR_CPUS];
/**
- * iucv_call_b2f0
- * @code: identifier of IUCV call to CP.
+ * __iucv_call_b2f0
+ * @command: identifier of IUCV call to CP.
* @parm: pointer to a struct iucv_parm block
*
* Calls CP to execute IUCV commands.
*
* Returns the result of the CP IUCV call.
*/
+static inline int __iucv_call_b2f0(int command, union iucv_param *parm)
+{
+ unsigned long reg1 = virt_to_phys(parm);
+ int cc;
+
+ asm volatile(
+ " lgr 0,%[reg0]\n"
+ " lgr 1,%[reg1]\n"
+ " .long 0xb2f01000\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=&d" (cc), "+m" (*parm)
+ : [reg0] "d" ((unsigned long)command),
+ [reg1] "d" (reg1)
+ : "cc", "0", "1");
+ return cc;
+}
+
static inline int iucv_call_b2f0(int command, union iucv_param *parm)
{
- register unsigned long reg0 asm ("0");
- register unsigned long reg1 asm ("1");
int ccode;
- reg0 = command;
- reg1 = virt_to_phys(parm);
- asm volatile(
- " .long 0xb2f01000\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (ccode), "=m" (*parm), "+d" (reg0), "+a" (reg1)
- : "m" (*parm) : "cc");
- return (ccode == 1) ? parm->ctrl.iprcode : ccode;
+ ccode = __iucv_call_b2f0(command, parm);
+ return ccode == 1 ? parm->ctrl.iprcode : ccode;
}
-/**
+/*
* iucv_query_maxconn
*
* Determines the maximum number of connections that may be established.
@@ -345,25 +355,37 @@ static inline int iucv_call_b2f0(int command, union iucv_param *parm)
 * Returns the maximum number of connections or -EPERM if IUCV is not
* available.
*/
+static int __iucv_query_maxconn(void *param, unsigned long *max_pathid)
+{
+ unsigned long reg1 = virt_to_phys(param);
+ int cc;
+
+ asm volatile (
+ " lghi 0,%[cmd]\n"
+ " lgr 1,%[reg1]\n"
+ " .long 0xb2f01000\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ " lgr %[reg1],1\n"
+ : [cc] "=&d" (cc), [reg1] "+&d" (reg1)
+ : [cmd] "K" (IUCV_QUERY)
+ : "cc", "0", "1");
+ *max_pathid = reg1;
+ return cc;
+}
+
static int iucv_query_maxconn(void)
{
- register unsigned long reg0 asm ("0");
- register unsigned long reg1 asm ("1");
+ unsigned long max_pathid;
void *param;
int ccode;
- param = kzalloc(sizeof(union iucv_param), GFP_KERNEL|GFP_DMA);
+ param = kzalloc(sizeof(union iucv_param), GFP_KERNEL | GFP_DMA);
if (!param)
return -ENOMEM;
- reg0 = IUCV_QUERY;
- reg1 = (unsigned long) param;
- asm volatile (
- " .long 0xb2f01000\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc");
+ ccode = __iucv_query_maxconn(param, &max_pathid);
if (ccode == 0)
- iucv_max_pathid = reg1;
+ iucv_max_pathid = max_pathid;
kfree(param);
return ccode ? -EPERM : 0;
}
@@ -430,31 +452,6 @@ static void iucv_block_cpu(void *data)
}
/**
- * iucv_block_cpu_almost
- * @data: unused
- *
- * Allow connection-severed interrupts only on this cpu.
- */
-static void iucv_block_cpu_almost(void *data)
-{
- int cpu = smp_processor_id();
- union iucv_param *parm;
-
- /* Allow iucv control interrupts only */
- parm = iucv_param_irq[cpu];
- memset(parm, 0, sizeof(union iucv_param));
- parm->set_mask.ipmask = 0x08;
- iucv_call_b2f0(IUCV_SETMASK, parm);
- /* Allow iucv-severed interrupt only */
- memset(parm, 0, sizeof(union iucv_param));
- parm->set_mask.ipmask = 0x20;
- iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
-
- /* Clear indication that iucv interrupts are allowed for this cpu. */
- cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
-}
-
-/**
* iucv_declare_cpu
* @data: unused
*
@@ -472,7 +469,7 @@ static void iucv_declare_cpu(void *data)
/* Declare interrupt buffer. */
parm = iucv_param_irq[cpu];
memset(parm, 0, sizeof(union iucv_param));
- parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]);
+ parm->db.ipbfadr1 = virt_to_dma32(iucv_irq_data[cpu]);
rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
if (rc) {
char *err = "Unknown";
@@ -493,8 +490,8 @@ static void iucv_declare_cpu(void *data)
err = "Paging or storage error";
break;
}
- pr_warning("Defining an interrupt buffer on CPU %i"
- " failed with 0x%02x (%s)\n", cpu, rc, err);
+ pr_warn("Defining an interrupt buffer on CPU %i failed with 0x%02x (%s)\n",
+ cpu, rc, err);
return;
}
@@ -534,8 +531,8 @@ static void iucv_retrieve_cpu(void *data)
cpumask_clear_cpu(cpu, &iucv_buffer_cpumask);
}
-/**
- * iucv_setmask_smp
+/*
+ * iucv_setmask_mp
*
* Allow iucv interrupts on all cpus.
*/
@@ -543,24 +540,24 @@ static void iucv_setmask_mp(void)
{
int cpu;
- get_online_cpus();
+ cpus_read_lock();
for_each_online_cpu(cpu)
/* Enable all cpus with a declared buffer. */
if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask) &&
!cpumask_test_cpu(cpu, &iucv_irq_cpumask))
smp_call_function_single(cpu, iucv_allow_cpu,
NULL, 1);
- put_online_cpus();
+ cpus_read_unlock();
}
-/**
+/*
* iucv_setmask_up
*
* Allow iucv interrupts on a single cpu.
*/
static void iucv_setmask_up(void)
{
- cpumask_t cpumask;
+ static cpumask_t cpumask;
int cpu;
/* Disable all cpu but the first in cpu_irq_cpumask. */
@@ -570,7 +567,7 @@ static void iucv_setmask_up(void)
smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
}
-/**
+/*
* iucv_enable
*
* This function makes iucv ready for use. It allocates the pathid
@@ -583,9 +580,9 @@ static int iucv_enable(void)
size_t alloc_size;
int cpu, rc;
- get_online_cpus();
+ cpus_read_lock();
rc = -ENOMEM;
- alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
+ alloc_size = iucv_max_pathid * sizeof(*iucv_path_table);
iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
if (!iucv_path_table)
goto out;
@@ -596,16 +593,16 @@ static int iucv_enable(void)
if (cpumask_empty(&iucv_buffer_cpumask))
/* No cpu could declare an iucv buffer. */
goto out;
- put_online_cpus();
+ cpus_read_unlock();
return 0;
out:
kfree(iucv_path_table);
iucv_path_table = NULL;
- put_online_cpus();
+ cpus_read_unlock();
return rc;
}
-/**
+/*
* iucv_disable
*
* This function shuts down iucv. It disables iucv interrupts, retrieves
@@ -614,85 +611,88 @@ out:
*/
static void iucv_disable(void)
{
- get_online_cpus();
+ cpus_read_lock();
on_each_cpu(iucv_retrieve_cpu, NULL, 1);
kfree(iucv_path_table);
iucv_path_table = NULL;
- put_online_cpus();
+ cpus_read_unlock();
}
-static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
- unsigned long action, void *hcpu)
+static int iucv_cpu_dead(unsigned int cpu)
{
- cpumask_t cpumask;
- long cpu = (long) hcpu;
-
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
- GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
- if (!iucv_irq_data[cpu])
- return notifier_from_errno(-ENOMEM);
-
- iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
- GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
- if (!iucv_param[cpu]) {
- kfree(iucv_irq_data[cpu]);
- iucv_irq_data[cpu] = NULL;
- return notifier_from_errno(-ENOMEM);
- }
- iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
- GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
- if (!iucv_param_irq[cpu]) {
- kfree(iucv_param[cpu]);
- iucv_param[cpu] = NULL;
- kfree(iucv_irq_data[cpu]);
- iucv_irq_data[cpu] = NULL;
- return notifier_from_errno(-ENOMEM);
- }
- break;
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- kfree(iucv_param_irq[cpu]);
- iucv_param_irq[cpu] = NULL;
- kfree(iucv_param[cpu]);
- iucv_param[cpu] = NULL;
- kfree(iucv_irq_data[cpu]);
- iucv_irq_data[cpu] = NULL;
- break;
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- case CPU_DOWN_FAILED:
- case CPU_DOWN_FAILED_FROZEN:
- if (!iucv_path_table)
- break;
- smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
- break;
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- if (!iucv_path_table)
- break;
- cpumask_copy(&cpumask, &iucv_buffer_cpumask);
- cpumask_clear_cpu(cpu, &cpumask);
- if (cpumask_empty(&cpumask))
- /* Can't offline last IUCV enabled cpu. */
- return notifier_from_errno(-EINVAL);
- smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
- if (cpumask_empty(&iucv_irq_cpumask))
- smp_call_function_single(
- cpumask_first(&iucv_buffer_cpumask),
- iucv_allow_cpu, NULL, 1);
- break;
- }
- return NOTIFY_OK;
+ kfree(iucv_param_irq[cpu]);
+ iucv_param_irq[cpu] = NULL;
+ kfree(iucv_param[cpu]);
+ iucv_param[cpu] = NULL;
+ kfree(iucv_irq_data[cpu]);
+ iucv_irq_data[cpu] = NULL;
+ return 0;
}
-static struct notifier_block __refdata iucv_cpu_notifier = {
- .notifier_call = iucv_cpu_notify,
-};
+static int iucv_cpu_prepare(unsigned int cpu)
+{
+ /* Note: GFP_DMA used to get memory below 2G */
+ iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
+ GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
+ if (!iucv_irq_data[cpu])
+ goto out_free;
+
+ /* Allocate parameter blocks. */
+ iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
+ GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
+ if (!iucv_param[cpu])
+ goto out_free;
+
+ iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
+ GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
+ if (!iucv_param_irq[cpu])
+ goto out_free;
+
+ return 0;
+
+out_free:
+ iucv_cpu_dead(cpu);
+ return -ENOMEM;
+}
+
+static int iucv_cpu_online(unsigned int cpu)
+{
+ if (!iucv_path_table)
+ return 0;
+ iucv_declare_cpu(NULL);
+ return 0;
+}
+
+static int iucv_cpu_down_prep(unsigned int cpu)
+{
+ cpumask_var_t cpumask;
+ int ret = 0;
+
+ if (!iucv_path_table)
+ return 0;
+
+ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_copy(cpumask, &iucv_buffer_cpumask);
+ cpumask_clear_cpu(cpu, cpumask);
+ if (cpumask_empty(cpumask)) {
+ /* Can't offline last IUCV enabled cpu. */
+ ret = -EINVAL;
+ goto __free_cpumask;
+ }
+
+ iucv_retrieve_cpu(NULL);
+ if (!cpumask_empty(&iucv_irq_cpumask))
+ goto __free_cpumask;
+
+ smp_call_function_single(cpumask_first(&iucv_buffer_cpumask),
+ iucv_allow_cpu, NULL, 1);
+
+__free_cpumask:
+ free_cpumask_var(cpumask);
+ return ret;
+}
/**
* iucv_sever_pathid
@@ -701,7 +701,7 @@ static struct notifier_block __refdata iucv_cpu_notifier = {
*
* Sever an iucv path to free up the pathid. Used internally.
*/
-static int iucv_sever_pathid(u16 pathid, u8 userdata[16])
+static int iucv_sever_pathid(u16 pathid, u8 *userdata)
{
union iucv_param *parm;
@@ -834,7 +834,7 @@ static int iucv_reboot_event(struct notifier_block *this,
if (cpumask_empty(&iucv_irq_cpumask))
return NOTIFY_DONE;
- get_online_cpus();
+ cpus_read_lock();
on_each_cpu_mask(&iucv_irq_cpumask, iucv_block_cpu, NULL, 1);
preempt_disable();
for (i = 0; i < iucv_max_pathid; i++) {
@@ -842,7 +842,7 @@ static int iucv_reboot_event(struct notifier_block *this,
iucv_sever_pathid(i, NULL);
}
preempt_enable();
- put_online_cpus();
+ cpus_read_unlock();
iucv_disable();
return NOTIFY_DONE;
}
@@ -864,7 +864,7 @@ static struct notifier_block iucv_reboot_notifier = {
* Returns the result of the CP IUCV call.
*/
int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
- u8 userdata[16], void *private)
+ u8 *userdata, void *private)
{
union iucv_param *parm;
int rc;
@@ -911,7 +911,7 @@ EXPORT_SYMBOL(iucv_path_accept);
* Returns the result of the CP IUCV call.
*/
int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
- u8 userid[8], u8 system[8], u8 userdata[16],
+ u8 *userid, u8 *system, u8 *userdata,
void *private)
{
union iucv_param *parm;
@@ -973,7 +973,7 @@ EXPORT_SYMBOL(iucv_path_connect);
*
* Returns the result from the CP IUCV call.
*/
-int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16])
+int iucv_path_quiesce(struct iucv_path *path, u8 *userdata)
{
union iucv_param *parm;
int rc;
@@ -1005,7 +1005,7 @@ EXPORT_SYMBOL(iucv_path_quiesce);
*
* Returns the result from the CP IUCV call.
*/
-int iucv_path_resume(struct iucv_path *path, u8 userdata[16])
+int iucv_path_resume(struct iucv_path *path, u8 *userdata)
{
union iucv_param *parm;
int rc;
@@ -1035,7 +1035,7 @@ out:
*
* Returns the result from the CP IUCV call.
*/
-int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
+int iucv_path_sever(struct iucv_path *path, u8 *userdata)
{
int rc;
@@ -1128,8 +1128,7 @@ static int iucv_message_receive_iprmdata(struct iucv_path *path,
size = (size < 8) ? size : 8;
for (array = buffer; size > 0; array++) {
copy = min_t(size_t, size, array->length);
- memcpy((u8 *)(addr_t) array->address,
- rmmsg, copy);
+ memcpy(dma32_to_virt(array->address), rmmsg, copy);
rmmsg += copy;
size -= copy;
}
@@ -1166,13 +1165,12 @@ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
if (msg->flags & IUCV_IPRMDATA)
return iucv_message_receive_iprmdata(path, msg, flags,
buffer, size, residual);
- if (cpumask_empty(&iucv_buffer_cpumask)) {
- rc = -EIO;
- goto out;
- }
+ if (cpumask_empty(&iucv_buffer_cpumask))
+ return -EIO;
+
parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
- parm->db.ipbfadr1 = (u32)(addr_t) buffer;
+ parm->db.ipbfadr1 = virt_to_dma32(buffer);
parm->db.ipbfln1f = (u32) size;
parm->db.ipmsgid = msg->id;
parm->db.ippathid = path->pathid;
@@ -1185,7 +1183,6 @@ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
if (residual)
*residual = parm->db.ipbfln1f;
}
-out:
return rc;
}
EXPORT_SYMBOL(__iucv_message_receive);
@@ -1291,7 +1288,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
parm->dpl.iptrgcls = msg->class;
memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8));
} else {
- parm->db.ipbfadr1 = (u32)(addr_t) reply;
+ parm->db.ipbfadr1 = virt_to_dma32(reply);
parm->db.ipbfln1f = (u32) size;
parm->db.ippathid = path->pathid;
parm->db.ipflags1 = flags;
@@ -1343,7 +1340,7 @@ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
parm->dpl.ipmsgtag = msg->tag;
memcpy(parm->dpl.iprmmsg, buffer, 8);
} else {
- parm->db.ipbfadr1 = (u32)(addr_t) buffer;
+ parm->db.ipbfadr1 = virt_to_dma32(buffer);
parm->db.ipbfln1f = (u32) size;
parm->db.ippathid = path->pathid;
parm->db.ipflags1 = flags | IUCV_IPNORPY;
@@ -1397,8 +1394,9 @@ EXPORT_SYMBOL(iucv_message_send);
* @srccls: source class of message
* @buffer: address of send buffer or address of struct iucv_array
* @size: length of send buffer
- * @ansbuf: address of answer buffer or address of struct iucv_array
+ * @answer: address of answer buffer or address of struct iucv_array
* @asize: size of reply buffer
+ * @residual: ignored
*
* This function transmits data to another application. Data to be
* transmitted is in a buffer. The receiver of the send is expected to
@@ -1427,7 +1425,7 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
parm->dpl.iptrgcls = msg->class;
parm->dpl.ipsrccls = srccls;
parm->dpl.ipmsgtag = msg->tag;
- parm->dpl.ipbfadr2 = (u32)(addr_t) answer;
+ parm->dpl.ipbfadr2 = virt_to_dma32(answer);
parm->dpl.ipbfln2f = (u32) asize;
memcpy(parm->dpl.iprmmsg, buffer, 8);
} else {
@@ -1436,9 +1434,9 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
parm->db.iptrgcls = msg->class;
parm->db.ipsrccls = srccls;
parm->db.ipmsgtag = msg->tag;
- parm->db.ipbfadr1 = (u32)(addr_t) buffer;
+ parm->db.ipbfadr1 = virt_to_dma32(buffer);
parm->db.ipbfln1f = (u32) size;
- parm->db.ipbfadr2 = (u32)(addr_t) answer;
+ parm->db.ipbfadr2 = virt_to_dma32(answer);
parm->db.ipbfln2f = (u32) asize;
}
rc = iucv_call_b2f0(IUCV_SEND, parm);
@@ -1450,13 +1448,6 @@ out:
}
EXPORT_SYMBOL(iucv_message_send2way);
-/**
- * iucv_path_pending
- * @data: Pointer to external interrupt buffer
- *
- * Process connection pending work item. Called from tasklet while holding
- * iucv_table_lock.
- */
struct iucv_path_pending {
u16 ippathid;
u8 ipflags1;
@@ -1470,6 +1461,13 @@ struct iucv_path_pending {
u8 res4[3];
} __packed;
+/**
+ * iucv_path_pending
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process connection pending work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
static void iucv_path_pending(struct iucv_irq_data *data)
{
struct iucv_path_pending *ipp = (void *) data;
@@ -1511,13 +1509,6 @@ out_sever:
iucv_sever_pathid(ipp->ippathid, error);
}
-/**
- * iucv_path_complete
- * @data: Pointer to external interrupt buffer
- *
- * Process connection complete work item. Called from tasklet while holding
- * iucv_table_lock.
- */
struct iucv_path_complete {
u16 ippathid;
u8 ipflags1;
@@ -1531,6 +1522,13 @@ struct iucv_path_complete {
u8 res4[3];
} __packed;
+/**
+ * iucv_path_complete
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process connection complete work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
static void iucv_path_complete(struct iucv_irq_data *data)
{
struct iucv_path_complete *ipc = (void *) data;
@@ -1542,13 +1540,6 @@ static void iucv_path_complete(struct iucv_irq_data *data)
path->handler->path_complete(path, ipc->ipuser);
}
-/**
- * iucv_path_severed
- * @data: Pointer to external interrupt buffer
- *
- * Process connection severed work item. Called from tasklet while holding
- * iucv_table_lock.
- */
struct iucv_path_severed {
u16 ippathid;
u8 res1;
@@ -1561,6 +1552,13 @@ struct iucv_path_severed {
u8 res5[3];
} __packed;
+/**
+ * iucv_path_severed
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process connection severed work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
static void iucv_path_severed(struct iucv_irq_data *data)
{
struct iucv_path_severed *ips = (void *) data;
@@ -1578,13 +1576,6 @@ static void iucv_path_severed(struct iucv_irq_data *data)
}
}
-/**
- * iucv_path_quiesced
- * @data: Pointer to external interrupt buffer
- *
- * Process connection quiesced work item. Called from tasklet while holding
- * iucv_table_lock.
- */
struct iucv_path_quiesced {
u16 ippathid;
u8 res1;
@@ -1597,6 +1588,13 @@ struct iucv_path_quiesced {
u8 res5[3];
} __packed;
+/**
+ * iucv_path_quiesced
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process connection quiesced work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
static void iucv_path_quiesced(struct iucv_irq_data *data)
{
struct iucv_path_quiesced *ipq = (void *) data;
@@ -1606,13 +1604,6 @@ static void iucv_path_quiesced(struct iucv_irq_data *data)
path->handler->path_quiesced(path, ipq->ipuser);
}
-/**
- * iucv_path_resumed
- * @data: Pointer to external interrupt buffer
- *
- * Process connection resumed work item. Called from tasklet while holding
- * iucv_table_lock.
- */
struct iucv_path_resumed {
u16 ippathid;
u8 res1;
@@ -1625,6 +1616,13 @@ struct iucv_path_resumed {
u8 res5[3];
} __packed;
+/**
+ * iucv_path_resumed
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process connection resumed work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
static void iucv_path_resumed(struct iucv_irq_data *data)
{
struct iucv_path_resumed *ipr = (void *) data;
@@ -1634,13 +1632,6 @@ static void iucv_path_resumed(struct iucv_irq_data *data)
path->handler->path_resumed(path, ipr->ipuser);
}
-/**
- * iucv_message_complete
- * @data: Pointer to external interrupt buffer
- *
- * Process message complete work item. Called from tasklet while holding
- * iucv_table_lock.
- */
struct iucv_message_complete {
u16 ippathid;
u8 ipflags1;
@@ -1656,6 +1647,13 @@ struct iucv_message_complete {
u8 res2[3];
} __packed;
+/**
+ * iucv_message_complete
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process message complete work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
static void iucv_message_complete(struct iucv_irq_data *data)
{
struct iucv_message_complete *imc = (void *) data;
@@ -1674,33 +1672,35 @@ static void iucv_message_complete(struct iucv_irq_data *data)
}
}
-/**
- * iucv_message_pending
- * @data: Pointer to external interrupt buffer
- *
- * Process message pending work item. Called from tasklet while holding
- * iucv_table_lock.
- */
struct iucv_message_pending {
u16 ippathid;
u8 ipflags1;
u8 iptype;
u32 ipmsgid;
u32 iptrgcls;
- union {
- u32 iprmmsg1_u32;
- u8 iprmmsg1[4];
- } ln1msg1;
- union {
- u32 ipbfln1f;
- u8 iprmmsg2[4];
- } ln1msg2;
+ struct {
+ union {
+ u32 iprmmsg1_u32;
+ u8 iprmmsg1[4];
+ } ln1msg1;
+ union {
+ u32 ipbfln1f;
+ u8 iprmmsg2[4];
+ } ln1msg2;
+ } rmmsg;
u32 res1[3];
u32 ipbfln2f;
u8 ippollfg;
u8 res2[3];
} __packed;
+/**
+ * iucv_message_pending
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process message pending work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
static void iucv_message_pending(struct iucv_irq_data *data)
{
struct iucv_message_pending *imp = (void *) data;
@@ -1712,16 +1712,16 @@ static void iucv_message_pending(struct iucv_irq_data *data)
msg.id = imp->ipmsgid;
msg.class = imp->iptrgcls;
if (imp->ipflags1 & IUCV_IPRMDATA) {
- memcpy(msg.rmmsg, imp->ln1msg1.iprmmsg1, 8);
+ memcpy(msg.rmmsg, &imp->rmmsg, 8);
msg.length = 8;
} else
- msg.length = imp->ln1msg2.ipbfln1f;
+ msg.length = imp->rmmsg.ln1msg2.ipbfln1f;
msg.reply_size = imp->ipbfln2f;
path->handler->message_pending(path, &msg);
}
}
-/**
+/*
* iucv_tasklet_fn:
*
* This tasklet loops over the queue of irq buffers created by
@@ -1765,7 +1765,7 @@ static void iucv_tasklet_fn(unsigned long ignored)
spin_unlock(&iucv_table_lock);
}
-/**
+/*
* iucv_work_fn:
*
* This work function loops over the queue of path pending irq blocks
@@ -1796,9 +1796,8 @@ static void iucv_work_fn(struct work_struct *work)
spin_unlock_bh(&iucv_table_lock);
}
-/**
+/*
* iucv_external_interrupt
- * @code: irq code
*
* Handles external interrupts coming in from CP.
* Places the interrupt buffer on a queue and schedules iucv_tasklet_fn().
@@ -1819,7 +1818,7 @@ static void iucv_external_interrupt(struct ext_code ext_code,
BUG_ON(p->iptype < 0x01 || p->iptype > 0x09);
work = kmalloc(sizeof(struct iucv_irq_list), GFP_ATOMIC);
if (!work) {
- pr_warning("iucv_external_interrupt: out of memory\n");
+ pr_warn("iucv_external_interrupt: out of memory\n");
return;
}
memcpy(&work->data, p, sizeof(work->data));
@@ -1836,147 +1835,6 @@ static void iucv_external_interrupt(struct ext_code ext_code,
spin_unlock(&iucv_queue_lock);
}
-static int iucv_pm_prepare(struct device *dev)
-{
- int rc = 0;
-
-#ifdef CONFIG_PM_DEBUG
- printk(KERN_INFO "iucv_pm_prepare\n");
-#endif
- if (dev->driver && dev->driver->pm && dev->driver->pm->prepare)
- rc = dev->driver->pm->prepare(dev);
- return rc;
-}
-
-static void iucv_pm_complete(struct device *dev)
-{
-#ifdef CONFIG_PM_DEBUG
- printk(KERN_INFO "iucv_pm_complete\n");
-#endif
- if (dev->driver && dev->driver->pm && dev->driver->pm->complete)
- dev->driver->pm->complete(dev);
-}
-
-/**
- * iucv_path_table_empty() - determine if iucv path table is empty
- *
- * Returns 0 if there are still iucv pathes defined
- * 1 if there are no iucv pathes defined
- */
-int iucv_path_table_empty(void)
-{
- int i;
-
- for (i = 0; i < iucv_max_pathid; i++) {
- if (iucv_path_table[i])
- return 0;
- }
- return 1;
-}
-
-/**
- * iucv_pm_freeze() - Freeze PM callback
- * @dev: iucv-based device
- *
- * disable iucv interrupts
- * invoke callback function of the iucv-based driver
- * shut down iucv, if no iucv-pathes are established anymore
- */
-static int iucv_pm_freeze(struct device *dev)
-{
- int cpu;
- struct iucv_irq_list *p, *n;
- int rc = 0;
-
-#ifdef CONFIG_PM_DEBUG
- printk(KERN_WARNING "iucv_pm_freeze\n");
-#endif
- if (iucv_pm_state != IUCV_PM_FREEZING) {
- for_each_cpu(cpu, &iucv_irq_cpumask)
- smp_call_function_single(cpu, iucv_block_cpu_almost,
- NULL, 1);
- cancel_work_sync(&iucv_work);
- list_for_each_entry_safe(p, n, &iucv_work_queue, list) {
- list_del_init(&p->list);
- iucv_sever_pathid(p->data.ippathid,
- iucv_error_no_listener);
- kfree(p);
- }
- }
- iucv_pm_state = IUCV_PM_FREEZING;
- if (dev->driver && dev->driver->pm && dev->driver->pm->freeze)
- rc = dev->driver->pm->freeze(dev);
- if (iucv_path_table_empty())
- iucv_disable();
- return rc;
-}
-
-/**
- * iucv_pm_thaw() - Thaw PM callback
- * @dev: iucv-based device
- *
- * make iucv ready for use again: allocate path table, declare interrupt buffers
- * and enable iucv interrupts
- * invoke callback function of the iucv-based driver
- */
-static int iucv_pm_thaw(struct device *dev)
-{
- int rc = 0;
-
-#ifdef CONFIG_PM_DEBUG
- printk(KERN_WARNING "iucv_pm_thaw\n");
-#endif
- iucv_pm_state = IUCV_PM_THAWING;
- if (!iucv_path_table) {
- rc = iucv_enable();
- if (rc)
- goto out;
- }
- if (cpumask_empty(&iucv_irq_cpumask)) {
- if (iucv_nonsmp_handler)
- /* enable interrupts on one cpu */
- iucv_allow_cpu(NULL);
- else
- /* enable interrupts on all cpus */
- iucv_setmask_mp();
- }
- if (dev->driver && dev->driver->pm && dev->driver->pm->thaw)
- rc = dev->driver->pm->thaw(dev);
-out:
- return rc;
-}
-
-/**
- * iucv_pm_restore() - Restore PM callback
- * @dev: iucv-based device
- *
- * make iucv ready for use again: allocate path table, declare interrupt buffers
- * and enable iucv interrupts
- * invoke callback function of the iucv-based driver
- */
-static int iucv_pm_restore(struct device *dev)
-{
- int rc = 0;
-
-#ifdef CONFIG_PM_DEBUG
- printk(KERN_WARNING "iucv_pm_restore %p\n", iucv_path_table);
-#endif
- if ((iucv_pm_state != IUCV_PM_RESTORING) && iucv_path_table)
- pr_warning("Suspending Linux did not completely close all IUCV "
- "connections\n");
- iucv_pm_state = IUCV_PM_RESTORING;
- if (cpumask_empty(&iucv_irq_cpumask)) {
- rc = iucv_query_maxconn();
- rc = iucv_enable();
- if (rc)
- goto out;
- }
- if (dev->driver && dev->driver->pm && dev->driver->pm->restore)
- rc = dev->driver->pm->restore(dev);
-out:
- return rc;
-}
-
struct iucv_interface iucv_if = {
.message_receive = iucv_message_receive,
.__message_receive = __iucv_message_receive,
@@ -1998,6 +1856,7 @@ struct iucv_interface iucv_if = {
};
EXPORT_SYMBOL(iucv_if);
+static enum cpuhp_state iucv_online;
/**
* iucv_init
*
@@ -2006,17 +1865,16 @@ EXPORT_SYMBOL(iucv_if);
static int __init iucv_init(void)
{
int rc;
- int cpu;
- if (!MACHINE_IS_VM) {
+ if (!machine_is_vm()) {
rc = -EPROTONOSUPPORT;
goto out;
}
- ctl_set_bit(0, 1);
+ system_ctl_set_bit(0, CR0_IUCV_BIT);
rc = iucv_query_maxconn();
if (rc)
goto out_ctl;
- rc = register_external_interrupt(0x4000, iucv_external_interrupt);
+ rc = register_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
if (rc)
goto out_ctl;
iucv_root = root_device_register("iucv");
@@ -2025,36 +1883,19 @@ static int __init iucv_init(void)
goto out_int;
}
- for_each_online_cpu(cpu) {
- /* Note: GFP_DMA used to get memory below 2G */
- iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data),
- GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
- if (!iucv_irq_data[cpu]) {
- rc = -ENOMEM;
- goto out_free;
- }
-
- /* Allocate parameter blocks. */
- iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param),
- GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
- if (!iucv_param[cpu]) {
- rc = -ENOMEM;
- goto out_free;
- }
- iucv_param_irq[cpu] = kmalloc_node(sizeof(union iucv_param),
- GFP_KERNEL|GFP_DMA, cpu_to_node(cpu));
- if (!iucv_param_irq[cpu]) {
- rc = -ENOMEM;
- goto out_free;
- }
-
- }
- rc = register_hotcpu_notifier(&iucv_cpu_notifier);
+ rc = cpuhp_setup_state(CPUHP_NET_IUCV_PREPARE, "net/iucv:prepare",
+ iucv_cpu_prepare, iucv_cpu_dead);
if (rc)
- goto out_free;
+ goto out_dev;
+ rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "net/iucv:online",
+ iucv_cpu_online, iucv_cpu_down_prep);
+ if (rc < 0)
+ goto out_prep;
+ iucv_online = rc;
+
rc = register_reboot_notifier(&iucv_reboot_notifier);
if (rc)
- goto out_cpu;
+ goto out_remove_hp;
ASCEBC(iucv_error_no_listener, 16);
ASCEBC(iucv_error_no_memory, 16);
ASCEBC(iucv_error_pathid, 16);
@@ -2068,22 +1909,16 @@ static int __init iucv_init(void)
out_reboot:
unregister_reboot_notifier(&iucv_reboot_notifier);
-out_cpu:
- unregister_hotcpu_notifier(&iucv_cpu_notifier);
-out_free:
- for_each_possible_cpu(cpu) {
- kfree(iucv_param_irq[cpu]);
- iucv_param_irq[cpu] = NULL;
- kfree(iucv_param[cpu]);
- iucv_param[cpu] = NULL;
- kfree(iucv_irq_data[cpu]);
- iucv_irq_data[cpu] = NULL;
- }
+out_remove_hp:
+ cpuhp_remove_state(iucv_online);
+out_prep:
+ cpuhp_remove_state(CPUHP_NET_IUCV_PREPARE);
+out_dev:
root_device_unregister(iucv_root);
out_int:
- unregister_external_interrupt(0x4000, iucv_external_interrupt);
+ unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
out_ctl:
- ctl_clear_bit(0, 1);
+ system_ctl_clear_bit(0, 1);
out:
return rc;
}
@@ -2096,7 +1931,6 @@ out:
static void __exit iucv_exit(void)
{
struct iucv_irq_list *p, *n;
- int cpu;
spin_lock_irq(&iucv_queue_lock);
list_for_each_entry_safe(p, n, &iucv_task_queue, list)
@@ -2105,23 +1939,17 @@ static void __exit iucv_exit(void)
kfree(p);
spin_unlock_irq(&iucv_queue_lock);
unregister_reboot_notifier(&iucv_reboot_notifier);
- unregister_hotcpu_notifier(&iucv_cpu_notifier);
- for_each_possible_cpu(cpu) {
- kfree(iucv_param_irq[cpu]);
- iucv_param_irq[cpu] = NULL;
- kfree(iucv_param[cpu]);
- iucv_param[cpu] = NULL;
- kfree(iucv_irq_data[cpu]);
- iucv_irq_data[cpu] = NULL;
- }
+
+ cpuhp_remove_state_nocalls(iucv_online);
+ cpuhp_remove_state(CPUHP_NET_IUCV_PREPARE);
root_device_unregister(iucv_root);
bus_unregister(&iucv_bus);
- unregister_external_interrupt(0x4000, iucv_external_interrupt);
+ unregister_external_irq(EXT_IRQ_IUCV, iucv_external_interrupt);
}
subsys_initcall(iucv_init);
module_exit(iucv_exit);
-MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
+MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert <felfert@millenux.com>");
MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
MODULE_LICENSE("GPL");