Diffstat (limited to 'net/iucv/af_iucv.c')
-rw-r--r--	net/iucv/af_iucv.c	650
1 file changed, 244 insertions(+), 406 deletions(-)
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 78ea5a739d10..1e62fbc22cb7 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* IUCV protocol stack for Linux on zSeries
*
@@ -9,11 +10,13 @@
* Ursula Braun <ursula.braun@de.ibm.com>
*/
-#define KMSG_COMPONENT "af_iucv"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "af_iucv: " fmt
+#include <linux/filter.h>
#include <linux/module.h>
+#include <linux/netdevice.h>
#include <linux/types.h>
+#include <linux/limits.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/kernel.h>
@@ -24,6 +27,7 @@
#include <linux/poll.h>
#include <linux/security.h>
#include <net/sock.h>
+#include <asm/machine.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <linux/kmod.h>
@@ -34,8 +38,6 @@
static char iucv_userid[80];
-static const struct proto_ops iucv_sock_ops;
-
static struct proto iucv_proto = {
.name = "AF_IUCV",
.owner = THIS_MODULE,
@@ -43,12 +45,13 @@ static struct proto iucv_proto = {
};
static struct iucv_interface *pr_iucv;
+static struct iucv_handler af_iucv_handler;
/* special AF_IUCV IPRM messages */
static const u8 iprm_shutdown[8] =
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
-#define TRGCLS_SIZE FIELD_SIZEOF(struct iucv_message, class)
+#define TRGCLS_SIZE sizeof_field(struct iucv_message, class)
#define __iucv_sock_wait(sk, condition, timeo, ret) \
do { \
@@ -83,38 +86,18 @@ do { \
__ret; \
})
+static struct sock *iucv_accept_dequeue(struct sock *parent,
+ struct socket *newsock);
static void iucv_sock_kill(struct sock *sk);
static void iucv_sock_close(struct sock *sk);
-static void iucv_sever_path(struct sock *, int);
-static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
- struct packet_type *pt, struct net_device *orig_dev);
-static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
- struct sk_buff *skb, u8 flags);
-static void afiucv_hs_callback_txnotify(struct sk_buff *, enum iucv_tx_notify);
-
-/* Call Back functions */
-static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
-static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
-static void iucv_callback_connack(struct iucv_path *, u8 *);
-static int iucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
-static void iucv_callback_connrej(struct iucv_path *, u8 *);
-static void iucv_callback_shutdown(struct iucv_path *, u8 *);
+static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify);
static struct iucv_sock_list iucv_sk_list = {
.lock = __RW_LOCK_UNLOCKED(iucv_sk_list.lock),
.autobind_name = ATOMIC_INIT(0)
};
-static struct iucv_handler af_iucv_handler = {
- .path_pending = iucv_callback_connreq,
- .path_complete = iucv_callback_connack,
- .path_severed = iucv_callback_connrej,
- .message_pending = iucv_callback_rx,
- .message_complete = iucv_callback_txdone,
- .path_quiesced = iucv_callback_shutdown,
-};
-
static inline void high_nmcpy(unsigned char *dst, char *src)
{
memcpy(dst, src, 8);
@@ -125,110 +108,6 @@ static inline void low_nmcpy(unsigned char *dst, char *src)
memcpy(&dst[8], src, 8);
}
-static int afiucv_pm_prepare(struct device *dev)
-{
-#ifdef CONFIG_PM_DEBUG
- printk(KERN_WARNING "afiucv_pm_prepare\n");
-#endif
- return 0;
-}
-
-static void afiucv_pm_complete(struct device *dev)
-{
-#ifdef CONFIG_PM_DEBUG
- printk(KERN_WARNING "afiucv_pm_complete\n");
-#endif
-}
-
-/**
- * afiucv_pm_freeze() - Freeze PM callback
- * @dev: AFIUCV dummy device
- *
- * Sever all established IUCV communication pathes
- */
-static int afiucv_pm_freeze(struct device *dev)
-{
- struct iucv_sock *iucv;
- struct sock *sk;
-
-#ifdef CONFIG_PM_DEBUG
- printk(KERN_WARNING "afiucv_pm_freeze\n");
-#endif
- read_lock(&iucv_sk_list.lock);
- sk_for_each(sk, &iucv_sk_list.head) {
- iucv = iucv_sk(sk);
- switch (sk->sk_state) {
- case IUCV_DISCONN:
- case IUCV_CLOSING:
- case IUCV_CONNECTED:
- iucv_sever_path(sk, 0);
- break;
- case IUCV_OPEN:
- case IUCV_BOUND:
- case IUCV_LISTEN:
- case IUCV_CLOSED:
- default:
- break;
- }
- skb_queue_purge(&iucv->send_skb_q);
- skb_queue_purge(&iucv->backlog_skb_q);
- }
- read_unlock(&iucv_sk_list.lock);
- return 0;
-}
-
-/**
- * afiucv_pm_restore_thaw() - Thaw and restore PM callback
- * @dev: AFIUCV dummy device
- *
- * socket clean up after freeze
- */
-static int afiucv_pm_restore_thaw(struct device *dev)
-{
- struct sock *sk;
-
-#ifdef CONFIG_PM_DEBUG
- printk(KERN_WARNING "afiucv_pm_restore_thaw\n");
-#endif
- read_lock(&iucv_sk_list.lock);
- sk_for_each(sk, &iucv_sk_list.head) {
- switch (sk->sk_state) {
- case IUCV_CONNECTED:
- sk->sk_err = EPIPE;
- sk->sk_state = IUCV_DISCONN;
- sk->sk_state_change(sk);
- break;
- case IUCV_DISCONN:
- case IUCV_CLOSING:
- case IUCV_LISTEN:
- case IUCV_BOUND:
- case IUCV_OPEN:
- default:
- break;
- }
- }
- read_unlock(&iucv_sk_list.lock);
- return 0;
-}
-
-static const struct dev_pm_ops afiucv_pm_ops = {
- .prepare = afiucv_pm_prepare,
- .complete = afiucv_pm_complete,
- .freeze = afiucv_pm_freeze,
- .thaw = afiucv_pm_restore_thaw,
- .restore = afiucv_pm_restore_thaw,
-};
-
-static struct device_driver af_iucv_driver = {
- .owner = THIS_MODULE,
- .name = "afiucv",
- .bus = NULL,
- .pm = &afiucv_pm_ops,
-};
-
-/* dummy device used as trigger for PM functions */
-static struct device *af_iucv_dev;
-
/**
* iucv_msg_length() - Returns the length of an iucv message.
* @msg: Pointer to struct iucv_message, MUST NOT be NULL
@@ -264,7 +143,7 @@ static inline size_t iucv_msg_length(struct iucv_message *msg)
* iucv_sock_in_state() - check for specific states
* @sk: sock structure
* @state: first iucv sk state
- * @state: second iucv sk state
+ * @state2: second iucv sk state
*
* Returns true if the socket is in either the first or second state.
*/
@@ -288,13 +167,13 @@ static inline int iucv_below_msglim(struct sock *sk)
if (sk->sk_state != IUCV_CONNECTED)
return 1;
if (iucv->transport == AF_IUCV_TRANS_IUCV)
- return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
+ return (atomic_read(&iucv->skbs_in_xmit) < iucv->path->msglim);
else
return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
(atomic_read(&iucv->pendings) <= 0));
}
-/**
+/*
* iucv_sock_wake_msglim() - Wake up thread waiting on msg limit
*/
static void iucv_sock_wake_msglim(struct sock *sk)
@@ -305,11 +184,11 @@ static void iucv_sock_wake_msglim(struct sock *sk)
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up_interruptible_all(&wq->wait);
- sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+ sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
rcu_read_unlock();
}
-/**
+/*
* afiucv_hs_send() - send a message through HiperSockets transport
*/
static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
@@ -317,7 +196,6 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
{
struct iucv_sock *iucv = iucv_sk(sock);
struct af_iucv_trans_hdr *phs_hdr;
- struct sk_buff *nskb;
int err, confirm_recv = 0;
phs_hdr = skb_push(skb, sizeof(*phs_hdr));
@@ -346,14 +224,14 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
if (imsg)
memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
- skb_push(skb, ETH_HLEN);
- memset(skb->data, 0, ETH_HLEN);
-
skb->dev = iucv->hs_dev;
if (!skb->dev) {
err = -ENODEV;
goto err_free;
}
+
+ dev_hard_header(skb, skb->dev, ETH_P_AF_IUCV, NULL, NULL, skb->len);
+
if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
err = -ENETDOWN;
goto err_free;
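
The hunk above replaces the open-coded skb_push(skb, ETH_HLEN) + memset() with dev_hard_header(), which lets the device's header_ops build the link-layer header in headroom sized by LL_RESERVED_SPACE(). A minimal sketch of that pattern, assuming an already-resolved net_device (demo_xmit() is an illustrative name, not part of this patch):

	/* needs <linux/netdevice.h> and <linux/skbuff.h> */
	static int demo_xmit(struct net_device *dev, const void *payload,
			     unsigned int len)
	{
		struct sk_buff *skb;

		skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		skb_reserve(skb, LL_RESERVED_SPACE(dev));	/* L2 headroom */
		skb_put_data(skb, payload, len);
		skb->dev = dev;
		/* device-built header instead of a hand-rolled ETH_HLEN push */
		dev_hard_header(skb, dev, ETH_P_AF_IUCV, NULL, NULL, skb->len);

		return dev_queue_xmit(skb);
	}
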
@@ -363,20 +241,16 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
err = -EMSGSIZE;
goto err_free;
}
- skb_trim(skb, skb->dev->mtu);
+ err = pskb_trim(skb, skb->dev->mtu);
+ if (err)
+ goto err_free;
}
skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
- nskb = skb_clone(skb, GFP_ATOMIC);
- if (!nskb) {
- err = -ENOMEM;
- goto err_free;
- }
- skb_queue_tail(&iucv->send_skb_q, nskb);
+ atomic_inc(&iucv->skbs_in_xmit);
err = dev_queue_xmit(skb);
if (net_xmit_eval(err)) {
- skb_unlink(nskb, &iucv->send_skb_q);
- kfree_skb(nskb);
+ atomic_dec(&iucv->skbs_in_xmit);
} else {
atomic_sub(confirm_recv, &iucv->msg_recv);
WARN_ON(atomic_read(&iucv->msg_recv) < 0);
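
Taken together with the iucv_below_msglim() hunk above, the in-flight accounting now has three touch points instead of a shadow queue of skb clones; a condensed view of the counter's lifecycle (all names from this patch):

	atomic_inc(&iucv->skbs_in_xmit);		/* charge before handoff */
	err = dev_queue_xmit(skb);
	if (net_xmit_eval(err))
		atomic_dec(&iucv->skbs_in_xmit);	/* xmit rejected */
	/* on success the charge is dropped in afiucv_hs_callback_txnotify()
	 * (or iucv_callback_txdone() for the VM transport), and
	 * iucv_below_msglim() throttles senders once the counter reaches
	 * path->msglim.
	 */
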
@@ -404,8 +278,6 @@ static void iucv_sock_destruct(struct sock *sk)
skb_queue_purge(&sk->sk_receive_queue);
skb_queue_purge(&sk->sk_error_queue);
- sk_mem_reclaim(sk);
-
if (!sock_flag(sk, SOCK_DEAD)) {
pr_err("Attempt to release alive iucv socket %p\n", sk);
return;
@@ -431,6 +303,20 @@ static void iucv_sock_cleanup_listen(struct sock *parent)
parent->sk_state = IUCV_CLOSED;
}
+static void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
+{
+ write_lock_bh(&l->lock);
+ sk_add_node(sk, &l->head);
+ write_unlock_bh(&l->lock);
+}
+
+static void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
+{
+ write_lock_bh(&l->lock);
+ sk_del_node_init(sk);
+ write_unlock_bh(&l->lock);
+}
+
/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
@@ -449,8 +335,8 @@ static void iucv_sever_path(struct sock *sk, int with_user_data)
struct iucv_sock *iucv = iucv_sk(sk);
struct iucv_path *path = iucv->path;
- if (iucv->path) {
- iucv->path = NULL;
+ /* Whoever resets the path pointer, must sever and free it. */
+ if (xchg(&iucv->path, NULL)) {
if (with_user_data) {
low_nmcpy(user_data, iucv->src_name);
high_nmcpy(user_data, iucv->dst_name);
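
xchg() turns the teardown into a claim-once operation: the context that atomically swaps iucv->path to NULL owns severing and freeing it, and any concurrent caller sees NULL and backs off. The idiom in isolation, with hypothetical demo_* names (a sketch, not this driver's code):

	static void demo_teardown(struct demo_ctx *ctx)
	{
		struct demo_res *res = xchg(&ctx->res, NULL);	/* claim */

		if (res) {		/* we won; nobody else frees it */
			demo_res_sever(res);
			kfree(res);
		}
		/* losers observe NULL and return without touching res */
	}
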
@@ -465,12 +351,14 @@ static void iucv_sever_path(struct sock *sk, int with_user_data)
/* Send controlling flags through an IUCV socket for HIPER transport */
static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
+ struct iucv_sock *iucv = iucv_sk(sk);
int err = 0;
int blen;
struct sk_buff *skb;
u8 shutdown = 0;
- blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
+ blen = sizeof(struct af_iucv_trans_hdr) +
+ LL_RESERVED_SPACE(iucv->hs_dev);
if (sk->sk_shutdown & SEND_SHUTDOWN) {
/* controlling flags should be sent anyway */
shutdown = sk->sk_shutdown;
@@ -506,11 +394,13 @@ static void iucv_sock_close(struct sock *sk)
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
}
- case IUCV_DISCONN: /* fall through */
+ fallthrough;
+
+ case IUCV_DISCONN:
sk->sk_state = IUCV_CLOSING;
sk->sk_state_change(sk);
- if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
+ if (!err && atomic_read(&iucv->skbs_in_xmit) > 0) {
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
timeo = sk->sk_lingertime;
else
@@ -519,8 +409,9 @@ static void iucv_sock_close(struct sock *sk)
iucv_sock_in_state(sk, IUCV_CLOSED, 0),
timeo);
}
+ fallthrough;
- case IUCV_CLOSING: /* fall through */
+ case IUCV_CLOSING:
sk->sk_state = IUCV_CLOSED;
sk->sk_state_change(sk);
@@ -529,8 +420,9 @@ static void iucv_sock_close(struct sock *sk)
skb_queue_purge(&iucv->send_skb_q);
skb_queue_purge(&iucv->backlog_skb_q);
+ fallthrough;
- default: /* fall through */
+ default:
iucv_sever_path(sk, 1);
}
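
The fallthrough pseudo-keyword (from <linux/compiler_attributes.h>) replaces the old /* fall through */ comments; unlike a comment, it is verified by -Wimplicit-fallthrough, so a missing break can no longer hide. Shape of the construct, with hypothetical labels:

	switch (state) {
	case DEMO_DISCONN:
		demo_close_path();
		fallthrough;	/* compiler-checked; the comment was not */
	case DEMO_CLOSING:
		demo_purge_queues();
		break;
	}
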
@@ -575,11 +467,12 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio,
atomic_set(&iucv->pendings, 0);
iucv->flags = 0;
iucv->msglimit = 0;
+ atomic_set(&iucv->skbs_in_xmit, 0);
atomic_set(&iucv->msg_sent, 0);
atomic_set(&iucv->msg_recv, 0);
iucv->path = NULL;
iucv->sk_txnotify = afiucv_hs_callback_txnotify;
- memset(&iucv->src_user_id , 0, 32);
+ memset(&iucv->init, 0, sizeof(iucv->init));
if (pr_iucv)
iucv->transport = AF_IUCV_TRANS_IUCV;
else
@@ -587,7 +480,6 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio,
sk->sk_destruct = iucv_sock_destruct;
sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
- sk->sk_allocation = GFP_DMA;
sock_reset_flag(sk, SOCK_ZAPPED);
@@ -598,53 +490,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio,
return sk;
}
-/* Create an IUCV socket */
-static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
- int kern)
-{
- struct sock *sk;
-
- if (protocol && protocol != PF_IUCV)
- return -EPROTONOSUPPORT;
-
- sock->state = SS_UNCONNECTED;
-
- switch (sock->type) {
- case SOCK_STREAM:
- sock->ops = &iucv_sock_ops;
- break;
- case SOCK_SEQPACKET:
- /* currently, proto ops can handle both sk types */
- sock->ops = &iucv_sock_ops;
- break;
- default:
- return -ESOCKTNOSUPPORT;
- }
-
- sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
- if (!sk)
- return -ENOMEM;
-
- iucv_sock_init(sk, NULL);
-
- return 0;
-}
-
-void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
-{
- write_lock_bh(&l->lock);
- sk_add_node(sk, &l->head);
- write_unlock_bh(&l->lock);
-}
-
-void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
-{
- write_lock_bh(&l->lock);
- sk_del_node_init(sk);
- write_unlock_bh(&l->lock);
-}
-
-void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
+static void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
unsigned long flags;
struct iucv_sock *par = iucv_sk(parent);
@@ -657,7 +503,7 @@ void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
sk_acceptq_added(parent);
}
-void iucv_accept_unlink(struct sock *sk)
+static void iucv_accept_unlink(struct sock *sk)
{
unsigned long flags;
struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);
@@ -670,7 +516,8 @@ void iucv_accept_unlink(struct sock *sk)
sock_put(sk);
}
-struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
+static struct sock *iucv_accept_dequeue(struct sock *parent,
+ struct socket *newsock)
{
struct iucv_sock *isk, *n;
struct sock *sk;
@@ -705,24 +552,25 @@ static void __iucv_auto_name(struct iucv_sock *iucv)
{
char name[12];
- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
+ scnprintf(name, sizeof(name),
+ "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
while (__iucv_get_sock_by_name(name)) {
- sprintf(name, "%08x",
- atomic_inc_return(&iucv_sk_list.autobind_name));
+ scnprintf(name, sizeof(name), "%08x",
+ atomic_inc_return(&iucv_sk_list.autobind_name));
}
memcpy(iucv->src_name, name, 8);
}
/* Bind an unbound socket */
-static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
+static int iucv_sock_bind(struct socket *sock, struct sockaddr_unsized *addr,
int addr_len)
{
- struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
+ DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
+ char uid[sizeof(sa->siucv_user_id)];
struct sock *sk = sock->sk;
struct iucv_sock *iucv;
int err = 0;
struct net_device *dev;
- char uid[9];
/* Verify the input sockaddr */
if (addr_len < sizeof(struct sockaddr_iucv) ||
@@ -757,7 +605,7 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
for_each_netdev_rcu(&init_net, dev) {
if (!memcmp(dev->perm_addr, uid, 8)) {
memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
- /* Check for unitialized siucv_name */
+ /* Check for uninitialized siucv_name */
if (strncmp(sa->siucv_name, " ", 8) == 0)
__iucv_auto_name(iucv);
else
@@ -781,6 +629,7 @@ vm_bind:
memcpy(iucv->src_user_id, iucv_userid, 8);
sk->sk_state = IUCV_BOUND;
iucv->transport = AF_IUCV_TRANS_IUCV;
+ sk->sk_allocation |= GFP_DMA;
if (!iucv->msglimit)
iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
goto done_unlock;
@@ -805,6 +654,8 @@ static int iucv_sock_autobind(struct sock *sk)
return -EPROTO;
memcpy(iucv->src_user_id, iucv_userid, 8);
+ iucv->transport = AF_IUCV_TRANS_IUCV;
+ sk->sk_allocation |= GFP_DMA;
write_lock_bh(&iucv_sk_list.lock);
__iucv_auto_name(iucv);
@@ -816,9 +667,9 @@ static int iucv_sock_autobind(struct sock *sk)
return err;
}
-static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr)
+static int afiucv_path_connect(struct socket *sock, struct sockaddr_unsized *addr)
{
- struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
+ DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
unsigned char user_data[16];
@@ -862,10 +713,10 @@ done:
}
/* Connect an unconnected socket */
-static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
+static int iucv_sock_connect(struct socket *sock, struct sockaddr_unsized *addr,
int alen, int flags)
{
- struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
+ DECLARE_SOCKADDR(struct sockaddr_iucv *, sa, addr);
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
int err;
@@ -945,7 +796,7 @@ done:
/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
- int flags, bool kern)
+ struct proto_accept_arg *arg)
{
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *nsk;
@@ -959,7 +810,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
goto done;
}
- timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+ timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);
/* Wait for an incoming connection */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
@@ -1001,7 +852,7 @@ done:
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
int peer)
{
- struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
+ DECLARE_SOCKADDR(struct sockaddr_iucv *, siucv, addr);
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
@@ -1088,7 +939,6 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
/* initialize defaults */
cmsg_done = 0; /* check for duplicate headers */
- txmsg.class = 0;
/* iterate over control messages */
for_each_cmsghdr(cmsg, msg) {
@@ -1130,8 +980,9 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
* segmented records using the MSG_EOR flag), but
* for SOCK_STREAM we might want to improve it in future */
if (iucv->transport == AF_IUCV_TRANS_HIPER) {
- headroom = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
- linear = len;
+ headroom = sizeof(struct af_iucv_trans_hdr) +
+ LL_RESERVED_SPACE(iucv->hs_dev);
+ linear = min(len, PAGE_SIZE - headroom);
} else {
if (len < PAGE_SIZE) {
linear = len;
@@ -1182,6 +1033,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
}
} else { /* Classic VM IUCV transport */
skb_queue_tail(&iucv->send_skb_q, skb);
+ atomic_inc(&iucv->skbs_in_xmit);
if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
skb->len <= 7) {
@@ -1190,14 +1042,16 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
/* on success: there is no message_complete callback */
/* for an IPRMDATA msg; remove skb from send queue */
if (err == 0) {
+ atomic_dec(&iucv->skbs_in_xmit);
skb_unlink(skb, &iucv->send_skb_q);
- kfree_skb(skb);
+ consume_skb(skb);
}
/* this error should never happen since the */
/* IUCV_IPRMDATA path flag is set... sever path */
if (err == 0x15) {
pr_iucv->path_sever(iucv->path, NULL);
+ atomic_dec(&iucv->skbs_in_xmit);
skb_unlink(skb, &iucv->send_skb_q);
err = -EPIPE;
goto fail;
@@ -1207,13 +1061,12 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
int i;
/* skip iucv_array lying in the headroom */
- iba[0].address = (u32)(addr_t)skb->data;
+ iba[0].address = virt_to_dma32(skb->data);
iba[0].length = (u32)skb_headlen(skb);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- iba[i + 1].address =
- (u32)(addr_t)skb_frag_address(frag);
+ iba[i + 1].address = virt_to_dma32(skb_frag_address(frag));
iba[i + 1].length = (u32)skb_frag_size(frag);
}
err = pr_iucv->message_send(iucv->path, &txmsg,
@@ -1236,6 +1089,8 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
} else {
err = -EPIPE;
}
+
+ atomic_dec(&iucv->skbs_in_xmit);
skb_unlink(skb, &iucv->send_skb_q);
goto fail;
}
@@ -1307,13 +1162,12 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
struct iucv_array *iba = (struct iucv_array *)skb->head;
int i;
- iba[0].address = (u32)(addr_t)skb->data;
+ iba[0].address = virt_to_dma32(skb->data);
iba[0].length = (u32)skb_headlen(skb);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- iba[i + 1].address =
- (u32)(addr_t)skb_frag_address(frag);
+ iba[i + 1].address = virt_to_dma32(skb_frag_address(frag));
iba[i + 1].length = (u32)skb_frag_size(frag);
}
rc = pr_iucv->message_receive(path, msg,
@@ -1333,7 +1187,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
IUCV_SKB_CB(skb)->offset = 0;
if (sk_filter(sk, skb)) {
- atomic_inc(&sk->sk_drops); /* skb rejected by filter */
+ sk_drops_inc(sk); /* skb rejected by filter */
kfree_skb(skb);
return;
}
@@ -1366,7 +1220,6 @@ static void iucv_process_message_q(struct sock *sk)
static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags)
{
- int noblock = flags & MSG_DONTWAIT;
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
unsigned int copied, rlen;
@@ -1384,8 +1237,10 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
return -EOPNOTSUPP;
/* receive/dequeue next skb:
- * the function understands MSG_PEEK and, thus, does not dequeue skb */
- skb = skb_recv_datagram(sk, flags, noblock, &err);
+ * the function understands MSG_PEEK and, thus, does not dequeue skb
+ * only refcount is increased.
+ */
+ skb = skb_recv_datagram(sk, flags, &err);
if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN)
return 0;
@@ -1400,9 +1255,8 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
cskb = skb;
if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
- if (!(flags & MSG_PEEK))
- skb_queue_head(&sk->sk_receive_queue, skb);
- return -EFAULT;
+ err = -EFAULT;
+ goto err_out;
}
/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
@@ -1419,11 +1273,8 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
sizeof(IUCV_SKB_CB(skb)->class),
(void *)&IUCV_SKB_CB(skb)->class);
- if (err) {
- if (!(flags & MSG_PEEK))
- skb_queue_head(&sk->sk_receive_queue, skb);
- return err;
- }
+ if (err)
+ goto err_out;
/* Mark read part of skb as used */
if (!(flags & MSG_PEEK)) {
@@ -1437,7 +1288,7 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
}
}
- kfree_skb(skb);
+ consume_skb(skb);
if (iucv->transport == AF_IUCV_TRANS_HIPER) {
atomic_inc(&iucv->msg_recv);
if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
@@ -1479,8 +1330,18 @@ done:
/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
copied = rlen;
+ if (flags & MSG_PEEK)
+ skb_unref(skb);
return copied;
+
+err_out:
+ if (!(flags & MSG_PEEK))
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ else
+ skb_unref(skb);
+
+ return err;
}
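
skb_recv_datagram() dropped its separate noblock argument (MSG_DONTWAIT now travels in flags), and a peeked skb is returned with an extra reference while staying on the queue; hence the two error paths above. A condensed sketch of the contract (demo_recvmsg() is an illustrative name, error handling simplified):

	static int demo_recvmsg(struct sock *sk, struct msghdr *msg,
				size_t len, int flags)
	{
		struct sk_buff *skb;
		int err, copied;

		skb = skb_recv_datagram(sk, flags, &err); /* MSG_DONTWAIT in flags */
		if (!skb)
			return err;

		copied = min_t(int, skb->len, len);
		if (skb_copy_datagram_msg(skb, 0, msg, copied)) {
			if (flags & MSG_PEEK)
				skb_unref(skb);	/* queue still holds a ref */
			else
				skb_queue_head(&sk->sk_receive_queue, skb);
			return -EFAULT;
		}

		if (flags & MSG_PEEK)
			skb_unref(skb);		/* leave skb queued for next read */
		else
			consume_skb(skb);	/* delivered, not an error drop */
		return copied;
	}
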
static inline __poll_t iucv_accept_poll(struct sock *parent)
@@ -1498,8 +1359,8 @@ static inline __poll_t iucv_accept_poll(struct sock *parent)
return 0;
}
-__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
- poll_table *wait)
+static __poll_t iucv_sock_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
{
struct sock *sk = sock->sk;
__poll_t mask = 0;
@@ -1561,7 +1422,8 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
break;
}
- if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
+ if ((how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) &&
+ sk->sk_state == IUCV_CONNECTED) {
if (iucv->transport == AF_IUCV_TRANS_IUCV) {
txmsg.class = 0;
txmsg.tag = 0;
@@ -1621,7 +1483,7 @@ static int iucv_sock_release(struct socket *sock)
/* getsockopt and setsockopt */
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
- char __user *optval, unsigned int optlen)
+ sockptr_t optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
@@ -1634,7 +1496,7 @@ static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
if (optlen < sizeof(int))
return -EINVAL;
- if (get_user(val, (int __user *) optval))
+ if (copy_from_sockptr(&val, optval, sizeof(int)))
return -EFAULT;
rc = 0;
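
sockptr_t wraps either a user or a kernel pointer, so one setsockopt handler serves both sys_setsockopt() and in-kernel callers such as BPF; copy_from_sockptr() replaces the old get_user(). Minimal shape of such a handler (demo_* names are illustrative):

	/* needs <linux/sockptr.h> */
	static int demo_setsockopt(struct socket *sock, int level, int optname,
				   sockptr_t optval, unsigned int optlen)
	{
		int val;

		if (optlen < sizeof(int))
			return -EINVAL;
		if (copy_from_sockptr(&val, optval, sizeof(int)))
			return -EFAULT;	/* works for user and kernel buffers */

		/* ... apply val under lock_sock(), as the real handler does ... */
		return 0;
	}
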
@@ -1651,7 +1513,7 @@ static int iucv_sock_setsockopt(struct socket *sock, int level, int optname,
switch (sk->sk_state) {
case IUCV_OPEN:
case IUCV_BOUND:
- if (val < 1 || val > (u16)(~0))
+ if (val < 1 || val > U16_MAX)
rc = -EINVAL;
else
iucv->msglimit = val;
@@ -1771,7 +1633,7 @@ static int iucv_callback_connreq(struct iucv_path *path,
}
/* Create the new socket */
- nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
+ nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
if (!nsk) {
err = pr_iucv->path_sever(path, user_data);
iucv_path_free(path);
@@ -1780,6 +1642,8 @@ static int iucv_callback_connreq(struct iucv_path *path,
niucv = iucv_sk(nsk);
iucv_sock_init(nsk, sk);
+ niucv->transport = AF_IUCV_TRANS_IUCV;
+ nsk->sk_allocation |= GFP_DMA;
/* Set the new iucv_sock */
memcpy(niucv->dst_name, ipuser + 8, 8);
@@ -1872,10 +1736,14 @@ static void iucv_callback_txdone(struct iucv_path *path,
{
struct sock *sk = path->private;
struct sk_buff *this = NULL;
- struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
+ struct sk_buff_head *list;
struct sk_buff *list_skb;
+ struct iucv_sock *iucv;
unsigned long flags;
+ iucv = iucv_sk(sk);
+ list = &iucv->send_skb_q;
+
bh_lock_sock(sk);
spin_lock_irqsave(&list->lock, flags);
@@ -1885,18 +1753,21 @@ static void iucv_callback_txdone(struct iucv_path *path,
break;
}
}
- if (this)
+ if (this) {
+ atomic_dec(&iucv->skbs_in_xmit);
__skb_unlink(this, list);
+ }
+
spin_unlock_irqrestore(&list->lock, flags);
if (this) {
- kfree_skb(this);
+ consume_skb(this);
/* wake up any process waiting for sending */
iucv_sock_wake_msglim(sk);
}
if (sk->sk_state == IUCV_CLOSING) {
- if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
+ if (atomic_read(&iucv->skbs_in_xmit) == 0) {
sk->sk_state = IUCV_CLOSED;
sk->sk_state_change(sk);
}
@@ -1935,6 +1806,15 @@ static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
bh_unlock_sock(sk);
}
+static struct iucv_handler af_iucv_handler = {
+ .path_pending = iucv_callback_connreq,
+ .path_complete = iucv_callback_connack,
+ .path_severed = iucv_callback_connrej,
+ .message_pending = iucv_callback_rx,
+ .message_complete = iucv_callback_txdone,
+ .path_quiesced = iucv_callback_shutdown,
+};
+
/***************** HiperSockets transport callbacks ********************/
static void afiucv_swap_src_dest(struct sk_buff *skb)
{
@@ -1956,9 +1836,9 @@ static void afiucv_swap_src_dest(struct sk_buff *skb)
memset(skb->data, 0, ETH_HLEN);
}
-/**
+/*
* afiucv_hs_callback_syn - react on received SYN
- **/
+ */
static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
{
struct af_iucv_trans_hdr *trans_hdr = iucv_trans_hdr(skb);
@@ -1975,7 +1855,7 @@ static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
goto out;
}
- nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC, 0);
+ nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
bh_lock_sock(sk);
if ((sk->sk_state != IUCV_LISTEN) ||
sk_acceptq_is_full(sk) ||
@@ -2021,71 +1901,73 @@ out:
return NET_RX_SUCCESS;
}
-/**
+/*
* afiucv_hs_callback_synack() - react on received SYN-ACK
- **/
+ */
static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
{
struct iucv_sock *iucv = iucv_sk(sk);
- if (!iucv)
- goto out;
- if (sk->sk_state != IUCV_BOUND)
- goto out;
+ if (!iucv || sk->sk_state != IUCV_BOUND) {
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+ }
+
bh_lock_sock(sk);
iucv->msglimit_peer = iucv_trans_hdr(skb)->window;
sk->sk_state = IUCV_CONNECTED;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
-out:
- kfree_skb(skb);
+ consume_skb(skb);
return NET_RX_SUCCESS;
}
-/**
+/*
* afiucv_hs_callback_synfin() - react on received SYN_FIN
- **/
+ */
static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
{
struct iucv_sock *iucv = iucv_sk(sk);
- if (!iucv)
- goto out;
- if (sk->sk_state != IUCV_BOUND)
- goto out;
+ if (!iucv || sk->sk_state != IUCV_BOUND) {
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+ }
+
bh_lock_sock(sk);
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
bh_unlock_sock(sk);
-out:
- kfree_skb(skb);
+ consume_skb(skb);
return NET_RX_SUCCESS;
}
-/**
+/*
* afiucv_hs_callback_fin() - react on received FIN
- **/
+ */
static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
{
struct iucv_sock *iucv = iucv_sk(sk);
/* other end of connection closed */
- if (!iucv)
- goto out;
+ if (!iucv) {
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+ }
+
bh_lock_sock(sk);
if (sk->sk_state == IUCV_CONNECTED) {
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
}
bh_unlock_sock(sk);
-out:
- kfree_skb(skb);
+ consume_skb(skb);
return NET_RX_SUCCESS;
}
-/**
+/*
* afiucv_hs_callback_win() - react on received WIN
- **/
+ */
static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
{
struct iucv_sock *iucv = iucv_sk(sk);
@@ -2101,9 +1983,9 @@ static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
return NET_RX_SUCCESS;
}
-/**
+/*
* afiucv_hs_callback_rx() - react on received data
- **/
+ */
static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
{
struct iucv_sock *iucv = iucv_sk(sk);
@@ -2129,7 +2011,7 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
skb_reset_network_header(skb);
IUCV_SKB_CB(skb)->offset = 0;
if (sk_filter(sk, skb)) {
- atomic_inc(&sk->sk_drops); /* skb rejected by filter */
+ sk_drops_inc(sk); /* skb rejected by filter */
kfree_skb(skb);
return NET_RX_SUCCESS;
}
@@ -2145,11 +2027,11 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
return NET_RX_SUCCESS;
}
-/**
+/*
* afiucv_hs_rcv() - base function for arriving data through HiperSockets
* transport
* called from netif RX softirq
- **/
+ */
static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
@@ -2160,7 +2042,6 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
char nullstring[8];
if (!pskb_may_pull(skb, sizeof(*trans_hdr))) {
- WARN_ONCE(1, "AF_IUCV failed to receive skb, len=%u", skb->len);
kfree_skb(skb);
return NET_RX_SUCCESS;
}
@@ -2233,13 +2114,13 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
case (AF_IUCV_FLAG_WIN):
err = afiucv_hs_callback_win(sk, skb);
if (skb->len == sizeof(struct af_iucv_trans_hdr)) {
- kfree_skb(skb);
+ consume_skb(skb);
break;
}
- /* fall through and receive non-zero length data */
+ fallthrough; /* and receive non-zero length data */
case (AF_IUCV_FLAG_SHT):
/* shutdown request */
- /* fall through and receive zero length data */
+ fallthrough; /* and receive zero length data */
case 0:
/* plain data frame */
IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
@@ -2252,77 +2133,44 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
return err;
}
-/**
- * afiucv_hs_callback_txnotify() - handle send notifcations from HiperSockets
+/*
+ * afiucv_hs_callback_txnotify() - handle send notifications from HiperSockets
* transport
- **/
-static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
- enum iucv_tx_notify n)
+ */
+static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify n)
{
- struct sock *isk = skb->sk;
- struct sock *sk = NULL;
- struct iucv_sock *iucv = NULL;
- struct sk_buff_head *list;
- struct sk_buff *list_skb;
- struct sk_buff *nskb;
- unsigned long flags;
-
- read_lock_irqsave(&iucv_sk_list.lock, flags);
- sk_for_each(sk, &iucv_sk_list.head)
- if (sk == isk) {
- iucv = iucv_sk(sk);
- break;
- }
- read_unlock_irqrestore(&iucv_sk_list.lock, flags);
+ struct iucv_sock *iucv = iucv_sk(sk);
- if (!iucv || sock_flag(sk, SOCK_ZAPPED))
+ if (sock_flag(sk, SOCK_ZAPPED))
return;
- list = &iucv->send_skb_q;
- spin_lock_irqsave(&list->lock, flags);
- skb_queue_walk_safe(list, list_skb, nskb) {
- if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
- switch (n) {
- case TX_NOTIFY_OK:
- __skb_unlink(list_skb, list);
- kfree_skb(list_skb);
- iucv_sock_wake_msglim(sk);
- break;
- case TX_NOTIFY_PENDING:
- atomic_inc(&iucv->pendings);
- break;
- case TX_NOTIFY_DELAYED_OK:
- __skb_unlink(list_skb, list);
- atomic_dec(&iucv->pendings);
- if (atomic_read(&iucv->pendings) <= 0)
- iucv_sock_wake_msglim(sk);
- kfree_skb(list_skb);
- break;
- case TX_NOTIFY_UNREACHABLE:
- case TX_NOTIFY_DELAYED_UNREACHABLE:
- case TX_NOTIFY_TPQFULL: /* not yet used */
- case TX_NOTIFY_GENERALERROR:
- case TX_NOTIFY_DELAYED_GENERALERROR:
- __skb_unlink(list_skb, list);
- kfree_skb(list_skb);
- if (sk->sk_state == IUCV_CONNECTED) {
- sk->sk_state = IUCV_DISCONN;
- sk->sk_state_change(sk);
- }
- break;
- }
- break;
+ switch (n) {
+ case TX_NOTIFY_OK:
+ atomic_dec(&iucv->skbs_in_xmit);
+ iucv_sock_wake_msglim(sk);
+ break;
+ case TX_NOTIFY_PENDING:
+ atomic_inc(&iucv->pendings);
+ break;
+ case TX_NOTIFY_DELAYED_OK:
+ atomic_dec(&iucv->skbs_in_xmit);
+ if (atomic_dec_return(&iucv->pendings) <= 0)
+ iucv_sock_wake_msglim(sk);
+ break;
+ default:
+ atomic_dec(&iucv->skbs_in_xmit);
+ if (sk->sk_state == IUCV_CONNECTED) {
+ sk->sk_state = IUCV_DISCONN;
+ sk->sk_state_change(sk);
}
}
- spin_unlock_irqrestore(&list->lock, flags);
if (sk->sk_state == IUCV_CLOSING) {
- if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
+ if (atomic_read(&iucv->skbs_in_xmit) == 0) {
sk->sk_state = IUCV_CLOSED;
sk->sk_state_change(sk);
}
}
-
}
/*
@@ -2381,6 +2229,35 @@ static const struct proto_ops iucv_sock_ops = {
.getsockopt = iucv_sock_getsockopt,
};
+static int iucv_sock_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
+{
+ struct sock *sk;
+
+ if (protocol && protocol != PF_IUCV)
+ return -EPROTONOSUPPORT;
+
+ sock->state = SS_UNCONNECTED;
+
+ switch (sock->type) {
+ case SOCK_STREAM:
+ case SOCK_SEQPACKET:
+ /* currently, proto ops can handle both sk types */
+ sock->ops = &iucv_sock_ops;
+ break;
+ default:
+ return -ESOCKTNOSUPPORT;
+ }
+
+ sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
+ if (!sk)
+ return -ENOMEM;
+
+ iucv_sock_init(sk, NULL);
+
+ return 0;
+}
+
static const struct net_proto_family iucv_sock_family_ops = {
.family = AF_IUCV,
.owner = THIS_MODULE,
@@ -2392,48 +2269,11 @@ static struct packet_type iucv_packet_type = {
.func = afiucv_hs_rcv,
};
-static int afiucv_iucv_init(void)
-{
- int err;
-
- err = pr_iucv->iucv_register(&af_iucv_handler, 0);
- if (err)
- goto out;
- /* establish dummy device */
- af_iucv_driver.bus = pr_iucv->bus;
- err = driver_register(&af_iucv_driver);
- if (err)
- goto out_iucv;
- af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
- if (!af_iucv_dev) {
- err = -ENOMEM;
- goto out_driver;
- }
- dev_set_name(af_iucv_dev, "af_iucv");
- af_iucv_dev->bus = pr_iucv->bus;
- af_iucv_dev->parent = pr_iucv->root;
- af_iucv_dev->release = (void (*)(struct device *))kfree;
- af_iucv_dev->driver = &af_iucv_driver;
- err = device_register(af_iucv_dev);
- if (err)
- goto out_iucv_dev;
- return 0;
-
-out_iucv_dev:
- put_device(af_iucv_dev);
-out_driver:
- driver_unregister(&af_iucv_driver);
-out_iucv:
- pr_iucv->iucv_unregister(&af_iucv_handler, 0);
-out:
- return err;
-}
-
static int __init afiucv_init(void)
{
int err;
- if (MACHINE_IS_VM) {
+ if (machine_is_vm() && IS_ENABLED(CONFIG_IUCV)) {
cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
if (unlikely(err)) {
WARN_ON(err);
@@ -2441,11 +2281,7 @@ static int __init afiucv_init(void)
goto out;
}
- pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv");
- if (!pr_iucv) {
- printk(KERN_WARNING "iucv_if lookup failed\n");
- memset(&iucv_userid, 0, sizeof(iucv_userid));
- }
+ pr_iucv = &iucv_if;
} else {
memset(&iucv_userid, 0, sizeof(iucv_userid));
pr_iucv = NULL;
@@ -2459,33 +2295,35 @@ static int __init afiucv_init(void)
goto out_proto;
if (pr_iucv) {
- err = afiucv_iucv_init();
+ err = pr_iucv->iucv_register(&af_iucv_handler, 0);
if (err)
goto out_sock;
- } else
- register_netdevice_notifier(&afiucv_netdev_notifier);
+ }
+
+ err = register_netdevice_notifier(&afiucv_netdev_notifier);
+ if (err)
+ goto out_notifier;
+
dev_add_pack(&iucv_packet_type);
return 0;
+out_notifier:
+ if (pr_iucv)
+ pr_iucv->iucv_unregister(&af_iucv_handler, 0);
out_sock:
sock_unregister(PF_IUCV);
out_proto:
proto_unregister(&iucv_proto);
out:
- if (pr_iucv)
- symbol_put(iucv_if);
return err;
}
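
With the PM dummy device gone, afiucv_init() keeps the classic goto-unwind shape: register in order, and on failure jump to the label that reverses everything done so far, in reverse order. The skeleton of the pattern, reduced to hypothetical demo_* symbols around the real AF_IUCV family:

	static int __init demo_init(void)
	{
		int err;

		err = proto_register(&demo_proto, 0);
		if (err)
			goto out;
		err = sock_register(&demo_family_ops);
		if (err)
			goto out_proto;
		err = register_netdevice_notifier(&demo_notifier);
		if (err)
			goto out_sock;
		return 0;

	out_sock:
		sock_unregister(AF_IUCV);	/* reverse order of setup */
	out_proto:
		proto_unregister(&demo_proto);
	out:
		return err;
	}
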
static void __exit afiucv_exit(void)
{
- if (pr_iucv) {
- device_unregister(af_iucv_dev);
- driver_unregister(&af_iucv_driver);
+ if (pr_iucv)
pr_iucv->iucv_unregister(&af_iucv_handler, 0);
- symbol_put(iucv_if);
- } else
- unregister_netdevice_notifier(&afiucv_netdev_notifier);
+
+ unregister_netdevice_notifier(&afiucv_netdev_notifier);
dev_remove_pack(&iucv_packet_type);
sock_unregister(PF_IUCV);
proto_unregister(&iucv_proto);