Diffstat (limited to 'net/iucv/af_iucv.c'):

 net/iucv/af_iucv.c | 46 ++++++++++++++++++++++++++--------------------
 1 file changed, 26 insertions(+), 20 deletions(-)
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 498a0c35b7bb..7929df08d4e0 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -184,7 +184,7 @@ static void iucv_sock_wake_msglim(struct sock *sk)
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up_interruptible_all(&wq->wait);
- sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+ sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
rcu_read_unlock();
}
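iucv_sock_wake_msglim() already holds rcu_read_lock() at this point, so the plain sk_wake_async() would open a second, redundant RCU read-side critical section; sk_wake_async_rcu() assumes the caller holds the lock. A rough paraphrase of the two helpers, simplified from include/net/sock.h (details may differ between kernel versions):

static inline void sk_wake_async(const struct sock *sk, int how, int band)
{
	if (sock_flag(sk, SOCK_FASYNC)) {
		rcu_read_lock();
		sock_wake_async(rcu_dereference(sk->sk_wq), how, band);
		rcu_read_unlock();
	}
}

/* Caller must already be inside rcu_read_lock()/rcu_read_unlock(). */
static inline void sk_wake_async_rcu(const struct sock *sk, int how, int band)
{
	if (sock_flag(sk, SOCK_FASYNC))
		sock_wake_async(rcu_dereference(sk->sk_wq), how, band);
}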
@@ -335,8 +335,8 @@ static void iucv_sever_path(struct sock *sk, int with_user_data)
struct iucv_sock *iucv = iucv_sk(sk);
struct iucv_path *path = iucv->path;
- if (iucv->path) {
- iucv->path = NULL;
+ /* Whoever resets the path pointer, must sever and free it. */
+ if (xchg(&iucv->path, NULL)) {
if (with_user_data) {
low_nmcpy(user_data, iucv->src_name);
high_nmcpy(user_data, iucv->dst_name);
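xchg() atomically swaps the stored pointer for NULL and returns the old value, so when iucv_sever_path() races with itself (for example, the IUCV callback path against socket close), exactly one caller sees the non-NULL path and performs the sever and free. A minimal sketch of that claim-then-free pattern with illustrative names (struct bar, bar_disconnect(), bar_free() are hypothetical, not IUCV API):

struct bar;				/* hypothetical resource */
void bar_disconnect(struct bar *res);	/* hypothetical teardown helpers */
void bar_free(struct bar *res);

struct foo {
	struct bar *res;		/* shared pointer, only ever reset via xchg() */
};

/* Whoever resets the pointer owns the teardown; it runs exactly once. */
static void foo_sever(struct foo *obj)
{
	struct bar *res = xchg(&obj->res, NULL);

	if (res) {
		bar_disconnect(res);
		bar_free(res);
	}
}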
@@ -795,7 +795,7 @@ done:
/* Accept a pending connection */
static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
- int flags, bool kern)
+ struct proto_accept_arg *arg)
{
DECLARE_WAITQUEUE(wait, current);
struct sock *sk = sock->sk, *nsk;
@@ -809,7 +809,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
goto done;
}
- timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+ timeo = sock_rcvtimeo(sk, arg->flags & O_NONBLOCK);
/* Wait for an incoming connection */
add_wait_queue_exclusive(sk_sleep(sk), &wait);
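The accept flags now arrive wrapped in struct proto_accept_arg instead of as a bare int plus a kern bool; iucv_sock_accept() only reads arg->flags. Roughly, the argument block looks like this (paraphrased from include/net/sock.h; the exact layout may differ by kernel version):

struct proto_accept_arg {
	int flags;	/* O_NONBLOCK etc., the former 'flags' parameter */
	int err;	/* error code some protocols report back to the core */
	int is_empty;	/* set when no connection was pending */
	bool kern;	/* kernel-internal accept, the former 'kern' parameter */
};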
@@ -1060,13 +1060,12 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
int i;
/* skip iucv_array lying in the headroom */
- iba[0].address = (u32)(addr_t)skb->data;
+ iba[0].address = virt_to_dma32(skb->data);
iba[0].length = (u32)skb_headlen(skb);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- iba[i + 1].address =
- (u32)(addr_t)skb_frag_address(frag);
+ iba[i + 1].address = virt_to_dma32(skb_frag_address(frag));
iba[i + 1].length = (u32)skb_frag_size(frag);
}
err = pr_iucv->message_send(iucv->path, &txmsg,
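virt_to_dma32() replaces the open-coded (u32)(addr_t) casts with a typed s390 helper returning dma32_t, so sparse can flag accidental mixing of kernel virtual addresses with the 32-bit DMA addresses the IUCV array entries carry. A hedged approximation of what the helper does (the real definition lives in the s390 headers and may differ in detail):

/* Approximation only: convert a kernel virtual address into the annotated
 * 32-bit DMA address type used for struct iucv_array entries.
 */
static inline dma32_t example_virt_to_dma32(void *ptr)
{
	return (__force dma32_t)(unsigned long)ptr;
}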
@@ -1162,13 +1161,12 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
struct iucv_array *iba = (struct iucv_array *)skb->head;
int i;
- iba[0].address = (u32)(addr_t)skb->data;
+ iba[0].address = virt_to_dma32(skb->data);
iba[0].length = (u32)skb_headlen(skb);
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- iba[i + 1].address =
- (u32)(addr_t)skb_frag_address(frag);
+ iba[i + 1].address = virt_to_dma32(skb_frag_address(frag));
iba[i + 1].length = (u32)skb_frag_size(frag);
}
rc = pr_iucv->message_receive(path, msg,
@@ -1238,7 +1236,9 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
return -EOPNOTSUPP;
/* receive/dequeue next skb:
- * the function understands MSG_PEEK and, thus, does not dequeue skb */
+ * the function understands MSG_PEEK and, thus, does not dequeue skb
+ * only refcount is increased.
+ */
skb = skb_recv_datagram(sk, flags, &err);
if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN)
@@ -1254,9 +1254,8 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
cskb = skb;
if (skb_copy_datagram_msg(cskb, offset, msg, copied)) {
- if (!(flags & MSG_PEEK))
- skb_queue_head(&sk->sk_receive_queue, skb);
- return -EFAULT;
+ err = -EFAULT;
+ goto err_out;
}
/* SOCK_SEQPACKET: set MSG_TRUNC if recv buf size is too small */
@@ -1273,11 +1272,8 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS,
sizeof(IUCV_SKB_CB(skb)->class),
(void *)&IUCV_SKB_CB(skb)->class);
- if (err) {
- if (!(flags & MSG_PEEK))
- skb_queue_head(&sk->sk_receive_queue, skb);
- return err;
- }
+ if (err)
+ goto err_out;
/* Mark read part of skb as used */
if (!(flags & MSG_PEEK)) {
@@ -1333,8 +1329,18 @@ done:
/* SOCK_SEQPACKET: return real length if MSG_TRUNC is set */
if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
copied = rlen;
+ if (flags & MSG_PEEK)
+ skb_unref(skb);
return copied;
+
+err_out:
+ if (!(flags & MSG_PEEK))
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ else
+ skb_unref(skb);
+
+ return err;
}
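skb_recv_datagram() with MSG_PEEK leaves the skb on sk_receive_queue and only takes an extra reference, so every exit from iucv_sock_recvmsg() must now either drop that reference (peek case) or requeue the skb it dequeued (non-peek error case). An illustrative helper condensing that exit rule (not part of the patch; names are made up):

/* Illustrative only: exit discipline for a datagram obtained via
 * skb_recv_datagram() when MSG_PEEK may be set.
 */
static void example_recv_exit(struct sock *sk, struct sk_buff *skb,
			      unsigned int flags, bool failed)
{
	if (flags & MSG_PEEK)
		skb_unref(skb);		/* skb stays queued; drop the peek ref */
	else if (failed)
		skb_queue_head(&sk->sk_receive_queue, skb);	/* put it back */
	/* else: the dequeued skb is consumed by the normal receive path */
}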
static inline __poll_t iucv_accept_poll(struct sock *parent)