diff options
| author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2015-11-23 09:04:05 +0100 | 
|---|---|---|
| committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2015-11-23 09:04:05 +0100 | 
| commit | 92907cbbef8625bb3998d1eb385fc88f23c97a3f (patch) | |
| tree | 15626ff9287e37c3cb81c7286d6db5a7fd77c854 /net/unix | |
| parent | 15fbfccfe92c62ae8d1ecc647c44157ed01ac02e (diff) | |
| parent | 1ec218373b8ebda821aec00bb156a9c94fad9cd4 (diff) | |
Merge tag 'v4.4-rc2' into drm-intel-next-queued
Linux 4.4-rc2
Backmerge to get at
commit 1b0e3a049efe471c399674fd954500ce97438d30
Author: Imre Deak <imre.deak@intel.com>
Date:   Thu Nov 5 23:04:11 2015 +0200
    drm/i915/skl: disable display side power well support for now
so that we can properly re-enable skl power wells in -next.
Conflicts are just adjacent lines changed, except for intel_fbdev.c
where we need to interleave the changes. Nothing nefarious.
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'net/unix')
| -rw-r--r-- | net/unix/af_unix.c | 46 | 
1 file changed, 42 insertions, 4 deletions
| diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 03ee4d359f6a..955ec152cb71 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -326,9 +326,10 @@ found:  	return s;  } -static inline int unix_writable(struct sock *sk) +static int unix_writable(const struct sock *sk)  { -	return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf; +	return sk->sk_state != TCP_LISTEN && +	       (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;  }  static void unix_write_space(struct sock *sk) @@ -440,6 +441,7 @@ static void unix_release_sock(struct sock *sk, int embrion)  		if (state == TCP_LISTEN)  			unix_release_sock(skb->sk, 1);  		/* passed fds are erased in the kfree_skb hook	      */ +		UNIXCB(skb).consumed = skb->len;  		kfree_skb(skb);  	} @@ -1798,6 +1800,7 @@ alloc_skb:  		 * this - does no harm  		 */  		consume_skb(newskb); +		newskb = NULL;  	}  	if (skb_append_pagefrags(skb, page, offset, size)) { @@ -1810,8 +1813,11 @@ alloc_skb:  	skb->truesize += size;  	atomic_add(size, &sk->sk_wmem_alloc); -	if (newskb) +	if (newskb) { +		spin_lock(&other->sk_receive_queue.lock);  		__skb_queue_tail(&other->sk_receive_queue, newskb); +		spin_unlock(&other->sk_receive_queue.lock); +	}  	unix_state_unlock(other);  	mutex_unlock(&unix_sk(other)->readlock); @@ -2064,8 +2070,14 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)  		goto out;  	} +	if (flags & MSG_PEEK) +		skip = sk_peek_offset(sk, flags); +	else +		skip = 0; +  	do {  		int chunk; +		bool drop_skb;  		struct sk_buff *skb, *last;  		unix_state_lock(sk); @@ -2112,7 +2124,6 @@ unlock:  			break;  		} -		skip = sk_peek_offset(sk, flags);  		while (skip >= unix_skb_len(skb)) {  			skip -= unix_skb_len(skb);  			last = skb; @@ -2147,7 +2158,11 @@ unlock:  		}  		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size); +		skb_get(skb);  		chunk = state->recv_actor(skb, skip, chunk, state); +		drop_skb = !unix_skb_len(skb); +		/* skb is only safe to use if !drop_skb 
*/ +		consume_skb(skb);  		if (chunk < 0) {  			if (copied == 0)  				copied = -EFAULT; @@ -2156,6 +2171,18 @@ unlock:  		copied += chunk;  		size -= chunk; +		if (drop_skb) { +			/* the skb was touched by a concurrent reader; +			 * we should not expect anything from this skb +			 * anymore and assume it invalid - we can be +			 * sure it was dropped from the socket queue +			 * +			 * let's report a short read +			 */ +			err = 0; +			break; +		} +  		/* Mark read part of skb as used */  		if (!(flags & MSG_PEEK)) {  			UNIXCB(skb).consumed += chunk; @@ -2181,6 +2208,17 @@ unlock:  			sk_peek_offset_fwd(sk, chunk); +			if (UNIXCB(skb).fp) +				break; + +			skip = 0; +			last = skb; +			last_len = skb->len; +			unix_state_lock(sk); +			skb = skb_peek_next(skb, &sk->sk_receive_queue); +			if (skb) +				goto again; +			unix_state_unlock(sk);  			break;  		}  	} while (size); | 
