Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/tcp.c         22
-rw-r--r--  net/ipv4/tcp_output.c   7
2 files changed, 4 insertions, 25 deletions
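Taken together, the hunks below remove the SKBFL_PURE_ZEROCOPY special case from the TCP send path, so zerocopy payload is scheduled and charged against the socket's send budget the same way as copied payload. As orientation, a minimal userspace sketch of that accounting pattern follows; struct sock_model and the helper bodies are invented stand-ins for the kernel's sk_wmem_schedule()/sk_mem_charge(), not their real implementations.

/* Hedged model: reserve budget before appending, charge on success. */
#include <stdbool.h>
#include <stdio.h>

struct sock_model {
	long forward_alloc;	/* bytes already reserved for this socket */
	long limit;		/* crude stand-in for the protocol/memcg limit */
	long charged;		/* bytes charged so far */
};

/* Reserve @size bytes, extending forward_alloc if the limit allows. */
static bool sk_wmem_schedule(struct sock_model *sk, int size)
{
	if (sk->forward_alloc >= size)
		return true;
	if (sk->charged + size > sk->limit)
		return false;		/* caller goes to wait_for_space */
	sk->forward_alloc = size;
	return true;
}

static void sk_mem_charge(struct sock_model *sk, int size)
{
	sk->forward_alloc -= size;
	sk->charged += size;
}

int main(void)
{
	struct sock_model sk = { .limit = 1 << 20 };
	int copy = 4096;

	/* After this diff, both the copy path and the zerocopy path do
	 * this unconditionally before appending @copy bytes to the skb:
	 */
	if (!sk_wmem_schedule(&sk, copy))
		return 1;		/* i.e. goto wait_for_space */
	sk_mem_charge(&sk, copy);
	printf("charged=%ld forward_alloc=%ld\n", sk.charged, sk.forward_alloc);
	return 0;
}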
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 2561c14a6e63..bc7f419184aa 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -863,7 +863,6 @@ struct sk_buff *tcp_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
if (likely(skb)) {
bool mem_scheduled;

- skb->truesize = SKB_TRUESIZE(size + MAX_TCP_HEADER);
if (force_schedule) {
mem_scheduled = true;
sk_forced_mem_schedule(sk, skb->truesize);
@@ -1320,15 +1319,6 @@ new_segment:
copy = min_t(int, copy, pfrag->size - pfrag->offset);

- /* skb changing from pure zc to mixed, must charge zc */
- if (unlikely(skb_zcopy_pure(skb))) {
- if (!sk_wmem_schedule(sk, skb->data_len))
- goto wait_for_space;
-
- sk_mem_charge(sk, skb->data_len);
- skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
- }
-
if (!sk_wmem_schedule(sk, copy))
goto wait_for_space;
@@ -1349,16 +1339,8 @@ new_segment:
}
pfrag->offset += copy;
} else {
- /* First append to a fragless skb builds initial
- * pure zerocopy skb
- */
- if (!skb->len)
- skb_shinfo(skb)->flags |= SKBFL_PURE_ZEROCOPY;
-
- if (!skb_zcopy_pure(skb)) {
- if (!sk_wmem_schedule(sk, copy))
- goto wait_for_space;
- }
+ if (!sk_wmem_schedule(sk, copy))
+ goto wait_for_space;

err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
if (err == -EMSGSIZE || err == -EEXIST) {
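Before these hunks, tcp_sendmsg_locked() tracked a per-skb state: the first zerocopy append to a fragless skb tagged it SKBFL_PURE_ZEROCOPY and skipped the send-budget check (the pages are user memory), and later mixing copied data into the same skb had to charge skb->data_len retroactively and clear the flag. The change collapses both paths into one unconditional sk_wmem_schedule(sk, copy). A hedged before/after sketch, where skb_state and wmem_schedule are invented stand-ins rather than kernel code:

#include <stdbool.h>

/* Illustrative stand-ins; not the kernel types or helpers. */
struct skb_state {
	int len;		/* bytes already in the skb */
	bool pure_zc;		/* stand-in for skb_zcopy_pure(skb) */
};

/* Old behaviour (removed): first zerocopy append tags the skb pure,
 * and pure skbs skip the send-budget check entirely.
 */
static bool zc_schedule_old(struct skb_state *skb, int copy,
			    bool (*wmem_schedule)(int))
{
	if (!skb->len)
		skb->pure_zc = true;
	if (!skb->pure_zc)
		return wmem_schedule(copy);
	return true;
}

/* New behaviour: every append is scheduled, zerocopy or not. */
static bool zc_schedule_new(struct skb_state *skb, int copy,
			    bool (*wmem_schedule)(int))
{
	(void)skb;		/* no special case any more */
	return wmem_schedule(copy);
}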
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 287b57aadc37..6fbbf1558033 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1677,8 +1677,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
if (delta_truesize) {
skb->truesize -= delta_truesize;
sk_wmem_queued_add(sk, -delta_truesize);
- if (!skb_zcopy_pure(skb))
- sk_mem_uncharge(sk, delta_truesize);
+ sk_mem_uncharge(sk, delta_truesize);
}

/* Any change of skb->len requires recalculation of tso factor. */
@@ -2296,9 +2295,7 @@ static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
if (len <= skb->len)
break;

- if (unlikely(TCP_SKB_CB(skb)->eor) ||
- tcp_has_tx_tstamp(skb) ||
- !skb_pure_zcopy_same(skb, next))
+ if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb))
return false;

len -= skb->len;
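On the tcp_output.c side, tcp_trim_head() now uncharges delta_truesize unconditionally, and tcp_can_coalesce_send_queue_head() no longer needs skb_pure_zcopy_same() to keep pure-zerocopy skbs from being merged with charged ones. A hedged model of the simplified walk; struct skb_model and its fields are invented stand-ins for sk_buff, TCP_SKB_CB(skb)->eor and tcp_has_tx_tstamp(skb):

#include <stdbool.h>
#include <stddef.h>

struct skb_model {
	struct skb_model *next;
	int len;
	bool eor;		/* stand-in for TCP_SKB_CB(skb)->eor */
	bool tx_tstamp;		/* stand-in for tcp_has_tx_tstamp(skb) */
};

/* Can the first @len bytes of the write queue be coalesced? */
static bool can_coalesce_head(struct skb_model *skb, int len)
{
	for (; skb; skb = skb->next) {
		if (len <= skb->len)
			break;
		/* skb_pure_zcopy_same() is no longer consulted here. */
		if (skb->eor || skb->tx_tstamp)
			return false;
		len -= skb->len;
	}
	return true;
}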