Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c             6
-rw-r--r--  net/core/ethtool.c         5
-rw-r--r--  net/core/fib_rules.c       6
-rw-r--r--  net/core/pktgen.c         11
-rw-r--r--  net/dsa/tag_sja1105.c     10
-rw-r--r--  net/ipv4/route.c          24
-rw-r--r--  net/ipv4/udp.c             3
-rw-r--r--  net/ipv6/raw.c            25
-rw-r--r--  net/packet/af_packet.c     2
-rw-r--r--  net/rds/ib.c               2
-rw-r--r--  net/rds/ib_rdma.c         10
-rw-r--r--  net/rds/ib_recv.c          3
-rw-r--r--  net/sctp/sm_make_chunk.c  13
-rw-r--r--  net/sctp/sm_sideeffect.c   5
-rw-r--r--  net/tls/tls_device.c      26
15 files changed, 92 insertions(+), 59 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 140858d4a048..eb7fb6daa1ef 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5021,12 +5021,12 @@ static inline void __netif_receive_skb_list_ptype(struct list_head *head,
if (list_empty(head))
return;
if (pt_prev->list_func != NULL)
- pt_prev->list_func(head, pt_prev, orig_dev);
+ INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
+ ip_list_rcv, head, pt_prev, orig_dev);
else
list_for_each_entry_safe(skb, next, head, list) {
skb_list_del_init(skb);
- INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
- skb->dev, pt_prev, orig_dev);
+ pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
}
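
This hunk moves the INDIRECT_CALL_INET() retpoline avoidance from the per-skb receive path to the list-receive path, where one guarded call covers a whole batch of packets. A simplified sketch of the mechanism, quoted from memory of include/linux/indirect_call_wrapper.h (treat the exact expansion as an assumption):

#define INDIRECT_CALL_1(f, f1, ...)					\
	({								\
		likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__);	\
	})

#define INDIRECT_CALL_2(f, f2, f1, ...)					\
	({								\
		likely(f == f2) ? f2(__VA_ARGS__) :			\
				  INDIRECT_CALL_1(f, f1, __VA_ARGS__);	\
	})

/* INDIRECT_CALL_INET(f, ipv6_fn, ipv4_fn, ...) reduces to one of the
 * above depending on CONFIG_IPV6, so the common protocols are reached
 * through compare-and-direct-call instead of a retpoline. */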
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 6dadeff8d39a..d08b1e19ce9c 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1355,13 +1355,16 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
if (!regbuf)
return -ENOMEM;
+ if (regs.len < reglen)
+ reglen = regs.len;
+
ops->get_regs(dev, &regs, regbuf);
ret = -EFAULT;
if (copy_to_user(useraddr, &regs, sizeof(regs)))
goto out;
useraddr += offsetof(struct ethtool_regs, data);
- if (regbuf && copy_to_user(useraddr, regbuf, regs.len))
+ if (copy_to_user(useraddr, regbuf, reglen))
goto out;
ret = 0;
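
The fix clamps the copy-out length to the regs.len that userspace declared, so a driver reporting a larger dump can no longer write past the caller's buffer; the regbuf NULL test in the copy also drops out, since the function already bailed with -ENOMEM above. The pattern in isolation, as a minimal sketch with illustrative names:

#include <string.h>

/* Copy at most dst_len bytes: the destination's declared capacity
 * always wins over the length the producer reports. */
static size_t copy_out_clamped(void *dst, size_t dst_len,
			       const void *src, size_t src_len)
{
	size_t n = src_len < dst_len ? src_len : dst_len;

	memcpy(dst, src, n);
	return n;
}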
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 43f0115cce9c..18f8dd8329ed 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -757,9 +757,9 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err)
goto errout;
- if (rule_exists(ops, frh, tb, rule)) {
- if (nlh->nlmsg_flags & NLM_F_EXCL)
- err = -EEXIST;
+ if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
+ rule_exists(ops, frh, tb, rule)) {
+ err = -EEXIST;
goto errout_free;
}
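
With the check reordered, an existing identical rule is only an error when userspace asked for exclusivity, mirroring open(2)'s O_EXCL; without NLM_F_EXCL the add proceeds. The decision in isolation (helper name and includes are illustrative):

#include <stdbool.h>
#include <errno.h>
#include <linux/netlink.h>	/* NLM_F_EXCL */

static int check_exclusive_create(bool exists, unsigned int nlmsg_flags)
{
	if (exists && (nlmsg_flags & NLM_F_EXCL))
		return -EEXIST;	/* caller demanded exclusive create */
	return 0;		/* duplicates are acceptable otherwise */
}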
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 99ddc69736b2..f975c5e2a369 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3059,7 +3059,13 @@ static int pktgen_wait_thread_run(struct pktgen_thread *t)
{
while (thread_is_running(t)) {
+ /* note: 't' will still be around even after the unlock/lock
+ * cycle because pktgen_thread threads are only cleared at
+ * net exit
+ */
+ mutex_unlock(&pktgen_thread_lock);
msleep_interruptible(100);
+ mutex_lock(&pktgen_thread_lock);
if (signal_pending(current))
goto signal;
@@ -3074,6 +3080,10 @@ static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
struct pktgen_thread *t;
int sig = 1;
+ /* prevent racing with rmmod */
+ if (!try_module_get(THIS_MODULE))
+ return sig;
+
mutex_lock(&pktgen_thread_lock);
list_for_each_entry(t, &pn->pktgen_threads, th_list) {
@@ -3087,6 +3097,7 @@ static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
t->control |= (T_STOP);
mutex_unlock(&pktgen_thread_lock);
+ module_put(THIS_MODULE);
return sig;
}
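
Two races are closed here: pktgen_wait_thread_run() no longer sleeps while holding pktgen_thread_lock, and the waiter pins the module so a concurrent rmmod cannot unload the code it sleeps in. Folding both hunks into one hedged sketch of the resulting shape (not the literal function bodies):

if (!try_module_get(THIS_MODULE))
	return sig;			/* module is already being removed */

mutex_lock(&pktgen_thread_lock);
while (thread_is_running(t)) {
	mutex_unlock(&pktgen_thread_lock);	/* never sleep under the mutex */
	msleep_interruptible(100);
	mutex_lock(&pktgen_thread_lock);
	if (signal_pending(current))
		break;				/* the 'signal' path in the original */
}
mutex_unlock(&pktgen_thread_lock);
module_put(THIS_MODULE);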
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index 969402c7dbf1..d43737e6c3fb 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -28,14 +28,10 @@ static inline bool sja1105_is_link_local(const struct sk_buff *skb)
*/
static bool sja1105_filter(const struct sk_buff *skb, struct net_device *dev)
{
- if (sja1105_is_link_local(skb)) {
- SJA1105_SKB_CB(skb)->type = SJA1105_FRAME_TYPE_LINK_LOCAL;
+ if (sja1105_is_link_local(skb))
return true;
- }
- if (!dsa_port_is_vlan_filtering(dev->dsa_ptr)) {
- SJA1105_SKB_CB(skb)->type = SJA1105_FRAME_TYPE_NORMAL;
+ if (!dsa_port_is_vlan_filtering(dev->dsa_ptr))
return true;
- }
return false;
}
@@ -84,7 +80,7 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
skb->offload_fwd_mark = 1;
- if (SJA1105_SKB_CB(skb)->type == SJA1105_FRAME_TYPE_LINK_LOCAL) {
+ if (sja1105_is_link_local(skb)) {
/* Management traffic path. Switch embeds the switch ID and
* port ID into bytes of the destination MAC, courtesy of
* the incl_srcpt options.
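
Instead of caching a frame-type verdict in skb->cb at filter time, the tagger now re-derives it from the destination MAC wherever it is needed; the check is cheap and avoids trusting control-block state set earlier in the receive path. A minimal sketch of such a stateless check (the 01:80:C2 prefix is illustrative, not the driver's exact filter masks):

#include <linux/etherdevice.h>

static bool dmac_is_link_local(const struct sk_buff *skb)
{
	const u8 *dmac = eth_hdr(skb)->h_dest;

	return dmac[0] == 0x01 && dmac[1] == 0x80 && dmac[2] == 0xc2;
}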
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index cee640281e02..6cb7cff22db9 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1981,7 +1981,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u32 itag = 0;
struct rtable *rth;
struct flowi4 fl4;
- bool do_cache;
+ bool do_cache = true;
/* IP on this device is disabled. */
@@ -2058,6 +2058,9 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
if (res->type == RTN_BROADCAST) {
if (IN_DEV_BFORWARD(in_dev))
goto make_route;
+ /* don't cache if bc_forwarding is enabled */
+ if (IPV4_DEVCONF_ALL(net, BC_FORWARDING))
+ do_cache = false;
goto brd_input;
}
@@ -2095,18 +2098,15 @@ brd_input:
RT_CACHE_STAT_INC(in_brd);
local_input:
- do_cache = false;
- if (res->fi) {
- if (!itag) {
- struct fib_nh_common *nhc = FIB_RES_NHC(*res);
+ do_cache &= res->fi && !itag;
+ if (do_cache) {
+ struct fib_nh_common *nhc = FIB_RES_NHC(*res);
- rth = rcu_dereference(nhc->nhc_rth_input);
- if (rt_cache_valid(rth)) {
- skb_dst_set_noref(skb, &rth->dst);
- err = 0;
- goto out;
- }
- do_cache = true;
+ rth = rcu_dereference(nhc->nhc_rth_input);
+ if (rt_cache_valid(rth)) {
+ skb_dst_set_noref(skb, &rth->dst);
+ err = 0;
+ goto out;
}
}
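
The rewrite inverts the default: do_cache starts true, the broadcast path clears it when bc_forwarding is enabled (a cached input route could otherwise sidestep the per-device forwarding decision on later lookups; treat that rationale as inferred), and local_input folds the remaining preconditions into one mask. The veto-accumulation idiom, with illustrative names:

bool do_cache = true;			/* optimistic default */

if (is_broadcast && bc_forwarding_enabled)
	do_cache = false;		/* veto: result depends on device config */
do_cache &= have_fib_info && !itag;	/* veto: remaining preconditions */
if (do_cache)
	reuse_or_install_cached_dst();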
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 189144346cd4..7c6228fbf5dd 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -533,8 +533,7 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
(inet->inet_dport != rmt_port && inet->inet_dport) ||
(inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
ipv6_only_sock(sk) ||
- (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
- sk->sk_bound_dev_if != sdif))
+ !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
return false;
if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif, sdif))
return false;
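
The open-coded bound-device test becomes udp_sk_bound_dev_eq(), which also accepts packets arriving through an L3 master (VRF) device when the udp_l3mdev_accept sysctl allows it. The underlying comparison, quoted from memory of include/net/inet_sock.h around this series (treat as a sketch):

static inline bool inet_bound_dev_eq(bool l3mdev_accept, int bound_dev_if,
				     int dif, int sdif)
{
	if (!bound_dev_if)
		return !sdif || l3mdev_accept;
	return bound_dev_if == dif || bound_dev_if == sdif;
}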
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 703c8387f102..70693bc7ad9d 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -779,6 +779,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
struct flowi6 fl6;
struct ipcm6_cookie ipc6;
int addr_len = msg->msg_namelen;
+ int hdrincl;
u16 proto;
int err;
@@ -792,6 +793,13 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
+ /* hdrincl should be READ_ONCE(inet->hdrincl)
+ * but READ_ONCE() doesn't work with bit fields.
+ * Doing this indirectly yields the same result.
+ */
+ hdrincl = inet->hdrincl;
+ hdrincl = READ_ONCE(hdrincl);
+
/*
* Get and verify the address.
*/
@@ -883,11 +891,14 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
opt = ipv6_fixup_options(&opt_space, opt);
fl6.flowi6_proto = proto;
- rfv.msg = msg;
- rfv.hlen = 0;
- err = rawv6_probe_proto_opt(&rfv, &fl6);
- if (err)
- goto out;
+
+ if (!hdrincl) {
+ rfv.msg = msg;
+ rfv.hlen = 0;
+ err = rawv6_probe_proto_opt(&rfv, &fl6);
+ if (err)
+ goto out;
+ }
if (!ipv6_addr_any(daddr))
fl6.daddr = *daddr;
@@ -904,7 +915,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
fl6.flowi6_oif = np->ucast_oif;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
- if (inet->hdrincl)
+ if (hdrincl)
fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH;
if (ipc6.tclass < 0)
@@ -927,7 +938,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
goto do_confirm;
back_from_confirm:
- if (inet->hdrincl)
+ if (hdrincl)
err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst,
msg->msg_flags, &ipc6.sockc);
else {
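
The fix reads inet->hdrincl once and branches on the snapshot everywhere, so a concurrent setsockopt(IPV6_HDRINCL) can no longer flip the flag between the probe and the send and leave the two tests in disagreement. Because hdrincl is a bitfield its address cannot be taken, hence the two-step copy in place of a direct READ_ONCE(). The bug shape and fix, with illustrative field names:

/* buggy: two independent reads of a concurrently writable flag */
if (!sk->hdrincl)
	probe_proto_opt();	/* may be skipped ...                */
...
if (sk->hdrincl)		/* ... while this still sees 'set'   */
	send_user_header();

/* fixed: one racy read, then consistent use of the snapshot */
int hdrincl = sk->hdrincl;
hdrincl = READ_ONCE(hdrincl);	/* keep the compiler from re-reading */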
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index fc012e801459..a29d66da7394 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3008,8 +3008,8 @@ static int packet_release(struct socket *sock)
synchronize_net();
+ kfree(po->rollover);
if (f) {
- kfree(po->rollover);
fanout_release_data(f);
kfree(f);
}
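
po->rollover can be left allocated even when fanout setup did not complete, so freeing it only under if (f) leaked it on that path. The cleanup now mirrors ownership, a pattern worth stating generally (names illustrative):

kfree(rollover);	/* allocated independently -> freed unconditionally */
if (fanout) {		/* allocated conditionally -> freed conditionally  */
	fanout_release_data(fanout);
	kfree(fanout);
}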
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 2da9b75bad16..b8d581b779b2 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -87,7 +87,7 @@ static void rds_ib_dev_shutdown(struct rds_ib_device *rds_ibdev)
spin_lock_irqsave(&rds_ibdev->spinlock, flags);
list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node)
- rds_conn_drop(ic->conn);
+ rds_conn_path_drop(&ic->conn->c_path[0], true);
spin_unlock_irqrestore(&rds_ibdev->spinlock, flags);
}
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index d664e9ade74d..0b347f46b2f4 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -428,12 +428,14 @@ int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
wait_clean_list_grace();
list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
- if (ibmr_ret)
+ if (ibmr_ret) {
*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
-
+ clean_nodes = clean_nodes->next;
+ }
/* more than one entry in llist nodes */
- if (clean_nodes->next)
- llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);
+ if (clean_nodes)
+ llist_add_batch(clean_nodes, clean_tail,
+ &pool->clean_list);
}
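
Previously the re-insertion always skipped the first clean node (clean_nodes->next), so when no caller supplied ibmr_ret the head MR was never returned to the pool and leaked. The cursor now only advances past the head when it is actually handed out. The corrected hand-off, sketched with generic names:

struct llist_node *node = clean_nodes;

if (consumer) {
	*consumer = node;	/* hand exactly one node to the caller */
	node = node->next;
}
if (node)			/* re-insert whatever was not handed out */
	llist_add_batch(node, clean_tail, &pool->clean_list);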
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 8946c89d7392..3cae88cbdaa0 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -168,6 +168,7 @@ void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
list_del(&inc->ii_cache_entry);
WARN_ON(!list_empty(&inc->ii_frags));
kmem_cache_free(rds_ib_incoming_slab, inc);
+ atomic_dec(&rds_ib_allocation);
}
rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
@@ -1057,6 +1058,8 @@ out:
void rds_ib_recv_exit(void)
{
+ WARN_ON(atomic_read(&rds_ib_allocation));
+
kmem_cache_destroy(rds_ib_incoming_slab);
kmem_cache_destroy(rds_ib_frag_slab);
}
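
rds_ib_allocation tracks in-flight receive buffers; the cache-teardown free path was missing its decrement, so the counter crept upward across reconnects and could eventually starve allocations against the sysctl limit (the starvation consequence is inferred). The fix pairs the decrement and asserts balance at module exit. The invariant in miniature, with illustrative names:

static struct kmem_cache *slab;
static atomic_t live = ATOMIC_INIT(0);

static void *obj_alloc(gfp_t gfp)
{
	void *p = kmem_cache_alloc(slab, gfp);

	if (p)
		atomic_inc(&live);
	return p;
}

static void obj_free(void *p)
{
	kmem_cache_free(slab, p);
	atomic_dec(&live);		/* every free path must pair up */
}

static void obj_exit(void)
{
	WARN_ON(atomic_read(&live));	/* catches an unpaired path early */
}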
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 92331e1195c1..f17908f5c4f3 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2312,7 +2312,6 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
union sctp_addr addr;
struct sctp_af *af;
int src_match = 0;
- char *cookie;
/* We must include the address that the INIT packet came from.
* This is the only address that matters for an INIT packet.
@@ -2416,14 +2415,6 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
/* Peer Rwnd : Current calculated value of the peer's rwnd. */
asoc->peer.rwnd = asoc->peer.i.a_rwnd;
- /* Copy cookie in case we need to resend COOKIE-ECHO. */
- cookie = asoc->peer.cookie;
- if (cookie) {
- asoc->peer.cookie = kmemdup(cookie, asoc->peer.cookie_len, gfp);
- if (!asoc->peer.cookie)
- goto clean_up;
- }
-
/* RFC 2960 7.2.1 The initial value of ssthresh MAY be arbitrarily
* high (for example, implementations MAY use the size of the receiver
* advertised window).
@@ -2592,7 +2583,9 @@ do_addr_param:
case SCTP_PARAM_STATE_COOKIE:
asoc->peer.cookie_len =
ntohs(param.p->length) - sizeof(struct sctp_paramhdr);
- asoc->peer.cookie = param.cookie->body;
+ asoc->peer.cookie = kmemdup(param.cookie->body, asoc->peer.cookie_len, gfp);
+ if (!asoc->peer.cookie)
+ retval = 0;
break;
case SCTP_PARAM_HEARTBEAT_INFO:
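
asoc->peer.cookie used to point straight into the received chunk, with the kmemdup() deferred to sctp_process_init(); any use after the chunk was freed dereferenced dead memory. Duplicating at parse time means the association always owns the cookie, and the sm_sideeffect.c hunk below releases it once the association reaches ESTABLISHED and no COOKIE-ECHO retransmit can need it. Note the parser's failure convention, sketched here with illustrative names:

/* copy out of the transient chunk at parse time; never keep a
 * pointer into memory with a shorter lifetime than its consumer */
peer->cookie = kmemdup(param_body, peer->cookie_len, gfp);
if (!peer->cookie)
	retval = 0;	/* 0 == parse failure in this function */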
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 9b50da548db2..a554d6d15d1b 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -883,6 +883,11 @@ static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds,
asoc->rto_initial;
}
+ if (sctp_state(asoc, ESTABLISHED)) {
+ kfree(asoc->peer.cookie);
+ asoc->peer.cookie = NULL;
+ }
+
if (sctp_state(asoc, ESTABLISHED) ||
sctp_state(asoc, CLOSED) ||
sctp_state(asoc, SHUTDOWN_RECEIVED)) {
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index b95c408fd771..1f9cf57d9754 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -550,11 +550,23 @@ void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
}
}
+static void tls_device_resync_rx(struct tls_context *tls_ctx,
+ struct sock *sk, u32 seq, u64 rcd_sn)
+{
+ struct net_device *netdev;
+
+ if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
+ return;
+ netdev = READ_ONCE(tls_ctx->netdev);
+ if (netdev)
+ netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, seq, rcd_sn);
+ clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
+}
+
void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_rx *rx_ctx;
- struct net_device *netdev;
u32 is_req_pending;
s64 resync_req;
u32 req_seq;
@@ -570,12 +582,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
if (unlikely(is_req_pending) && req_seq == seq &&
atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) {
seq += TLS_HEADER_SIZE - 1;
- down_read(&device_offload_lock);
- netdev = tls_ctx->netdev;
- if (netdev)
- netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, seq,
- rcd_sn);
- up_read(&device_offload_lock);
+ tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
}
}
@@ -977,7 +984,10 @@ static int tls_device_down(struct net_device *netdev)
if (ctx->rx_conf == TLS_HW)
netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
TLS_OFFLOAD_CTX_DIR_RX);
- ctx->netdev = NULL;
+ WRITE_ONCE(ctx->netdev, NULL);
+ smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
+ while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
+ usleep_range(10, 200);
dev_put(netdev);
list_del_init(&ctx->list);
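
RX resync can be invoked from the receive softirq path, where sleeping on device_offload_lock is not allowed, so the rwsem gives way to a bit lock plus READ_ONCE/WRITE_ONCE on ctx->netdev (the softirq rationale is the usual motivation for such conversions; treat it as hedged). The two sides of the handshake, condensed from the hunks above:

/* resync side: must not sleep */
if (test_and_set_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
	return;				/* a resync is already in flight */
netdev = READ_ONCE(ctx->netdev);	/* may observe teardown's NULL */
if (netdev)
	netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, seq, rcd_sn);
clear_bit_unlock(TLS_RX_SYNC_RUNNING, &ctx->flags);

/* teardown side: may sleep, so polling is fine */
WRITE_ONCE(ctx->netdev, NULL);
smp_mb__before_atomic();		/* NULL store before the bit test */
while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
	usleep_range(10, 200);
dev_put(netdev);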