author     David S. Miller <davem@davemloft.net>  2013-08-16 15:37:26 -0700
committer  David S. Miller <davem@davemloft.net>  2013-08-16 15:37:26 -0700
commit     2ff1cf12c9fe70e75e600404e6a4274b19d293ed (patch)
tree       beafddac0a8098e3f07d2ec60e44a2a7d006e605 /net
parent     16b304f3404f8e0243d5ee2b70b68767b7b59b2b (diff)
parent     0f7dd1aa8f959216f1faa71513b9d3c1a9065e5a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_core.c                    |  7
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c   |  2
-rw-r--r--  net/batman-adv/gateway_client.c          | 13
-rw-r--r--  net/batman-adv/gateway_client.h          |  3
-rw-r--r--  net/batman-adv/soft-interface.c          |  9
-rw-r--r--  net/batman-adv/unicast.c                 | 13
-rw-r--r--  net/bridge/br_multicast.c                |  2
-rw-r--r--  net/bridge/br_sysfs_br.c                 |  2
-rw-r--r--  net/core/flow_dissector.c                |  1
-rw-r--r--  net/core/rtnetlink.c                     |  4
-rw-r--r--  net/ipv4/esp4.c                          |  2
-rw-r--r--  net/ipv4/fib_trie.c                      |  5
-rw-r--r--  net/ipv4/ip_gre.c                        |  2
-rw-r--r--  net/ipv4/ip_tunnel_core.c                |  4
-rw-r--r--  net/ipv4/proc.c                          |  2
-rw-r--r--  net/ipv4/tcp_cubic.c                     | 12
-rw-r--r--  net/ipv6/esp6.c                          |  2
-rw-r--r--  net/ipv6/ip6_fib.c                       | 16
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c   | 12
-rw-r--r--  net/netfilter/nfnetlink_log.c            |  6
-rw-r--r--  net/netfilter/nfnetlink_queue_core.c     |  5
-rw-r--r--  net/netfilter/xt_TCPMSS.c                | 28
-rw-r--r--  net/netfilter/xt_TCPOPTSTRIP.c           | 10
-rw-r--r--  net/netlink/genetlink.c                  |  7
-rw-r--r--  net/openvswitch/actions.c                |  1
-rw-r--r--  net/openvswitch/datapath.c               |  3
-rw-r--r--  net/openvswitch/flow.c                   |  2
-rw-r--r--  net/sched/sch_api.c                      | 41
-rw-r--r--  net/sched/sch_generic.c                  |  8
-rw-r--r--  net/sched/sch_htb.c                      | 13
-rw-r--r--  net/sctp/associola.c                     |  4
-rw-r--r--  net/sctp/transport.c                     |  4
-rw-r--r--  net/sunrpc/clnt.c                        |  4
-rw-r--r--  net/sunrpc/netns.h                       |  1
-rw-r--r--  net/sunrpc/rpcb_clnt.c                   | 48
-rw-r--r--  net/tipc/bearer.c                        |  9
-rw-r--r--  net/vmw_vsock/af_vsock.c                 |  2
37 files changed, 230 insertions(+), 79 deletions(-)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 4a78c4de9f20..6ee48aac776f 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -91,7 +91,12 @@ EXPORT_SYMBOL(__vlan_find_dev_deep);
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
- return vlan_dev_priv(dev)->real_dev;
+ struct net_device *ret = vlan_dev_priv(dev)->real_dev;
+
+ while (is_vlan_dev(ret))
+ ret = vlan_dev_priv(ret)->real_dev;
+
+ return ret;
}
EXPORT_SYMBOL(vlan_dev_real_dev);
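Editor's note: the vlan_core.c change makes vlan_dev_real_dev() walk down through stacked VLAN devices (Q-in-Q, e.g. eth0.100.200 on top of eth0.100) instead of stopping after one hop. A minimal userspace sketch of the same loop, using a made-up device struct rather than the kernel's net_device:

/* Illustrative userspace sketch only -- not the kernel code.
 * Mirrors the loop added to vlan_dev_real_dev(): keep following the
 * lower-device link until it no longer points at a VLAN device.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_dev {
	const char *name;
	bool is_vlan;
	struct fake_dev *lower;		/* stands in for real_dev */
};

static struct fake_dev *fake_real_dev(const struct fake_dev *dev)
{
	struct fake_dev *ret = dev->lower;

	while (ret->is_vlan)		/* previously a single step */
		ret = ret->lower;

	return ret;
}

int main(void)
{
	struct fake_dev eth0         = { "eth0",         false, NULL };
	struct fake_dev eth0_100     = { "eth0.100",     true,  &eth0 };
	struct fake_dev eth0_100_200 = { "eth0.100.200", true,  &eth0_100 };

	/* prints "eth0.100.200 -> eth0" */
	printf("%s -> %s\n", eth0_100_200.name,
	       fake_real_dev(&eth0_100_200)->name);
	return 0;
}

The dev_trans_start() hunk in sch_generic.c further down relies on this, mapping a VLAN device to its real device before reading per-queue trans_start timestamps.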
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index e14531f1ce1c..264de88db320 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1529,6 +1529,8 @@ out:
* in these cases, the skb is further handled by this function and
* returns 1, otherwise it returns 0 and the caller shall further
* process the skb.
+ *
+ * This call might reallocate skb data.
*/
int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid)
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index f105219f4a4b..7614af31daff 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -508,6 +508,7 @@ out:
return 0;
}
+/* this call might reallocate skb data */
static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
{
int ret = false;
@@ -568,6 +569,7 @@ out:
return ret;
}
+/* this call might reallocate skb data */
bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
{
struct ethhdr *ethhdr;
@@ -619,6 +621,12 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
return false;
+
+ /* skb->data might have been reallocated by pskb_may_pull() */
+ ethhdr = (struct ethhdr *)skb->data;
+ if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
+ ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
+
udphdr = (struct udphdr *)(skb->data + *header_len);
*header_len += sizeof(*udphdr);
@@ -634,12 +642,14 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
return true;
}
+/* this call might reallocate skb data */
bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
- struct sk_buff *skb, struct ethhdr *ethhdr)
+ struct sk_buff *skb)
{
struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
struct batadv_orig_node *orig_dst_node = NULL;
struct batadv_gw_node *curr_gw = NULL;
+ struct ethhdr *ethhdr;
bool ret, out_of_range = false;
unsigned int header_len = 0;
uint8_t curr_tq_avg;
@@ -648,6 +658,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
if (!ret)
goto out;
+ ethhdr = (struct ethhdr *)skb->data;
orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
ethhdr->h_dest);
if (!orig_dst_node)
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 039902dca4a6..1037d75da51f 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -34,7 +34,6 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
void batadv_gw_node_purge(struct batadv_priv *bat_priv);
int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
-bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
- struct sk_buff *skb, struct ethhdr *ethhdr);
+bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 700d0b49742d..0f04e1c302b4 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -180,6 +180,9 @@ static int batadv_interface_tx(struct sk_buff *skb,
if (batadv_bla_tx(bat_priv, skb, vid))
goto dropped;
+ /* skb->data might have been reallocated by batadv_bla_tx() */
+ ethhdr = (struct ethhdr *)skb->data;
+
/* Register the client MAC in the transtable */
if (!is_multicast_ether_addr(ethhdr->h_source))
batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
@@ -220,6 +223,10 @@ static int batadv_interface_tx(struct sk_buff *skb,
default:
break;
}
+
+ /* reminder: ethhdr might have become unusable from here on
+ * (batadv_gw_is_dhcp_target() might have reallocated skb data)
+ */
}
/* ethernet packet should be broadcasted */
@@ -266,7 +273,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
/* unicast packet */
} else {
if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) {
- ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr);
+ ret = batadv_gw_out_of_range(bat_priv, skb);
if (ret)
goto dropped;
}
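Editor's note: the batman-adv hunks above keep re-reading ethhdr from skb->data because helpers such as pskb_may_pull() and batadv_bla_tx() may reallocate the buffer, leaving any previously cached pointer stale. The hazard is the same one realloc() creates in userspace; a minimal sketch, with realloc() standing in for pskb_may_pull():

/* Illustrative userspace sketch only. A helper that may move the
 * underlying buffer (realloc() here, pskb_may_pull() in the kernel)
 * invalidates pointers computed from the old base, so they must be
 * re-derived afterwards -- exactly what the hunks above do for ethhdr.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *buf = malloc(16);
	if (!buf)
		return 1;
	strcpy(buf, "header");

	char *hdr = buf;			/* cached pointer into the buffer */

	char *bigger = realloc(buf, 1 << 20);	/* may move the data */
	if (!bigger) {
		free(buf);
		return 1;
	}
	buf = bigger;

	/* 'hdr' may now dangle; re-derive it from the new base instead */
	hdr = buf;
	printf("%s\n", hdr);

	free(buf);
	return 0;
}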
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index dc8b5d4dd636..688a0419756b 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -326,7 +326,9 @@ static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
* @skb: the skb containing the payload to encapsulate
* @orig_node: the destination node
*
- * Returns false if the payload could not be encapsulated or true otherwise
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ *
+ * This call might reallocate skb data.
*/
static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
struct batadv_orig_node *orig_node)
@@ -343,7 +345,9 @@ static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
* @orig_node: the destination node
* @packet_subtype: the batman 4addr packet subtype to use
*
- * Returns false if the payload could not be encapsulated or true otherwise
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ *
+ * This call might reallocate skb data.
*/
bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
struct sk_buff *skb,
@@ -401,7 +405,7 @@ int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
struct batadv_neigh_node *neigh_node;
int data_len = skb->len;
int ret = NET_RX_DROP;
- unsigned int dev_mtu;
+ unsigned int dev_mtu, header_len;
/* get routing information */
if (is_multicast_ether_addr(ethhdr->h_dest)) {
@@ -429,10 +433,12 @@ find_router:
switch (packet_type) {
case BATADV_UNICAST:
batadv_unicast_prepare_skb(skb, orig_node);
+ header_len = sizeof(struct batadv_unicast_packet);
break;
case BATADV_UNICAST_4ADDR:
batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
packet_subtype);
+ header_len = sizeof(struct batadv_unicast_4addr_packet);
break;
default:
/* this function supports UNICAST and UNICAST_4ADDR only. It
@@ -441,6 +447,7 @@ find_router:
goto out;
}
+ ethhdr = (struct ethhdr *)(skb->data + header_len);
unicast_packet = (struct batadv_unicast_packet *)skb->data;
/* inform the destination node that we are still missing a correct route
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 61c5e819380e..08e576ada0b2 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1195,7 +1195,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
if (max_delay)
group = &mld->mld_mca;
- } else if (skb->len >= sizeof(*mld2q)) {
+ } else {
if (!pskb_may_pull(skb, sizeof(*mld2q))) {
err = -EINVAL;
goto out;
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 394bb96b6087..3b9637fb7939 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -1,5 +1,5 @@
/*
- * Sysfs attributes of bridge ports
+ * Sysfs attributes of bridge
* Linux ethernet bridge
*
* Authors:
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index ade9ff1c1980..159737cac76c 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -65,6 +65,7 @@ ipv6:
nhoff += sizeof(struct ipv6hdr);
break;
}
+ case __constant_htons(ETH_P_8021AD):
case __constant_htons(ETH_P_8021Q): {
const struct vlan_hdr *vlan;
struct vlan_hdr _vlan;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 242084edb658..2a0e21de3060 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2179,7 +2179,7 @@ int ndo_dflt_fdb_del(struct ndmsg *ndm,
/* If aging addresses are supported device will need to
* implement its own handler for this.
*/
- if (ndm->ndm_state & NUD_PERMANENT) {
+ if (!(ndm->ndm_state & NUD_PERMANENT)) {
pr_info("%s: FDB only supports static addresses\n", dev->name);
return -EINVAL;
}
@@ -2407,7 +2407,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
struct nlattr *extfilt;
u32 filter_mask = 0;
- extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg),
+ extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
IFLA_EXT_MASK);
if (extfilt)
filter_mask = nla_get_u32(extfilt);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index ab3d814bc80a..109ee89f123e 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -477,7 +477,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
}
return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
- net_adj) & ~(align - 1)) + (net_adj - 2);
+ net_adj) & ~(align - 1)) + net_adj - 2;
}
static void esp4_err(struct sk_buff *skb, u32 info)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 108a1e9c9eac..3df6d3edb2a1 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -71,7 +71,6 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <linux/prefetch.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/ip.h>
@@ -1761,10 +1760,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
if (!c)
continue;
- if (IS_LEAF(c)) {
- prefetch(rcu_dereference_rtnl(p->child[idx]));
+ if (IS_LEAF(c))
return (struct leaf *) c;
- }
/* Rescan start scanning in new node */
p = (struct tnode *) c;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index bc3a76521deb..d7aea4c5b940 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -383,7 +383,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
if (daddr)
memcpy(&iph->daddr, daddr, 4);
if (iph->daddr)
- return t->hlen;
+ return t->hlen + sizeof(*iph);
return -(t->hlen + sizeof(*iph));
}
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 7167b08977df..850525b34899 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -76,9 +76,7 @@ int iptunnel_xmit(struct net *net, struct rtable *rt,
iph->daddr = dst;
iph->saddr = src;
iph->ttl = ttl;
- tunnel_ip_select_ident(skb,
- (const struct iphdr *)skb_inner_network_header(skb),
- &rt->dst);
+ __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
err = ip_local_out(skb);
if (unlikely(net_xmit_eval(err)))
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 5f5fa612647f..4a0335854b89 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -278,7 +278,7 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW),
SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD),
SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES),
- SNMP_MIB_ITEM("LowLatencyRxPackets", LINUX_MIB_LOWLATENCYRXPACKETS),
+ SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index a9077f441cb2..b6ae92a51f58 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -206,8 +206,8 @@ static u32 cubic_root(u64 a)
*/
static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
{
- u64 offs;
- u32 delta, t, bic_target, max_cnt;
+ u32 delta, bic_target, max_cnt;
+ u64 offs, t;
ca->ack_cnt++; /* count the number of ACKs */
@@ -250,9 +250,11 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
* if the cwnd < 1 million packets !!!
*/
+ t = (s32)(tcp_time_stamp - ca->epoch_start);
+ t += msecs_to_jiffies(ca->delay_min >> 3);
/* change the unit from HZ to bictcp_HZ */
- t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3)
- - ca->epoch_start) << BICTCP_HZ) / HZ;
+ t <<= BICTCP_HZ;
+ do_div(t, HZ);
if (t < ca->bic_K) /* t - K */
offs = ca->bic_K - t;
@@ -414,7 +416,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
return;
/* Discard delay samples right after fast recovery */
- if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ)
+ if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
return;
delay = (rtt_us << 3) / USEC_PER_MSEC;
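Editor's note: the tcp_cubic.c change widens t to u64 and uses do_div() so the << BICTCP_HZ shift cannot overflow for long-lived epochs, and bictcp_acked() now ignores delay samples taken before an epoch has started (ca->epoch_start == 0). A minimal userspace sketch of the overflow that the widening avoids; BICTCP_HZ and HZ values here match the kernel defaults of this era but are assumptions of the sketch:

/* Illustrative userspace sketch only. With a 32-bit intermediate,
 * elapsed times beyond roughly 2^22 jiffies wrap during the shift;
 * the 64-bit intermediate (do_div in the kernel) does not.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BICTCP_HZ 10
#define HZ        1000

int main(void)
{
	uint32_t elapsed = 5000000;	/* ~83 minutes worth of jiffies at HZ=1000 */

	/* old code: 32-bit intermediate, wraps */
	uint32_t t32 = (uint32_t)(elapsed << BICTCP_HZ) / HZ;

	/* new code: 64-bit intermediate, then divide */
	uint64_t t64 = ((uint64_t)elapsed << BICTCP_HZ) / HZ;

	printf("32-bit intermediate: %" PRIu32 "\n", t32);	/* wrapped result */
	printf("64-bit intermediate: %" PRIu64 "\n", t64);
	return 0;
}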
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 40ffd72243a4..aeac0dc3635d 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -425,7 +425,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
net_adj = 0;
return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
- net_adj) & ~(align - 1)) + (net_adj - 2);
+ net_adj) & ~(align - 1)) + net_adj - 2;
}
static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index ed828d6f37b2..73db48eba1c4 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -993,14 +993,22 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
#ifdef CONFIG_IPV6_SUBTREES
- if (fn->subtree)
- fn = fib6_lookup_1(fn->subtree, args + 1);
+ if (fn->subtree) {
+ struct fib6_node *sfn;
+ sfn = fib6_lookup_1(fn->subtree,
+ args + 1);
+ if (!sfn)
+ goto backtrack;
+ fn = sfn;
+ }
#endif
- if (!fn || fn->fn_flags & RTN_RTINFO)
+ if (fn->fn_flags & RTN_RTINFO)
return fn;
}
}
-
+#ifdef CONFIG_IPV6_SUBTREES
+backtrack:
+#endif
if (fn->fn_flags & RTN_ROOT)
break;
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 7dcc376eea5f..2f8010707d01 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -526,7 +526,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
__u32 seq, ack, sack, end, win, swin;
s16 receiver_offset;
- bool res;
+ bool res, in_recv_win;
/*
* Get the required data from the packet.
@@ -649,14 +649,18 @@ static bool tcp_in_window(const struct nf_conn *ct,
receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
receiver->td_scale);
+ /* Is the ending sequence in the receive window (if available)? */
+ in_recv_win = !receiver->td_maxwin ||
+ after(end, sender->td_end - receiver->td_maxwin - 1);
+
pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
before(seq, sender->td_maxend + 1),
- after(end, sender->td_end - receiver->td_maxwin - 1),
+ (in_recv_win ? 1 : 0),
before(sack, receiver->td_end + 1),
after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));
if (before(seq, sender->td_maxend + 1) &&
- after(end, sender->td_end - receiver->td_maxwin - 1) &&
+ in_recv_win &&
before(sack, receiver->td_end + 1) &&
after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
/*
@@ -725,7 +729,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_tcp: %s ",
before(seq, sender->td_maxend + 1) ?
- after(end, sender->td_end - receiver->td_maxwin - 1) ?
+ in_recv_win ?
before(sack, receiver->td_end + 1) ?
after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
: "ACK is under the lower bound (possible overly delayed ACK)"
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 962e9792e317..d92cc317bf8b 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -419,6 +419,7 @@ __build_packet_message(struct nfnl_log_net *log,
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = htons(inst->group_num);
+ memset(&pmsg, 0, sizeof(pmsg));
pmsg.hw_protocol = skb->protocol;
pmsg.hook = hooknum;
@@ -498,7 +499,10 @@ __build_packet_message(struct nfnl_log_net *log,
if (indev && skb->dev &&
skb->mac_header != skb->network_header) {
struct nfulnl_msg_packet_hw phw;
- int len = dev_parse_header(skb, phw.hw_addr);
+ int len;
+
+ memset(&phw, 0, sizeof(phw));
+ len = dev_parse_header(skb, phw.hw_addr);
if (len > 0) {
phw.hw_addrlen = htons(len);
if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 971ea145ab3e..8a703c3dd318 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -463,7 +463,10 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
if (indev && entskb->dev &&
entskb->mac_header != entskb->network_header) {
struct nfqnl_msg_packet_hw phw;
- int len = dev_parse_header(entskb, phw.hw_addr);
+ int len;
+
+ memset(&phw, 0, sizeof(phw));
+ len = dev_parse_header(entskb, phw.hw_addr);
if (len) {
phw.hw_addrlen = htons(len);
if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
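Editor's note: the nfnetlink_log and nfnetlink_queue hunks above zero the on-stack hardware-address struct before dev_parse_header() fills only part of it, so the later whole-struct nla_put() cannot copy stale stack bytes to userspace. A minimal userspace sketch of the idea, using a stand-in struct rather than the real nfulnl_msg_packet_hw layout:

/* Illustrative userspace sketch only. Zeroing the struct before a
 * partial fill makes every byte deterministic, so copying sizeof(phw)
 * bytes out (nla_put() in the kernel) cannot expose old stack data.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_packet_hw {
	uint16_t hw_addrlen;
	uint16_t _pad;
	uint8_t  hw_addr[8];
};

/* Pretend header parser: fills only the first 'len' address bytes. */
static int fake_parse_header(uint8_t *addr, int len)
{
	memset(addr, 0xab, len);
	return len;
}

int main(void)
{
	struct fake_packet_hw phw;

	memset(&phw, 0, sizeof(phw));		/* the added line */
	int len = fake_parse_header(phw.hw_addr, 6);
	phw.hw_addrlen = (uint16_t)len;

	/* padding and the unused tail of hw_addr are now all zero */
	for (size_t i = 0; i < sizeof(phw); i++)
		printf("%02x ", ((uint8_t *)&phw)[i]);
	printf("\n");
	return 0;
}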
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 7011c71646f0..6113cc7efffc 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -52,7 +52,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
{
const struct xt_tcpmss_info *info = par->targinfo;
struct tcphdr *tcph;
- unsigned int tcplen, i;
+ int len, tcp_hdrlen;
+ unsigned int i;
__be16 oldval;
u16 newmss;
u8 *opt;
@@ -64,11 +65,14 @@ tcpmss_mangle_packet(struct sk_buff *skb,
if (!skb_make_writable(skb, skb->len))
return -1;
- tcplen = skb->len - tcphoff;
+ len = skb->len - tcphoff;
+ if (len < (int)sizeof(struct tcphdr))
+ return -1;
+
tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
+ tcp_hdrlen = tcph->doff * 4;
- /* Header cannot be larger than the packet */
- if (tcplen < tcph->doff*4)
+ if (len < tcp_hdrlen)
return -1;
if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
@@ -87,9 +91,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
newmss = info->mss;
opt = (u_int8_t *)tcph;
- for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) {
- if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS &&
- opt[i+1] == TCPOLEN_MSS) {
+ for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
+ if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
u_int16_t oldmss;
oldmss = (opt[i+2] << 8) | opt[i+3];
@@ -112,9 +115,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
}
/* There is data after the header so the option can't be added
- without moving it, and doing so may make the SYN packet
- itself too large. Accept the packet unmodified instead. */
- if (tcplen > tcph->doff*4)
+ * without moving it, and doing so may make the SYN packet
+ * itself too large. Accept the packet unmodified instead.
+ */
+ if (len > tcp_hdrlen)
return 0;
/*
@@ -143,10 +147,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
newmss = min(newmss, (u16)1220);
opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
- memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
+ memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
inet_proto_csum_replace2(&tcph->check, skb,
- htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
+ htons(len), htons(len + TCPOLEN_MSS), 1);
opt[0] = TCPOPT_MSS;
opt[1] = TCPOLEN_MSS;
opt[2] = (newmss & 0xff00) >> 8;
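Editor's note: the rewritten xt_TCPMSS option walk bounds the loop with i <= tcp_hdrlen - TCPOLEN_MSS, which guarantees that all four bytes of a candidate MSS option lie inside the TCP header without the old per-iteration length check. A minimal userspace sketch of the same walk over a synthetic option block:

/* Illustrative userspace sketch only. Mirrors the bound used by the
 * patched option walk: iterate only while a full 4-byte MSS option
 * (kind, length, two value bytes) still fits in the header, so
 * opt[i+1..i+3] are always in bounds.
 */
#include <stdint.h>
#include <stdio.h>

#define TCPOLEN_MSS 4

static int optlen(const uint8_t *opt, int offset)
{
	/* EOL and NOP are 1 byte, everything else carries a length byte */
	if (opt[offset] <= 1)
		return 1;
	return opt[offset + 1] ? opt[offset + 1] : 1;
}

int main(void)
{
	/* 20-byte base header (ignored here) + 12 bytes of options */
	uint8_t opt[32] = {
		[20] = 1,			/* NOP */
		[21] = 1,			/* NOP */
		[22] = 2, [23] = 4,		/* MSS option, kind 2, len 4 */
		[24] = 0x05, [25] = 0xb4,	/* MSS = 1460 */
		[26] = 1, [27] = 1,
		[28] = 1, [29] = 1, [30] = 1, [31] = 1,
	};
	int tcp_hdrlen = 32;

	for (int i = 20; i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
		if (opt[i] == 2 /* TCPOPT_MSS */ && opt[i + 1] == TCPOLEN_MSS) {
			int mss = (opt[i + 2] << 8) | opt[i + 3];
			printf("found MSS option: %d\n", mss);
		}
	}
	return 0;
}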
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
index b68fa191710f..625fa1d636a0 100644
--- a/net/netfilter/xt_TCPOPTSTRIP.c
+++ b/net/netfilter/xt_TCPOPTSTRIP.c
@@ -38,7 +38,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
struct tcphdr *tcph;
u_int16_t n, o;
u_int8_t *opt;
- int len;
+ int len, tcp_hdrlen;
/* This is a fragment, no TCP header is available */
if (par->fragoff != 0)
@@ -52,7 +52,9 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
return NF_DROP;
tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
- if (tcph->doff * 4 > len)
+ tcp_hdrlen = tcph->doff * 4;
+
+ if (len < tcp_hdrlen)
return NF_DROP;
opt = (u_int8_t *)tcph;
@@ -61,10 +63,10 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
* Walk through all TCP options - if we find some option to remove,
* set all octets to %TCPOPT_NOP and adjust checksum.
*/
- for (i = sizeof(struct tcphdr); i < tcp_hdrlen(skb); i += optl) {
+ for (i = sizeof(struct tcphdr); i < tcp_hdrlen - 1; i += optl) {
optl = optlen(opt, i);
- if (i + optl > tcp_hdrlen(skb))
+ if (i + optl > tcp_hdrlen)
break;
if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i]))
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 512718adb0d5..f85f8a2ad6cf 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -789,6 +789,10 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
struct net *net = sock_net(skb->sk);
int chains_to_skip = cb->args[0];
int fams_to_skip = cb->args[1];
+ bool need_locking = chains_to_skip || fams_to_skip;
+
+ if (need_locking)
+ genl_lock();
for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) {
n = 0;
@@ -810,6 +814,9 @@ errout:
cb->args[0] = i;
cb->args[1] = n;
+ if (need_locking)
+ genl_unlock();
+
return skb->len;
}
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 22c5f399f1cf..ab101f715447 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -535,6 +535,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb)
{
struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
+ OVS_CB(skb)->tun_key = NULL;
return do_execute_actions(dp, skb, acts->actions,
acts->actions_len, false);
}
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index f7e3a0d84c40..f2ed7600084e 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -2076,9 +2076,6 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
return 0;
- rtnl_unlock();
- return 0;
-
exit_free:
kfree_skb(reply);
exit_unlock:
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 5c519b121e1b..1aa84dc58777 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -240,7 +240,7 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
struct flex_array *buckets;
int i, err;
- buckets = flex_array_alloc(sizeof(struct hlist_head *),
+ buckets = flex_array_alloc(sizeof(struct hlist_head),
n_buckets, GFP_KERNEL);
if (!buckets)
return NULL;
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 281c1bded1f6..51b968d3febb 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -285,6 +285,45 @@ static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
return q;
}
+/* The linklayer setting was not transferred by older iproute2
+ * versions, and the rate table lookup system has been dropped from
+ * the kernel. To stay backward compatible with older iproute2 tc
+ * utils, we detect the linklayer setting by checking whether the
+ * rate table was modified.
+ *
+ * For linklayer ATM table entries, the rate table will be aligned to
+ * 48 bytes, thus some table entries will contain the same value. The
+ * mpu (min packet unit) is also encoded into the old rate table, thus
+ * starting from the mpu, we find low and high table entries for
+ * mapping this cell. If these entries contain the same value, then
+ * the rate tables have been modified for linklayer ATM.
+ *
+ * This is done by rounding mpu to the nearest 48 bytes cell/entry,
+ * and then roundup to the next cell, calc the table entry one below,
+ * and compare.
+ */
+static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
+{
+ int low = roundup(r->mpu, 48);
+ int high = roundup(low+1, 48);
+ int cell_low = low >> r->cell_log;
+ int cell_high = (high >> r->cell_log) - 1;
+
+ /* rtab is too inaccurate at rates > 100Mbit/s */
+ if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
+ pr_debug("TC linklayer: Giving up ATM detection\n");
+ return TC_LINKLAYER_ETHERNET;
+ }
+
+ if ((cell_high > cell_low) && (cell_high < 256)
+ && (rtab[cell_low] == rtab[cell_high])) {
+ pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
+ cell_low, cell_high, rtab[cell_high]);
+ return TC_LINKLAYER_ATM;
+ }
+ return TC_LINKLAYER_ETHERNET;
+}
+
static struct qdisc_rate_table *qdisc_rtab_list;
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
@@ -308,6 +347,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta
rtab->rate = *r;
rtab->refcnt = 1;
memcpy(rtab->data, nla_data(tab), 1024);
+ if (r->linklayer == TC_LINKLAYER_UNAWARE)
+ r->linklayer = __detect_linklayer(r, rtab->data);
rtab->next = qdisc_rtab_list;
qdisc_rtab_list = rtab;
}
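Editor's note: a worked example of the __detect_linklayer() arithmetic added above. With mpu = 0, low rounds up to 0 and high to 48; for a cell_log of 3 that compares rtab[0] against rtab[5], which are equal only when the table was built with 48-byte ATM cell alignment. A minimal userspace sketch that reproduces the check on synthetic tables (the mpu and cell_log values are chosen purely for illustration):

/* Illustrative userspace sketch only. Reproduces the detection logic
 * on two synthetic 256-entry rate tables: one padded to 48-byte ATM
 * cells, one growing linearly per byte.
 */
#include <stdint.h>
#include <stdio.h>

#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

static int looks_like_atm(const uint32_t *rtab, int mpu, int cell_log)
{
	int low = ROUNDUP(mpu, 48);
	int high = ROUNDUP(low + 1, 48);
	int cell_low = low >> cell_log;
	int cell_high = (high >> cell_log) - 1;

	return cell_high > cell_low && cell_high < 256 &&
	       rtab[cell_low] == rtab[cell_high];
}

int main(void)
{
	uint32_t atm_tab[256], eth_tab[256];

	for (int i = 0; i < 256; i++) {
		int bytes = (i + 1) << 3;		/* bucket covers up to this size */
		int cells = (bytes + 47) / 48;		/* ATM: pad to 48-byte cells */
		atm_tab[i] = (uint32_t)(cells * 53);	/* cost grows per cell */
		eth_tab[i] = (uint32_t)bytes;		/* cost grows per byte */
	}

	printf("atm table -> %s\n", looks_like_atm(atm_tab, 0, 3) ? "ATM" : "Ethernet");
	printf("eth table -> %s\n", looks_like_atm(eth_tab, 0, 3) ? "ATM" : "Ethernet");
	return 0;
}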
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 4626cef4b76e..48be3d5c0d92 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -25,6 +25,7 @@
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/if_vlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
@@ -207,15 +208,19 @@ void __qdisc_run(struct Qdisc *q)
unsigned long dev_trans_start(struct net_device *dev)
{
- unsigned long val, res = dev->trans_start;
+ unsigned long val, res;
unsigned int i;
+ if (is_vlan_dev(dev))
+ dev = vlan_dev_real_dev(dev);
+ res = dev->trans_start;
for (i = 0; i < dev->num_tx_queues; i++) {
val = netdev_get_tx_queue(dev, i)->trans_start;
if (val && time_after(val, res))
res = val;
}
dev->trans_start = res;
+
return res;
}
EXPORT_SYMBOL(dev_trans_start);
@@ -904,6 +909,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r,
memset(r, 0, sizeof(*r));
r->overhead = conf->overhead;
r->rate_bytes_ps = conf->rate;
+ r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
r->mult = 1;
/*
* The deal here is to replace a divide by a reciprocal one
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 45e751527dfc..c2178b15ca6e 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1329,6 +1329,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)*arg, *parent;
struct nlattr *opt = tca[TCA_OPTIONS];
+ struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
struct nlattr *tb[TCA_HTB_MAX + 1];
struct tc_htb_opt *hopt;
@@ -1350,6 +1351,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
if (!hopt->rate.rate || !hopt->ceil.rate)
goto failure;
+ /* Keep backward compatibility with rate_table-based iproute2 tc */
+ if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) {
+ rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
+ if (rtab)
+ qdisc_put_rtab(rtab);
+ }
+ if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) {
+ ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
+ if (ctab)
+ qdisc_put_rtab(ctab);
+ }
+
if (!cl) { /* new class */
struct Qdisc *new_q;
int prio;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index e051920816e6..cef509985192 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -840,12 +840,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
else
spc_state = SCTP_ADDR_AVAILABLE;
/* Don't inform ULP about transition from PF to
- * active state and set cwnd to 1, see SCTP
+ * active state and set cwnd to 1 MTU, see SCTP
* Quick failover draft section 5.1, point 5
*/
if (transport->state == SCTP_PF) {
ulp_notify = false;
- transport->cwnd = 1;
+ transport->cwnd = asoc->pathmtu;
}
transport->state = SCTP_ACTIVE;
break;
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 6836ead5c957..e332efb124cc 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -175,12 +175,12 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
return;
}
- call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
-
sctp_packet_free(&transport->packet);
if (transport->asoc)
sctp_association_put(transport->asoc);
+
+ call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
}
/* Start T3_rtx timer if it is not already running and update the heartbeat
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 74f6a704e374..ecbc4e3d83ad 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1660,6 +1660,10 @@ call_connect(struct rpc_task *task)
task->tk_action = call_connect_status;
if (task->tk_status < 0)
return;
+ if (task->tk_flags & RPC_TASK_NOCONNECT) {
+ rpc_exit(task, -ENOTCONN);
+ return;
+ }
xprt_connect(task);
}
}
diff --git a/net/sunrpc/netns.h b/net/sunrpc/netns.h
index 74d948f5d5a1..779742cfc1ff 100644
--- a/net/sunrpc/netns.h
+++ b/net/sunrpc/netns.h
@@ -23,6 +23,7 @@ struct sunrpc_net {
struct rpc_clnt *rpcb_local_clnt4;
spinlock_t rpcb_clnt_lock;
unsigned int rpcb_users;
+ unsigned int rpcb_is_af_local : 1;
struct mutex gssp_lock;
wait_queue_head_t gssp_wq;
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 3df764dc330c..1891a1022c17 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -204,13 +204,15 @@ void rpcb_put_local(struct net *net)
}
static void rpcb_set_local(struct net *net, struct rpc_clnt *clnt,
- struct rpc_clnt *clnt4)
+ struct rpc_clnt *clnt4,
+ bool is_af_local)
{
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
/* Protected by rpcb_create_local_mutex */
sn->rpcb_local_clnt = clnt;
sn->rpcb_local_clnt4 = clnt4;
+ sn->rpcb_is_af_local = is_af_local ? 1 : 0;
smp_wmb();
sn->rpcb_users = 1;
dprintk("RPC: created new rpcb local clients (rpcb_local_clnt: "
@@ -238,6 +240,14 @@ static int rpcb_create_local_unix(struct net *net)
.program = &rpcb_program,
.version = RPCBVERS_2,
.authflavor = RPC_AUTH_NULL,
+ /*
+ * We turn off the idle timeout to prevent the kernel
+ * from automatically disconnecting the socket.
+ * Otherwise, we'd have to cache the mount namespace
+ * of the caller and somehow pass that to the socket
+ * reconnect code.
+ */
+ .flags = RPC_CLNT_CREATE_NO_IDLE_TIMEOUT,
};
struct rpc_clnt *clnt, *clnt4;
int result = 0;
@@ -263,7 +273,7 @@ static int rpcb_create_local_unix(struct net *net)
clnt4 = NULL;
}
- rpcb_set_local(net, clnt, clnt4);
+ rpcb_set_local(net, clnt, clnt4, true);
out:
return result;
@@ -315,7 +325,7 @@ static int rpcb_create_local_net(struct net *net)
clnt4 = NULL;
}
- rpcb_set_local(net, clnt, clnt4);
+ rpcb_set_local(net, clnt, clnt4, false);
out:
return result;
@@ -376,13 +386,16 @@ static struct rpc_clnt *rpcb_create(struct net *net, const char *hostname,
return rpc_create(&args);
}
-static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg)
+static int rpcb_register_call(struct sunrpc_net *sn, struct rpc_clnt *clnt, struct rpc_message *msg, bool is_set)
{
- int result, error = 0;
+ int flags = RPC_TASK_NOCONNECT;
+ int error, result = 0;
+ if (is_set || !sn->rpcb_is_af_local)
+ flags = RPC_TASK_SOFTCONN;
msg->rpc_resp = &result;
- error = rpc_call_sync(clnt, msg, RPC_TASK_SOFTCONN);
+ error = rpc_call_sync(clnt, msg, flags);
if (error < 0) {
dprintk("RPC: failed to contact local rpcbind "
"server (errno %d).\n", -error);
@@ -439,16 +452,19 @@ int rpcb_register(struct net *net, u32 prog, u32 vers, int prot, unsigned short
.rpc_argp = &map,
};
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+ bool is_set = false;
dprintk("RPC: %sregistering (%u, %u, %d, %u) with local "
"rpcbind\n", (port ? "" : "un"),
prog, vers, prot, port);
msg.rpc_proc = &rpcb_procedures2[RPCBPROC_UNSET];
- if (port)
+ if (port != 0) {
msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET];
+ is_set = true;
+ }
- return rpcb_register_call(sn->rpcb_local_clnt, &msg);
+ return rpcb_register_call(sn, sn->rpcb_local_clnt, &msg, is_set);
}
/*
@@ -461,6 +477,7 @@ static int rpcb_register_inet4(struct sunrpc_net *sn,
const struct sockaddr_in *sin = (const struct sockaddr_in *)sap;
struct rpcbind_args *map = msg->rpc_argp;
unsigned short port = ntohs(sin->sin_port);
+ bool is_set = false;
int result;
map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
@@ -471,10 +488,12 @@ static int rpcb_register_inet4(struct sunrpc_net *sn,
map->r_addr, map->r_netid);
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
- if (port)
+ if (port != 0) {
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
+ is_set = true;
+ }
- result = rpcb_register_call(sn->rpcb_local_clnt4, msg);
+ result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set);
kfree(map->r_addr);
return result;
}
@@ -489,6 +508,7 @@ static int rpcb_register_inet6(struct sunrpc_net *sn,
const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sap;
struct rpcbind_args *map = msg->rpc_argp;
unsigned short port = ntohs(sin6->sin6_port);
+ bool is_set = false;
int result;
map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
@@ -499,10 +519,12 @@ static int rpcb_register_inet6(struct sunrpc_net *sn,
map->r_addr, map->r_netid);
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
- if (port)
+ if (port != 0) {
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
+ is_set = true;
+ }
- result = rpcb_register_call(sn->rpcb_local_clnt4, msg);
+ result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set);
kfree(map->r_addr);
return result;
}
@@ -519,7 +541,7 @@ static int rpcb_unregister_all_protofamilies(struct sunrpc_net *sn,
map->r_addr = "";
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
- return rpcb_register_call(sn->rpcb_local_clnt4, msg);
+ return rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, false);
}
/**
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index cb29ef7ba2f0..609c30c80816 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -460,6 +460,7 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
{
struct tipc_link *l_ptr;
struct tipc_link *temp_l_ptr;
+ struct tipc_link_req *temp_req;
pr_info("Disabling bearer <%s>\n", b_ptr->name);
spin_lock_bh(&b_ptr->lock);
@@ -468,9 +469,13 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
tipc_link_delete(l_ptr);
}
- if (b_ptr->link_req)
- tipc_disc_delete(b_ptr->link_req);
+ temp_req = b_ptr->link_req;
+ b_ptr->link_req = NULL;
spin_unlock_bh(&b_ptr->lock);
+
+ if (temp_req)
+ tipc_disc_delete(temp_req);
+
memset(b_ptr, 0, sizeof(struct tipc_bearer));
}
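Editor's note: the tipc bearer_disable() change detaches link_req from the bearer while holding the spinlock and only calls tipc_disc_delete() after the lock is dropped. A minimal userspace sketch of that detach-under-lock, destroy-outside-lock pattern, using a pthread mutex in place of the kernel spinlock:

/* Illustrative userspace sketch only. Detach the object from the
 * shared structure while the lock is held, then do the potentially
 * expensive destruction after unlocking.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct discoverer { int id; };

struct bearer {
	pthread_mutex_t lock;
	struct discoverer *link_req;
};

static void discoverer_delete(struct discoverer *d)
{
	/* stand-in for tipc_disc_delete(): must not run under the lock */
	printf("deleting discoverer %d\n", d->id);
	free(d);
}

static void bearer_disable(struct bearer *b)
{
	struct discoverer *temp_req;

	pthread_mutex_lock(&b->lock);
	temp_req = b->link_req;		/* detach under the lock */
	b->link_req = NULL;
	pthread_mutex_unlock(&b->lock);

	if (temp_req)			/* destroy outside the lock */
		discoverer_delete(temp_req);
}

int main(void)
{
	static struct bearer b = { .lock = PTHREAD_MUTEX_INITIALIZER };

	b.link_req = malloc(sizeof(*b.link_req));
	if (!b.link_req)
		return 1;
	b.link_req->id = 1;

	bearer_disable(&b);
	return 0;
}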
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 57896eee1c25..545c08b8a1d4 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -346,7 +346,7 @@ void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
struct vsock_sock *vsk;
list_for_each_entry(vsk, &vsock_connected_table[i],
- connected_table);
+ connected_table)
fn(sk_vsock(vsk));
}