Diffstat (limited to 'net')
-rw-r--r--  net/802/fc.c | 2
-rw-r--r--  net/802/fddi.c | 12
-rw-r--r--  net/802/hippi.c | 2
-rw-r--r--  net/802/tr.c | 2
-rw-r--r--  net/8021q/vlan.c | 6
-rw-r--r--  net/8021q/vlan_core.c | 32
-rw-r--r--  net/8021q/vlan_dev.c | 11
-rw-r--r--  net/9p/client.c | 4
-rw-r--r--  net/9p/trans_fd.c | 2
-rw-r--r--  net/9p/trans_virtio.c | 3
-rw-r--r--  net/Kconfig | 3
-rw-r--r--  net/Makefile | 1
-rw-r--r--  net/atm/br2684.c | 12
-rw-r--r--  net/atm/clip.c | 4
-rw-r--r--  net/atm/common.c | 2
-rw-r--r--  net/atm/lec.c | 1
-rw-r--r--  net/ax25/af_ax25.c | 2
-rw-r--r--  net/ax25/ax25_ds_timer.c | 2
-rw-r--r--  net/ax25/ax25_route.c | 4
-rw-r--r--  net/bluetooth/af_bluetooth.c | 5
-rw-r--r--  net/bluetooth/rfcomm/core.c | 4
-rw-r--r--  net/bridge/br_if.c | 29
-rw-r--r--  net/bridge/br_input.c | 2
-rw-r--r--  net/bridge/br_netfilter.c | 111
-rw-r--r--  net/caif/caif_dev.c | 24
-rw-r--r--  net/caif/caif_socket.c | 27
-rw-r--r--  net/caif/cfcnfg.c | 49
-rw-r--r--  net/caif/cfctrl.c | 59
-rw-r--r--  net/caif/cfdbgl.c | 4
-rw-r--r--  net/caif/cfdgml.c | 11
-rw-r--r--  net/caif/cffrml.c | 14
-rw-r--r--  net/caif/cfmuxl.c | 14
-rw-r--r--  net/caif/cfpkt_skbuff.c | 50
-rw-r--r--  net/caif/cfrfml.c | 14
-rw-r--r--  net/caif/cfserl.c | 4
-rw-r--r--  net/caif/cfsrvl.c | 17
-rw-r--r--  net/caif/cfutill.c | 12
-rw-r--r--  net/caif/cfveil.c | 11
-rw-r--r--  net/caif/cfvidl.c | 6
-rw-r--r--  net/caif/chnl_net.c | 47
-rw-r--r--  net/can/bcm.c | 41
-rw-r--r--  net/can/raw.c | 4
-rw-r--r--  net/core/datagram.c | 5
-rw-r--r--  net/core/dev.c | 407
-rw-r--r--  net/core/ethtool.c | 88
-rw-r--r--  net/core/fib_rules.c | 15
-rw-r--r--  net/core/filter.c | 10
-rw-r--r--  net/core/flow.c | 82
-rw-r--r--  net/core/gen_estimator.c | 16
-rw-r--r--  net/core/iovec.c | 11
-rw-r--r--  net/core/neighbour.c | 359
-rw-r--r--  net/core/net-sysfs.c | 34
-rw-r--r--  net/core/net-sysfs.h | 4
-rw-r--r--  net/core/pktgen.c | 12
-rw-r--r--  net/core/rtnetlink.c | 31
-rw-r--r--  net/core/skbuff.c | 98
-rw-r--r--  net/core/sock.c | 12
-rw-r--r--  net/core/stream.c | 8
-rw-r--r--  net/core/utils.c | 15
-rw-r--r--  net/dccp/ccid.h | 46
-rw-r--r--  net/dccp/ccids/Kconfig | 31
-rw-r--r--  net/dccp/ccids/ccid2.c | 287
-rw-r--r--  net/dccp/ccids/ccid2.h | 35
-rw-r--r--  net/dccp/ccids/ccid3.c | 253
-rw-r--r--  net/dccp/ccids/ccid3.h | 51
-rw-r--r--  net/dccp/ccids/lib/loss_interval.c | 2
-rw-r--r--  net/dccp/ccids/lib/packet_history.c | 39
-rw-r--r--  net/dccp/ccids/lib/packet_history.h | 22
-rw-r--r--  net/dccp/ccids/lib/tfrc.h | 1
-rw-r--r--  net/dccp/ccids/lib/tfrc_equation.c | 14
-rw-r--r--  net/dccp/options.c | 25
-rw-r--r--  net/decnet/dn_neigh.c | 13
-rw-r--r--  net/decnet/dn_nsp_out.c | 8
-rw-r--r--  net/dns_resolver/Kconfig | 27
-rw-r--r--  net/dns_resolver/Makefile | 7
-rw-r--r--  net/dns_resolver/dns_key.c | 293
-rw-r--r--  net/dns_resolver/dns_query.c | 165
-rw-r--r--  net/dns_resolver/internal.h | 44
-rw-r--r--  net/dsa/Kconfig | 2
-rw-r--r--  net/econet/af_econet.c | 6
-rw-r--r--  net/ethernet/eth.c | 8
-rw-r--r--  net/ipv4/Kconfig | 11
-rw-r--r--  net/ipv4/Makefile | 1
-rw-r--r--  net/ipv4/af_inet.c | 8
-rw-r--r--  net/ipv4/arp.c | 239
-rw-r--r--  net/ipv4/datagram.c | 7
-rw-r--r--  net/ipv4/devinet.c | 4
-rw-r--r--  net/ipv4/fib_frontend.c | 196
-rw-r--r--  net/ipv4/fib_hash.c | 5
-rw-r--r--  net/ipv4/fib_lookup.h | 2
-rw-r--r--  net/ipv4/fib_rules.c | 13
-rw-r--r--  net/ipv4/fib_semantics.c | 276
-rw-r--r--  net/ipv4/fib_trie.c | 61
-rw-r--r--  net/ipv4/gre.c | 151
-rw-r--r--  net/ipv4/icmp.c | 4
-rw-r--r--  net/ipv4/igmp.c | 28
-rw-r--r--  net/ipv4/inet_diag.c | 2
-rw-r--r--  net/ipv4/ip_fragment.c | 6
-rw-r--r--  net/ipv4/ip_gre.c | 241
-rw-r--r--  net/ipv4/ip_options.c | 3
-rw-r--r--  net/ipv4/ip_output.c | 43
-rw-r--r--  net/ipv4/ip_sockglue.c | 3
-rw-r--r--  net/ipv4/ipip.c | 212
-rw-r--r--  net/ipv4/ipmr.c | 428
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 7
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 5
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 31
-rw-r--r--  net/ipv4/netfilter/ipt_REJECT.c | 1
-rw-r--r--  net/ipv4/netfilter/nf_defrag_ipv4.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_nat_snmp_basic.c | 6
-rw-r--r--  net/ipv4/protocol.c | 31
-rw-r--r--  net/ipv4/raw.c | 2
-rw-r--r--  net/ipv4/route.c | 139
-rw-r--r--  net/ipv4/tcp.c | 52
-rw-r--r--  net/ipv4/tcp_cong.c | 5
-rw-r--r--  net/ipv4/tcp_input.c | 34
-rw-r--r--  net/ipv4/tcp_ipv4.c | 2
-rw-r--r--  net/ipv4/tcp_minisocks.c | 2
-rw-r--r--  net/ipv4/tcp_output.c | 31
-rw-r--r--  net/ipv4/tcp_timer.c | 55
-rw-r--r--  net/ipv4/tcp_westwood.c | 2
-rw-r--r--  net/ipv4/tunnel4.c | 19
-rw-r--r--  net/ipv4/udp.c | 48
-rw-r--r--  net/ipv4/xfrm4_policy.c | 2
-rw-r--r--  net/ipv4/xfrm4_state.c | 33
-rw-r--r--  net/ipv4/xfrm4_tunnel.c | 4
-rw-r--r--  net/ipv6/addrconf.c | 19
-rw-r--r--  net/ipv6/addrlabel.c | 10
-rw-r--r--  net/ipv6/af_inet6.c | 6
-rw-r--r--  net/ipv6/datagram.c | 7
-rw-r--r--  net/ipv6/exthdrs_core.c | 4
-rw-r--r--  net/ipv6/ip6_output.c | 24
-rw-r--r--  net/ipv6/ip6_tunnel.c | 157
-rw-r--r--  net/ipv6/ip6mr.c | 1
-rw-r--r--  net/ipv6/ndisc.c | 36
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 19
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_reasm.c | 82
-rw-r--r--  net/ipv6/protocol.c | 32
-rw-r--r--  net/ipv6/raw.c | 12
-rw-r--r--  net/ipv6/reassembly.c | 73
-rw-r--r--  net/ipv6/route.c | 58
-rw-r--r--  net/ipv6/sit.c | 165
-rw-r--r--  net/ipv6/tcp_ipv6.c | 2
-rw-r--r--  net/ipv6/tunnel6.c | 17
-rw-r--r--  net/ipv6/udp.c | 10
-rw-r--r--  net/ipv6/xfrm6_policy.c | 2
-rw-r--r--  net/ipv6/xfrm6_state.c | 33
-rw-r--r--  net/ipv6/xfrm6_tunnel.c | 4
-rw-r--r--  net/irda/af_irda.c | 18
-rw-r--r--  net/irda/discovery.c | 2
-rw-r--r--  net/irda/ircomm/ircomm_tty.c | 4
-rw-r--r--  net/irda/irlan/irlan_common.c | 2
-rw-r--r--  net/irda/irlan/irlan_eth.c | 34
-rw-r--r--  net/irda/irlan/irlan_event.c | 2
-rw-r--r--  net/irda/irlmp.c | 2
-rw-r--r--  net/irda/irlmp_frame.c | 2
-rw-r--r--  net/irda/irnet/irnet_irda.c | 22
-rw-r--r--  net/irda/irnet/irnet_ppp.c | 8
-rw-r--r--  net/irda/irnet/irnet_ppp.h | 3
-rw-r--r--  net/key/af_key.c | 4
-rw-r--r--  net/l2tp/l2tp_eth.c | 3
-rw-r--r--  net/l2tp/l2tp_ppp.c | 2
-rw-r--r--  net/llc/af_llc.c | 3
-rw-r--r--  net/llc/llc_station.c | 2
-rw-r--r--  net/mac80211/main.c | 2
-rw-r--r--  net/mac80211/rate.c | 4
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 18
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 13
-rw-r--r--  net/netfilter/ipvs/ip_vs_ftp.c | 9
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_sctp.c | 19
-rw-r--r--  net/netfilter/ipvs/ip_vs_sched.c | 22
-rw-r--r--  net/netfilter/ipvs/ip_vs_xmit.c | 18
-rw-r--r--  net/netfilter/nf_conntrack_extend.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_sip.c | 2
-rw-r--r--  net/netfilter/nf_tproxy_core.c | 6
-rw-r--r--  net/netfilter/xt_hashlimit.c | 15
-rw-r--r--  net/netlink/af_netlink.c | 78
-rw-r--r--  net/packet/af_packet.c | 4
-rw-r--r--  net/phonet/Kconfig | 11
-rw-r--r--  net/phonet/af_phonet.c | 17
-rw-r--r--  net/phonet/datagram.c | 13
-rw-r--r--  net/phonet/pep.c | 480
-rw-r--r--  net/phonet/pn_dev.c | 5
-rw-r--r--  net/phonet/socket.c | 190
-rw-r--r--  net/rds/af_rds.c | 26
-rw-r--r--  net/rds/bind.c | 82
-rw-r--r--  net/rds/cong.c | 8
-rw-r--r--  net/rds/connection.c | 157
-rw-r--r--  net/rds/ib.c | 194
-rw-r--r--  net/rds/ib.h | 100
-rw-r--r--  net/rds/ib_cm.c | 184
-rw-r--r--  net/rds/ib_rdma.c | 318
-rw-r--r--  net/rds/ib_recv.c | 549
-rw-r--r--  net/rds/ib_send.c | 682
-rw-r--r--  net/rds/ib_stats.c | 2
-rw-r--r--  net/rds/ib_sysctl.c | 17
-rw-r--r--  net/rds/info.c | 12
-rw-r--r--  net/rds/iw.c | 4
-rw-r--r--  net/rds/iw.h | 11
-rw-r--r--  net/rds/iw_cm.c | 14
-rw-r--r--  net/rds/iw_rdma.c | 5
-rw-r--r--  net/rds/iw_recv.c | 24
-rw-r--r--  net/rds/iw_send.c | 93
-rw-r--r--  net/rds/iw_sysctl.c | 4
-rw-r--r--  net/rds/loop.c | 31
-rw-r--r--  net/rds/message.c | 118
-rw-r--r--  net/rds/page.c | 5
-rw-r--r--  net/rds/rdma.c | 339
-rw-r--r--  net/rds/rdma.h | 85
-rw-r--r--  net/rds/rdma_transport.c | 42
-rw-r--r--  net/rds/rds.h | 187
-rw-r--r--  net/rds/recv.c | 11
-rw-r--r--  net/rds/send.c | 544
-rw-r--r--  net/rds/stats.c | 6
-rw-r--r--  net/rds/sysctl.c | 4
-rw-r--r--  net/rds/tcp.c | 8
-rw-r--r--  net/rds/tcp.h | 9
-rw-r--r--  net/rds/tcp_connect.c | 6
-rw-r--r--  net/rds/tcp_listen.c | 10
-rw-r--r--  net/rds/tcp_recv.c | 18
-rw-r--r--  net/rds/tcp_send.c | 70
-rw-r--r--  net/rds/threads.c | 69
-rw-r--r--  net/rds/transport.c | 19
-rw-r--r--  net/rds/xlist.h | 80
-rw-r--r--  net/rfkill/input.c | 2
-rw-r--r--  net/rose/af_rose.c | 4
-rw-r--r--  net/rose/rose_link.c | 4
-rw-r--r--  net/rxrpc/ar-internal.h | 16
-rw-r--r--  net/sched/Kconfig | 10
-rw-r--r--  net/sched/Makefile | 1
-rw-r--r--  net/sched/act_csum.c | 595
-rw-r--r--  net/sched/act_gact.c | 21
-rw-r--r--  net/sched/act_mirred.c | 15
-rw-r--r--  net/sched/act_nat.c | 22
-rw-r--r--  net/sched/act_police.c | 21
-rw-r--r--  net/sched/act_simple.c | 11
-rw-r--r--  net/sched/act_skbedit.c | 11
-rw-r--r--  net/sched/cls_flow.c | 74
-rw-r--r--  net/sched/em_meta.c | 6
-rw-r--r--  net/sched/sch_api.c | 66
-rw-r--r--  net/sched/sch_atm.c | 8
-rw-r--r--  net/sched/sch_generic.c | 12
-rw-r--r--  net/sched/sch_hfsc.c | 2
-rw-r--r--  net/sched/sch_sfq.c | 47
-rw-r--r--  net/sched/sch_tbf.c | 4
-rw-r--r--  net/sched/sch_teql.c | 2
-rw-r--r--  net/sctp/associola.c | 2
-rw-r--r--  net/sctp/auth.c | 8
-rw-r--r--  net/sctp/chunk.c | 2
-rw-r--r--  net/sctp/inqueue.c | 2
-rw-r--r--  net/sctp/ipv6.c | 4
-rw-r--r--  net/sctp/objcnt.c | 5
-rw-r--r--  net/sctp/output.c | 3
-rw-r--r--  net/sctp/outqueue.c | 34
-rw-r--r--  net/sctp/probe.c | 4
-rw-r--r--  net/sctp/protocol.c | 19
-rw-r--r--  net/sctp/sm_make_chunk.c | 2
-rw-r--r--  net/sctp/sm_sideeffect.c | 21
-rw-r--r--  net/sctp/sm_statefuns.c | 66
-rw-r--r--  net/sctp/sm_statetable.c | 42
-rw-r--r--  net/sctp/socket.c | 98
-rw-r--r--  net/sctp/transport.c | 9
-rw-r--r--  net/socket.c | 30
-rw-r--r--  net/sunrpc/Kconfig | 9
-rw-r--r--  net/sunrpc/auth.c | 162
-rw-r--r--  net/sunrpc/auth_generic.c | 23
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 31
-rw-r--r--  net/sunrpc/auth_gss/gss_generic_token.c | 44
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_seqnum.c | 2
-rw-r--r--  net/sunrpc/auth_gss/gss_mech_switch.c | 2
-rw-r--r--  net/sunrpc/auth_null.c | 2
-rw-r--r--  net/sunrpc/auth_unix.c | 21
-rw-r--r--  net/sunrpc/cache.c | 8
-rw-r--r--  net/sunrpc/clnt.c | 182
-rw-r--r--  net/sunrpc/sched.c | 106
-rw-r--r--  net/sunrpc/sunrpc_syms.c | 16
-rw-r--r--  net/sunrpc/xprt.c | 3
-rw-r--r--  net/sunrpc/xprtrdma/rpc_rdma.c | 2
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c | 22
-rw-r--r--  net/sunrpc/xprtsock.c | 82
-rw-r--r--  net/tipc/addr.c | 2
-rw-r--r--  net/tipc/bcast.c | 41
-rw-r--r--  net/tipc/bearer.c | 2
-rw-r--r--  net/tipc/core.c | 6
-rw-r--r--  net/tipc/dbg.c | 4
-rw-r--r--  net/tipc/discover.c | 8
-rw-r--r--  net/tipc/eth_media.c | 48
-rw-r--r--  net/tipc/link.c | 31
-rw-r--r--  net/tipc/link.h | 16
-rw-r--r--  net/tipc/msg.h | 6
-rw-r--r--  net/tipc/name_table.c | 50
-rw-r--r--  net/tipc/net.c | 1
-rw-r--r--  net/tipc/node.c | 28
-rw-r--r--  net/tipc/node.h | 2
-rw-r--r--  net/tipc/port.c | 19
-rw-r--r--  net/tipc/port.h | 2
-rw-r--r--  net/tipc/socket.c | 83
-rw-r--r--  net/tipc/subscr.c | 2
-rw-r--r--  net/unix/af_unix.c | 25
-rw-r--r--  net/wireless/core.h | 2
-rw-r--r--  net/x25/af_x25.c | 34
-rw-r--r--  net/xfrm/xfrm_output.c | 2
-rw-r--r--  net/xfrm/xfrm_policy.c | 5
-rw-r--r--  net/xfrm/xfrm_state.c | 45
-rw-r--r--  net/xfrm/xfrm_user.c | 4
305 files changed, 8942 insertions, 5222 deletions
diff --git a/net/802/fc.c b/net/802/fc.c
index 34cf1ee014b8..1e49f2d4ea96 100644
--- a/net/802/fc.c
+++ b/net/802/fc.c
@@ -70,7 +70,7 @@ static int fc_header(struct sk_buff *skb, struct net_device *dev,
if(daddr)
{
memcpy(fch->daddr,daddr,dev->addr_len);
- return(hdr_len);
+ return hdr_len;
}
return -hdr_len;
}
diff --git a/net/802/fddi.c b/net/802/fddi.c
index 3ef0ab0a543a..94b3ad08f39a 100644
--- a/net/802/fddi.c
+++ b/net/802/fddi.c
@@ -82,10 +82,10 @@ static int fddi_header(struct sk_buff *skb, struct net_device *dev,
if (daddr != NULL)
{
memcpy(fddi->daddr, daddr, dev->addr_len);
- return(hl);
+ return hl;
}
- return(-hl);
+ return -hl;
}
@@ -108,7 +108,7 @@ static int fddi_rebuild_header(struct sk_buff *skb)
{
printk("%s: Don't know how to resolve type %04X addresses.\n",
skb->dev->name, ntohs(fddi->hdr.llc_snap.ethertype));
- return(0);
+ return 0;
}
}
@@ -162,7 +162,7 @@ __be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev)
/* Assume 802.2 SNAP frames, for now */
- return(type);
+ return type;
}
EXPORT_SYMBOL(fddi_type_trans);
@@ -170,9 +170,9 @@ EXPORT_SYMBOL(fddi_type_trans);
int fddi_change_mtu(struct net_device *dev, int new_mtu)
{
if ((new_mtu < FDDI_K_SNAP_HLEN) || (new_mtu > FDDI_K_SNAP_DLEN))
- return(-EINVAL);
+ return -EINVAL;
dev->mtu = new_mtu;
- return(0);
+ return 0;
}
EXPORT_SYMBOL(fddi_change_mtu);
diff --git a/net/802/hippi.c b/net/802/hippi.c
index cd3e8e929529..91aca8780fd0 100644
--- a/net/802/hippi.c
+++ b/net/802/hippi.c
@@ -152,7 +152,7 @@ int hippi_change_mtu(struct net_device *dev, int new_mtu)
if ((new_mtu < 68) || (new_mtu > 65280))
return -EINVAL;
dev->mtu = new_mtu;
- return(0);
+ return 0;
}
EXPORT_SYMBOL(hippi_change_mtu);
diff --git a/net/802/tr.c b/net/802/tr.c
index 1c6e596074df..5e20cf8a074b 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -145,7 +145,7 @@ static int tr_header(struct sk_buff *skb, struct net_device *dev,
{
memcpy(trh->daddr,daddr,dev->addr_len);
tr_source_route(skb, trh, dev);
- return(hdr_len);
+ return hdr_len;
}
return -hdr_len;
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index a2ad15250575..25c21332e9c3 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -321,7 +321,7 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
if (new_dev == NULL)
return -ENOBUFS;
- new_dev->real_num_tx_queues = real_dev->real_num_tx_queues;
+ netif_copy_real_num_queues(new_dev, real_dev);
dev_net_set(new_dev, net);
/* need 4 bytes for extra VLAN header info,
* hope the underlying device can handle it.
@@ -525,6 +525,10 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
break;
case NETDEV_UNREGISTER:
+ /* twiddle thumbs on netns device moves */
+ if (dev->reg_state != NETREG_UNREGISTERING)
+ break;
+
/* Delete all VLANs for this dev. */
grp->killall = 1;
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 01ddb0472f86..dee727ce0291 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -24,23 +24,27 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
if (vlan_dev)
skb->dev = vlan_dev;
- else if (vlan_id)
- goto drop;
+ else if (vlan_id) {
+ if (!(skb->dev->flags & IFF_PROMISC))
+ goto drop;
+ skb->pkt_type = PACKET_OTHERHOST;
+ }
- return (polling ? netif_receive_skb(skb) : netif_rx(skb));
+ return polling ? netif_receive_skb(skb) : netif_rx(skb);
drop:
+ atomic_long_inc(&skb->dev->rx_dropped);
dev_kfree_skb_any(skb);
return NET_RX_DROP;
}
EXPORT_SYMBOL(__vlan_hwaccel_rx);
-int vlan_hwaccel_do_receive(struct sk_buff *skb)
+void vlan_hwaccel_do_receive(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
struct vlan_rx_stats *rx_stats;
- skb->dev = vlan_dev_info(dev)->real_dev;
+ skb->dev = vlan_dev_real_dev(dev);
netif_nit_deliver(skb);
skb->dev = dev;
@@ -69,7 +73,6 @@ int vlan_hwaccel_do_receive(struct sk_buff *skb)
break;
}
u64_stats_update_end(&rx_stats->syncp);
- return 0;
}
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
@@ -102,19 +105,26 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
if (vlan_dev)
skb->dev = vlan_dev;
- else if (vlan_id)
- goto drop;
+ else if (vlan_id) {
+ if (!(skb->dev->flags & IFF_PROMISC))
+ goto drop;
+ skb->pkt_type = PACKET_OTHERHOST;
+ }
for (p = napi->gro_list; p; p = p->next) {
- NAPI_GRO_CB(p)->same_flow =
- p->dev == skb->dev && !compare_ether_header(
- skb_mac_header(p), skb_gro_mac_header(skb));
+ unsigned long diffs;
+
+ diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
+ diffs |= compare_ether_header(skb_mac_header(p),
+ skb_gro_mac_header(skb));
+ NAPI_GRO_CB(p)->same_flow = !diffs;
NAPI_GRO_CB(p)->flush = 0;
}
return dev_gro_receive(napi, skb);
drop:
+ atomic_long_inc(&skb->dev->rx_dropped);
return GRO_DROP;
}
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 3d59c9bf8feb..f54251edd40d 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -177,8 +177,8 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
} else {
skb->dev = vlan_dev;
- rx_stats = per_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats,
- smp_processor_id());
+ rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats);
+
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->rx_packets++;
rx_stats->rx_bytes += skb->len;
@@ -226,12 +226,14 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
}
netif_rx(skb);
+
rcu_read_unlock();
return NET_RX_SUCCESS;
err_unlock:
rcu_read_unlock();
err_free:
+ atomic_long_inc(&dev->rx_dropped);
kfree_skb(skb);
return NET_RX_DROP;
}
@@ -510,7 +512,8 @@ static int vlan_dev_open(struct net_device *dev)
if (vlan->flags & VLAN_FLAG_GVRP)
vlan_gvrp_request_join(dev);
- netif_carrier_on(dev);
+ if (netif_carrier_ok(real_dev))
+ netif_carrier_on(dev);
return 0;
clear_allmulti:
@@ -842,7 +845,7 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st
accum.rx_packets += rxpackets;
accum.rx_bytes += rxbytes;
accum.rx_multicast += rxmulticast;
- /* rx_errors is an ulong, not protected by syncp */
+ /* rx_errors is ulong, not protected by syncp */
accum.rx_errors += p->rx_errors;
}
stats->rx_packets = accum.rx_packets;
diff --git a/net/9p/client.c b/net/9p/client.c
index dc6f2f26d023..f34b9f510818 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -61,13 +61,13 @@ static const match_table_t tokens = {
inline int p9_is_proto_dotl(struct p9_client *clnt)
{
- return (clnt->proto_version == p9_proto_2000L);
+ return clnt->proto_version == p9_proto_2000L;
}
EXPORT_SYMBOL(p9_is_proto_dotl);
inline int p9_is_proto_dotu(struct p9_client *clnt)
{
- return (clnt->proto_version == p9_proto_2000u);
+ return clnt->proto_version == p9_proto_2000u;
}
EXPORT_SYMBOL(p9_is_proto_dotu);
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index c85109d809ca..078eb162d9bf 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -222,7 +222,7 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
}
}
-static unsigned int
+static int
p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt)
{
int ret, n;
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index dcfbe99ff81c..b88515936e4b 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -329,7 +329,8 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
mutex_lock(&virtio_9p_lock);
list_for_each_entry(chan, &virtio_chan_list, chan_list) {
- if (!strncmp(devname, chan->tag, chan->tag_len)) {
+ if (!strncmp(devname, chan->tag, chan->tag_len) &&
+ strlen(devname) == chan->tag_len) {
if (!chan->inuse) {
chan->inuse = true;
found = 1;
diff --git a/net/Kconfig b/net/Kconfig
index e24fa0873f32..e926884c1675 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -213,10 +213,11 @@ source "net/phonet/Kconfig"
source "net/ieee802154/Kconfig"
source "net/sched/Kconfig"
source "net/dcb/Kconfig"
+source "net/dns_resolver/Kconfig"
config RPS
boolean
- depends on SMP && SYSFS
+ depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
default y
menu "Network testing"
diff --git a/net/Makefile b/net/Makefile
index 41d420070a38..ea60fbce9b1b 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -67,3 +67,4 @@ ifeq ($(CONFIG_NET),y)
obj-$(CONFIG_SYSCTL) += sysctl_net.o
endif
obj-$(CONFIG_WIMAX) += wimax/
+obj-$(CONFIG_DNS_RESOLVER) += dns_resolver/
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 651babdfab38..ad2b232a2055 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -399,12 +399,6 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
unregister_netdev(net_dev);
free_netdev(net_dev);
}
- read_lock_irq(&devs_lock);
- if (list_empty(&br2684_devs)) {
- /* last br2684 device */
- unregister_atmdevice_notifier(&atm_dev_notifier);
- }
- read_unlock_irq(&devs_lock);
return;
}
@@ -675,7 +669,6 @@ static int br2684_create(void __user *arg)
if (list_empty(&br2684_devs)) {
/* 1st br2684 device */
- register_atmdevice_notifier(&atm_dev_notifier);
brdev->number = 1;
} else
brdev->number = BRPRIV(list_entry_brdev(br2684_devs.prev))->number + 1;
@@ -815,6 +808,7 @@ static int __init br2684_init(void)
return -ENOMEM;
#endif
register_atm_ioctl(&br2684_ioctl_ops);
+ register_atmdevice_notifier(&atm_dev_notifier);
return 0;
}
@@ -830,9 +824,7 @@ static void __exit br2684_exit(void)
#endif
- /* if not already empty */
- if (!list_empty(&br2684_devs))
- unregister_atmdevice_notifier(&atm_dev_notifier);
+ unregister_atmdevice_notifier(&atm_dev_notifier);
while (!list_empty(&br2684_devs)) {
net_dev = list_entry_brdev(br2684_devs.next);
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 95fdd1185067..ff956d1115bc 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -310,9 +310,9 @@ static int clip_constructor(struct neighbour *neigh)
return 0;
}
-static u32 clip_hash(const void *pkey, const struct net_device *dev)
+static u32 clip_hash(const void *pkey, const struct net_device *dev, __u32 rnd)
{
- return jhash_2words(*(u32 *) pkey, dev->ifindex, clip_tbl.hash_rnd);
+ return jhash_2words(*(u32 *) pkey, dev->ifindex, rnd);
}
static struct neigh_table clip_tbl = {
diff --git a/net/atm/common.c b/net/atm/common.c
index 940404a73b3d..1b9c52a02cd3 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -792,7 +792,7 @@ int vcc_getsockopt(struct socket *sock, int level, int optname,
default:
if (level == SOL_SOCKET)
return -EINVAL;
- break;
+ break;
}
if (!vcc->dev || !vcc->dev->ops->getsockopt)
return -EINVAL;
diff --git a/net/atm/lec.c b/net/atm/lec.c
index d98bde1a0ac8..181d70c73d70 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -220,7 +220,6 @@ static unsigned char *get_tr_dst(unsigned char *packet, unsigned char *rdesc)
static int lec_open(struct net_device *dev)
{
netif_start_queue(dev);
- memset(&dev->stats, 0, sizeof(struct net_device_stats));
return 0;
}
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index cfdfd7e2a172..26eaebf4aaa9 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1103,7 +1103,7 @@ done:
out:
release_sock(sk);
- return 0;
+ return err;
}
/*
diff --git a/net/ax25/ax25_ds_timer.c b/net/ax25/ax25_ds_timer.c
index 2ce79df00680..c7d81436213d 100644
--- a/net/ax25/ax25_ds_timer.c
+++ b/net/ax25/ax25_ds_timer.c
@@ -112,8 +112,8 @@ void ax25_ds_heartbeat_expiry(ax25_cb *ax25)
if (sk) {
sock_hold(sk);
ax25_destroy_socket(ax25);
- sock_put(sk);
bh_unlock_sock(sk);
+ sock_put(sk);
} else
ax25_destroy_socket(ax25);
return;
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index 7805945a5fd6..a1690845dc6e 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -412,7 +412,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
{
ax25_uid_assoc *user;
ax25_route *ax25_rt;
- int err;
+ int err = 0;
if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL)
return -EHOSTUNREACH;
@@ -453,7 +453,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
put:
ax25_put_route(ax25_rt);
- return 0;
+ return err;
}
struct sk_buff *ax25_rt_build_path(struct sk_buff *skb, ax25_address *src,
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 421c45bd1b95..ed0f22f57668 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -297,13 +297,12 @@ unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *w
mask |= POLLERR;
if (sk->sk_shutdown & RCV_SHUTDOWN)
- mask |= POLLRDHUP;
+ mask |= POLLRDHUP | POLLIN | POLLRDNORM;
if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
- if (!skb_queue_empty(&sk->sk_receive_queue) ||
- (sk->sk_shutdown & RCV_SHUTDOWN))
+ if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
if (sk->sk_state == BT_CLOSED)
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 7dca91bb8c57..15ea84ba344e 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -179,13 +179,13 @@ static unsigned char rfcomm_crc_table[256] = {
/* FCS on 2 bytes */
static inline u8 __fcs(u8 *data)
{
- return (0xff - __crc(data));
+ return 0xff - __crc(data);
}
/* FCS on 3 bytes */
static inline u8 __fcs2(u8 *data)
{
- return (0xff - rfcomm_crc_table[__crc(data) ^ data[2]]);
+ return 0xff - rfcomm_crc_table[__crc(data) ^ data[2]];
}
/* Check FCS */
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index c03d2c3ff03e..89ad25a76202 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -61,30 +61,27 @@ static int port_cost(struct net_device *dev)
}
-/*
- * Check for port carrier transistions.
- * Called from work queue to allow for calling functions that
- * might sleep (such as speed check), and to debounce.
- */
+/* Check for port carrier transistions. */
void br_port_carrier_check(struct net_bridge_port *p)
{
struct net_device *dev = p->dev;
struct net_bridge *br = p->br;
- if (netif_carrier_ok(dev))
+ if (netif_running(dev) && netif_carrier_ok(dev))
p->path_cost = port_cost(dev);
- if (netif_running(br->dev)) {
- spin_lock_bh(&br->lock);
- if (netif_carrier_ok(dev)) {
- if (p->state == BR_STATE_DISABLED)
- br_stp_enable_port(p);
- } else {
- if (p->state != BR_STATE_DISABLED)
- br_stp_disable_port(p);
- }
- spin_unlock_bh(&br->lock);
+ if (!netif_running(br->dev))
+ return;
+
+ spin_lock_bh(&br->lock);
+ if (netif_running(dev) && netif_carrier_ok(dev)) {
+ if (p->state == BR_STATE_DISABLED)
+ br_stp_enable_port(p);
+ } else {
+ if (p->state != BR_STATE_DISABLED)
+ br_stp_disable_port(p);
}
+ spin_unlock_bh(&br->lock);
}
static void release_nbp(struct kobject *kobj)
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 826cd5221536..6d04cfdf4541 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -141,7 +141,7 @@ struct sk_buff *br_handle_frame(struct sk_buff *skb)
const unsigned char *dest = eth_hdr(skb)->h_dest;
int (*rhook)(struct sk_buff *skb);
- if (skb->pkt_type == PACKET_LOOPBACK)
+ if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
return skb;
if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 2c911c0759c2..77f7b5fda45a 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -162,8 +162,8 @@ static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
if (tmp) {
memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
atomic_set(&tmp->use, 1);
- nf_bridge_put(nf_bridge);
}
+ nf_bridge_put(nf_bridge);
nf_bridge = tmp;
}
return nf_bridge;
@@ -209,6 +209,72 @@ static inline void nf_bridge_update_protocol(struct sk_buff *skb)
skb->protocol = htons(ETH_P_PPP_SES);
}
+/* When handing a packet over to the IP layer
+ * check whether we have a skb that is in the
+ * expected format
+ */
+
+int br_parse_ip_options(struct sk_buff *skb)
+{
+ struct ip_options *opt;
+ struct iphdr *iph;
+ struct net_device *dev = skb->dev;
+ u32 len;
+
+ iph = ip_hdr(skb);
+ opt = &(IPCB(skb)->opt);
+
+ /* Basic sanity checks */
+ if (iph->ihl < 5 || iph->version != 4)
+ goto inhdr_error;
+
+ if (!pskb_may_pull(skb, iph->ihl*4))
+ goto inhdr_error;
+
+ iph = ip_hdr(skb);
+ if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
+ goto inhdr_error;
+
+ len = ntohs(iph->tot_len);
+ if (skb->len < len) {
+ IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
+ goto drop;
+ } else if (len < (iph->ihl*4))
+ goto inhdr_error;
+
+ if (pskb_trim_rcsum(skb, len)) {
+ IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
+ goto drop;
+ }
+
+ /* Zero out the CB buffer if no options present */
+ if (iph->ihl == 5) {
+ memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+ return 0;
+ }
+
+ opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
+ if (ip_options_compile(dev_net(dev), opt, skb))
+ goto inhdr_error;
+
+ /* Check correct handling of SRR option */
+ if (unlikely(opt->srr)) {
+ struct in_device *in_dev = __in_dev_get_rcu(dev);
+ if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev))
+ goto drop;
+
+ if (ip_options_rcv_srr(skb))
+ goto drop;
+ }
+
+ return 0;
+
+inhdr_error:
+ IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
+drop:
+ return -1;
+}
+
/* Fill in the header for fragmented IP packets handled by
* the IPv4 connection tracking code.
*/
@@ -549,7 +615,6 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
{
struct net_bridge_port *p;
struct net_bridge *br;
- struct iphdr *iph;
__u32 len = nf_bridge_encap_header_len(skb);
if (unlikely(!pskb_may_pull(skb, len)))
@@ -578,28 +643,9 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
nf_bridge_pull_encap_header_rcsum(skb);
- if (!pskb_may_pull(skb, sizeof(struct iphdr)))
- goto inhdr_error;
-
- iph = ip_hdr(skb);
- if (iph->ihl < 5 || iph->version != 4)
- goto inhdr_error;
-
- if (!pskb_may_pull(skb, 4 * iph->ihl))
- goto inhdr_error;
-
- iph = ip_hdr(skb);
- if (ip_fast_csum((__u8 *) iph, iph->ihl) != 0)
- goto inhdr_error;
-
- len = ntohs(iph->tot_len);
- if (skb->len < len || len < 4 * iph->ihl)
- goto inhdr_error;
-
- pskb_trim_rcsum(skb, len);
-
- /* BUG: Should really parse the IP options here. */
- memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+ if (br_parse_ip_options(skb))
+ /* Drop invalid packet */
+ goto out;
nf_bridge_put(skb->nf_bridge);
if (!nf_bridge_alloc(skb))
@@ -614,8 +660,6 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
return NF_STOLEN;
-inhdr_error:
-// IP_INC_STATS_BH(IpInHdrErrors);
out:
return NF_DROP;
}
@@ -759,12 +803,19 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
#if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE)
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
+ int ret;
+
if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
- !skb_is_gso(skb))
- return ip_fragment(skb, br_dev_queue_push_xmit);
- else
- return br_dev_queue_push_xmit(skb);
+ !skb_is_gso(skb)) {
+ if (br_parse_ip_options(skb))
+ /* Drop invalid packet */
+ return NF_DROP;
+ ret = ip_fragment(skb, br_dev_queue_push_xmit);
+ } else
+ ret = br_dev_queue_push_xmit(skb);
+
+ return ret;
}
#else
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 0b586e9d1378..b99369a055d1 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -9,6 +9,8 @@
* and Sakari Ailus <sakari.ailus@nokia.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -171,7 +173,7 @@ static int receive(struct sk_buff *skb, struct net_device *dev,
net = dev_net(dev);
pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
caifd = caif_get(dev);
- if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
+ if (!caifd || !caifd->layer.up || !caifd->layer.up->receive)
return NET_RX_DROP;
if (caifd->layer.up->receive(caifd->layer.up, pkt))
@@ -214,7 +216,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
switch (what) {
case NETDEV_REGISTER:
- pr_info("CAIF: %s():register %s\n", __func__, dev->name);
+ netdev_info(dev, "register\n");
caifd = caif_device_alloc(dev);
if (caifd == NULL)
break;
@@ -225,14 +227,13 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
break;
case NETDEV_UP:
- pr_info("CAIF: %s(): up %s\n", __func__, dev->name);
+ netdev_info(dev, "up\n");
caifd = caif_get(dev);
if (caifd == NULL)
break;
caifdev = netdev_priv(dev);
if (atomic_read(&caifd->state) == NETDEV_UP) {
- pr_info("CAIF: %s():%s already up\n",
- __func__, dev->name);
+ netdev_info(dev, "already up\n");
break;
}
atomic_set(&caifd->state, what);
@@ -273,7 +274,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
caifd = caif_get(dev);
if (caifd == NULL)
break;
- pr_info("CAIF: %s():going down %s\n", __func__, dev->name);
+ netdev_info(dev, "going down\n");
if (atomic_read(&caifd->state) == NETDEV_GOING_DOWN ||
atomic_read(&caifd->state) == NETDEV_DOWN)
@@ -295,11 +296,10 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
caifd = caif_get(dev);
if (caifd == NULL)
break;
- pr_info("CAIF: %s(): down %s\n", __func__, dev->name);
+ netdev_info(dev, "down\n");
if (atomic_read(&caifd->in_use))
- pr_warning("CAIF: %s(): "
- "Unregistering an active CAIF device: %s\n",
- __func__, dev->name);
+ netdev_warn(dev,
+ "Unregistering an active CAIF device\n");
cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer);
dev_put(dev);
atomic_set(&caifd->state, what);
@@ -307,7 +307,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
case NETDEV_UNREGISTER:
caifd = caif_get(dev);
- pr_info("CAIF: %s(): unregister %s\n", __func__, dev->name);
+ netdev_info(dev, "unregister\n");
atomic_set(&caifd->state, what);
caif_device_destroy(dev);
break;
@@ -391,7 +391,7 @@ static int __init caif_device_init(void)
int result;
cfg = cfcnfg_create();
if (!cfg) {
- pr_warning("CAIF: %s(): can't create cfcnfg.\n", __func__);
+ pr_warn("can't create cfcnfg\n");
goto err_cfcnfg_create_failed;
}
result = register_pernet_device(&caif_net_ops);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 8ce904786116..abcba5358b63 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -4,6 +4,8 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -15,7 +17,6 @@
#include <linux/poll.h>
#include <linux/tcp.h>
#include <linux/uaccess.h>
-#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/caif/caif_socket.h>
#include <asm/atomic.h>
@@ -28,9 +29,6 @@
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(AF_CAIF);
-#define CAIF_DEF_SNDBUF (4096*10)
-#define CAIF_DEF_RCVBUF (4096*100)
-
/*
* CAIF state is re-using the TCP socket states.
* caif_states stored in sk_state reflect the state as reported by
@@ -157,9 +155,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
(unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
- trace_printk("CAIF: %s():"
- " sending flow OFF (queue len = %d %d)\n",
- __func__,
+ pr_debug("sending flow OFF (queue len = %d %d)\n",
atomic_read(&cf_sk->sk.sk_rmem_alloc),
sk_rcvbuf_lowwater(cf_sk));
set_rx_flow_off(cf_sk);
@@ -172,9 +168,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
return err;
if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
set_rx_flow_off(cf_sk);
- trace_printk("CAIF: %s():"
- " sending flow OFF due to rmem_schedule\n",
- __func__);
+ pr_debug("sending flow OFF due to rmem_schedule\n");
dbfs_atomic_inc(&cnt.num_rx_flow_off);
caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
}
@@ -275,8 +269,7 @@ static void caif_ctrl_cb(struct cflayer *layr,
break;
default:
- pr_debug("CAIF: %s(): Unexpected flow command %d\n",
- __func__, flow);
+ pr_debug("Unexpected flow command %d\n", flow);
}
}
@@ -536,8 +529,7 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
/* Slight paranoia, probably not needed. */
if (unlikely(loopcnt++ > 1000)) {
- pr_warning("CAIF: %s(): transmit retries failed,"
- " error = %d\n", __func__, ret);
+ pr_warn("transmit retries failed, error = %d\n", ret);
break;
}
@@ -902,8 +894,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
cf_sk->maxframe = dev->mtu - (headroom + tailroom);
dev_put(dev);
if (cf_sk->maxframe < 1) {
- pr_warning("CAIF: %s(): CAIF Interface MTU too small (%d)\n",
- __func__, dev->mtu);
+ pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu);
err = -ENODEV;
goto out;
}
@@ -1123,10 +1114,6 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
/* Store the protocol */
sk->sk_protocol = (unsigned char) protocol;
- /* Sendbuf dictates the amount of outbound packets not yet sent */
- sk->sk_sndbuf = CAIF_DEF_SNDBUF;
- sk->sk_rcvbuf = CAIF_DEF_RCVBUF;
-
/*
* Lock in order to try to stop someone from opening the socket
* too early.
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 1c29189b344d..41adafd18914 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -3,6 +3,9 @@
* Author: Sjur Brendeland/sjur.brandeland@stericsson.com
* License terms: GNU General Public License (GPL) version 2
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/slab.h>
@@ -78,7 +81,7 @@ struct cfcnfg *cfcnfg_create(void)
/* Initiate this layer */
this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
if (!this) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return NULL;
}
this->mux = cfmuxl_create();
@@ -106,7 +109,7 @@ struct cfcnfg *cfcnfg_create(void)
layer_set_up(this->ctrl, this);
return this;
out_of_mem:
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
kfree(this->mux);
kfree(this->ctrl);
kfree(this);
@@ -194,7 +197,7 @@ int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
caif_assert(adap_layer != NULL);
channel_id = adap_layer->id;
if (adap_layer->dn == NULL || channel_id == 0) {
- pr_err("CAIF: %s():adap_layer->id is 0\n", __func__);
+ pr_err("adap_layer->dn == NULL or adap_layer->id is 0\n");
ret = -ENOTCONN;
goto end;
}
@@ -204,9 +207,8 @@ int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
layer_set_up(servl, NULL);
ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
if (servl == NULL) {
- pr_err("CAIF: %s(): PROTOCOL ERROR "
- "- Error removing service_layer Channel_Id(%d)",
- __func__, channel_id);
+ pr_err("PROTOCOL ERROR - Error removing service_layer Channel_Id(%d)",
+ channel_id);
ret = -EINVAL;
goto end;
}
@@ -216,18 +218,14 @@ int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
phyinfo = cfcnfg_get_phyinfo(cnfg, phyid);
if (phyinfo == NULL) {
- pr_warning("CAIF: %s(): "
- "No interface to send disconnect to\n",
- __func__);
+ pr_warn("No interface to send disconnect to\n");
ret = -ENODEV;
goto end;
}
if (phyinfo->id != phyid ||
phyinfo->phy_layer->id != phyid ||
phyinfo->frm_layer->id != phyid) {
- pr_err("CAIF: %s(): "
- "Inconsistency in phy registration\n",
- __func__);
+ pr_err("Inconsistency in phy registration\n");
ret = -EINVAL;
goto end;
}
@@ -276,21 +274,20 @@ int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
{
struct cflayer *frml;
if (adap_layer == NULL) {
- pr_err("CAIF: %s(): adap_layer is zero", __func__);
+ pr_err("adap_layer is zero\n");
return -EINVAL;
}
if (adap_layer->receive == NULL) {
- pr_err("CAIF: %s(): adap_layer->receive is NULL", __func__);
+ pr_err("adap_layer->receive is NULL\n");
return -EINVAL;
}
if (adap_layer->ctrlcmd == NULL) {
- pr_err("CAIF: %s(): adap_layer->ctrlcmd == NULL", __func__);
+ pr_err("adap_layer->ctrlcmd == NULL\n");
return -EINVAL;
}
frml = cnfg->phy_layers[param->phyid].frm_layer;
if (frml == NULL) {
- pr_err("CAIF: %s(): Specified PHY type does not exist!",
- __func__);
+ pr_err("Specified PHY type does not exist!\n");
return -ENODEV;
}
caif_assert(param->phyid == cnfg->phy_layers[param->phyid].id);
@@ -330,9 +327,7 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
struct net_device *netdev;
if (adapt_layer == NULL) {
- pr_debug("CAIF: %s(): link setup response "
- "but no client exist, send linkdown back\n",
- __func__);
+ pr_debug("link setup response but no client exist, send linkdown back\n");
cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL);
return;
}
@@ -374,13 +369,11 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
servicel = cfdbgl_create(channel_id, &phyinfo->dev_info);
break;
default:
- pr_err("CAIF: %s(): Protocol error. "
- "Link setup response - unknown channel type\n",
- __func__);
+ pr_err("Protocol error. Link setup response - unknown channel type\n");
return;
}
if (!servicel) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return;
}
layer_set_dn(servicel, cnfg->mux);
@@ -418,7 +411,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
}
}
if (*phyid == 0) {
- pr_err("CAIF: %s(): No Available PHY ID\n", __func__);
+ pr_err("No Available PHY ID\n");
return;
}
@@ -427,7 +420,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
phy_driver =
cfserl_create(CFPHYTYPE_FRAG, *phyid, stx);
if (!phy_driver) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return;
}
@@ -436,7 +429,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
phy_driver = NULL;
break;
default:
- pr_err("CAIF: %s(): %d", __func__, phy_type);
+ pr_err("%d\n", phy_type);
return;
break;
}
@@ -455,7 +448,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
phy_layer->type = phy_type;
frml = cffrml_create(*phyid, fcs);
if (!frml) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return;
}
cnfg->phy_layers[*phyid].frm_layer = frml;
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 563145fdc4c3..08f267a109aa 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -4,6 +4,8 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -36,7 +38,7 @@ struct cflayer *cfctrl_create(void)
struct cfctrl *this =
kmalloc(sizeof(struct cfctrl), GFP_ATOMIC);
if (!this) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return NULL;
}
caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
@@ -132,9 +134,7 @@ struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
if (cfctrl_req_eq(req, p)) {
if (p != first)
- pr_warning("CAIF: %s(): Requests are not "
- "received in order\n",
- __func__);
+ pr_warn("Requests are not received in order\n");
atomic_set(&ctrl->rsp_seq_no,
p->sequence_no);
@@ -177,7 +177,7 @@ void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
int ret;
struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
if (!pkt) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return;
}
caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
@@ -189,8 +189,7 @@ void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
ret =
cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
if (ret < 0) {
- pr_err("CAIF: %s(): Could not transmit enum message\n",
- __func__);
+ pr_err("Could not transmit enum message\n");
cfpkt_destroy(pkt);
}
}
@@ -208,7 +207,7 @@ int cfctrl_linkup_request(struct cflayer *layer,
char utility_name[16];
struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
if (!pkt) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return -ENOMEM;
}
cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP);
@@ -253,13 +252,13 @@ int cfctrl_linkup_request(struct cflayer *layer,
param->u.utility.paramlen);
break;
default:
- pr_warning("CAIF: %s():Request setup of bad link type = %d\n",
- __func__, param->linktype);
+ pr_warn("Request setup of bad link type = %d\n",
+ param->linktype);
return -EINVAL;
}
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return -ENOMEM;
}
req->client_layer = user_layer;
@@ -276,8 +275,7 @@ int cfctrl_linkup_request(struct cflayer *layer,
ret =
cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
if (ret < 0) {
- pr_err("CAIF: %s(): Could not transmit linksetup request\n",
- __func__);
+ pr_err("Could not transmit linksetup request\n");
cfpkt_destroy(pkt);
return -ENODEV;
}
@@ -291,7 +289,7 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
struct cfctrl *cfctrl = container_obj(layer);
struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
if (!pkt) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return -ENOMEM;
}
cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
@@ -300,8 +298,7 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
ret =
cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
if (ret < 0) {
- pr_err("CAIF: %s(): Could not transmit link-down request\n",
- __func__);
+ pr_err("Could not transmit link-down request\n");
cfpkt_destroy(pkt);
}
return ret;
@@ -313,7 +310,7 @@ void cfctrl_sleep_req(struct cflayer *layer)
struct cfctrl *cfctrl = container_obj(layer);
struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
if (!pkt) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return;
}
cfpkt_addbdy(pkt, CFCTRL_CMD_SLEEP);
@@ -330,7 +327,7 @@ void cfctrl_wake_req(struct cflayer *layer)
struct cfctrl *cfctrl = container_obj(layer);
struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
if (!pkt) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return;
}
cfpkt_addbdy(pkt, CFCTRL_CMD_WAKE);
@@ -347,7 +344,7 @@ void cfctrl_getstartreason_req(struct cflayer *layer)
struct cfctrl *cfctrl = container_obj(layer);
struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
if (!pkt) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return;
}
cfpkt_addbdy(pkt, CFCTRL_CMD_START_REASON);
@@ -364,12 +361,11 @@ void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
struct cfctrl_request_info *p, *tmp;
struct cfctrl *ctrl = container_obj(layr);
spin_lock(&ctrl->info_list_lock);
- pr_warning("CAIF: %s(): enter\n", __func__);
+ pr_warn("enter\n");
list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
if (p->client_layer == adap_layer) {
- pr_warning("CAIF: %s(): cancel req :%d\n", __func__,
- p->sequence_no);
+ pr_warn("cancel req :%d\n", p->sequence_no);
list_del(&p->list);
kfree(p);
}
@@ -520,9 +516,8 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
cfpkt_extr_head(pkt, &param, len);
break;
default:
- pr_warning("CAIF: %s(): Request setup "
- "- invalid link type (%d)",
- __func__, serv);
+ pr_warn("Request setup - invalid link type (%d)\n",
+ serv);
goto error;
}
@@ -532,9 +527,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) ||
cfpkt_erroneous(pkt)) {
- pr_err("CAIF: %s(): Invalid O/E bit or parse "
- "error on CAIF control channel",
- __func__);
+ pr_err("Invalid O/E bit or parse error on CAIF control channel\n");
cfctrl->res.reject_rsp(cfctrl->serv.layer.up,
0,
req ? req->client_layer
@@ -556,8 +549,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid);
break;
case CFCTRL_CMD_LINK_ERR:
- pr_err("CAIF: %s(): Frame Error Indication received\n",
- __func__);
+ pr_err("Frame Error Indication received\n");
cfctrl->res.linkerror_ind();
break;
case CFCTRL_CMD_ENUM:
@@ -576,7 +568,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
cfctrl->res.radioset_rsp();
break;
default:
- pr_err("CAIF: %s(): Unrecognized Control Frame\n", __func__);
+ pr_err("Unrecognized Control Frame\n");
goto error;
break;
}
@@ -595,8 +587,7 @@ static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
case CAIF_CTRLCMD_FLOW_OFF_IND:
spin_lock(&this->info_list_lock);
if (!list_empty(&this->list)) {
- pr_debug("CAIF: %s(): Received flow off in "
- "control layer", __func__);
+ pr_debug("Received flow off in control layer\n");
}
spin_unlock(&this->info_list_lock);
break;
@@ -620,7 +611,7 @@ static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt)
if (!ctrl->loop_linkused[linkid])
goto found;
spin_unlock(&ctrl->loop_linkid_lock);
- pr_err("CAIF: %s(): Out of link-ids\n", __func__);
+ pr_err("Out of link-ids\n");
return -EINVAL;
found:
if (!ctrl->loop_linkused[linkid])
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
index 676648cac8dd..496fda9ac66f 100644
--- a/net/caif/cfdbgl.c
+++ b/net/caif/cfdbgl.c
@@ -4,6 +4,8 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/stddef.h>
#include <linux/slab.h>
#include <net/caif/caif_layer.h>
@@ -17,7 +19,7 @@ struct cflayer *cfdbgl_create(u8 channel_id, struct dev_info *dev_info)
{
struct cfsrvl *dbg = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
if (!dbg) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return NULL;
}
caif_assert(offsetof(struct cfsrvl, layer) == 0);
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index ed9d53aff280..d3ed264ad6c4 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -4,6 +4,8 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -26,7 +28,7 @@ struct cflayer *cfdgml_create(u8 channel_id, struct dev_info *dev_info)
{
struct cfsrvl *dgm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
if (!dgm) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return NULL;
}
caif_assert(offsetof(struct cfsrvl, layer) == 0);
@@ -49,14 +51,14 @@ static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt)
caif_assert(layr->ctrlcmd != NULL);
if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
- pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ pr_err("Packet is erroneous!\n");
cfpkt_destroy(pkt);
return -EPROTO;
}
if ((cmd & DGM_CMD_BIT) == 0) {
if (cfpkt_extr_head(pkt, &dgmhdr, 3) < 0) {
- pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ pr_err("Packet is erroneous!\n");
cfpkt_destroy(pkt);
return -EPROTO;
}
@@ -75,8 +77,7 @@ static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt)
return 0;
default:
cfpkt_destroy(pkt);
- pr_info("CAIF: %s(): Unknown datagram control %d (0x%x)\n",
- __func__, cmd, cmd);
+ pr_info("Unknown datagram control %d (0x%x)\n", cmd, cmd);
return -EPROTO;
}
}
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
index e86a4ca3b217..a445043931ae 100644
--- a/net/caif/cffrml.c
+++ b/net/caif/cffrml.c
@@ -6,6 +6,8 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -32,7 +34,7 @@ struct cflayer *cffrml_create(u16 phyid, bool use_fcs)
{
struct cffrml *this = kmalloc(sizeof(struct cffrml), GFP_ATOMIC);
if (!this) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return NULL;
}
caif_assert(offsetof(struct cffrml, layer) == 0);
@@ -83,7 +85,7 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
if (cfpkt_setlen(pkt, len) < 0) {
++cffrml_rcv_error;
- pr_err("CAIF: %s():Framing length error (%d)\n", __func__, len);
+ pr_err("Framing length error (%d)\n", len);
cfpkt_destroy(pkt);
return -EPROTO;
}
@@ -99,14 +101,14 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
cfpkt_add_trail(pkt, &tmp, 2);
++cffrml_rcv_error;
++cffrml_rcv_checsum_error;
- pr_info("CAIF: %s(): Frame checksum error "
- "(0x%x != 0x%x)\n", __func__, hdrchks, pktchks);
+ pr_info("Frame checksum error (0x%x != 0x%x)\n",
+ hdrchks, pktchks);
return -EILSEQ;
}
}
if (cfpkt_erroneous(pkt)) {
++cffrml_rcv_error;
- pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ pr_err("Packet is erroneous!\n");
cfpkt_destroy(pkt);
return -EPROTO;
}
@@ -132,7 +134,7 @@ static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
cfpkt_add_head(pkt, &tmp, 2);
cfpkt_info(pkt)->hdr_len += 2;
if (cfpkt_erroneous(pkt)) {
- pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ pr_err("Packet is erroneous!\n");
return -EPROTO;
}
ret = layr->dn->transmit(layr->dn, pkt);
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 80c8d332b258..46f34b2e0478 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -3,6 +3,9 @@
* Author: Sjur Brendeland/sjur.brandeland@stericsson.com
* License terms: GNU General Public License (GPL) version 2
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -190,7 +193,7 @@ static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
u8 id;
struct cflayer *up;
if (cfpkt_extr_head(pkt, &id, 1) < 0) {
- pr_err("CAIF: %s(): erroneous Caif Packet\n", __func__);
+ pr_err("erroneous Caif Packet\n");
cfpkt_destroy(pkt);
return -EPROTO;
}
@@ -199,8 +202,8 @@ static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
up = get_up(muxl, id);
spin_unlock(&muxl->receive_lock);
if (up == NULL) {
- pr_info("CAIF: %s():Received data on unknown link ID = %d "
- "(0x%x) up == NULL", __func__, id, id);
+ pr_info("Received data on unknown link ID = %d (0x%x) up == NULL",
+ id, id);
cfpkt_destroy(pkt);
/*
* Don't return ERROR, since modem misbehaves and sends out
@@ -223,9 +226,8 @@ static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
struct caif_payload_info *info = cfpkt_info(pkt);
dn = get_dn(muxl, cfpkt_info(pkt)->dev_info);
if (dn == NULL) {
- pr_warning("CAIF: %s(): Send data on unknown phy "
- "ID = %d (0x%x)\n",
- __func__, info->dev_info->id, info->dev_info->id);
+ pr_warn("Send data on unknown phy ID = %d (0x%x)\n",
+ info->dev_info->id, info->dev_info->id);
return -ENOTCONN;
}
info->hdr_len += 1;
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 01f238ff2346..d7e865e2ff65 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -4,19 +4,22 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/hardirq.h>
#include <net/caif/cfpkt.h>
-#define PKT_PREFIX 16
+#define PKT_PREFIX 48
#define PKT_POSTFIX 2
#define PKT_LEN_WHEN_EXTENDING 128
-#define PKT_ERROR(pkt, errmsg) do { \
- cfpkt_priv(pkt)->erronous = true; \
- skb_reset_tail_pointer(&pkt->skb); \
- pr_warning("CAIF: " errmsg);\
- } while (0)
+#define PKT_ERROR(pkt, errmsg) \
+do { \
+ cfpkt_priv(pkt)->erronous = true; \
+ skb_reset_tail_pointer(&pkt->skb); \
+ pr_warn(errmsg); \
+} while (0)
struct cfpktq {
struct sk_buff_head head;
@@ -130,13 +133,13 @@ int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len)
return -EPROTO;
if (unlikely(len > skb->len)) {
- PKT_ERROR(pkt, "cfpkt_extr_head read beyond end of packet\n");
+ PKT_ERROR(pkt, "read beyond end of packet\n");
return -EPROTO;
}
if (unlikely(len > skb_headlen(skb))) {
if (unlikely(skb_linearize(skb) != 0)) {
- PKT_ERROR(pkt, "cfpkt_extr_head linearize failed\n");
+ PKT_ERROR(pkt, "linearize failed\n");
return -EPROTO;
}
}
@@ -156,11 +159,11 @@ int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
return -EPROTO;
if (unlikely(skb_linearize(skb) != 0)) {
- PKT_ERROR(pkt, "cfpkt_extr_trail linearize failed\n");
+ PKT_ERROR(pkt, "linearize failed\n");
return -EPROTO;
}
if (unlikely(skb->data + len > skb_tail_pointer(skb))) {
- PKT_ERROR(pkt, "cfpkt_extr_trail read beyond end of packet\n");
+ PKT_ERROR(pkt, "read beyond end of packet\n");
return -EPROTO;
}
from = skb_tail_pointer(skb) - len;
@@ -202,7 +205,7 @@ int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
/* Make sure data is writable */
if (unlikely(skb_cow_data(skb, addlen, &lastskb) < 0)) {
- PKT_ERROR(pkt, "cfpkt_add_body: cow failed\n");
+ PKT_ERROR(pkt, "cow failed\n");
return -EPROTO;
}
/*
@@ -211,8 +214,7 @@ int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
* lengths of the top SKB.
*/
if (lastskb != skb) {
- pr_warning("CAIF: %s(): Packet is non-linear\n",
- __func__);
+ pr_warn("Packet is non-linear\n");
skb->len += len;
skb->data_len += len;
}
@@ -242,14 +244,14 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
if (unlikely(is_erronous(pkt)))
return -EPROTO;
if (unlikely(skb_headroom(skb) < len)) {
- PKT_ERROR(pkt, "cfpkt_add_head: no headroom\n");
+ PKT_ERROR(pkt, "no headroom\n");
return -EPROTO;
}
/* Make sure data is writable */
ret = skb_cow_data(skb, 0, &lastskb);
if (unlikely(ret < 0)) {
- PKT_ERROR(pkt, "cfpkt_add_head: cow failed\n");
+ PKT_ERROR(pkt, "cow failed\n");
return ret;
}
@@ -283,7 +285,7 @@ inline u16 cfpkt_iterate(struct cfpkt *pkt,
if (unlikely(is_erronous(pkt)))
return -EPROTO;
if (unlikely(skb_linearize(&pkt->skb) != 0)) {
- PKT_ERROR(pkt, "cfpkt_iterate: linearize failed\n");
+ PKT_ERROR(pkt, "linearize failed\n");
return -EPROTO;
}
return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt));
@@ -309,7 +311,7 @@ int cfpkt_setlen(struct cfpkt *pkt, u16 len)
/* Need to expand SKB */
if (unlikely(!cfpkt_pad_trail(pkt, len - skb->len)))
- PKT_ERROR(pkt, "cfpkt_setlen: skb_pad_trail failed\n");
+ PKT_ERROR(pkt, "skb_pad_trail failed\n");
return cfpkt_getlen(pkt);
}
@@ -380,8 +382,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
return NULL;
if (skb->data + pos > skb_tail_pointer(skb)) {
- PKT_ERROR(pkt,
- "cfpkt_split: trying to split beyond end of packet");
+ PKT_ERROR(pkt, "trying to split beyond end of packet\n");
return NULL;
}
@@ -455,17 +456,17 @@ int cfpkt_raw_append(struct cfpkt *pkt, void **buf, unsigned int buflen)
return -EPROTO;
/* Make sure SKB is writable */
if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
- PKT_ERROR(pkt, "cfpkt_raw_append: skb_cow_data failed\n");
+ PKT_ERROR(pkt, "skb_cow_data failed\n");
return -EPROTO;
}
if (unlikely(skb_linearize(skb) != 0)) {
- PKT_ERROR(pkt, "cfpkt_raw_append: linearize failed\n");
+ PKT_ERROR(pkt, "linearize failed\n");
return -EPROTO;
}
if (unlikely(skb_tailroom(skb) < buflen)) {
- PKT_ERROR(pkt, "cfpkt_raw_append: buffer too short - failed\n");
+ PKT_ERROR(pkt, "buffer too short - failed\n");
return -EPROTO;
}
@@ -483,14 +484,13 @@ int cfpkt_raw_extract(struct cfpkt *pkt, void **buf, unsigned int buflen)
return -EPROTO;
if (unlikely(buflen > skb->len)) {
- PKT_ERROR(pkt, "cfpkt_raw_extract: buflen too large "
- "- failed\n");
+ PKT_ERROR(pkt, "buflen too large - failed\n");
return -EPROTO;
}
if (unlikely(buflen > skb_headlen(skb))) {
if (unlikely(skb_linearize(skb) != 0)) {
- PKT_ERROR(pkt, "cfpkt_raw_extract: linearize failed\n");
+ PKT_ERROR(pkt, "linearize failed\n");
return -EPROTO;
}
}
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index eb1602022ac0..bde8481e8d25 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -4,10 +4,12 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
-#include <linux/unaligned/le_byteshift.h>
+#include <asm/unaligned.h>
#include <net/caif/caif_layer.h>
#include <net/caif/cfsrvl.h>
#include <net/caif/cfpkt.h>
@@ -48,7 +50,7 @@ struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info,
kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);
if (!this) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return NULL;
}
@@ -178,9 +180,7 @@ out:
cfpkt_destroy(rfml->incomplete_frm);
rfml->incomplete_frm = NULL;
- pr_info("CAIF: %s(): "
- "Connection error %d triggered on RFM link\n",
- __func__, err);
+ pr_info("Connection error %d triggered on RFM link\n", err);
/* Trigger connection error upon failure.*/
layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
@@ -280,9 +280,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
out:
if (err != 0) {
- pr_info("CAIF: %s(): "
- "Connection error %d triggered on RFM link\n",
- __func__, err);
+ pr_info("Connection error %d triggered on RFM link\n", err);
/* Trigger connection error upon failure.*/
layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
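The recurring change across these CAIF files defines pr_fmt() once per file so that pr_warn()/pr_err()/pr_debug() carry the module name and function automatically, replacing the hand-rolled "CAIF: %s():" prefixes. For illustration only (not part of the patch), a minimal module using the same pattern; the module name and message are placeholders:

/* pr_fmt() must be defined before the first include that pulls in printk.h. */
#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/module.h>
#include <linux/printk.h>

static int __init prfmt_demo_init(void)
{
	/* Logs e.g. "prfmt_demo:prfmt_demo_init(): Out of memory" */
	pr_warn("Out of memory\n");
	return 0;
}

static void __exit prfmt_demo_exit(void)
{
}

module_init(prfmt_demo_init);
module_exit(prfmt_demo_exit);
MODULE_LICENSE("GPL");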
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index a11fbd68a13d..9297f7dea9d8 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -4,6 +4,8 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
@@ -34,7 +36,7 @@ struct cflayer *cfserl_create(int type, int instance, bool use_stx)
{
struct cfserl *this = kmalloc(sizeof(struct cfserl), GFP_ATOMIC);
if (!this) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return NULL;
}
caif_assert(offsetof(struct cfserl, layer) == 0);
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index f40939a91211..ab5e542526bf 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -4,6 +4,8 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
@@ -79,8 +81,7 @@ static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
layr->up->ctrlcmd(layr->up, ctrl, phyid);
break;
default:
- pr_warning("CAIF: %s(): "
- "Unexpected ctrl in cfsrvl (%d)\n", __func__, ctrl);
+ pr_warn("Unexpected ctrl in cfsrvl (%d)\n", ctrl);
/* We have both modem and phy flow on, send flow on */
layr->up->ctrlcmd(layr->up, ctrl, phyid);
service->phy_flow_on = true;
@@ -107,14 +108,12 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
u8 flow_on = SRVL_FLOW_ON;
pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
if (!pkt) {
- pr_warning("CAIF: %s(): Out of memory\n",
- __func__);
+ pr_warn("Out of memory\n");
return -ENOMEM;
}
if (cfpkt_add_head(pkt, &flow_on, 1) < 0) {
- pr_err("CAIF: %s(): Packet is erroneous!\n",
- __func__);
+ pr_err("Packet is erroneous!\n");
cfpkt_destroy(pkt);
return -EPROTO;
}
@@ -131,14 +130,12 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
u8 flow_off = SRVL_FLOW_OFF;
pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
if (!pkt) {
- pr_warning("CAIF: %s(): Out of memory\n",
- __func__);
+ pr_warn("Out of memory\n");
return -ENOMEM;
}
if (cfpkt_add_head(pkt, &flow_off, 1) < 0) {
- pr_err("CAIF: %s(): Packet is erroneous!\n",
- __func__);
+ pr_err("Packet is erroneous!\n");
cfpkt_destroy(pkt);
return -EPROTO;
}
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
index 02795aff57a4..efad410e4c82 100644
--- a/net/caif/cfutill.c
+++ b/net/caif/cfutill.c
@@ -4,6 +4,8 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
@@ -26,7 +28,7 @@ struct cflayer *cfutill_create(u8 channel_id, struct dev_info *dev_info)
{
struct cfsrvl *util = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
if (!util) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return NULL;
}
caif_assert(offsetof(struct cfsrvl, layer) == 0);
@@ -47,7 +49,7 @@ static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt)
caif_assert(layr->up->receive != NULL);
caif_assert(layr->up->ctrlcmd != NULL);
if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
- pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ pr_err("Packet is erroneous!\n");
cfpkt_destroy(pkt);
return -EPROTO;
}
@@ -64,16 +66,14 @@ static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt)
cfpkt_destroy(pkt);
return 0;
case UTIL_REMOTE_SHUTDOWN: /* Remote Shutdown Request */
- pr_err("CAIF: %s(): REMOTE SHUTDOWN REQUEST RECEIVED\n",
- __func__);
+ pr_err("REMOTE SHUTDOWN REQUEST RECEIVED\n");
layr->ctrlcmd(layr, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, 0);
service->open = false;
cfpkt_destroy(pkt);
return 0;
default:
cfpkt_destroy(pkt);
- pr_warning("CAIF: %s(): Unknown service control %d (0x%x)\n",
- __func__, cmd, cmd);
+ pr_warn("Unknown service control %d (0x%x)\n", cmd, cmd);
return -EPROTO;
}
}
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index 77cc09faac9a..3b425b189a99 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -4,6 +4,8 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/stddef.h>
#include <linux/slab.h>
#include <net/caif/caif_layer.h>
@@ -25,7 +27,7 @@ struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info)
{
struct cfsrvl *vei = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
if (!vei) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return NULL;
}
caif_assert(offsetof(struct cfsrvl, layer) == 0);
@@ -47,7 +49,7 @@ static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt)
if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
- pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ pr_err("Packet is erroneous!\n");
cfpkt_destroy(pkt);
return -EPROTO;
}
@@ -67,8 +69,7 @@ static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt)
cfpkt_destroy(pkt);
return 0;
default: /* SET RS232 PIN */
- pr_warning("CAIF: %s():Unknown VEI control packet %d (0x%x)!\n",
- __func__, cmd, cmd);
+ pr_warn("Unknown VEI control packet %d (0x%x)!\n", cmd, cmd);
cfpkt_destroy(pkt);
return -EPROTO;
}
@@ -86,7 +87,7 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
caif_assert(layr->dn->transmit != NULL);
if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
- pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ pr_err("Packet is erroneous!\n");
return -EPROTO;
}
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c
index ada6ee2d48f5..bf6fef2a0eff 100644
--- a/net/caif/cfvidl.c
+++ b/net/caif/cfvidl.c
@@ -4,6 +4,8 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
@@ -21,7 +23,7 @@ struct cflayer *cfvidl_create(u8 channel_id, struct dev_info *dev_info)
{
struct cfsrvl *vid = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
if (!vid) {
- pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ pr_warn("Out of memory\n");
return NULL;
}
caif_assert(offsetof(struct cfsrvl, layer) == 0);
@@ -38,7 +40,7 @@ static int cfvidl_receive(struct cflayer *layr, struct cfpkt *pkt)
{
u32 videoheader;
if (cfpkt_extr_head(pkt, &videoheader, 4) < 0) {
- pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ pr_err("Packet is erroneous!\n");
cfpkt_destroy(pkt);
return -EPROTO;
}
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 4293e190ec53..84a422c98941 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -5,6 +5,8 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
+
#include <linux/version.h>
#include <linux/fs.h>
#include <linux/init.h>
@@ -28,9 +30,6 @@
#define CONNECT_TIMEOUT (5 * HZ)
#define CAIF_NET_DEFAULT_QUEUE_LEN 500
-#undef pr_debug
-#define pr_debug pr_warning
-
/*This list is protected by the rtnl lock. */
static LIST_HEAD(chnl_net_list);
@@ -142,8 +141,7 @@ static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow,
int phyid)
{
struct chnl_net *priv = container_of(layr, struct chnl_net, chnl);
- pr_debug("CAIF: %s(): NET flowctrl func called flow: %s\n",
- __func__,
+ pr_debug("NET flowctrl func called flow: %s\n",
flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" :
flow == CAIF_CTRLCMD_INIT_RSP ? "INIT" :
flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" :
@@ -196,12 +194,12 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
priv = netdev_priv(dev);
if (skb->len > priv->netdev->mtu) {
- pr_warning("CAIF: %s(): Size of skb exceeded MTU\n", __func__);
+ pr_warn("Size of skb exceeded MTU\n");
return -ENOSPC;
}
if (!priv->flowenabled) {
- pr_debug("CAIF: %s(): dropping packets flow off\n", __func__);
+ pr_debug("dropping packets flow off\n");
return NETDEV_TX_BUSY;
}
@@ -237,7 +235,7 @@ static int chnl_net_open(struct net_device *dev)
ASSERT_RTNL();
priv = netdev_priv(dev);
if (!priv) {
- pr_debug("CAIF: %s(): chnl_net_open: no priv\n", __func__);
+ pr_debug("chnl_net_open: no priv\n");
return -ENODEV;
}
@@ -246,18 +244,17 @@ static int chnl_net_open(struct net_device *dev)
result = caif_connect_client(&priv->conn_req, &priv->chnl,
&llifindex, &headroom, &tailroom);
if (result != 0) {
- pr_debug("CAIF: %s(): err: "
- "Unable to register and open device,"
- " Err:%d\n",
- __func__,
- result);
+ pr_debug("err: "
+ "Unable to register and open device,"
+ " Err:%d\n",
+ result);
goto error;
}
lldev = dev_get_by_index(dev_net(dev), llifindex);
if (lldev == NULL) {
- pr_debug("CAIF: %s(): no interface?\n", __func__);
+ pr_debug("no interface?\n");
result = -ENODEV;
goto error;
}
@@ -279,9 +276,7 @@ static int chnl_net_open(struct net_device *dev)
dev_put(lldev);
if (mtu < 100) {
- pr_warning("CAIF: %s(): "
- "CAIF Interface MTU too small (%d)\n",
- __func__, mtu);
+ pr_warn("CAIF Interface MTU too small (%d)\n", mtu);
result = -ENODEV;
goto error;
}
@@ -296,33 +291,32 @@ static int chnl_net_open(struct net_device *dev)
rtnl_lock();
if (result == -ERESTARTSYS) {
- pr_debug("CAIF: %s(): wait_event_interruptible"
- " woken by a signal\n", __func__);
+ pr_debug("wait_event_interruptible woken by a signal\n");
result = -ERESTARTSYS;
goto error;
}
if (result == 0) {
- pr_debug("CAIF: %s(): connect timeout\n", __func__);
+ pr_debug("connect timeout\n");
caif_disconnect_client(&priv->chnl);
priv->state = CAIF_DISCONNECTED;
- pr_debug("CAIF: %s(): state disconnected\n", __func__);
+ pr_debug("state disconnected\n");
result = -ETIMEDOUT;
goto error;
}
if (priv->state != CAIF_CONNECTED) {
- pr_debug("CAIF: %s(): connect failed\n", __func__);
+ pr_debug("connect failed\n");
result = -ECONNREFUSED;
goto error;
}
- pr_debug("CAIF: %s(): CAIF Netdevice connected\n", __func__);
+ pr_debug("CAIF Netdevice connected\n");
return 0;
error:
caif_disconnect_client(&priv->chnl);
priv->state = CAIF_DISCONNECTED;
- pr_debug("CAIF: %s(): state disconnected\n", __func__);
+ pr_debug("state disconnected\n");
return result;
}
@@ -413,7 +407,7 @@ static void caif_netlink_parms(struct nlattr *data[],
struct caif_connect_request *conn_req)
{
if (!data) {
- pr_warning("CAIF: %s: no params data found\n", __func__);
+ pr_warn("no params data found\n");
return;
}
if (data[IFLA_CAIF_IPV4_CONNID])
@@ -442,8 +436,7 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
ret = register_netdevice(dev);
if (ret)
- pr_warning("CAIF: %s(): device rtml registration failed\n",
- __func__);
+ pr_warn("device rtml registration failed\n");
return ret;
}
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 9c65e9deb9c3..08ffe9e4be20 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -60,6 +60,13 @@
#include <net/sock.h>
#include <net/net_namespace.h>
+/*
+ * To send multiple CAN frame content within TX_SETUP or to filter
+ * CAN messages with multiplex index within RX_SETUP, the number of
+ * different filters is limited to 256 due to the one byte index value.
+ */
+#define MAX_NFRAMES 256
+
/* use of last_frames[index].can_dlc */
#define RX_RECV 0x40 /* received data for this element */
#define RX_THR 0x80 /* element not been sent due to throttle feature */
@@ -89,16 +96,16 @@ struct bcm_op {
struct list_head list;
int ifindex;
canid_t can_id;
- int flags;
+ u32 flags;
unsigned long frames_abs, frames_filtered;
struct timeval ival1, ival2;
struct hrtimer timer, thrtimer;
struct tasklet_struct tsklet, thrtsklet;
ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
int rx_ifindex;
- int count;
- int nframes;
- int currframe;
+ u32 count;
+ u32 nframes;
+ u32 currframe;
struct can_frame *frames;
struct can_frame *last_frames;
struct can_frame sframe;
@@ -175,7 +182,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
seq_printf(m, "rx_op: %03X %-5s ",
op->can_id, bcm_proc_getifname(ifname, op->ifindex));
- seq_printf(m, "[%d]%c ", op->nframes,
+ seq_printf(m, "[%u]%c ", op->nframes,
(op->flags & RX_CHECK_DLC)?'d':' ');
if (op->kt_ival1.tv64)
seq_printf(m, "timeo=%lld ",
@@ -198,7 +205,7 @@ static int bcm_proc_show(struct seq_file *m, void *v)
list_for_each_entry(op, &bo->tx_ops, list) {
- seq_printf(m, "tx_op: %03X %s [%d] ",
+ seq_printf(m, "tx_op: %03X %s [%u] ",
op->can_id,
bcm_proc_getifname(ifname, op->ifindex),
op->nframes);
@@ -283,7 +290,7 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
struct can_frame *firstframe;
struct sockaddr_can *addr;
struct sock *sk = op->sk;
- int datalen = head->nframes * CFSIZ;
+ unsigned int datalen = head->nframes * CFSIZ;
int err;
skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
@@ -468,7 +475,7 @@ rx_changed_settime:
* bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
* received data stored in op->last_frames[]
*/
-static void bcm_rx_cmp_to_index(struct bcm_op *op, int index,
+static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index,
const struct can_frame *rxdata)
{
/*
@@ -554,7 +561,8 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
/*
* bcm_rx_do_flush - helper for bcm_rx_thr_flush
*/
-static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index)
+static inline int bcm_rx_do_flush(struct bcm_op *op, int update,
+ unsigned int index)
{
if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
if (update)
@@ -575,7 +583,7 @@ static int bcm_rx_thr_flush(struct bcm_op *op, int update)
int updated = 0;
if (op->nframes > 1) {
- int i;
+ unsigned int i;
/* for MUX filter we start at index 1 */
for (i = 1; i < op->nframes; i++)
@@ -624,7 +632,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
{
struct bcm_op *op = (struct bcm_op *)data;
const struct can_frame *rxframe = (struct can_frame *)skb->data;
- int i;
+ unsigned int i;
/* disable timeout */
hrtimer_cancel(&op->timer);
@@ -822,14 +830,15 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
{
struct bcm_sock *bo = bcm_sk(sk);
struct bcm_op *op;
- int i, err;
+ unsigned int i;
+ int err;
/* we need a real device to send frames */
if (!ifindex)
return -ENODEV;
- /* we need at least one can_frame */
- if (msg_head->nframes < 1)
+ /* check nframes boundaries - we need at least one can_frame */
+ if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
return -EINVAL;
/* check the given can_id */
@@ -993,6 +1002,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
msg_head->nframes = 0;
}
+ /* the first element contains the mux-mask => MAX_NFRAMES + 1 */
+ if (msg_head->nframes > MAX_NFRAMES + 1)
+ return -EINVAL;
+
if ((msg_head->flags & RX_RTR_FRAME) &&
((msg_head->nframes != 1) ||
(!(msg_head->can_id & CAN_RTR_FLAG))))
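The bcm.c changes above make flags/count/nframes unsigned and reject nframes above MAX_NFRAMES (256), since the multiplex index that selects among last_frames[] is a single byte. These fields reach the kernel through a bcm_msg_head written to a CAN_BCM socket; a hedged user-space sketch of a TX_SETUP that stays within the new bound (interface name, CAN ID and payload are placeholders, error handling trimmed):

/* Sketch: cyclic transmission of one CAN frame via CAN_BCM. */
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/bcm.h>

struct bcm_tx {
	struct bcm_msg_head head;
	struct can_frame frame[1];	/* nframes must be >= 1 and <= 256 */
};

int main(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct ifreq ifr;
	struct bcm_tx tx;
	int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);

	if (s < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strcpy(ifr.ifr_name, "can0");		/* placeholder interface */
	ioctl(s, SIOCGIFINDEX, &ifr);
	addr.can_ifindex = ifr.ifr_ifindex;
	connect(s, (struct sockaddr *)&addr, sizeof(addr));

	memset(&tx, 0, sizeof(tx));
	tx.head.opcode = TX_SETUP;
	tx.head.flags = SETTIMER | STARTTIMER | TX_CP_CAN_ID;
	tx.head.count = 0;			/* repeat forever at ival2 rate */
	tx.head.ival2.tv_usec = 100000;		/* 100 ms cycle */
	tx.head.can_id = 0x123;
	tx.head.nframes = 1;			/* values above MAX_NFRAMES now get -EINVAL */
	tx.frame[0].can_dlc = 2;
	tx.frame[0].data[0] = 0xde;
	tx.frame[0].data[1] = 0xad;

	write(s, &tx, sizeof(tx));
	pause();
	close(s);
	return 0;
}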
diff --git a/net/can/raw.c b/net/can/raw.c
index a10e3338f084..7d77e67e57af 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -647,12 +647,12 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
if (err < 0)
goto free_skb;
- err = sock_tx_timestamp(msg, sk, skb_tx(skb));
+ err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
if (err < 0)
goto free_skb;
/* to be able to check the received tx sock reference in raw_rcv() */
- skb_tx(skb)->prevent_sk_orphan = 1;
+ skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
skb->dev = dev;
skb->sk = sk;
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 251997a95483..4df1b7a6c1bf 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -746,13 +746,12 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
mask |= POLLERR;
if (sk->sk_shutdown & RCV_SHUTDOWN)
- mask |= POLLRDHUP;
+ mask |= POLLRDHUP | POLLIN | POLLRDNORM;
if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
/* readable? */
- if (!skb_queue_empty(&sk->sk_receive_queue) ||
- (sk->sk_shutdown & RCV_SHUTDOWN))
+ if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
/* Connection-based need to check for termination and startup */
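With the datagram_poll() change, a datagram socket whose receive side has been shut down now reports POLLIN | POLLRDNORM together with POLLRDHUP, so a poll()-driven reader wakes up and can attempt a read instead of sleeping until its timeout. A small user-space illustration of the intended behaviour (the loopback destination is arbitrary):

/* Sketch: poll() on a UDP socket after shutdown(SHUT_RD). */
#define _GNU_SOURCE			/* for POLLRDHUP */
#include <poll.h>
#include <stdio.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in dst = { .sin_family = AF_INET,
				   .sin_port = htons(9),
				   .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDHUP };

	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
	shutdown(fd, SHUT_RD);		/* sets RCV_SHUTDOWN on the socket */

	/* Expected after the patch: returns immediately with
	 * POLLIN | POLLRDHUP set in revents. */
	if (poll(&pfd, 1, 1000) > 0)
		printf("revents: 0x%x\n", pfd.revents);

	close(fd);
	return 0;
}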
diff --git a/net/core/dev.c b/net/core/dev.c
index 1ae654391442..7d149550e8d6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -129,6 +129,7 @@
#include <linux/random.h>
#include <trace/events/napi.h>
#include <linux/pci.h>
+#include <linux/inetdevice.h>
#include "net-sysfs.h"
@@ -371,6 +372,14 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
* --ANK (980803)
*/
+static inline struct list_head *ptype_head(const struct packet_type *pt)
+{
+ if (pt->type == htons(ETH_P_ALL))
+ return &ptype_all;
+ else
+ return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
+}
+
/**
* dev_add_pack - add packet handler
* @pt: packet type declaration
@@ -386,16 +395,11 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
void dev_add_pack(struct packet_type *pt)
{
- int hash;
+ struct list_head *head = ptype_head(pt);
- spin_lock_bh(&ptype_lock);
- if (pt->type == htons(ETH_P_ALL))
- list_add_rcu(&pt->list, &ptype_all);
- else {
- hash = ntohs(pt->type) & PTYPE_HASH_MASK;
- list_add_rcu(&pt->list, &ptype_base[hash]);
- }
- spin_unlock_bh(&ptype_lock);
+ spin_lock(&ptype_lock);
+ list_add_rcu(&pt->list, head);
+ spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
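ptype_head() gives dev_add_pack() and __dev_remove_pack() a single place that decides between the ptype_all list (ETH_P_ALL taps) and the hashed ptype_base[] buckets. For context, a hedged kernel-module sketch of registering such a handler; the handler body is illustrative only:

/* Sketch: a packet tap.  type == htons(ETH_P_ALL) lands on ptype_all,
 * any other type hashes into ptype_base[]. */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

static int tap_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	/* Illustrative: drop our copy of every frame. */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type tap_ptype = {
	.type = htons(ETH_P_ALL),	/* or e.g. htons(ETH_P_IP) */
	.func = tap_rcv,
};

static int __init tap_init(void)
{
	dev_add_pack(&tap_ptype);
	return 0;
}

static void __exit tap_exit(void)
{
	dev_remove_pack(&tap_ptype);
}

module_init(tap_init);
module_exit(tap_exit);
MODULE_LICENSE("GPL");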
@@ -414,15 +418,10 @@ EXPORT_SYMBOL(dev_add_pack);
*/
void __dev_remove_pack(struct packet_type *pt)
{
- struct list_head *head;
+ struct list_head *head = ptype_head(pt);
struct packet_type *pt1;
- spin_lock_bh(&ptype_lock);
-
- if (pt->type == htons(ETH_P_ALL))
- head = &ptype_all;
- else
- head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
+ spin_lock(&ptype_lock);
list_for_each_entry(pt1, head, list) {
if (pt == pt1) {
@@ -433,7 +432,7 @@ void __dev_remove_pack(struct packet_type *pt)
printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
out:
- spin_unlock_bh(&ptype_lock);
+ spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);
@@ -1484,8 +1483,9 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
skb_orphan(skb);
nf_reset(skb);
- if (!(dev->flags & IFF_UP) ||
- (skb->len > (dev->mtu + dev->hard_header_len))) {
+ if (unlikely(!(dev->flags & IFF_UP) ||
+ (skb->len > (dev->mtu + dev->hard_header_len)))) {
+ atomic_long_inc(&dev->rx_dropped);
kfree_skb(skb);
return NET_RX_DROP;
}
@@ -1568,6 +1568,41 @@ void netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
+#ifdef CONFIG_RPS
+/**
+ * netif_set_real_num_rx_queues - set actual number of RX queues used
+ * @dev: Network device
+ * @rxq: Actual number of RX queues
+ *
+ * This must be called either with the rtnl_lock held or before
+ * registration of the net device. Returns 0 on success, or a
+ * negative error code. If called before registration, it also
+ * sets the maximum number of queues, and always succeeds.
+ */
+int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
+{
+ int rc;
+
+ if (dev->reg_state == NETREG_REGISTERED) {
+ ASSERT_RTNL();
+
+ if (rxq > dev->num_rx_queues)
+ return -EINVAL;
+
+ rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
+ rxq);
+ if (rc)
+ return rc;
+ } else {
+ dev->num_rx_queues = rxq;
+ }
+
+ dev->real_num_rx_queues = rxq;
+ return 0;
+}
+EXPORT_SYMBOL(netif_set_real_num_rx_queues);
+#endif
+
static inline void __netif_reschedule(struct Qdisc *q)
{
struct softnet_data *sd;
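The new netif_set_real_num_rx_queues() mirrors its TX counterpart: called before register_netdevice() it simply sets both the maximum and the active RX queue count, called afterwards it requires the rtnl lock and may only shrink within num_rx_queues. A hedged driver-style fragment, with the function name and queue counts as placeholders:

/* Sketch: a multiqueue driver trimming queue counts before registration,
 * after probing how many queues the hardware actually enabled. */
#include <linux/netdevice.h>

static int example_setup_queues(struct net_device *dev,
				unsigned int hw_txqs, unsigned int hw_rxqs)
{
	/* Before register_netdev() this always succeeds for RX and also
	 * adjusts the maximum; after registration it must be called under
	 * rtnl_lock() and hw_rxqs may not exceed dev->num_rx_queues. */
	netif_set_real_num_tx_queues(dev, hw_txqs);
	return netif_set_real_num_rx_queues(dev, hw_rxqs);
}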
@@ -1902,14 +1937,14 @@ static int dev_gso_segment(struct sk_buff *skb)
/*
* Try to orphan skb early, right before transmission by the device.
- * We cannot orphan skb if tx timestamp is requested, since
- * drivers need to call skb_tstamp_tx() to send the timestamp.
+ * We cannot orphan skb if tx timestamp is requested or the sk-reference
+ * is needed on driver level for other reasons, e.g. see net/can/raw.c
*/
static inline void skb_orphan_try(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
- if (sk && !skb_tx(skb)->flags) {
+ if (sk && !skb_shinfo(skb)->tx_flags) {
/* skb_tx_hash() wont be able to get sk.
* We copy sk_hash into skb->rxhash
*/
@@ -1930,7 +1965,7 @@ static inline int skb_needs_linearize(struct sk_buff *skb,
struct net_device *dev)
{
return skb_is_nonlinear(skb) &&
- ((skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
+ ((skb_has_frag_list(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
(skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
illegal_highdma(dev, skb))));
}
@@ -2058,16 +2093,16 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
struct sk_buff *skb)
{
int queue_index;
- struct sock *sk = skb->sk;
+ const struct net_device_ops *ops = dev->netdev_ops;
- queue_index = sk_tx_queue_get(sk);
- if (queue_index < 0) {
- const struct net_device_ops *ops = dev->netdev_ops;
+ if (ops->ndo_select_queue) {
+ queue_index = ops->ndo_select_queue(dev, skb);
+ queue_index = dev_cap_txqueue(dev, queue_index);
+ } else {
+ struct sock *sk = skb->sk;
+ queue_index = sk_tx_queue_get(sk);
+ if (queue_index < 0) {
- if (ops->ndo_select_queue) {
- queue_index = ops->ndo_select_queue(dev, skb);
- queue_index = dev_cap_txqueue(dev, queue_index);
- } else {
queue_index = 0;
if (dev->real_num_tx_queues > 1)
queue_index = skb_tx_hash(dev, skb);
@@ -2143,6 +2178,9 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
return rc;
}
+static DEFINE_PER_CPU(int, xmit_recursion);
+#define RECURSION_LIMIT 3
+
/**
* dev_queue_xmit - transmit a buffer
* @skb: buffer to transmit
@@ -2208,10 +2246,15 @@ int dev_queue_xmit(struct sk_buff *skb)
if (txq->xmit_lock_owner != cpu) {
+ if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
+ goto recursion_alert;
+
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_tx_queue_stopped(txq)) {
+ __this_cpu_inc(xmit_recursion);
rc = dev_hard_start_xmit(skb, dev, txq);
+ __this_cpu_dec(xmit_recursion);
if (dev_xmit_complete(rc)) {
HARD_TX_UNLOCK(dev, txq);
goto out;
@@ -2223,7 +2266,9 @@ int dev_queue_xmit(struct sk_buff *skb)
"queue packet!\n", dev->name);
} else {
/* Recursion is detected! It is possible,
- * unfortunately */
+ * unfortunately
+ */
+recursion_alert:
if (net_ratelimit())
printk(KERN_CRIT "Dead loop on virtual device "
"%s, fix it urgently!\n", dev->name);
@@ -2259,69 +2304,44 @@ static inline void ____napi_schedule(struct softnet_data *sd,
__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
-#ifdef CONFIG_RPS
-
-/* One global table that all flow-based protocols share. */
-struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
-EXPORT_SYMBOL(rps_sock_flow_table);
-
/*
- * get_rps_cpu is called from netif_receive_skb and returns the target
- * CPU from the RPS map of the receiving queue for a given skb.
- * rcu_read_lock must be held on entry.
+ * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
+ * and src/dst port numbers. Returns a non-zero hash number on success
+ * and 0 on failure.
*/
-static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
- struct rps_dev_flow **rflowp)
+__u32 __skb_get_rxhash(struct sk_buff *skb)
{
+ int nhoff, hash = 0, poff;
struct ipv6hdr *ip6;
struct iphdr *ip;
- struct netdev_rx_queue *rxqueue;
- struct rps_map *map;
- struct rps_dev_flow_table *flow_table;
- struct rps_sock_flow_table *sock_flow_table;
- int cpu = -1;
u8 ip_proto;
- u16 tcpu;
u32 addr1, addr2, ihl;
union {
u32 v32;
u16 v16[2];
} ports;
- if (skb_rx_queue_recorded(skb)) {
- u16 index = skb_get_rx_queue(skb);
- if (unlikely(index >= dev->num_rx_queues)) {
- WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
- "on queue %u, but number of RX queues is %u\n",
- dev->name, index, dev->num_rx_queues);
- goto done;
- }
- rxqueue = dev->_rx + index;
- } else
- rxqueue = dev->_rx;
-
- if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
- goto done;
-
- if (skb->rxhash)
- goto got_hash; /* Skip hash computation on packet header */
+ nhoff = skb_network_offset(skb);
switch (skb->protocol) {
case __constant_htons(ETH_P_IP):
- if (!pskb_may_pull(skb, sizeof(*ip)))
+ if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
goto done;
- ip = (struct iphdr *) skb->data;
- ip_proto = ip->protocol;
+ ip = (struct iphdr *) (skb->data + nhoff);
+ if (ip->frag_off & htons(IP_MF | IP_OFFSET))
+ ip_proto = 0;
+ else
+ ip_proto = ip->protocol;
addr1 = (__force u32) ip->saddr;
addr2 = (__force u32) ip->daddr;
ihl = ip->ihl;
break;
case __constant_htons(ETH_P_IPV6):
- if (!pskb_may_pull(skb, sizeof(*ip6)))
+ if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
goto done;
- ip6 = (struct ipv6hdr *) skb->data;
+ ip6 = (struct ipv6hdr *) (skb->data + nhoff);
ip_proto = ip6->nexthdr;
addr1 = (__force u32) ip6->saddr.s6_addr32[3];
addr2 = (__force u32) ip6->daddr.s6_addr32[3];
@@ -2330,33 +2350,81 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
default:
goto done;
}
- switch (ip_proto) {
- case IPPROTO_TCP:
- case IPPROTO_UDP:
- case IPPROTO_DCCP:
- case IPPROTO_ESP:
- case IPPROTO_AH:
- case IPPROTO_SCTP:
- case IPPROTO_UDPLITE:
- if (pskb_may_pull(skb, (ihl * 4) + 4)) {
- ports.v32 = * (__force u32 *) (skb->data + (ihl * 4));
+
+ ports.v32 = 0;
+ poff = proto_ports_offset(ip_proto);
+ if (poff >= 0) {
+ nhoff += ihl * 4 + poff;
+ if (pskb_may_pull(skb, nhoff + 4)) {
+ ports.v32 = * (__force u32 *) (skb->data + nhoff);
if (ports.v16[1] < ports.v16[0])
swap(ports.v16[0], ports.v16[1]);
- break;
}
- default:
- ports.v32 = 0;
- break;
}
/* get a consistent hash (same value on both flow directions) */
if (addr2 < addr1)
swap(addr1, addr2);
- skb->rxhash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
- if (!skb->rxhash)
- skb->rxhash = 1;
-got_hash:
+ hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
+ if (!hash)
+ hash = 1;
+
+done:
+ return hash;
+}
+EXPORT_SYMBOL(__skb_get_rxhash);
+
+#ifdef CONFIG_RPS
+
+/* One global table that all flow-based protocols share. */
+struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
+EXPORT_SYMBOL(rps_sock_flow_table);
+
+/*
+ * get_rps_cpu is called from netif_receive_skb and returns the target
+ * CPU from the RPS map of the receiving queue for a given skb.
+ * rcu_read_lock must be held on entry.
+ */
+static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
+ struct rps_dev_flow **rflowp)
+{
+ struct netdev_rx_queue *rxqueue;
+ struct rps_map *map = NULL;
+ struct rps_dev_flow_table *flow_table;
+ struct rps_sock_flow_table *sock_flow_table;
+ int cpu = -1;
+ u16 tcpu;
+
+ if (skb_rx_queue_recorded(skb)) {
+ u16 index = skb_get_rx_queue(skb);
+ if (unlikely(index >= dev->real_num_rx_queues)) {
+ WARN_ONCE(dev->real_num_rx_queues > 1,
+ "%s received packet on queue %u, but number "
+ "of RX queues is %u\n",
+ dev->name, index, dev->real_num_rx_queues);
+ goto done;
+ }
+ rxqueue = dev->_rx + index;
+ } else
+ rxqueue = dev->_rx;
+
+ if (rxqueue->rps_map) {
+ map = rcu_dereference(rxqueue->rps_map);
+ if (map && map->len == 1) {
+ tcpu = map->cpus[0];
+ if (cpu_online(tcpu))
+ cpu = tcpu;
+ goto done;
+ }
+ } else if (!rxqueue->rps_flow_table) {
+ goto done;
+ }
+
+ skb_reset_network_header(skb);
+ if (!skb_get_rxhash(skb))
+ goto done;
+
flow_table = rcu_dereference(rxqueue->rps_flow_table);
sock_flow_table = rcu_dereference(rps_sock_flow_table);
if (flow_table && sock_flow_table) {
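__skb_get_rxhash() derives a flow hash that is identical for both directions of a flow by putting the address pair and the port pair into a canonical order before hashing, and it reserves 0 to mean "no hash computed". A hedged user-space sketch of that idea; mix32() is only a stand-in for the kernel's jhash_3words():

/* Sketch: a direction-independent flow hash. */
#include <stdint.h>
#include <stdio.h>

static uint32_t mix32(uint32_t a, uint32_t b, uint32_t c)
{
	/* Illustrative mixer only; the kernel uses jhash_3words(). */
	uint32_t h = a * 0x9e3779b1u;

	h ^= b; h *= 0x85ebca6bu;
	h ^= c; h *= 0xc2b2ae35u;
	h ^= h >> 16;
	return h;
}

static uint32_t flow_hash(uint32_t saddr, uint32_t daddr,
			  uint16_t sport, uint16_t dport)
{
	uint32_t ports, h, ta;
	uint16_t tp;

	/* Canonicalize the tuple so both flow directions hash identically. */
	if (daddr < saddr) {
		ta = saddr; saddr = daddr; daddr = ta;
	}
	if (dport < sport) {
		tp = sport; sport = dport; dport = tp;
	}
	ports = ((uint32_t)sport << 16) | dport;

	h = mix32(saddr, daddr, ports);
	return h ? h : 1;	/* 0 is reserved for "no hash" */
}

int main(void)
{
	/* Both calls print the same value. */
	printf("%08x\n", flow_hash(0x0a000001, 0x0a000002, 1234, 80));
	printf("%08x\n", flow_hash(0x0a000002, 0x0a000001, 80, 1234));
	return 0;
}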
@@ -2396,7 +2464,6 @@ got_hash:
}
}
- map = rcu_dereference(rxqueue->rps_map);
if (map) {
tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
@@ -2482,6 +2549,7 @@ enqueue:
local_irq_restore(flags);
+ atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
return NET_RX_DROP;
}
@@ -2636,11 +2704,10 @@ EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
* the ingress scheduler, you just cant add policies on ingress.
*
*/
-static int ing_filter(struct sk_buff *skb)
+static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
{
struct net_device *dev = skb->dev;
u32 ttl = G_TC_RTTL(skb->tc_verd);
- struct netdev_queue *rxq;
int result = TC_ACT_OK;
struct Qdisc *q;
@@ -2654,8 +2721,6 @@ static int ing_filter(struct sk_buff *skb)
skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
- rxq = &dev->rx_queue;
-
q = rxq->qdisc;
if (q != &noop_qdisc) {
spin_lock(qdisc_lock(q));
@@ -2671,7 +2736,9 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
struct packet_type **pt_prev,
int *ret, struct net_device *orig_dev)
{
- if (skb->dev->rx_queue.qdisc == &noop_qdisc)
+ struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
+
+ if (!rxq || rxq->qdisc == &noop_qdisc)
goto out;
if (*pt_prev) {
@@ -2679,7 +2746,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
*pt_prev = NULL;
}
- switch (ing_filter(skb)) {
+ switch (ing_filter(skb, rxq)) {
case TC_ACT_SHOT:
case TC_ACT_STOLEN:
kfree_skb(skb);
@@ -2828,8 +2895,8 @@ static int __netif_receive_skb(struct sk_buff *skb)
if (!netdev_tstamp_prequeue)
net_timestamp_check(skb);
- if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
- return NET_RX_SUCCESS;
+ if (vlan_tx_tag_present(skb))
+ vlan_hwaccel_do_receive(skb);
/* if we've gotten here through NAPI, check netpoll */
if (netpoll_receive_skb(skb))
@@ -2930,6 +2997,7 @@ ncls:
if (pt_prev) {
ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
} else {
+ atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
/* Jamal, now you will not able to escape explaining
* me how you were going to use this. :-)
@@ -3050,7 +3118,7 @@ out:
return netif_receive_skb(skb);
}
-static void napi_gro_flush(struct napi_struct *napi)
+inline void napi_gro_flush(struct napi_struct *napi)
{
struct sk_buff *skb, *next;
@@ -3063,6 +3131,7 @@ static void napi_gro_flush(struct napi_struct *napi)
napi->gro_count = 0;
napi->gro_list = NULL;
}
+EXPORT_SYMBOL(napi_gro_flush);
enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
@@ -3077,7 +3146,7 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
goto normal;
- if (skb_is_gso(skb) || skb_has_frags(skb))
+ if (skb_is_gso(skb) || skb_has_frag_list(skb))
goto normal;
rcu_read_lock();
@@ -3143,7 +3212,7 @@ pull:
put_page(skb_shinfo(skb)->frags[0].page);
memmove(skb_shinfo(skb)->frags,
skb_shinfo(skb)->frags + 1,
- --skb_shinfo(skb)->nr_frags);
+ --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
}
}
@@ -3156,16 +3225,18 @@ normal:
}
EXPORT_SYMBOL(dev_gro_receive);
-static gro_result_t
+static inline gro_result_t
__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
struct sk_buff *p;
for (p = napi->gro_list; p; p = p->next) {
- NAPI_GRO_CB(p)->same_flow =
- (p->dev == skb->dev) &&
- !compare_ether_header(skb_mac_header(p),
+ unsigned long diffs;
+
+ diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
+ diffs |= compare_ether_header(skb_mac_header(p),
skb_gro_mac_header(skb));
+ NAPI_GRO_CB(p)->same_flow = !diffs;
NAPI_GRO_CB(p)->flush = 0;
}
@@ -4845,7 +4916,7 @@ static void rollback_registered_many(struct list_head *head)
dev = list_first_entry(head, struct net_device, unreg_list);
call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
- synchronize_net();
+ rcu_barrier();
list_for_each_entry(dev, head, unreg_list)
dev_put(dev);
@@ -4871,7 +4942,6 @@ static void __netdev_init_queue_locks_one(struct net_device *dev,
static void netdev_init_queue_locks(struct net_device *dev)
{
netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
- __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
}
unsigned long netdev_fix_features(unsigned long features, const char *name)
@@ -4941,6 +5011,34 @@ void netif_stacked_transfer_operstate(const struct net_device *rootdev,
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);
+static int netif_alloc_rx_queues(struct net_device *dev)
+{
+#ifdef CONFIG_RPS
+ unsigned int i, count = dev->num_rx_queues;
+
+ if (count) {
+ struct netdev_rx_queue *rx;
+
+ rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
+ if (!rx) {
+ pr_err("netdev: Unable to allocate %u rx queues.\n",
+ count);
+ return -ENOMEM;
+ }
+ dev->_rx = rx;
+ atomic_set(&rx->count, count);
+
+ /*
+ * Set a pointer to first element in the array which holds the
+ * reference count.
+ */
+ for (i = 0; i < count; i++)
+ rx[i].first = rx;
+ }
+#endif
+ return 0;
+}
+
/**
* register_netdevice - register a network device
* @dev: device to register
@@ -4978,24 +5076,10 @@ int register_netdevice(struct net_device *dev)
dev->iflink = -1;
-#ifdef CONFIG_RPS
- if (!dev->num_rx_queues) {
- /*
- * Allocate a single RX queue if driver never called
- * alloc_netdev_mq
- */
-
- dev->_rx = kzalloc(sizeof(struct netdev_rx_queue), GFP_KERNEL);
- if (!dev->_rx) {
- ret = -ENOMEM;
- goto out;
- }
+ ret = netif_alloc_rx_queues(dev);
+ if (ret)
+ goto out;
- dev->_rx->first = dev->_rx;
- atomic_set(&dev->_rx->count, 1);
- dev->num_rx_queues = 1;
- }
-#endif
/* Init, if this function is available */
if (dev->netdev_ops->ndo_init) {
ret = dev->netdev_ops->ndo_init(dev);
@@ -5035,6 +5119,12 @@ int register_netdevice(struct net_device *dev)
if (dev->features & NETIF_F_SG)
dev->features |= NETIF_F_GSO;
+ /* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
+ * vlan_dev_init() will do the dev->features check, so these features
+ * are enabled only if supported by underlying device.
+ */
+ dev->vlan_features |= (NETIF_F_GRO | NETIF_F_HIGHDMA);
+
ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
ret = notifier_to_errno(ret);
if (ret)
@@ -5264,7 +5354,7 @@ void netdev_run_todo(void)
/* paranoia */
BUG_ON(atomic_read(&dev->refcnt));
- WARN_ON(dev->ip_ptr);
+ WARN_ON(rcu_dereference_raw(dev->ip_ptr));
WARN_ON(dev->ip6_ptr);
WARN_ON(dev->dn_ptr);
@@ -5342,14 +5432,14 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
if (ops->ndo_get_stats64) {
memset(storage, 0, sizeof(*storage));
- return ops->ndo_get_stats64(dev, storage);
- }
- if (ops->ndo_get_stats) {
+ ops->ndo_get_stats64(dev, storage);
+ } else if (ops->ndo_get_stats) {
netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
- return storage;
+ } else {
+ netdev_stats_to_stats64(storage, &dev->stats);
+ dev_txq_stats_fold(dev, storage);
}
- netdev_stats_to_stats64(storage, &dev->stats);
- dev_txq_stats_fold(dev, storage);
+ storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
return storage;
}
EXPORT_SYMBOL(dev_get_stats);
@@ -5363,11 +5453,29 @@ static void netdev_init_one_queue(struct net_device *dev,
static void netdev_init_queues(struct net_device *dev)
{
- netdev_init_one_queue(dev, &dev->rx_queue, NULL);
netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
spin_lock_init(&dev->tx_global_lock);
}
+struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
+{
+ struct netdev_queue *queue = dev_ingress_queue(dev);
+
+#ifdef CONFIG_NET_CLS_ACT
+ if (queue)
+ return queue;
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue)
+ return NULL;
+ netdev_init_one_queue(dev, queue, NULL);
+ __netdev_init_queue_locks_one(dev, queue, NULL);
+ queue->qdisc = &noop_qdisc;
+ queue->qdisc_sleeping = &noop_qdisc;
+ rcu_assign_pointer(dev->ingress_queue, queue);
+#endif
+ return queue;
+}
+
/**
* alloc_netdev_mq - allocate network device
* @sizeof_priv: size of private data to allocate space for
@@ -5386,10 +5494,6 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
struct net_device *dev;
size_t alloc_size;
struct net_device *p;
-#ifdef CONFIG_RPS
- struct netdev_rx_queue *rx;
- int i;
-#endif
BUG_ON(strlen(name) >= sizeof(dev->name));
@@ -5415,29 +5519,12 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
goto free_p;
}
-#ifdef CONFIG_RPS
- rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
- if (!rx) {
- printk(KERN_ERR "alloc_netdev: Unable to allocate "
- "rx queues.\n");
- goto free_tx;
- }
-
- atomic_set(&rx->count, queue_count);
-
- /*
- * Set a pointer to first element in the array which holds the
- * reference count.
- */
- for (i = 0; i < queue_count; i++)
- rx[i].first = rx;
-#endif
dev = PTR_ALIGN(p, NETDEV_ALIGN);
dev->padded = (char *)dev - (char *)p;
if (dev_addr_init(dev))
- goto free_rx;
+ goto free_tx;
dev_mc_init(dev);
dev_uc_init(dev);
@@ -5449,8 +5536,8 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
dev->real_num_tx_queues = queue_count;
#ifdef CONFIG_RPS
- dev->_rx = rx;
dev->num_rx_queues = queue_count;
+ dev->real_num_rx_queues = queue_count;
#endif
dev->gso_max_size = GSO_MAX_SIZE;
@@ -5467,11 +5554,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
strcpy(dev->name, name);
return dev;
-free_rx:
-#ifdef CONFIG_RPS
- kfree(rx);
free_tx:
-#endif
kfree(tx);
free_p:
kfree(p);
@@ -5495,6 +5578,8 @@ void free_netdev(struct net_device *dev)
kfree(dev->_tx);
+ kfree(rcu_dereference_raw(dev->ingress_queue));
+
/* Flush device addresses */
dev_addr_flush(dev);
@@ -5658,6 +5743,10 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
/* Notify protocols, that we are about to destroy
this device. They should clean all the things.
+
+ Note that dev->reg_state stays at NETREG_REGISTERED.
+ This is wanted because this way 8021q and macvlan know
+ the device is just moving and can keep their slaves up.
*/
call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 7a85367b3c2f..7d7e572cedc7 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -19,6 +19,7 @@
#include <linux/netdevice.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
#include <linux/slab.h>
/*
@@ -205,18 +206,24 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo info;
const struct ethtool_ops *ops = dev->ethtool_ops;
- if (!ops->get_drvinfo)
- return -EOPNOTSUPP;
-
memset(&info, 0, sizeof(info));
info.cmd = ETHTOOL_GDRVINFO;
- ops->get_drvinfo(dev, &info);
+ if (ops && ops->get_drvinfo) {
+ ops->get_drvinfo(dev, &info);
+ } else if (dev->dev.parent && dev->dev.parent->driver) {
+ strlcpy(info.bus_info, dev_name(dev->dev.parent),
+ sizeof(info.bus_info));
+ strlcpy(info.driver, dev->dev.parent->driver->name,
+ sizeof(info.driver));
+ } else {
+ return -EOPNOTSUPP;
+ }
/*
* this method of obtaining string set info is deprecated;
* Use ETHTOOL_GSSET_INFO instead.
*/
- if (ops->get_sset_count) {
+ if (ops && ops->get_sset_count) {
int rc;
rc = ops->get_sset_count(dev, ETH_SS_TEST);
@@ -229,9 +236,9 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
if (rc >= 0)
info.n_priv_flags = rc;
}
- if (ops->get_regs_len)
+ if (ops && ops->get_regs_len)
info.regdump_len = ops->get_regs_len(dev);
- if (ops->get_eeprom_len)
+ if (ops && ops->get_eeprom_len)
info.eedump_len = ops->get_eeprom_len(dev);
if (copy_to_user(useraddr, &info, sizeof(info)))
@@ -479,6 +486,38 @@ static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
list->count++;
}
+/*
+ * ethtool does not (or did not) set masks for flow parameters that are
+ * not specified, so if both value and mask are 0 then this must be
+ * treated as equivalent to a mask with all bits set. Implement that
+ * here rather than in drivers.
+ */
+static void rx_ntuple_fix_masks(struct ethtool_rx_ntuple_flow_spec *fs)
+{
+ struct ethtool_tcpip4_spec *entry = &fs->h_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *mask = &fs->m_u.tcp_ip4_spec;
+
+ if (fs->flow_type != TCP_V4_FLOW &&
+ fs->flow_type != UDP_V4_FLOW &&
+ fs->flow_type != SCTP_V4_FLOW)
+ return;
+
+ if (!(entry->ip4src | mask->ip4src))
+ mask->ip4src = htonl(0xffffffff);
+ if (!(entry->ip4dst | mask->ip4dst))
+ mask->ip4dst = htonl(0xffffffff);
+ if (!(entry->psrc | mask->psrc))
+ mask->psrc = htons(0xffff);
+ if (!(entry->pdst | mask->pdst))
+ mask->pdst = htons(0xffff);
+ if (!(entry->tos | mask->tos))
+ mask->tos = 0xff;
+ if (!(fs->vlan_tag | fs->vlan_tag_mask))
+ fs->vlan_tag_mask = 0xffff;
+ if (!(fs->data | fs->data_mask))
+ fs->data_mask = 0xffffffffffffffffULL;
+}
+
static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev,
void __user *useraddr)
{
@@ -493,6 +532,8 @@ static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev,
if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
return -EFAULT;
+ rx_ntuple_fix_masks(&cmd.fs);
+
/*
* Cache filter in dev struct for GET operation only if
* the underlying driver doesn't have its own GET operation, and
@@ -667,19 +708,19 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
break;
case IP_USER_FLOW:
sprintf(p, "\tSrc IP addr: 0x%x\n",
- fsc->fs.h_u.raw_ip4_spec.ip4src);
+ fsc->fs.h_u.usr_ip4_spec.ip4src);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tSrc IP mask: 0x%x\n",
- fsc->fs.m_u.raw_ip4_spec.ip4src);
+ fsc->fs.m_u.usr_ip4_spec.ip4src);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tDest IP addr: 0x%x\n",
- fsc->fs.h_u.raw_ip4_spec.ip4dst);
+ fsc->fs.h_u.usr_ip4_spec.ip4dst);
p += ETH_GSTRING_LEN;
num_strings++;
sprintf(p, "\tDest IP mask: 0x%x\n",
- fsc->fs.m_u.raw_ip4_spec.ip4dst);
+ fsc->fs.m_u.usr_ip4_spec.ip4dst);
p += ETH_GSTRING_LEN;
num_strings++;
break;
@@ -775,7 +816,7 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
if (regs.len > reglen)
regs.len = reglen;
- regbuf = kmalloc(reglen, GFP_USER);
+ regbuf = vmalloc(reglen);
if (!regbuf)
return -ENOMEM;
@@ -790,7 +831,7 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
ret = 0;
out:
- kfree(regbuf);
+ vfree(regbuf);
return ret;
}
@@ -1175,8 +1216,11 @@ static int ethtool_set_gro(struct net_device *dev, char __user *useraddr)
return -EFAULT;
if (edata.data) {
- if (!dev->ethtool_ops->get_rx_csum ||
- !dev->ethtool_ops->get_rx_csum(dev))
+ u32 rxcsum = dev->ethtool_ops->get_rx_csum ?
+ dev->ethtool_ops->get_rx_csum(dev) :
+ ethtool_op_get_rx_csum(dev);
+
+ if (!rxcsum)
return -EINVAL;
dev->features |= NETIF_F_GRO;
} else
@@ -1402,14 +1446,22 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
if (!dev || !netif_device_present(dev))
return -ENODEV;
- if (!dev->ethtool_ops)
- return -EOPNOTSUPP;
-
if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
return -EFAULT;
+ if (!dev->ethtool_ops) {
+ /* ETHTOOL_GDRVINFO does not require any driver support.
+ * It is also unprivileged and does not change anything,
+ * so we can take a shortcut to it. */
+ if (ethcmd == ETHTOOL_GDRVINFO)
+ return ethtool_get_drvinfo(dev, useraddr);
+ else
+ return -EOPNOTSUPP;
+ }
+
/* Allow some commands to be done by anyone */
switch (ethcmd) {
+ case ETHTOOL_GSET:
case ETHTOOL_GDRVINFO:
case ETHTOOL_GMSGLVL:
case ETHTOOL_GCOALESCE:
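ETHTOOL_GDRVINFO is now answered even for devices without ethtool_ops (driver and bus_info are taken from the parent device) and is treated as an unprivileged command. From user space it is an ordinary SIOCETHTOOL ioctl; a hedged sketch along the lines of `ethtool -i` (interface name is a placeholder):

/* Sketch: querying driver info for one interface. */
#include <stdio.h>
#include <string.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_drvinfo info = { .cmd = ETHTOOL_GDRVINFO };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strcpy(ifr.ifr_name, "eth0");		/* placeholder interface */
	ifr.ifr_data = (void *)&info;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("driver: %s\nbus-info: %s\n", info.driver, info.bus_info);
	else
		perror("SIOCETHTOOL");

	close(fd);
	return 0;
}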
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 42e84e08a1be..21698f8c49ee 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -144,7 +144,7 @@ fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
}
EXPORT_SYMBOL_GPL(fib_rules_register);
-void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
+static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
struct fib_rule *rule, *tmp;
@@ -153,7 +153,6 @@ void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
fib_rule_put(rule);
}
}
-EXPORT_SYMBOL_GPL(fib_rules_cleanup_ops);
static void fib_rules_put_rcu(struct rcu_head *head)
{
@@ -182,7 +181,8 @@ static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
{
int ret = 0;
- if (rule->iifindex && (rule->iifindex != fl->iif))
+ if (rule->iifindex && (rule->iifindex != fl->iif) &&
+ !(fl->flags & FLOWI_FLAG_MATCH_ANY_IIF))
goto out;
if (rule->oifindex && (rule->oifindex != fl->oif))
@@ -225,9 +225,12 @@ jumped:
err = ops->action(rule, fl, flags, arg);
if (err != -EAGAIN) {
- fib_rule_get(rule);
- arg->rule = rule;
- goto out;
+ if ((arg->flags & FIB_LOOKUP_NOREF) ||
+ likely(atomic_inc_not_zero(&rule->refcnt))) {
+ arg->rule = rule;
+ goto out;
+ }
+ break;
}
}
diff --git a/net/core/filter.c b/net/core/filter.c
index 52b051f82a01..7adf50352918 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -638,10 +638,9 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
return err;
}
- rcu_read_lock_bh();
- old_fp = rcu_dereference_bh(sk->sk_filter);
+ old_fp = rcu_dereference_protected(sk->sk_filter,
+ sock_owned_by_user(sk));
rcu_assign_pointer(sk->sk_filter, fp);
- rcu_read_unlock_bh();
if (old_fp)
sk_filter_delayed_uncharge(sk, old_fp);
@@ -654,14 +653,13 @@ int sk_detach_filter(struct sock *sk)
int ret = -ENOENT;
struct sk_filter *filter;
- rcu_read_lock_bh();
- filter = rcu_dereference_bh(sk->sk_filter);
+ filter = rcu_dereference_protected(sk->sk_filter,
+ sock_owned_by_user(sk));
if (filter) {
rcu_assign_pointer(sk->sk_filter, NULL);
sk_filter_delayed_uncharge(sk, filter);
ret = 0;
}
- rcu_read_unlock_bh();
return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
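sk_attach_filter() and sk_detach_filter() now swap the filter pointer under the socket lock via rcu_dereference_protected() instead of taking rcu_read_lock_bh() on the write side. Both paths are reached through setsockopt(); a hedged user-space sketch attaching a one-instruction classic BPF program that accepts every packet:

/* Sketch: attaching and detaching a classic BPF socket filter. */
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/filter.h>

int main(void)
{
	struct sock_filter accept_all[] = {
		{ 0x06, 0, 0, 0x0000ffff },	/* BPF_RET | BPF_K, accept 0xffff bytes */
	};
	struct sock_fprog prog = {
		.len = sizeof(accept_all) / sizeof(accept_all[0]),
		.filter = accept_all,
	};
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)))
		perror("SO_ATTACH_FILTER");

	/* ... use the socket ... */

	if (setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, NULL, 0))
		perror("SO_DETACH_FILTER");

	close(fd);
	return 0;
}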
diff --git a/net/core/flow.c b/net/core/flow.c
index f67dcbfe54ef..127c8a7ffd61 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -53,8 +53,7 @@ struct flow_flush_info {
struct flow_cache {
u32 hash_shift;
- unsigned long order;
- struct flow_cache_percpu *percpu;
+ struct flow_cache_percpu __percpu *percpu;
struct notifier_block hotcpu_notifier;
int low_watermark;
int high_watermark;
@@ -64,7 +63,7 @@ struct flow_cache {
atomic_t flow_cache_genid = ATOMIC_INIT(0);
EXPORT_SYMBOL(flow_cache_genid);
static struct flow_cache flow_cache_global;
-static struct kmem_cache *flow_cachep;
+static struct kmem_cache *flow_cachep __read_mostly;
static DEFINE_SPINLOCK(flow_cache_gc_lock);
static LIST_HEAD(flow_cache_gc_list);
@@ -177,15 +176,11 @@ static u32 flow_hash_code(struct flow_cache *fc,
{
u32 *k = (u32 *) key;
- return (jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
- & (flow_cache_hash_size(fc) - 1));
+ return jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
+ & (flow_cache_hash_size(fc) - 1);
}
-#if (BITS_PER_LONG == 64)
-typedef u64 flow_compare_t;
-#else
-typedef u32 flow_compare_t;
-#endif
+typedef unsigned long flow_compare_t;
/* I hear what you're saying, use memcmp. But memcmp cannot make
* important assumptions that we can here, such as alignment and
@@ -357,62 +352,73 @@ void flow_cache_flush(void)
put_online_cpus();
}
-static void __init flow_cache_cpu_prepare(struct flow_cache *fc,
- struct flow_cache_percpu *fcp)
+static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
- fcp->hash_table = (struct hlist_head *)
- __get_free_pages(GFP_KERNEL|__GFP_ZERO, fc->order);
- if (!fcp->hash_table)
- panic("NET: failed to allocate flow cache order %lu\n", fc->order);
+ struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
+ size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);
- fcp->hash_rnd_recalc = 1;
- fcp->hash_count = 0;
- tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
+ if (!fcp->hash_table) {
+ fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
+ if (!fcp->hash_table) {
+ pr_err("NET: failed to allocate flow cache sz %zu\n", sz);
+ return -ENOMEM;
+ }
+ fcp->hash_rnd_recalc = 1;
+ fcp->hash_count = 0;
+ tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
+ }
+ return 0;
}
-static int flow_cache_cpu(struct notifier_block *nfb,
+static int __cpuinit flow_cache_cpu(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
- int cpu = (unsigned long) hcpu;
+ int res, cpu = (unsigned long) hcpu;
struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ res = flow_cache_cpu_prepare(fc, cpu);
+ if (res)
+ return notifier_from_errno(res);
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
__flow_cache_shrink(fc, fcp, 0);
+ break;
+ }
return NOTIFY_OK;
}
-static int flow_cache_init(struct flow_cache *fc)
+static int __init flow_cache_init(struct flow_cache *fc)
{
- unsigned long order;
int i;
fc->hash_shift = 10;
fc->low_watermark = 2 * flow_cache_hash_size(fc);
fc->high_watermark = 4 * flow_cache_hash_size(fc);
- for (order = 0;
- (PAGE_SIZE << order) <
- (sizeof(struct hlist_head)*flow_cache_hash_size(fc));
- order++)
- /* NOTHING */;
- fc->order = order;
fc->percpu = alloc_percpu(struct flow_cache_percpu);
+ if (!fc->percpu)
+ return -ENOMEM;
- setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
- (unsigned long) fc);
- fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
- add_timer(&fc->rnd_timer);
-
- for_each_possible_cpu(i)
- flow_cache_cpu_prepare(fc, per_cpu_ptr(fc->percpu, i));
-
+ for_each_online_cpu(i) {
+ if (flow_cache_cpu_prepare(fc, i))
+ return -ENOMEM;
+ }
fc->hotcpu_notifier = (struct notifier_block){
.notifier_call = flow_cache_cpu,
};
register_hotcpu_notifier(&fc->hotcpu_notifier);
+ setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
+ (unsigned long) fc);
+ fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
+ add_timer(&fc->rnd_timer);
+
return 0;
}
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 9fbe7f7429b0..7c2373321b74 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -232,7 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
est->last_packets = bstats->packets;
est->avpps = rate_est->pps<<10;
- spin_lock(&est_tree_lock);
+ spin_lock_bh(&est_tree_lock);
if (!elist[idx].timer.function) {
INIT_LIST_HEAD(&elist[idx].list);
setup_timer(&elist[idx].timer, est_timer, idx);
@@ -243,7 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
list_add_rcu(&est->list, &elist[idx].list);
gen_add_node(est);
- spin_unlock(&est_tree_lock);
+ spin_unlock_bh(&est_tree_lock);
return 0;
}
@@ -270,18 +270,18 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
{
struct gen_estimator *e;
- spin_lock(&est_tree_lock);
+ spin_lock_bh(&est_tree_lock);
while ((e = gen_find_node(bstats, rate_est))) {
rb_erase(&e->node, &est_root);
- write_lock_bh(&est_lock);
+ write_lock(&est_lock);
e->bstats = NULL;
- write_unlock_bh(&est_lock);
+ write_unlock(&est_lock);
list_del_rcu(&e->list);
call_rcu(&e->e_rcu, __gen_kill_estimator);
}
- spin_unlock(&est_tree_lock);
+ spin_unlock_bh(&est_tree_lock);
}
EXPORT_SYMBOL(gen_kill_estimator);
@@ -320,9 +320,9 @@ bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
ASSERT_RTNL();
- spin_lock(&est_tree_lock);
+ spin_lock_bh(&est_tree_lock);
res = gen_find_node(bstats, rate_est) != NULL;
- spin_unlock(&est_tree_lock);
+ spin_unlock_bh(&est_tree_lock);
return res;
}
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 1cd98df412df..72aceb1fe4fa 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -35,13 +35,16 @@
* in any case.
*/
-int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
+long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode)
{
- int size, err, ct;
+ int size, ct;
+ long err;
if (m->msg_namelen) {
if (mode == VERIFY_READ) {
- err = move_addr_to_kernel(m->msg_name, m->msg_namelen,
+ void __user *namep;
+ namep = (void __user __force *) m->msg_name;
+ err = move_addr_to_kernel(namep, m->msg_namelen,
address);
if (err < 0)
return err;
@@ -52,7 +55,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address,
}
size = m->msg_iovlen * sizeof(struct iovec);
- if (copy_from_user(iov, m->msg_iov, size))
+ if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
return -EFAULT;
m->msg_iov = iov;
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index a4e0a7482c2b..3ffafaa0414c 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -122,7 +122,7 @@ static void neigh_cleanup_and_release(struct neighbour *neigh)
unsigned long neigh_rand_reach_time(unsigned long base)
{
- return (base ? (net_random() % base) + (base >> 1) : 0);
+ return base ? (net_random() % base) + (base >> 1) : 0;
}
EXPORT_SYMBOL(neigh_rand_reach_time);
@@ -131,15 +131,20 @@ static int neigh_forced_gc(struct neigh_table *tbl)
{
int shrunk = 0;
int i;
+ struct neigh_hash_table *nht;
NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
write_lock_bh(&tbl->lock);
- for (i = 0; i <= tbl->hash_mask; i++) {
- struct neighbour *n, **np;
+ nht = rcu_dereference_protected(tbl->nht,
+ lockdep_is_held(&tbl->lock));
+ for (i = 0; i <= nht->hash_mask; i++) {
+ struct neighbour *n;
+ struct neighbour __rcu **np;
- np = &tbl->hash_buckets[i];
- while ((n = *np) != NULL) {
+ np = &nht->hash_buckets[i];
+ while ((n = rcu_dereference_protected(*np,
+ lockdep_is_held(&tbl->lock))) != NULL) {
/* Neighbour record may be discarded if:
* - nobody refers to it.
* - it is not permanent
@@ -147,7 +152,9 @@ static int neigh_forced_gc(struct neigh_table *tbl)
write_lock(&n->lock);
if (atomic_read(&n->refcnt) == 1 &&
!(n->nud_state & NUD_PERMANENT)) {
- *np = n->next;
+ rcu_assign_pointer(*np,
+ rcu_dereference_protected(n->next,
+ lockdep_is_held(&tbl->lock)));
n->dead = 1;
shrunk = 1;
write_unlock(&n->lock);
@@ -199,16 +206,24 @@ static void pneigh_queue_purge(struct sk_buff_head *list)
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
int i;
+ struct neigh_hash_table *nht;
- for (i = 0; i <= tbl->hash_mask; i++) {
- struct neighbour *n, **np = &tbl->hash_buckets[i];
+ nht = rcu_dereference_protected(tbl->nht,
+ lockdep_is_held(&tbl->lock));
- while ((n = *np) != NULL) {
+ for (i = 0; i <= nht->hash_mask; i++) {
+ struct neighbour *n;
+ struct neighbour __rcu **np = &nht->hash_buckets[i];
+
+ while ((n = rcu_dereference_protected(*np,
+ lockdep_is_held(&tbl->lock))) != NULL) {
if (dev && n->dev != dev) {
np = &n->next;
continue;
}
- *np = n->next;
+ rcu_assign_pointer(*np,
+ rcu_dereference_protected(n->next,
+ lockdep_is_held(&tbl->lock)));
write_lock(&n->lock);
neigh_del_timer(n);
n->dead = 1;
@@ -297,64 +312,86 @@ out_entries:
goto out;
}
-static struct neighbour **neigh_hash_alloc(unsigned int entries)
+static struct neigh_hash_table *neigh_hash_alloc(unsigned int entries)
{
- unsigned long size = entries * sizeof(struct neighbour *);
- struct neighbour **ret;
+ size_t size = entries * sizeof(struct neighbour *);
+ struct neigh_hash_table *ret;
+ struct neighbour **buckets;
- if (size <= PAGE_SIZE) {
- ret = kzalloc(size, GFP_ATOMIC);
- } else {
- ret = (struct neighbour **)
- __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
+ ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
+ if (!ret)
+ return NULL;
+ if (size <= PAGE_SIZE)
+ buckets = kzalloc(size, GFP_ATOMIC);
+ else
+ buckets = (struct neighbour **)
+ __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
+ get_order(size));
+ if (!buckets) {
+ kfree(ret);
+ return NULL;
}
+ rcu_assign_pointer(ret->hash_buckets, buckets);
+ ret->hash_mask = entries - 1;
+ get_random_bytes(&ret->hash_rnd, sizeof(ret->hash_rnd));
return ret;
}
-static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
+static void neigh_hash_free_rcu(struct rcu_head *head)
{
- unsigned long size = entries * sizeof(struct neighbour *);
+ struct neigh_hash_table *nht = container_of(head,
+ struct neigh_hash_table,
+ rcu);
+ size_t size = (nht->hash_mask + 1) * sizeof(struct neighbour *);
+ struct neighbour **buckets = nht->hash_buckets;
if (size <= PAGE_SIZE)
- kfree(hash);
+ kfree(buckets);
else
- free_pages((unsigned long)hash, get_order(size));
+ free_pages((unsigned long)buckets, get_order(size));
+ kfree(nht);
}
-static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
+static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
+ unsigned long new_entries)
{
- struct neighbour **new_hash, **old_hash;
- unsigned int i, new_hash_mask, old_entries;
+ unsigned int i, hash;
+ struct neigh_hash_table *new_nht, *old_nht;
NEIGH_CACHE_STAT_INC(tbl, hash_grows);
BUG_ON(!is_power_of_2(new_entries));
- new_hash = neigh_hash_alloc(new_entries);
- if (!new_hash)
- return;
+ old_nht = rcu_dereference_protected(tbl->nht,
+ lockdep_is_held(&tbl->lock));
+ new_nht = neigh_hash_alloc(new_entries);
+ if (!new_nht)
+ return old_nht;
- old_entries = tbl->hash_mask + 1;
- new_hash_mask = new_entries - 1;
- old_hash = tbl->hash_buckets;
-
- get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
- for (i = 0; i < old_entries; i++) {
+ for (i = 0; i <= old_nht->hash_mask; i++) {
struct neighbour *n, *next;
- for (n = old_hash[i]; n; n = next) {
- unsigned int hash_val = tbl->hash(n->primary_key, n->dev);
-
- hash_val &= new_hash_mask;
- next = n->next;
-
- n->next = new_hash[hash_val];
- new_hash[hash_val] = n;
+ for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
+ lockdep_is_held(&tbl->lock));
+ n != NULL;
+ n = next) {
+ hash = tbl->hash(n->primary_key, n->dev,
+ new_nht->hash_rnd);
+
+ hash &= new_nht->hash_mask;
+ next = rcu_dereference_protected(n->next,
+ lockdep_is_held(&tbl->lock));
+
+ rcu_assign_pointer(n->next,
+ rcu_dereference_protected(
+ new_nht->hash_buckets[hash],
+ lockdep_is_held(&tbl->lock)));
+ rcu_assign_pointer(new_nht->hash_buckets[hash], n);
}
}
- tbl->hash_buckets = new_hash;
- tbl->hash_mask = new_hash_mask;
- neigh_hash_free(old_hash, old_entries);
+ rcu_assign_pointer(tbl->nht, new_nht);
+ call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
+ return new_nht;
}
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
@@ -363,19 +400,26 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
struct neighbour *n;
int key_len = tbl->key_len;
u32 hash_val;
+ struct neigh_hash_table *nht;
NEIGH_CACHE_STAT_INC(tbl, lookups);
- read_lock_bh(&tbl->lock);
- hash_val = tbl->hash(pkey, dev);
- for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
+ rcu_read_lock_bh();
+ nht = rcu_dereference_bh(tbl->nht);
+ hash_val = tbl->hash(pkey, dev, nht->hash_rnd) & nht->hash_mask;
+
+ for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
+ n != NULL;
+ n = rcu_dereference_bh(n->next)) {
if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
- neigh_hold(n);
+ if (!atomic_inc_not_zero(&n->refcnt))
+ n = NULL;
NEIGH_CACHE_STAT_INC(tbl, hits);
break;
}
}
- read_unlock_bh(&tbl->lock);
+
+ rcu_read_unlock_bh();
return n;
}
EXPORT_SYMBOL(neigh_lookup);
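
The switch from neigh_hold() to atomic_inc_not_zero() in the lookup paths above is what makes the lockless traversal safe: once readers no longer take tbl->lock, an entry found in a chain may already have been unlinked and be on its way to neigh_destroy(), so a reference may only be taken while the count is still non-zero. A minimal kernel-style sketch of that rule, using a hypothetical helper name that is not part of this patch:

/* Take a reference on an entry found under rcu_read_lock_bh(); returns NULL
 * if the lookup lost the race against the final neigh_release(). */
static struct neighbour *neigh_get_rcu(struct neighbour *n)
{
	if (n && !atomic_inc_not_zero(&n->refcnt))
		n = NULL;
	return n;
}
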
@@ -386,20 +430,27 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
struct neighbour *n;
int key_len = tbl->key_len;
u32 hash_val;
+ struct neigh_hash_table *nht;
NEIGH_CACHE_STAT_INC(tbl, lookups);
- read_lock_bh(&tbl->lock);
- hash_val = tbl->hash(pkey, NULL);
- for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
+ rcu_read_lock_bh();
+ nht = rcu_dereference_bh(tbl->nht);
+ hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) & nht->hash_mask;
+
+ for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
+ n != NULL;
+ n = rcu_dereference_bh(n->next)) {
if (!memcmp(n->primary_key, pkey, key_len) &&
net_eq(dev_net(n->dev), net)) {
- neigh_hold(n);
+ if (!atomic_inc_not_zero(&n->refcnt))
+ n = NULL;
NEIGH_CACHE_STAT_INC(tbl, hits);
break;
}
}
- read_unlock_bh(&tbl->lock);
+
+ rcu_read_unlock_bh();
return n;
}
EXPORT_SYMBOL(neigh_lookup_nodev);
@@ -411,6 +462,7 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
int key_len = tbl->key_len;
int error;
struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
+ struct neigh_hash_table *nht;
if (!n) {
rc = ERR_PTR(-ENOBUFS);
@@ -437,18 +489,24 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
write_lock_bh(&tbl->lock);
+ nht = rcu_dereference_protected(tbl->nht,
+ lockdep_is_held(&tbl->lock));
- if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
- neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);
+ if (atomic_read(&tbl->entries) > (nht->hash_mask + 1))
+ nht = neigh_hash_grow(tbl, (nht->hash_mask + 1) << 1);
- hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
+ hash_val = tbl->hash(pkey, dev, nht->hash_rnd) & nht->hash_mask;
if (n->parms->dead) {
rc = ERR_PTR(-EINVAL);
goto out_tbl_unlock;
}
- for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
+ for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
+ lockdep_is_held(&tbl->lock));
+ n1 != NULL;
+ n1 = rcu_dereference_protected(n1->next,
+ lockdep_is_held(&tbl->lock))) {
if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
neigh_hold(n1);
rc = n1;
@@ -456,10 +514,12 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
}
}
- n->next = tbl->hash_buckets[hash_val];
- tbl->hash_buckets[hash_val] = n;
n->dead = 0;
neigh_hold(n);
+ rcu_assign_pointer(n->next,
+ rcu_dereference_protected(nht->hash_buckets[hash_val],
+ lockdep_is_held(&tbl->lock)));
+ rcu_assign_pointer(nht->hash_buckets[hash_val], n);
write_unlock_bh(&tbl->lock);
NEIGH_PRINTK2("neigh %p is created.\n", n);
rc = n;
@@ -616,6 +676,12 @@ static inline void neigh_parms_put(struct neigh_parms *parms)
neigh_parms_destroy(parms);
}
+static void neigh_destroy_rcu(struct rcu_head *head)
+{
+ struct neighbour *neigh = container_of(head, struct neighbour, rcu);
+
+ kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
+}
/*
* neighbour must already be out of the table;
*
@@ -655,7 +721,7 @@ void neigh_destroy(struct neighbour *neigh)
NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
atomic_dec(&neigh->tbl->entries);
- kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
+ call_rcu(&neigh->rcu, neigh_destroy_rcu);
}
EXPORT_SYMBOL(neigh_destroy);
@@ -696,12 +762,16 @@ static void neigh_connect(struct neighbour *neigh)
static void neigh_periodic_work(struct work_struct *work)
{
struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
- struct neighbour *n, **np;
+ struct neighbour *n;
+ struct neighbour __rcu **np;
unsigned int i;
+ struct neigh_hash_table *nht;
NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
write_lock_bh(&tbl->lock);
+ nht = rcu_dereference_protected(tbl->nht,
+ lockdep_is_held(&tbl->lock));
/*
* periodically recompute ReachableTime from random function
@@ -715,10 +785,11 @@ static void neigh_periodic_work(struct work_struct *work)
neigh_rand_reach_time(p->base_reachable_time);
}
- for (i = 0 ; i <= tbl->hash_mask; i++) {
- np = &tbl->hash_buckets[i];
+ for (i = 0 ; i <= nht->hash_mask; i++) {
+ np = &nht->hash_buckets[i];
- while ((n = *np) != NULL) {
+ while ((n = rcu_dereference_protected(*np,
+ lockdep_is_held(&tbl->lock))) != NULL) {
unsigned int state;
write_lock(&n->lock);
@@ -766,9 +837,9 @@ next_elt:
static __inline__ int neigh_max_probes(struct neighbour *n)
{
struct neigh_parms *p = n->parms;
- return (n->nud_state & NUD_PROBE ?
+ return (n->nud_state & NUD_PROBE) ?
p->ucast_probes :
- p->ucast_probes + p->app_probes + p->mcast_probes);
+ p->ucast_probes + p->app_probes + p->mcast_probes;
}
static void neigh_invalidate(struct neighbour *neigh)
@@ -1175,8 +1246,8 @@ static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
}
/* This function can be used in contexts, where only old dev_queue_xmit
- worked, f.e. if you want to override normal output path (eql, shaper),
- but resolution is not made yet.
+ * worked, f.e. if you want to override normal output path (eql, shaper),
+ * but resolution is not made yet.
*/
int neigh_compat_output(struct sk_buff *skb)
@@ -1210,7 +1281,9 @@ int neigh_resolve_output(struct sk_buff *skb)
if (!neigh_event_send(neigh, skb)) {
int err;
struct net_device *dev = neigh->dev;
- if (dev->header_ops->cache && !dst->hh) {
+ if (dev->header_ops->cache &&
+ !dst->hh &&
+ !(dst->flags & DST_NOCACHE)) {
write_lock_bh(&neigh->lock);
if (!dst->hh)
neigh_hh_init(neigh, dst, dst->ops->protocol);
@@ -1436,17 +1509,14 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
panic("cannot create neighbour proc dir entry");
#endif
- tbl->hash_mask = 1;
- tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
+ tbl->nht = neigh_hash_alloc(8);
phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
- if (!tbl->hash_buckets || !tbl->phash_buckets)
+ if (!tbl->nht || !tbl->phash_buckets)
panic("cannot allocate neighbour cache hashes");
- get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
-
rwlock_init(&tbl->lock);
INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work);
schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
@@ -1502,8 +1572,8 @@ int neigh_table_clear(struct neigh_table *tbl)
}
write_unlock(&neigh_tbl_lock);
- neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
- tbl->hash_buckets = NULL;
+ call_rcu(&tbl->nht->rcu, neigh_hash_free_rcu);
+ tbl->nht = NULL;
kfree(tbl->phash_buckets);
tbl->phash_buckets = NULL;
@@ -1529,6 +1599,7 @@ static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
struct net_device *dev = NULL;
int err = -EINVAL;
+ ASSERT_RTNL();
if (nlmsg_len(nlh) < sizeof(*ndm))
goto out;
@@ -1538,7 +1609,7 @@ static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
ndm = nlmsg_data(nlh);
if (ndm->ndm_ifindex) {
- dev = dev_get_by_index(net, ndm->ndm_ifindex);
+ dev = __dev_get_by_index(net, ndm->ndm_ifindex);
if (dev == NULL) {
err = -ENODEV;
goto out;
@@ -1554,34 +1625,31 @@ static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
read_unlock(&neigh_tbl_lock);
if (nla_len(dst_attr) < tbl->key_len)
- goto out_dev_put;
+ goto out;
if (ndm->ndm_flags & NTF_PROXY) {
err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
- goto out_dev_put;
+ goto out;
}
if (dev == NULL)
- goto out_dev_put;
+ goto out;
neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
if (neigh == NULL) {
err = -ENOENT;
- goto out_dev_put;
+ goto out;
}
err = neigh_update(neigh, NULL, NUD_FAILED,
NEIGH_UPDATE_F_OVERRIDE |
NEIGH_UPDATE_F_ADMIN);
neigh_release(neigh);
- goto out_dev_put;
+ goto out;
}
read_unlock(&neigh_tbl_lock);
err = -EAFNOSUPPORT;
-out_dev_put:
- if (dev)
- dev_put(dev);
out:
return err;
}
@@ -1595,6 +1663,7 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
struct net_device *dev = NULL;
int err;
+ ASSERT_RTNL();
err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
if (err < 0)
goto out;
@@ -1605,14 +1674,14 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
ndm = nlmsg_data(nlh);
if (ndm->ndm_ifindex) {
- dev = dev_get_by_index(net, ndm->ndm_ifindex);
+ dev = __dev_get_by_index(net, ndm->ndm_ifindex);
if (dev == NULL) {
err = -ENODEV;
goto out;
}
if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
- goto out_dev_put;
+ goto out;
}
read_lock(&neigh_tbl_lock);
@@ -1626,7 +1695,7 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
read_unlock(&neigh_tbl_lock);
if (nla_len(tb[NDA_DST]) < tbl->key_len)
- goto out_dev_put;
+ goto out;
dst = nla_data(tb[NDA_DST]);
lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
@@ -1639,29 +1708,29 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
pn->flags = ndm->ndm_flags;
err = 0;
}
- goto out_dev_put;
+ goto out;
}
if (dev == NULL)
- goto out_dev_put;
+ goto out;
neigh = neigh_lookup(tbl, dst, dev);
if (neigh == NULL) {
if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
err = -ENOENT;
- goto out_dev_put;
+ goto out;
}
neigh = __neigh_lookup_errno(tbl, dst, dev);
if (IS_ERR(neigh)) {
err = PTR_ERR(neigh);
- goto out_dev_put;
+ goto out;
}
} else {
if (nlh->nlmsg_flags & NLM_F_EXCL) {
err = -EEXIST;
neigh_release(neigh);
- goto out_dev_put;
+ goto out;
}
if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
@@ -1674,15 +1743,11 @@ static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
} else
err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
neigh_release(neigh);
- goto out_dev_put;
+ goto out;
}
read_unlock(&neigh_tbl_lock);
err = -EAFNOSUPPORT;
-
-out_dev_put:
- if (dev)
- dev_put(dev);
out:
return err;
}
@@ -1748,18 +1813,22 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
unsigned long now = jiffies;
unsigned int flush_delta = now - tbl->last_flush;
unsigned int rand_delta = now - tbl->last_rand;
-
+ struct neigh_hash_table *nht;
struct ndt_config ndc = {
.ndtc_key_len = tbl->key_len,
.ndtc_entry_size = tbl->entry_size,
.ndtc_entries = atomic_read(&tbl->entries),
.ndtc_last_flush = jiffies_to_msecs(flush_delta),
.ndtc_last_rand = jiffies_to_msecs(rand_delta),
- .ndtc_hash_rnd = tbl->hash_rnd,
- .ndtc_hash_mask = tbl->hash_mask,
.ndtc_proxy_qlen = tbl->proxy_queue.qlen,
};
+ rcu_read_lock_bh();
+ nht = rcu_dereference_bh(tbl->nht);
+ ndc.ndtc_hash_rnd = nht->hash_rnd;
+ ndc.ndtc_hash_mask = nht->hash_mask;
+ rcu_read_unlock_bh();
+
NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
}
@@ -2087,18 +2156,23 @@ static void neigh_update_notify(struct neighbour *neigh)
static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
struct netlink_callback *cb)
{
- struct net * net = sock_net(skb->sk);
+ struct net *net = sock_net(skb->sk);
struct neighbour *n;
int rc, h, s_h = cb->args[1];
int idx, s_idx = idx = cb->args[2];
+ struct neigh_hash_table *nht;
- read_lock_bh(&tbl->lock);
- for (h = 0; h <= tbl->hash_mask; h++) {
+ rcu_read_lock_bh();
+ nht = rcu_dereference_bh(tbl->nht);
+
+ for (h = 0; h <= nht->hash_mask; h++) {
if (h < s_h)
continue;
if (h > s_h)
s_idx = 0;
- for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
+ for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
+ n != NULL;
+ n = rcu_dereference_bh(n->next)) {
if (!net_eq(dev_net(n->dev), net))
continue;
if (idx < s_idx)
@@ -2107,17 +2181,16 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
cb->nlh->nlmsg_seq,
RTM_NEWNEIGH,
NLM_F_MULTI) <= 0) {
- read_unlock_bh(&tbl->lock);
rc = -1;
goto out;
}
- next:
+next:
idx++;
}
}
- read_unlock_bh(&tbl->lock);
rc = skb->len;
out:
+ rcu_read_unlock_bh();
cb->args[1] = h;
cb->args[2] = idx;
return rc;
@@ -2150,15 +2223,22 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
{
int chain;
+ struct neigh_hash_table *nht;
- read_lock_bh(&tbl->lock);
- for (chain = 0; chain <= tbl->hash_mask; chain++) {
+ rcu_read_lock_bh();
+ nht = rcu_dereference_bh(tbl->nht);
+
+ read_lock(&tbl->lock); /* avoid resizes */
+ for (chain = 0; chain <= nht->hash_mask; chain++) {
struct neighbour *n;
- for (n = tbl->hash_buckets[chain]; n; n = n->next)
+ for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
+ n != NULL;
+ n = rcu_dereference_bh(n->next))
cb(n, cookie);
}
- read_unlock_bh(&tbl->lock);
+ read_unlock(&tbl->lock);
+ rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_for_each);
@@ -2167,18 +2247,25 @@ void __neigh_for_each_release(struct neigh_table *tbl,
int (*cb)(struct neighbour *))
{
int chain;
+ struct neigh_hash_table *nht;
- for (chain = 0; chain <= tbl->hash_mask; chain++) {
- struct neighbour *n, **np;
+ nht = rcu_dereference_protected(tbl->nht,
+ lockdep_is_held(&tbl->lock));
+ for (chain = 0; chain <= nht->hash_mask; chain++) {
+ struct neighbour *n;
+ struct neighbour __rcu **np;
- np = &tbl->hash_buckets[chain];
- while ((n = *np) != NULL) {
+ np = &nht->hash_buckets[chain];
+ while ((n = rcu_dereference_protected(*np,
+ lockdep_is_held(&tbl->lock))) != NULL) {
int release;
write_lock(&n->lock);
release = cb(n);
if (release) {
- *np = n->next;
+ rcu_assign_pointer(*np,
+ rcu_dereference_protected(n->next,
+ lockdep_is_held(&tbl->lock)));
n->dead = 1;
} else
np = &n->next;
@@ -2196,13 +2283,13 @@ static struct neighbour *neigh_get_first(struct seq_file *seq)
{
struct neigh_seq_state *state = seq->private;
struct net *net = seq_file_net(seq);
- struct neigh_table *tbl = state->tbl;
+ struct neigh_hash_table *nht = state->nht;
struct neighbour *n = NULL;
int bucket = state->bucket;
state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
- for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
- n = tbl->hash_buckets[bucket];
+ for (bucket = 0; bucket <= nht->hash_mask; bucket++) {
+ n = rcu_dereference_bh(nht->hash_buckets[bucket]);
while (n) {
if (!net_eq(dev_net(n->dev), net))
@@ -2219,8 +2306,8 @@ static struct neighbour *neigh_get_first(struct seq_file *seq)
break;
if (n->nud_state & ~NUD_NOARP)
break;
- next:
- n = n->next;
+next:
+ n = rcu_dereference_bh(n->next);
}
if (n)
@@ -2237,14 +2324,14 @@ static struct neighbour *neigh_get_next(struct seq_file *seq,
{
struct neigh_seq_state *state = seq->private;
struct net *net = seq_file_net(seq);
- struct neigh_table *tbl = state->tbl;
+ struct neigh_hash_table *nht = state->nht;
if (state->neigh_sub_iter) {
void *v = state->neigh_sub_iter(state, n, pos);
if (v)
return n;
}
- n = n->next;
+ n = rcu_dereference_bh(n->next);
while (1) {
while (n) {
@@ -2261,17 +2348,17 @@ static struct neighbour *neigh_get_next(struct seq_file *seq,
if (n->nud_state & ~NUD_NOARP)
break;
- next:
- n = n->next;
+next:
+ n = rcu_dereference_bh(n->next);
}
if (n)
break;
- if (++state->bucket > tbl->hash_mask)
+ if (++state->bucket > nht->hash_mask)
break;
- n = tbl->hash_buckets[state->bucket];
+ n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
}
if (n && pos)
@@ -2369,7 +2456,7 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
}
void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
- __acquires(tbl->lock)
+ __acquires(rcu_bh)
{
struct neigh_seq_state *state = seq->private;
@@ -2377,7 +2464,8 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
state->bucket = 0;
state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
- read_lock_bh(&tbl->lock);
+ rcu_read_lock_bh();
+ state->nht = rcu_dereference_bh(tbl->nht);
return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
}
@@ -2411,12 +2499,9 @@ out:
EXPORT_SYMBOL(neigh_seq_next);
void neigh_seq_stop(struct seq_file *seq, void *v)
- __releases(tbl->lock)
+ __releases(rcu_bh)
{
- struct neigh_seq_state *state = seq->private;
- struct neigh_table *tbl = state->tbl;
-
- read_unlock_bh(&tbl->lock);
+ rcu_read_unlock_bh();
}
EXPORT_SYMBOL(neigh_seq_stop);
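
Taken together, the neighbour.c changes follow one writer-side pattern: hash-chain pointers are only modified under tbl->lock via rcu_assign_pointer(), unlinked entries are marked dead, and the memory (both per-entry structs and a replaced hash table) is only freed after an RCU grace period via call_rcu(). A condensed, illustrative-only sketch of the unlink step (neigh_unlink_sketch() is not a function added by this patch):

/* Caller holds tbl->lock; np points at the chain slot that currently holds n. */
static void neigh_unlink_sketch(struct neigh_table *tbl,
				struct neighbour __rcu **np,
				struct neighbour *n)
{
	rcu_assign_pointer(*np,
			   rcu_dereference_protected(n->next,
						     lockdep_is_held(&tbl->lock)));
	n->dead = 1;
	neigh_release(n);	/* final put frees via call_rcu(neigh_destroy_rcu) */
}

Readers that still hold a pointer obtained under rcu_read_lock_bh() keep seeing a valid (if dead) entry until the grace period ends, which is why neigh_destroy() now defers kmem_cache_free() to neigh_destroy_rcu().
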
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 7d748542d97e..fa81fd0a488f 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -515,7 +515,7 @@ static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
return attribute->store(queue, attribute, buf, count);
}
-static struct sysfs_ops rx_queue_sysfs_ops = {
+static const struct sysfs_ops rx_queue_sysfs_ops = {
.show = rx_queue_attr_show,
.store = rx_queue_attr_store,
};
@@ -742,34 +742,38 @@ static int rx_queue_add_kobject(struct net_device *net, int index)
return error;
}
-static int rx_queue_register_kobjects(struct net_device *net)
+int
+net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
int i;
int error = 0;
- net->queues_kset = kset_create_and_add("queues",
- NULL, &net->dev.kobj);
- if (!net->queues_kset)
- return -ENOMEM;
- for (i = 0; i < net->num_rx_queues; i++) {
+ for (i = old_num; i < new_num; i++) {
error = rx_queue_add_kobject(net, i);
- if (error)
+ if (error) {
+ new_num = old_num;
break;
+ }
}
- if (error)
- while (--i >= 0)
- kobject_put(&net->_rx[i].kobj);
+ while (--i >= new_num)
+ kobject_put(&net->_rx[i].kobj);
return error;
}
-static void rx_queue_remove_kobjects(struct net_device *net)
+static int rx_queue_register_kobjects(struct net_device *net)
{
- int i;
+ net->queues_kset = kset_create_and_add("queues",
+ NULL, &net->dev.kobj);
+ if (!net->queues_kset)
+ return -ENOMEM;
+ return net_rx_queue_update_kobjects(net, 0, net->real_num_rx_queues);
+}
- for (i = 0; i < net->num_rx_queues; i++)
- kobject_put(&net->_rx[i].kobj);
+static void rx_queue_remove_kobjects(struct net_device *net)
+{
+ net_rx_queue_update_kobjects(net, net->real_num_rx_queues, 0);
kset_unregister(net->queues_kset);
}
#endif /* CONFIG_RPS */
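
The refactoring above turns the register/unregister pair into a single range-based helper: net_rx_queue_update_kobjects(dev, old_num, new_num) adds kobjects for [old_num, new_num), drops them when shrinking, and rolls back to old_num on failure. A sketch of a possible caller, assuming real_num_rx_queues is what tracks the active count; the caller name and error handling are illustrative, not part of this patch:

#ifdef CONFIG_RPS
static int set_real_num_rx_queues_sketch(struct net_device *dev,
					 unsigned int new_num)
{
	int err = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
					       new_num);
	if (err)
		return err;	/* kobjects were rolled back to old_num */
	dev->real_num_rx_queues = new_num;
	return 0;
}
#endif
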
diff --git a/net/core/net-sysfs.h b/net/core/net-sysfs.h
index 805555e8b187..778e1571548d 100644
--- a/net/core/net-sysfs.h
+++ b/net/core/net-sysfs.h
@@ -4,4 +4,8 @@
int netdev_kobject_init(void);
int netdev_register_kobject(struct net_device *);
void netdev_unregister_kobject(struct net_device *);
+#ifdef CONFIG_RPS
+int net_rx_queue_update_kobjects(struct net_device *, int old_num, int new_num);
+#endif
+
#endif
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 10a1ea72010d..2c0df0f95b3d 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -729,16 +729,14 @@ static int hex32_arg(const char __user *user_buffer, unsigned long maxlen,
*num = 0;
for (; i < maxlen; i++) {
+ int value;
char c;
*num <<= 4;
if (get_user(c, &user_buffer[i]))
return -EFAULT;
- if ((c >= '0') && (c <= '9'))
- *num |= c - '0';
- else if ((c >= 'a') && (c <= 'f'))
- *num |= c - 'a' + 10;
- else if ((c >= 'A') && (c <= 'F'))
- *num |= c - 'A' + 10;
+ value = hex_to_bin(c);
+ if (value >= 0)
+ *num |= value;
else
break;
}
@@ -3907,8 +3905,6 @@ static void __exit pg_cleanup(void)
{
struct pktgen_thread *t;
struct list_head *q, *n;
- wait_queue_head_t queue;
- init_waitqueue_head(&queue);
/* Stop all interfaces & threads */
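
The pktgen change above replaces three open-coded character-range tests with hex_to_bin(), which returns the 0..15 value of a hex digit and a negative value for anything else. A self-contained userspace model of the resulting parsing loop (hex_to_bin_model() stands in for the kernel's lib/hexdump.c helper):

#include <stdio.h>

static int hex_to_bin_model(char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	if (ch >= 'A' && ch <= 'F')
		return ch - 'A' + 10;
	return -1;
}

int main(void)
{
	const char *s = "dead_beef";	/* parsing stops at the first non-hex char */
	unsigned long num = 0;
	int value;

	while ((value = hex_to_bin_model(*s)) >= 0) {
		num = (num << 4) | value;
		s++;
	}
	printf("0x%lx\n", num);		/* prints 0xdead */
	return 0;
}
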
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index f78d821bd935..b2a718dfd720 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -612,36 +612,7 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
{
- struct rtnl_link_stats64 a;
-
- a.rx_packets = b->rx_packets;
- a.tx_packets = b->tx_packets;
- a.rx_bytes = b->rx_bytes;
- a.tx_bytes = b->tx_bytes;
- a.rx_errors = b->rx_errors;
- a.tx_errors = b->tx_errors;
- a.rx_dropped = b->rx_dropped;
- a.tx_dropped = b->tx_dropped;
-
- a.multicast = b->multicast;
- a.collisions = b->collisions;
-
- a.rx_length_errors = b->rx_length_errors;
- a.rx_over_errors = b->rx_over_errors;
- a.rx_crc_errors = b->rx_crc_errors;
- a.rx_frame_errors = b->rx_frame_errors;
- a.rx_fifo_errors = b->rx_fifo_errors;
- a.rx_missed_errors = b->rx_missed_errors;
-
- a.tx_aborted_errors = b->tx_aborted_errors;
- a.tx_carrier_errors = b->tx_carrier_errors;
- a.tx_fifo_errors = b->tx_fifo_errors;
- a.tx_heartbeat_errors = b->tx_heartbeat_errors;
- a.tx_window_errors = b->tx_window_errors;
-
- a.rx_compressed = b->rx_compressed;
- a.tx_compressed = b->tx_compressed;
- memcpy(v, &a, sizeof(a));
+ memcpy(v, b, sizeof(*b));
}
/* All VF info */
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3a2513f0d0c3..752c1972b3a7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -202,8 +202,6 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
skb->data = data;
skb_reset_tail_pointer(skb);
skb->end = skb->tail + size;
- kmemcheck_annotate_bitfield(skb, flags1);
- kmemcheck_annotate_bitfield(skb, flags2);
#ifdef NET_SKBUFF_DATA_USES_OFFSET
skb->mac_header = ~0U;
#endif
@@ -340,7 +338,7 @@ static void skb_release_data(struct sk_buff *skb)
put_page(skb_shinfo(skb)->frags[i].page);
}
- if (skb_has_frags(skb))
+ if (skb_has_frag_list(skb))
skb_drop_fraglist(skb);
kfree(skb->head);
@@ -685,16 +683,10 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
- int headerlen = skb->data - skb->head;
- /*
- * Allocate the copy buffer
- */
- struct sk_buff *n;
-#ifdef NET_SKBUFF_DATA_USES_OFFSET
- n = alloc_skb(skb->end + skb->data_len, gfp_mask);
-#else
- n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
-#endif
+ int headerlen = skb_headroom(skb);
+ unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
+ struct sk_buff *n = alloc_skb(size, gfp_mask);
+
if (!n)
return NULL;
@@ -726,20 +718,14 @@ EXPORT_SYMBOL(skb_copy);
struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
{
- /*
- * Allocate the copy buffer
- */
- struct sk_buff *n;
-#ifdef NET_SKBUFF_DATA_USES_OFFSET
- n = alloc_skb(skb->end, gfp_mask);
-#else
- n = alloc_skb(skb->end - skb->head, gfp_mask);
-#endif
+ unsigned int size = skb_end_pointer(skb) - skb->head;
+ struct sk_buff *n = alloc_skb(size, gfp_mask);
+
if (!n)
goto out;
/* Set the data pointer */
- skb_reserve(n, skb->data - skb->head);
+ skb_reserve(n, skb_headroom(skb));
/* Set the tail pointer and length */
skb_put(n, skb_headlen(skb));
/* Copy the bytes */
@@ -759,7 +745,7 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
skb_shinfo(n)->nr_frags = i;
}
- if (skb_has_frags(skb)) {
+ if (skb_has_frag_list(skb)) {
skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
skb_clone_fraglist(n);
}
@@ -791,12 +777,9 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
{
int i;
u8 *data;
-#ifdef NET_SKBUFF_DATA_USES_OFFSET
- int size = nhead + skb->end + ntail;
-#else
- int size = nhead + (skb->end - skb->head) + ntail;
-#endif
+ int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
long off;
+ bool fastpath;
BUG_ON(nhead < 0);
@@ -810,23 +793,36 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
goto nodata;
/* Copy only real data... and, alas, header. This should be
- * optimized for the cases when header is void. */
-#ifdef NET_SKBUFF_DATA_USES_OFFSET
- memcpy(data + nhead, skb->head, skb->tail);
-#else
- memcpy(data + nhead, skb->head, skb->tail - skb->head);
-#endif
- memcpy(data + size, skb_end_pointer(skb),
+ * optimized for the cases when header is void.
+ */
+ memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
+
+ memcpy((struct skb_shared_info *)(data + size),
+ skb_shinfo(skb),
offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- get_page(skb_shinfo(skb)->frags[i].page);
+ /* Check if we can avoid taking references on fragments if we own
+ * the last reference on skb->head. (see skb_release_data())
+ */
+ if (!skb->cloned)
+ fastpath = true;
+ else {
+ int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
- if (skb_has_frags(skb))
- skb_clone_fraglist(skb);
+ fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
+ }
- skb_release_data(skb);
+ if (fastpath) {
+ kfree(skb->head);
+ } else {
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ get_page(skb_shinfo(skb)->frags[i].page);
+
+ if (skb_has_frag_list(skb))
+ skb_clone_fraglist(skb);
+ skb_release_data(skb);
+ }
off = (data + nhead) - skb->head;
skb->head = data;
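
The new fastpath above avoids taking extra page and frag-list references when this skb holds the only reference on its head buffer: in that case skb_release_data() would free everything anyway, so the old head can simply be kfree()d after the copy. The ownership test, isolated into an illustrative helper that the patch itself does not add:

/* True if we own the last reference on skb->head; dataref packs two counters,
 * hence the SKB_DATAREF_SHIFT adjustment in the nohdr case. */
static bool skb_owns_head_sketch(const struct sk_buff *skb)
{
	int delta;

	if (!skb->cloned)
		return true;
	delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
	return atomic_read(&skb_shinfo(skb)->dataref) == delta;
}
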
@@ -1099,7 +1095,7 @@ drop_pages:
for (; i < nfrags; i++)
put_page(skb_shinfo(skb)->frags[i].page);
- if (skb_has_frags(skb))
+ if (skb_has_frag_list(skb))
skb_drop_fraglist(skb);
goto done;
}
@@ -1194,7 +1190,7 @@ unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
/* Optimization: no fragments, no reasons to preestimate
* size of pulled pages. Superb.
*/
- if (!skb_has_frags(skb))
+ if (!skb_has_frag_list(skb))
goto pull_pages;
/* Estimate size of pulled pages. */
@@ -2323,7 +2319,7 @@ next_skb:
st->frag_data = NULL;
}
- if (st->root_skb == st->cur_skb && skb_has_frags(st->root_skb)) {
+ if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
st->frag_idx = 0;
goto next_skb;
@@ -2573,6 +2569,10 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
__copy_skb_header(nskb, skb);
nskb->mac_len = skb->mac_len;
+ /* nskb and skb might have different headroom */
+ if (nskb->ip_summed == CHECKSUM_PARTIAL)
+ nskb->csum_start += skb_headroom(nskb) - headroom;
+
skb_reset_mac_header(nskb);
skb_set_network_header(nskb, skb->mac_len);
nskb->transport_header = (nskb->network_header +
@@ -2703,7 +2703,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
return -E2BIG;
headroom = skb_headroom(p);
- nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p));
+ nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
if (unlikely(!nskb))
return -ENOMEM;
@@ -2889,7 +2889,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
return -ENOMEM;
/* Easy case. Most of packets will go this way. */
- if (!skb_has_frags(skb)) {
+ if (!skb_has_frag_list(skb)) {
/* A little of trouble, not enough of space for trailer.
* This should not happen, when stack is tuned to generate
* good frames. OK, on miss we reallocate and reserve even more
@@ -2924,7 +2924,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
if (skb1->next == NULL && tailbits) {
if (skb_shinfo(skb1)->nr_frags ||
- skb_has_frags(skb1) ||
+ skb_has_frag_list(skb1) ||
skb_tailroom(skb1) < tailbits)
ntail = tailbits + 128;
}
@@ -2933,7 +2933,7 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
skb_cloned(skb1) ||
ntail ||
skb_shinfo(skb1)->nr_frags ||
- skb_has_frags(skb1)) {
+ skb_has_frag_list(skb1)) {
struct sk_buff *skb2;
/* Fuck, we are miserable poor guys... */
@@ -3016,7 +3016,7 @@ void skb_tstamp_tx(struct sk_buff *orig_skb,
} else {
/*
* no hardware time stamps available,
- * so keep the skb_shared_tx and only
+ * so keep the shared tx_flags and only
* store software time stamp
*/
skb->tstamp = ktime_get_real();
diff --git a/net/core/sock.c b/net/core/sock.c
index b05b9b6ddb87..42365deeba27 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1351,9 +1351,9 @@ int sock_i_uid(struct sock *sk)
{
int uid;
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
return uid;
}
EXPORT_SYMBOL(sock_i_uid);
@@ -1362,9 +1362,9 @@ unsigned long sock_i_ino(struct sock *sk)
{
unsigned long ino;
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
return ino;
}
EXPORT_SYMBOL(sock_i_ino);
@@ -1557,6 +1557,8 @@ struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
EXPORT_SYMBOL(sock_alloc_send_skb);
static void __lock_sock(struct sock *sk)
+ __releases(&sk->sk_lock.slock)
+ __acquires(&sk->sk_lock.slock)
{
DEFINE_WAIT(wait);
@@ -1573,6 +1575,8 @@ static void __lock_sock(struct sock *sk)
}
static void __release_sock(struct sock *sk)
+ __releases(&sk->sk_lock.slock)
+ __acquires(&sk->sk_lock.slock)
{
struct sk_buff *skb = sk->sk_backlog.head;
diff --git a/net/core/stream.c b/net/core/stream.c
index d959e0f41528..f5df85dcd20b 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -141,10 +141,10 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
sk->sk_write_pending++;
- sk_wait_event(sk, &current_timeo, !sk->sk_err &&
- !(sk->sk_shutdown & SEND_SHUTDOWN) &&
- sk_stream_memory_free(sk) &&
- vm_wait);
+ sk_wait_event(sk, &current_timeo, sk->sk_err ||
+ (sk->sk_shutdown & SEND_SHUTDOWN) ||
+ (sk_stream_memory_free(sk) &&
+ !vm_wait));
sk->sk_write_pending--;
if (vm_wait) {
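
The sk_stream_wait_memory() fix above inverts the predicate passed to sk_wait_event(): the macro sleeps until its condition becomes true, so the condition must describe the reasons to wake up (error, shutdown, or memory becoming available), not the reasons to keep waiting. A self-contained check of the corrected predicate:

#include <assert.h>
#include <stdbool.h>

static bool should_wake(bool err, bool send_shutdown, bool memory_free,
			bool vm_wait)
{
	return err || send_shutdown || (memory_free && !vm_wait);
}

int main(void)
{
	assert(should_wake(true, false, false, false));		/* socket error */
	assert(should_wake(false, true, false, false));		/* SEND_SHUTDOWN */
	assert(should_wake(false, false, true, false));		/* memory freed */
	assert(!should_wake(false, false, true, true));		/* still in vm_wait backoff */
	assert(!should_wake(false, false, false, false));	/* keep waiting */
	return 0;
}
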
diff --git a/net/core/utils.c b/net/core/utils.c
index f41854470539..5fea0ab21902 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -75,7 +75,7 @@ __be32 in_aton(const char *str)
str++;
}
}
- return(htonl(l));
+ return htonl(l);
}
EXPORT_SYMBOL(in_aton);
@@ -92,18 +92,19 @@ EXPORT_SYMBOL(in_aton);
static inline int xdigit2bin(char c, int delim)
{
+ int val;
+
if (c == delim || c == '\0')
return IN6PTON_DELIM;
if (c == ':')
return IN6PTON_COLON_MASK;
if (c == '.')
return IN6PTON_DOT;
- if (c >= '0' && c <= '9')
- return (IN6PTON_XDIGIT | IN6PTON_DIGIT| (c - '0'));
- if (c >= 'a' && c <= 'f')
- return (IN6PTON_XDIGIT | (c - 'a' + 10));
- if (c >= 'A' && c <= 'F')
- return (IN6PTON_XDIGIT | (c - 'A' + 10));
+
+ val = hex_to_bin(c);
+ if (val >= 0)
+ return val | IN6PTON_XDIGIT | (val < 10 ? IN6PTON_DIGIT : 0);
+
if (delim == -1)
return IN6PTON_DELIM;
return IN6PTON_UNKNOWN;
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 6df6f8ac9636..6d16a9070ff0 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -62,18 +62,14 @@ struct ccid_operations {
void (*ccid_hc_tx_exit)(struct sock *sk);
void (*ccid_hc_rx_packet_recv)(struct sock *sk,
struct sk_buff *skb);
- int (*ccid_hc_rx_parse_options)(struct sock *sk,
- unsigned char option,
- unsigned char len, u16 idx,
- unsigned char* value);
+ int (*ccid_hc_rx_parse_options)(struct sock *sk, u8 pkt,
+ u8 opt, u8 *val, u8 len);
int (*ccid_hc_rx_insert_options)(struct sock *sk,
struct sk_buff *skb);
void (*ccid_hc_tx_packet_recv)(struct sock *sk,
struct sk_buff *skb);
- int (*ccid_hc_tx_parse_options)(struct sock *sk,
- unsigned char option,
- unsigned char len, u16 idx,
- unsigned char* value);
+ int (*ccid_hc_tx_parse_options)(struct sock *sk, u8 pkt,
+ u8 opt, u8 *val, u8 len);
int (*ccid_hc_tx_send_packet)(struct sock *sk,
struct sk_buff *skb);
void (*ccid_hc_tx_packet_sent)(struct sock *sk,
@@ -168,27 +164,31 @@ static inline void ccid_hc_tx_packet_recv(struct ccid *ccid, struct sock *sk,
ccid->ccid_ops->ccid_hc_tx_packet_recv(sk, skb);
}
+/**
+ * ccid_hc_tx_parse_options - Parse CCID-specific options sent by the receiver
+ * @pkt: type of packet that @opt appears on (RFC 4340, 5.1)
+ * @opt: the CCID-specific option type (RFC 4340, 5.8 and 10.3)
+ * @val: value of @opt
+ * @len: length of @val in bytes
+ */
static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
- unsigned char option,
- unsigned char len, u16 idx,
- unsigned char* value)
+ u8 pkt, u8 opt, u8 *val, u8 len)
{
- int rc = 0;
- if (ccid->ccid_ops->ccid_hc_tx_parse_options != NULL)
- rc = ccid->ccid_ops->ccid_hc_tx_parse_options(sk, option, len, idx,
- value);
- return rc;
+ if (ccid->ccid_ops->ccid_hc_tx_parse_options == NULL)
+ return 0;
+ return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len);
}
+/**
+ * ccid_hc_rx_parse_options - Parse CCID-specific options sent by the sender
+ * Arguments are analogous to ccid_hc_tx_parse_options()
+ */
static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk,
- unsigned char option,
- unsigned char len, u16 idx,
- unsigned char* value)
+ u8 pkt, u8 opt, u8 *val, u8 len)
{
- int rc = 0;
- if (ccid->ccid_ops->ccid_hc_rx_parse_options != NULL)
- rc = ccid->ccid_ops->ccid_hc_rx_parse_options(sk, option, len, idx, value);
- return rc;
+ if (ccid->ccid_ops->ccid_hc_rx_parse_options == NULL)
+ return 0;
+ return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len);
}
static inline int ccid_hc_rx_insert_options(struct ccid *ccid, struct sock *sk,
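
The new ccid_hc_{tx,rx}_parse_options() signature hands each CCID the packet type, the option type, a pointer to the value, and its length, so per-CCID parsers no longer need the extra option-index argument. A minimal sketch of what an implementation of the hook could look like (ccid_foo_parse_options() is a hypothetical example, not a CCID touched by this patch):

static int ccid_foo_parse_options(struct sock *sk, u8 pkt, u8 opt,
				  u8 *val, u8 len)
{
	if (pkt == DCCP_PKT_DATA)	/* e.g. ignore CCID options on Data packets */
		return 0;
	if (len != 4)			/* reject malformed option values */
		return -EINVAL;
	/* interpret the 4-byte value pointed to by val ... */
	return 0;
}
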
diff --git a/net/dccp/ccids/Kconfig b/net/dccp/ccids/Kconfig
index 8408398cd44e..0581143cb800 100644
--- a/net/dccp/ccids/Kconfig
+++ b/net/dccp/ccids/Kconfig
@@ -47,37 +47,6 @@ config IP_DCCP_CCID3_DEBUG
If in doubt, say N.
-config IP_DCCP_CCID3_RTO
- int "Use higher bound for nofeedback timer"
- default 100
- depends on IP_DCCP_CCID3 && EXPERIMENTAL
- ---help---
- Use higher lower bound for nofeedback timer expiration.
-
- The TFRC nofeedback timer normally expires after the maximum of 4
- RTTs and twice the current send interval (RFC 3448, 4.3). On LANs
- with a small RTT this can mean a high processing load and reduced
- performance, since then the nofeedback timer is triggered very
- frequently.
-
- This option enables to set a higher lower bound for the nofeedback
- value. Values in units of milliseconds can be set here.
-
- A value of 0 disables this feature by enforcing the value specified
- in RFC 3448. The following values have been suggested as bounds for
- experimental use:
- * 16-20ms to match the typical multimedia inter-frame interval
- * 100ms as a reasonable compromise [default]
- * 1000ms corresponds to the lower TCP RTO bound (RFC 2988, 2.4)
-
- The default of 100ms is a compromise between a large value for
- efficient DCCP implementations, and a small value to avoid disrupting
- the network in times of congestion.
-
- The purpose of the nofeedback timer is to slow DCCP down when there
- is serious network congestion: experimenting with larger values should
- therefore not be performed on WANs.
-
config IP_DCCP_TFRC_LIB
def_bool y if IP_DCCP_CCID3
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 9b3ae9922be1..dc18172b1e59 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -25,59 +25,14 @@
*/
#include <linux/slab.h>
#include "../feat.h"
-#include "../ccid.h"
-#include "../dccp.h"
#include "ccid2.h"
#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
static int ccid2_debug;
#define ccid2_pr_debug(format, a...) DCCP_PR_DEBUG(ccid2_debug, format, ##a)
-
-static void ccid2_hc_tx_check_sanity(const struct ccid2_hc_tx_sock *hc)
-{
- int len = 0;
- int pipe = 0;
- struct ccid2_seq *seqp = hc->tx_seqh;
-
- /* there is data in the chain */
- if (seqp != hc->tx_seqt) {
- seqp = seqp->ccid2s_prev;
- len++;
- if (!seqp->ccid2s_acked)
- pipe++;
-
- while (seqp != hc->tx_seqt) {
- struct ccid2_seq *prev = seqp->ccid2s_prev;
-
- len++;
- if (!prev->ccid2s_acked)
- pipe++;
-
- /* packets are sent sequentially */
- BUG_ON(dccp_delta_seqno(seqp->ccid2s_seq,
- prev->ccid2s_seq ) >= 0);
- BUG_ON(time_before(seqp->ccid2s_sent,
- prev->ccid2s_sent));
-
- seqp = prev;
- }
- }
-
- BUG_ON(pipe != hc->tx_pipe);
- ccid2_pr_debug("len of chain=%d\n", len);
-
- do {
- seqp = seqp->ccid2s_prev;
- len++;
- } while (seqp != hc->tx_seqh);
-
- ccid2_pr_debug("total len=%d\n", len);
- BUG_ON(len != hc->tx_seqbufc * CCID2_SEQBUF_LEN);
-}
#else
#define ccid2_pr_debug(format, a...)
-#define ccid2_hc_tx_check_sanity(hc)
#endif
static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc)
@@ -156,19 +111,10 @@ static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
dp->dccps_l_ack_ratio = val;
}
-static void ccid2_change_srtt(struct ccid2_hc_tx_sock *hc, long val)
-{
- ccid2_pr_debug("change SRTT to %ld\n", val);
- hc->tx_srtt = val;
-}
-
-static void ccid2_start_rto_timer(struct sock *sk);
-
static void ccid2_hc_tx_rto_expire(unsigned long data)
{
struct sock *sk = (struct sock *)data;
struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
- long s;
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
@@ -178,23 +124,19 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
ccid2_pr_debug("RTO_EXPIRE\n");
- ccid2_hc_tx_check_sanity(hc);
-
/* back-off timer */
hc->tx_rto <<= 1;
+ if (hc->tx_rto > DCCP_RTO_MAX)
+ hc->tx_rto = DCCP_RTO_MAX;
- s = hc->tx_rto / HZ;
- if (s > 60)
- hc->tx_rto = 60 * HZ;
-
- ccid2_start_rto_timer(sk);
+ sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
/* adjust pipe, cwnd etc */
hc->tx_ssthresh = hc->tx_cwnd / 2;
if (hc->tx_ssthresh < 2)
hc->tx_ssthresh = 2;
- hc->tx_cwnd = 1;
- hc->tx_pipe = 0;
+ hc->tx_cwnd = 1;
+ hc->tx_pipe = 0;
/* clear state about stuff we sent */
hc->tx_seqt = hc->tx_seqh;
@@ -204,22 +146,11 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
hc->tx_rpseq = 0;
hc->tx_rpdupack = -1;
ccid2_change_l_ack_ratio(sk, 1);
- ccid2_hc_tx_check_sanity(hc);
out:
bh_unlock_sock(sk);
sock_put(sk);
}
-static void ccid2_start_rto_timer(struct sock *sk)
-{
- struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
-
- ccid2_pr_debug("setting RTO timeout=%ld\n", hc->tx_rto);
-
- BUG_ON(timer_pending(&hc->tx_rtotimer));
- sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
-}
-
static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
{
struct dccp_sock *dp = dccp_sk(sk);
@@ -230,7 +161,7 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
hc->tx_seqh->ccid2s_seq = dp->dccps_gss;
hc->tx_seqh->ccid2s_acked = 0;
- hc->tx_seqh->ccid2s_sent = jiffies;
+ hc->tx_seqh->ccid2s_sent = ccid2_time_stamp;
next = hc->tx_seqh->ccid2s_next;
/* check if we need to alloc more space */
@@ -296,23 +227,20 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
}
#endif
- /* setup RTO timer */
- if (!timer_pending(&hc->tx_rtotimer))
- ccid2_start_rto_timer(sk);
+ sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
do {
struct ccid2_seq *seqp = hc->tx_seqt;
while (seqp != hc->tx_seqh) {
- ccid2_pr_debug("out seq=%llu acked=%d time=%lu\n",
+ ccid2_pr_debug("out seq=%llu acked=%d time=%u\n",
(unsigned long long)seqp->ccid2s_seq,
seqp->ccid2s_acked, seqp->ccid2s_sent);
seqp = seqp->ccid2s_next;
}
} while (0);
ccid2_pr_debug("=========\n");
- ccid2_hc_tx_check_sanity(hc);
#endif
}
@@ -378,17 +306,87 @@ out_invalid_option:
return -1;
}
-static void ccid2_hc_tx_kill_rto_timer(struct sock *sk)
+/**
+ * ccid2_rtt_estimator - Sample RTT and compute RTO using RFC2988 algorithm
+ * This code is almost identical with TCP's tcp_rtt_estimator(), since
+ * - it has a higher sampling frequency (recommended by RFC 1323),
+ * - the RTO does not collapse into RTT due to RTTVAR going towards zero,
+ * - it is simple (cf. more complex proposals such as Eifel timer or research
+ * which suggests that the gain should be set according to window size),
+ * - in tests it was found to work well with CCID2 [gerrit].
+ */
+static void ccid2_rtt_estimator(struct sock *sk, const long mrtt)
{
struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
+ long m = mrtt ? : 1;
- sk_stop_timer(sk, &hc->tx_rtotimer);
- ccid2_pr_debug("deleted RTO timer\n");
+ if (hc->tx_srtt == 0) {
+ /* First measurement m */
+ hc->tx_srtt = m << 3;
+ hc->tx_mdev = m << 1;
+
+ hc->tx_mdev_max = max(hc->tx_mdev, tcp_rto_min(sk));
+ hc->tx_rttvar = hc->tx_mdev_max;
+
+ hc->tx_rtt_seq = dccp_sk(sk)->dccps_gss;
+ } else {
+ /* Update scaled SRTT as SRTT += 1/8 * (m - SRTT) */
+ m -= (hc->tx_srtt >> 3);
+ hc->tx_srtt += m;
+
+ /* Similarly, update scaled mdev with regard to |m| */
+ if (m < 0) {
+ m = -m;
+ m -= (hc->tx_mdev >> 2);
+ /*
+ * This neutralises RTO increase when RTT < SRTT - mdev
+ * (see P. Sarolahti, A. Kuznetsov,"Congestion Control
+ * in Linux TCP", USENIX 2002, pp. 49-62).
+ */
+ if (m > 0)
+ m >>= 3;
+ } else {
+ m -= (hc->tx_mdev >> 2);
+ }
+ hc->tx_mdev += m;
+
+ if (hc->tx_mdev > hc->tx_mdev_max) {
+ hc->tx_mdev_max = hc->tx_mdev;
+ if (hc->tx_mdev_max > hc->tx_rttvar)
+ hc->tx_rttvar = hc->tx_mdev_max;
+ }
+
+ /*
+ * Decay RTTVAR at most once per flight, exploiting that
+ * 1) pipe <= cwnd <= Sequence_Window = W (RFC 4340, 7.5.2)
+ * 2) AWL = GSS-W+1 <= GAR <= GSS (RFC 4340, 7.5.1)
+ * GAR is a useful bound for FlightSize = pipe.
+ * AWL is probably too low here, as it over-estimates pipe.
+ */
+ if (after48(dccp_sk(sk)->dccps_gar, hc->tx_rtt_seq)) {
+ if (hc->tx_mdev_max < hc->tx_rttvar)
+ hc->tx_rttvar -= (hc->tx_rttvar -
+ hc->tx_mdev_max) >> 2;
+ hc->tx_rtt_seq = dccp_sk(sk)->dccps_gss;
+ hc->tx_mdev_max = tcp_rto_min(sk);
+ }
+ }
+
+ /*
+ * Set RTO from SRTT and RTTVAR
+ * As in TCP, 4 * RTTVAR >= TCP_RTO_MIN, giving a minimum RTO of 200 ms.
+ * This agrees with RFC 4341, 5:
+ * "Because DCCP does not retransmit data, DCCP does not require
+ * TCP's recommended minimum timeout of one second".
+ */
+ hc->tx_rto = (hc->tx_srtt >> 3) + hc->tx_rttvar;
+
+ if (hc->tx_rto > DCCP_RTO_MAX)
+ hc->tx_rto = DCCP_RTO_MAX;
}
-static inline void ccid2_new_ack(struct sock *sk,
- struct ccid2_seq *seqp,
- unsigned int *maxincr)
+static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp,
+ unsigned int *maxincr)
{
struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
@@ -402,93 +400,27 @@ static inline void ccid2_new_ack(struct sock *sk,
hc->tx_cwnd += 1;
hc->tx_packets_acked = 0;
}
-
- /* update RTO */
- if (hc->tx_srtt == -1 ||
- time_after(jiffies, hc->tx_lastrtt + hc->tx_srtt)) {
- unsigned long r = (long)jiffies - (long)seqp->ccid2s_sent;
- int s;
-
- /* first measurement */
- if (hc->tx_srtt == -1) {
- ccid2_pr_debug("R: %lu Time=%lu seq=%llu\n",
- r, jiffies,
- (unsigned long long)seqp->ccid2s_seq);
- ccid2_change_srtt(hc, r);
- hc->tx_rttvar = r >> 1;
- } else {
- /* RTTVAR */
- long tmp = hc->tx_srtt - r;
- long srtt;
-
- if (tmp < 0)
- tmp *= -1;
-
- tmp >>= 2;
- hc->tx_rttvar *= 3;
- hc->tx_rttvar >>= 2;
- hc->tx_rttvar += tmp;
-
- /* SRTT */
- srtt = hc->tx_srtt;
- srtt *= 7;
- srtt >>= 3;
- tmp = r >> 3;
- srtt += tmp;
- ccid2_change_srtt(hc, srtt);
- }
- s = hc->tx_rttvar << 2;
- /* clock granularity is 1 when based on jiffies */
- if (!s)
- s = 1;
- hc->tx_rto = hc->tx_srtt + s;
-
- /* must be at least a second */
- s = hc->tx_rto / HZ;
- /* DCCP doesn't require this [but I like it cuz my code sux] */
-#if 1
- if (s < 1)
- hc->tx_rto = HZ;
-#endif
- /* max 60 seconds */
- if (s > 60)
- hc->tx_rto = HZ * 60;
-
- hc->tx_lastrtt = jiffies;
-
- ccid2_pr_debug("srtt: %ld rttvar: %ld rto: %ld (HZ=%d) R=%lu\n",
- hc->tx_srtt, hc->tx_rttvar,
- hc->tx_rto, HZ, r);
- }
-
- /* we got a new ack, so re-start RTO timer */
- ccid2_hc_tx_kill_rto_timer(sk);
- ccid2_start_rto_timer(sk);
-}
-
-static void ccid2_hc_tx_dec_pipe(struct sock *sk)
-{
- struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
-
- if (hc->tx_pipe == 0)
- DCCP_BUG("pipe == 0");
- else
- hc->tx_pipe--;
-
- if (hc->tx_pipe == 0)
- ccid2_hc_tx_kill_rto_timer(sk);
+ /*
+ * FIXME: RTT is sampled several times per acknowledgment (for each
+ * entry in the Ack Vector), instead of once per Ack (as in TCP SACK).
+ * This causes the RTT to be over-estimated, since the older entries
+ * in the Ack Vector have earlier sending times.
+ * The cleanest solution is to not use the ccid2s_sent field at all
+ * and instead use DCCP timestamps: requires changes in other places.
+ */
+ ccid2_rtt_estimator(sk, ccid2_time_stamp - seqp->ccid2s_sent);
}
static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
{
struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
- if (time_before(seqp->ccid2s_sent, hc->tx_last_cong)) {
+ if ((s32)(seqp->ccid2s_sent - hc->tx_last_cong) < 0) {
ccid2_pr_debug("Multiple losses in an RTT---treating as one\n");
return;
}
- hc->tx_last_cong = jiffies;
+ hc->tx_last_cong = ccid2_time_stamp;
hc->tx_cwnd = hc->tx_cwnd / 2 ? : 1U;
hc->tx_ssthresh = max(hc->tx_cwnd, 2U);
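
ccid2_rtt_estimator() above reuses the shift-based Jacobson/RFC 2988 arithmetic from TCP: SRTT is stored scaled by 8 and the mean deviation by 4, so the 1/8 and 1/4 smoothing gains reduce to shifts. A self-contained, deliberately simplified userspace model of the core update (it omits the per-flight RTTVAR decay, the negative-error damping, the rto_min floor and the DCCP_RTO_MAX clamp):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct rtt_state {
	uint32_t srtt8;		/* SRTT, scaled by 8 */
	uint32_t mdev4;		/* mean deviation, scaled by 4 */
	uint32_t rto;
};

static void rtt_sample(struct rtt_state *s, long m)
{
	if (m <= 0)
		m = 1;
	if (s->srtt8 == 0) {			/* first measurement */
		s->srtt8 = m << 3;
		s->mdev4 = m << 1;
	} else {
		m -= s->srtt8 >> 3;		/* error term */
		s->srtt8 += m;			/* SRTT += 1/8 * error */
		if (m < 0)
			m = -m;
		m -= s->mdev4 >> 2;
		s->mdev4 += m;			/* mdev += 1/4 * (|error| - mdev) */
	}
	s->rto = (s->srtt8 >> 3) + s->mdev4;	/* RTO = SRTT + 4 * mdev */
}

int main(void)
{
	struct rtt_state s = { 0, 0, 0 };
	const long samples[] = { 100, 120, 80, 300, 100 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		rtt_sample(&s, samples[i]);
		printf("sample=%ld srtt=%u rto=%u\n", samples[i],
		       (unsigned)(s.srtt8 >> 3), (unsigned)s.rto);
	}
	return 0;
}
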
@@ -510,7 +442,6 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
int done = 0;
unsigned int maxincr = 0;
- ccid2_hc_tx_check_sanity(hc);
/* check reverse path congestion */
seqno = DCCP_SKB_CB(skb)->dccpd_seq;
@@ -620,7 +551,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
seqp->ccid2s_acked = 1;
ccid2_pr_debug("Got ack for %llu\n",
(unsigned long long)seqp->ccid2s_seq);
- ccid2_hc_tx_dec_pipe(sk);
+ hc->tx_pipe--;
}
if (seqp == hc->tx_seqt) {
done = 1;
@@ -677,7 +608,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
* one ack vector.
*/
ccid2_congestion_event(sk, seqp);
- ccid2_hc_tx_dec_pipe(sk);
+ hc->tx_pipe--;
}
if (seqp == hc->tx_seqt)
break;
@@ -695,7 +626,11 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
hc->tx_seqt = hc->tx_seqt->ccid2s_next;
}
- ccid2_hc_tx_check_sanity(hc);
+ /* restart RTO timer if not all outstanding data has been acked */
+ if (hc->tx_pipe == 0)
+ sk_stop_timer(sk, &hc->tx_rtotimer);
+ else
+ sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
}
static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
@@ -707,12 +642,8 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
/* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */
hc->tx_ssthresh = ~0U;
- /*
- * RFC 4341, 5: "The cwnd parameter is initialized to at most four
- * packets for new connections, following the rules from [RFC3390]".
- * We need to convert the bytes of RFC3390 into the packets of RFC 4341.
- */
- hc->tx_cwnd = clamp(4380U / dp->dccps_mss_cache, 2U, 4U);
+ /* Use larger initial windows (RFC 4341, section 5). */
+ hc->tx_cwnd = rfc3390_bytes_to_packets(dp->dccps_mss_cache);
/* Make sure that Ack Ratio is enabled and within bounds. */
max_ratio = DIV_ROUND_UP(hc->tx_cwnd, 2);
@@ -723,15 +654,11 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
if (ccid2_hc_tx_alloc_seq(hc))
return -ENOMEM;
- hc->tx_rto = 3 * HZ;
- ccid2_change_srtt(hc, -1);
- hc->tx_rttvar = -1;
+ hc->tx_rto = DCCP_TIMEOUT_INIT;
hc->tx_rpdupack = -1;
- hc->tx_last_cong = jiffies;
+ hc->tx_last_cong = ccid2_time_stamp;
setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire,
(unsigned long)sk);
-
- ccid2_hc_tx_check_sanity(hc);
return 0;
}
@@ -740,7 +667,7 @@ static void ccid2_hc_tx_exit(struct sock *sk)
struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
int i;
- ccid2_hc_tx_kill_rto_timer(sk);
+ sk_stop_timer(sk, &hc->tx_rtotimer);
for (i = 0; i < hc->tx_seqbufc; i++)
kfree(hc->tx_seqbuf[i]);
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
index 1ec6a30103bb..9731c2dc1487 100644
--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h
@@ -18,18 +18,23 @@
#ifndef _DCCP_CCID2_H_
#define _DCCP_CCID2_H_
-#include <linux/dccp.h>
#include <linux/timer.h>
#include <linux/types.h>
#include "../ccid.h"
+#include "../dccp.h"
+
+/*
+ * CCID-2 timestamping faces the same issues as TCP timestamping.
+ * Hence we reuse/share as much of the code as possible.
+ */
+#define ccid2_time_stamp tcp_time_stamp
+
/* NUMDUPACK parameter from RFC 4341, p. 6 */
#define NUMDUPACK 3
-struct sock;
-
struct ccid2_seq {
u64 ccid2s_seq;
- unsigned long ccid2s_sent;
+ u32 ccid2s_sent;
int ccid2s_acked;
struct ccid2_seq *ccid2s_prev;
struct ccid2_seq *ccid2s_next;
@@ -42,7 +47,12 @@ struct ccid2_seq {
* struct ccid2_hc_tx_sock - CCID2 TX half connection
* @tx_{cwnd,ssthresh,pipe}: as per RFC 4341, section 5
* @tx_packets_acked: Ack counter for deriving cwnd growth (RFC 3465)
- * @tx_lastrtt: time RTT was last measured
+ * @tx_srtt: smoothed RTT estimate, scaled by 2^3
+ * @tx_mdev: smoothed RTT variation, scaled by 2^2
+ * @tx_mdev_max: maximum of @mdev during one flight
+ * @tx_rttvar: moving average/maximum of @mdev_max
+ * @tx_rto: RTO value deriving from SRTT and RTTVAR (RFC 2988)
+ * @tx_rtt_seq: to decay RTTVAR at most once per flight
* @tx_rpseq: last consecutive seqno
* @tx_rpdupack: dupacks since rpseq
*/
@@ -55,14 +65,19 @@ struct ccid2_hc_tx_sock {
int tx_seqbufc;
struct ccid2_seq *tx_seqh;
struct ccid2_seq *tx_seqt;
- long tx_rto;
- long tx_srtt;
- long tx_rttvar;
- unsigned long tx_lastrtt;
+
+ /* RTT measurement: variables/principles are the same as in TCP */
+ u32 tx_srtt,
+ tx_mdev,
+ tx_mdev_max,
+ tx_rttvar,
+ tx_rto;
+ u64 tx_rtt_seq:48;
struct timer_list tx_rtotimer;
+
u64 tx_rpseq;
int tx_rpdupack;
- unsigned long tx_last_cong;
+ u32 tx_last_cong;
u64 tx_high_ack;
};
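
With ccid2s_sent and tx_last_cong shrunk to 32-bit ccid2_time_stamp values (shared with tcp_time_stamp), the congestion-event check in ccid2.c compares them as (s32)(a - b) < 0 rather than time_before(), which stays correct across the 2^32 wrap. A self-contained check of that comparison:

#include <assert.h>
#include <stdint.h>

static int before32(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;	/* "a happened before b", wrap-safe */
}

int main(void)
{
	assert(before32(10, 20));
	assert(!before32(20, 10));
	/* close to the wrap point: 0xfffffff0 is still "before" 0x10 */
	assert(before32(0xfffffff0u, 0x00000010u));
	assert(!before32(0x00000010u, 0xfffffff0u));
	return 0;
}
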
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 95f752986497..c3f3a25bbd7a 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -54,7 +54,6 @@ static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state)
[TFRC_SSTATE_NO_SENT] = "NO_SENT",
[TFRC_SSTATE_NO_FBACK] = "NO_FBACK",
[TFRC_SSTATE_FBACK] = "FBACK",
- [TFRC_SSTATE_TERM] = "TERM",
};
return ccid3_state_names[state];
@@ -91,19 +90,16 @@ static inline u64 rfc3390_initial_rate(struct sock *sk)
return scaled_div(w_init << 6, hc->tx_rtt);
}
-/*
- * Recalculate t_ipi and delta (should be called whenever X changes)
+/**
+ * ccid3_update_send_interval - Calculate new t_ipi = s / X_inst
+ * This respects the granularity of X_inst (64 * bytes/second).
*/
static void ccid3_update_send_interval(struct ccid3_hc_tx_sock *hc)
{
- /* Calculate new t_ipi = s / X_inst (X_inst is in 64 * bytes/second) */
hc->tx_t_ipi = scaled_div32(((u64)hc->tx_s) << 6, hc->tx_x);
- /* Calculate new delta by delta = min(t_ipi / 2, t_gran / 2) */
- hc->tx_delta = min_t(u32, hc->tx_t_ipi / 2, TFRC_OPSYS_HALF_TIME_GRAN);
-
- ccid3_pr_debug("t_ipi=%u, delta=%u, s=%u, X=%u\n", hc->tx_t_ipi,
- hc->tx_delta, hc->tx_s, (unsigned)(hc->tx_x >> 6));
+ ccid3_pr_debug("t_ipi=%u, s=%u, X=%u\n", hc->tx_t_ipi,
+ hc->tx_s, (unsigned)(hc->tx_x >> 6));
}
static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now)
@@ -211,16 +207,19 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
ccid3_pr_debug("%s(%p, state=%s) - entry\n", dccp_role(sk), sk,
ccid3_tx_state_name(hc->tx_state));
+ /* Ignore and do not restart after leaving the established state */
+ if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN))
+ goto out;
+
+ /* Reset feedback state to "no feedback received" */
if (hc->tx_state == TFRC_SSTATE_FBACK)
ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
- else if (hc->tx_state != TFRC_SSTATE_NO_FBACK)
- goto out;
/*
* Determine new allowed sending rate X as per draft rfc3448bis-00, 4.4
+ * RTO is 0 if and only if no feedback has been received yet.
*/
- if (hc->tx_t_rto == 0 || /* no feedback received yet */
- hc->tx_p == 0) {
+ if (hc->tx_t_rto == 0 || hc->tx_p == 0) {
/* halve send rate directly */
hc->tx_x = max(hc->tx_x / 2,
@@ -256,7 +255,7 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
* Set new timeout for the nofeedback timer.
* See comments in packet_recv() regarding the value of t_RTO.
*/
- if (unlikely(hc->tx_t_rto == 0)) /* no feedback yet */
+ if (unlikely(hc->tx_t_rto == 0)) /* no feedback received yet */
t_nfb = TFRC_INITIAL_TIMEOUT;
else
t_nfb = max(hc->tx_t_rto, 2 * hc->tx_t_ipi);
@@ -290,8 +289,7 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
if (unlikely(skb->len == 0))
return -EBADMSG;
- switch (hc->tx_state) {
- case TFRC_SSTATE_NO_SENT:
+ if (hc->tx_state == TFRC_SSTATE_NO_SENT) {
sk_reset_timer(sk, &hc->tx_no_feedback_timer, (jiffies +
usecs_to_jiffies(TFRC_INITIAL_TIMEOUT)));
hc->tx_last_win_count = 0;
@@ -326,27 +324,22 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
ccid3_update_send_interval(hc);
ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
- break;
- case TFRC_SSTATE_NO_FBACK:
- case TFRC_SSTATE_FBACK:
+
+ } else {
delay = ktime_us_delta(hc->tx_t_nom, now);
ccid3_pr_debug("delay=%ld\n", (long)delay);
/*
- * Scheduling of packet transmissions [RFC 3448, 4.6]
+ * Scheduling of packet transmissions (RFC 5348, 8.3)
*
* if (t_now > t_nom - delta)
* // send the packet now
* else
* // send the packet in (t_nom - t_now) milliseconds.
*/
- if (delay - (s64)hc->tx_delta >= 1000)
- return (u32)delay / 1000L;
+ if (delay >= TFRC_T_DELTA)
+ return (u32)delay / USEC_PER_MSEC;
ccid3_hc_tx_update_win_count(hc, now);
- break;
- case TFRC_SSTATE_TERM:
- DCCP_BUG("%s(%p) - Illegal state TERM", dccp_role(sk), sk);
- return -EINVAL;
}
/* prepare to send now (add options etc.) */
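
The rewritten send path above replaces the per-connection tx_delta with the TFRC_T_DELTA bound (the diff's comment points at RFC 5348, 8.3): if the nominal send time t_nom is more than that delta in the future, the caller is told how many milliseconds to wait, otherwise the packet goes out now. A standalone model of the rule, where the delta value is only an assumption for the example:

#include <stdio.h>

#define T_DELTA_US 1000L	/* assumed scheduling granularity, in microseconds */

/* Returns 0 to send immediately, otherwise the delay to wait in milliseconds. */
static long schedule_delay_ms(long t_nom_us, long now_us)
{
	long delay = t_nom_us - now_us;

	if (delay >= T_DELTA_US)
		return delay / 1000;
	return 0;
}

int main(void)
{
	printf("%ld\n", schedule_delay_ms(5000, 1000));	/* 4: wait 4 ms */
	printf("%ld\n", schedule_delay_ms(1500, 1000));	/* 0: send now */
	return 0;
}
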
@@ -372,48 +365,34 @@ static void ccid3_hc_tx_packet_sent(struct sock *sk, int more,
static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
{
struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
- struct ccid3_options_received *opt_recv;
+ struct tfrc_tx_hist_entry *acked;
ktime_t now;
unsigned long t_nfb;
- u32 pinv, r_sample;
+ u32 r_sample;
/* we are only interested in ACKs */
if (!(DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK ||
DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_DATAACK))
return;
- /* ... and only in the established state */
- if (hc->tx_state != TFRC_SSTATE_FBACK &&
- hc->tx_state != TFRC_SSTATE_NO_FBACK)
- return;
-
- opt_recv = &hc->tx_options_received;
- now = ktime_get_real();
-
- /* Estimate RTT from history if ACK number is valid */
- r_sample = tfrc_tx_hist_rtt(hc->tx_hist,
- DCCP_SKB_CB(skb)->dccpd_ack_seq, now);
- if (r_sample == 0) {
- DCCP_WARN("%s(%p): %s with bogus ACK-%llu\n", dccp_role(sk), sk,
- dccp_packet_name(DCCP_SKB_CB(skb)->dccpd_type),
- (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq);
- return;
- }
-
- /* Update receive rate in units of 64 * bytes/second */
- hc->tx_x_recv = opt_recv->ccid3or_receive_rate;
- hc->tx_x_recv <<= 6;
-
- /* Update loss event rate (which is scaled by 1e6) */
- pinv = opt_recv->ccid3or_loss_event_rate;
- if (pinv == ~0U || pinv == 0) /* see RFC 4342, 8.5 */
- hc->tx_p = 0;
- else /* can not exceed 100% */
- hc->tx_p = scaled_div(1, pinv);
/*
- * Validate new RTT sample and update moving average
+ * Locate the acknowledged packet in the TX history.
+ *
+ * Returning "entry not found" here can for instance happen when
+ * - the host has not sent out anything (e.g. a passive server),
+ * - the Ack is outdated (packet with higher Ack number was received),
+ * - it is a bogus Ack (for a packet not sent on this connection).
*/
- r_sample = dccp_sample_rtt(sk, r_sample);
+ acked = tfrc_tx_hist_find_entry(hc->tx_hist, dccp_hdr_ack_seq(skb));
+ if (acked == NULL)
+ return;
+ /* For the sake of RTT sampling, ignore/remove all older entries */
+ tfrc_tx_hist_purge(&acked->next);
+
+ /* Update the moving average for the RTT estimate (RFC 3448, 4.3) */
+ now = ktime_get_real();
+ r_sample = dccp_sample_rtt(sk, ktime_us_delta(now, acked->stamp));
hc->tx_rtt = tfrc_ewma(hc->tx_rtt, r_sample, 9);
+
/*
* Update allowed sending rate X as per draft rfc3448bis-00, 4.2/3
*/
@@ -461,13 +440,12 @@ done_computing_x:
sk->sk_write_space(sk);
/*
- * Update timeout interval for the nofeedback timer.
- * We use a configuration option to increase the lower bound.
- * This can help avoid triggering the nofeedback timer too
- * often ('spinning') on LANs with small RTTs.
+ * Update timeout interval for the nofeedback timer. In order to control
+ * rate halving on networks with very low RTTs (<= 1 ms), use the per-route
+ * tunable RTAX_RTO_MIN value as the lower bound.
*/
- hc->tx_t_rto = max_t(u32, 4 * hc->tx_rtt, (CONFIG_IP_DCCP_CCID3_RTO *
- (USEC_PER_SEC / 1000)));
+ hc->tx_t_rto = max_t(u32, 4 * hc->tx_rtt,
+ USEC_PER_SEC/HZ * tcp_rto_min(sk));
/*
* Schedule no feedback timer to expire in
* max(t_RTO, 2 * s/X) = max(t_RTO, 2 * t_ipi)
@@ -482,66 +460,41 @@ done_computing_x:
jiffies + usecs_to_jiffies(t_nfb));
}
-static int ccid3_hc_tx_parse_options(struct sock *sk, unsigned char option,
- unsigned char len, u16 idx,
- unsigned char *value)
+static int ccid3_hc_tx_parse_options(struct sock *sk, u8 packet_type,
+ u8 option, u8 *optval, u8 optlen)
{
- int rc = 0;
- const struct dccp_sock *dp = dccp_sk(sk);
struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
- struct ccid3_options_received *opt_recv;
__be32 opt_val;
- opt_recv = &hc->tx_options_received;
-
- if (opt_recv->ccid3or_seqno != dp->dccps_gsr) {
- opt_recv->ccid3or_seqno = dp->dccps_gsr;
- opt_recv->ccid3or_loss_event_rate = ~0;
- opt_recv->ccid3or_loss_intervals_idx = 0;
- opt_recv->ccid3or_loss_intervals_len = 0;
- opt_recv->ccid3or_receive_rate = 0;
- }
-
switch (option) {
+ case TFRC_OPT_RECEIVE_RATE:
case TFRC_OPT_LOSS_EVENT_RATE:
- if (unlikely(len != 4)) {
- DCCP_WARN("%s(%p), invalid len %d "
- "for TFRC_OPT_LOSS_EVENT_RATE\n",
- dccp_role(sk), sk, len);
- rc = -EINVAL;
- } else {
- opt_val = get_unaligned((__be32 *)value);
- opt_recv->ccid3or_loss_event_rate = ntohl(opt_val);
- ccid3_pr_debug("%s(%p), LOSS_EVENT_RATE=%u\n",
- dccp_role(sk), sk,
- opt_recv->ccid3or_loss_event_rate);
+ /* Must be ignored on Data packets, cf. RFC 4342 8.3 and 8.5 */
+ if (packet_type == DCCP_PKT_DATA)
+ break;
+ if (unlikely(optlen != 4)) {
+ DCCP_WARN("%s(%p), invalid len %d for %u\n",
+ dccp_role(sk), sk, optlen, option);
+ return -EINVAL;
}
- break;
- case TFRC_OPT_LOSS_INTERVALS:
- opt_recv->ccid3or_loss_intervals_idx = idx;
- opt_recv->ccid3or_loss_intervals_len = len;
- ccid3_pr_debug("%s(%p), LOSS_INTERVALS=(%u, %u)\n",
- dccp_role(sk), sk,
- opt_recv->ccid3or_loss_intervals_idx,
- opt_recv->ccid3or_loss_intervals_len);
- break;
- case TFRC_OPT_RECEIVE_RATE:
- if (unlikely(len != 4)) {
- DCCP_WARN("%s(%p), invalid len %d "
- "for TFRC_OPT_RECEIVE_RATE\n",
- dccp_role(sk), sk, len);
- rc = -EINVAL;
- } else {
- opt_val = get_unaligned((__be32 *)value);
- opt_recv->ccid3or_receive_rate = ntohl(opt_val);
+ opt_val = ntohl(get_unaligned((__be32 *)optval));
+
+ if (option == TFRC_OPT_RECEIVE_RATE) {
+ /* Receive Rate is kept in units of 64 bytes/second */
+ hc->tx_x_recv = opt_val;
+ hc->tx_x_recv <<= 6;
+
ccid3_pr_debug("%s(%p), RECEIVE_RATE=%u\n",
- dccp_role(sk), sk,
- opt_recv->ccid3or_receive_rate);
+ dccp_role(sk), sk, opt_val);
+ } else {
+ /* Update the fixpoint Loss Event Rate fraction */
+ hc->tx_p = tfrc_invert_loss_event_rate(opt_val);
+
+ ccid3_pr_debug("%s(%p), LOSS_EVENT_RATE=%u\n",
+ dccp_role(sk), sk, opt_val);
}
- break;
}
-
- return rc;
+ return 0;
}
static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk)
@@ -559,42 +512,36 @@ static void ccid3_hc_tx_exit(struct sock *sk)
{
struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
- ccid3_hc_tx_set_state(sk, TFRC_SSTATE_TERM);
sk_stop_timer(sk, &hc->tx_no_feedback_timer);
-
tfrc_tx_hist_purge(&hc->tx_hist);
}
static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info)
{
- struct ccid3_hc_tx_sock *hc;
-
- /* Listen socks doesn't have a private CCID block */
- if (sk->sk_state == DCCP_LISTEN)
- return;
-
- hc = ccid3_hc_tx_sk(sk);
- info->tcpi_rto = hc->tx_t_rto;
- info->tcpi_rtt = hc->tx_rtt;
+ info->tcpi_rto = ccid3_hc_tx_sk(sk)->tx_t_rto;
+ info->tcpi_rtt = ccid3_hc_tx_sk(sk)->tx_rtt;
}
static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len,
u32 __user *optval, int __user *optlen)
{
- const struct ccid3_hc_tx_sock *hc;
+ const struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk);
+ struct tfrc_tx_info tfrc;
const void *val;
- /* Listen socks doesn't have a private CCID block */
- if (sk->sk_state == DCCP_LISTEN)
- return -EINVAL;
-
- hc = ccid3_hc_tx_sk(sk);
switch (optname) {
case DCCP_SOCKOPT_CCID_TX_INFO:
- if (len < sizeof(hc->tx_tfrc))
+ if (len < sizeof(tfrc))
return -EINVAL;
- len = sizeof(hc->tx_tfrc);
- val = &hc->tx_tfrc;
+ tfrc.tfrctx_x = hc->tx_x;
+ tfrc.tfrctx_x_recv = hc->tx_x_recv;
+ tfrc.tfrctx_x_calc = hc->tx_x_calc;
+ tfrc.tfrctx_rtt = hc->tx_rtt;
+ tfrc.tfrctx_p = hc->tx_p;
+ tfrc.tfrctx_rto = hc->tx_t_rto;
+ tfrc.tfrctx_ipi = hc->tx_t_ipi;
+ len = sizeof(tfrc);
+ val = &tfrc;
break;
default:
return -ENOPROTOOPT;
@@ -624,7 +571,6 @@ static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state)
static const char *const ccid3_rx_state_names[] = {
[TFRC_RSTATE_NO_DATA] = "NO_DATA",
[TFRC_RSTATE_DATA] = "DATA",
- [TFRC_RSTATE_TERM] = "TERM",
};
return ccid3_rx_state_names[state];
@@ -650,14 +596,9 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
{
struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
struct dccp_sock *dp = dccp_sk(sk);
- ktime_t now;
+ ktime_t now = ktime_get_real();
s64 delta = 0;
- if (unlikely(hc->rx_state == TFRC_RSTATE_TERM))
- return;
-
- now = ktime_get_real();
-
switch (fbtype) {
case CCID3_FBACK_INITIAL:
hc->rx_x_recv = 0;
@@ -701,14 +642,12 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
{
- const struct ccid3_hc_rx_sock *hc;
+ const struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
__be32 x_recv, pinv;
if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN))
return 0;
- hc = ccid3_hc_rx_sk(sk);
-
if (dccp_packet_without_ack(skb))
return 0;
@@ -749,10 +688,11 @@ static u32 ccid3_first_li(struct sock *sk)
x_recv = scaled_div32(hc->rx_bytes_recv, delta);
if (x_recv == 0) { /* would also trigger divide-by-zero */
DCCP_WARN("X_recv==0\n");
- if ((x_recv = hc->rx_x_recv) == 0) {
+ if (hc->rx_x_recv == 0) {
DCCP_BUG("stored value of X_recv is zero");
return ~0U;
}
+ x_recv = hc->rx_x_recv;
}
fval = scaled_div(hc->rx_s, hc->rx_rtt);
@@ -862,46 +802,31 @@ static void ccid3_hc_rx_exit(struct sock *sk)
{
struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
- ccid3_hc_rx_set_state(sk, TFRC_RSTATE_TERM);
-
tfrc_rx_hist_purge(&hc->rx_hist);
tfrc_lh_cleanup(&hc->rx_li_hist);
}
static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info)
{
- const struct ccid3_hc_rx_sock *hc;
-
- /* Listen socks doesn't have a private CCID block */
- if (sk->sk_state == DCCP_LISTEN)
- return;
-
- hc = ccid3_hc_rx_sk(sk);
- info->tcpi_ca_state = hc->rx_state;
+ info->tcpi_ca_state = ccid3_hc_rx_sk(sk)->rx_state;
info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
- info->tcpi_rcv_rtt = hc->rx_rtt;
+ info->tcpi_rcv_rtt = ccid3_hc_rx_sk(sk)->rx_rtt;
}
static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len,
u32 __user *optval, int __user *optlen)
{
- const struct ccid3_hc_rx_sock *hc;
+ const struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
struct tfrc_rx_info rx_info;
const void *val;
- /* Listen socks doesn't have a private CCID block */
- if (sk->sk_state == DCCP_LISTEN)
- return -EINVAL;
-
- hc = ccid3_hc_rx_sk(sk);
switch (optname) {
case DCCP_SOCKOPT_CCID_RX_INFO:
if (len < sizeof(rx_info))
return -EINVAL;
rx_info.tfrcrx_x_recv = hc->rx_x_recv;
rx_info.tfrcrx_rtt = hc->rx_rtt;
- rx_info.tfrcrx_p = hc->rx_pinv == 0 ? ~0U :
- scaled_div(1, hc->rx_pinv);
+ rx_info.tfrcrx_p = tfrc_invert_loss_event_rate(hc->rx_pinv);
len = sizeof(rx_info);
val = &rx_info;
break;
diff --git a/net/dccp/ccids/ccid3.h b/net/dccp/ccids/ccid3.h
index 032635776653..1a9933c29672 100644
--- a/net/dccp/ccids/ccid3.h
+++ b/net/dccp/ccids/ccid3.h
@@ -42,35 +42,36 @@
#include "lib/tfrc.h"
#include "../ccid.h"
-/* Two seconds as per RFC 3448 4.2 */
+/* Two seconds as per RFC 5348, 4.2 */
#define TFRC_INITIAL_TIMEOUT (2 * USEC_PER_SEC)
-/* In usecs - half the scheduling granularity as per RFC3448 4.6 */
-#define TFRC_OPSYS_HALF_TIME_GRAN (USEC_PER_SEC / (2 * HZ))
-
/* Parameter t_mbi from [RFC 3448, 4.3]: backoff interval in seconds */
#define TFRC_T_MBI 64
+/*
+ * The t_delta parameter (RFC 5348, 8.3): delays of less than %USEC_PER_MSEC are
+ * rounded down to 0, since sk_reset_timer() here uses millisecond granularity.
+ * Hence we can use a constant t_delta = %USEC_PER_MSEC when HZ >= 500. A coarse
+ * resolution of HZ < 500 means that the error is below one timer tick (t_gran)
+ * when using the constant t_delta = t_gran / 2 = %USEC_PER_SEC / (2 * HZ).
+ */
+#if (HZ >= 500)
+# define TFRC_T_DELTA USEC_PER_MSEC
+#else
+# define TFRC_T_DELTA (USEC_PER_SEC / (2 * HZ))
+#endif
+
enum ccid3_options {
TFRC_OPT_LOSS_EVENT_RATE = 192,
TFRC_OPT_LOSS_INTERVALS = 193,
TFRC_OPT_RECEIVE_RATE = 194,
};
-struct ccid3_options_received {
- u64 ccid3or_seqno:48,
- ccid3or_loss_intervals_idx:16;
- u16 ccid3or_loss_intervals_len;
- u32 ccid3or_loss_event_rate;
- u32 ccid3or_receive_rate;
-};
-
/* TFRC sender states */
enum ccid3_hc_tx_states {
TFRC_SSTATE_NO_SENT = 1,
TFRC_SSTATE_NO_FBACK,
TFRC_SSTATE_FBACK,
- TFRC_SSTATE_TERM,
};
/**
@@ -90,19 +91,16 @@ enum ccid3_hc_tx_states {
* @tx_no_feedback_timer: Handle to no feedback timer
* @tx_t_ld: Time last doubled during slow start
* @tx_t_nom: Nominal send time of next packet
- * @tx_delta: Send timer delta (RFC 3448, 4.6) in usecs
* @tx_hist: Packet history
- * @tx_options_received: Parsed set of retrieved options
*/
struct ccid3_hc_tx_sock {
- struct tfrc_tx_info tx_tfrc;
-#define tx_x tx_tfrc.tfrctx_x
-#define tx_x_recv tx_tfrc.tfrctx_x_recv
-#define tx_x_calc tx_tfrc.tfrctx_x_calc
-#define tx_rtt tx_tfrc.tfrctx_rtt
-#define tx_p tx_tfrc.tfrctx_p
-#define tx_t_rto tx_tfrc.tfrctx_rto
-#define tx_t_ipi tx_tfrc.tfrctx_ipi
+ u64 tx_x;
+ u64 tx_x_recv;
+ u32 tx_x_calc;
+ u32 tx_rtt;
+ u32 tx_p;
+ u32 tx_t_rto;
+ u32 tx_t_ipi;
u16 tx_s;
enum ccid3_hc_tx_states tx_state:8;
u8 tx_last_win_count;
@@ -110,9 +108,7 @@ struct ccid3_hc_tx_sock {
struct timer_list tx_no_feedback_timer;
ktime_t tx_t_ld;
ktime_t tx_t_nom;
- u32 tx_delta;
struct tfrc_tx_hist_entry *tx_hist;
- struct ccid3_options_received tx_options_received;
};
static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk)
@@ -126,21 +122,16 @@ static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk)
enum ccid3_hc_rx_states {
TFRC_RSTATE_NO_DATA = 1,
TFRC_RSTATE_DATA,
- TFRC_RSTATE_TERM = 127,
};
/**
* struct ccid3_hc_rx_sock - CCID3 receiver half-connection socket
- * @rx_x_recv: Receiver estimate of send rate (RFC 3448 4.3)
- * @rx_rtt: Receiver estimate of rtt (non-standard)
- * @rx_p: Current loss event rate (RFC 3448 5.4)
* @rx_last_counter: Tracks window counter (RFC 4342, 8.1)
* @rx_state: Receiver state, one of %ccid3_hc_rx_states
* @rx_bytes_recv: Total sum of DCCP payload bytes
* @rx_x_recv: Receiver estimate of send rate (RFC 3448, sec. 4.3)
* @rx_rtt: Receiver estimate of RTT
* @rx_tstamp_last_feedback: Time at which last feedback was sent
- * @rx_tstamp_last_ack: Time at which last feedback was sent
* @rx_hist: Packet history (loss detection + RTT sampling)
* @rx_li_hist: Loss Interval database
* @rx_s: Received packet size in bytes
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index 8fc3cbf79071..497723c4d4bb 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -116,7 +116,7 @@ u8 tfrc_lh_update_i_mean(struct tfrc_loss_hist *lh, struct sk_buff *skb)
cur->li_length = len;
tfrc_lh_calc_i_mean(lh);
- return (lh->i_mean < old_i_mean);
+ return lh->i_mean < old_i_mean;
}
/* Determine if `new_loss' does begin a new loss interval [RFC 4342, 10.2] */
diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c
index 3a4f414e94a0..de8fe294bf0b 100644
--- a/net/dccp/ccids/lib/packet_history.c
+++ b/net/dccp/ccids/lib/packet_history.c
@@ -38,18 +38,6 @@
#include "packet_history.h"
#include "../../dccp.h"
-/**
- * tfrc_tx_hist_entry - Simple singly-linked TX history list
- * @next: next oldest entry (LIFO order)
- * @seqno: sequence number of this entry
- * @stamp: send time of packet with sequence number @seqno
- */
-struct tfrc_tx_hist_entry {
- struct tfrc_tx_hist_entry *next;
- u64 seqno;
- ktime_t stamp;
-};
-
/*
* Transmitter History Routines
*/
@@ -71,15 +59,6 @@ void tfrc_tx_packet_history_exit(void)
}
}
-static struct tfrc_tx_hist_entry *
- tfrc_tx_hist_find_entry(struct tfrc_tx_hist_entry *head, u64 seqno)
-{
- while (head != NULL && head->seqno != seqno)
- head = head->next;
-
- return head;
-}
-
int tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno)
{
struct tfrc_tx_hist_entry *entry = kmem_cache_alloc(tfrc_tx_hist_slab, gfp_any());
@@ -107,24 +86,6 @@ void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp)
*headp = NULL;
}
-u32 tfrc_tx_hist_rtt(struct tfrc_tx_hist_entry *head, const u64 seqno,
- const ktime_t now)
-{
- u32 rtt = 0;
- struct tfrc_tx_hist_entry *packet = tfrc_tx_hist_find_entry(head, seqno);
-
- if (packet != NULL) {
- rtt = ktime_us_delta(now, packet->stamp);
- /*
- * Garbage-collect older (irrelevant) entries:
- */
- tfrc_tx_hist_purge(&packet->next);
- }
-
- return rtt;
-}
-
-
/*
* Receiver History Routines
*/
diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h
index 7df6c5299999..7ee4a9d9d335 100644
--- a/net/dccp/ccids/lib/packet_history.h
+++ b/net/dccp/ccids/lib/packet_history.h
@@ -40,12 +40,28 @@
#include <linux/slab.h>
#include "tfrc.h"
-struct tfrc_tx_hist_entry;
+/**
+ * tfrc_tx_hist_entry - Simple singly-linked TX history list
+ * @next: next oldest entry (LIFO order)
+ * @seqno: sequence number of this entry
+ * @stamp: send time of packet with sequence number @seqno
+ */
+struct tfrc_tx_hist_entry {
+ struct tfrc_tx_hist_entry *next;
+ u64 seqno;
+ ktime_t stamp;
+};
+
+static inline struct tfrc_tx_hist_entry *
+ tfrc_tx_hist_find_entry(struct tfrc_tx_hist_entry *head, u64 seqno)
+{
+ while (head != NULL && head->seqno != seqno)
+ head = head->next;
+ return head;
+}
extern int tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno);
extern void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp);
-extern u32 tfrc_tx_hist_rtt(struct tfrc_tx_hist_entry *head,
- const u64 seqno, const ktime_t now);
/* Subtraction a-b modulo-16, respects circular wrap-around */
#define SUB16(a, b) (((a) + 16 - (b)) & 0xF)
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h
index 01bb48e96c2e..f8ee3f549770 100644
--- a/net/dccp/ccids/lib/tfrc.h
+++ b/net/dccp/ccids/lib/tfrc.h
@@ -57,6 +57,7 @@ static inline u32 tfrc_ewma(const u32 avg, const u32 newval, const u8 weight)
extern u32 tfrc_calc_x(u16 s, u32 R, u32 p);
extern u32 tfrc_calc_x_reverse_lookup(u32 fvalue);
+extern u32 tfrc_invert_loss_event_rate(u32 loss_event_rate);
extern int tfrc_tx_packet_history_init(void);
extern void tfrc_tx_packet_history_exit(void);
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c
index 22ca1cf0eb55..a052a4377e26 100644
--- a/net/dccp/ccids/lib/tfrc_equation.c
+++ b/net/dccp/ccids/lib/tfrc_equation.c
@@ -687,3 +687,17 @@ u32 tfrc_calc_x_reverse_lookup(u32 fvalue)
index = tfrc_binsearch(fvalue, 0);
return (index + 1) * 1000000 / TFRC_CALC_X_ARRSIZE;
}
+
+/**
+ * tfrc_invert_loss_event_rate - Compute p so that 10^6 corresponds to 100%
+ * When @loss_event_rate is large, there is a chance that p is truncated to 0.
+ * To avoid re-entering slow-start in that case, we set p = TFRC_SMALLEST_P > 0.
+ */
+u32 tfrc_invert_loss_event_rate(u32 loss_event_rate)
+{
+ if (loss_event_rate == UINT_MAX) /* see RFC 4342, 8.5 */
+ return 0;
+ if (unlikely(loss_event_rate == 0)) /* map 1/0 into 100% */
+ return 1000000;
+ return max_t(u32, scaled_div(1, loss_event_rate), TFRC_SMALLEST_P);
+}
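
As an aside on the helper added above: p is kept as a fraction scaled by 10^6, so its behaviour can be sketched in plain userspace C. This is only an approximation — scaled_div() in the kernel may round differently, and TFRC_SMALLEST_P is assumed here to be 1:

	#include <stdint.h>
	#include <stdio.h>

	/* Userspace approximation of tfrc_invert_loss_event_rate() above. */
	static uint32_t invert_loss_event_rate(uint32_t loss_event_rate)
	{
		if (loss_event_rate == UINT32_MAX)	/* no loss reported (RFC 4342, 8.5) */
			return 0;
		if (loss_event_rate == 0)		/* map 1/0 into 100% */
			return 1000000;
		uint32_t p = 1000000 / loss_event_rate;
		return p ? p : 1;			/* assumed TFRC_SMALLEST_P = 1 */
	}

	int main(void)
	{
		printf("%u %u %u\n",
		       invert_loss_event_rate(UINT32_MAX),	/* 0       */
		       invert_loss_event_rate(100),		/* 10000   */
		       invert_loss_event_rate(0));		/* 1000000 */
		return 0;
	}
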
diff --git a/net/dccp/options.c b/net/dccp/options.c
index bfda087bd90d..92718511eac5 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -96,18 +96,11 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
}
/*
- * CCID-Specific Options (from RFC 4340, sec. 10.3):
- *
- * Option numbers 128 through 191 are for options sent from the
- * HC-Sender to the HC-Receiver; option numbers 192 through 255
- * are for options sent from the HC-Receiver to the HC-Sender.
- *
* CCID-specific options are ignored during connection setup, as
* negotiation may still be in progress (see RFC 4340, 10.3).
* The same applies to Ack Vectors, as these depend on the CCID.
- *
*/
- if (dreq != NULL && (opt >= 128 ||
+ if (dreq != NULL && (opt >= DCCPO_MIN_RX_CCID_SPECIFIC ||
opt == DCCPO_ACK_VECTOR_0 || opt == DCCPO_ACK_VECTOR_1))
goto ignore_option;
@@ -226,23 +219,15 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
dccp_pr_debug("%s rx opt: ELAPSED_TIME=%d\n",
dccp_role(sk), elapsed_time);
break;
- case 128 ... 191: {
- const u16 idx = value - options;
-
+ case DCCPO_MIN_RX_CCID_SPECIFIC ... DCCPO_MAX_RX_CCID_SPECIFIC:
if (ccid_hc_rx_parse_options(dp->dccps_hc_rx_ccid, sk,
- opt, len, idx,
- value) != 0)
+ pkt_type, opt, value, len))
goto out_invalid_option;
- }
break;
- case 192 ... 255: {
- const u16 idx = value - options;
-
+ case DCCPO_MIN_TX_CCID_SPECIFIC ... DCCPO_MAX_TX_CCID_SPECIFIC:
if (ccid_hc_tx_parse_options(dp->dccps_hc_tx_ccid, sk,
- opt, len, idx,
- value) != 0)
+ pkt_type, opt, value, len))
goto out_invalid_option;
- }
break;
default:
DCCP_CRIT("DCCP(%p): option %d(len=%d) not "
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 0363bb95cc7d..a085dbcf5c7f 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -48,7 +48,6 @@
#include <net/dn_neigh.h>
#include <net/dn_route.h>
-static u32 dn_neigh_hash(const void *pkey, const struct net_device *dev);
static int dn_neigh_construct(struct neighbour *);
static void dn_long_error_report(struct neighbour *, struct sk_buff *);
static void dn_short_error_report(struct neighbour *, struct sk_buff *);
@@ -93,6 +92,13 @@ static const struct neigh_ops dn_phase3_ops = {
.queue_xmit = dev_queue_xmit
};
+static u32 dn_neigh_hash(const void *pkey,
+ const struct net_device *dev,
+ __u32 hash_rnd)
+{
+ return jhash_2words(*(__u16 *)pkey, 0, hash_rnd);
+}
+
struct neigh_table dn_neigh_table = {
.family = PF_DECnet,
.entry_size = sizeof(struct dn_neigh),
@@ -122,11 +128,6 @@ struct neigh_table dn_neigh_table = {
.gc_thresh3 = 1024,
};
-static u32 dn_neigh_hash(const void *pkey, const struct net_device *dev)
-{
- return jhash_2words(*(__u16 *)pkey, 0, dn_neigh_table.hash_rnd);
-}
-
static int dn_neigh_construct(struct neighbour *neigh)
{
struct net_device *dev = neigh->dev;
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index baeb1eaf011b..2ef115277bea 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -693,22 +693,22 @@ void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg)
aux = scp->accessdata.acc_userl;
*skb_put(skb, 1) = aux;
if (aux > 0)
- memcpy(skb_put(skb, aux), scp->accessdata.acc_user, aux);
+ memcpy(skb_put(skb, aux), scp->accessdata.acc_user, aux);
aux = scp->accessdata.acc_passl;
*skb_put(skb, 1) = aux;
if (aux > 0)
- memcpy(skb_put(skb, aux), scp->accessdata.acc_pass, aux);
+ memcpy(skb_put(skb, aux), scp->accessdata.acc_pass, aux);
aux = scp->accessdata.acc_accl;
*skb_put(skb, 1) = aux;
if (aux > 0)
- memcpy(skb_put(skb, aux), scp->accessdata.acc_acc, aux);
+ memcpy(skb_put(skb, aux), scp->accessdata.acc_acc, aux);
aux = (__u8)le16_to_cpu(scp->conndata_out.opt_optl);
*skb_put(skb, 1) = aux;
if (aux > 0)
- memcpy(skb_put(skb,aux), scp->conndata_out.opt_data, aux);
+ memcpy(skb_put(skb, aux), scp->conndata_out.opt_data, aux);
scp->persist = dn_nsp_persist(sk);
scp->persist_fxn = dn_nsp_retrans_conninit;
diff --git a/net/dns_resolver/Kconfig b/net/dns_resolver/Kconfig
new file mode 100644
index 000000000000..50d49f7e0472
--- /dev/null
+++ b/net/dns_resolver/Kconfig
@@ -0,0 +1,27 @@
+#
+# Configuration for DNS Resolver
+#
+config DNS_RESOLVER
+ tristate "DNS Resolver support"
+ depends on NET && KEYS
+ help
+ Saying Y here will include support for the DNS Resolver key type
+ which can be used to make upcalls to perform DNS lookups in
+ userspace.
+
+ DNS Resolver is used to query a DNS server for information. Examples
+ include resolving a UNC hostname element to an IP address for CIFS or
+ performing a DNS query for AFSDB records so that AFS can locate a
+ cell's volume location database servers.
+
+ DNS Resolver is used by the CIFS and AFS modules, and may be used by
+ SMB2 later. DNS Resolver is handled by the userspace upcall helper
+ "/sbin/dns.resolver" via /etc/request-key.conf.
+
+ See <file:Documentation/networking/dns_resolver.txt> for further
+ information.
+
+ To compile this as a module, choose M here: the module will be called
+ dns_resolver.
+
+ If unsure, say N.
diff --git a/net/dns_resolver/Makefile b/net/dns_resolver/Makefile
new file mode 100644
index 000000000000..c0ef4e71dc49
--- /dev/null
+++ b/net/dns_resolver/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Linux DNS Resolver.
+#
+
+obj-$(CONFIG_DNS_RESOLVER) += dns_resolver.o
+
+dns_resolver-objs := dns_key.o dns_query.o
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
new file mode 100644
index 000000000000..739435a6af39
--- /dev/null
+++ b/net/dns_resolver/dns_key.c
@@ -0,0 +1,293 @@
+/* Key type used to cache DNS lookups made by the kernel
+ *
+ * See Documentation/networking/dns_resolver.txt
+ *
+ * Copyright (c) 2007 Igor Mammedov
+ * Author(s): Igor Mammedov (niallain@gmail.com)
+ * Steve French (sfrench@us.ibm.com)
+ * Wang Lei (wang840925@gmail.com)
+ * David Howells (dhowells@redhat.com)
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/keyctl.h>
+#include <linux/err.h>
+#include <linux/seq_file.h>
+#include <keys/dns_resolver-type.h>
+#include <keys/user-type.h>
+#include "internal.h"
+
+MODULE_DESCRIPTION("DNS Resolver");
+MODULE_AUTHOR("Wang Lei");
+MODULE_LICENSE("GPL");
+
+unsigned dns_resolver_debug;
+module_param_named(debug, dns_resolver_debug, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(debug, "DNS Resolver debugging mask");
+
+const struct cred *dns_resolver_cache;
+
+#define DNS_ERRORNO_OPTION "dnserror"
+
+/*
+ * Instantiate a user defined key for dns_resolver.
+ *
+ * The data must be a NUL-terminated string, with the NUL char accounted for
+ * in datalen.
+ *
+ * If the data contains any '#' characters, then we take the clause after each
+ * one to be an option of the form 'key=value'. The actual data of interest is
+ * the string leading up to the first '#'. For instance:
+ *
+ * "ip1,ip2,...#foo=bar"
+ */
+static int
+dns_resolver_instantiate(struct key *key, const void *_data, size_t datalen)
+{
+ struct user_key_payload *upayload;
+ unsigned long derrno;
+ int ret;
+ size_t result_len = 0;
+ const char *data = _data, *end, *opt;
+
+ kenter("%%%d,%s,'%s',%zu",
+ key->serial, key->description, data, datalen);
+
+ if (datalen <= 1 || !data || data[datalen - 1] != '\0')
+ return -EINVAL;
+ datalen--;
+
+ /* deal with any options embedded in the data */
+ end = data + datalen;
+ opt = memchr(data, '#', datalen);
+ if (!opt) {
+ /* no options: the entire data is the result */
+ kdebug("no options");
+ result_len = datalen;
+ } else {
+ const char *next_opt;
+
+ result_len = opt - data;
+ opt++;
+ kdebug("options: '%s'", opt);
+ do {
+ const char *eq;
+ int opt_len, opt_nlen, opt_vlen, tmp;
+
+ next_opt = memchr(opt, '#', end - opt) ?: end;
+ opt_len = next_opt - opt;
+ if (!opt_len) {
+ printk(KERN_WARNING
+ "Empty option to dns_resolver key %d\n",
+ key->serial);
+ return -EINVAL;
+ }
+
+ eq = memchr(opt, '=', opt_len) ?: end;
+ opt_nlen = eq - opt;
+ eq++;
+ opt_vlen = next_opt - eq; /* will be -1 if no value */
+
+ tmp = opt_vlen >= 0 ? opt_vlen : 0;
+ kdebug("option '%*.*s' val '%*.*s'",
+ opt_nlen, opt_nlen, opt, tmp, tmp, eq);
+
+ /* see if it's an error number representing a DNS error
+ * that's to be recorded as the result in this key */
+ if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 &&
+ memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) {
+ kdebug("dns error number option");
+ if (opt_vlen <= 0)
+ goto bad_option_value;
+
+ ret = strict_strtoul(eq, 10, &derrno);
+ if (ret < 0)
+ goto bad_option_value;
+
+ if (derrno < 1 || derrno > 511)
+ goto bad_option_value;
+
+ kdebug("dns error no. = %lu", derrno);
+ key->type_data.x[0] = -derrno;
+ continue;
+ }
+
+ bad_option_value:
+ printk(KERN_WARNING
+ "Option '%*.*s' to dns_resolver key %d:"
+ " bad/missing value\n",
+ opt_nlen, opt_nlen, opt, key->serial);
+ return -EINVAL;
+ } while (opt = next_opt + 1, opt < end);
+ }
+
+ /* don't cache the result if we're caching an error saying there's no
+ * result */
+ if (key->type_data.x[0]) {
+ kleave(" = 0 [h_error %ld]", key->type_data.x[0]);
+ return 0;
+ }
+
+ kdebug("store result");
+ ret = key_payload_reserve(key, result_len);
+ if (ret < 0)
+ return -EINVAL;
+
+ upayload = kmalloc(sizeof(*upayload) + result_len + 1, GFP_KERNEL);
+ if (!upayload) {
+ kleave(" = -ENOMEM");
+ return -ENOMEM;
+ }
+
+ upayload->datalen = result_len;
+ memcpy(upayload->data, data, result_len);
+ upayload->data[result_len] = '\0';
+ rcu_assign_pointer(key->payload.data, upayload);
+
+ kleave(" = 0");
+ return 0;
+}
+
+/*
+ * The description is of the form "[<type>:]<domain_name>"
+ *
+ * The domain name may be a simple name or an absolute domain name (which
+ * should end with a period). The domain name is case-independent.
+ */
+static int
+dns_resolver_match(const struct key *key, const void *description)
+{
+ int slen, dlen, ret = 0;
+ const char *src = key->description, *dsp = description;
+
+ kenter("%s,%s", src, dsp);
+
+ if (!src || !dsp)
+ goto no_match;
+
+ if (strcasecmp(src, dsp) == 0)
+ goto matched;
+
+ slen = strlen(src);
+ dlen = strlen(dsp);
+ if (slen <= 0 || dlen <= 0)
+ goto no_match;
+ if (src[slen - 1] == '.')
+ slen--;
+ if (dsp[dlen - 1] == '.')
+ dlen--;
+ if (slen != dlen || strncasecmp(src, dsp, slen) != 0)
+ goto no_match;
+
+matched:
+ ret = 1;
+no_match:
+ kleave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * Describe a DNS key
+ */
+static void dns_resolver_describe(const struct key *key, struct seq_file *m)
+{
+ int err = key->type_data.x[0];
+
+ seq_puts(m, key->description);
+ if (err)
+ seq_printf(m, ": %d", err);
+ else
+ seq_printf(m, ": %u", key->datalen);
+}
+
+struct key_type key_type_dns_resolver = {
+ .name = "dns_resolver",
+ .instantiate = dns_resolver_instantiate,
+ .match = dns_resolver_match,
+ .revoke = user_revoke,
+ .destroy = user_destroy,
+ .describe = dns_resolver_describe,
+ .read = user_read,
+};
+
+static int __init init_dns_resolver(void)
+{
+ struct cred *cred;
+ struct key *keyring;
+ int ret;
+
+ printk(KERN_NOTICE "Registering the %s key type\n",
+ key_type_dns_resolver.name);
+
+ /* create an override credential set with a special thread keyring in
+ * which DNS requests are cached
+ *
+ * this is used to prevent malicious redirections from being installed
+ * with add_key().
+ */
+ cred = prepare_kernel_cred(NULL);
+ if (!cred)
+ return -ENOMEM;
+
+ keyring = key_alloc(&key_type_keyring, ".dns_resolver", 0, 0, cred,
+ (KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ,
+ KEY_ALLOC_NOT_IN_QUOTA);
+ if (IS_ERR(keyring)) {
+ ret = PTR_ERR(keyring);
+ goto failed_put_cred;
+ }
+
+ ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
+ if (ret < 0)
+ goto failed_put_key;
+
+ ret = register_key_type(&key_type_dns_resolver);
+ if (ret < 0)
+ goto failed_put_key;
+
+ /* instruct request_key() to use this special keyring as a cache for
+ * the results it looks up */
+ cred->thread_keyring = keyring;
+ cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
+ dns_resolver_cache = cred;
+
+ kdebug("DNS resolver keyring: %d\n", key_serial(keyring));
+ return 0;
+
+failed_put_key:
+ key_put(keyring);
+failed_put_cred:
+ put_cred(cred);
+ return ret;
+}
+
+static void __exit exit_dns_resolver(void)
+{
+ key_revoke(dns_resolver_cache->thread_keyring);
+ unregister_key_type(&key_type_dns_resolver);
+ put_cred(dns_resolver_cache);
+ printk(KERN_NOTICE "Unregistered %s key type\n",
+ key_type_dns_resolver.name);
+}
+
+module_init(init_dns_resolver)
+module_exit(exit_dns_resolver)
+MODULE_LICENSE("GPL");
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
new file mode 100644
index 000000000000..c32be292c7e3
--- /dev/null
+++ b/net/dns_resolver/dns_query.c
@@ -0,0 +1,165 @@
+/* Upcall routine, designed to work as a key type and working through
+ * /sbin/request-key to contact userspace when handling DNS queries.
+ *
+ * See Documentation/networking/dns_resolver.txt
+ *
+ * Copyright (c) 2007 Igor Mammedov
+ * Author(s): Igor Mammedov (niallain@gmail.com)
+ * Steve French (sfrench@us.ibm.com)
+ * Wang Lei (wang840925@gmail.com)
+ * David Howells (dhowells@redhat.com)
+ *
+ * The upcall wrapper used to make an arbitrary DNS query.
+ *
+ * This function requires the appropriate userspace tool dns.upcall to be
+ * installed, and something like the following line should be added to the
+ * /etc/request-key.conf file:
+ *
+ * create dns_resolver * * /sbin/dns.upcall %k
+ *
+ * For example, to use this module to query AFSDB RRs:
+ *
+ * create dns_resolver afsdb:* * /sbin/dns.afsdb %k
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/dns_resolver.h>
+#include <linux/err.h>
+#include <keys/dns_resolver-type.h>
+#include <keys/user-type.h>
+
+#include "internal.h"
+
+/**
+ * dns_query - Query the DNS
+ * @type: Query type (or NULL for straight host->IP lookup)
+ * @name: Name to look up
+ * @namelen: Length of name
+ * @options: Request options (or NULL if no options)
+ * @_result: Where to place the returned data.
+ * @_expiry: Where to store the result expiry time (or NULL)
+ *
+ * The data will be returned in the pointer at *_result, and the caller is
+ * responsible for freeing it.
+ *
+ * The description should be of the form "[<query_type>:]<domain_name>", and
+ * the options need to be appropriate for the query type requested. If no
+ * query_type is given, then the query is a straight hostname to IP address
+ * lookup.
+ *
+ * The DNS resolution lookup is performed by upcalling to userspace by way of
+ * requesting a key of type dns_resolver.
+ *
+ * Returns the size of the result on success, -ve error code otherwise.
+ */
+int dns_query(const char *type, const char *name, size_t namelen,
+ const char *options, char **_result, time_t *_expiry)
+{
+ struct key *rkey;
+ struct user_key_payload *upayload;
+ const struct cred *saved_cred;
+ size_t typelen, desclen;
+ char *desc, *cp;
+ int ret, len;
+
+ kenter("%s,%*.*s,%zu,%s",
+ type, (int)namelen, (int)namelen, name, namelen, options);
+
+ if (!name || namelen == 0 || !_result)
+ return -EINVAL;
+
+ /* construct the query key description as "[<type>:]<name>" */
+ typelen = 0;
+ desclen = 0;
+ if (type) {
+ typelen = strlen(type);
+ if (typelen < 1)
+ return -EINVAL;
+ desclen += typelen + 1;
+ }
+
+ if (!namelen)
+ namelen = strlen(name);
+ if (namelen < 3)
+ return -EINVAL;
+ desclen += namelen + 1;
+
+ desc = kmalloc(desclen, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ cp = desc;
+ if (type) {
+ memcpy(cp, type, typelen);
+ cp += typelen;
+ *cp++ = ':';
+ }
+ memcpy(cp, name, namelen);
+ cp += namelen;
+ *cp = '\0';
+
+ if (!options)
+ options = "";
+ kdebug("call request_key(,%s,%s)", desc, options);
+
+ /* make the upcall, using special credentials to prevent the use of
+ * add_key() to preinstall malicious redirections
+ */
+ saved_cred = override_creds(dns_resolver_cache);
+ rkey = request_key(&key_type_dns_resolver, desc, options);
+ revert_creds(saved_cred);
+ kfree(desc);
+ if (IS_ERR(rkey)) {
+ ret = PTR_ERR(rkey);
+ goto out;
+ }
+
+ down_read(&rkey->sem);
+ rkey->perm |= KEY_USR_VIEW;
+
+ ret = key_validate(rkey);
+ if (ret < 0)
+ goto put;
+
+ /* If the DNS server gave an error, return that to the caller */
+ ret = rkey->type_data.x[0];
+ if (ret)
+ goto put;
+
+ upayload = rcu_dereference_protected(rkey->payload.data,
+ lockdep_is_held(&rkey->sem));
+ len = upayload->datalen;
+
+ ret = -ENOMEM;
+ *_result = kmalloc(len + 1, GFP_KERNEL);
+ if (!*_result)
+ goto put;
+
+ memcpy(*_result, upayload->data, len + 1);
+ if (_expiry)
+ *_expiry = rkey->expiry;
+
+ ret = len;
+put:
+ up_read(&rkey->sem);
+ key_put(rkey);
+out:
+ kleave(" = %d", ret);
+ return ret;
+}
+EXPORT_SYMBOL(dns_query);
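
For context, a minimal sketch of how a kernel-side user might call the dns_query() interface exported above. The caller, the "afsdb" type string and the cell name are hypothetical, and error handling is trimmed:

	#include <linux/dns_resolver.h>
	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	/* Hypothetical caller, not part of this patch. */
	static int example_afsdb_lookup(const char *cell)
	{
		char *vllist = NULL;
		time_t expiry;
		int ret;

		/* Upcall to userspace; on success ret is the result length and
		 * vllist points to a kmalloc'd, NUL-terminated string. */
		ret = dns_query("afsdb", cell, strlen(cell), NULL, &vllist, &expiry);
		if (ret < 0)
			return ret;

		pr_info("resolved %s -> %s\n", cell, vllist);
		kfree(vllist);	/* caller is responsible for freeing the result */
		return 0;
	}
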
diff --git a/net/dns_resolver/internal.h b/net/dns_resolver/internal.h
new file mode 100644
index 000000000000..189ca9e9b785
--- /dev/null
+++ b/net/dns_resolver/internal.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2010 Wang Lei
+ * Author(s): Wang Lei (wang840925@gmail.com). All Rights Reserved.
+ *
+ * Internal DNS Resolver stuff
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+/*
+ * dns_key.c
+ */
+extern const struct cred *dns_resolver_cache;
+
+/*
+ * debug tracing
+ */
+extern unsigned dns_resolver_debug;
+
+#define kdebug(FMT, ...) \
+do { \
+ if (unlikely(dns_resolver_debug)) \
+ printk(KERN_DEBUG "[%-6.6s] "FMT"\n", \
+ current->comm, ##__VA_ARGS__); \
+} while (0)
+
+#define kenter(FMT, ...) kdebug("==> %s("FMT")", __func__, ##__VA_ARGS__)
+#define kleave(FMT, ...) kdebug("<== %s()"FMT"", __func__, ##__VA_ARGS__)
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 11201784d29a..87bb5f4de0e8 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -1,7 +1,7 @@
menuconfig NET_DSA
bool "Distributed Switch Architecture support"
default n
- depends on EXPERIMENTAL && NET_ETHERNET && !S390
+ depends on EXPERIMENTAL && NETDEVICES && !S390
select PHYLIB
---help---
This allows you to use hardware switch chips that use
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index dc54bd0d083b..f8c1ae4b41f0 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -392,7 +392,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
dev_queue_xmit(skb);
dev_put(dev);
mutex_unlock(&econet_mutex);
- return(len);
+ return len;
out_free:
kfree_skb(skb);
@@ -637,7 +637,7 @@ static int econet_create(struct net *net, struct socket *sock, int protocol,
eo->num = protocol;
econet_insert_socket(&econet_sklist, sk);
- return(0);
+ return 0;
out:
return err;
}
@@ -1009,7 +1009,6 @@ static int __init aun_udp_initialise(void)
struct sockaddr_in sin;
skb_queue_head_init(&aun_queue);
- spin_lock_init(&aun_queue_lock);
setup_timer(&ab_cleanup_timer, ab_cleanup, 0);
ab_cleanup_timer.expires = jiffies + (HZ*2);
add_timer(&ab_cleanup_timer);
@@ -1167,7 +1166,6 @@ static int __init econet_proto_init(void)
goto out;
sock_register(&econet_family_ops);
#ifdef CONFIG_ECONET_AUNUDP
- spin_lock_init(&aun_queue_lock);
aun_udp_initialise();
#endif
#ifdef CONFIG_ECONET_NATIVE
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 215c83986a9d..f00ef2f1d814 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -367,7 +367,7 @@ struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count)
EXPORT_SYMBOL(alloc_etherdev_mq);
static size_t _format_mac_addr(char *buf, int buflen,
- const unsigned char *addr, int len)
+ const unsigned char *addr, int len)
{
int i;
char *cp = buf;
@@ -376,7 +376,7 @@ static size_t _format_mac_addr(char *buf, int buflen,
cp += scnprintf(cp, buflen - (cp - buf), "%02x", addr[i]);
if (i == len - 1)
break;
- cp += strlcpy(cp, ":", buflen - (cp - buf));
+ cp += scnprintf(cp, buflen - (cp - buf), ":");
}
return cp - buf;
}
@@ -386,7 +386,7 @@ ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
size_t l;
l = _format_mac_addr(buf, PAGE_SIZE, addr, len);
- l += strlcpy(buf + l, "\n", PAGE_SIZE - l);
- return ((ssize_t) l);
+ l += scnprintf(buf + l, PAGE_SIZE - l, "\n");
+ return (ssize_t)l;
}
EXPORT_SYMBOL(sysfs_format_mac);
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 7c3a7d191249..e848e6c062cd 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -46,7 +46,7 @@ config IP_ADVANCED_ROUTER
rp_filter on use:
echo 1 > /proc/sys/net/ipv4/conf/<device>/rp_filter
- and
+ or
echo 1 > /proc/sys/net/ipv4/conf/all/rp_filter
Note that some distributions enable it in startup scripts.
@@ -215,8 +215,15 @@ config NET_IPIP
be inserted in and removed from the running kernel whenever you
want). Most people won't need this and can say N.
+config NET_IPGRE_DEMUX
+ tristate "IP: GRE demultiplexer"
+ help
+ This is a helper module to demultiplex GRE packets based on the GRE
+ version field. It is required by the ip_gre and pptp modules.
+
config NET_IPGRE
tristate "IP: GRE tunnels over IP"
+ depends on (IPV6 || IPV6=n) && NET_IPGRE_DEMUX
help
Tunneling means encapsulating data of one protocol type within
another protocol and sending it over a channel that understands the
@@ -412,7 +419,7 @@ config INET_XFRM_MODE_BEET
If unsure, say Y.
config INET_LRO
- bool "Large Receive Offload (ipv4/tcp)"
+ tristate "Large Receive Offload (ipv4/tcp)"
default y
---help---
Support for Large Receive Offload (ipv4/tcp).
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 80ff87ce43aa..4978d22f9a75 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
obj-$(CONFIG_IP_MROUTE) += ipmr.o
obj-$(CONFIG_NET_IPIP) += ipip.o
+obj-$(CONFIG_NET_IPGRE_DEMUX) += gre.o
obj-$(CONFIG_NET_IPGRE) += ip_gre.o
obj-$(CONFIG_SYN_COOKIES) += syncookies.o
obj-$(CONFIG_INET_AH) += ah4.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 6a1100c25a9f..f581f77d1097 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -227,18 +227,16 @@ EXPORT_SYMBOL(inet_ehash_secret);
/*
* inet_ehash_secret must be set exactly once
- * Instead of using a dedicated spinlock, we (ab)use inetsw_lock
*/
void build_ehash_secret(void)
{
u32 rnd;
+
do {
get_random_bytes(&rnd, sizeof(rnd));
} while (rnd == 0);
- spin_lock_bh(&inetsw_lock);
- if (!inet_ehash_secret)
- inet_ehash_secret = rnd;
- spin_unlock_bh(&inetsw_lock);
+
+ cmpxchg(&inet_ehash_secret, 0, rnd);
}
EXPORT_SYMBOL(build_ehash_secret);
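
The build_ehash_secret() change above swaps the spinlock for a lock-free once-only initialisation via cmpxchg(). The same pattern, sketched standalone in C11 (illustrative names only, not kernel code):

	#include <stdatomic.h>
	#include <stdint.h>

	static _Atomic uint32_t secret;

	/* The first caller with a non-zero rnd wins; later calls leave the value
	 * alone, mirroring cmpxchg(&inet_ehash_secret, 0, rnd) above. */
	static void secret_set_once(uint32_t rnd)
	{
		uint32_t expected = 0;

		atomic_compare_exchange_strong(&secret, &expected, rnd);
	}
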
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 96c1955b3e2f..f35309578170 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -55,7 +55,7 @@
* Stuart Cheshire : Metricom and grat arp fixes
* *** FOR 2.1 clean this up ***
* Lawrence V. Stefani: (08/12/96) Added FDDI support.
- * Alan Cox : Took the AP1000 nasty FDDI hack and
+ * Alan Cox : Took the AP1000 nasty FDDI hack and
* folded into the mainstream FDDI code.
* Ack spit, Linus how did you allow that
* one in...
@@ -120,14 +120,14 @@ EXPORT_SYMBOL(clip_tbl_hook);
#endif
#include <asm/system.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <linux/netfilter_arp.h>
/*
* Interface to generic neighbour cache.
*/
-static u32 arp_hash(const void *pkey, const struct net_device *dev);
+static u32 arp_hash(const void *pkey, const struct net_device *dev, __u32 rnd);
static int arp_constructor(struct neighbour *neigh);
static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb);
static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb);
@@ -161,7 +161,7 @@ static const struct neigh_ops arp_direct_ops = {
.queue_xmit = dev_queue_xmit,
};
-const struct neigh_ops arp_broken_ops = {
+static const struct neigh_ops arp_broken_ops = {
.family = AF_INET,
.solicit = arp_solicit,
.error_report = arp_error_report,
@@ -170,35 +170,34 @@ const struct neigh_ops arp_broken_ops = {
.hh_output = dev_queue_xmit,
.queue_xmit = dev_queue_xmit,
};
-EXPORT_SYMBOL(arp_broken_ops);
struct neigh_table arp_tbl = {
- .family = AF_INET,
- .entry_size = sizeof(struct neighbour) + 4,
- .key_len = 4,
- .hash = arp_hash,
- .constructor = arp_constructor,
- .proxy_redo = parp_redo,
- .id = "arp_cache",
- .parms = {
- .tbl = &arp_tbl,
- .base_reachable_time = 30 * HZ,
- .retrans_time = 1 * HZ,
- .gc_staletime = 60 * HZ,
- .reachable_time = 30 * HZ,
- .delay_probe_time = 5 * HZ,
- .queue_len = 3,
- .ucast_probes = 3,
- .mcast_probes = 3,
- .anycast_delay = 1 * HZ,
- .proxy_delay = (8 * HZ) / 10,
- .proxy_qlen = 64,
- .locktime = 1 * HZ,
+ .family = AF_INET,
+ .entry_size = sizeof(struct neighbour) + 4,
+ .key_len = 4,
+ .hash = arp_hash,
+ .constructor = arp_constructor,
+ .proxy_redo = parp_redo,
+ .id = "arp_cache",
+ .parms = {
+ .tbl = &arp_tbl,
+ .base_reachable_time = 30 * HZ,
+ .retrans_time = 1 * HZ,
+ .gc_staletime = 60 * HZ,
+ .reachable_time = 30 * HZ,
+ .delay_probe_time = 5 * HZ,
+ .queue_len = 3,
+ .ucast_probes = 3,
+ .mcast_probes = 3,
+ .anycast_delay = 1 * HZ,
+ .proxy_delay = (8 * HZ) / 10,
+ .proxy_qlen = 64,
+ .locktime = 1 * HZ,
},
- .gc_interval = 30 * HZ,
- .gc_thresh1 = 128,
- .gc_thresh2 = 512,
- .gc_thresh3 = 1024,
+ .gc_interval = 30 * HZ,
+ .gc_thresh1 = 128,
+ .gc_thresh2 = 512,
+ .gc_thresh3 = 1024,
};
EXPORT_SYMBOL(arp_tbl);
@@ -226,14 +225,16 @@ int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir)
}
-static u32 arp_hash(const void *pkey, const struct net_device *dev)
+static u32 arp_hash(const void *pkey,
+ const struct net_device *dev,
+ __u32 hash_rnd)
{
- return jhash_2words(*(u32 *)pkey, dev->ifindex, arp_tbl.hash_rnd);
+ return jhash_2words(*(u32 *)pkey, dev->ifindex, hash_rnd);
}
static int arp_constructor(struct neighbour *neigh)
{
- __be32 addr = *(__be32*)neigh->primary_key;
+ __be32 addr = *(__be32 *)neigh->primary_key;
struct net_device *dev = neigh->dev;
struct in_device *in_dev;
struct neigh_parms *parms;
@@ -296,16 +297,19 @@ static int arp_constructor(struct neighbour *neigh)
neigh->ops = &arp_broken_ops;
neigh->output = neigh->ops->output;
return 0;
+#else
+ break;
#endif
- ;}
+ }
#endif
if (neigh->type == RTN_MULTICAST) {
neigh->nud_state = NUD_NOARP;
arp_mc_map(addr, neigh->ha, dev, 1);
- } else if (dev->flags&(IFF_NOARP|IFF_LOOPBACK)) {
+ } else if (dev->flags & (IFF_NOARP | IFF_LOOPBACK)) {
neigh->nud_state = NUD_NOARP;
memcpy(neigh->ha, dev->dev_addr, dev->addr_len);
- } else if (neigh->type == RTN_BROADCAST || dev->flags&IFF_POINTOPOINT) {
+ } else if (neigh->type == RTN_BROADCAST ||
+ (dev->flags & IFF_POINTOPOINT)) {
neigh->nud_state = NUD_NOARP;
memcpy(neigh->ha, dev->broadcast, dev->addr_len);
}
@@ -315,7 +319,7 @@ static int arp_constructor(struct neighbour *neigh)
else
neigh->ops = &arp_generic_ops;
- if (neigh->nud_state&NUD_VALID)
+ if (neigh->nud_state & NUD_VALID)
neigh->output = neigh->ops->connected_output;
else
neigh->output = neigh->ops->output;
@@ -334,7 +338,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
__be32 saddr = 0;
u8 *dst_ha = NULL;
struct net_device *dev = neigh->dev;
- __be32 target = *(__be32*)neigh->primary_key;
+ __be32 target = *(__be32 *)neigh->primary_key;
int probes = atomic_read(&neigh->probes);
struct in_device *in_dev;
@@ -347,7 +351,8 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
switch (IN_DEV_ARP_ANNOUNCE(in_dev)) {
default:
case 0: /* By default announce any local IP */
- if (skb && inet_addr_type(dev_net(dev), ip_hdr(skb)->saddr) == RTN_LOCAL)
+ if (skb && inet_addr_type(dev_net(dev),
+ ip_hdr(skb)->saddr) == RTN_LOCAL)
saddr = ip_hdr(skb)->saddr;
break;
case 1: /* Restrict announcements of saddr in same subnet */
@@ -369,16 +374,21 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
if (!saddr)
saddr = inet_select_addr(dev, target, RT_SCOPE_LINK);
- if ((probes -= neigh->parms->ucast_probes) < 0) {
- if (!(neigh->nud_state&NUD_VALID))
- printk(KERN_DEBUG "trying to ucast probe in NUD_INVALID\n");
+ probes -= neigh->parms->ucast_probes;
+ if (probes < 0) {
+ if (!(neigh->nud_state & NUD_VALID))
+ printk(KERN_DEBUG
+ "trying to ucast probe in NUD_INVALID\n");
dst_ha = neigh->ha;
read_lock_bh(&neigh->lock);
- } else if ((probes -= neigh->parms->app_probes) < 0) {
+ } else {
+ probes -= neigh->parms->app_probes;
+ if (probes < 0) {
#ifdef CONFIG_ARPD
- neigh_app_ns(neigh);
+ neigh_app_ns(neigh);
#endif
- return;
+ return;
+ }
}
arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr,
@@ -451,7 +461,8 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
* is allowed to use this function, it is scheduled to be removed. --ANK
*/
-static int arp_set_predefined(int addr_hint, unsigned char * haddr, __be32 paddr, struct net_device * dev)
+static int arp_set_predefined(int addr_hint, unsigned char *haddr,
+ __be32 paddr, struct net_device *dev)
{
switch (addr_hint) {
case RTN_LOCAL:
@@ -483,7 +494,8 @@ int arp_find(unsigned char *haddr, struct sk_buff *skb)
paddr = skb_rtable(skb)->rt_gateway;
- if (arp_set_predefined(inet_addr_type(dev_net(dev), paddr), haddr, paddr, dev))
+ if (arp_set_predefined(inet_addr_type(dev_net(dev), paddr), haddr,
+ paddr, dev))
return 0;
n = __neigh_lookup(&arp_tbl, &paddr, dev, 1);
@@ -515,13 +527,14 @@ int arp_bind_neighbour(struct dst_entry *dst)
return -EINVAL;
if (n == NULL) {
__be32 nexthop = ((struct rtable *)dst)->rt_gateway;
- if (dev->flags&(IFF_LOOPBACK|IFF_POINTOPOINT))
+ if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
nexthop = 0;
n = __neigh_lookup_errno(
#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
- dev->type == ARPHRD_ATM ? clip_tbl_hook :
+ dev->type == ARPHRD_ATM ?
+ clip_tbl_hook :
#endif
- &arp_tbl, &nexthop, dev);
+ &arp_tbl, &nexthop, dev);
if (IS_ERR(n))
return PTR_ERR(n);
dst->neighbour = n;
@@ -543,8 +556,8 @@ static inline int arp_fwd_proxy(struct in_device *in_dev,
if (!IN_DEV_PROXY_ARP(in_dev))
return 0;
-
- if ((imi = IN_DEV_MEDIUM_ID(in_dev)) == 0)
+ imi = IN_DEV_MEDIUM_ID(in_dev);
+ if (imi == 0)
return 1;
if (imi == -1)
return 0;
@@ -555,7 +568,7 @@ static inline int arp_fwd_proxy(struct in_device *in_dev,
if (out_dev)
omi = IN_DEV_MEDIUM_ID(out_dev);
- return (omi != imi && omi != -1);
+ return omi != imi && omi != -1;
}
/*
@@ -685,7 +698,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
arp->ar_pln = 4;
arp->ar_op = htons(type);
- arp_ptr=(unsigned char *)(arp+1);
+ arp_ptr = (unsigned char *)(arp + 1);
memcpy(arp_ptr, src_hw, dev->addr_len);
arp_ptr += dev->addr_len;
@@ -735,9 +748,8 @@ void arp_send(int type, int ptype, __be32 dest_ip,
skb = arp_create(type, ptype, dest_ip, dev, src_ip,
dest_hw, src_hw, target_hw);
- if (skb == NULL) {
+ if (skb == NULL)
return;
- }
arp_xmit(skb);
}
@@ -815,7 +827,7 @@ static int arp_process(struct sk_buff *skb)
/*
* Extract fields
*/
- arp_ptr= (unsigned char *)(arp+1);
+ arp_ptr = (unsigned char *)(arp + 1);
sha = arp_ptr;
arp_ptr += dev->addr_len;
memcpy(&sip, arp_ptr, 4);
@@ -869,16 +881,17 @@ static int arp_process(struct sk_buff *skb)
addr_type = rt->rt_type;
if (addr_type == RTN_LOCAL) {
- int dont_send = 0;
+ int dont_send;
- if (!dont_send)
- dont_send |= arp_ignore(in_dev,sip,tip);
+ dont_send = arp_ignore(in_dev, sip, tip);
if (!dont_send && IN_DEV_ARPFILTER(in_dev))
- dont_send |= arp_filter(sip,tip,dev);
+ dont_send |= arp_filter(sip, tip, dev);
if (!dont_send) {
n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
if (n) {
- arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
+ arp_send(ARPOP_REPLY, ETH_P_ARP, sip,
+ dev, tip, sha, dev->dev_addr,
+ sha);
neigh_release(n);
}
}
@@ -887,8 +900,7 @@ static int arp_process(struct sk_buff *skb)
if (addr_type == RTN_UNICAST &&
(arp_fwd_proxy(in_dev, dev, rt) ||
arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
- pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))
- {
+ pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) {
n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
if (n)
neigh_release(n);
@@ -896,9 +908,12 @@ static int arp_process(struct sk_buff *skb)
if (NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED ||
skb->pkt_type == PACKET_HOST ||
in_dev->arp_parms->proxy_delay == 0) {
- arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
+ arp_send(ARPOP_REPLY, ETH_P_ARP, sip,
+ dev, tip, sha, dev->dev_addr,
+ sha);
} else {
- pneigh_enqueue(&arp_tbl, in_dev->arp_parms, skb);
+ pneigh_enqueue(&arp_tbl,
+ in_dev->arp_parms, skb);
return 0;
}
goto out;
@@ -939,7 +954,8 @@ static int arp_process(struct sk_buff *skb)
if (arp->ar_op != htons(ARPOP_REPLY) ||
skb->pkt_type != PACKET_HOST)
state = NUD_STALE;
- neigh_update(n, sha, state, override ? NEIGH_UPDATE_F_OVERRIDE : 0);
+ neigh_update(n, sha, state,
+ override ? NEIGH_UPDATE_F_OVERRIDE : 0);
neigh_release(n);
}
@@ -975,7 +991,8 @@ static int arp_rcv(struct sk_buff *skb, struct net_device *dev,
arp->ar_pln != 4)
goto freeskb;
- if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (skb == NULL)
goto out_of_mem;
memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
@@ -1019,7 +1036,7 @@ static int arp_req_set_public(struct net *net, struct arpreq *r,
return -EINVAL;
if (!dev && (r->arp_flags & ATF_COM)) {
dev = dev_getbyhwaddr(net, r->arp_ha.sa_family,
- r->arp_ha.sa_data);
+ r->arp_ha.sa_data);
if (!dev)
return -ENODEV;
}
@@ -1033,7 +1050,7 @@ static int arp_req_set_public(struct net *net, struct arpreq *r,
}
static int arp_req_set(struct net *net, struct arpreq *r,
- struct net_device * dev)
+ struct net_device *dev)
{
__be32 ip;
struct neighbour *neigh;
@@ -1046,10 +1063,11 @@ static int arp_req_set(struct net *net, struct arpreq *r,
if (r->arp_flags & ATF_PERM)
r->arp_flags |= ATF_COM;
if (dev == NULL) {
- struct flowi fl = { .nl_u = { .ip4_u = { .daddr = ip,
- .tos = RTO_ONLINK } } };
- struct rtable * rt;
- if ((err = ip_route_output_key(net, &rt, &fl)) != 0)
+ struct flowi fl = { .nl_u.ip4_u = { .daddr = ip,
+ .tos = RTO_ONLINK } };
+ struct rtable *rt;
+ err = ip_route_output_key(net, &rt, &fl);
+ if (err != 0)
return err;
dev = rt->dst.dev;
ip_rt_put(rt);
@@ -1083,9 +1101,9 @@ static int arp_req_set(struct net *net, struct arpreq *r,
unsigned state = NUD_STALE;
if (r->arp_flags & ATF_PERM)
state = NUD_PERMANENT;
- err = neigh_update(neigh, (r->arp_flags&ATF_COM) ?
+ err = neigh_update(neigh, (r->arp_flags & ATF_COM) ?
r->arp_ha.sa_data : NULL, state,
- NEIGH_UPDATE_F_OVERRIDE|
+ NEIGH_UPDATE_F_OVERRIDE |
NEIGH_UPDATE_F_ADMIN);
neigh_release(neigh);
}
@@ -1094,12 +1112,12 @@ static int arp_req_set(struct net *net, struct arpreq *r,
static unsigned arp_state_to_flags(struct neighbour *neigh)
{
- unsigned flags = 0;
if (neigh->nud_state&NUD_PERMANENT)
- flags = ATF_PERM|ATF_COM;
+ return ATF_PERM | ATF_COM;
else if (neigh->nud_state&NUD_VALID)
- flags = ATF_COM;
- return flags;
+ return ATF_COM;
+ else
+ return 0;
}
/*
@@ -1142,7 +1160,7 @@ static int arp_req_delete_public(struct net *net, struct arpreq *r,
}
static int arp_req_delete(struct net *net, struct arpreq *r,
- struct net_device * dev)
+ struct net_device *dev)
{
int err;
__be32 ip;
@@ -1153,10 +1171,11 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
if (dev == NULL) {
- struct flowi fl = { .nl_u = { .ip4_u = { .daddr = ip,
- .tos = RTO_ONLINK } } };
- struct rtable * rt;
- if ((err = ip_route_output_key(net, &rt, &fl)) != 0)
+ struct flowi fl = { .nl_u.ip4_u = { .daddr = ip,
+ .tos = RTO_ONLINK } };
+ struct rtable *rt;
+ err = ip_route_output_key(net, &rt, &fl);
+ if (err != 0)
return err;
dev = rt->dst.dev;
ip_rt_put(rt);
@@ -1166,7 +1185,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
err = -ENXIO;
neigh = neigh_lookup(&arp_tbl, &ip, dev);
if (neigh) {
- if (neigh->nud_state&~NUD_NOARP)
+ if (neigh->nud_state & ~NUD_NOARP)
err = neigh_update(neigh, NULL, NUD_FAILED,
NEIGH_UPDATE_F_OVERRIDE|
NEIGH_UPDATE_F_ADMIN);
@@ -1186,24 +1205,24 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
struct net_device *dev = NULL;
switch (cmd) {
- case SIOCDARP:
- case SIOCSARP:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- case SIOCGARP:
- err = copy_from_user(&r, arg, sizeof(struct arpreq));
- if (err)
- return -EFAULT;
- break;
- default:
- return -EINVAL;
+ case SIOCDARP:
+ case SIOCSARP:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ case SIOCGARP:
+ err = copy_from_user(&r, arg, sizeof(struct arpreq));
+ if (err)
+ return -EFAULT;
+ break;
+ default:
+ return -EINVAL;
}
if (r.arp_pa.sa_family != AF_INET)
return -EPFNOSUPPORT;
if (!(r.arp_flags & ATF_PUBL) &&
- (r.arp_flags & (ATF_NETMASK|ATF_DONTPUB)))
+ (r.arp_flags & (ATF_NETMASK | ATF_DONTPUB)))
return -EINVAL;
if (!(r.arp_flags & ATF_NETMASK))
((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr =
@@ -1211,7 +1230,8 @@ int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg)
rtnl_lock();
if (r.arp_dev[0]) {
err = -ENODEV;
- if ((dev = __dev_get_by_name(net, r.arp_dev)) == NULL)
+ dev = __dev_get_by_name(net, r.arp_dev);
+ if (dev == NULL)
goto out;
/* Mmmm... It is wrong... ARPHRD_NETROM==0 */
@@ -1243,7 +1263,8 @@ out:
return err;
}
-static int arp_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
+static int arp_netdev_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
{
struct net_device *dev = ptr;
@@ -1311,12 +1332,13 @@ static char *ax2asc2(ax25_address *a, char *buf)
for (n = 0, s = buf; n < 6; n++) {
c = (a->ax25_call[n] >> 1) & 0x7F;
- if (c != ' ') *s++ = c;
+ if (c != ' ')
+ *s++ = c;
}
*s++ = '-';
-
- if ((n = ((a->ax25_call[6] >> 1) & 0x0F)) > 9) {
+ n = (a->ax25_call[6] >> 1) & 0x0F;
+ if (n > 9) {
*s++ = '1';
n -= 10;
}
@@ -1325,10 +1347,9 @@ static char *ax2asc2(ax25_address *a, char *buf)
*s++ = '\0';
if (*buf == '\0' || *buf == '-')
- return "*";
+ return "*";
return buf;
-
}
#endif /* CONFIG_AX25 */
@@ -1408,10 +1429,10 @@ static void *arp_seq_start(struct seq_file *seq, loff_t *pos)
/* ------------------------------------------------------------------------ */
static const struct seq_operations arp_seq_ops = {
- .start = arp_seq_start,
- .next = neigh_seq_next,
- .stop = neigh_seq_stop,
- .show = arp_seq_show,
+ .start = arp_seq_start,
+ .next = neigh_seq_next,
+ .stop = neigh_seq_stop,
+ .show = arp_seq_show,
};
static int arp_seq_open(struct inode *inode, struct file *file)
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index f0550941df7b..174be6caa5c8 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -62,14 +62,17 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
}
if (!inet->inet_saddr)
inet->inet_saddr = rt->rt_src; /* Update source address */
- if (!inet->inet_rcv_saddr)
+ if (!inet->inet_rcv_saddr) {
inet->inet_rcv_saddr = rt->rt_src;
+ if (sk->sk_prot->rehash)
+ sk->sk_prot->rehash(sk);
+ }
inet->inet_daddr = rt->rt_dst;
inet->inet_dport = usin->sin_port;
sk->sk_state = TCP_ESTABLISHED;
inet->inet_id = jiffies;
sk_dst_set(sk, &rt->dst);
- return(0);
+ return 0;
}
EXPORT_SYMBOL(ip4_datagram_connect);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index da14c49284f4..c2ff48fa18c7 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -209,7 +209,7 @@ static void inetdev_destroy(struct in_device *in_dev)
inet_free_ifa(ifa);
}
- dev->ip_ptr = NULL;
+ rcu_assign_pointer(dev->ip_ptr, NULL);
devinet_sysctl_unregister(in_dev);
neigh_parms_release(&arp_tbl, in_dev->arp_parms);
@@ -1059,7 +1059,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
switch (event) {
case NETDEV_REGISTER:
printk(KERN_DEBUG "inetdev_event: bug\n");
- dev->ip_ptr = NULL;
+ rcu_assign_pointer(dev->ip_ptr, NULL);
break;
case NETDEV_UP:
if (!inetdev_valid_mtu(dev->mtu))
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index a43968918350..919f2ad19b49 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -147,35 +147,43 @@ static void fib_flush(struct net *net)
rt_cache_flush(net, -1);
}
-/*
- * Find the first device with a given source address.
+/**
+ * __ip_dev_find - find the first device with a given source address.
+ * @net: the net namespace
+ * @addr: the source address
+ * @devref: if true, take a reference on the found device
+ *
+ * If a caller uses devref=false, it should be protected by RCU
*/
-
-struct net_device * ip_dev_find(struct net *net, __be32 addr)
+struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
{
- struct flowi fl = { .nl_u = { .ip4_u = { .daddr = addr } } };
- struct fib_result res;
+ struct flowi fl = {
+ .nl_u = {
+ .ip4_u = {
+ .daddr = addr
+ }
+ },
+ .flags = FLOWI_FLAG_MATCH_ANY_IIF
+ };
+ struct fib_result res = { 0 };
struct net_device *dev = NULL;
- struct fib_table *local_table;
-
-#ifdef CONFIG_IP_MULTIPLE_TABLES
- res.r = NULL;
-#endif
- local_table = fib_get_table(net, RT_TABLE_LOCAL);
- if (!local_table || fib_table_lookup(local_table, &fl, &res))
+ rcu_read_lock();
+ if (fib_lookup(net, &fl, &res)) {
+ rcu_read_unlock();
return NULL;
+ }
if (res.type != RTN_LOCAL)
goto out;
dev = FIB_RES_DEV(res);
- if (dev)
+ if (dev && devref)
dev_hold(dev);
out:
- fib_res_put(&res);
+ rcu_read_unlock();
return dev;
}
-EXPORT_SYMBOL(ip_dev_find);
+EXPORT_SYMBOL(__ip_dev_find);
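A minimal caller sketch (not part of the patch) for the devref semantics documented above: with devref=false no reference is taken on the device, so the lookup must stay inside an RCU read-side section. The wrapper name addr_is_local() is hypothetical.

static bool addr_is_local(struct net *net, __be32 addr)
{
	struct net_device *dev;
	bool ret;

	rcu_read_lock();
	dev = __ip_dev_find(net, addr, false);	/* no dev_hold() taken */
	ret = (dev != NULL);
	rcu_read_unlock();		/* dev must not be dereferenced after this */
	return ret;
}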
/*
* Find address type as if only "dev" was present in the system. If
@@ -202,11 +210,12 @@ static inline unsigned __inet_dev_addr_type(struct net *net,
local_table = fib_get_table(net, RT_TABLE_LOCAL);
if (local_table) {
ret = RTN_UNICAST;
- if (!fib_table_lookup(local_table, &fl, &res)) {
+ rcu_read_lock();
+ if (!fib_table_lookup(local_table, &fl, &res, FIB_LOOKUP_NOREF)) {
if (!dev || dev == res.fi->fib_dev)
ret = res.type;
- fib_res_put(&res);
}
+ rcu_read_unlock();
}
return ret;
}
@@ -220,37 +229,41 @@ EXPORT_SYMBOL(inet_addr_type);
unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
__be32 addr)
{
- return __inet_dev_addr_type(net, dev, addr);
+ return __inet_dev_addr_type(net, dev, addr);
}
EXPORT_SYMBOL(inet_dev_addr_type);
/* Given (packet source, input interface) and optional (dst, oif, tos):
- - (main) check, that source is valid i.e. not broadcast or our local
- address.
- - figure out what "logical" interface this packet arrived
- and calculate "specific destination" address.
- - check, that packet arrived from expected physical interface.
+ * - (main) check, that source is valid i.e. not broadcast or our local
+ * address.
+ * - figure out what "logical" interface this packet arrived
+ * and calculate "specific destination" address.
+ * - check, that packet arrived from expected physical interface.
+ * called with rcu_read_lock()
*/
-
int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
struct net_device *dev, __be32 *spec_dst,
u32 *itag, u32 mark)
{
struct in_device *in_dev;
- struct flowi fl = { .nl_u = { .ip4_u =
- { .daddr = src,
- .saddr = dst,
- .tos = tos } },
- .mark = mark,
- .iif = oif };
-
+ struct flowi fl = {
+ .nl_u = {
+ .ip4_u = {
+ .daddr = src,
+ .saddr = dst,
+ .tos = tos
+ }
+ },
+ .mark = mark,
+ .iif = oif
+ };
struct fib_result res;
int no_addr, rpf, accept_local;
+ bool dev_match;
int ret;
struct net *net;
no_addr = rpf = accept_local = 0;
- rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
if (in_dev) {
no_addr = in_dev->ifa_list == NULL;
@@ -259,7 +272,6 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
if (mark && !IN_DEV_SRC_VMARK(in_dev))
fl.mark = 0;
}
- rcu_read_unlock();
if (in_dev == NULL)
goto e_inval;
@@ -269,21 +281,29 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
goto last_resort;
if (res.type != RTN_UNICAST) {
if (res.type != RTN_LOCAL || !accept_local)
- goto e_inval_res;
+ goto e_inval;
}
*spec_dst = FIB_RES_PREFSRC(res);
fib_combine_itag(itag, &res);
+ dev_match = false;
+
#ifdef CONFIG_IP_ROUTE_MULTIPATH
- if (FIB_RES_DEV(res) == dev || res.fi->fib_nhs > 1)
+ for (ret = 0; ret < res.fi->fib_nhs; ret++) {
+ struct fib_nh *nh = &res.fi->fib_nh[ret];
+
+ if (nh->nh_dev == dev) {
+ dev_match = true;
+ break;
+ }
+ }
#else
if (FIB_RES_DEV(res) == dev)
+ dev_match = true;
#endif
- {
+ if (dev_match) {
ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
- fib_res_put(&res);
return ret;
}
- fib_res_put(&res);
if (no_addr)
goto last_resort;
if (rpf == 1)
@@ -296,7 +316,6 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
*spec_dst = FIB_RES_PREFSRC(res);
ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
}
- fib_res_put(&res);
}
return ret;
@@ -307,8 +326,6 @@ last_resort:
*itag = 0;
return 0;
-e_inval_res:
- fib_res_put(&res);
e_inval:
return -EINVAL;
e_rpf:
@@ -461,9 +478,9 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
}
/*
- * Handle IP routing ioctl calls. These are used to manipulate the routing tables
+ * Handle IP routing ioctl calls.
+ * These are used to manipulate the routing tables
*/
-
int ip_rt_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
struct fib_config cfg;
@@ -507,7 +524,7 @@ int ip_rt_ioctl(struct net *net, unsigned int cmd, void __user *arg)
return -EINVAL;
}
-const struct nla_policy rtm_ipv4_policy[RTA_MAX+1] = {
+const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
[RTA_DST] = { .type = NLA_U32 },
[RTA_SRC] = { .type = NLA_U32 },
[RTA_IIF] = { .type = NLA_U32 },
@@ -521,7 +538,7 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX+1] = {
};
static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
- struct nlmsghdr *nlh, struct fib_config *cfg)
+ struct nlmsghdr *nlh, struct fib_config *cfg)
{
struct nlattr *attr;
int err, remaining;
@@ -676,12 +693,11 @@ out:
}
/* Prepare and feed intra-kernel routing request.
- Really, it should be netlink message, but :-( netlink
- can be not configured, so that we feed it directly
- to fib engine. It is legal, because all events occur
- only when netlink is already locked.
+ * Really, it should be netlink message, but :-( netlink
+ * can be not configured, so that we feed it directly
+ * to fib engine. It is legal, because all events occur
+ * only when netlink is already locked.
*/
-
static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifaddr *ifa)
{
struct net *net = dev_net(ifa->ifa_dev->dev);
@@ -727,9 +743,9 @@ void fib_add_ifaddr(struct in_ifaddr *ifa)
struct in_ifaddr *prim = ifa;
__be32 mask = ifa->ifa_mask;
__be32 addr = ifa->ifa_local;
- __be32 prefix = ifa->ifa_address&mask;
+ __be32 prefix = ifa->ifa_address & mask;
- if (ifa->ifa_flags&IFA_F_SECONDARY) {
+ if (ifa->ifa_flags & IFA_F_SECONDARY) {
prim = inet_ifa_byprefix(in_dev, prefix, mask);
if (prim == NULL) {
printk(KERN_WARNING "fib_add_ifaddr: bug: prim == NULL\n");
@@ -739,22 +755,24 @@ void fib_add_ifaddr(struct in_ifaddr *ifa)
fib_magic(RTM_NEWROUTE, RTN_LOCAL, addr, 32, prim);
- if (!(dev->flags&IFF_UP))
+ if (!(dev->flags & IFF_UP))
return;
/* Add broadcast address, if it is explicitly assigned. */
if (ifa->ifa_broadcast && ifa->ifa_broadcast != htonl(0xFFFFFFFF))
fib_magic(RTM_NEWROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
- if (!ipv4_is_zeronet(prefix) && !(ifa->ifa_flags&IFA_F_SECONDARY) &&
+ if (!ipv4_is_zeronet(prefix) && !(ifa->ifa_flags & IFA_F_SECONDARY) &&
(prefix != addr || ifa->ifa_prefixlen < 32)) {
- fib_magic(RTM_NEWROUTE, dev->flags&IFF_LOOPBACK ? RTN_LOCAL :
- RTN_UNICAST, prefix, ifa->ifa_prefixlen, prim);
+ fib_magic(RTM_NEWROUTE,
+ dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
+ prefix, ifa->ifa_prefixlen, prim);
/* Add network specific broadcasts, when it takes a sense */
if (ifa->ifa_prefixlen < 31) {
fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix, 32, prim);
- fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix|~mask, 32, prim);
+ fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix | ~mask,
+ 32, prim);
}
}
}
@@ -765,17 +783,18 @@ static void fib_del_ifaddr(struct in_ifaddr *ifa)
struct net_device *dev = in_dev->dev;
struct in_ifaddr *ifa1;
struct in_ifaddr *prim = ifa;
- __be32 brd = ifa->ifa_address|~ifa->ifa_mask;
- __be32 any = ifa->ifa_address&ifa->ifa_mask;
+ __be32 brd = ifa->ifa_address | ~ifa->ifa_mask;
+ __be32 any = ifa->ifa_address & ifa->ifa_mask;
#define LOCAL_OK 1
#define BRD_OK 2
#define BRD0_OK 4
#define BRD1_OK 8
unsigned ok = 0;
- if (!(ifa->ifa_flags&IFA_F_SECONDARY))
- fib_magic(RTM_DELROUTE, dev->flags&IFF_LOOPBACK ? RTN_LOCAL :
- RTN_UNICAST, any, ifa->ifa_prefixlen, prim);
+ if (!(ifa->ifa_flags & IFA_F_SECONDARY))
+ fib_magic(RTM_DELROUTE,
+ dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
+ any, ifa->ifa_prefixlen, prim);
else {
prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
if (prim == NULL) {
@@ -785,9 +804,9 @@ static void fib_del_ifaddr(struct in_ifaddr *ifa)
}
/* Deletion is more complicated than add.
- We should take care of not to delete too much :-)
-
- Scan address list to be sure that addresses are really gone.
+ * We should take care of not to delete too much :-)
+ *
+ * Scan address list to be sure that addresses are really gone.
*/
for (ifa1 = in_dev->ifa_list; ifa1; ifa1 = ifa1->ifa_next) {
@@ -801,23 +820,23 @@ static void fib_del_ifaddr(struct in_ifaddr *ifa)
ok |= BRD0_OK;
}
- if (!(ok&BRD_OK))
+ if (!(ok & BRD_OK))
fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
- if (!(ok&BRD1_OK))
+ if (!(ok & BRD1_OK))
fib_magic(RTM_DELROUTE, RTN_BROADCAST, brd, 32, prim);
- if (!(ok&BRD0_OK))
+ if (!(ok & BRD0_OK))
fib_magic(RTM_DELROUTE, RTN_BROADCAST, any, 32, prim);
- if (!(ok&LOCAL_OK)) {
+ if (!(ok & LOCAL_OK)) {
fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim);
/* Check, that this local address finally disappeared. */
if (inet_addr_type(dev_net(dev), ifa->ifa_local) != RTN_LOCAL) {
/* And the last, but not the least thing.
- We must flush stray FIB entries.
-
- First of all, we scan fib_info list searching
- for stray nexthop entries, then ignite fib_flush.
- */
+ * We must flush stray FIB entries.
+ *
+ * First of all, we scan fib_info list searching
+ * for stray nexthop entries, then ignite fib_flush.
+ */
if (fib_sync_down_addr(dev_net(dev), ifa->ifa_local))
fib_flush(dev_net(dev));
}
@@ -828,14 +847,20 @@ static void fib_del_ifaddr(struct in_ifaddr *ifa)
#undef BRD1_OK
}
-static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb )
+static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
{
struct fib_result res;
- struct flowi fl = { .mark = frn->fl_mark,
- .nl_u = { .ip4_u = { .daddr = frn->fl_addr,
- .tos = frn->fl_tos,
- .scope = frn->fl_scope } } };
+ struct flowi fl = {
+ .mark = frn->fl_mark,
+ .nl_u = {
+ .ip4_u = {
+ .daddr = frn->fl_addr,
+ .tos = frn->fl_tos,
+ .scope = frn->fl_scope
+ }
+ }
+ };
#ifdef CONFIG_IP_MULTIPLE_TABLES
res.r = NULL;
@@ -846,15 +871,16 @@ static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb )
local_bh_disable();
frn->tb_id = tb->tb_id;
- frn->err = fib_table_lookup(tb, &fl, &res);
+ rcu_read_lock();
+ frn->err = fib_table_lookup(tb, &fl, &res, FIB_LOOKUP_NOREF);
if (!frn->err) {
frn->prefixlen = res.prefixlen;
frn->nh_sel = res.nh_sel;
frn->type = res.type;
frn->scope = res.scope;
- fib_res_put(&res);
}
+ rcu_read_unlock();
local_bh_enable();
}
}
@@ -883,8 +909,8 @@ static void nl_fib_input(struct sk_buff *skb)
nl_fib_lookup(frn, tb);
- pid = NETLINK_CB(skb).pid; /* pid of sending process */
- NETLINK_CB(skb).pid = 0; /* from kernel */
+ pid = NETLINK_CB(skb).pid; /* pid of sending process */
+ NETLINK_CB(skb).pid = 0; /* from kernel */
NETLINK_CB(skb).dst_group = 0; /* unicast */
netlink_unicast(net->ipv4.fibnl, skb, pid, MSG_DONTWAIT);
}
@@ -931,7 +957,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
fib_del_ifaddr(ifa);
if (ifa->ifa_dev->ifa_list == NULL) {
/* Last address was deleted from this interface.
- Disable IP.
+ * Disable IP.
*/
fib_disable_ip(dev, 1, 0);
} else {
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 4ed7e0dea1bc..83cca68e259c 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -244,7 +244,8 @@ fn_new_zone(struct fn_hash *table, int z)
}
int fib_table_lookup(struct fib_table *tb,
- const struct flowi *flp, struct fib_result *res)
+ const struct flowi *flp, struct fib_result *res,
+ int fib_flags)
{
int err;
struct fn_zone *fz;
@@ -264,7 +265,7 @@ int fib_table_lookup(struct fib_table *tb,
err = fib_semantic_match(&f->fn_alias,
flp, res,
- fz->fz_order);
+ fz->fz_order, fib_flags);
if (err <= 0)
goto out;
}
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index 637b133973bd..b9c9a9f2aee5 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -22,7 +22,7 @@ struct fib_alias {
/* Exported by fib_semantics.c */
extern int fib_semantic_match(struct list_head *head,
const struct flowi *flp,
- struct fib_result *res, int prefixlen);
+ struct fib_result *res, int prefixlen, int fib_flags);
extern void fib_release_info(struct fib_info *);
extern struct fib_info *fib_create_info(struct fib_config *cfg);
extern int fib_nh_match(struct fib_config *cfg, struct fib_info *fi);
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 76daeb5ff564..7981a24f5c7b 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -6,7 +6,7 @@
* IPv4 Forwarding Information Base: policy rules.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
- * Thomas Graf <tgraf@suug.ch>
+ * Thomas Graf <tgraf@suug.ch>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -14,7 +14,7 @@
* 2 of the License, or (at your option) any later version.
*
* Fixes:
- * Rani Assaf : local_rule cannot be deleted
+ * Rani Assaf : local_rule cannot be deleted
* Marc Boucher : routing by fwmark
*/
@@ -32,8 +32,7 @@
#include <net/ip_fib.h>
#include <net/fib_rules.h>
-struct fib4_rule
-{
+struct fib4_rule {
struct fib_rule common;
u8 dst_len;
u8 src_len;
@@ -58,6 +57,7 @@ int fib_lookup(struct net *net, struct flowi *flp, struct fib_result *res)
{
struct fib_lookup_arg arg = {
.result = res,
+ .flags = FIB_LOOKUP_NOREF,
};
int err;
@@ -91,10 +91,11 @@ static int fib4_rule_action(struct fib_rule *rule, struct flowi *flp,
goto errout;
}
- if ((tbl = fib_get_table(rule->fr_net, rule->table)) == NULL)
+ tbl = fib_get_table(rule->fr_net, rule->table);
+ if (!tbl)
goto errout;
- err = fib_table_lookup(tbl, flp, (struct fib_result *) arg->result);
+ err = fib_table_lookup(tbl, flp, (struct fib_result *) arg->result, arg->flags);
if (err > 0)
err = -EAGAIN;
errout:
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 20f09c5b31e8..0f80dfc2f7fb 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -60,21 +60,30 @@ static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];
static DEFINE_SPINLOCK(fib_multipath_lock);
-#define for_nexthops(fi) { int nhsel; const struct fib_nh * nh; \
-for (nhsel=0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)
-
-#define change_nexthops(fi) { int nhsel; struct fib_nh *nexthop_nh; \
-for (nhsel=0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nexthop_nh++, nhsel++)
+#define for_nexthops(fi) { \
+ int nhsel; const struct fib_nh *nh; \
+ for (nhsel = 0, nh = (fi)->fib_nh; \
+ nhsel < (fi)->fib_nhs; \
+ nh++, nhsel++)
+
+#define change_nexthops(fi) { \
+ int nhsel; struct fib_nh *nexthop_nh; \
+ for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
+ nhsel < (fi)->fib_nhs; \
+ nexthop_nh++, nhsel++)
#else /* CONFIG_IP_ROUTE_MULTIPATH */
/* Hope, that gcc will optimize it to get rid of dummy loop */
-#define for_nexthops(fi) { int nhsel = 0; const struct fib_nh * nh = (fi)->fib_nh; \
-for (nhsel=0; nhsel < 1; nhsel++)
+#define for_nexthops(fi) { \
+ int nhsel; const struct fib_nh *nh = (fi)->fib_nh; \
+ for (nhsel = 0; nhsel < 1; nhsel++)
-#define change_nexthops(fi) { int nhsel = 0; struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
-for (nhsel=0; nhsel < 1; nhsel++)
+#define change_nexthops(fi) { \
+ int nhsel; \
+ struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
+ for (nhsel = 0; nhsel < 1; nhsel++)
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
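Usage sketch for the reformatted iterator macros above: each macro opens a scope that the caller closes with endfor_nexthops(), which the original file defines as a bare closing brace (not shown in this hunk).

	change_nexthops(fi) {
		if (nexthop_nh->nh_flags & RTNH_F_DEAD)
			continue;
		/* nexthop_nh and nhsel are provided by the macro */
	} endfor_nexthops(fi);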
@@ -86,63 +95,70 @@ static const struct
int error;
u8 scope;
} fib_props[RTN_MAX + 1] = {
- {
+ [RTN_UNSPEC] = {
.error = 0,
.scope = RT_SCOPE_NOWHERE,
- }, /* RTN_UNSPEC */
- {
+ },
+ [RTN_UNICAST] = {
.error = 0,
.scope = RT_SCOPE_UNIVERSE,
- }, /* RTN_UNICAST */
- {
+ },
+ [RTN_LOCAL] = {
.error = 0,
.scope = RT_SCOPE_HOST,
- }, /* RTN_LOCAL */
- {
+ },
+ [RTN_BROADCAST] = {
.error = 0,
.scope = RT_SCOPE_LINK,
- }, /* RTN_BROADCAST */
- {
+ },
+ [RTN_ANYCAST] = {
.error = 0,
.scope = RT_SCOPE_LINK,
- }, /* RTN_ANYCAST */
- {
+ },
+ [RTN_MULTICAST] = {
.error = 0,
.scope = RT_SCOPE_UNIVERSE,
- }, /* RTN_MULTICAST */
- {
+ },
+ [RTN_BLACKHOLE] = {
.error = -EINVAL,
.scope = RT_SCOPE_UNIVERSE,
- }, /* RTN_BLACKHOLE */
- {
+ },
+ [RTN_UNREACHABLE] = {
.error = -EHOSTUNREACH,
.scope = RT_SCOPE_UNIVERSE,
- }, /* RTN_UNREACHABLE */
- {
+ },
+ [RTN_PROHIBIT] = {
.error = -EACCES,
.scope = RT_SCOPE_UNIVERSE,
- }, /* RTN_PROHIBIT */
- {
+ },
+ [RTN_THROW] = {
.error = -EAGAIN,
.scope = RT_SCOPE_UNIVERSE,
- }, /* RTN_THROW */
- {
+ },
+ [RTN_NAT] = {
.error = -EINVAL,
.scope = RT_SCOPE_NOWHERE,
- }, /* RTN_NAT */
- {
+ },
+ [RTN_XRESOLVE] = {
.error = -EINVAL,
.scope = RT_SCOPE_NOWHERE,
- }, /* RTN_XRESOLVE */
+ },
};
/* Release a nexthop info record */
+static void free_fib_info_rcu(struct rcu_head *head)
+{
+ struct fib_info *fi = container_of(head, struct fib_info, rcu);
+
+ kfree(fi);
+}
+
void free_fib_info(struct fib_info *fi)
{
if (fi->fib_dead == 0) {
- printk(KERN_WARNING "Freeing alive fib_info %p\n", fi);
+ pr_warning("Freeing alive fib_info %p\n", fi);
return;
}
change_nexthops(fi) {
@@ -152,7 +168,7 @@ void free_fib_info(struct fib_info *fi)
} endfor_nexthops(fi);
fib_info_cnt--;
release_net(fi->fib_net);
- kfree(fi);
+ call_rcu(&fi->rcu, free_fib_info_rcu);
}
void fib_release_info(struct fib_info *fi)
@@ -173,7 +189,7 @@ void fib_release_info(struct fib_info *fi)
spin_unlock_bh(&fib_info_lock);
}
-static __inline__ int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
+static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
{
const struct fib_nh *onh = ofi->fib_nh;
@@ -187,7 +203,7 @@ static __inline__ int nh_comp(const struct fib_info *fi, const struct fib_info *
#ifdef CONFIG_NET_CLS_ROUTE
nh->nh_tclassid != onh->nh_tclassid ||
#endif
- ((nh->nh_flags^onh->nh_flags)&~RTNH_F_DEAD))
+ ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_F_DEAD))
return -1;
onh++;
} endfor_nexthops(fi);
@@ -238,7 +254,7 @@ static struct fib_info *fib_find_info(const struct fib_info *nfi)
nfi->fib_priority == fi->fib_priority &&
memcmp(nfi->fib_metrics, fi->fib_metrics,
sizeof(fi->fib_metrics)) == 0 &&
- ((nfi->fib_flags^fi->fib_flags)&~RTNH_F_DEAD) == 0 &&
+ ((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_F_DEAD) == 0 &&
(nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
return fi;
}
@@ -247,9 +263,8 @@ static struct fib_info *fib_find_info(const struct fib_info *nfi)
}
/* Check, that the gateway is already configured.
- Used only by redirect accept routine.
+ * Used only by redirect accept routine.
*/
-
int ip_fib_check_default(__be32 gw, struct net_device *dev)
{
struct hlist_head *head;
@@ -264,7 +279,7 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev)
hlist_for_each_entry(nh, node, head, nh_hash) {
if (nh->nh_dev == dev &&
nh->nh_gw == gw &&
- !(nh->nh_flags&RTNH_F_DEAD)) {
+ !(nh->nh_flags & RTNH_F_DEAD)) {
spin_unlock(&fib_info_lock);
return 0;
}
@@ -362,10 +377,10 @@ int fib_detect_death(struct fib_info *fi, int order,
}
if (state == NUD_REACHABLE)
return 0;
- if ((state&NUD_VALID) && order != dflt)
+ if ((state & NUD_VALID) && order != dflt)
return 0;
- if ((state&NUD_VALID) ||
- (*last_idx<0 && order > dflt)) {
+ if ((state & NUD_VALID) ||
+ (*last_idx < 0 && order > dflt)) {
*last_resort = fi;
*last_idx = order;
}
@@ -476,75 +491,76 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
/*
- Picture
- -------
-
- Semantics of nexthop is very messy by historical reasons.
- We have to take into account, that:
- a) gateway can be actually local interface address,
- so that gatewayed route is direct.
- b) gateway must be on-link address, possibly
- described not by an ifaddr, but also by a direct route.
- c) If both gateway and interface are specified, they should not
- contradict.
- d) If we use tunnel routes, gateway could be not on-link.
-
- Attempt to reconcile all of these (alas, self-contradictory) conditions
- results in pretty ugly and hairy code with obscure logic.
-
- I chose to generalized it instead, so that the size
- of code does not increase practically, but it becomes
- much more general.
- Every prefix is assigned a "scope" value: "host" is local address,
- "link" is direct route,
- [ ... "site" ... "interior" ... ]
- and "universe" is true gateway route with global meaning.
-
- Every prefix refers to a set of "nexthop"s (gw, oif),
- where gw must have narrower scope. This recursion stops
- when gw has LOCAL scope or if "nexthop" is declared ONLINK,
- which means that gw is forced to be on link.
-
- Code is still hairy, but now it is apparently logically
- consistent and very flexible. F.e. as by-product it allows
- to co-exists in peace independent exterior and interior
- routing processes.
-
- Normally it looks as following.
-
- {universe prefix} -> (gw, oif) [scope link]
- |
- |-> {link prefix} -> (gw, oif) [scope local]
- |
- |-> {local prefix} (terminal node)
+ * Picture
+ * -------
+ *
+ * Semantics of nexthop is very messy by historical reasons.
+ * We have to take into account, that:
+ * a) gateway can be actually local interface address,
+ * so that gatewayed route is direct.
+ * b) gateway must be on-link address, possibly
+ * described not by an ifaddr, but also by a direct route.
+ * c) If both gateway and interface are specified, they should not
+ * contradict.
+ * d) If we use tunnel routes, gateway could be not on-link.
+ *
+ * Attempt to reconcile all of these (alas, self-contradictory) conditions
+ * results in pretty ugly and hairy code with obscure logic.
+ *
+ * I chose to generalized it instead, so that the size
+ * of code does not increase practically, but it becomes
+ * much more general.
+ * Every prefix is assigned a "scope" value: "host" is local address,
+ * "link" is direct route,
+ * [ ... "site" ... "interior" ... ]
+ * and "universe" is true gateway route with global meaning.
+ *
+ * Every prefix refers to a set of "nexthop"s (gw, oif),
+ * where gw must have narrower scope. This recursion stops
+ * when gw has LOCAL scope or if "nexthop" is declared ONLINK,
+ * which means that gw is forced to be on link.
+ *
+ * Code is still hairy, but now it is apparently logically
+ * consistent and very flexible. F.e. as by-product it allows
+ * to co-exists in peace independent exterior and interior
+ * routing processes.
+ *
+ * Normally it looks as following.
+ *
+ * {universe prefix} -> (gw, oif) [scope link]
+ * |
+ * |-> {link prefix} -> (gw, oif) [scope local]
+ * |
+ * |-> {local prefix} (terminal node)
*/
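A concrete illustration of the picture above; the addresses are examples, not from this patch.

/*
 * Example:
 *
 *	default via 192.0.2.1 dev eth0	[scope universe] -> nexthop (192.0.2.1, eth0)
 *	192.0.2.0/24 dev eth0		[scope link]	 -> nexthop (direct)
 *	local 192.0.2.100		[scope host]	    (terminal node)
 *
 * fib_check_nh() below validates the gateway of the first entry by doing a
 * fib_lookup() on 192.0.2.1 with a scope of at least RT_SCOPE_LINK and
 * requiring the result to be RTN_UNICAST or RTN_LOCAL.
 */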
-
static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
struct fib_nh *nh)
{
int err;
struct net *net;
+ struct net_device *dev;
net = cfg->fc_nlinfo.nl_net;
if (nh->nh_gw) {
struct fib_result res;
- if (nh->nh_flags&RTNH_F_ONLINK) {
- struct net_device *dev;
+ if (nh->nh_flags & RTNH_F_ONLINK) {
if (cfg->fc_scope >= RT_SCOPE_LINK)
return -EINVAL;
if (inet_addr_type(net, nh->nh_gw) != RTN_UNICAST)
return -EINVAL;
- if ((dev = __dev_get_by_index(net, nh->nh_oif)) == NULL)
+ dev = __dev_get_by_index(net, nh->nh_oif);
+ if (!dev)
return -ENODEV;
- if (!(dev->flags&IFF_UP))
+ if (!(dev->flags & IFF_UP))
return -ENETDOWN;
nh->nh_dev = dev;
dev_hold(dev);
nh->nh_scope = RT_SCOPE_LINK;
return 0;
}
+ rcu_read_lock();
{
struct flowi fl = {
.nl_u = {
@@ -559,34 +575,38 @@ static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
/* It is not necessary, but requires a bit of thinking */
if (fl.fl4_scope < RT_SCOPE_LINK)
fl.fl4_scope = RT_SCOPE_LINK;
- if ((err = fib_lookup(net, &fl, &res)) != 0)
+ err = fib_lookup(net, &fl, &res);
+ if (err) {
+ rcu_read_unlock();
return err;
+ }
}
err = -EINVAL;
if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
goto out;
nh->nh_scope = res.scope;
nh->nh_oif = FIB_RES_OIF(res);
- if ((nh->nh_dev = FIB_RES_DEV(res)) == NULL)
+ nh->nh_dev = dev = FIB_RES_DEV(res);
+ if (!dev)
goto out;
- dev_hold(nh->nh_dev);
+ dev_hold(dev);
err = -ENETDOWN;
- if (!(nh->nh_dev->flags & IFF_UP))
+ if (!(dev->flags & IFF_UP))
goto out;
err = 0;
out:
- fib_res_put(&res);
+ rcu_read_unlock();
return err;
} else {
struct in_device *in_dev;
- if (nh->nh_flags&(RTNH_F_PERVASIVE|RTNH_F_ONLINK))
+ if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK))
return -EINVAL;
in_dev = inetdev_by_index(net, nh->nh_oif);
if (in_dev == NULL)
return -ENODEV;
- if (!(in_dev->dev->flags&IFF_UP)) {
+ if (!(in_dev->dev->flags & IFF_UP)) {
in_dev_put(in_dev);
return -ENETDOWN;
}
@@ -602,7 +622,9 @@ static inline unsigned int fib_laddr_hashfn(__be32 val)
{
unsigned int mask = (fib_hash_size - 1);
- return ((__force u32)val ^ ((__force u32)val >> 7) ^ ((__force u32)val >> 14)) & mask;
+ return ((__force u32)val ^
+ ((__force u32)val >> 7) ^
+ ((__force u32)val >> 14)) & mask;
}
static struct hlist_head *fib_hash_alloc(int bytes)
@@ -611,7 +633,8 @@ static struct hlist_head *fib_hash_alloc(int bytes)
return kzalloc(bytes, GFP_KERNEL);
else
return (struct hlist_head *)
- __get_free_pages(GFP_KERNEL | __GFP_ZERO, get_order(bytes));
+ __get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(bytes));
}
static void fib_hash_free(struct hlist_head *hash, int bytes)
@@ -806,7 +829,8 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
goto failure;
} else {
change_nexthops(fi) {
- if ((err = fib_check_nh(cfg, fi, nexthop_nh)) != 0)
+ err = fib_check_nh(cfg, fi, nexthop_nh);
+ if (err != 0)
goto failure;
} endfor_nexthops(fi)
}
@@ -819,7 +843,8 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
}
link_it:
- if ((ofi = fib_find_info(fi)) != NULL) {
+ ofi = fib_find_info(fi);
+ if (ofi) {
fi->fib_dead = 1;
free_fib_info(fi);
ofi->fib_treeref++;
@@ -864,7 +889,7 @@ failure:
/* Note! fib_semantic_match intentionally uses RCU list functions. */
int fib_semantic_match(struct list_head *head, const struct flowi *flp,
- struct fib_result *res, int prefixlen)
+ struct fib_result *res, int prefixlen, int fib_flags)
{
struct fib_alias *fa;
int nh_sel = 0;
@@ -895,7 +920,7 @@ int fib_semantic_match(struct list_head *head, const struct flowi *flp,
case RTN_ANYCAST:
case RTN_MULTICAST:
for_nexthops(fi) {
- if (nh->nh_flags&RTNH_F_DEAD)
+ if (nh->nh_flags & RTNH_F_DEAD)
continue;
if (!flp->oif || flp->oif == nh->nh_oif)
break;
@@ -906,16 +931,15 @@ int fib_semantic_match(struct list_head *head, const struct flowi *flp,
goto out_fill_res;
}
#else
- if (nhsel < 1) {
+ if (nhsel < 1)
goto out_fill_res;
- }
#endif
endfor_nexthops(fi);
continue;
default:
- printk(KERN_WARNING "fib_semantic_match bad type %#x\n",
- fa->fa_type);
+ pr_warning("fib_semantic_match bad type %#x\n",
+ fa->fa_type);
return -EINVAL;
}
}
@@ -929,7 +953,8 @@ out_fill_res:
res->type = fa->fa_type;
res->scope = fa->fa_scope;
res->fi = fa->fa_info;
- atomic_inc(&res->fi->fib_clntref);
+ if (!(fib_flags & FIB_LOOKUP_NOREF))
+ atomic_inc(&res->fi->fib_clntref);
return 0;
}
@@ -1028,10 +1053,10 @@ nla_put_failure:
}
/*
- Update FIB if:
- - local address disappeared -> we must delete all the entries
- referring to it.
- - device went down -> we must shutdown all nexthops going via it.
+ * Update FIB if:
+ * - local address disappeared -> we must delete all the entries
+ * referring to it.
+ * - device went down -> we must shutdown all nexthops going via it.
*/
int fib_sync_down_addr(struct net *net, __be32 local)
{
@@ -1078,7 +1103,7 @@ int fib_sync_down_dev(struct net_device *dev, int force)
prev_fi = fi;
dead = 0;
change_nexthops(fi) {
- if (nexthop_nh->nh_flags&RTNH_F_DEAD)
+ if (nexthop_nh->nh_flags & RTNH_F_DEAD)
dead++;
else if (nexthop_nh->nh_dev == dev &&
nexthop_nh->nh_scope != scope) {
@@ -1110,10 +1135,9 @@ int fib_sync_down_dev(struct net_device *dev, int force)
#ifdef CONFIG_IP_ROUTE_MULTIPATH
/*
- Dead device goes up. We wake up dead nexthops.
- It takes sense only on multipath routes.
+ * Dead device goes up. We wake up dead nexthops.
+ * It takes sense only on multipath routes.
*/
-
int fib_sync_up(struct net_device *dev)
{
struct fib_info *prev_fi;
@@ -1123,7 +1147,7 @@ int fib_sync_up(struct net_device *dev)
struct fib_nh *nh;
int ret;
- if (!(dev->flags&IFF_UP))
+ if (!(dev->flags & IFF_UP))
return 0;
prev_fi = NULL;
@@ -1142,12 +1166,12 @@ int fib_sync_up(struct net_device *dev)
prev_fi = fi;
alive = 0;
change_nexthops(fi) {
- if (!(nexthop_nh->nh_flags&RTNH_F_DEAD)) {
+ if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
alive++;
continue;
}
if (nexthop_nh->nh_dev == NULL ||
- !(nexthop_nh->nh_dev->flags&IFF_UP))
+ !(nexthop_nh->nh_dev->flags & IFF_UP))
continue;
if (nexthop_nh->nh_dev != dev ||
!__in_dev_get_rtnl(dev))
@@ -1169,10 +1193,9 @@ int fib_sync_up(struct net_device *dev)
}
/*
- The algorithm is suboptimal, but it provides really
- fair weighted route distribution.
+ * The algorithm is suboptimal, but it provides really
+ * fair weighted route distribution.
*/
-
void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
{
struct fib_info *fi = res->fi;
@@ -1182,7 +1205,7 @@ void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
if (fi->fib_power <= 0) {
int power = 0;
change_nexthops(fi) {
- if (!(nexthop_nh->nh_flags&RTNH_F_DEAD)) {
+ if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
power += nexthop_nh->nh_weight;
nexthop_nh->nh_power = nexthop_nh->nh_weight;
}
@@ -1198,15 +1221,16 @@ void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
/* w should be random number [0..fi->fib_power-1],
- it is pretty bad approximation.
+ * it is pretty bad approximation.
*/
w = jiffies % fi->fib_power;
change_nexthops(fi) {
- if (!(nexthop_nh->nh_flags&RTNH_F_DEAD) &&
+ if (!(nexthop_nh->nh_flags & RTNH_F_DEAD) &&
nexthop_nh->nh_power) {
- if ((w -= nexthop_nh->nh_power) <= 0) {
+ w -= nexthop_nh->nh_power;
+ if (w <= 0) {
nexthop_nh->nh_power--;
fi->fib_power--;
res->nh_sel = nhsel;
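A simplified, standalone sketch (assumed names, not the kernel code) of the token scheme fib_select_multipath() implements above: each nexthop is refilled with nh_weight tokens once the pool runs out, and a jiffies-style draw spends one token from the selected nexthop.

struct nh_tok {
	int weight;	/* configured weight */
	int power;	/* tokens left in the current round */
};

static int select_weighted(struct nh_tok *nh, int n, int *pool, unsigned long now)
{
	int i, w;

	if (*pool <= 0) {			/* refill once per round */
		*pool = 0;
		for (i = 0; i < n; i++) {
			nh[i].power = nh[i].weight;
			*pool += nh[i].weight;
		}
		if (*pool <= 0)
			return 0;
	}
	w = now % *pool;			/* crude draw, like jiffies % fib_power */
	for (i = 0; i < n; i++) {
		if (nh[i].power > 0) {
			w -= nh[i].power;
			if (w <= 0) {		/* spend one token here */
				nh[i].power--;
				(*pool)--;
				return i;
			}
		}
	}
	return 0;
}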
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 79d057a939ba..271c89bdf049 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -186,7 +186,7 @@ static inline struct tnode *node_parent_rcu(struct node *node)
{
struct tnode *ret = node_parent(node);
- return rcu_dereference(ret);
+ return rcu_dereference_rtnl(ret);
}
/* Same as rcu_assign_pointer
@@ -209,9 +209,7 @@ static inline struct node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
{
struct node *ret = tnode_get_child(tn, i);
- return rcu_dereference_check(ret,
- rcu_read_lock_held() ||
- lockdep_rtnl_is_held());
+ return rcu_dereference_rtnl(ret);
}
static inline int tnode_child_length(const struct tnode *tn)
@@ -457,8 +455,8 @@ static struct tnode *tnode_new(t_key key, int pos, int bits)
tn->empty_children = 1<<bits;
}
- pr_debug("AT %p s=%u %lu\n", tn, (unsigned int) sizeof(struct tnode),
- (unsigned long) (sizeof(struct node) << bits));
+ pr_debug("AT %p s=%zu %zu\n", tn, sizeof(struct tnode),
+ sizeof(struct node) << bits);
return tn;
}
@@ -607,11 +605,10 @@ static struct node *resize(struct trie *t, struct tnode *tn)
/* Keep root node larger */
- if (!node_parent((struct node*) tn)) {
+ if (!node_parent((struct node *)tn)) {
inflate_threshold_use = inflate_threshold_root;
halve_threshold_use = halve_threshold_root;
- }
- else {
+ } else {
inflate_threshold_use = inflate_threshold;
halve_threshold_use = halve_threshold;
}
@@ -637,7 +634,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
check_tnode(tn);
/* Return if at least one inflate is run */
- if( max_work != MAX_WORK)
+ if (max_work != MAX_WORK)
return (struct node *) tn;
/*
@@ -964,9 +961,7 @@ fib_find_node(struct trie *t, u32 key)
struct node *n;
pos = 0;
- n = rcu_dereference_check(t->trie,
- rcu_read_lock_held() ||
- lockdep_rtnl_is_held());
+ n = rcu_dereference_rtnl(t->trie);
while (n != NULL && NODE_TYPE(n) == T_TNODE) {
tn = (struct tnode *) n;
@@ -1347,7 +1342,7 @@ err:
/* should be called with rcu_read_lock */
static int check_leaf(struct trie *t, struct leaf *l,
t_key key, const struct flowi *flp,
- struct fib_result *res)
+ struct fib_result *res, int fib_flags)
{
struct leaf_info *li;
struct hlist_head *hhead = &l->list;
@@ -1361,7 +1356,7 @@ static int check_leaf(struct trie *t, struct leaf *l,
if (l->key != (key & ntohl(mask)))
continue;
- err = fib_semantic_match(&li->falh, flp, res, plen);
+ err = fib_semantic_match(&li->falh, flp, res, plen, fib_flags);
#ifdef CONFIG_IP_FIB_TRIE_STATS
if (err <= 0)
@@ -1377,7 +1372,7 @@ static int check_leaf(struct trie *t, struct leaf *l,
}
int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
- struct fib_result *res)
+ struct fib_result *res, int fib_flags)
{
struct trie *t = (struct trie *) tb->tb_data;
int ret;
@@ -1404,7 +1399,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
/* Just a leaf? */
if (IS_LEAF(n)) {
- ret = check_leaf(t, (struct leaf *)n, key, flp, res);
+ ret = check_leaf(t, (struct leaf *)n, key, flp, res, fib_flags);
goto found;
}
@@ -1429,7 +1424,7 @@ int fib_table_lookup(struct fib_table *tb, const struct flowi *flp,
}
if (IS_LEAF(n)) {
- ret = check_leaf(t, (struct leaf *)n, key, flp, res);
+ ret = check_leaf(t, (struct leaf *)n, key, flp, res, fib_flags);
if (ret > 0)
goto backtrace;
goto found;
@@ -1746,14 +1741,14 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct node *c)
/* Node empty, walk back up to parent */
c = (struct node *) p;
- } while ( (p = node_parent_rcu(c)) != NULL);
+ } while ((p = node_parent_rcu(c)) != NULL);
return NULL; /* Root of trie */
}
static struct leaf *trie_firstleaf(struct trie *t)
{
- struct tnode *n = (struct tnode *) rcu_dereference(t->trie);
+ struct tnode *n = (struct tnode *)rcu_dereference_rtnl(t->trie);
if (!n)
return NULL;
@@ -2039,14 +2034,14 @@ struct fib_trie_iter {
struct seq_net_private p;
struct fib_table *tb;
struct tnode *tnode;
- unsigned index;
- unsigned depth;
+ unsigned int index;
+ unsigned int depth;
};
static struct node *fib_trie_get_next(struct fib_trie_iter *iter)
{
struct tnode *tn = iter->tnode;
- unsigned cindex = iter->index;
+ unsigned int cindex = iter->index;
struct tnode *p;
/* A single entry routing table */
@@ -2155,7 +2150,7 @@ static void trie_collect_stats(struct trie *t, struct trie_stat *s)
*/
static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
{
- unsigned i, max, pointers, bytes, avdepth;
+ unsigned int i, max, pointers, bytes, avdepth;
if (stat->leaves)
avdepth = stat->totdepth*100 / stat->leaves;
@@ -2352,7 +2347,8 @@ static void fib_trie_seq_stop(struct seq_file *seq, void *v)
static void seq_indent(struct seq_file *seq, int n)
{
- while (n-- > 0) seq_puts(seq, " ");
+ while (n-- > 0)
+ seq_puts(seq, " ");
}
static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
@@ -2384,7 +2380,7 @@ static const char *const rtn_type_names[__RTN_MAX] = {
[RTN_XRESOLVE] = "XRESOLVE",
};
-static inline const char *rtn_type(char *buf, size_t len, unsigned t)
+static inline const char *rtn_type(char *buf, size_t len, unsigned int t)
{
if (t < __RTN_MAX && rtn_type_names[t])
return rtn_type_names[t];
@@ -2540,13 +2536,12 @@ static void fib_route_seq_stop(struct seq_file *seq, void *v)
rcu_read_unlock();
}
-static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
+static unsigned int fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
{
- static unsigned type2flags[RTN_MAX + 1] = {
- [7] = RTF_REJECT, [8] = RTF_REJECT,
- };
- unsigned flags = type2flags[type];
+ unsigned int flags = 0;
+ if (type == RTN_UNREACHABLE || type == RTN_PROHIBIT)
+ flags = RTF_REJECT;
if (fi && fi->fib_nh->nh_gw)
flags |= RTF_GATEWAY;
if (mask == htonl(0xFFFFFFFF))
@@ -2558,7 +2553,7 @@ static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi)
/*
* This outputs /proc/net/route.
* The format of the file is not supposed to be changed
- * and needs to be same as fib_hash output to avoid breaking
+ * and needs to be same as fib_hash output to avoid breaking
* legacy utilities
*/
static int fib_route_seq_show(struct seq_file *seq, void *v)
@@ -2583,7 +2578,7 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
list_for_each_entry_rcu(fa, &li->falh, fa_list) {
const struct fib_info *fi = fa->fa_info;
- unsigned flags = fib_flag_trans(fa->fa_type, mask, fi);
+ unsigned int flags = fib_flag_trans(fa->fa_type, mask, fi);
int len;
if (fa->fa_type == RTN_BROADCAST
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
new file mode 100644
index 000000000000..caea6885fdbd
--- /dev/null
+++ b/net/ipv4/gre.c
@@ -0,0 +1,151 @@
+/*
+ * GRE over IPv4 demultiplexer driver
+ *
+ * Authors: Dmitry Kozlov (xeb@mail.ru)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kmod.h>
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/netdevice.h>
+#include <linux/version.h>
+#include <linux/spinlock.h>
+#include <net/protocol.h>
+#include <net/gre.h>
+
+
+static const struct gre_protocol *gre_proto[GREPROTO_MAX] __read_mostly;
+static DEFINE_SPINLOCK(gre_proto_lock);
+
+int gre_add_protocol(const struct gre_protocol *proto, u8 version)
+{
+ if (version >= GREPROTO_MAX)
+ goto err_out;
+
+ spin_lock(&gre_proto_lock);
+ if (gre_proto[version])
+ goto err_out_unlock;
+
+ rcu_assign_pointer(gre_proto[version], proto);
+ spin_unlock(&gre_proto_lock);
+ return 0;
+
+err_out_unlock:
+ spin_unlock(&gre_proto_lock);
+err_out:
+ return -1;
+}
+EXPORT_SYMBOL_GPL(gre_add_protocol);
+
+int gre_del_protocol(const struct gre_protocol *proto, u8 version)
+{
+ if (version >= GREPROTO_MAX)
+ goto err_out;
+
+ spin_lock(&gre_proto_lock);
+ if (gre_proto[version] != proto)
+ goto err_out_unlock;
+ rcu_assign_pointer(gre_proto[version], NULL);
+ spin_unlock(&gre_proto_lock);
+ synchronize_rcu();
+ return 0;
+
+err_out_unlock:
+ spin_unlock(&gre_proto_lock);
+err_out:
+ return -1;
+}
+EXPORT_SYMBOL_GPL(gre_del_protocol);
+
+static int gre_rcv(struct sk_buff *skb)
+{
+ const struct gre_protocol *proto;
+ u8 ver;
+ int ret;
+
+ if (!pskb_may_pull(skb, 12))
+ goto drop;
+
+ ver = skb->data[1]&0x7f;
+ if (ver >= GREPROTO_MAX)
+ goto drop;
+
+ rcu_read_lock();
+ proto = rcu_dereference(gre_proto[ver]);
+ if (!proto || !proto->handler)
+ goto drop_unlock;
+ ret = proto->handler(skb);
+ rcu_read_unlock();
+ return ret;
+
+drop_unlock:
+ rcu_read_unlock();
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+static void gre_err(struct sk_buff *skb, u32 info)
+{
+ const struct gre_protocol *proto;
+ u8 ver;
+
+ if (!pskb_may_pull(skb, 12))
+ goto drop;
+
+ ver = skb->data[1]&0x7f;
+ if (ver >= GREPROTO_MAX)
+ goto drop;
+
+ rcu_read_lock();
+ proto = rcu_dereference(gre_proto[ver]);
+ if (!proto || !proto->err_handler)
+ goto drop_unlock;
+ proto->err_handler(skb, info);
+ rcu_read_unlock();
+ return;
+
+drop_unlock:
+ rcu_read_unlock();
+drop:
+ kfree_skb(skb);
+}
+
+static const struct net_protocol net_gre_protocol = {
+ .handler = gre_rcv,
+ .err_handler = gre_err,
+ .netns_ok = 1,
+};
+
+static int __init gre_init(void)
+{
+ pr_info("GRE over IPv4 demultiplexor driver\n");
+
+ if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) {
+ pr_err("gre: can't add protocol\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static void __exit gre_exit(void)
+{
+ inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
+}
+
+module_init(gre_init);
+module_exit(gre_exit);
+
+MODULE_DESCRIPTION("GRE over IPv4 demultiplexer driver");
+MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
+MODULE_LICENSE("GPL");
+
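A hypothetical consumer sketch for the demultiplexer API added above: a module registers a struct gre_protocol for one GRE version and removes it on exit. The my_* names are placeholders; GREPROTO_CISCO (version 0) is assumed to come from <net/gre.h> in this series.

static int my_gre_rcv(struct sk_buff *skb)
{
	/* a real handler would parse the GRE header here */
	kfree_skb(skb);
	return NET_RX_DROP;
}

static void my_gre_err(struct sk_buff *skb, u32 info)
{
	/* react to ICMP errors for our tunnels */
}

static const struct gre_protocol my_gre_proto = {
	.handler     = my_gre_rcv,
	.err_handler = my_gre_err,
};

static int __init my_init(void)
{
	if (gre_add_protocol(&my_gre_proto, GREPROTO_CISCO) < 0)
		return -EAGAIN;
	return 0;
}

static void __exit my_exit(void)
{
	gre_del_protocol(&my_gre_proto, GREPROTO_CISCO);
}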
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index a0d847c7cba5..96bc7f9475a3 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -379,7 +379,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
inet->tos = ip_hdr(skb)->tos;
daddr = ipc.addr = rt->rt_src;
ipc.opt = NULL;
- ipc.shtx.flags = 0;
+ ipc.tx_flags = 0;
if (icmp_param->replyopts.optlen) {
ipc.opt = &icmp_param->replyopts;
if (ipc.opt->srr)
@@ -538,7 +538,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
inet_sk(sk)->tos = tos;
ipc.addr = iph->saddr;
ipc.opt = &icmp_param.replyopts;
- ipc.shtx.flags = 0;
+ ipc.tx_flags = 0;
{
struct flowi fl = {
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index a1ad0e7180d2..25f339672b28 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -856,6 +856,18 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
igmpv3_clear_delrec(in_dev);
} else if (len < 12) {
return; /* ignore bogus packet; freed by caller */
+ } else if (IGMP_V1_SEEN(in_dev)) {
+ /* This is a v3 query with v1 queriers present */
+ max_delay = IGMP_Query_Response_Interval;
+ group = 0;
+ } else if (IGMP_V2_SEEN(in_dev)) {
+ /* this is a v3 query with v2 queriers present;
+ * Interpretation of the max_delay code is problematic here.
+ * A real v2 host would use ih_code directly, while v3 has a
+ * different encoding. We use the v3 encoding as more likely
+ * to be intended in a v3 query.
+ */
+ max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
} else { /* v3 */
if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
return;
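A worked example of the Max Resp Code decoding used in the IGMP_V2_SEEN branch above (RFC 3376 exponential form, units of 1/10 s; the sample value 0x8a is illustrative only):

/*
 *	ih3->code = 0x8a  (>= 128, so mantissa/exponent encoded)
 *	exp  = (0x8a >> 4) & 7 = 0
 *	mant =  0x8a & 0x0f    = 0x0a
 *	IGMPV3_MRC(0x8a) = (mant | 0x10) << (exp + 3) = 26 << 3 = 208
 *	max_delay = 208 * (HZ / IGMP_TIMER_SCALE),  i.e. about 20.8 seconds
 */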
@@ -1257,14 +1269,14 @@ void ip_mc_rejoin_group(struct ip_mc_list *im)
if (im->multiaddr == IGMP_ALL_HOSTS)
return;
- if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
- igmp_mod_timer(im, IGMP_Initial_Report_Delay);
- return;
- }
- /* else, v3 */
- im->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
- IGMP_Unsolicited_Report_Count;
- igmp_ifc_event(in_dev);
+ /* a failover is happening and switches
+ * must be notified immediately */
+ if (IGMP_V1_SEEN(in_dev))
+ igmp_send_report(in_dev, im, IGMP_HOST_MEMBERSHIP_REPORT);
+ else if (IGMP_V2_SEEN(in_dev))
+ igmp_send_report(in_dev, im, IGMPV2_HOST_MEMBERSHIP_REPORT);
+ else
+ igmp_send_report(in_dev, im, IGMPV3_HOST_MEMBERSHIP_REPORT);
#endif
}
EXPORT_SYMBOL(ip_mc_rejoin_group);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index e5fa2ddce320..ba8042665849 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -425,7 +425,7 @@ static int inet_diag_bc_run(const void *bc, int len,
bc += op->no;
}
}
- return (len == 0);
+ return len == 0;
}
static int valid_cc(const void *bc, int len, int cc)
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index b7c41654dde5..168440834ade 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -116,11 +116,11 @@ static int ip4_frag_match(struct inet_frag_queue *q, void *a)
struct ip4_create_arg *arg = a;
qp = container_of(q, struct ipq, q);
- return (qp->id == arg->iph->id &&
+ return qp->id == arg->iph->id &&
qp->saddr == arg->iph->saddr &&
qp->daddr == arg->iph->daddr &&
qp->protocol == arg->iph->protocol &&
- qp->user == arg->user);
+ qp->user == arg->user;
}
/* Memory Tracking Functions. */
@@ -542,7 +542,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
/* If the first fragment is fragmented itself, we split
* it to two chunks: the first with data and paged part
* and the second, holding only fragments. */
- if (skb_has_frags(head)) {
+ if (skb_has_frag_list(head)) {
struct sk_buff *clone;
int i, plen = 0;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 945b20a5ad50..9d421f4cf3ef 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -44,8 +44,9 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
+#include <net/gre.h>
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
@@ -63,13 +64,13 @@
We cannot track such dead loops during route installation,
it is infeasible task. The most general solutions would be
to keep skb->encapsulation counter (sort of local ttl),
- and silently drop packet when it expires. It is the best
+ and silently drop packet when it expires. It is a good
solution, but it supposes maintaing new variable in ALL
skb, even if no tunneling is used.
- Current solution: HARD_TX_LOCK lock breaks dead loops.
-
-
+ Current solution: xmit_recursion breaks dead loops. This is a percpu
+ counter, since when we enter the first ndo_xmit(), cpu migration is
+ forbidden. We force an exit if this counter reaches RECURSION_LIMIT
2. Networking dead loops would not kill routers, but would really
kill network. IP hop limit plays role of "t->recursion" in this case,
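A sketch of the recursion guard the updated comment refers to; the real counter lives in net/core/dev.c, not in this file, and the limit value and function name here are only illustrative.

#define RECURSION_LIMIT	3
static DEFINE_PER_CPU(int, xmit_recursion);

static int xmit_guarded(struct sk_buff *skb, struct net_device *dev,
			int (*xmit)(struct sk_buff *, struct net_device *))
{
	int rc;

	if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT) {
		kfree_skb(skb);		/* break the dead loop instead of recursing */
		return NET_XMIT_DROP;
	}
	__this_cpu_inc(xmit_recursion);
	rc = xmit(skb, dev);		/* may re-enter through a stacked tunnel */
	__this_cpu_dec(xmit_recursion);
	return rc;
}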
@@ -128,7 +129,7 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev);
static int ipgre_net_id __read_mostly;
struct ipgre_net {
- struct ip_tunnel *tunnels[4][HASH_SIZE];
+ struct ip_tunnel __rcu *tunnels[4][HASH_SIZE];
struct net_device *fb_tunnel_dev;
};
@@ -158,13 +159,40 @@ struct ipgre_net {
#define tunnels_l tunnels[1]
#define tunnels_wc tunnels[0]
/*
- * Locking : hash tables are protected by RCU and a spinlock
+ * Locking : hash tables are protected by RCU and RTNL
*/
-static DEFINE_SPINLOCK(ipgre_lock);
#define for_each_ip_tunnel_rcu(start) \
for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
+/* often modified stats are per cpu, other are shared (netdev->stats) */
+struct pcpu_tstats {
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+};
+
+static struct net_device_stats *ipgre_get_stats(struct net_device *dev)
+{
+ struct pcpu_tstats sum = { 0 };
+ int i;
+
+ for_each_possible_cpu(i) {
+ const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+
+ sum.rx_packets += tstats->rx_packets;
+ sum.rx_bytes += tstats->rx_bytes;
+ sum.tx_packets += tstats->tx_packets;
+ sum.tx_bytes += tstats->tx_bytes;
+ }
+ dev->stats.rx_packets = sum.rx_packets;
+ dev->stats.rx_bytes = sum.rx_bytes;
+ dev->stats.tx_packets = sum.tx_packets;
+ dev->stats.tx_bytes = sum.tx_bytes;
+ return &dev->stats;
+}
+
/* Given src, dst and key, find appropriate for input tunnel. */
static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev,
@@ -173,8 +201,8 @@ static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev,
{
struct net *net = dev_net(dev);
int link = dev->ifindex;
- unsigned h0 = HASH(remote);
- unsigned h1 = HASH(key);
+ unsigned int h0 = HASH(remote);
+ unsigned int h1 = HASH(key);
struct ip_tunnel *t, *cand = NULL;
struct ipgre_net *ign = net_generic(net, ipgre_net_id);
int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
@@ -289,13 +317,13 @@ static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev,
return NULL;
}
-static struct ip_tunnel **__ipgre_bucket(struct ipgre_net *ign,
+static struct ip_tunnel __rcu **__ipgre_bucket(struct ipgre_net *ign,
struct ip_tunnel_parm *parms)
{
__be32 remote = parms->iph.daddr;
__be32 local = parms->iph.saddr;
__be32 key = parms->i_key;
- unsigned h = HASH(key);
+ unsigned int h = HASH(key);
int prio = 0;
if (local)
@@ -308,7 +336,7 @@ static struct ip_tunnel **__ipgre_bucket(struct ipgre_net *ign,
return &ign->tunnels[prio][h];
}
-static inline struct ip_tunnel **ipgre_bucket(struct ipgre_net *ign,
+static inline struct ip_tunnel __rcu **ipgre_bucket(struct ipgre_net *ign,
struct ip_tunnel *t)
{
return __ipgre_bucket(ign, &t->parms);
@@ -316,23 +344,22 @@ static inline struct ip_tunnel **ipgre_bucket(struct ipgre_net *ign,
static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
{
- struct ip_tunnel **tp = ipgre_bucket(ign, t);
+ struct ip_tunnel __rcu **tp = ipgre_bucket(ign, t);
- spin_lock_bh(&ipgre_lock);
- t->next = *tp;
+ rcu_assign_pointer(t->next, rtnl_dereference(*tp));
rcu_assign_pointer(*tp, t);
- spin_unlock_bh(&ipgre_lock);
}
static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
{
- struct ip_tunnel **tp;
-
- for (tp = ipgre_bucket(ign, t); *tp; tp = &(*tp)->next) {
- if (t == *tp) {
- spin_lock_bh(&ipgre_lock);
- *tp = t->next;
- spin_unlock_bh(&ipgre_lock);
+ struct ip_tunnel __rcu **tp;
+ struct ip_tunnel *iter;
+
+ for (tp = ipgre_bucket(ign, t);
+ (iter = rtnl_dereference(*tp)) != NULL;
+ tp = &iter->next) {
+ if (t == iter) {
+ rcu_assign_pointer(*tp, t->next);
break;
}
}
@@ -346,10 +373,13 @@ static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
__be32 local = parms->iph.saddr;
__be32 key = parms->i_key;
int link = parms->link;
- struct ip_tunnel *t, **tp;
+ struct ip_tunnel *t;
+ struct ip_tunnel __rcu **tp;
struct ipgre_net *ign = net_generic(net, ipgre_net_id);
- for (tp = __ipgre_bucket(ign, parms); (t = *tp) != NULL; tp = &t->next)
+ for (tp = __ipgre_bucket(ign, parms);
+ (t = rtnl_dereference(*tp)) != NULL;
+ tp = &t->next)
if (local == t->parms.iph.saddr &&
remote == t->parms.iph.daddr &&
key == t->parms.i_key &&
@@ -360,7 +390,7 @@ static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
return t;
}
-static struct ip_tunnel * ipgre_tunnel_locate(struct net *net,
+static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
struct ip_tunnel_parm *parms, int create)
{
struct ip_tunnel *t, *nt;
@@ -582,7 +612,7 @@ static int ipgre_rcv(struct sk_buff *skb)
if ((tunnel = ipgre_tunnel_lookup(skb->dev,
iph->saddr, iph->daddr, key,
gre_proto))) {
- struct net_device_stats *stats = &tunnel->dev->stats;
+ struct pcpu_tstats *tstats;
secpath_reset(skb);
@@ -606,22 +636,22 @@ static int ipgre_rcv(struct sk_buff *skb)
/* Looped back packet, drop it! */
if (skb_rtable(skb)->fl.iif == 0)
goto drop;
- stats->multicast++;
+ tunnel->dev->stats.multicast++;
skb->pkt_type = PACKET_BROADCAST;
}
#endif
if (((flags&GRE_CSUM) && csum) ||
(!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
- stats->rx_crc_errors++;
- stats->rx_errors++;
+ tunnel->dev->stats.rx_crc_errors++;
+ tunnel->dev->stats.rx_errors++;
goto drop;
}
if (tunnel->parms.i_flags&GRE_SEQ) {
if (!(flags&GRE_SEQ) ||
(tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
- stats->rx_fifo_errors++;
- stats->rx_errors++;
+ tunnel->dev->stats.rx_fifo_errors++;
+ tunnel->dev->stats.rx_errors++;
goto drop;
}
tunnel->i_seqno = seqno + 1;
@@ -630,8 +660,8 @@ static int ipgre_rcv(struct sk_buff *skb)
/* Warning: All skb pointers will be invalidated! */
if (tunnel->dev->type == ARPHRD_ETHER) {
if (!pskb_may_pull(skb, ETH_HLEN)) {
- stats->rx_length_errors++;
- stats->rx_errors++;
+ tunnel->dev->stats.rx_length_errors++;
+ tunnel->dev->stats.rx_errors++;
goto drop;
}
@@ -640,14 +670,19 @@ static int ipgre_rcv(struct sk_buff *skb)
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
}
- skb_tunnel_rx(skb, tunnel->dev);
+ tstats = this_cpu_ptr(tunnel->dev->tstats);
+ tstats->rx_packets++;
+ tstats->rx_bytes += skb->len;
+
+ __skb_tunnel_rx(skb, tunnel->dev);
skb_reset_network_header(skb);
ipgre_ecn_decapsulate(iph, skb);
netif_rx(skb);
+
rcu_read_unlock();
- return(0);
+ return 0;
}
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
@@ -655,20 +690,19 @@ drop:
rcu_read_unlock();
drop_nolock:
kfree_skb(skb);
- return(0);
+ return 0;
}
static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+ struct pcpu_tstats *tstats;
struct iphdr *old_iph = ip_hdr(skb);
struct iphdr *tiph;
u8 tos;
__be16 df;
struct rtable *rt; /* Route to the other host */
- struct net_device *tdev; /* Device to other host */
+ struct net_device *tdev; /* Device to other host */
struct iphdr *iph; /* Our new IP header */
unsigned int max_headroom; /* The extra header space needed */
int gre_hlen;
@@ -690,7 +724,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
/* NBMA tunnel */
if (skb_dst(skb) == NULL) {
- stats->tx_fifo_errors++;
+ dev->stats.tx_fifo_errors++;
goto tx_error;
}
@@ -699,7 +733,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
if ((dst = rt->rt_gateway) == 0)
goto tx_error_icmp;
}
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
else if (skb->protocol == htons(ETH_P_IPV6)) {
struct in6_addr *addr6;
int addr_type;
@@ -736,14 +770,20 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
}
{
- struct flowi fl = { .oif = tunnel->parms.link,
- .nl_u = { .ip4_u =
- { .daddr = dst,
- .saddr = tiph->saddr,
- .tos = RT_TOS(tos) } },
- .proto = IPPROTO_GRE };
+ struct flowi fl = {
+ .oif = tunnel->parms.link,
+ .nl_u = {
+ .ip4_u = {
+ .daddr = dst,
+ .saddr = tiph->saddr,
+ .tos = RT_TOS(tos)
+ }
+ },
+ .proto = IPPROTO_GRE
+ };
if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
- stats->tx_carrier_errors++;
+ dev->stats.tx_carrier_errors++;
goto tx_error;
}
}
@@ -751,7 +791,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
if (tdev == dev) {
ip_rt_put(rt);
- stats->collisions++;
+ dev->stats.collisions++;
goto tx_error;
}
@@ -774,7 +814,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
goto tx_error;
}
}
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
else if (skb->protocol == htons(ETH_P_IPV6)) {
struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
@@ -814,7 +854,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
dev->needed_headroom = max_headroom;
if (!new_skb) {
ip_rt_put(rt);
- txq->tx_dropped++;
+ dev->stats.tx_dropped++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -850,7 +890,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
if ((iph->ttl = tiph->ttl) == 0) {
if (skb->protocol == htons(ETH_P_IP))
iph->ttl = old_iph->ttl;
-#ifdef CONFIG_IPV6
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
else if (skb->protocol == htons(ETH_P_IPV6))
iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
#endif
@@ -881,15 +921,15 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
}
nf_reset(skb);
-
- IPTUNNEL_XMIT();
+ tstats = this_cpu_ptr(dev->tstats);
+ __IPTUNNEL_XMIT(tstats, &dev->stats);
return NETDEV_TX_OK;
tx_error_icmp:
dst_link_failure(skb);
tx_error:
- stats->tx_errors++;
+ dev->stats.tx_errors++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -909,13 +949,19 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
/* Guess output device to choose reasonable mtu and needed_headroom */
if (iph->daddr) {
- struct flowi fl = { .oif = tunnel->parms.link,
- .nl_u = { .ip4_u =
- { .daddr = iph->daddr,
- .saddr = iph->saddr,
- .tos = RT_TOS(iph->tos) } },
- .proto = IPPROTO_GRE };
+ struct flowi fl = {
+ .oif = tunnel->parms.link,
+ .nl_u = {
+ .ip4_u = {
+ .daddr = iph->daddr,
+ .saddr = iph->saddr,
+ .tos = RT_TOS(iph->tos)
+ }
+ },
+ .proto = IPPROTO_GRE
+ };
struct rtable *rt;
+
if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
tdev = rt->dst.dev;
ip_rt_put(rt);
@@ -1012,7 +1058,7 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
break;
}
} else {
- unsigned nflags = 0;
+ unsigned int nflags = 0;
t = netdev_priv(dev);
@@ -1125,7 +1171,7 @@ static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
- const void *daddr, const void *saddr, unsigned len)
+ const void *daddr, const void *saddr, unsigned int len)
{
struct ip_tunnel *t = netdev_priv(dev);
struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
@@ -1167,13 +1213,19 @@ static int ipgre_open(struct net_device *dev)
struct ip_tunnel *t = netdev_priv(dev);
if (ipv4_is_multicast(t->parms.iph.daddr)) {
- struct flowi fl = { .oif = t->parms.link,
- .nl_u = { .ip4_u =
- { .daddr = t->parms.iph.daddr,
- .saddr = t->parms.iph.saddr,
- .tos = RT_TOS(t->parms.iph.tos) } },
- .proto = IPPROTO_GRE };
+ struct flowi fl = {
+ .oif = t->parms.link,
+ .nl_u = {
+ .ip4_u = {
+ .daddr = t->parms.iph.daddr,
+ .saddr = t->parms.iph.saddr,
+ .tos = RT_TOS(t->parms.iph.tos)
+ }
+ },
+ .proto = IPPROTO_GRE
+ };
struct rtable *rt;
+
if (ip_route_output_key(dev_net(dev), &rt, &fl))
return -EADDRNOTAVAIL;
dev = rt->dst.dev;
@@ -1213,12 +1265,19 @@ static const struct net_device_ops ipgre_netdev_ops = {
.ndo_start_xmit = ipgre_tunnel_xmit,
.ndo_do_ioctl = ipgre_tunnel_ioctl,
.ndo_change_mtu = ipgre_tunnel_change_mtu,
+ .ndo_get_stats = ipgre_get_stats,
};
+static void ipgre_dev_free(struct net_device *dev)
+{
+ free_percpu(dev->tstats);
+ free_netdev(dev);
+}
+
static void ipgre_tunnel_setup(struct net_device *dev)
{
dev->netdev_ops = &ipgre_netdev_ops;
- dev->destructor = free_netdev;
+ dev->destructor = ipgre_dev_free;
dev->type = ARPHRD_IPGRE;
dev->needed_headroom = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
@@ -1256,6 +1315,10 @@ static int ipgre_tunnel_init(struct net_device *dev)
} else
dev->header_ops = &ipgre_header_ops;
+ dev->tstats = alloc_percpu(struct pcpu_tstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
return 0;
}
@@ -1274,14 +1337,13 @@ static void ipgre_fb_tunnel_init(struct net_device *dev)
tunnel->hlen = sizeof(struct iphdr) + 4;
dev_hold(dev);
- ign->tunnels_wc[0] = tunnel;
+ rcu_assign_pointer(ign->tunnels_wc[0], tunnel);
}
-static const struct net_protocol ipgre_protocol = {
- .handler = ipgre_rcv,
- .err_handler = ipgre_err,
- .netns_ok = 1,
+static const struct gre_protocol ipgre_protocol = {
+ .handler = ipgre_rcv,
+ .err_handler = ipgre_err,
};
static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
@@ -1291,11 +1353,13 @@ static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
for (prio = 0; prio < 4; prio++) {
int h;
for (h = 0; h < HASH_SIZE; h++) {
- struct ip_tunnel *t = ign->tunnels[prio][h];
+ struct ip_tunnel *t;
+
+ t = rtnl_dereference(ign->tunnels[prio][h]);
while (t != NULL) {
unregister_netdevice_queue(t->dev, head);
- t = t->next;
+ t = rtnl_dereference(t->next);
}
}
}
@@ -1441,6 +1505,10 @@ static int ipgre_tap_init(struct net_device *dev)
ipgre_tunnel_bind_dev(dev);
+ dev->tstats = alloc_percpu(struct pcpu_tstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
return 0;
}
@@ -1451,6 +1519,7 @@ static const struct net_device_ops ipgre_tap_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ipgre_tunnel_change_mtu,
+ .ndo_get_stats = ipgre_get_stats,
};
static void ipgre_tap_setup(struct net_device *dev)
@@ -1459,7 +1528,7 @@ static void ipgre_tap_setup(struct net_device *dev)
ether_setup(dev);
dev->netdev_ops = &ipgre_tap_netdev_ops;
- dev->destructor = free_netdev;
+ dev->destructor = ipgre_dev_free;
dev->iflink = 0;
dev->features |= NETIF_F_NETNS_LOCAL;
@@ -1487,6 +1556,10 @@ static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nla
if (!tb[IFLA_MTU])
dev->mtu = mtu;
+ /* Can use a lockless transmit, unless we generate output sequences */
+ if (!(nt->parms.o_flags & GRE_SEQ))
+ dev->features |= NETIF_F_LLTX;
+
err = register_netdevice(dev);
if (err)
goto out;
@@ -1522,7 +1595,7 @@ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
t = nt;
if (dev->type != ARPHRD_ETHER) {
- unsigned nflags = 0;
+ unsigned int nflags = 0;
if (ipv4_is_multicast(p.iph.daddr))
nflags = IFF_BROADCAST;
@@ -1663,7 +1736,7 @@ static int __init ipgre_init(void)
if (err < 0)
return err;
- err = inet_add_protocol(&ipgre_protocol, IPPROTO_GRE);
+ err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
if (err < 0) {
printk(KERN_INFO "ipgre init: can't add protocol\n");
goto add_proto_failed;
@@ -1683,7 +1756,7 @@ out:
tap_ops_failed:
rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
- inet_del_protocol(&ipgre_protocol, IPPROTO_GRE);
+ gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
unregister_pernet_device(&ipgre_net_ops);
goto out;
@@ -1693,7 +1766,7 @@ static void __exit ipgre_fini(void)
{
rtnl_link_unregister(&ipgre_tap_ops);
rtnl_link_unregister(&ipgre_link_ops);
- if (inet_del_protocol(&ipgre_protocol, IPPROTO_GRE) < 0)
+ if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
printk(KERN_INFO "ipgre close: can't remove protocol\n");
unregister_pernet_device(&ipgre_net_ops);
}
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index ba9836c488ed..1906fa35860c 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -466,7 +466,7 @@ error:
}
return -EINVAL;
}
-
+EXPORT_SYMBOL(ip_options_compile);
/*
* Undo all the changes done by ip_options_compile().
@@ -646,3 +646,4 @@ int ip_options_rcv_srr(struct sk_buff *skb)
}
return 0;
}
+EXPORT_SYMBOL(ip_options_rcv_srr);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 04b69896df5f..439d2a34ee44 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -487,10 +487,9 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
* LATER: this step can be merged to real generation of fragments,
* we can switch to copy when see the first bad fragment.
*/
- if (skb_has_frags(skb)) {
- struct sk_buff *frag;
+ if (skb_has_frag_list(skb)) {
+ struct sk_buff *frag, *frag2;
int first_len = skb_pagelen(skb);
- int truesizes = 0;
if (first_len - hlen > mtu ||
((first_len - hlen) & 7) ||
@@ -503,18 +502,18 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
if (frag->len > mtu ||
((frag->len & 7) && frag->next) ||
skb_headroom(frag) < hlen)
- goto slow_path;
+ goto slow_path_clean;
/* Partially cloned skb? */
if (skb_shared(frag))
- goto slow_path;
+ goto slow_path_clean;
BUG_ON(frag->sk);
if (skb->sk) {
frag->sk = skb->sk;
frag->destructor = sock_wfree;
}
- truesizes += frag->truesize;
+ skb->truesize -= frag->truesize;
}
/* Everything is OK. Generate! */
@@ -524,7 +523,6 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
frag = skb_shinfo(skb)->frag_list;
skb_frag_list_init(skb);
skb->data_len = first_len - skb_headlen(skb);
- skb->truesize -= truesizes;
skb->len = first_len;
iph->tot_len = htons(first_len);
iph->frag_off = htons(IP_MF);
@@ -576,6 +574,15 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
}
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
return err;
+
+slow_path_clean:
+ skb_walk_frags(skb, frag2) {
+ if (frag2 == frag)
+ break;
+ frag2->sk = NULL;
+ frag2->destructor = NULL;
+ skb->truesize += frag2->truesize;
+ }
}
slow_path:
@@ -837,10 +844,9 @@ int ip_append_data(struct sock *sk,
inet->cork.length = 0;
sk->sk_sndmsg_page = NULL;
sk->sk_sndmsg_off = 0;
- if ((exthdrlen = rt->dst.header_len) != 0) {
- length += exthdrlen;
- transhdrlen += exthdrlen;
- }
+ exthdrlen = rt->dst.header_len;
+ length += exthdrlen;
+ transhdrlen += exthdrlen;
} else {
rt = (struct rtable *)inet->cork.dst;
if (inet->cork.flags & IPCORK_OPT)
@@ -927,16 +933,19 @@ alloc_new_skb:
!(rt->dst.dev->features&NETIF_F_SG))
alloclen = mtu;
else
- alloclen = datalen + fragheaderlen;
+ alloclen = fraglen;
/* The last fragment gets additional space at tail.
* Note, with MSG_MORE we overallocate on fragments,
* because we have no idea what fragment will be
* the last.
*/
- if (datalen == length + fraggap)
+ if (datalen == length + fraggap) {
alloclen += rt->dst.trailer_len;
-
+ /* make sure mtu is not reached */
+ if (datalen > mtu - fragheaderlen - rt->dst.trailer_len)
+ datalen -= ALIGN(rt->dst.trailer_len, 8);
+ }
if (transhdrlen) {
skb = sock_alloc_send_skb(sk,
alloclen + hh_len + 15,
@@ -953,7 +962,7 @@ alloc_new_skb:
else
/* only the initial fragment is
time stamped */
- ipc->shtx.flags = 0;
+ ipc->tx_flags = 0;
}
if (skb == NULL)
goto error;
@@ -964,7 +973,7 @@ alloc_new_skb:
skb->ip_summed = csummode;
skb->csum = 0;
skb_reserve(skb, hh_len);
- *skb_tx(skb) = ipc->shtx;
+ skb_shinfo(skb)->tx_flags = ipc->tx_flags;
/*
* Find where to start putting bytes.
@@ -1384,7 +1393,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
daddr = ipc.addr = rt->rt_src;
ipc.opt = NULL;
- ipc.shtx.flags = 0;
+ ipc.tx_flags = 0;
if (replyopts.opt.optlen) {
ipc.opt = &replyopts.opt;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 6c40a8c46e79..64b70ad162e3 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1129,6 +1129,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
case IP_HDRINCL:
val = inet->hdrincl;
break;
+ case IP_NODEFRAG:
+ val = inet->nodefrag;
+ break;
case IP_MTU_DISCOVER:
val = inet->pmtudisc;
break;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index ec036731a70b..e9b816e6cd73 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -122,31 +122,59 @@
static int ipip_net_id __read_mostly;
struct ipip_net {
- struct ip_tunnel *tunnels_r_l[HASH_SIZE];
- struct ip_tunnel *tunnels_r[HASH_SIZE];
- struct ip_tunnel *tunnels_l[HASH_SIZE];
- struct ip_tunnel *tunnels_wc[1];
- struct ip_tunnel **tunnels[4];
+ struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
+ struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
+ struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
+ struct ip_tunnel __rcu *tunnels_wc[1];
+ struct ip_tunnel __rcu **tunnels[4];
struct net_device *fb_tunnel_dev;
};
-static void ipip_tunnel_init(struct net_device *dev);
+static int ipip_tunnel_init(struct net_device *dev);
static void ipip_tunnel_setup(struct net_device *dev);
+static void ipip_dev_free(struct net_device *dev);
/*
- * Locking : hash tables are protected by RCU and a spinlock
+ * Locking : hash tables are protected by RCU and RTNL
*/
-static DEFINE_SPINLOCK(ipip_lock);
#define for_each_ip_tunnel_rcu(start) \
for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
+/* often modified stats are per cpu, other are shared (netdev->stats) */
+struct pcpu_tstats {
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+};
+
+static struct net_device_stats *ipip_get_stats(struct net_device *dev)
+{
+ struct pcpu_tstats sum = { 0 };
+ int i;
+
+ for_each_possible_cpu(i) {
+ const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+
+ sum.rx_packets += tstats->rx_packets;
+ sum.rx_bytes += tstats->rx_bytes;
+ sum.tx_packets += tstats->tx_packets;
+ sum.tx_bytes += tstats->tx_bytes;
+ }
+ dev->stats.rx_packets = sum.rx_packets;
+ dev->stats.rx_bytes = sum.rx_bytes;
+ dev->stats.tx_packets = sum.tx_packets;
+ dev->stats.tx_bytes = sum.tx_bytes;
+ return &dev->stats;
+}
+
static struct ip_tunnel * ipip_tunnel_lookup(struct net *net,
__be32 remote, __be32 local)
{
- unsigned h0 = HASH(remote);
- unsigned h1 = HASH(local);
+ unsigned int h0 = HASH(remote);
+ unsigned int h1 = HASH(local);
struct ip_tunnel *t;
struct ipip_net *ipn = net_generic(net, ipip_net_id);
@@ -169,12 +197,12 @@ static struct ip_tunnel * ipip_tunnel_lookup(struct net *net,
return NULL;
}
-static struct ip_tunnel **__ipip_bucket(struct ipip_net *ipn,
+static struct ip_tunnel __rcu **__ipip_bucket(struct ipip_net *ipn,
struct ip_tunnel_parm *parms)
{
__be32 remote = parms->iph.daddr;
__be32 local = parms->iph.saddr;
- unsigned h = 0;
+ unsigned int h = 0;
int prio = 0;
if (remote) {
@@ -188,7 +216,7 @@ static struct ip_tunnel **__ipip_bucket(struct ipip_net *ipn,
return &ipn->tunnels[prio][h];
}
-static inline struct ip_tunnel **ipip_bucket(struct ipip_net *ipn,
+static inline struct ip_tunnel __rcu **ipip_bucket(struct ipip_net *ipn,
struct ip_tunnel *t)
{
return __ipip_bucket(ipn, &t->parms);
@@ -196,13 +224,14 @@ static inline struct ip_tunnel **ipip_bucket(struct ipip_net *ipn,
static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)
{
- struct ip_tunnel **tp;
-
- for (tp = ipip_bucket(ipn, t); *tp; tp = &(*tp)->next) {
- if (t == *tp) {
- spin_lock_bh(&ipip_lock);
- *tp = t->next;
- spin_unlock_bh(&ipip_lock);
+ struct ip_tunnel __rcu **tp;
+ struct ip_tunnel *iter;
+
+ for (tp = ipip_bucket(ipn, t);
+ (iter = rtnl_dereference(*tp)) != NULL;
+ tp = &iter->next) {
+ if (t == iter) {
+ rcu_assign_pointer(*tp, t->next);
break;
}
}
@@ -210,12 +239,10 @@ static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)
static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t)
{
- struct ip_tunnel **tp = ipip_bucket(ipn, t);
+ struct ip_tunnel __rcu **tp = ipip_bucket(ipn, t);
- spin_lock_bh(&ipip_lock);
- t->next = *tp;
+ rcu_assign_pointer(t->next, rtnl_dereference(*tp));
rcu_assign_pointer(*tp, t);
- spin_unlock_bh(&ipip_lock);
}
static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
@@ -223,12 +250,15 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
{
__be32 remote = parms->iph.daddr;
__be32 local = parms->iph.saddr;
- struct ip_tunnel *t, **tp, *nt;
+ struct ip_tunnel *t, *nt;
+ struct ip_tunnel __rcu **tp;
struct net_device *dev;
char name[IFNAMSIZ];
struct ipip_net *ipn = net_generic(net, ipip_net_id);
- for (tp = __ipip_bucket(ipn, parms); (t = *tp) != NULL; tp = &t->next) {
+ for (tp = __ipip_bucket(ipn, parms);
+ (t = rtnl_dereference(*tp)) != NULL;
+ tp = &t->next) {
if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
return t;
}
@@ -238,7 +268,7 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
if (parms->name[0])
strlcpy(name, parms->name, IFNAMSIZ);
else
- sprintf(name, "tunl%%d");
+ strcpy(name, "tunl%d");
dev = alloc_netdev(sizeof(*t), name, ipip_tunnel_setup);
if (dev == NULL)
@@ -254,7 +284,8 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
nt = netdev_priv(dev);
nt->parms = *parms;
- ipip_tunnel_init(dev);
+ if (ipip_tunnel_init(dev) < 0)
+ goto failed_free;
if (register_netdevice(dev) < 0)
goto failed_free;
@@ -264,20 +295,19 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
return nt;
failed_free:
- free_netdev(dev);
+ ipip_dev_free(dev);
return NULL;
}
+/* called with RTNL */
static void ipip_tunnel_uninit(struct net_device *dev)
{
struct net *net = dev_net(dev);
struct ipip_net *ipn = net_generic(net, ipip_net_id);
- if (dev == ipn->fb_tunnel_dev) {
- spin_lock_bh(&ipip_lock);
- ipn->tunnels_wc[0] = NULL;
- spin_unlock_bh(&ipip_lock);
- } else
+ if (dev == ipn->fb_tunnel_dev)
+ rcu_assign_pointer(ipn->tunnels_wc[0], NULL);
+ else
ipip_tunnel_unlink(ipn, netdev_priv(dev));
dev_put(dev);
}
@@ -359,8 +389,10 @@ static int ipip_rcv(struct sk_buff *skb)
const struct iphdr *iph = ip_hdr(skb);
rcu_read_lock();
- if ((tunnel = ipip_tunnel_lookup(dev_net(skb->dev),
- iph->saddr, iph->daddr)) != NULL) {
+ tunnel = ipip_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
+ if (tunnel != NULL) {
+ struct pcpu_tstats *tstats;
+
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
rcu_read_unlock();
kfree_skb(skb);
@@ -374,10 +406,16 @@ static int ipip_rcv(struct sk_buff *skb)
skb->protocol = htons(ETH_P_IP);
skb->pkt_type = PACKET_HOST;
- skb_tunnel_rx(skb, tunnel->dev);
+ tstats = this_cpu_ptr(tunnel->dev->tstats);
+ tstats->rx_packets++;
+ tstats->rx_bytes += skb->len;
+
+ __skb_tunnel_rx(skb, tunnel->dev);
ipip_ecn_decapsulate(iph, skb);
+
netif_rx(skb);
+
rcu_read_unlock();
return 0;
}
@@ -394,13 +432,12 @@ static int ipip_rcv(struct sk_buff *skb)
static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+ struct pcpu_tstats *tstats;
struct iphdr *tiph = &tunnel->parms.iph;
u8 tos = tunnel->parms.iph.tos;
__be16 df = tiph->frag_off;
struct rtable *rt; /* Route to the other host */
- struct net_device *tdev; /* Device to other host */
+ struct net_device *tdev; /* Device to other host */
struct iphdr *old_iph = ip_hdr(skb);
struct iphdr *iph; /* Our new IP header */
unsigned int max_headroom; /* The extra header space needed */
@@ -410,13 +447,13 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb->protocol != htons(ETH_P_IP))
goto tx_error;
- if (tos&1)
+ if (tos & 1)
tos = old_iph->tos;
if (!dst) {
/* NBMA tunnel */
if ((rt = skb_rtable(skb)) == NULL) {
- stats->tx_fifo_errors++;
+ dev->stats.tx_fifo_errors++;
goto tx_error;
}
if ((dst = rt->rt_gateway) == 0)
@@ -424,14 +461,20 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
}
{
- struct flowi fl = { .oif = tunnel->parms.link,
- .nl_u = { .ip4_u =
- { .daddr = dst,
- .saddr = tiph->saddr,
- .tos = RT_TOS(tos) } },
- .proto = IPPROTO_IPIP };
+ struct flowi fl = {
+ .oif = tunnel->parms.link,
+ .nl_u = {
+ .ip4_u = {
+ .daddr = dst,
+ .saddr = tiph->saddr,
+ .tos = RT_TOS(tos)
+ }
+ },
+ .proto = IPPROTO_IPIP
+ };
+
if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
- stats->tx_carrier_errors++;
+ dev->stats.tx_carrier_errors++;
goto tx_error_icmp;
}
}
@@ -439,7 +482,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
if (tdev == dev) {
ip_rt_put(rt);
- stats->collisions++;
+ dev->stats.collisions++;
goto tx_error;
}
@@ -449,7 +492,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
if (mtu < 68) {
- stats->collisions++;
+ dev->stats.collisions++;
ip_rt_put(rt);
goto tx_error;
}
@@ -485,7 +528,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
if (!new_skb) {
ip_rt_put(rt);
- txq->tx_dropped++;
+ dev->stats.tx_dropped++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -522,14 +565,14 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
iph->ttl = old_iph->ttl;
nf_reset(skb);
-
- IPTUNNEL_XMIT();
+ tstats = this_cpu_ptr(dev->tstats);
+ __IPTUNNEL_XMIT(tstats, &dev->stats);
return NETDEV_TX_OK;
tx_error_icmp:
dst_link_failure(skb);
tx_error:
- stats->tx_errors++;
+ dev->stats.tx_errors++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -544,13 +587,19 @@ static void ipip_tunnel_bind_dev(struct net_device *dev)
iph = &tunnel->parms.iph;
if (iph->daddr) {
- struct flowi fl = { .oif = tunnel->parms.link,
- .nl_u = { .ip4_u =
- { .daddr = iph->daddr,
- .saddr = iph->saddr,
- .tos = RT_TOS(iph->tos) } },
- .proto = IPPROTO_IPIP };
+ struct flowi fl = {
+ .oif = tunnel->parms.link,
+ .nl_u = {
+ .ip4_u = {
+ .daddr = iph->daddr,
+ .saddr = iph->saddr,
+ .tos = RT_TOS(iph->tos)
+ }
+ },
+ .proto = IPPROTO_IPIP
+ };
struct rtable *rt;
+
if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
tdev = rt->dst.dev;
ip_rt_put(rt);
@@ -696,13 +745,19 @@ static const struct net_device_ops ipip_netdev_ops = {
.ndo_start_xmit = ipip_tunnel_xmit,
.ndo_do_ioctl = ipip_tunnel_ioctl,
.ndo_change_mtu = ipip_tunnel_change_mtu,
-
+ .ndo_get_stats = ipip_get_stats,
};
+static void ipip_dev_free(struct net_device *dev)
+{
+ free_percpu(dev->tstats);
+ free_netdev(dev);
+}
+
static void ipip_tunnel_setup(struct net_device *dev)
{
dev->netdev_ops = &ipip_netdev_ops;
- dev->destructor = free_netdev;
+ dev->destructor = ipip_dev_free;
dev->type = ARPHRD_TUNNEL;
dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
@@ -711,10 +766,11 @@ static void ipip_tunnel_setup(struct net_device *dev)
dev->iflink = 0;
dev->addr_len = 4;
dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->features |= NETIF_F_LLTX;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}
-static void ipip_tunnel_init(struct net_device *dev)
+static int ipip_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
@@ -725,9 +781,15 @@ static void ipip_tunnel_init(struct net_device *dev)
memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
ipip_tunnel_bind_dev(dev);
+
+ dev->tstats = alloc_percpu(struct pcpu_tstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
+ return 0;
}
-static void __net_init ipip_fb_tunnel_init(struct net_device *dev)
+static int __net_init ipip_fb_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
@@ -740,11 +802,16 @@ static void __net_init ipip_fb_tunnel_init(struct net_device *dev)
iph->protocol = IPPROTO_IPIP;
iph->ihl = 5;
+ dev->tstats = alloc_percpu(struct pcpu_tstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
dev_hold(dev);
- ipn->tunnels_wc[0] = tunnel;
+ rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
+ return 0;
}
-static struct xfrm_tunnel ipip_handler = {
+static struct xfrm_tunnel ipip_handler __read_mostly = {
.handler = ipip_rcv,
.err_handler = ipip_err,
.priority = 1,
@@ -760,11 +827,12 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
for (prio = 1; prio < 4; prio++) {
int h;
for (h = 0; h < HASH_SIZE; h++) {
- struct ip_tunnel *t = ipn->tunnels[prio][h];
+ struct ip_tunnel *t;
+ t = rtnl_dereference(ipn->tunnels[prio][h]);
while (t != NULL) {
unregister_netdevice_queue(t->dev, head);
- t = t->next;
+ t = rtnl_dereference(t->next);
}
}
}
@@ -789,7 +857,9 @@ static int __net_init ipip_init_net(struct net *net)
}
dev_net_set(ipn->fb_tunnel_dev, net);
- ipip_fb_tunnel_init(ipn->fb_tunnel_dev);
+ err = ipip_fb_tunnel_init(ipn->fb_tunnel_dev);
+ if (err)
+ goto err_reg_dev;
if ((err = register_netdev(ipn->fb_tunnel_dev)))
goto err_reg_dev;
@@ -797,7 +867,7 @@ static int __net_init ipip_init_net(struct net *net)
return 0;
err_reg_dev:
- free_netdev(ipn->fb_tunnel_dev);
+ ipip_dev_free(ipn->fb_tunnel_dev);
err_alloc_dev:
/* nothing */
return err;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 179fcab866fc..86dd5691af46 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -75,7 +75,7 @@ struct mr_table {
struct net *net;
#endif
u32 id;
- struct sock *mroute_sk;
+ struct sock __rcu *mroute_sk;
struct timer_list ipmr_expire_timer;
struct list_head mfc_unres_queue;
struct list_head mfc_cache_array[MFC_LINES];
@@ -98,7 +98,7 @@ struct ipmr_result {
};
/* Big lock, protecting vif table, mrt cache and mroute socket state.
- Note that the changes are semaphored via rtnl_lock.
+ * Note that the changes are semaphored via rtnl_lock.
*/
static DEFINE_RWLOCK(mrt_lock);
@@ -113,11 +113,11 @@ static DEFINE_RWLOCK(mrt_lock);
static DEFINE_SPINLOCK(mfc_unres_lock);
/* We return to original Alan's scheme. Hash table of resolved
- entries is changed only in process context and protected
- with weak lock mrt_lock. Queue of unresolved entries is protected
- with strong spinlock mfc_unres_lock.
-
- In this case data path is free of exclusive locks at all.
+ * entries is changed only in process context and protected
+ * with weak lock mrt_lock. Queue of unresolved entries is protected
+ * with strong spinlock mfc_unres_lock.
+ *
+ * In this case data path is free of exclusive locks at all.
*/
static struct kmem_cache *mrt_cachep __read_mostly;
@@ -396,9 +396,9 @@ struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
set_fs(KERNEL_DS);
err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
set_fs(oldfs);
- } else
+ } else {
err = -EOPNOTSUPP;
-
+ }
dev = NULL;
if (err == 0 &&
@@ -495,7 +495,8 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
dev->iflink = 0;
rcu_read_lock();
- if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {
+ in_dev = __in_dev_get_rcu(dev);
+ if (!in_dev) {
rcu_read_unlock();
goto failure;
}
@@ -552,9 +553,10 @@ static int vif_delete(struct mr_table *mrt, int vifi, int notify,
mrt->mroute_reg_vif_num = -1;
#endif
- if (vifi+1 == mrt->maxvif) {
+ if (vifi + 1 == mrt->maxvif) {
int tmp;
- for (tmp=vifi-1; tmp>=0; tmp--) {
+
+ for (tmp = vifi - 1; tmp >= 0; tmp--) {
if (VIF_EXISTS(mrt, tmp))
break;
}
@@ -565,25 +567,33 @@ static int vif_delete(struct mr_table *mrt, int vifi, int notify,
dev_set_allmulti(dev, -1);
- if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
+ in_dev = __in_dev_get_rtnl(dev);
+ if (in_dev) {
IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
ip_rt_multicast_event(in_dev);
}
- if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER) && !notify)
+ if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
unregister_netdevice_queue(dev, head);
dev_put(dev);
return 0;
}
-static inline void ipmr_cache_free(struct mfc_cache *c)
+static void ipmr_cache_free_rcu(struct rcu_head *head)
{
+ struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);
+
kmem_cache_free(mrt_cachep, c);
}
+static inline void ipmr_cache_free(struct mfc_cache *c)
+{
+ call_rcu(&c->rcu, ipmr_cache_free_rcu);
+}
+
/* Destroy an unresolved cache entry, killing queued skbs
- and reporting error to netlink readers.
+ * and reporting error to netlink readers.
*/
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
@@ -605,8 +615,9 @@ static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
memset(&e->msg, 0, sizeof(e->msg));
rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
- } else
+ } else {
kfree_skb(skb);
+ }
}
ipmr_cache_free(c);
@@ -724,13 +735,13 @@ static int vif_add(struct net *net, struct mr_table *mrt,
case 0:
if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
- if (dev && dev->ip_ptr == NULL) {
+ if (dev && __in_dev_get_rtnl(dev) == NULL) {
dev_put(dev);
return -EADDRNOTAVAIL;
}
- } else
+ } else {
dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
-
+ }
if (!dev)
return -EADDRNOTAVAIL;
err = dev_set_allmulti(dev, 1);
@@ -743,16 +754,16 @@ static int vif_add(struct net *net, struct mr_table *mrt,
return -EINVAL;
}
- if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) {
+ in_dev = __in_dev_get_rtnl(dev);
+ if (!in_dev) {
dev_put(dev);
return -EADDRNOTAVAIL;
}
IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
ip_rt_multicast_event(in_dev);
- /*
- * Fill in the VIF structures
- */
+ /* Fill in the VIF structures */
+
v->rate_limit = vifc->vifc_rate_limit;
v->local = vifc->vifc_lcl_addr.s_addr;
v->remote = vifc->vifc_rmt_addr.s_addr;
@@ -765,14 +776,14 @@ static int vif_add(struct net *net, struct mr_table *mrt,
v->pkt_in = 0;
v->pkt_out = 0;
v->link = dev->ifindex;
- if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER))
+ if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
v->link = dev->iflink;
/* And finish update writing critical data */
write_lock_bh(&mrt_lock);
v->dev = dev;
#ifdef CONFIG_IP_PIMSM
- if (v->flags&VIFF_REGISTER)
+ if (v->flags & VIFF_REGISTER)
mrt->mroute_reg_vif_num = vifi;
#endif
if (vifi+1 > mrt->maxvif)
@@ -781,6 +792,7 @@ static int vif_add(struct net *net, struct mr_table *mrt,
return 0;
}
+/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
__be32 origin,
__be32 mcastgrp)
@@ -788,7 +800,7 @@ static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
int line = MFC_HASH(mcastgrp, origin);
struct mfc_cache *c;
- list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
+ list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) {
if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
return c;
}
@@ -801,19 +813,20 @@ static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
static struct mfc_cache *ipmr_cache_alloc(void)
{
struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
- if (c == NULL)
- return NULL;
- c->mfc_un.res.minvif = MAXVIFS;
+
+ if (c)
+ c->mfc_un.res.minvif = MAXVIFS;
return c;
}
static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
- if (c == NULL)
- return NULL;
- skb_queue_head_init(&c->mfc_un.unres.unresolved);
- c->mfc_un.unres.expires = jiffies + 10*HZ;
+
+ if (c) {
+ skb_queue_head_init(&c->mfc_un.unres.unresolved);
+ c->mfc_un.unres.expires = jiffies + 10*HZ;
+ }
return c;
}
@@ -827,17 +840,15 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
struct sk_buff *skb;
struct nlmsgerr *e;
- /*
- * Play the pending entries through our router
- */
+ /* Play the pending entries through our router */
while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
if (ip_hdr(skb)->version == 0) {
struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
if (__ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
- nlh->nlmsg_len = (skb_tail_pointer(skb) -
- (u8 *)nlh);
+ nlh->nlmsg_len = skb_tail_pointer(skb) -
+ (u8 *)nlh;
} else {
nlh->nlmsg_type = NLMSG_ERROR;
nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
@@ -848,8 +859,9 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
}
rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
- } else
+ } else {
ip_mr_forward(net, mrt, skb, c, 0);
+ }
}
}
@@ -867,6 +879,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
const int ihl = ip_hdrlen(pkt);
struct igmphdr *igmp;
struct igmpmsg *msg;
+ struct sock *mroute_sk;
int ret;
#ifdef CONFIG_IP_PIMSM
@@ -882,9 +895,9 @@ static int ipmr_cache_report(struct mr_table *mrt,
#ifdef CONFIG_IP_PIMSM
if (assert == IGMPMSG_WHOLEPKT) {
/* Ugly, but we have no choice with this interface.
- Duplicate old header, fix ihl, length etc.
- And all this only to mangle msg->im_msgtype and
- to set msg->im_mbz to "mbz" :-)
+ * Duplicate old header, fix ihl, length etc.
+ * And all this only to mangle msg->im_msgtype and
+ * to set msg->im_mbz to "mbz" :-)
*/
skb_push(skb, sizeof(struct iphdr));
skb_reset_network_header(skb);
@@ -901,39 +914,38 @@ static int ipmr_cache_report(struct mr_table *mrt,
#endif
{
- /*
- * Copy the IP header
- */
+ /* Copy the IP header */
skb->network_header = skb->tail;
skb_put(skb, ihl);
skb_copy_to_linear_data(skb, pkt->data, ihl);
- ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */
+ ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */
msg = (struct igmpmsg *)skb_network_header(skb);
msg->im_vif = vifi;
skb_dst_set(skb, dst_clone(skb_dst(pkt)));
- /*
- * Add our header
- */
+ /* Add our header */
- igmp=(struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
+ igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
igmp->type =
msg->im_msgtype = assert;
- igmp->code = 0;
- ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */
+ igmp->code = 0;
+ ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */
skb->transport_header = skb->network_header;
}
- if (mrt->mroute_sk == NULL) {
+ rcu_read_lock();
+ mroute_sk = rcu_dereference(mrt->mroute_sk);
+ if (mroute_sk == NULL) {
+ rcu_read_unlock();
kfree_skb(skb);
return -EINVAL;
}
- /*
- * Deliver to mrouted
- */
- ret = sock_queue_rcv_skb(mrt->mroute_sk, skb);
+ /* Deliver to mrouted */
+
+ ret = sock_queue_rcv_skb(mroute_sk, skb);
+ rcu_read_unlock();
if (ret < 0) {
if (net_ratelimit())
printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
@@ -965,9 +977,7 @@ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
}
if (!found) {
- /*
- * Create a new entry if allowable
- */
+ /* Create a new entry if allowable */
if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
(c = ipmr_cache_alloc_unres()) == NULL) {
@@ -977,16 +987,14 @@ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
return -ENOBUFS;
}
- /*
- * Fill in the new cache entry
- */
+ /* Fill in the new cache entry */
+
c->mfc_parent = -1;
c->mfc_origin = iph->saddr;
c->mfc_mcastgrp = iph->daddr;
- /*
- * Reflect first query at mrouted.
- */
+ /* Reflect first query at mrouted. */
+
err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
if (err < 0) {
/* If the report failed throw the cache entry
@@ -1006,10 +1014,9 @@ ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
}
- /*
- * See if we can append the packet
- */
- if (c->mfc_un.unres.unresolved.qlen>3) {
+ /* See if we can append the packet */
+
+ if (c->mfc_un.unres.unresolved.qlen > 3) {
kfree_skb(skb);
err = -ENOBUFS;
} else {
@@ -1035,9 +1042,7 @@ static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc)
list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
- write_lock_bh(&mrt_lock);
- list_del(&c->list);
- write_unlock_bh(&mrt_lock);
+ list_del_rcu(&c->list);
ipmr_cache_free(c);
return 0;
@@ -1090,9 +1095,7 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
if (!mrtsock)
c->mfc_flags |= MFC_STATIC;
- write_lock_bh(&mrt_lock);
- list_add(&c->list, &mrt->mfc_cache_array[line]);
- write_unlock_bh(&mrt_lock);
+ list_add_rcu(&c->list, &mrt->mfc_cache_array[line]);
/*
* Check to see if we resolved a queued list. If so we
@@ -1130,26 +1133,21 @@ static void mroute_clean_tables(struct mr_table *mrt)
LIST_HEAD(list);
struct mfc_cache *c, *next;
- /*
- * Shut down all active vif entries
- */
+ /* Shut down all active vif entries */
+
for (i = 0; i < mrt->maxvif; i++) {
- if (!(mrt->vif_table[i].flags&VIFF_STATIC))
+ if (!(mrt->vif_table[i].flags & VIFF_STATIC))
vif_delete(mrt, i, 0, &list);
}
unregister_netdevice_many(&list);
- /*
- * Wipe the cache
- */
+ /* Wipe the cache */
+
for (i = 0; i < MFC_LINES; i++) {
list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
- if (c->mfc_flags&MFC_STATIC)
+ if (c->mfc_flags & MFC_STATIC)
continue;
- write_lock_bh(&mrt_lock);
- list_del(&c->list);
- write_unlock_bh(&mrt_lock);
-
+ list_del_rcu(&c->list);
ipmr_cache_free(c);
}
}
@@ -1164,6 +1162,9 @@ static void mroute_clean_tables(struct mr_table *mrt)
}
}
+/* called from ip_ra_control(), before an RCU grace period,
+ * we dont need to call synchronize_rcu() here
+ */
static void mrtsock_destruct(struct sock *sk)
{
struct net *net = sock_net(sk);
@@ -1171,13 +1172,9 @@ static void mrtsock_destruct(struct sock *sk)
rtnl_lock();
ipmr_for_each_table(mrt, net) {
- if (sk == mrt->mroute_sk) {
+ if (sk == rtnl_dereference(mrt->mroute_sk)) {
IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
-
- write_lock_bh(&mrt_lock);
- mrt->mroute_sk = NULL;
- write_unlock_bh(&mrt_lock);
-
+ rcu_assign_pointer(mrt->mroute_sk, NULL);
mroute_clean_tables(mrt);
}
}
@@ -1204,7 +1201,8 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
return -ENOENT;
if (optname != MRT_INIT) {
- if (sk != mrt->mroute_sk && !capable(CAP_NET_ADMIN))
+ if (sk != rcu_dereference_raw(mrt->mroute_sk) &&
+ !capable(CAP_NET_ADMIN))
return -EACCES;
}
@@ -1217,23 +1215,20 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
return -ENOPROTOOPT;
rtnl_lock();
- if (mrt->mroute_sk) {
+ if (rtnl_dereference(mrt->mroute_sk)) {
rtnl_unlock();
return -EADDRINUSE;
}
ret = ip_ra_control(sk, 1, mrtsock_destruct);
if (ret == 0) {
- write_lock_bh(&mrt_lock);
- mrt->mroute_sk = sk;
- write_unlock_bh(&mrt_lock);
-
+ rcu_assign_pointer(mrt->mroute_sk, sk);
IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
}
rtnl_unlock();
return ret;
case MRT_DONE:
- if (sk != mrt->mroute_sk)
+ if (sk != rcu_dereference_raw(mrt->mroute_sk))
return -EACCES;
return ip_ra_control(sk, 0, NULL);
case MRT_ADD_VIF:
@@ -1246,7 +1241,8 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
return -ENFILE;
rtnl_lock();
if (optname == MRT_ADD_VIF) {
- ret = vif_add(net, mrt, &vif, sk == mrt->mroute_sk);
+ ret = vif_add(net, mrt, &vif,
+ sk == rtnl_dereference(mrt->mroute_sk));
} else {
ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
}
@@ -1267,7 +1263,8 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
if (optname == MRT_DEL_MFC)
ret = ipmr_mfc_delete(mrt, &mfc);
else
- ret = ipmr_mfc_add(net, mrt, &mfc, sk == mrt->mroute_sk);
+ ret = ipmr_mfc_add(net, mrt, &mfc,
+ sk == rtnl_dereference(mrt->mroute_sk));
rtnl_unlock();
return ret;
/*
@@ -1276,7 +1273,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
case MRT_ASSERT:
{
int v;
- if (get_user(v,(int __user *)optval))
+ if (get_user(v, (int __user *)optval))
return -EFAULT;
mrt->mroute_do_assert = (v) ? 1 : 0;
return 0;
@@ -1286,7 +1283,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
{
int v;
- if (get_user(v,(int __user *)optval))
+ if (get_user(v, (int __user *)optval))
return -EFAULT;
v = (v) ? 1 : 0;
@@ -1309,14 +1306,16 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
return -EINVAL;
if (get_user(v, (u32 __user *)optval))
return -EFAULT;
- if (sk == mrt->mroute_sk)
- return -EBUSY;
rtnl_lock();
ret = 0;
- if (!ipmr_new_table(net, v))
- ret = -ENOMEM;
- raw_sk(sk)->ipmr_table = v;
+ if (sk == rtnl_dereference(mrt->mroute_sk)) {
+ ret = -EBUSY;
+ } else {
+ if (!ipmr_new_table(net, v))
+ ret = -ENOMEM;
+ raw_sk(sk)->ipmr_table = v;
+ }
rtnl_unlock();
return ret;
}
@@ -1347,9 +1346,9 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int
if (optname != MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
- optname!=MRT_PIM &&
+ optname != MRT_PIM &&
#endif
- optname!=MRT_ASSERT)
+ optname != MRT_ASSERT)
return -ENOPROTOOPT;
if (get_user(olr, optlen))
@@ -1416,19 +1415,19 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
if (copy_from_user(&sr, arg, sizeof(sr)))
return -EFAULT;
- read_lock(&mrt_lock);
+ rcu_read_lock();
c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
if (c) {
sr.pktcnt = c->mfc_un.res.pkt;
sr.bytecnt = c->mfc_un.res.bytes;
sr.wrong_if = c->mfc_un.res.wrong_if;
- read_unlock(&mrt_lock);
+ rcu_read_unlock();
if (copy_to_user(arg, &sr, sizeof(sr)))
return -EFAULT;
return 0;
}
- read_unlock(&mrt_lock);
+ rcu_read_unlock();
return -EADDRNOTAVAIL;
default:
return -ENOIOCTLCMD;
@@ -1465,7 +1464,7 @@ static struct notifier_block ip_mr_notifier = {
};
/*
- * Encapsulate a packet by attaching a valid IPIP header to it.
+ * Encapsulate a packet by attaching a valid IPIP header to it.
* This avoids tunnel drivers and other mess and gives us the speed so
* important for multicast video.
*/
@@ -1480,7 +1479,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
skb_reset_network_header(skb);
iph = ip_hdr(skb);
- iph->version = 4;
+ iph->version = 4;
iph->tos = old_iph->tos;
iph->ttl = old_iph->ttl;
iph->frag_off = 0;
@@ -1498,7 +1497,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
static inline int ipmr_forward_finish(struct sk_buff *skb)
{
- struct ip_options * opt = &(IPCB(skb)->opt);
+ struct ip_options *opt = &(IPCB(skb)->opt);
IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
@@ -1535,22 +1534,34 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
}
#endif
- if (vif->flags&VIFF_TUNNEL) {
- struct flowi fl = { .oif = vif->link,
- .nl_u = { .ip4_u =
- { .daddr = vif->remote,
- .saddr = vif->local,
- .tos = RT_TOS(iph->tos) } },
- .proto = IPPROTO_IPIP };
+ if (vif->flags & VIFF_TUNNEL) {
+ struct flowi fl = {
+ .oif = vif->link,
+ .nl_u = {
+ .ip4_u = {
+ .daddr = vif->remote,
+ .saddr = vif->local,
+ .tos = RT_TOS(iph->tos)
+ }
+ },
+ .proto = IPPROTO_IPIP
+ };
+
if (ip_route_output_key(net, &rt, &fl))
goto out_free;
encap = sizeof(struct iphdr);
} else {
- struct flowi fl = { .oif = vif->link,
- .nl_u = { .ip4_u =
- { .daddr = iph->daddr,
- .tos = RT_TOS(iph->tos) } },
- .proto = IPPROTO_IPIP };
+ struct flowi fl = {
+ .oif = vif->link,
+ .nl_u = {
+ .ip4_u = {
+ .daddr = iph->daddr,
+ .tos = RT_TOS(iph->tos)
+ }
+ },
+ .proto = IPPROTO_IPIP
+ };
+
if (ip_route_output_key(net, &rt, &fl))
goto out_free;
}
@@ -1559,8 +1570,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
/* Do not fragment multicasts. Alas, IPv4 does not
- allow to send ICMP, so that packets will disappear
- to blackhole.
+ * allow to send ICMP, so that packets will disappear
+ * to blackhole.
*/
IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
@@ -1583,7 +1594,8 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
ip_decrease_ttl(ip_hdr(skb));
/* FIXME: forward and output firewalls used to be called here.
- * What do we do with netfilter? -- RR */
+ * What do we do with netfilter? -- RR
+ */
if (vif->flags & VIFF_TUNNEL) {
ip_encap(skb, vif->local, vif->remote);
/* FIXME: extra output firewall step used to be here. --RR */
@@ -1644,15 +1656,15 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
if (skb_rtable(skb)->fl.iif == 0) {
/* It is our own packet, looped back.
- Very complicated situation...
-
- The best workaround until routing daemons will be
- fixed is not to redistribute packet, if it was
- send through wrong interface. It means, that
- multicast applications WILL NOT work for
- (S,G), which have default multicast route pointing
- to wrong oif. In any case, it is not a good
- idea to use multicasting applications on router.
+ * Very complicated situation...
+ *
+ * The best workaround until routing daemons will be
+ * fixed is not to redistribute packet, if it was
+ * send through wrong interface. It means, that
+ * multicast applications WILL NOT work for
+ * (S,G), which have default multicast route pointing
+ * to wrong oif. In any case, it is not a good
+ * idea to use multicasting applications on router.
*/
goto dont_forward;
}
@@ -1662,9 +1674,9 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
if (true_vifi >= 0 && mrt->mroute_do_assert &&
/* pimsm uses asserts, when switching from RPT to SPT,
- so that we cannot check that packet arrived on an oif.
- It is bad, but otherwise we would need to move pretty
- large chunk of pimd to kernel. Ough... --ANK
+ * so that we cannot check that packet arrived on an oif.
+ * It is bad, but otherwise we would need to move pretty
+ * large chunk of pimd to kernel. Ough... --ANK
*/
(mrt->mroute_do_pim ||
cache->mfc_un.res.ttls[true_vifi] < 255) &&
@@ -1682,10 +1694,12 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
/*
* Forward the frame
*/
- for (ct = cache->mfc_un.res.maxvif-1; ct >= cache->mfc_un.res.minvif; ct--) {
+ for (ct = cache->mfc_un.res.maxvif - 1;
+ ct >= cache->mfc_un.res.minvif; ct--) {
if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
if (psend != -1) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+
if (skb2)
ipmr_queue_xmit(net, mrt, skb2, cache,
psend);
@@ -1696,6 +1710,7 @@ static int ip_mr_forward(struct net *net, struct mr_table *mrt,
if (psend != -1) {
if (local) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+
if (skb2)
ipmr_queue_xmit(net, mrt, skb2, cache, psend);
} else {
@@ -1713,6 +1728,7 @@ dont_forward:
/*
* Multicast packets for forwarding arrive here
+ * Called with rcu_read_lock();
*/
int ip_mr_input(struct sk_buff *skb)
@@ -1724,9 +1740,9 @@ int ip_mr_input(struct sk_buff *skb)
int err;
/* Packet is looped back after forward, it should not be
- forwarded second time, but still can be delivered locally.
+ * forwarded second time, but still can be delivered locally.
*/
- if (IPCB(skb)->flags&IPSKB_FORWARDED)
+ if (IPCB(skb)->flags & IPSKB_FORWARDED)
goto dont_forward;
err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt);
@@ -1736,28 +1752,28 @@ int ip_mr_input(struct sk_buff *skb)
}
if (!local) {
- if (IPCB(skb)->opt.router_alert) {
- if (ip_call_ra_chain(skb))
- return 0;
- } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP){
- /* IGMPv1 (and broken IGMPv2 implementations sort of
- Cisco IOS <= 11.2(8)) do not put router alert
- option to IGMP packets destined to routable
- groups. It is very bad, because it means
- that we can forward NO IGMP messages.
- */
- read_lock(&mrt_lock);
- if (mrt->mroute_sk) {
- nf_reset(skb);
- raw_rcv(mrt->mroute_sk, skb);
- read_unlock(&mrt_lock);
- return 0;
- }
- read_unlock(&mrt_lock);
+ if (IPCB(skb)->opt.router_alert) {
+ if (ip_call_ra_chain(skb))
+ return 0;
+ } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
+ /* IGMPv1 (and broken IGMPv2 implementations sort of
+ * Cisco IOS <= 11.2(8)) do not put router alert
+ * option to IGMP packets destined to routable
+ * groups. It is very bad, because it means
+ * that we can forward NO IGMP messages.
+ */
+ struct sock *mroute_sk;
+
+ mroute_sk = rcu_dereference(mrt->mroute_sk);
+ if (mroute_sk) {
+ nf_reset(skb);
+ raw_rcv(mroute_sk, skb);
+ return 0;
+ }
}
}
- read_lock(&mrt_lock);
+ /* already under rcu_read_lock() */
cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
/*
@@ -1769,13 +1785,12 @@ int ip_mr_input(struct sk_buff *skb)
if (local) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
ip_local_deliver(skb);
- if (skb2 == NULL) {
- read_unlock(&mrt_lock);
+ if (skb2 == NULL)
return -ENOBUFS;
- }
skb = skb2;
}
+ read_lock(&mrt_lock);
vif = ipmr_find_vif(mrt, skb->dev);
if (vif >= 0) {
int err2 = ipmr_cache_unresolved(mrt, vif, skb);
@@ -1788,8 +1803,8 @@ int ip_mr_input(struct sk_buff *skb)
return -ENODEV;
}
+ read_lock(&mrt_lock);
ip_mr_forward(net, mrt, skb, cache, local);
-
read_unlock(&mrt_lock);
if (local)
@@ -1805,6 +1820,7 @@ dont_forward:
}
#ifdef CONFIG_IP_PIMSM
+/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
unsigned int pimlen)
{
@@ -1813,10 +1829,10 @@ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
/*
- Check that:
- a. packet is really destinted to a multicast group
- b. packet is not a NULL-REGISTER
- c. packet is not truncated
+ * Check that:
+ * a. packet is really sent to a multicast group
+ * b. packet is not a NULL-REGISTER
+ * c. packet is not truncated
*/
if (!ipv4_is_multicast(encap->daddr) ||
encap->tot_len == 0 ||
@@ -1826,26 +1842,23 @@ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
read_lock(&mrt_lock);
if (mrt->mroute_reg_vif_num >= 0)
reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
- if (reg_dev)
- dev_hold(reg_dev);
read_unlock(&mrt_lock);
if (reg_dev == NULL)
return 1;
skb->mac_header = skb->network_header;
- skb_pull(skb, (u8*)encap - skb->data);
+ skb_pull(skb, (u8 *)encap - skb->data);
skb_reset_network_header(skb);
skb->protocol = htons(ETH_P_IP);
- skb->ip_summed = 0;
+ skb->ip_summed = CHECKSUM_NONE;
skb->pkt_type = PACKET_HOST;
skb_tunnel_rx(skb, reg_dev);
netif_rx(skb);
- dev_put(reg_dev);
- return 0;
+ return NET_RX_SUCCESS;
}
#endif
@@ -1854,7 +1867,7 @@ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
* Handle IGMP messages of PIMv1
*/
-int pim_rcv_v1(struct sk_buff * skb)
+int pim_rcv_v1(struct sk_buff *skb)
{
struct igmphdr *pim;
struct net *net = dev_net(skb->dev);
@@ -1881,7 +1894,7 @@ drop:
#endif
#ifdef CONFIG_IP_PIMSM_V2
-static int pim_rcv(struct sk_buff * skb)
+static int pim_rcv(struct sk_buff *skb)
{
struct pimreghdr *pim;
struct net *net = dev_net(skb->dev);
@@ -1891,8 +1904,8 @@ static int pim_rcv(struct sk_buff * skb)
goto drop;
pim = (struct pimreghdr *)skb_transport_header(skb);
- if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
- (pim->flags&PIM_NULL_REGISTER) ||
+ if (pim->type != ((PIM_VERSION << 4) | (PIM_REGISTER)) ||
+ (pim->flags & PIM_NULL_REGISTER) ||
(ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
csum_fold(skb_checksum(skb, 0, skb->len, 0))))
goto drop;
@@ -1958,28 +1971,33 @@ int ipmr_get_route(struct net *net,
if (mrt == NULL)
return -ENOENT;
- read_lock(&mrt_lock);
+ rcu_read_lock();
cache = ipmr_cache_find(mrt, rt->rt_src, rt->rt_dst);
if (cache == NULL) {
struct sk_buff *skb2;
struct iphdr *iph;
struct net_device *dev;
- int vif;
+ int vif = -1;
if (nowait) {
- read_unlock(&mrt_lock);
+ rcu_read_unlock();
return -EAGAIN;
}
dev = skb->dev;
- if (dev == NULL || (vif = ipmr_find_vif(mrt, dev)) < 0) {
+ read_lock(&mrt_lock);
+ if (dev)
+ vif = ipmr_find_vif(mrt, dev);
+ if (vif < 0) {
read_unlock(&mrt_lock);
+ rcu_read_unlock();
return -ENODEV;
}
skb2 = skb_clone(skb, GFP_ATOMIC);
if (!skb2) {
read_unlock(&mrt_lock);
+ rcu_read_unlock();
return -ENOMEM;
}
@@ -1992,13 +2010,16 @@ int ipmr_get_route(struct net *net,
iph->version = 0;
err = ipmr_cache_unresolved(mrt, vif, skb2);
read_unlock(&mrt_lock);
+ rcu_read_unlock();
return err;
}
- if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
+ read_lock(&mrt_lock);
+ if (!nowait && (rtm->rtm_flags & RTM_F_NOTIFY))
cache->mfc_flags |= MFC_NOTIFY;
err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
read_unlock(&mrt_lock);
+ rcu_read_unlock();
return err;
}
@@ -2050,14 +2071,14 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
s_h = cb->args[1];
s_e = cb->args[2];
- read_lock(&mrt_lock);
+ rcu_read_lock();
ipmr_for_each_table(mrt, net) {
if (t < s_t)
goto next_table;
if (t > s_t)
s_h = 0;
for (h = s_h; h < MFC_LINES; h++) {
- list_for_each_entry(mfc, &mrt->mfc_cache_array[h], list) {
+ list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) {
if (e < s_e)
goto next_entry;
if (ipmr_fill_mroute(mrt, skb,
@@ -2075,7 +2096,7 @@ next_table:
t++;
}
done:
- read_unlock(&mrt_lock);
+ rcu_read_unlock();
cb->args[2] = e;
cb->args[1] = h;
@@ -2086,7 +2107,8 @@ done:
#ifdef CONFIG_PROC_FS
/*
- * The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
+ * The /proc interfaces to multicast routing :
+ * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
*/
struct ipmr_vif_iter {
struct seq_net_private p;
@@ -2208,14 +2230,14 @@ static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
struct mr_table *mrt = it->mrt;
struct mfc_cache *mfc;
- read_lock(&mrt_lock);
+ rcu_read_lock();
for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
it->cache = &mrt->mfc_cache_array[it->ct];
- list_for_each_entry(mfc, it->cache, list)
+ list_for_each_entry_rcu(mfc, it->cache, list)
if (pos-- == 0)
return mfc;
}
- read_unlock(&mrt_lock);
+ rcu_read_unlock();
spin_lock_bh(&mfc_unres_lock);
it->cache = &mrt->mfc_unres_queue;
@@ -2274,7 +2296,7 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}
/* exhausted cache_array, show unresolved */
- read_unlock(&mrt_lock);
+ rcu_read_unlock();
it->cache = &mrt->mfc_unres_queue;
it->ct = 0;
@@ -2282,7 +2304,7 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
if (!list_empty(it->cache))
return list_first_entry(it->cache, struct mfc_cache, list);
- end_of_list:
+end_of_list:
spin_unlock_bh(&mfc_unres_lock);
it->cache = NULL;
@@ -2297,7 +2319,7 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
if (it->cache == &mrt->mfc_unres_queue)
spin_unlock_bh(&mfc_unres_lock);
else if (it->cache == &mrt->mfc_cache_array[it->ct])
- read_unlock(&mrt_lock);
+ rcu_read_unlock();
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
@@ -2323,7 +2345,7 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
mfc->mfc_un.res.bytes,
mfc->mfc_un.res.wrong_if);
for (n = mfc->mfc_un.res.minvif;
- n < mfc->mfc_un.res.maxvif; n++ ) {
+ n < mfc->mfc_un.res.maxvif; n++) {
if (VIF_EXISTS(mrt, n) &&
mfc->mfc_un.res.ttls[n] < 255)
seq_printf(seq,
@@ -2421,7 +2443,7 @@ int __init ip_mr_init(void)
mrt_cachep = kmem_cache_create("ip_mrt_cache",
sizeof(struct mfc_cache),
- 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+ 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
NULL);
if (!mrt_cachep)
return -ENOMEM;
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 6bccba31d132..8b642f152468 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -72,7 +72,7 @@ static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
for (i = 0; i < len; i++)
ret |= (hdr_addr[i] ^ ap->addr[i]) & ap->mask[i];
- return (ret != 0);
+ return ret != 0;
}
/*
@@ -735,6 +735,7 @@ static void get_counters(const struct xt_table_info *t,
if (cpu == curcpu)
continue;
i = 0;
+ local_bh_disable();
xt_info_wrlock(cpu);
xt_entry_foreach(iter, t->entries[cpu], t->size) {
ADD_COUNTER(counters[i], iter->counters.bcnt,
@@ -742,6 +743,7 @@ static void get_counters(const struct xt_table_info *t,
++i;
}
xt_info_wrunlock(cpu);
+ local_bh_enable();
}
put_cpu();
}
@@ -1418,6 +1420,9 @@ static int translate_compat_table(const char *name,
if (ret != 0)
break;
++i;
+ if (strcmp(arpt_get_target(iter1)->u.user.name,
+ XT_ERROR_TARGET) == 0)
+ ++newinfo->stacksize;
}
if (ret) {
/*
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index c439721b165a..d163f2e3b2e9 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -909,6 +909,7 @@ get_counters(const struct xt_table_info *t,
if (cpu == curcpu)
continue;
i = 0;
+ local_bh_disable();
xt_info_wrlock(cpu);
xt_entry_foreach(iter, t->entries[cpu], t->size) {
ADD_COUNTER(counters[i], iter->counters.bcnt,
@@ -916,6 +917,7 @@ get_counters(const struct xt_table_info *t,
++i; /* macro does multi eval of i */
}
xt_info_wrunlock(cpu);
+ local_bh_enable();
}
put_cpu();
}
@@ -1749,6 +1751,9 @@ translate_compat_table(struct net *net,
if (ret != 0)
break;
++i;
+ if (strcmp(ipt_get_target(iter1)->u.user.name,
+ XT_ERROR_TARGET) == 0)
+ ++newinfo->stacksize;
}
if (ret) {
/*
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 3a43cf36db87..1e26a4897655 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -29,6 +29,7 @@
#include <net/netfilter/nf_conntrack.h>
#include <net/net_namespace.h>
#include <net/checksum.h>
+#include <net/ip.h>
#define CLUSTERIP_VERSION "0.8"
@@ -231,24 +232,22 @@ clusterip_hashfn(const struct sk_buff *skb,
{
const struct iphdr *iph = ip_hdr(skb);
unsigned long hashval;
- u_int16_t sport, dport;
- const u_int16_t *ports;
-
- switch (iph->protocol) {
- case IPPROTO_TCP:
- case IPPROTO_UDP:
- case IPPROTO_UDPLITE:
- case IPPROTO_SCTP:
- case IPPROTO_DCCP:
- case IPPROTO_ICMP:
- ports = (const void *)iph+iph->ihl*4;
- sport = ports[0];
- dport = ports[1];
- break;
- default:
+ u_int16_t sport = 0, dport = 0;
+ int poff;
+
+ poff = proto_ports_offset(iph->protocol);
+ if (poff >= 0) {
+ const u_int16_t *ports;
+ u16 _ports[2];
+
+ ports = skb_header_pointer(skb, iph->ihl * 4 + poff, 4, _ports);
+ if (ports) {
+ sport = ports[0];
+ dport = ports[1];
+ }
+ } else {
if (net_ratelimit())
pr_info("unknown protocol %u\n", iph->protocol);
- sport = dport = 0;
}
switch (config->hash_mode) {
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index b254dafaf429..43eec80c0e7c 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -112,6 +112,7 @@ static void send_reset(struct sk_buff *oldskb, int hook)
/* ip_route_me_harder expects skb->dst to be set */
skb_dst_set_noref(nskb, skb_dst(oldskb));
+ nskb->protocol = htons(ETH_P_IP);
if (ip_route_me_harder(nskb, addr_type))
goto free_nskb;
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index eab8de32f200..f3a9b42b16c6 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -66,9 +66,11 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
const struct net_device *out,
int (*okfn)(struct sk_buff *))
{
+ struct sock *sk = skb->sk;
struct inet_sock *inet = inet_sk(skb->sk);
- if (inet && inet->nodefrag)
+ if (sk && (sk->sk_family == PF_INET) &&
+ inet->nodefrag)
return NF_ACCEPT;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 1679e2c0963d..ee5f419d0a56 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -893,13 +893,15 @@ static void fast_csum(__sum16 *csum,
unsigned char s[4];
if (offset & 1) {
- s[0] = s[2] = 0;
+ s[0] = ~0;
s[1] = ~*optr;
+ s[2] = 0;
s[3] = *nptr;
} else {
- s[1] = s[3] = 0;
s[0] = ~*optr;
+ s[1] = ~0;
s[2] = *nptr;
+ s[3] = 0;
}
*csum = csum_fold(csum_partial(s, 4, ~csum_unfold(*csum)));
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index f2d297351405..65699c24411c 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -28,8 +28,7 @@
#include <linux/spinlock.h>
#include <net/protocol.h>
-const struct net_protocol *inet_protos[MAX_INET_PROTOS] ____cacheline_aligned_in_smp;
-static DEFINE_SPINLOCK(inet_proto_lock);
+const struct net_protocol *inet_protos[MAX_INET_PROTOS] __read_mostly;
/*
* Add a protocol handler to the hash tables
@@ -37,20 +36,9 @@ static DEFINE_SPINLOCK(inet_proto_lock);
int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
{
- int hash, ret;
+ int hash = protocol & (MAX_INET_PROTOS - 1);
- hash = protocol & (MAX_INET_PROTOS - 1);
-
- spin_lock_bh(&inet_proto_lock);
- if (inet_protos[hash]) {
- ret = -1;
- } else {
- inet_protos[hash] = prot;
- ret = 0;
- }
- spin_unlock_bh(&inet_proto_lock);
-
- return ret;
+ return !cmpxchg(&inet_protos[hash], NULL, prot) ? 0 : -1;
}
EXPORT_SYMBOL(inet_add_protocol);
@@ -60,18 +48,9 @@ EXPORT_SYMBOL(inet_add_protocol);
int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol)
{
- int hash, ret;
-
- hash = protocol & (MAX_INET_PROTOS - 1);
+ int ret, hash = protocol & (MAX_INET_PROTOS - 1);
- spin_lock_bh(&inet_proto_lock);
- if (inet_protos[hash] == prot) {
- inet_protos[hash] = NULL;
- ret = 0;
- } else {
- ret = -1;
- }
- spin_unlock_bh(&inet_proto_lock);
+ ret = (cmpxchg(&inet_protos[hash], prot, NULL) == prot) ? 0 : -1;
synchronize_net();
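Both the add and delete paths drop the spinlock in favour of a single cmpxchg() on the protocol slot: registration succeeds only if the slot was empty, deregistration only if it still holds the handler being removed, and the existing synchronize_net() call still gives in-flight receivers time to finish with the old pointer before the caller frees it. A userspace sketch of the same idea in C11 atomics (names and the 256-entry table are mine):

    #include <stdatomic.h>
    #include <stddef.h>

    #define MAX_SLOTS 256
    static _Atomic(void *) slots[MAX_SLOTS];

    /* Succeeds (returns 0) only if the slot was previously empty. */
    static int slot_add(unsigned char proto, void *handler)
    {
            void *expected = NULL;

            return atomic_compare_exchange_strong(&slots[proto & (MAX_SLOTS - 1)],
                                                  &expected, handler) ? 0 : -1;
    }

    /* Succeeds only if the slot still points at the handler we registered. */
    static int slot_del(unsigned char proto, void *handler)
    {
            void *expected = handler;

            return atomic_compare_exchange_strong(&slots[proto & (MAX_SLOTS - 1)],
                                                  &expected, NULL) ? 0 : -1;
    }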
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 009a7b2aa1ef..1f85ef289895 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -505,7 +505,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
ipc.addr = inet->inet_saddr;
ipc.opt = NULL;
- ipc.shtx.flags = 0;
+ ipc.tx_flags = 0;
ipc.oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 3f56b6e6c6aa..7864d0c48968 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1107,6 +1107,7 @@ restart:
* on the route gc list.
*/
+ rt->dst.flags |= DST_NOCACHE;
if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
int err = arp_bind_neighbour(&rt->dst);
if (err) {
@@ -1231,7 +1232,7 @@ restart:
}
if (net_ratelimit())
- printk(KERN_WARNING "Neighbour table overflow.\n");
+ printk(KERN_WARNING "ipv4: Neighbour table overflow.\n");
rt_drop(rt);
return -ENOBUFS;
}
@@ -1268,18 +1269,11 @@ skip_hashing:
void rt_bind_peer(struct rtable *rt, int create)
{
- static DEFINE_SPINLOCK(rt_peer_lock);
struct inet_peer *peer;
peer = inet_getpeer(rt->rt_dst, create);
- spin_lock_bh(&rt_peer_lock);
- if (rt->peer == NULL) {
- rt->peer = peer;
- peer = NULL;
- }
- spin_unlock_bh(&rt_peer_lock);
- if (peer)
+ if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
inet_putpeer(peer);
}
@@ -1779,12 +1773,15 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
if (rt->fl.iif == 0)
src = rt->rt_src;
- else if (fib_lookup(dev_net(rt->dst.dev), &rt->fl, &res) == 0) {
- src = FIB_RES_PREFSRC(res);
- fib_res_put(&res);
- } else
- src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
+ else {
+ rcu_read_lock();
+ if (fib_lookup(dev_net(rt->dst.dev), &rt->fl, &res) == 0)
+ src = FIB_RES_PREFSRC(res);
+ else
+ src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
RT_SCOPE_UNIVERSE);
+ rcu_read_unlock();
+ }
memcpy(addr, &src, 4);
}
@@ -2087,6 +2084,7 @@ static int ip_mkroute_input(struct sk_buff *skb,
* Such approach solves two big problems:
* 1. Not simplex devices are handled properly.
* 2. IP spoofing attempts are filtered with 100% of guarantee.
+ * called with rcu_read_lock()
*/
static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
@@ -2108,7 +2106,6 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
unsigned hash;
__be32 spec_dst;
int err = -EINVAL;
- int free_res = 0;
struct net * net = dev_net(dev);
/* IP on this device is disabled. */
@@ -2140,12 +2137,12 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
/*
* Now we are ready to route packet.
*/
- if ((err = fib_lookup(net, &fl, &res)) != 0) {
+ err = fib_lookup(net, &fl, &res);
+ if (err != 0) {
if (!IN_DEV_FORWARD(in_dev))
goto e_hostunreach;
goto no_route;
}
- free_res = 1;
RT_CACHE_STAT_INC(in_slow_tot);
@@ -2154,8 +2151,8 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
if (res.type == RTN_LOCAL) {
err = fib_validate_source(saddr, daddr, tos,
- net->loopback_dev->ifindex,
- dev, &spec_dst, &itag, skb->mark);
+ net->loopback_dev->ifindex,
+ dev, &spec_dst, &itag, skb->mark);
if (err < 0)
goto martian_source_keep_err;
if (err)
@@ -2170,9 +2167,6 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
goto martian_destination;
err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
-done:
- if (free_res)
- fib_res_put(&res);
out: return err;
brd_input:
@@ -2232,7 +2226,7 @@ local_input:
rth->rt_type = res.type;
hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
err = rt_intern_hash(hash, rth, NULL, skb, fl.iif);
- goto done;
+ goto out;
no_route:
RT_CACHE_STAT_INC(in_no_route);
@@ -2255,21 +2249,21 @@ martian_destination:
e_hostunreach:
err = -EHOSTUNREACH;
- goto done;
+ goto out;
e_inval:
err = -EINVAL;
- goto done;
+ goto out;
e_nobufs:
err = -ENOBUFS;
- goto done;
+ goto out;
martian_source:
err = -EINVAL;
martian_source_keep_err:
ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
- goto done;
+ goto out;
}
int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
@@ -2355,6 +2349,7 @@ skip_cache:
}
EXPORT_SYMBOL(ip_route_input_common);
+/* called with rcu_read_lock() */
static int __mkroute_output(struct rtable **result,
struct fib_result *res,
const struct flowi *fl,
@@ -2365,9 +2360,8 @@ static int __mkroute_output(struct rtable **result,
struct rtable *rth;
struct in_device *in_dev;
u32 tos = RT_FL_TOS(oldflp);
- int err = 0;
- if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags&IFF_LOOPBACK))
+ if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags & IFF_LOOPBACK))
return -EINVAL;
if (fl->fl4_dst == htonl(0xFFFFFFFF))
@@ -2380,38 +2374,35 @@ static int __mkroute_output(struct rtable **result,
if (dev_out->flags & IFF_LOOPBACK)
flags |= RTCF_LOCAL;
- /* get work reference to inet device */
- in_dev = in_dev_get(dev_out);
+ in_dev = __in_dev_get_rcu(dev_out);
if (!in_dev)
return -EINVAL;
if (res->type == RTN_BROADCAST) {
flags |= RTCF_BROADCAST | RTCF_LOCAL;
- if (res->fi) {
- fib_info_put(res->fi);
- res->fi = NULL;
- }
+ res->fi = NULL;
} else if (res->type == RTN_MULTICAST) {
- flags |= RTCF_MULTICAST|RTCF_LOCAL;
+ flags |= RTCF_MULTICAST | RTCF_LOCAL;
if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
oldflp->proto))
flags &= ~RTCF_LOCAL;
/* If multicast route do not exist use
- default one, but do not gateway in this case.
- Yes, it is hack.
+ * default one, but do not gateway in this case.
+ * Yes, it is hack.
*/
- if (res->fi && res->prefixlen < 4) {
- fib_info_put(res->fi);
+ if (res->fi && res->prefixlen < 4)
res->fi = NULL;
- }
}
rth = dst_alloc(&ipv4_dst_ops);
if (!rth) {
- err = -ENOBUFS;
- goto cleanup;
+ rcu_read_unlock();
+ return -ENOBUFS;
}
+ in_dev_hold(in_dev);
+ rcu_read_unlock();
+ rth->idev = in_dev;
atomic_set(&rth->dst.__refcnt, 1);
rth->dst.flags= DST_HOST;
@@ -2432,7 +2423,6 @@ static int __mkroute_output(struct rtable **result,
cache entry */
rth->dst.dev = dev_out;
dev_hold(dev_out);
- rth->idev = in_dev_get(dev_out);
rth->rt_gateway = fl->fl4_dst;
rth->rt_spec_dst= fl->fl4_src;
@@ -2467,15 +2457,11 @@ static int __mkroute_output(struct rtable **result,
rt_set_nexthop(rth, res, 0);
rth->rt_flags = flags;
-
*result = rth;
- cleanup:
- /* release work reference to inet device */
- in_dev_put(in_dev);
-
- return err;
+ return 0;
}
+/* called with rcu_read_lock() */
static int ip_mkroute_output(struct rtable **rp,
struct fib_result *res,
const struct flowi *fl,
@@ -2497,6 +2483,7 @@ static int ip_mkroute_output(struct rtable **rp,
/*
* Major route resolver routine.
+ * called with rcu_read_lock();
*/
static int ip_route_output_slow(struct net *net, struct rtable **rp,
@@ -2515,9 +2502,8 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
.iif = net->loopback_dev->ifindex,
.oif = oldflp->oif };
struct fib_result res;
- unsigned flags = 0;
+ unsigned int flags = 0;
struct net_device *dev_out = NULL;
- int free_res = 0;
int err;
@@ -2545,7 +2531,7 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
(ipv4_is_multicast(oldflp->fl4_dst) ||
oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
- dev_out = ip_dev_find(net, oldflp->fl4_src);
+ dev_out = __ip_dev_find(net, oldflp->fl4_src, false);
if (dev_out == NULL)
goto out;
@@ -2570,26 +2556,21 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
if (!(oldflp->flags & FLOWI_FLAG_ANYSRC)) {
/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
- dev_out = ip_dev_find(net, oldflp->fl4_src);
- if (dev_out == NULL)
+ if (!__ip_dev_find(net, oldflp->fl4_src, false))
goto out;
- dev_put(dev_out);
- dev_out = NULL;
}
}
if (oldflp->oif) {
- dev_out = dev_get_by_index(net, oldflp->oif);
+ dev_out = dev_get_by_index_rcu(net, oldflp->oif);
err = -ENODEV;
if (dev_out == NULL)
goto out;
/* RACE: Check return value of inet_select_addr instead. */
- if (__in_dev_get_rtnl(dev_out) == NULL) {
- dev_put(dev_out);
+ if (rcu_dereference(dev_out->ip_ptr) == NULL)
goto out; /* Wrong error code */
- }
if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
oldflp->fl4_dst == htonl(0xFFFFFFFF)) {
@@ -2612,10 +2593,7 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
fl.fl4_dst = fl.fl4_src;
if (!fl.fl4_dst)
fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
- if (dev_out)
- dev_put(dev_out);
dev_out = net->loopback_dev;
- dev_hold(dev_out);
fl.oif = net->loopback_dev->ifindex;
res.type = RTN_LOCAL;
flags |= RTCF_LOCAL;
@@ -2649,23 +2627,15 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
res.type = RTN_UNICAST;
goto make_route;
}
- if (dev_out)
- dev_put(dev_out);
err = -ENETUNREACH;
goto out;
}
- free_res = 1;
if (res.type == RTN_LOCAL) {
if (!fl.fl4_src)
fl.fl4_src = fl.fl4_dst;
- if (dev_out)
- dev_put(dev_out);
dev_out = net->loopback_dev;
- dev_hold(dev_out);
fl.oif = dev_out->ifindex;
- if (res.fi)
- fib_info_put(res.fi);
res.fi = NULL;
flags |= RTCF_LOCAL;
goto make_route;
@@ -2682,28 +2652,21 @@ static int ip_route_output_slow(struct net *net, struct rtable **rp,
if (!fl.fl4_src)
fl.fl4_src = FIB_RES_PREFSRC(res);
- if (dev_out)
- dev_put(dev_out);
dev_out = FIB_RES_DEV(res);
- dev_hold(dev_out);
fl.oif = dev_out->ifindex;
make_route:
err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);
-
- if (free_res)
- fib_res_put(&res);
- if (dev_out)
- dev_put(dev_out);
out: return err;
}
int __ip_route_output_key(struct net *net, struct rtable **rp,
const struct flowi *flp)
{
- unsigned hash;
+ unsigned int hash;
+ int res;
struct rtable *rth;
if (!rt_caching(net))
@@ -2734,10 +2697,18 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
rcu_read_unlock_bh();
slow_output:
- return ip_route_output_slow(net, rp, flp);
+ rcu_read_lock();
+ res = ip_route_output_slow(net, rp, flp);
+ rcu_read_unlock();
+ return res;
}
EXPORT_SYMBOL_GPL(__ip_route_output_key);
+static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
+{
+ return NULL;
+}
+
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}
@@ -2746,7 +2717,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
.family = AF_INET,
.protocol = cpu_to_be16(ETH_P_IP),
.destroy = ipv4_dst_destroy,
- .check = ipv4_dst_check,
+ .check = ipv4_blackhole_dst_check,
.update_pmtu = ipv4_rt_blackhole_update_pmtu,
.entries = ATOMIC_INIT(0),
};
@@ -2793,7 +2764,7 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
dst_release(&(*rp)->dst);
*rp = rt;
- return (rt ? 0 : -ENOMEM);
+ return rt ? 0 : -ENOMEM;
}
int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 176e11aaea77..1664a0590bb8 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -386,8 +386,6 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
*/
mask = 0;
- if (sk->sk_err)
- mask = POLLERR;
/*
* POLLHUP is certainly not done right. But poll() doesn't
@@ -451,11 +449,17 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
mask |= POLLOUT | POLLWRNORM;
}
- }
+ } else
+ mask |= POLLOUT | POLLWRNORM;
if (tp->urg_data & TCP_URG_VALID)
mask |= POLLPRI;
}
+ /* This barrier is coupled with smp_wmb() in tcp_reset() */
+ smp_rmb();
+ if (sk->sk_err)
+ mask |= POLLERR;
+
return mask;
}
EXPORT_SYMBOL(tcp_poll);
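The relocated sk_err test pairs with the smp_wmb() added in tcp_reset() further down: the reset path stores sk_err, issues the write barrier, and only then proceeds to wake the socket, while tcp_poll() samples the rest of the socket state first, issues smp_rmb(), and tests sk_err last, so a waiter that observes the post-reset state also sees the error code. A hedged userspace analogue of that barrier pairing in C11; the socket logic is stripped away and only the fence discipline is the point:

    #include <stdatomic.h>

    static int sk_err;              /* plain data published by the fences  */
    static atomic_int reset_seen;   /* stands in for the state/wakeup flag */

    static void reset_path(int err)
    {
            sk_err = err;
            atomic_thread_fence(memory_order_release);      /* ~ smp_wmb() */
            atomic_store_explicit(&reset_seen, 1, memory_order_relaxed);
    }

    static int poll_path(void)
    {
            if (!atomic_load_explicit(&reset_seen, memory_order_relaxed))
                    return 0;
            atomic_thread_fence(memory_order_acquire);      /* ~ smp_rmb() */
            return sk_err;          /* guaranteed to see the value above   */
    }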
@@ -939,7 +943,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
sg = sk->sk_route_caps & NETIF_F_SG;
while (--iovlen >= 0) {
- int seglen = iov->iov_len;
+ size_t seglen = iov->iov_len;
unsigned char __user *from = iov->iov_base;
iov++;
@@ -2011,11 +2015,8 @@ adjudge_to_death:
}
}
if (sk->sk_state != TCP_CLOSE) {
- int orphan_count = percpu_counter_read_positive(
- sk->sk_prot->orphan_count);
-
sk_mem_reclaim(sk);
- if (tcp_too_many_orphans(sk, orphan_count)) {
+ if (tcp_too_many_orphans(sk, 0)) {
if (net_ratelimit())
printk(KERN_INFO "TCP: too many of orphaned "
"sockets\n");
@@ -2391,7 +2392,12 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
err = tp->af_specific->md5_parse(sk, optval, optlen);
break;
#endif
-
+ case TCP_USER_TIMEOUT:
+ /* Cap the max timeout in ms TCP will retry/retrans
+ * before giving up and aborting (ETIMEDOUT) a connection.
+ */
+ icsk->icsk_user_timeout = msecs_to_jiffies(val);
+ break;
default:
err = -ENOPROTOOPT;
break;
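TCP_USER_TIMEOUT takes an unsigned int in milliseconds and bounds how long transmitted data may remain unacknowledged (or, with keepalive enabled, how long probes may go unanswered) before the connection is aborted with ETIMEDOUT. A usage sketch from userspace; the option value 18 matches the kernel header this change ships with, but older libc headers may not define it yet:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    #ifndef TCP_USER_TIMEOUT
    #define TCP_USER_TIMEOUT 18     /* value from the kernel header adding it */
    #endif

    static int set_user_timeout(int fd, unsigned int millis)
    {
            return setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
                              &millis, sizeof(millis));
    }

For instance, set_user_timeout(fd, 30000) gives unacknowledged data 30 seconds before the connection is torn down.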
@@ -2610,6 +2616,10 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
case TCP_THIN_DUPACK:
val = tp->thin_dupack;
break;
+
+ case TCP_USER_TIMEOUT:
+ val = jiffies_to_msecs(icsk->icsk_user_timeout);
+ break;
default:
return -ENOPROTOOPT;
}
@@ -3212,7 +3222,7 @@ void __init tcp_init(void)
{
struct sk_buff *skb = NULL;
unsigned long nr_pages, limit;
- int order, i, max_share;
+ int i, max_share, cnt;
unsigned long jiffy = jiffies;
BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
@@ -3261,22 +3271,12 @@ void __init tcp_init(void)
INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
}
- /* Try to be a bit smarter and adjust defaults depending
- * on available memory.
- */
- for (order = 0; ((1 << order) << PAGE_SHIFT) <
- (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
- order++)
- ;
- if (order >= 4) {
- tcp_death_row.sysctl_max_tw_buckets = 180000;
- sysctl_tcp_max_orphans = 4096 << (order - 4);
- sysctl_max_syn_backlog = 1024;
- } else if (order < 3) {
- tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
- sysctl_tcp_max_orphans >>= (3 - order);
- sysctl_max_syn_backlog = 128;
- }
+
+ cnt = tcp_hashinfo.ehash_mask + 1;
+
+ tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
+ sysctl_tcp_max_orphans = cnt / 2;
+ sysctl_max_syn_backlog = max(128, cnt / 256);
/* Set the pressure threshold to be a fraction of global memory that
* is up to 1/2 at 256 MB, decreasing toward zero with the amount of
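Sizing these limits from the established-connection hash rather than from the old page-order heuristic keeps them proportional to what the machine was already judged able to track. For example, on a box whose ehash was sized to 524,288 buckets, tcp_max_tw_buckets and tcp_max_orphans both become 262,144 and the SYN backlog becomes max(128, 524288/256) = 2048; a small box with 8,192 buckets gets 4,096 / 4,096 / 128.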
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 0ec9bd0ae94f..850c737e08e2 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -196,10 +196,10 @@ void tcp_get_allowed_congestion_control(char *buf, size_t maxlen)
int tcp_set_allowed_congestion_control(char *val)
{
struct tcp_congestion_ops *ca;
- char *clone, *name;
+ char *saved_clone, *clone, *name;
int ret = 0;
- clone = kstrdup(val, GFP_USER);
+ saved_clone = clone = kstrdup(val, GFP_USER);
if (!clone)
return -ENOMEM;
@@ -226,6 +226,7 @@ int tcp_set_allowed_congestion_control(char *val)
}
out:
spin_unlock(&tcp_cong_list_lock);
+ kfree(saved_clone);
return ret;
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e663b78a2ef6..f6fdd727a23d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -182,7 +182,7 @@ static void tcp_incr_quickack(struct sock *sk)
icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
}
-void tcp_enter_quickack_mode(struct sock *sk)
+static void tcp_enter_quickack_mode(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
tcp_incr_quickack(sk);
@@ -805,25 +805,12 @@ void tcp_update_metrics(struct sock *sk)
}
}
-/* Numbers are taken from RFC3390.
- *
- * John Heffner states:
- *
- * The RFC specifies a window of no more than 4380 bytes
- * unless 2*MSS > 4380. Reading the pseudocode in the RFC
- * is a bit misleading because they use a clamp at 4380 bytes
- * rather than use a multiplier in the relevant range.
- */
__u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
{
__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
- if (!cwnd) {
- if (tp->mss_cache > 1460)
- cwnd = 2;
- else
- cwnd = (tp->mss_cache > 1095) ? 3 : 4;
- }
+ if (!cwnd)
+ cwnd = rfc3390_bytes_to_packets(tp->mss_cache);
return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
}
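Both the sender's initial cwnd (here) and the advertised initial window in tcp_select_initial_window() now defer to rfc3390_bytes_to_packets(), which converts the MSS into the RFC 3390 initial window of min(4*MSS, max(2*MSS, 4380 bytes)) expressed in segments. The helper itself lives in a header outside this diff; a sketch of what it boils down to, to be read as the expected shape rather than a quote:

    /* RFC 3390: an MSS of up to 1095 bytes allows 4 segments (<= 4380 bytes),
     * up to 2190 bytes allows 3, and anything larger falls back to 2. */
    static inline unsigned int rfc3390_bytes_to_packets(unsigned int mss)
    {
            return mss <= 1095 ? 4 : (mss > 2190 ? 2 : 3);
    }

For the common Ethernet MSS of 1460 this gives 3 segments (4380 bytes), matching the hard-coded ladder the hunk removes.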
@@ -2314,7 +2301,7 @@ static inline int tcp_dupack_heuristics(struct tcp_sock *tp)
static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
{
- return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto);
+ return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto;
}
static inline int tcp_head_timedout(struct sock *sk)
@@ -2545,7 +2532,8 @@ static void tcp_mark_head_lost(struct sock *sk, int packets)
cnt += tcp_skb_pcount(skb);
if (cnt > packets) {
- if (tcp_is_sack(tp) || (oldcnt >= packets))
+ if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
+ (oldcnt >= packets))
break;
mss = skb_shinfo(skb)->gso_size;
@@ -3411,8 +3399,8 @@ static void tcp_ack_probe(struct sock *sk)
static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag)
{
- return (!(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
- inet_csk(sk)->icsk_ca_state != TCP_CA_Open);
+ return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
+ inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
}
static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
@@ -3429,9 +3417,9 @@ static inline int tcp_may_update_window(const struct tcp_sock *tp,
const u32 ack, const u32 ack_seq,
const u32 nwin)
{
- return (after(ack, tp->snd_una) ||
+ return after(ack, tp->snd_una) ||
after(ack_seq, tp->snd_wl1) ||
- (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd));
+ (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd);
}
/* Update our send window.
@@ -4048,6 +4036,8 @@ static void tcp_reset(struct sock *sk)
default:
sk->sk_err = ECONNRESET;
}
+ /* This barrier is coupled with smp_rmb() in tcp_poll() */
+ smp_wmb();
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_error_report(sk);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 020766292bb0..a0232f3a358b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2571,7 +2571,6 @@ struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
return tcp_gro_receive(head, skb);
}
-EXPORT_SYMBOL(tcp4_gro_receive);
int tcp4_gro_complete(struct sk_buff *skb)
{
@@ -2584,7 +2583,6 @@ int tcp4_gro_complete(struct sk_buff *skb)
return tcp_gro_complete(skb);
}
-EXPORT_SYMBOL(tcp4_gro_complete);
struct proto tcp_prot = {
.name = "TCP",
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index f25b56cb85cb..43cf901d7659 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -55,7 +55,7 @@ static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
return 1;
if (after(end_seq, s_win) && before(seq, e_win))
return 1;
- return (seq == e_win && seq == end_seq);
+ return seq == e_win && seq == end_seq;
}
/*
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index de3bd8458588..05b1ecf36763 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -224,16 +224,10 @@ void tcp_select_initial_window(int __space, __u32 mss,
}
}
- /* Set initial window to value enough for senders,
- * following RFC2414. Senders, not following this RFC,
- * will be satisfied with 2.
- */
+ /* Set initial window to value enough for senders, following RFC5681. */
if (mss > (1 << *rcv_wscale)) {
- int init_cwnd = 4;
- if (mss > 1460 * 3)
- init_cwnd = 2;
- else if (mss > 1460)
- init_cwnd = 3;
+ int init_cwnd = rfc3390_bytes_to_packets(mss);
+
/* when initializing use the value from init_rcv_wnd
* rather than the default from above
*/
@@ -1376,9 +1370,9 @@ static inline int tcp_nagle_check(const struct tcp_sock *tp,
const struct sk_buff *skb,
unsigned mss_now, int nonagle)
{
- return (skb->len < mss_now &&
+ return skb->len < mss_now &&
((nonagle & TCP_NAGLE_CORK) ||
- (!nonagle && tp->packets_out && tcp_minshall_check(tp))));
+ (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
}
/* Return non-zero if the Nagle test allows this packet to be
@@ -1449,10 +1443,10 @@ int tcp_may_send_now(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb = tcp_send_head(sk);
- return (skb &&
+ return skb &&
tcp_snd_test(sk, skb, tcp_current_mss(sk),
(tcp_skb_is_last(sk, skb) ?
- tp->nonagle : TCP_NAGLE_PUSH)));
+ tp->nonagle : TCP_NAGLE_PUSH));
}
/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
@@ -2429,6 +2423,12 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
__u8 rcv_wscale;
/* Set this up on the first call only */
req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
+
+ /* limit the window selection if the user enforces a smaller rx buffer */
+ if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
+ (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
+ req->window_clamp = tcp_full_space(sk);
+
/* tcp_full_space because it is guaranteed to be the first packet */
tcp_select_initial_window(tcp_full_space(sk),
mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
@@ -2555,6 +2555,11 @@ static void tcp_connect_init(struct sock *sk)
tcp_initialize_rcv_mss(sk);
+ /* limit the window selection if the user enforces a smaller rx buffer */
+ if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
+ (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
+ tp->window_clamp = tcp_full_space(sk);
+
tcp_select_initial_window(tcp_full_space(sk),
tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
&tp->rcv_wnd,
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 808bb920c9f5..f3c8c6c019ae 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -66,18 +66,18 @@ static void tcp_write_err(struct sock *sk)
static int tcp_out_of_resources(struct sock *sk, int do_reset)
{
struct tcp_sock *tp = tcp_sk(sk);
- int orphans = percpu_counter_read_positive(&tcp_orphan_count);
+ int shift = 0;
/* If peer does not open window for long time, or did not transmit
* anything for long time, penalize it. */
if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
- orphans <<= 1;
+ shift++;
/* If some dubious ICMP arrived, penalize even more. */
if (sk->sk_err_soft)
- orphans <<= 1;
+ shift++;
- if (tcp_too_many_orphans(sk, orphans)) {
+ if (tcp_too_many_orphans(sk, shift)) {
if (net_ratelimit())
printk(KERN_INFO "Out of socket memory\n");
@@ -135,13 +135,16 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
/* This function calculates a "timeout" which is equivalent to the timeout of a
* TCP connection after "boundary" unsuccessful, exponentially backed-off
- * retransmissions with an initial RTO of TCP_RTO_MIN.
+ * retransmissions with an initial RTO of TCP_RTO_MIN or TCP_TIMEOUT_INIT if
+ * syn_set flag is set.
*/
static bool retransmits_timed_out(struct sock *sk,
- unsigned int boundary)
+ unsigned int boundary,
+ unsigned int timeout,
+ bool syn_set)
{
- unsigned int timeout, linear_backoff_thresh;
- unsigned int start_ts;
+ unsigned int linear_backoff_thresh, start_ts;
+ unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
if (!inet_csk(sk)->icsk_retransmits)
return false;
@@ -151,14 +154,15 @@ static bool retransmits_timed_out(struct sock *sk,
else
start_ts = tcp_sk(sk)->retrans_stamp;
- linear_backoff_thresh = ilog2(TCP_RTO_MAX/TCP_RTO_MIN);
-
- if (boundary <= linear_backoff_thresh)
- timeout = ((2 << boundary) - 1) * TCP_RTO_MIN;
- else
- timeout = ((2 << linear_backoff_thresh) - 1) * TCP_RTO_MIN +
- (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
+ if (likely(timeout == 0)) {
+ linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
+ if (boundary <= linear_backoff_thresh)
+ timeout = ((2 << boundary) - 1) * rto_base;
+ else
+ timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
+ (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
+ }
return (tcp_time_stamp - start_ts) >= timeout;
}
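In other words the cutoff is the time an idealized exponential backoff would take: (2^(boundary+1) - 1) * rto_base while the doubled RTO stays below TCP_RTO_MAX, with any remaining retries counted linearly at TCP_RTO_MAX. With the 3 s initial SYN RTO this tree uses (TCP_TIMEOUT_INIT) and the default of 5 SYN retransmissions, linear_backoff_thresh = ilog2(120/3) = 5, so the boundary falls in the exponential region and the attempt is abandoned after (2^6 - 1) * 3 s = 189 s, i.e. 3 + 6 + 12 + 24 + 48 + 96 seconds.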
@@ -167,14 +171,15 @@ static int tcp_write_timeout(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
int retry_until;
- bool do_reset;
+ bool do_reset, syn_set = 0;
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
if (icsk->icsk_retransmits)
dst_negative_advice(sk);
retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
+ syn_set = 1;
} else {
- if (retransmits_timed_out(sk, sysctl_tcp_retries1)) {
+ if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
/* Black hole detection */
tcp_mtu_probing(icsk, sk);
@@ -187,14 +192,15 @@ static int tcp_write_timeout(struct sock *sk)
retry_until = tcp_orphan_retries(sk, alive);
do_reset = alive ||
- !retransmits_timed_out(sk, retry_until);
+ !retransmits_timed_out(sk, retry_until, 0, 0);
if (tcp_out_of_resources(sk, do_reset))
return 1;
}
}
- if (retransmits_timed_out(sk, retry_until)) {
+ if (retransmits_timed_out(sk, retry_until,
+ syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
/* Has it gone just too far? */
tcp_write_err(sk);
return 1;
@@ -436,7 +442,7 @@ out_reset_timer:
icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
}
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
- if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1))
+ if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
__sk_dst_reset(sk);
out:;
@@ -556,7 +562,14 @@ static void tcp_keepalive_timer (unsigned long data)
elapsed = keepalive_time_elapsed(tp);
if (elapsed >= keepalive_time_when(tp)) {
- if (icsk->icsk_probes_out >= keepalive_probes(tp)) {
+ /* If the TCP_USER_TIMEOUT option is enabled, use that
+ * to determine when to timeout instead.
+ */
+ if ((icsk->icsk_user_timeout != 0 &&
+ elapsed >= icsk->icsk_user_timeout &&
+ icsk->icsk_probes_out > 0) ||
+ (icsk->icsk_user_timeout == 0 &&
+ icsk->icsk_probes_out >= keepalive_probes(tp))) {
tcp_send_active_reset(sk, GFP_ATOMIC);
tcp_write_err(sk);
goto out;
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index 20151d6a6241..a534dda5456e 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -80,7 +80,7 @@ static void tcp_westwood_init(struct sock *sk)
*/
static inline u32 westwood_do_filter(u32 a, u32 b)
{
- return (((7 * a) + b) >> 3);
+ return ((7 * a) + b) >> 3;
}
static void westwood_filter(struct westwood *w, u32 delta)
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index 59186ca7808a..9a17bd2a0a37 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -14,8 +14,8 @@
#include <net/protocol.h>
#include <net/xfrm.h>
-static struct xfrm_tunnel *tunnel4_handlers;
-static struct xfrm_tunnel *tunnel64_handlers;
+static struct xfrm_tunnel *tunnel4_handlers __read_mostly;
+static struct xfrm_tunnel *tunnel64_handlers __read_mostly;
static DEFINE_MUTEX(tunnel4_mutex);
static inline struct xfrm_tunnel **fam_handlers(unsigned short family)
@@ -39,7 +39,7 @@ int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family)
}
handler->next = *pprev;
- *pprev = handler;
+ rcu_assign_pointer(*pprev, handler);
ret = 0;
@@ -73,6 +73,11 @@ int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family)
}
EXPORT_SYMBOL(xfrm4_tunnel_deregister);
+#define for_each_tunnel_rcu(head, handler) \
+ for (handler = rcu_dereference(head); \
+ handler != NULL; \
+ handler = rcu_dereference(handler->next)) \
+
static int tunnel4_rcv(struct sk_buff *skb)
{
struct xfrm_tunnel *handler;
@@ -80,7 +85,7 @@ static int tunnel4_rcv(struct sk_buff *skb)
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
goto drop;
- for (handler = tunnel4_handlers; handler; handler = handler->next)
+ for_each_tunnel_rcu(tunnel4_handlers, handler)
if (!handler->handler(skb))
return 0;
@@ -99,7 +104,7 @@ static int tunnel64_rcv(struct sk_buff *skb)
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
goto drop;
- for (handler = tunnel64_handlers; handler; handler = handler->next)
+ for_each_tunnel_rcu(tunnel64_handlers, handler)
if (!handler->handler(skb))
return 0;
@@ -115,7 +120,7 @@ static void tunnel4_err(struct sk_buff *skb, u32 info)
{
struct xfrm_tunnel *handler;
- for (handler = tunnel4_handlers; handler; handler = handler->next)
+ for_each_tunnel_rcu(tunnel4_handlers, handler)
if (!handler->err_handler(skb, info))
break;
}
@@ -125,7 +130,7 @@ static void tunnel64_err(struct sk_buff *skb, u32 info)
{
struct xfrm_tunnel *handler;
- for (handler = tunnel64_handlers; handler; handler = handler->next)
+ for_each_tunnel_rcu(tunnel64_handlers, handler)
if (!handler->err_handler(skb, info))
break;
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 32e0bef60d0a..b3f7e8cf18ac 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -797,7 +797,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
return -EOPNOTSUPP;
ipc.opt = NULL;
- ipc.shtx.flags = 0;
+ ipc.tx_flags = 0;
if (up->pending) {
/*
@@ -845,7 +845,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
ipc.addr = inet->inet_saddr;
ipc.oif = sk->sk_bound_dev_if;
- err = sock_tx_timestamp(msg, sk, &ipc.shtx);
+ err = sock_tx_timestamp(sk, &ipc.tx_flags);
if (err)
return err;
if (msg->msg_controllen) {
@@ -1260,6 +1260,49 @@ void udp_lib_unhash(struct sock *sk)
}
EXPORT_SYMBOL(udp_lib_unhash);
+/*
+ * inet_rcv_saddr was changed, we must rehash secondary hash
+ */
+void udp_lib_rehash(struct sock *sk, u16 newhash)
+{
+ if (sk_hashed(sk)) {
+ struct udp_table *udptable = sk->sk_prot->h.udp_table;
+ struct udp_hslot *hslot, *hslot2, *nhslot2;
+
+ hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
+ nhslot2 = udp_hashslot2(udptable, newhash);
+ udp_sk(sk)->udp_portaddr_hash = newhash;
+ if (hslot2 != nhslot2) {
+ hslot = udp_hashslot(udptable, sock_net(sk),
+ udp_sk(sk)->udp_port_hash);
+ /* we must lock primary chain too */
+ spin_lock_bh(&hslot->lock);
+
+ spin_lock(&hslot2->lock);
+ hlist_nulls_del_init_rcu(&udp_sk(sk)->udp_portaddr_node);
+ hslot2->count--;
+ spin_unlock(&hslot2->lock);
+
+ spin_lock(&nhslot2->lock);
+ hlist_nulls_add_head_rcu(&udp_sk(sk)->udp_portaddr_node,
+ &nhslot2->head);
+ nhslot2->count++;
+ spin_unlock(&nhslot2->lock);
+
+ spin_unlock_bh(&hslot->lock);
+ }
+ }
+}
+EXPORT_SYMBOL(udp_lib_rehash);
+
+static void udp_v4_rehash(struct sock *sk)
+{
+ u16 new_hash = udp4_portaddr_hash(sock_net(sk),
+ inet_sk(sk)->inet_rcv_saddr,
+ inet_sk(sk)->inet_num);
+ udp_lib_rehash(sk, new_hash);
+}
+
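udp_lib_rehash() exists because the secondary UDP lookup hash covers (local address, port): when a wildcard-bound socket later acquires a concrete local address, it has to move to the new secondary slot or packets hashed by that address will no longer find it; the companion datagram-connect changes call ->rehash() at exactly that point. A userspace sketch of the triggering sequence (addresses and ports are arbitrary examples):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_DGRAM, 0);
            struct sockaddr_in local = { .sin_family = AF_INET,
                                         .sin_port = htons(5555) };
            struct sockaddr_in peer = { .sin_family = AF_INET,
                                        .sin_port = htons(53) };

            local.sin_addr.s_addr = htonl(INADDR_ANY);
            inet_pton(AF_INET, "192.0.2.1", &peer.sin_addr);

            bind(fd, (struct sockaddr *)&local, sizeof(local));
            /* connect() pins the local source address; the kernel must now
             * rehash the socket into the (address, port) slot, otherwise
             * replies to the connected flow would miss it. */
            connect(fd, (struct sockaddr *)&peer, sizeof(peer));
            close(fd);
            return 0;
    }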
static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int rc;
@@ -1843,6 +1886,7 @@ struct proto udp_prot = {
.backlog_rcv = __udp_queue_rcv_skb,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
+ .rehash = udp_v4_rehash,
.get_port = udp_v4_get_port,
.memory_allocated = &udp_memory_allocated,
.sysctl_mem = sysctl_udp_mem,
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 869078d4eeb9..a580349f0b8a 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -61,7 +61,7 @@ static int xfrm4_get_saddr(struct net *net,
static int xfrm4_get_tos(struct flowi *fl)
{
- return fl->fl4_tos;
+ return IPTOS_RT_MASK & fl->fl4_tos; /* Strip ECN bits */
}
static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
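IPTOS_RT_MASK is IPTOS_TOS_MASK with the two low bits cleared (0x1c), so the ECN bits of the TOS byte no longer leak into the tos key used for xfrm route lookups; packets that differ only in ECT/CE marking now map onto the same bundle.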
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 1ef1366a0a03..47947624eccc 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -21,21 +21,25 @@ static int xfrm4_init_flags(struct xfrm_state *x)
}
static void
-__xfrm4_init_tempsel(struct xfrm_state *x, struct flowi *fl,
- struct xfrm_tmpl *tmpl,
- xfrm_address_t *daddr, xfrm_address_t *saddr)
+__xfrm4_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
+{
+ sel->daddr.a4 = fl->fl4_dst;
+ sel->saddr.a4 = fl->fl4_src;
+ sel->dport = xfrm_flowi_dport(fl);
+ sel->dport_mask = htons(0xffff);
+ sel->sport = xfrm_flowi_sport(fl);
+ sel->sport_mask = htons(0xffff);
+ sel->family = AF_INET;
+ sel->prefixlen_d = 32;
+ sel->prefixlen_s = 32;
+ sel->proto = fl->proto;
+ sel->ifindex = fl->oif;
+}
+
+static void
+xfrm4_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
+ xfrm_address_t *daddr, xfrm_address_t *saddr)
{
- x->sel.daddr.a4 = fl->fl4_dst;
- x->sel.saddr.a4 = fl->fl4_src;
- x->sel.dport = xfrm_flowi_dport(fl);
- x->sel.dport_mask = htons(0xffff);
- x->sel.sport = xfrm_flowi_sport(fl);
- x->sel.sport_mask = htons(0xffff);
- x->sel.family = AF_INET;
- x->sel.prefixlen_d = 32;
- x->sel.prefixlen_s = 32;
- x->sel.proto = fl->proto;
- x->sel.ifindex = fl->oif;
x->id = tmpl->id;
if (x->id.daddr.a4 == 0)
x->id.daddr.a4 = daddr->a4;
@@ -70,6 +74,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = {
.owner = THIS_MODULE,
.init_flags = xfrm4_init_flags,
.init_tempsel = __xfrm4_init_tempsel,
+ .init_temprop = xfrm4_init_temprop,
.output = xfrm4_output,
.extract_input = xfrm4_extract_input,
.extract_output = xfrm4_extract_output,
diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c
index 41f5982d2087..82806455e859 100644
--- a/net/ipv4/xfrm4_tunnel.c
+++ b/net/ipv4/xfrm4_tunnel.c
@@ -58,14 +58,14 @@ static int xfrm_tunnel_err(struct sk_buff *skb, u32 info)
return -ENOENT;
}
-static struct xfrm_tunnel xfrm_tunnel_handler = {
+static struct xfrm_tunnel xfrm_tunnel_handler __read_mostly = {
.handler = xfrm_tunnel_rcv,
.err_handler = xfrm_tunnel_err,
.priority = 2,
};
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static struct xfrm_tunnel xfrm64_tunnel_handler = {
+static struct xfrm_tunnel xfrm64_tunnel_handler __read_mostly = {
.handler = xfrm_tunnel_rcv,
.err_handler = xfrm_tunnel_err,
.priority = 2,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ab70a3fbcafa..ec7a91d9e865 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -243,7 +243,7 @@ static inline bool addrconf_qdisc_ok(const struct net_device *dev)
/* Check if a route is valid prefix route */
static inline int addrconf_is_prefix_route(const struct rt6_info *rt)
{
- return ((rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0);
+ return (rt->rt6i_flags & (RTF_GATEWAY | RTF_DEFAULT)) == 0;
}
static void addrconf_del_timer(struct inet6_ifaddr *ifp)
@@ -1544,7 +1544,7 @@ static int addrconf_ifid_infiniband(u8 *eui, struct net_device *dev)
return 0;
}
-int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
+static int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
{
if (addr == 0)
return -1;
@@ -1560,7 +1560,6 @@ int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
memcpy(eui + 4, &addr, 4);
return 0;
}
-EXPORT_SYMBOL(__ipv6_isatap_ifid);
static int addrconf_ifid_sit(u8 *eui, struct net_device *dev)
{
@@ -2964,7 +2963,8 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
start sending router solicitations.
*/
- if (ifp->idev->cnf.forwarding == 0 &&
+ if ((ifp->idev->cnf.forwarding == 0 ||
+ ifp->idev->cnf.forwarding == 2) &&
ifp->idev->cnf.rtr_solicits > 0 &&
(dev->flags&IFF_LOOPBACK) == 0 &&
(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)) {
@@ -4637,10 +4637,12 @@ int __init addrconf_init(void)
if (err < 0) {
printk(KERN_CRIT "IPv6 Addrconf:"
" cannot initialize default policy table: %d.\n", err);
- return err;
+ goto out;
}
- register_pernet_subsys(&addrconf_ops);
+ err = register_pernet_subsys(&addrconf_ops);
+ if (err < 0)
+ goto out_addrlabel;
/* The addrconf netdev notifier requires that loopback_dev
* has it's ipv6 private information allocated and setup
@@ -4692,7 +4694,9 @@ errout:
unregister_netdevice_notifier(&ipv6_dev_notf);
errlo:
unregister_pernet_subsys(&addrconf_ops);
-
+out_addrlabel:
+ ipv6_addr_label_cleanup();
+out:
return err;
}
@@ -4703,6 +4707,7 @@ void addrconf_cleanup(void)
unregister_netdevice_notifier(&ipv6_dev_notf);
unregister_pernet_subsys(&addrconf_ops);
+ ipv6_addr_label_cleanup();
rtnl_lock();
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index f0e774cea386..c8993e5a337c 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -393,6 +393,11 @@ int __init ipv6_addr_label_init(void)
return register_pernet_subsys(&ipv6_addr_label_ops);
}
+void ipv6_addr_label_cleanup(void)
+{
+ unregister_pernet_subsys(&ipv6_addr_label_ops);
+}
+
static const struct nla_policy ifal_policy[IFAL_MAX+1] = {
[IFAL_ADDRESS] = { .len = sizeof(struct in6_addr), },
[IFAL_LABEL] = { .len = sizeof(u32), },
@@ -513,10 +518,9 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
static inline int ip6addrlbl_msgsize(void)
{
- return (NLMSG_ALIGN(sizeof(struct ifaddrlblmsg))
+ return NLMSG_ALIGN(sizeof(struct ifaddrlblmsg))
+ nla_total_size(16) /* IFAL_ADDRESS */
- + nla_total_size(4) /* IFAL_LABEL */
- );
+ + nla_total_size(4); /* IFAL_LABEL */
}
static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh,
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 56b9bf2516f4..60220985bb80 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -467,7 +467,7 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
sin->sin6_scope_id = sk->sk_bound_dev_if;
*uaddr_len = sizeof(*sin);
- return(0);
+ return 0;
}
EXPORT_SYMBOL(inet6_getname);
@@ -488,7 +488,7 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
case SIOCADDRT:
case SIOCDELRT:
- return(ipv6_route_ioctl(net, cmd, (void __user *)arg));
+ return ipv6_route_ioctl(net, cmd, (void __user *)arg);
case SIOCSIFADDR:
return addrconf_add_ifaddr(net, (void __user *) arg);
@@ -502,7 +502,7 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
return sk->sk_prot->ioctl(sk, cmd, arg);
}
/*NOTREACHED*/
- return(0);
+ return 0;
}
EXPORT_SYMBOL(inet6_ioctl);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 7d929a22cbc2..ef371aa01ac5 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -105,9 +105,12 @@ ipv4_connected:
if (ipv6_addr_any(&np->saddr))
ipv6_addr_set_v4mapped(inet->inet_saddr, &np->saddr);
- if (ipv6_addr_any(&np->rcv_saddr))
+ if (ipv6_addr_any(&np->rcv_saddr)) {
ipv6_addr_set_v4mapped(inet->inet_rcv_saddr,
&np->rcv_saddr);
+ if (sk->sk_prot->rehash)
+ sk->sk_prot->rehash(sk);
+ }
goto out;
}
@@ -181,6 +184,8 @@ ipv4_connected:
if (ipv6_addr_any(&np->rcv_saddr)) {
ipv6_addr_copy(&np->rcv_saddr, &fl.fl6_src);
inet->inet_rcv_saddr = LOOPBACK4_IPV6;
+ if (sk->sk_prot->rehash)
+ sk->sk_prot->rehash(sk);
}
ip6_dst_store(sk, dst,
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index e1caa5d526c2..14ed0a955b56 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -13,12 +13,12 @@ int ipv6_ext_hdr(u8 nexthdr)
/*
* find out if nexthdr is an extension header or a protocol
*/
- return ( (nexthdr == NEXTHDR_HOP) ||
+ return (nexthdr == NEXTHDR_HOP) ||
(nexthdr == NEXTHDR_ROUTING) ||
(nexthdr == NEXTHDR_FRAGMENT) ||
(nexthdr == NEXTHDR_AUTH) ||
(nexthdr == NEXTHDR_NONE) ||
- (nexthdr == NEXTHDR_DEST) );
+ (nexthdr == NEXTHDR_DEST);
}
/*
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d40b330c0ee6..99157b4cd56e 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -637,9 +637,9 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
}
mtu -= hlen + sizeof(struct frag_hdr);
- if (skb_has_frags(skb)) {
+ if (skb_has_frag_list(skb)) {
int first_len = skb_pagelen(skb);
- int truesizes = 0;
+ struct sk_buff *frag2;
if (first_len - hlen > mtu ||
((first_len - hlen) & 7) ||
@@ -651,18 +651,18 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
if (frag->len > mtu ||
((frag->len & 7) && frag->next) ||
skb_headroom(frag) < hlen)
- goto slow_path;
+ goto slow_path_clean;
/* Partially cloned skb? */
if (skb_shared(frag))
- goto slow_path;
+ goto slow_path_clean;
BUG_ON(frag->sk);
if (skb->sk) {
frag->sk = skb->sk;
frag->destructor = sock_wfree;
- truesizes += frag->truesize;
}
+ skb->truesize -= frag->truesize;
}
err = 0;
@@ -693,7 +693,6 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
first_len = skb_pagelen(skb);
skb->data_len = first_len - skb_headlen(skb);
- skb->truesize -= truesizes;
skb->len = first_len;
ipv6_hdr(skb)->payload_len = htons(first_len -
sizeof(struct ipv6hdr));
@@ -756,6 +755,15 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
IPSTATS_MIB_FRAGFAILS);
dst_release(&rt->dst);
return err;
+
+slow_path_clean:
+ skb_walk_frags(skb, frag2) {
+ if (frag2 == frag)
+ break;
+ frag2->sk = NULL;
+ frag2->destructor = NULL;
+ skb->truesize += frag2->truesize;
+ }
}
slow_path:
@@ -870,8 +878,8 @@ static inline int ip6_rt_check(struct rt6key *rt_key,
struct in6_addr *fl_addr,
struct in6_addr *addr_cache)
{
- return ((rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
- (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache)));
+ return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
+ (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
}
static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 0fd027f3f47e..c2c0f89397b1 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -75,7 +75,7 @@ MODULE_LICENSE("GPL");
(addr)->s6_addr32[2] ^ (addr)->s6_addr32[3]) & \
(HASH_SIZE - 1))
-static void ip6_tnl_dev_init(struct net_device *dev);
+static int ip6_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_setup(struct net_device *dev);
static int ip6_tnl_net_id __read_mostly;
@@ -83,15 +83,42 @@ struct ip6_tnl_net {
/* the IPv6 tunnel fallback device */
struct net_device *fb_tnl_dev;
/* lists for storing tunnels in use */
- struct ip6_tnl *tnls_r_l[HASH_SIZE];
- struct ip6_tnl *tnls_wc[1];
- struct ip6_tnl **tnls[2];
+ struct ip6_tnl __rcu *tnls_r_l[HASH_SIZE];
+ struct ip6_tnl __rcu *tnls_wc[1];
+ struct ip6_tnl __rcu **tnls[2];
};
+/* often modified stats are per cpu, others are shared (netdev->stats) */
+struct pcpu_tstats {
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+};
+
+static struct net_device_stats *ip6_get_stats(struct net_device *dev)
+{
+ struct pcpu_tstats sum = { 0 };
+ int i;
+
+ for_each_possible_cpu(i) {
+ const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+
+ sum.rx_packets += tstats->rx_packets;
+ sum.rx_bytes += tstats->rx_bytes;
+ sum.tx_packets += tstats->tx_packets;
+ sum.tx_bytes += tstats->tx_bytes;
+ }
+ dev->stats.rx_packets = sum.rx_packets;
+ dev->stats.rx_bytes = sum.rx_bytes;
+ dev->stats.tx_packets = sum.tx_packets;
+ dev->stats.tx_bytes = sum.tx_bytes;
+ return &dev->stats;
+}
+
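The tunnel driver moves its byte/packet counters to per-cpu storage: each CPU bumps its own pcpu_tstats copy locklessly on the hot path, and ip6_get_stats() folds the copies into netdev->stats only when somebody asks. A kernel-style sketch of the pattern with hypothetical names; allocation-failure handling and the free_percpu() on teardown are omitted:

    #include <linux/percpu.h>

    struct example_tstats {
            unsigned long packets;
            unsigned long bytes;
    };

    /* allocated once per device, e.g.
     *   dev->tstats = alloc_percpu(struct example_tstats); */

    /* hot path, runs in softirq context, so this_cpu_ptr() is stable here */
    static void example_count_rx(struct example_tstats __percpu *tstats,
                                 unsigned int len)
    {
            struct example_tstats *s = this_cpu_ptr(tstats);

            s->packets++;
            s->bytes += len;
    }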
/*
- * Locking : hash tables are protected by RCU and a spinlock
+ * Locking : hash tables are protected by RCU and RTNL
*/
-static DEFINE_SPINLOCK(ip6_tnl_lock);
static inline struct dst_entry *ip6_tnl_dst_check(struct ip6_tnl *t)
{
@@ -138,8 +165,8 @@ static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
static struct ip6_tnl *
ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local)
{
- unsigned h0 = HASH(remote);
- unsigned h1 = HASH(local);
+ unsigned int h0 = HASH(remote);
+ unsigned int h1 = HASH(local);
struct ip6_tnl *t;
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
@@ -167,7 +194,7 @@ ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local)
* Return: head of IPv6 tunnel list
**/
-static struct ip6_tnl **
+static struct ip6_tnl __rcu **
ip6_tnl_bucket(struct ip6_tnl_net *ip6n, struct ip6_tnl_parm *p)
{
struct in6_addr *remote = &p->raddr;
@@ -190,12 +217,10 @@ ip6_tnl_bucket(struct ip6_tnl_net *ip6n, struct ip6_tnl_parm *p)
static void
ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
- struct ip6_tnl **tp = ip6_tnl_bucket(ip6n, &t->parms);
+ struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
- spin_lock_bh(&ip6_tnl_lock);
- t->next = *tp;
+ rcu_assign_pointer(t->next, rtnl_dereference(*tp));
rcu_assign_pointer(*tp, t);
- spin_unlock_bh(&ip6_tnl_lock);
}
/**
@@ -206,18 +231,25 @@ ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
static void
ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
- struct ip6_tnl **tp;
-
- for (tp = ip6_tnl_bucket(ip6n, &t->parms); *tp; tp = &(*tp)->next) {
- if (t == *tp) {
- spin_lock_bh(&ip6_tnl_lock);
- *tp = t->next;
- spin_unlock_bh(&ip6_tnl_lock);
+ struct ip6_tnl __rcu **tp;
+ struct ip6_tnl *iter;
+
+ for (tp = ip6_tnl_bucket(ip6n, &t->parms);
+ (iter = rtnl_dereference(*tp)) != NULL;
+ tp = &iter->next) {
+ if (t == iter) {
+ rcu_assign_pointer(*tp, t->next);
break;
}
}
}
+static void ip6_dev_free(struct net_device *dev)
+{
+ free_percpu(dev->tstats);
+ free_netdev(dev);
+}
+
/**
* ip6_tnl_create() - create a new tunnel
* @p: tunnel parameters
@@ -256,7 +288,9 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
t = netdev_priv(dev);
t->parms = *p;
- ip6_tnl_dev_init(dev);
+ err = ip6_tnl_dev_init(dev);
+ if (err < 0)
+ goto failed_free;
if ((err = register_netdevice(dev)) < 0)
goto failed_free;
@@ -266,7 +300,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
return t;
failed_free:
- free_netdev(dev);
+ ip6_dev_free(dev);
failed:
return NULL;
}
@@ -290,10 +324,13 @@ static struct ip6_tnl *ip6_tnl_locate(struct net *net,
{
struct in6_addr *remote = &p->raddr;
struct in6_addr *local = &p->laddr;
+ struct ip6_tnl __rcu **tp;
struct ip6_tnl *t;
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
- for (t = *ip6_tnl_bucket(ip6n, p); t; t = t->next) {
+ for (tp = ip6_tnl_bucket(ip6n, p);
+ (t = rtnl_dereference(*tp)) != NULL;
+ tp = &t->next) {
if (ipv6_addr_equal(local, &t->parms.laddr) &&
ipv6_addr_equal(remote, &t->parms.raddr))
return t;
@@ -318,13 +355,10 @@ ip6_tnl_dev_uninit(struct net_device *dev)
struct net *net = dev_net(dev);
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
- if (dev == ip6n->fb_tnl_dev) {
- spin_lock_bh(&ip6_tnl_lock);
- ip6n->tnls_wc[0] = NULL;
- spin_unlock_bh(&ip6_tnl_lock);
- } else {
+ if (dev == ip6n->fb_tnl_dev)
+ rcu_assign_pointer(ip6n->tnls_wc[0], NULL);
+ else
ip6_tnl_unlink(ip6n, t);
- }
ip6_tnl_dst_reset(t);
dev_put(dev);
}
@@ -702,6 +736,8 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
if ((t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr,
&ipv6h->daddr)) != NULL) {
+ struct pcpu_tstats *tstats;
+
if (t->parms.proto != ipproto && t->parms.proto != 0) {
rcu_read_unlock();
goto discard;
@@ -724,10 +760,16 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
skb->pkt_type = PACKET_HOST;
memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
- skb_tunnel_rx(skb, t->dev);
+ tstats = this_cpu_ptr(t->dev->tstats);
+ tstats->rx_packets++;
+ tstats->rx_bytes += skb->len;
+
+ __skb_tunnel_rx(skb, t->dev);
dscp_ecn_decapsulate(t, ipv6h, skb);
+
netif_rx(skb);
+
rcu_read_unlock();
return 0;
}
@@ -934,8 +976,10 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
err = ip6_local_out(skb);
if (net_xmit_eval(err) == 0) {
- stats->tx_bytes += pkt_len;
- stats->tx_packets++;
+ struct pcpu_tstats *tstats = this_cpu_ptr(t->dev->tstats);
+
+ tstats->tx_bytes += pkt_len;
+ tstats->tx_packets++;
} else {
stats->tx_errors++;
stats->tx_aborted_errors++;
@@ -1300,12 +1344,14 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
static const struct net_device_ops ip6_tnl_netdev_ops = {
- .ndo_uninit = ip6_tnl_dev_uninit,
+ .ndo_uninit = ip6_tnl_dev_uninit,
.ndo_start_xmit = ip6_tnl_xmit,
- .ndo_do_ioctl = ip6_tnl_ioctl,
+ .ndo_do_ioctl = ip6_tnl_ioctl,
.ndo_change_mtu = ip6_tnl_change_mtu,
+ .ndo_get_stats = ip6_get_stats,
};
+
/**
* ip6_tnl_dev_setup - setup virtual tunnel device
* @dev: virtual device associated with tunnel
@@ -1317,7 +1363,7 @@ static const struct net_device_ops ip6_tnl_netdev_ops = {
static void ip6_tnl_dev_setup(struct net_device *dev)
{
dev->netdev_ops = &ip6_tnl_netdev_ops;
- dev->destructor = free_netdev;
+ dev->destructor = ip6_dev_free;
dev->type = ARPHRD_TUNNEL6;
dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr);
@@ -1333,12 +1379,17 @@ static void ip6_tnl_dev_setup(struct net_device *dev)
* @dev: virtual device associated with tunnel
**/
-static inline void
+static inline int
ip6_tnl_dev_init_gen(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
+
t->dev = dev;
strcpy(t->parms.name, dev->name);
+ dev->tstats = alloc_percpu(struct pcpu_tstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+ return 0;
}
/**
@@ -1346,11 +1397,15 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
* @dev: virtual device associated with tunnel
**/
-static void ip6_tnl_dev_init(struct net_device *dev)
+static int ip6_tnl_dev_init(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- ip6_tnl_dev_init_gen(dev);
+ int err = ip6_tnl_dev_init_gen(dev);
+
+ if (err)
+ return err;
ip6_tnl_link_config(t);
+ return 0;
}
/**
@@ -1360,25 +1415,29 @@ static void ip6_tnl_dev_init(struct net_device *dev)
* Return: 0
**/
-static void __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
+static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
struct net *net = dev_net(dev);
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
+ int err = ip6_tnl_dev_init_gen(dev);
+
+ if (err)
+ return err;
- ip6_tnl_dev_init_gen(dev);
t->parms.proto = IPPROTO_IPV6;
dev_hold(dev);
- ip6n->tnls_wc[0] = t;
+ rcu_assign_pointer(ip6n->tnls_wc[0], t);
+ return 0;
}
-static struct xfrm6_tunnel ip4ip6_handler = {
+static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
.handler = ip4ip6_rcv,
.err_handler = ip4ip6_err,
.priority = 1,
};
-static struct xfrm6_tunnel ip6ip6_handler = {
+static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
.handler = ip6ip6_rcv,
.err_handler = ip6ip6_err,
.priority = 1,
@@ -1391,14 +1450,14 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
LIST_HEAD(list);
for (h = 0; h < HASH_SIZE; h++) {
- t = ip6n->tnls_r_l[h];
+ t = rtnl_dereference(ip6n->tnls_r_l[h]);
while (t != NULL) {
unregister_netdevice_queue(t->dev, &list);
- t = t->next;
+ t = rtnl_dereference(t->next);
}
}
- t = ip6n->tnls_wc[0];
+ t = rtnl_dereference(ip6n->tnls_wc[0]);
unregister_netdevice_queue(t->dev, &list);
unregister_netdevice_many(&list);
}
@@ -1419,7 +1478,9 @@ static int __net_init ip6_tnl_init_net(struct net *net)
goto err_alloc_dev;
dev_net_set(ip6n->fb_tnl_dev, net);
- ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
+ err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
+ if (err < 0)
+ goto err_register;
err = register_netdev(ip6n->fb_tnl_dev);
if (err < 0)
@@ -1427,7 +1488,7 @@ static int __net_init ip6_tnl_init_net(struct net *net)
return 0;
err_register:
- free_netdev(ip6n->fb_tnl_dev);
+ ip6_dev_free(ip6n->fb_tnl_dev);
err_alloc_dev:
return err;
}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 66078dad7fe8..6f32ffce7022 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -667,6 +667,7 @@ static int pim6_rcv(struct sk_buff *skb)
skb_tunnel_rx(skb, reg_dev);
netif_rx(skb);
+
dev_put(reg_dev);
return 0;
drop:
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 58841c4ae947..998d6d27e7cf 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -91,7 +91,9 @@
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
-static u32 ndisc_hash(const void *pkey, const struct net_device *dev);
+static u32 ndisc_hash(const void *pkey,
+ const struct net_device *dev,
+ __u32 rnd);
static int ndisc_constructor(struct neighbour *neigh);
static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb);
static void ndisc_error_report(struct neighbour *neigh, struct sk_buff *skb);
@@ -228,12 +230,12 @@ static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur,
do {
cur = ((void *)cur) + (cur->nd_opt_len << 3);
} while(cur < end && cur->nd_opt_type != type);
- return (cur <= end && cur->nd_opt_type == type ? cur : NULL);
+ return cur <= end && cur->nd_opt_type == type ? cur : NULL;
}
static inline int ndisc_is_useropt(struct nd_opt_hdr *opt)
{
- return (opt->nd_opt_type == ND_OPT_RDNSS);
+ return opt->nd_opt_type == ND_OPT_RDNSS;
}
static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur,
@@ -244,7 +246,7 @@ static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur,
do {
cur = ((void *)cur) + (cur->nd_opt_len << 3);
} while(cur < end && !ndisc_is_useropt(cur));
- return (cur <= end && ndisc_is_useropt(cur) ? cur : NULL);
+ return cur <= end && ndisc_is_useropt(cur) ? cur : NULL;
}
static struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
@@ -319,7 +321,7 @@ static inline u8 *ndisc_opt_addr_data(struct nd_opt_hdr *p,
int prepad = ndisc_addr_option_pad(dev->type);
if (lladdrlen != NDISC_OPT_SPACE(dev->addr_len + prepad))
return NULL;
- return (lladdr + prepad);
+ return lladdr + prepad;
}
int ndisc_mc_map(struct in6_addr *addr, char *buf, struct net_device *dev, int dir)
@@ -350,7 +352,9 @@ int ndisc_mc_map(struct in6_addr *addr, char *buf, struct net_device *dev, int d
EXPORT_SYMBOL(ndisc_mc_map);
-static u32 ndisc_hash(const void *pkey, const struct net_device *dev)
+static u32 ndisc_hash(const void *pkey,
+ const struct net_device *dev,
+ __u32 hash_rnd)
{
const u32 *p32 = pkey;
u32 addr_hash, i;
@@ -359,7 +363,7 @@ static u32 ndisc_hash(const void *pkey, const struct net_device *dev)
for (i = 0; i < (sizeof(struct in6_addr) / sizeof(u32)); i++)
addr_hash ^= *p32++;
- return jhash_2words(addr_hash, dev->ifindex, nd_tbl.hash_rnd);
+ return jhash_2words(addr_hash, dev->ifindex, hash_rnd);
}
static int ndisc_constructor(struct neighbour *neigh)
@@ -1105,6 +1109,18 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_ND_USEROPT, err);
}
+static inline int accept_ra(struct inet6_dev *in6_dev)
+{
+ /*
+ * If forwarding is enabled, RA are not accepted unless the special
+ * hybrid mode (accept_ra=2) is enabled.
+ */
+ if (in6_dev->cnf.forwarding && in6_dev->cnf.accept_ra < 2)
+ return 0;
+
+ return in6_dev->cnf.accept_ra;
+}
+
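accept_ra is effectively tri-state on this path: 0 rejects Router Advertisements, 1 accepts them only while forwarding is off (the old behaviour), and 2 accepts them even on a forwarding interface, so a router can still autoconfigure its uplink, e.g. with net.ipv6.conf.eth0.accept_ra = 2.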
static void ndisc_router_discovery(struct sk_buff *skb)
{
struct ra_msg *ra_msg = (struct ra_msg *)skb_transport_header(skb);
@@ -1158,8 +1174,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
return;
}
- /* skip route and link configuration on routers */
- if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_ra)
+ if (!accept_ra(in6_dev))
goto skip_linkparms;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
@@ -1309,8 +1324,7 @@ skip_linkparms:
NEIGH_UPDATE_F_ISROUTER);
}
- /* skip route and link configuration on routers */
- if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_ra)
+ if (!accept_ra(in6_dev))
goto out;
#ifdef CONFIG_IPV6_ROUTE_INFO
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 5359ef4daac5..6b331e9b5706 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -82,13 +82,13 @@ EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
int
ip6t_ext_hdr(u8 nexthdr)
{
- return ( (nexthdr == IPPROTO_HOPOPTS) ||
- (nexthdr == IPPROTO_ROUTING) ||
- (nexthdr == IPPROTO_FRAGMENT) ||
- (nexthdr == IPPROTO_ESP) ||
- (nexthdr == IPPROTO_AH) ||
- (nexthdr == IPPROTO_NONE) ||
- (nexthdr == IPPROTO_DSTOPTS) );
+ return (nexthdr == IPPROTO_HOPOPTS) ||
+ (nexthdr == IPPROTO_ROUTING) ||
+ (nexthdr == IPPROTO_FRAGMENT) ||
+ (nexthdr == IPPROTO_ESP) ||
+ (nexthdr == IPPROTO_AH) ||
+ (nexthdr == IPPROTO_NONE) ||
+ (nexthdr == IPPROTO_DSTOPTS);
}
/* Returns whether matches rule or not. */
@@ -922,6 +922,7 @@ get_counters(const struct xt_table_info *t,
if (cpu == curcpu)
continue;
i = 0;
+ local_bh_disable();
xt_info_wrlock(cpu);
xt_entry_foreach(iter, t->entries[cpu], t->size) {
ADD_COUNTER(counters[i], iter->counters.bcnt,
@@ -929,6 +930,7 @@ get_counters(const struct xt_table_info *t,
++i;
}
xt_info_wrunlock(cpu);
+ local_bh_enable();
}
put_cpu();
}
@@ -1764,6 +1766,9 @@ translate_compat_table(struct net *net,
if (ret != 0)
break;
++i;
+ if (strcmp(ip6t_get_target(iter1)->u.user.name,
+ XT_ERROR_TARGET) == 0)
+ ++newinfo->stacksize;
}
if (ret) {
/*
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 13ef5bc05cf5..138a8b362706 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -113,14 +113,6 @@ static void nf_skb_free(struct sk_buff *skb)
kfree_skb(NFCT_FRAG6_CB(skb)->orig);
}
-/* Memory Tracking Functions. */
-static void frag_kfree_skb(struct sk_buff *skb)
-{
- atomic_sub(skb->truesize, &nf_init_frags.mem);
- nf_skb_free(skb);
- kfree_skb(skb);
-}
-
/* Destruction primitives. */
static __inline__ void fq_put(struct nf_ct_frag6_queue *fq)
@@ -282,66 +274,22 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
}
found:
- /* We found where to put this one. Check for overlap with
- * preceding fragment, and, if needed, align things so that
- * any overlaps are eliminated.
- */
- if (prev) {
- int i = (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset;
-
- if (i > 0) {
- offset += i;
- if (end <= offset) {
- pr_debug("overlap\n");
- goto err;
- }
- if (!pskb_pull(skb, i)) {
- pr_debug("Can't pull\n");
- goto err;
- }
- if (skb->ip_summed != CHECKSUM_UNNECESSARY)
- skb->ip_summed = CHECKSUM_NONE;
- }
- }
-
- /* Look for overlap with succeeding segments.
- * If we can merge fragments, do it.
+ /* RFC5722, Section 4:
+ * When reassembling an IPv6 datagram, if
+ * one or more of its constituent fragments is determined to be an
+ * overlapping fragment, the entire datagram (and any constituent
+ * fragments, including those not yet received) MUST be silently
+ * discarded.
*/
- while (next && NFCT_FRAG6_CB(next)->offset < end) {
- /* overlap is 'i' bytes */
- int i = end - NFCT_FRAG6_CB(next)->offset;
-
- if (i < next->len) {
- /* Eat head of the next overlapped fragment
- * and leave the loop. The next ones cannot overlap.
- */
- pr_debug("Eat head of the overlapped parts.: %d", i);
- if (!pskb_pull(next, i))
- goto err;
- /* next fragment */
- NFCT_FRAG6_CB(next)->offset += i;
- fq->q.meat -= i;
- if (next->ip_summed != CHECKSUM_UNNECESSARY)
- next->ip_summed = CHECKSUM_NONE;
- break;
- } else {
- struct sk_buff *free_it = next;
-
- /* Old fragmnet is completely overridden with
- * new one drop it.
- */
- next = next->next;
+ /* Check for overlap with preceding fragment. */
+ if (prev &&
+ (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset > 0)
+ goto discard_fq;
- if (prev)
- prev->next = next;
- else
- fq->q.fragments = next;
-
- fq->q.meat -= free_it->len;
- frag_kfree_skb(free_it);
- }
- }
+ /* Look for overlap with succeeding segment. */
+ if (next && NFCT_FRAG6_CB(next)->offset < end)
+ goto discard_fq;
NFCT_FRAG6_CB(skb)->offset = offset;
@@ -371,6 +319,8 @@ found:
write_unlock(&nf_frags.lock);
return 0;
+discard_fq:
+ fq_kill(fq);
err:
return -1;
}
@@ -413,7 +363,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
/* If the first fragment is fragmented itself, we split
* it to two chunks: the first with data and paged part
* and the second, holding only fragments. */
- if (skb_has_frags(head)) {
+ if (skb_has_frag_list(head)) {
struct sk_buff *clone;
int i, plen = 0;
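With the RFC 5722 policy above, overlap handling reduces to two comparisons against the fragments that would sit on either side of the new one, and any hit discards the whole reassembly queue rather than trimming data. A standalone sketch of that decision, with hypothetical names and offsets in bytes:

#include <stdio.h>

/*
 * Returns 1 if a fragment covering [offset, end) overlaps either the
 * preceding fragment (prev_offset/prev_len) or the succeeding one
 * (next_offset).  Pass have_prev/have_next = 0 when there is no such
 * neighbour.  Per RFC 5722 an overlap means: discard the whole datagram.
 */
static int frag_overlaps(int have_prev, int prev_offset, int prev_len,
                         int offset, int end,
                         int have_next, int next_offset)
{
        if (have_prev && prev_offset + prev_len > offset)
                return 1;       /* tail of prev reaches into the new fragment */
        if (have_next && next_offset < end)
                return 1;       /* new fragment reaches into next */
        return 0;
}

int main(void)
{
        /* prev covers [0,1232), new fragment claims [1200,2432): overlap. */
        printf("%d\n", frag_overlaps(1, 0, 1232, 1200, 2432, 0, 0));
        /* prev covers [0,1232), new fragment [1232,2464), next at 2464: ok. */
        printf("%d\n", frag_overlaps(1, 0, 1232, 1232, 2464, 1, 2464));
        return 0;
}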
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c
index 1fa3468f0f32..9bb936ae2452 100644
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -25,28 +25,14 @@
#include <linux/spinlock.h>
#include <net/protocol.h>
-const struct inet6_protocol *inet6_protos[MAX_INET_PROTOS];
-static DEFINE_SPINLOCK(inet6_proto_lock);
-
+const struct inet6_protocol *inet6_protos[MAX_INET_PROTOS] __read_mostly;
int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol)
{
- int ret, hash = protocol & (MAX_INET_PROTOS - 1);
-
- spin_lock_bh(&inet6_proto_lock);
-
- if (inet6_protos[hash]) {
- ret = -1;
- } else {
- inet6_protos[hash] = prot;
- ret = 0;
- }
-
- spin_unlock_bh(&inet6_proto_lock);
+ int hash = protocol & (MAX_INET_PROTOS - 1);
- return ret;
+ return !cmpxchg(&inet6_protos[hash], NULL, prot) ? 0 : -1;
}
-
EXPORT_SYMBOL(inet6_add_protocol);
/*
@@ -57,20 +43,10 @@ int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol
{
int ret, hash = protocol & (MAX_INET_PROTOS - 1);
- spin_lock_bh(&inet6_proto_lock);
-
- if (inet6_protos[hash] != prot) {
- ret = -1;
- } else {
- inet6_protos[hash] = NULL;
- ret = 0;
- }
-
- spin_unlock_bh(&inet6_proto_lock);
+ ret = (cmpxchg(&inet6_protos[hash], prot, NULL) == prot) ? 0 : -1;
synchronize_net();
return ret;
}
-
EXPORT_SYMBOL(inet6_del_protocol);
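The protocol.c conversion above works because a handler slot only ever moves between NULL and a single registered handler, so one compare-and-swap replaces the spinlock for both add and del. A userspace analogue using C11 atomics; the table size and names are made up for the example.

#include <stdatomic.h>
#include <stdio.h>

#define MAX_PROTOS 256

struct proto_handler { const char *name; };

static _Atomic(struct proto_handler *) protos[MAX_PROTOS];

/* 0 on success, -1 if the slot is already taken. */
static int add_protocol(struct proto_handler *prot, unsigned char protocol)
{
        struct proto_handler *expected = NULL;

        return atomic_compare_exchange_strong(&protos[protocol],
                                              &expected, prot) ? 0 : -1;
}

/* 0 on success, -1 if a different handler (or none) is registered. */
static int del_protocol(struct proto_handler *prot, unsigned char protocol)
{
        struct proto_handler *expected = prot;
        int ret;

        ret = atomic_compare_exchange_strong(&protos[protocol],
                                             &expected, NULL) ? 0 : -1;
        /* The kernel follows this with synchronize_net() so in-flight
         * readers drain before the handler can be torn down. */
        return ret;
}

int main(void)
{
        struct proto_handler udp = { "udp" }, dup = { "dup" };

        printf("%d\n", add_protocol(&udp, 17));   /*  0 */
        printf("%d\n", add_protocol(&dup, 17));   /* -1, slot taken */
        printf("%d\n", del_protocol(&udp, 17));   /*  0 */
        return 0;
}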
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index e677937a07fc..45e6efb7f171 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -764,7 +764,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
return -EINVAL;
if (sin6->sin6_family && sin6->sin6_family != AF_INET6)
- return(-EAFNOSUPPORT);
+ return -EAFNOSUPPORT;
/* port is the proto value [0..255] carried in nexthdr */
proto = ntohs(sin6->sin6_port);
@@ -772,10 +772,10 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
if (!proto)
proto = inet->inet_num;
else if (proto != inet->inet_num)
- return(-EINVAL);
+ return -EINVAL;
if (proto > 255)
- return(-EINVAL);
+ return -EINVAL;
daddr = &sin6->sin6_addr;
if (np->sndflow) {
@@ -985,7 +985,7 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
/* You may get strange result with a positive odd offset;
RFC2292bis agrees with me. */
if (val > 0 && (val&1))
- return(-EINVAL);
+ return -EINVAL;
if (val < 0) {
rp->checksum = 0;
} else {
@@ -997,7 +997,7 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
break;
default:
- return(-ENOPROTOOPT);
+ return -ENOPROTOOPT;
}
}
@@ -1190,7 +1190,7 @@ static int rawv6_init_sk(struct sock *sk)
default:
break;
}
- return(0);
+ return 0;
}
struct proto rawv6_prot = {
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 545c4141b755..c7ba3149633f 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -149,13 +149,6 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a)
}
EXPORT_SYMBOL(ip6_frag_match);
-/* Memory Tracking Functions. */
-static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
-{
- atomic_sub(skb->truesize, &nf->mem);
- kfree_skb(skb);
-}
-
void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
struct frag_queue *fq = container_of(q, struct frag_queue, q);
@@ -346,58 +339,22 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
}
found:
- /* We found where to put this one. Check for overlap with
- * preceding fragment, and, if needed, align things so that
- * any overlaps are eliminated.
+ /* RFC5722, Section 4:
+ * When reassembling an IPv6 datagram, if
+ * one or more of its constituent fragments is determined to be an
+ * overlapping fragment, the entire datagram (and any constituent
+ * fragments, including those not yet received) MUST be silently
+ * discarded.
*/
- if (prev) {
- int i = (FRAG6_CB(prev)->offset + prev->len) - offset;
- if (i > 0) {
- offset += i;
- if (end <= offset)
- goto err;
- if (!pskb_pull(skb, i))
- goto err;
- if (skb->ip_summed != CHECKSUM_UNNECESSARY)
- skb->ip_summed = CHECKSUM_NONE;
- }
- }
+ /* Check for overlap with preceding fragment. */
+ if (prev &&
+ (FRAG6_CB(prev)->offset + prev->len) - offset > 0)
+ goto discard_fq;
- /* Look for overlap with succeeding segments.
- * If we can merge fragments, do it.
- */
- while (next && FRAG6_CB(next)->offset < end) {
- int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */
-
- if (i < next->len) {
- /* Eat head of the next overlapped fragment
- * and leave the loop. The next ones cannot overlap.
- */
- if (!pskb_pull(next, i))
- goto err;
- FRAG6_CB(next)->offset += i; /* next fragment */
- fq->q.meat -= i;
- if (next->ip_summed != CHECKSUM_UNNECESSARY)
- next->ip_summed = CHECKSUM_NONE;
- break;
- } else {
- struct sk_buff *free_it = next;
-
- /* Old fragment is completely overridden with
- * new one drop it.
- */
- next = next->next;
-
- if (prev)
- prev->next = next;
- else
- fq->q.fragments = next;
-
- fq->q.meat -= free_it->len;
- frag_kfree_skb(fq->q.net, free_it);
- }
- }
+ /* Look for overlap with succeeding segment. */
+ if (next && FRAG6_CB(next)->offset < end)
+ goto discard_fq;
FRAG6_CB(skb)->offset = offset;
@@ -436,6 +393,8 @@ found:
write_unlock(&ip6_frags.lock);
return -1;
+discard_fq:
+ fq_kill(fq);
err:
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_REASMFAILS);
@@ -499,7 +458,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
/* If the first fragment is fragmented itself, we split
* it to two chunks: the first with data and paged part
* and the second, holding only fragments. */
- if (skb_has_frags(head)) {
+ if (skb_has_frag_list(head)) {
struct sk_buff *clone;
int i, plen = 0;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 8f2d0400cf8a..17e217933885 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -217,14 +217,14 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
static __inline__ int rt6_check_expired(const struct rt6_info *rt)
{
- return (rt->rt6i_flags & RTF_EXPIRES &&
- time_after(jiffies, rt->rt6i_expires));
+ return (rt->rt6i_flags & RTF_EXPIRES) &&
+ time_after(jiffies, rt->rt6i_expires);
}
static inline int rt6_need_strict(struct in6_addr *daddr)
{
- return (ipv6_addr_type(daddr) &
- (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK));
+ return ipv6_addr_type(daddr) &
+ (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
}
/*
@@ -440,7 +440,7 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
__func__, match);
net = dev_net(rt0->rt6i_dev);
- return (match ? match : net->ipv6.ip6_null_entry);
+ return match ? match : net->ipv6.ip6_null_entry;
}
#ifdef CONFIG_IPV6_ROUTE_INFO
@@ -670,7 +670,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad
if (net_ratelimit())
printk(KERN_WARNING
- "Neighbour table overflow.\n");
+ "ipv6: Neighbour table overflow.\n");
dst_free(&rt->dst);
return NULL;
}
@@ -859,7 +859,7 @@ int ip6_dst_blackhole(struct sock *sk, struct dst_entry **dstp, struct flowi *fl
dst_release(*dstp);
*dstp = new;
- return (new ? 0 : -ENOMEM);
+ return new ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(ip6_dst_blackhole);
@@ -1070,7 +1070,7 @@ static int ip6_dst_gc(struct dst_ops *ops)
net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
out:
net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
- return (atomic_read(&ops->entries) > rt_max_size);
+ return atomic_read(&ops->entries) > rt_max_size;
}
/* Clean host part of a prefix. Not necessary in radix tree,
@@ -1169,6 +1169,8 @@ int ip6_route_add(struct fib6_config *cfg)
if (addr_type & IPV6_ADDR_MULTICAST)
rt->dst.input = ip6_mc_input;
+ else if (cfg->fc_flags & RTF_LOCAL)
+ rt->dst.input = ip6_input;
else
rt->dst.input = ip6_forward;
@@ -1190,7 +1192,8 @@ int ip6_route_add(struct fib6_config *cfg)
they would result in kernel looping; promote them to reject routes
*/
if ((cfg->fc_flags & RTF_REJECT) ||
- (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK))) {
+ (dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK)
+ && !(cfg->fc_flags&RTF_LOCAL))) {
/* hold loopback dev/idev if we haven't done so. */
if (dev != net->loopback_dev) {
if (dev) {
@@ -1556,14 +1559,13 @@ out:
* i.e. Path MTU discovery
*/
-void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
- struct net_device *dev, u32 pmtu)
+static void rt6_do_pmtu_disc(struct in6_addr *daddr, struct in6_addr *saddr,
+ struct net *net, u32 pmtu, int ifindex)
{
struct rt6_info *rt, *nrt;
- struct net *net = dev_net(dev);
int allfrag = 0;
- rt = rt6_lookup(net, daddr, saddr, dev->ifindex, 0);
+ rt = rt6_lookup(net, daddr, saddr, ifindex, 0);
if (rt == NULL)
return;
@@ -1631,6 +1633,27 @@ out:
dst_release(&rt->dst);
}
+void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
+ struct net_device *dev, u32 pmtu)
+{
+ struct net *net = dev_net(dev);
+
+ /*
+ * RFC 1981 states that a node "MUST reduce the size of the packets it
+ * is sending along the path" that caused the Packet Too Big message.
+ * Since it's not possible in the general case to determine which
+ * interface was used to send the original packet, we update the MTU
+ * on the interface that will be used to send future packets. We also
+ * update the MTU on the interface that received the Packet Too Big in
+ * case the original packet was forced out that interface with
+ * SO_BINDTODEVICE or similar. This is the next best thing to the
+ * correct behaviour, which would be to update the MTU on all
+ * interfaces.
+ */
+ rt6_do_pmtu_disc(daddr, saddr, net, pmtu, 0);
+ rt6_do_pmtu_disc(daddr, saddr, net, pmtu, dev->ifindex);
+}
+
/*
* Misc support functions
*/
@@ -2082,6 +2105,9 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
if (rtm->rtm_type == RTN_UNREACHABLE)
cfg->fc_flags |= RTF_REJECT;
+ if (rtm->rtm_type == RTN_LOCAL)
+ cfg->fc_flags |= RTF_LOCAL;
+
cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
cfg->fc_nlinfo.nlh = nlh;
cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
@@ -2202,6 +2228,8 @@ static int rt6_fill_node(struct net *net,
NLA_PUT_U32(skb, RTA_TABLE, table);
if (rt->rt6i_flags&RTF_REJECT)
rtm->rtm_type = RTN_UNREACHABLE;
+ else if (rt->rt6i_flags&RTF_LOCAL)
+ rtm->rtm_type = RTN_LOCAL;
else if (rt->rt6i_dev && (rt->rt6i_dev->flags&IFF_LOOPBACK))
rtm->rtm_type = RTN_LOCAL;
else
@@ -2580,7 +2608,7 @@ ctl_table ipv6_route_table_template[] = {
.data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
+ .proc_handler = proc_dointvec,
},
{
.procname = "mtu_expires",
@@ -2594,7 +2622,7 @@ ctl_table ipv6_route_table_template[] = {
.data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
+ .proc_handler = proc_dointvec,
},
{
.procname = "gc_min_interval_ms",
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 4699cd3c3118..367a6cc584cc 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -63,36 +63,63 @@
#define HASH_SIZE 16
#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
-static void ipip6_tunnel_init(struct net_device *dev);
+static int ipip6_tunnel_init(struct net_device *dev);
static void ipip6_tunnel_setup(struct net_device *dev);
+static void ipip6_dev_free(struct net_device *dev);
static int sit_net_id __read_mostly;
struct sit_net {
- struct ip_tunnel *tunnels_r_l[HASH_SIZE];
- struct ip_tunnel *tunnels_r[HASH_SIZE];
- struct ip_tunnel *tunnels_l[HASH_SIZE];
- struct ip_tunnel *tunnels_wc[1];
- struct ip_tunnel **tunnels[4];
+ struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
+ struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
+ struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
+ struct ip_tunnel __rcu *tunnels_wc[1];
+ struct ip_tunnel __rcu **tunnels[4];
struct net_device *fb_tunnel_dev;
};
/*
- * Locking : hash tables are protected by RCU and a spinlock
+ * Locking : hash tables are protected by RCU and RTNL
*/
-static DEFINE_SPINLOCK(ipip6_lock);
#define for_each_ip_tunnel_rcu(start) \
for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
+/* often modified stats are per cpu, others are shared (netdev->stats) */
+struct pcpu_tstats {
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+};
+
+static struct net_device_stats *ipip6_get_stats(struct net_device *dev)
+{
+ struct pcpu_tstats sum = { 0 };
+ int i;
+
+ for_each_possible_cpu(i) {
+ const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
+
+ sum.rx_packets += tstats->rx_packets;
+ sum.rx_bytes += tstats->rx_bytes;
+ sum.tx_packets += tstats->tx_packets;
+ sum.tx_bytes += tstats->tx_bytes;
+ }
+ dev->stats.rx_packets = sum.rx_packets;
+ dev->stats.rx_bytes = sum.rx_bytes;
+ dev->stats.tx_packets = sum.tx_packets;
+ dev->stats.tx_bytes = sum.tx_bytes;
+ return &dev->stats;
+}
/*
* Must be invoked with rcu_read_lock
*/
static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net,
struct net_device *dev, __be32 remote, __be32 local)
{
- unsigned h0 = HASH(remote);
- unsigned h1 = HASH(local);
+ unsigned int h0 = HASH(remote);
+ unsigned int h1 = HASH(local);
struct ip_tunnel *t;
struct sit_net *sitn = net_generic(net, sit_net_id);
@@ -121,12 +148,12 @@ static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net,
return NULL;
}
-static struct ip_tunnel **__ipip6_bucket(struct sit_net *sitn,
+static struct ip_tunnel __rcu **__ipip6_bucket(struct sit_net *sitn,
struct ip_tunnel_parm *parms)
{
__be32 remote = parms->iph.daddr;
__be32 local = parms->iph.saddr;
- unsigned h = 0;
+ unsigned int h = 0;
int prio = 0;
if (remote) {
@@ -140,7 +167,7 @@ static struct ip_tunnel **__ipip6_bucket(struct sit_net *sitn,
return &sitn->tunnels[prio][h];
}
-static inline struct ip_tunnel **ipip6_bucket(struct sit_net *sitn,
+static inline struct ip_tunnel __rcu **ipip6_bucket(struct sit_net *sitn,
struct ip_tunnel *t)
{
return __ipip6_bucket(sitn, &t->parms);
@@ -148,13 +175,14 @@ static inline struct ip_tunnel **ipip6_bucket(struct sit_net *sitn,
static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t)
{
- struct ip_tunnel **tp;
-
- for (tp = ipip6_bucket(sitn, t); *tp; tp = &(*tp)->next) {
- if (t == *tp) {
- spin_lock_bh(&ipip6_lock);
- *tp = t->next;
- spin_unlock_bh(&ipip6_lock);
+ struct ip_tunnel __rcu **tp;
+ struct ip_tunnel *iter;
+
+ for (tp = ipip6_bucket(sitn, t);
+ (iter = rtnl_dereference(*tp)) != NULL;
+ tp = &iter->next) {
+ if (t == iter) {
+ rcu_assign_pointer(*tp, t->next);
break;
}
}
@@ -162,12 +190,10 @@ static void ipip6_tunnel_unlink(struct sit_net *sitn, struct ip_tunnel *t)
static void ipip6_tunnel_link(struct sit_net *sitn, struct ip_tunnel *t)
{
- struct ip_tunnel **tp = ipip6_bucket(sitn, t);
+ struct ip_tunnel __rcu **tp = ipip6_bucket(sitn, t);
- spin_lock_bh(&ipip6_lock);
- t->next = *tp;
+ rcu_assign_pointer(t->next, rtnl_dereference(*tp));
rcu_assign_pointer(*tp, t);
- spin_unlock_bh(&ipip6_lock);
}
static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
@@ -187,17 +213,20 @@ static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
#endif
}
-static struct ip_tunnel * ipip6_tunnel_locate(struct net *net,
+static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
struct ip_tunnel_parm *parms, int create)
{
__be32 remote = parms->iph.daddr;
__be32 local = parms->iph.saddr;
- struct ip_tunnel *t, **tp, *nt;
+ struct ip_tunnel *t, *nt;
+ struct ip_tunnel __rcu **tp;
struct net_device *dev;
char name[IFNAMSIZ];
struct sit_net *sitn = net_generic(net, sit_net_id);
- for (tp = __ipip6_bucket(sitn, parms); (t = *tp) != NULL; tp = &t->next) {
+ for (tp = __ipip6_bucket(sitn, parms);
+ (t = rtnl_dereference(*tp)) != NULL;
+ tp = &t->next) {
if (local == t->parms.iph.saddr &&
remote == t->parms.iph.daddr &&
parms->link == t->parms.link) {
@@ -213,7 +242,7 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct net *net,
if (parms->name[0])
strlcpy(name, parms->name, IFNAMSIZ);
else
- sprintf(name, "sit%%d");
+ strcpy(name, "sit%d");
dev = alloc_netdev(sizeof(*t), name, ipip6_tunnel_setup);
if (dev == NULL)
@@ -229,7 +258,8 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct net *net,
nt = netdev_priv(dev);
nt->parms = *parms;
- ipip6_tunnel_init(dev);
+ if (ipip6_tunnel_init(dev) < 0)
+ goto failed_free;
ipip6_tunnel_clone_6rd(dev, sitn);
if (parms->i_flags & SIT_ISATAP)
@@ -244,7 +274,7 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct net *net,
return nt;
failed_free:
- free_netdev(dev);
+ ipip6_dev_free(dev);
failed:
return NULL;
}
@@ -340,7 +370,7 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
ASSERT_RTNL();
- for (p = t->prl; p; p = p->next) {
+ for (p = rtnl_dereference(t->prl); p; p = rtnl_dereference(p->next)) {
if (p->addr == a->addr) {
if (chg) {
p->flags = a->flags;
@@ -451,15 +481,12 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
struct sit_net *sitn = net_generic(net, sit_net_id);
if (dev == sitn->fb_tunnel_dev) {
- spin_lock_bh(&ipip6_lock);
- sitn->tunnels_wc[0] = NULL;
- spin_unlock_bh(&ipip6_lock);
- dev_put(dev);
+ rcu_assign_pointer(sitn->tunnels_wc[0], NULL);
} else {
ipip6_tunnel_unlink(sitn, netdev_priv(dev));
ipip6_tunnel_del_prl(netdev_priv(dev), NULL);
- dev_put(dev);
}
+ dev_put(dev);
}
@@ -548,6 +575,8 @@ static int ipip6_rcv(struct sk_buff *skb)
tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
iph->saddr, iph->daddr);
if (tunnel != NULL) {
+ struct pcpu_tstats *tstats;
+
secpath_reset(skb);
skb->mac_header = skb->network_header;
skb_reset_network_header(skb);
@@ -563,10 +592,16 @@ static int ipip6_rcv(struct sk_buff *skb)
return 0;
}
- skb_tunnel_rx(skb, tunnel->dev);
+ tstats = this_cpu_ptr(tunnel->dev->tstats);
+ tstats->rx_packets++;
+ tstats->rx_bytes += skb->len;
+
+ __skb_tunnel_rx(skb, tunnel->dev);
ipip6_ecn_decapsulate(iph, skb);
+
netif_rx(skb);
+
rcu_read_unlock();
return 0;
}
@@ -590,7 +625,7 @@ __be32 try_6rd(struct in6_addr *v6dst, struct ip_tunnel *tunnel)
#ifdef CONFIG_IPV6_SIT_6RD
if (ipv6_prefix_equal(v6dst, &tunnel->ip6rd.prefix,
tunnel->ip6rd.prefixlen)) {
- unsigned pbw0, pbi0;
+ unsigned int pbw0, pbi0;
int pbi1;
u32 d;
@@ -625,14 +660,13 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- struct net_device_stats *stats = &dev->stats;
- struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+ struct pcpu_tstats *tstats;
struct iphdr *tiph = &tunnel->parms.iph;
struct ipv6hdr *iph6 = ipv6_hdr(skb);
u8 tos = tunnel->parms.iph.tos;
__be16 df = tiph->frag_off;
struct rtable *rt; /* Route to the other host */
- struct net_device *tdev; /* Device to other host */
+ struct net_device *tdev; /* Device to other host */
struct iphdr *iph; /* Our new IP header */
unsigned int max_headroom; /* The extra header space needed */
__be32 dst = tiph->daddr;
@@ -703,20 +737,20 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
.oif = tunnel->parms.link,
.proto = IPPROTO_IPV6 };
if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
- stats->tx_carrier_errors++;
+ dev->stats.tx_carrier_errors++;
goto tx_error_icmp;
}
}
if (rt->rt_type != RTN_UNICAST) {
ip_rt_put(rt);
- stats->tx_carrier_errors++;
+ dev->stats.tx_carrier_errors++;
goto tx_error_icmp;
}
tdev = rt->dst.dev;
if (tdev == dev) {
ip_rt_put(rt);
- stats->collisions++;
+ dev->stats.collisions++;
goto tx_error;
}
@@ -724,7 +758,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
if (mtu < 68) {
- stats->collisions++;
+ dev->stats.collisions++;
ip_rt_put(rt);
goto tx_error;
}
@@ -763,7 +797,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
if (!new_skb) {
ip_rt_put(rt);
- txq->tx_dropped++;
+ dev->stats.tx_dropped++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -799,14 +833,14 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
iph->ttl = iph6->hop_limit;
nf_reset(skb);
-
- IPTUNNEL_XMIT();
+ tstats = this_cpu_ptr(dev->tstats);
+ __IPTUNNEL_XMIT(tstats, &dev->stats);
return NETDEV_TX_OK;
tx_error_icmp:
dst_link_failure(skb);
tx_error:
- stats->tx_errors++;
+ dev->stats.tx_errors++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -1083,12 +1117,19 @@ static const struct net_device_ops ipip6_netdev_ops = {
.ndo_start_xmit = ipip6_tunnel_xmit,
.ndo_do_ioctl = ipip6_tunnel_ioctl,
.ndo_change_mtu = ipip6_tunnel_change_mtu,
+ .ndo_get_stats = ipip6_get_stats,
};
+static void ipip6_dev_free(struct net_device *dev)
+{
+ free_percpu(dev->tstats);
+ free_netdev(dev);
+}
+
static void ipip6_tunnel_setup(struct net_device *dev)
{
dev->netdev_ops = &ipip6_netdev_ops;
- dev->destructor = free_netdev;
+ dev->destructor = ipip6_dev_free;
dev->type = ARPHRD_SIT;
dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
@@ -1098,9 +1139,10 @@ static void ipip6_tunnel_setup(struct net_device *dev)
dev->iflink = 0;
dev->addr_len = 4;
dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->features |= NETIF_F_LLTX;
}
-static void ipip6_tunnel_init(struct net_device *dev)
+static int ipip6_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
@@ -1111,9 +1153,14 @@ static void ipip6_tunnel_init(struct net_device *dev)
memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
ipip6_tunnel_bind_dev(dev);
+ dev->tstats = alloc_percpu(struct pcpu_tstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
+ return 0;
}
-static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
+static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
@@ -1128,11 +1175,15 @@ static void __net_init ipip6_fb_tunnel_init(struct net_device *dev)
iph->ihl = 5;
iph->ttl = 64;
+ dev->tstats = alloc_percpu(struct pcpu_tstats);
+ if (!dev->tstats)
+ return -ENOMEM;
dev_hold(dev);
sitn->tunnels_wc[0] = tunnel;
+ return 0;
}
-static struct xfrm_tunnel sit_handler = {
+static struct xfrm_tunnel sit_handler __read_mostly = {
.handler = ipip6_rcv,
.err_handler = ipip6_err,
.priority = 1,
@@ -1173,7 +1224,10 @@ static int __net_init sit_init_net(struct net *net)
}
dev_net_set(sitn->fb_tunnel_dev, net);
- ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
+ err = ipip6_fb_tunnel_init(sitn->fb_tunnel_dev);
+ if (err)
+ goto err_dev_free;
+
ipip6_tunnel_clone_6rd(sitn->fb_tunnel_dev, sitn);
if ((err = register_netdev(sitn->fb_tunnel_dev)))
@@ -1183,7 +1237,8 @@ static int __net_init sit_init_net(struct net *net)
err_reg_dev:
dev_put(sitn->fb_tunnel_dev);
- free_netdev(sitn->fb_tunnel_dev);
+err_dev_free:
+ ipip6_dev_free(sitn->fb_tunnel_dev);
err_alloc_dev:
return err;
}
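A recurring theme in the sit.c changes above is moving the hot rx/tx counters into per-CPU storage (alloc_percpu()/this_cpu_ptr()) and folding them into dev->stats only when ->ndo_get_stats runs, so the transmit and receive paths never contend on a shared counter. A rough userspace analogue, with a fixed number of slots standing in for possible CPUs:

#include <stdio.h>

#define NSLOTS 4        /* stands in for the number of possible CPUs */

struct tstats {
        unsigned long rx_packets, rx_bytes;
        unsigned long tx_packets, tx_bytes;
};

static struct tstats pcpu[NSLOTS];      /* one slot per CPU/thread */

/* Fast path: touch only this CPU's slot (no locking, no shared line). */
static void count_tx(int cpu, unsigned long len)
{
        pcpu[cpu].tx_packets++;
        pcpu[cpu].tx_bytes += len;
}

/* Slow path (ndo_get_stats): fold every slot into one aggregate. */
static struct tstats get_stats(void)
{
        struct tstats sum = { 0 };
        int i;

        for (i = 0; i < NSLOTS; i++) {
                sum.rx_packets += pcpu[i].rx_packets;
                sum.rx_bytes   += pcpu[i].rx_bytes;
                sum.tx_packets += pcpu[i].tx_packets;
                sum.tx_bytes   += pcpu[i].tx_bytes;
        }
        return sum;
}

int main(void)
{
        count_tx(0, 1500);
        count_tx(2, 64);
        printf("tx_packets=%lu tx_bytes=%lu\n",
               get_stats().tx_packets, get_stats().tx_bytes);
        return 0;
}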
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index fe6d40418c0b..8d93f6d81979 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -139,7 +139,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
return -EINVAL;
if (usin->sin6_family != AF_INET6)
- return(-EAFNOSUPPORT);
+ return -EAFNOSUPPORT;
memset(&fl, 0, sizeof(fl));
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index fc3c86a47452..d9864725d0c6 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -30,8 +30,8 @@
#include <net/protocol.h>
#include <net/xfrm.h>
-static struct xfrm6_tunnel *tunnel6_handlers;
-static struct xfrm6_tunnel *tunnel46_handlers;
+static struct xfrm6_tunnel *tunnel6_handlers __read_mostly;
+static struct xfrm6_tunnel *tunnel46_handlers __read_mostly;
static DEFINE_MUTEX(tunnel6_mutex);
int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family)
@@ -51,7 +51,7 @@ int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family)
}
handler->next = *pprev;
- *pprev = handler;
+ rcu_assign_pointer(*pprev, handler);
ret = 0;
@@ -88,6 +88,11 @@ int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family)
EXPORT_SYMBOL(xfrm6_tunnel_deregister);
+#define for_each_tunnel_rcu(head, handler) \
+ for (handler = rcu_dereference(head); \
+ handler != NULL; \
+ handler = rcu_dereference(handler->next)) \
+
static int tunnel6_rcv(struct sk_buff *skb)
{
struct xfrm6_tunnel *handler;
@@ -95,7 +100,7 @@ static int tunnel6_rcv(struct sk_buff *skb)
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
goto drop;
- for (handler = tunnel6_handlers; handler; handler = handler->next)
+ for_each_tunnel_rcu(tunnel6_handlers, handler)
if (!handler->handler(skb))
return 0;
@@ -113,7 +118,7 @@ static int tunnel46_rcv(struct sk_buff *skb)
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
goto drop;
- for (handler = tunnel46_handlers; handler; handler = handler->next)
+ for_each_tunnel_rcu(tunnel46_handlers, handler)
if (!handler->handler(skb))
return 0;
@@ -129,7 +134,7 @@ static void tunnel6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
{
struct xfrm6_tunnel *handler;
- for (handler = tunnel6_handlers; handler; handler = handler->next)
+ for_each_tunnel_rcu(tunnel6_handlers, handler)
if (!handler->err_handler(skb, opt, type, code, offset, info))
break;
}
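The tunnel6 hunks above wrap every step of the handler-chain walk in rcu_dereference() via for_each_tunnel_rcu() and publish new entries with rcu_assign_pointer(). The sketch below only shows the shape of that list and macro with plain pointers; the RCU primitives are noted in comments, so it illustrates the traversal, not the memory-ordering guarantees.

#include <stdio.h>

struct tunnel_handler {
        int (*handler)(int pkt);        /* returns 0 when it consumed pkt */
        struct tunnel_handler *next;    /* kernel: an __rcu pointer */
};

/* kernel: each load in this loop goes through rcu_dereference() */
#define for_each_handler(head, h) \
        for ((h) = (head); (h) != NULL; (h) = (h)->next)

static struct tunnel_handler *handlers; /* chain, sorted by priority */

static void register_handler(struct tunnel_handler *h)
{
        h->next = handlers;             /* kernel: rcu_assign_pointer() */
        handlers = h;                   /* kernel: rcu_assign_pointer() */
}

static int demo_handler(int pkt) { return pkt == 41 ? 0 : 1; }

int main(void)
{
        struct tunnel_handler t = { demo_handler, NULL };
        struct tunnel_handler *h;

        register_handler(&t);
        for_each_handler(handlers, h)
                if (!h->handler(41)) {
                        printf("handled\n");
                        break;
                }
        return 0;
}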
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 1dd1affdead2..5acb3560ff15 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -111,6 +111,15 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum)
return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr);
}
+static void udp_v6_rehash(struct sock *sk)
+{
+ u16 new_hash = udp6_portaddr_hash(sock_net(sk),
+ &inet6_sk(sk)->rcv_saddr,
+ inet_sk(sk)->inet_num);
+
+ udp_lib_rehash(sk, new_hash);
+}
+
static inline int compute_score(struct sock *sk, struct net *net,
unsigned short hnum,
struct in6_addr *saddr, __be16 sport,
@@ -1447,6 +1456,7 @@ struct proto udpv6_prot = {
.backlog_rcv = udpv6_queue_rcv_skb,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
+ .rehash = udp_v6_rehash,
.get_port = udp_v6_get_port,
.memory_allocated = &udp_memory_allocated,
.sysctl_mem = sysctl_udp_mem,
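The new .rehash hook above appears to exist so that a UDPv6 socket whose bound address becomes specific after it was hashed (for example on connect()) can be re-filed under the bucket matching its new address+port hash. A toy userspace version of that relink step, with invented structures and a deliberately simple hash:

#include <stdio.h>
#include <string.h>

#define NBUCKETS 8

struct sock_entry {
        char addr[16];                  /* bound address, "" while unbound */
        unsigned short port;
        struct sock_entry *next;
};

static struct sock_entry *buckets[NBUCKETS];

static unsigned int hash_of(const char *addr, unsigned short port)
{
        unsigned int h = port;

        while (*addr)
                h = h * 31 + (unsigned char)*addr++;
        return h % NBUCKETS;
}

static void hash_insert(struct sock_entry *e)
{
        unsigned int b = hash_of(e->addr, e->port);

        e->next = buckets[b];
        buckets[b] = e;
}

static void hash_remove(struct sock_entry *e)
{
        struct sock_entry **pp = &buckets[hash_of(e->addr, e->port)];

        while (*pp && *pp != e)
                pp = &(*pp)->next;
        if (*pp)
                *pp = e->next;
}

/* rehash: the bound address changed, so relink under the new hash. */
static void rehash(struct sock_entry *e, const char *new_addr)
{
        hash_remove(e);                 /* still hashed under the old address */
        strncpy(e->addr, new_addr, sizeof(e->addr) - 1);
        hash_insert(e);
}

int main(void)
{
        struct sock_entry s = { "", 5353, NULL };

        hash_insert(&s);                /* hashed while still unbound */
        rehash(&s, "2001:db8::1");      /* address learnt later: relink */
        printf("bucket=%u\n", hash_of(s.addr, s.port));
        return 0;
}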
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 6baeabbbca82..39676eac3a37 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -199,7 +199,7 @@ static inline int xfrm6_garbage_collect(struct dst_ops *ops)
struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
xfrm6_policy_afinfo.garbage_collect(net);
- return (atomic_read(&ops->entries) > ops->gc_thresh * 2);
+ return atomic_read(&ops->entries) > ops->gc_thresh * 2;
}
static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu)
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index f417b77fa0e1..a67575d472a3 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -20,23 +20,27 @@
#include <net/addrconf.h>
static void
-__xfrm6_init_tempsel(struct xfrm_state *x, struct flowi *fl,
- struct xfrm_tmpl *tmpl,
- xfrm_address_t *daddr, xfrm_address_t *saddr)
+__xfrm6_init_tempsel(struct xfrm_selector *sel, struct flowi *fl)
{
/* Initialize temporary selector matching only
* to current session. */
- ipv6_addr_copy((struct in6_addr *)&x->sel.daddr, &fl->fl6_dst);
- ipv6_addr_copy((struct in6_addr *)&x->sel.saddr, &fl->fl6_src);
- x->sel.dport = xfrm_flowi_dport(fl);
- x->sel.dport_mask = htons(0xffff);
- x->sel.sport = xfrm_flowi_sport(fl);
- x->sel.sport_mask = htons(0xffff);
- x->sel.family = AF_INET6;
- x->sel.prefixlen_d = 128;
- x->sel.prefixlen_s = 128;
- x->sel.proto = fl->proto;
- x->sel.ifindex = fl->oif;
+ ipv6_addr_copy((struct in6_addr *)&sel->daddr, &fl->fl6_dst);
+ ipv6_addr_copy((struct in6_addr *)&sel->saddr, &fl->fl6_src);
+ sel->dport = xfrm_flowi_dport(fl);
+ sel->dport_mask = htons(0xffff);
+ sel->sport = xfrm_flowi_sport(fl);
+ sel->sport_mask = htons(0xffff);
+ sel->family = AF_INET6;
+ sel->prefixlen_d = 128;
+ sel->prefixlen_s = 128;
+ sel->proto = fl->proto;
+ sel->ifindex = fl->oif;
+}
+
+static void
+xfrm6_init_temprop(struct xfrm_state *x, struct xfrm_tmpl *tmpl,
+ xfrm_address_t *daddr, xfrm_address_t *saddr)
+{
x->id = tmpl->id;
if (ipv6_addr_any((struct in6_addr*)&x->id.daddr))
memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr));
@@ -168,6 +172,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = {
.eth_proto = htons(ETH_P_IPV6),
.owner = THIS_MODULE,
.init_tempsel = __xfrm6_init_tempsel,
+ .init_temprop = xfrm6_init_temprop,
.tmpl_sort = __xfrm6_tmpl_sort,
.state_sort = __xfrm6_state_sort,
.output = xfrm6_output,
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 2ce3a8278f26..ac7584b946a5 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -317,13 +317,13 @@ static const struct xfrm_type xfrm6_tunnel_type = {
.output = xfrm6_tunnel_output,
};
-static struct xfrm6_tunnel xfrm6_tunnel_handler = {
+static struct xfrm6_tunnel xfrm6_tunnel_handler __read_mostly = {
.handler = xfrm6_tunnel_rcv,
.err_handler = xfrm6_tunnel_err,
.priority = 2,
};
-static struct xfrm6_tunnel xfrm46_tunnel_handler = {
+static struct xfrm6_tunnel xfrm46_tunnel_handler __read_mostly = {
.handler = xfrm6_tunnel_rcv,
.err_handler = xfrm6_tunnel_err,
.priority = 2,
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 79986a674f6e..bf3635129b17 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -573,9 +573,9 @@ static int irda_find_lsap_sel(struct irda_sock *self, char *name)
/* Requested object/attribute doesn't exist */
if((self->errno == IAS_CLASS_UNKNOWN) ||
(self->errno == IAS_ATTRIB_UNKNOWN))
- return (-EADDRNOTAVAIL);
+ return -EADDRNOTAVAIL;
else
- return (-EHOSTUNREACH);
+ return -EHOSTUNREACH;
}
/* Get the remote TSAP selector */
@@ -663,7 +663,7 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
__func__, name);
self->daddr = DEV_ADDR_ANY;
kfree(discoveries);
- return(-ENOTUNIQ);
+ return -ENOTUNIQ;
}
/* First time we found that one, save it ! */
daddr = self->daddr;
@@ -677,7 +677,7 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
IRDA_DEBUG(0, "%s(), unexpected IAS query failure\n", __func__);
self->daddr = DEV_ADDR_ANY;
kfree(discoveries);
- return(-EHOSTUNREACH);
+ return -EHOSTUNREACH;
break;
}
}
@@ -689,7 +689,7 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
IRDA_DEBUG(1, "%s(), cannot discover service ''%s'' in any device !!!\n",
__func__, name);
self->daddr = DEV_ADDR_ANY;
- return(-EADDRNOTAVAIL);
+ return -EADDRNOTAVAIL;
}
/* Revert back to discovered device & service */
@@ -824,8 +824,8 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name);
if (err < 0) {
- kfree(self->ias_obj->name);
- kfree(self->ias_obj);
+ irias_delete_object(self->ias_obj);
+ self->ias_obj = NULL;
goto out;
}
@@ -2465,9 +2465,9 @@ bed:
/* Requested object/attribute doesn't exist */
if((self->errno == IAS_CLASS_UNKNOWN) ||
(self->errno == IAS_ATTRIB_UNKNOWN))
- return (-EADDRNOTAVAIL);
+ return -EADDRNOTAVAIL;
else
- return (-EHOSTUNREACH);
+ return -EHOSTUNREACH;
}
/* Translate from internal to user structure */
diff --git a/net/irda/discovery.c b/net/irda/discovery.c
index c1c8ae939126..36c3f037f172 100644
--- a/net/irda/discovery.c
+++ b/net/irda/discovery.c
@@ -315,7 +315,7 @@ struct irda_device_info *irlmp_copy_discoveries(hashbin_t *log, int *pn,
/* Get the actual number of device in the buffer and return */
*pn = i;
- return(buffer);
+ return buffer;
}
#ifdef CONFIG_PROC_FS
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index faa82ca2dfdc..a39cca8331df 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -449,8 +449,8 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
}
#ifdef SERIAL_DO_RESTART
- return ((self->flags & ASYNC_HUP_NOTIFY) ?
- -EAGAIN : -ERESTARTSYS);
+ return (self->flags & ASYNC_HUP_NOTIFY) ?
+ -EAGAIN : -ERESTARTSYS;
#else
return -EAGAIN;
#endif
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index a788f9e9427d..6130f9d9dbe1 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -1102,7 +1102,7 @@ int irlan_extract_param(__u8 *buf, char *name, char *value, __u16 *len)
memcpy(&val_len, buf+n, 2); /* To avoid alignment problems */
le16_to_cpus(&val_len); n+=2;
- if (val_len > 1016) {
+ if (val_len >= 1016) {
IRDA_DEBUG(2, "%s(), parameter length to long\n", __func__ );
return -RSP_INVALID_COMMAND_FORMAT;
}
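The usual reading of the val_len fix above is an off-by-one: the extracted value is later NUL-terminated at index val_len in a buffer that callers size at 1016 bytes, so a length of exactly 1016 already writes one byte past the end, and the comparison has to be >= rather than >. The general shape of the check, as a generic sketch rather than the IrLAN parser:

#include <stdio.h>
#include <string.h>

#define VALUE_MAX 1016

/* Copy len bytes into a VALUE_MAX buffer and NUL-terminate.
 * len == VALUE_MAX must be rejected: value[len] would be out of bounds. */
static int extract_value(char *value, const char *src, size_t len)
{
        if (len >= VALUE_MAX)
                return -1;
        memcpy(value, src, len);
        value[len] = '\0';
        return 0;
}

int main(void)
{
        char value[VALUE_MAX];

        printf("%d\n", extract_value(value, "FILTER_TYPE", 11));  /*  0 */
        printf("%d\n", extract_value(value, "", VALUE_MAX));      /* -1 */
        return 0;
}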
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index 9616c32d1076..8ee1ff6c742f 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -45,13 +45,11 @@ static int irlan_eth_close(struct net_device *dev);
static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
struct net_device *dev);
static void irlan_eth_set_multicast_list( struct net_device *dev);
-static struct net_device_stats *irlan_eth_get_stats(struct net_device *dev);
static const struct net_device_ops irlan_eth_netdev_ops = {
.ndo_open = irlan_eth_open,
.ndo_stop = irlan_eth_close,
.ndo_start_xmit = irlan_eth_xmit,
- .ndo_get_stats = irlan_eth_get_stats,
.ndo_set_multicast_list = irlan_eth_set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
@@ -169,6 +167,7 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
{
struct irlan_cb *self = netdev_priv(dev);
int ret;
+ unsigned int len;
/* skb headroom large enough to contain all IrDA-headers? */
if ((skb_headroom(skb) < self->max_header_size) || (skb_shared(skb))) {
@@ -188,6 +187,7 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
dev->trans_start = jiffies;
+ len = skb->len;
/* Now queue the packet in the transport layer */
if (self->use_udata)
ret = irttp_udata_request(self->tsap_data, skb);
@@ -206,10 +206,10 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
* tried :-) DB
*/
/* irttp_data_request already free the packet */
- self->stats.tx_dropped++;
+ dev->stats.tx_dropped++;
} else {
- self->stats.tx_packets++;
- self->stats.tx_bytes += skb->len;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += len;
}
return NETDEV_TX_OK;
@@ -224,15 +224,16 @@ static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb)
{
struct irlan_cb *self = instance;
+ struct net_device *dev = self->dev;
if (skb == NULL) {
- ++self->stats.rx_dropped;
+ dev->stats.rx_dropped++;
return 0;
}
if (skb->len < ETH_HLEN) {
IRDA_DEBUG(0, "%s() : IrLAN frame too short (%d)\n",
__func__, skb->len);
- ++self->stats.rx_dropped;
+ dev->stats.rx_dropped++;
dev_kfree_skb(skb);
return 0;
}
@@ -242,10 +243,10 @@ int irlan_eth_receive(void *instance, void *sap, struct sk_buff *skb)
* might have been previously set by the low level IrDA network
* device driver
*/
- skb->protocol = eth_type_trans(skb, self->dev); /* Remove eth header */
+ skb->protocol = eth_type_trans(skb, dev); /* Remove eth header */
- self->stats.rx_packets++;
- self->stats.rx_bytes += skb->len;
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
netif_rx(skb); /* Eat it! */
@@ -346,16 +347,3 @@ static void irlan_eth_set_multicast_list(struct net_device *dev)
else
irlan_set_broadcast_filter(self, FALSE);
}
-
-/*
- * Function irlan_get_stats (dev)
- *
- * Get the current statistics for this device
- *
- */
-static struct net_device_stats *irlan_eth_get_stats(struct net_device *dev)
-{
- struct irlan_cb *self = netdev_priv(dev);
-
- return &self->stats;
-}
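Besides switching to dev->stats, irlan_eth_xmit() above now captures skb->len in a local before calling irttp_data_request()/irttp_udata_request(), because those calls take ownership of the skb and may free it, so reading skb->len afterwards for the byte counter was a use-after-free. The pattern in isolation, with a fake transport and invented names:

#include <stdio.h>
#include <stdlib.h>

struct buf { size_t len; char *data; };

/* The transport takes ownership of b and may free it before returning. */
static int transport_send(struct buf *b)
{
        free(b->data);
        free(b);
        return 0;                       /* 0 = queued OK */
}

static unsigned long tx_packets, tx_bytes;

static void xmit(struct buf *b)
{
        size_t len = b->len;            /* capture before handing b away */
        int ret = transport_send(b);    /* b must not be touched after this */

        if (ret == 0) {
                tx_packets++;
                tx_bytes += len;        /* uses the saved length, not b->len */
        }
}

int main(void)
{
        struct buf *b = malloc(sizeof(*b));

        b->len = 1500;
        b->data = malloc(b->len);
        xmit(b);
        printf("tx_packets=%lu tx_bytes=%lu\n", tx_packets, tx_bytes);
        return 0;
}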
diff --git a/net/irda/irlan/irlan_event.c b/net/irda/irlan/irlan_event.c
index cbcb4eb54037..43f16040a6fe 100644
--- a/net/irda/irlan/irlan_event.c
+++ b/net/irda/irlan/irlan_event.c
@@ -24,7 +24,7 @@
#include <net/irda/irlan_event.h>
-char *irlan_state[] = {
+const char * const irlan_state[] = {
"IRLAN_IDLE",
"IRLAN_QUERY",
"IRLAN_CONN",
diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c
index 0e7d8bde145d..6115a44c0a24 100644
--- a/net/irda/irlmp.c
+++ b/net/irda/irlmp.c
@@ -939,7 +939,7 @@ struct irda_device_info *irlmp_get_discoveries(int *pn, __u16 mask, int nslots)
}
/* Return current cached discovery log */
- return(irlmp_copy_discoveries(irlmp->cachelog, pn, mask, TRUE));
+ return irlmp_copy_discoveries(irlmp->cachelog, pn, mask, TRUE);
}
EXPORT_SYMBOL(irlmp_get_discoveries);
diff --git a/net/irda/irlmp_frame.c b/net/irda/irlmp_frame.c
index 3750884094da..062e63b1c5c4 100644
--- a/net/irda/irlmp_frame.c
+++ b/net/irda/irlmp_frame.c
@@ -448,7 +448,7 @@ static struct lsap_cb *irlmp_find_lsap(struct lap_cb *self, __u8 dlsap_sel,
(self->cache.slsap_sel == slsap_sel) &&
(self->cache.dlsap_sel == dlsap_sel))
{
- return (self->cache.lsap);
+ return self->cache.lsap;
}
#endif
diff --git a/net/irda/irnet/irnet_irda.c b/net/irda/irnet/irnet_irda.c
index e98e40d76f4f..7f17a8020e8a 100644
--- a/net/irda/irnet/irnet_irda.c
+++ b/net/irda/irnet/irnet_irda.c
@@ -238,7 +238,7 @@ irnet_ias_to_tsap(irnet_socket * self,
DEXIT(IRDA_SR_TRACE, "\n");
/* Return the TSAP */
- return(dtsap_sel);
+ return dtsap_sel;
}
/*------------------------------------------------------------------*/
@@ -301,7 +301,7 @@ irnet_connect_tsap(irnet_socket * self)
{
clear_bit(0, &self->ttp_connect);
DERROR(IRDA_SR_ERROR, "connect aborted!\n");
- return(err);
+ return err;
}
/* Connect to remote device */
@@ -312,7 +312,7 @@ irnet_connect_tsap(irnet_socket * self)
{
clear_bit(0, &self->ttp_connect);
DERROR(IRDA_SR_ERROR, "connect aborted!\n");
- return(err);
+ return err;
}
/* The above call is non-blocking.
@@ -321,7 +321,7 @@ irnet_connect_tsap(irnet_socket * self)
* See you there ;-) */
DEXIT(IRDA_SR_TRACE, "\n");
- return(err);
+ return err;
}
/*------------------------------------------------------------------*/
@@ -362,10 +362,10 @@ irnet_discover_next_daddr(irnet_socket * self)
/* The above request is non-blocking.
* After a while, IrDA will call us back in irnet_discovervalue_confirm()
* We will then call irnet_ias_to_tsap() and come back here again... */
- return(0);
+ return 0;
}
else
- return(1);
+ return 1;
}
/*------------------------------------------------------------------*/
@@ -436,7 +436,7 @@ irnet_discover_daddr_and_lsap_sel(irnet_socket * self)
/* Follow me in irnet_discovervalue_confirm() */
DEXIT(IRDA_SR_TRACE, "\n");
- return(0);
+ return 0;
}
/*------------------------------------------------------------------*/
@@ -485,7 +485,7 @@ irnet_dname_to_daddr(irnet_socket * self)
/* No luck ! */
DEBUG(IRDA_SR_INFO, "cannot discover device ``%s'' !!!\n", self->rname);
kfree(discoveries);
- return(-EADDRNOTAVAIL);
+ return -EADDRNOTAVAIL;
}
@@ -527,7 +527,7 @@ irda_irnet_create(irnet_socket * self)
INIT_WORK(&self->disconnect_work, irnet_ppp_disconnect);
DEXIT(IRDA_SOCK_TRACE, "\n");
- return(0);
+ return 0;
}
/*------------------------------------------------------------------*/
@@ -601,7 +601,7 @@ irda_irnet_connect(irnet_socket * self)
* We will finish the connection procedure in irnet_connect_tsap().
*/
DEXIT(IRDA_SOCK_TRACE, "\n");
- return(0);
+ return 0;
}
/*------------------------------------------------------------------*/
@@ -733,7 +733,7 @@ irnet_daddr_to_dname(irnet_socket * self)
/* No luck ! */
DEXIT(IRDA_SERV_INFO, ": cannot discover device 0x%08x !!!\n", self->daddr);
kfree(discoveries);
- return(-EADDRNOTAVAIL);
+ return -EADDRNOTAVAIL;
}
/*------------------------------------------------------------------*/
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index dfe7b38dd4af..69f1fa64994e 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -166,7 +166,7 @@ irnet_ctrl_write(irnet_socket * ap,
}
/* Success : we have parsed all commands successfully */
- return(count);
+ return count;
}
#ifdef INITIAL_DISCOVERY
@@ -300,7 +300,7 @@ irnet_ctrl_read(irnet_socket * ap,
}
DEXIT(CTRL_TRACE, "\n");
- return(strlen(event));
+ return strlen(event);
}
#endif /* INITIAL_DISCOVERY */
@@ -409,7 +409,7 @@ irnet_ctrl_read(irnet_socket * ap,
}
DEXIT(CTRL_TRACE, "\n");
- return(strlen(event));
+ return strlen(event);
}
/*------------------------------------------------------------------*/
@@ -623,7 +623,7 @@ dev_irnet_poll(struct file * file,
mask |= irnet_ctrl_poll(ap, file, wait);
DEXIT(FS_TRACE, " - mask=0x%X\n", mask);
- return(mask);
+ return mask;
}
/*------------------------------------------------------------------*/
diff --git a/net/irda/irnet/irnet_ppp.h b/net/irda/irnet/irnet_ppp.h
index b5df2418f90c..940225866da0 100644
--- a/net/irda/irnet/irnet_ppp.h
+++ b/net/irda/irnet/irnet_ppp.h
@@ -103,7 +103,8 @@ static const struct file_operations irnet_device_fops =
.poll = dev_irnet_poll,
.unlocked_ioctl = dev_irnet_ioctl,
.open = dev_irnet_open,
- .release = dev_irnet_close
+ .release = dev_irnet_close,
+ .llseek = noop_llseek,
/* Also : llseek, readdir, mmap, flush, fsync, fasync, lock, readv, writev */
};
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 43040e97c474..d87c22df6f1e 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -565,12 +565,12 @@ pfkey_proto2satype(uint16_t proto)
static uint8_t pfkey_proto_to_xfrm(uint8_t proto)
{
- return (proto == IPSEC_PROTO_ANY ? 0 : proto);
+ return proto == IPSEC_PROTO_ANY ? 0 : proto;
}
static uint8_t pfkey_proto_from_xfrm(uint8_t proto)
{
- return (proto ? proto : IPSEC_PROTO_ANY);
+ return proto ? proto : IPSEC_PROTO_ANY;
}
static inline int pfkey_sockaddr_len(sa_family_t family)
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 58c6c4cda73b..8d9ce0accc98 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -132,7 +132,7 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
printk("\n");
}
- if (data_len < ETH_HLEN)
+ if (!pskb_may_pull(skb, ETH_HLEN))
goto error;
secpath_reset(skb);
@@ -144,7 +144,6 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
nf_reset(skb);
if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
- dev->last_rx = jiffies;
dev->stats.rx_packets++;
dev->stats.rx_bytes += data_len;
} else
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index ff954b3e94b6..39a21d0c61c4 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1768,7 +1768,7 @@ static const struct proto_ops pppol2tp_ops = {
.ioctl = pppox_ioctl,
};
-static struct pppox_proto pppol2tp_proto = {
+static const struct pppox_proto pppol2tp_proto = {
.create = pppol2tp_create,
.ioctl = pppol2tp_ioctl
};
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 023ba820236f..582612998211 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -1024,7 +1024,8 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname,
{
struct sock *sk = sock->sk;
struct llc_sock *llc = llc_sk(sk);
- int rc = -EINVAL, opt;
+ unsigned int opt;
+ int rc = -EINVAL;
lock_sock(sk);
if (unlikely(level != SOL_LLC || optlen != sizeof(int)))
diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c
index e4dae0244d76..cf4aea3ba30f 100644
--- a/net/llc/llc_station.c
+++ b/net/llc/llc_station.c
@@ -689,7 +689,7 @@ static void llc_station_rcv(struct sk_buff *skb)
int __init llc_station_init(void)
{
- u16 rc = -ENOBUFS;
+ int rc = -ENOBUFS;
struct sk_buff *skb;
struct llc_station_state_ev *ev;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 915ecf87e4ac..eb0f59977676 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -369,7 +369,7 @@ static int ieee80211_ifa_changed(struct notifier_block *nb,
if (sdata->vif.type != NL80211_IFTYPE_STATION)
return NOTIFY_DONE;
- idev = sdata->dev->ip_ptr;
+ idev = __in_dev_get_rtnl(sdata->dev);
if (!idev)
return NOTIFY_DONE;
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index f77a45625c0b..b0cc385bf989 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -103,6 +103,7 @@ ieee80211_rate_control_ops_get(const char *name)
struct rate_control_ops *ops;
const char *alg_name;
+ kparam_block_sysfs_write(ieee80211_default_rc_algo);
if (!name)
alg_name = ieee80211_default_rc_algo;
else
@@ -120,6 +121,7 @@ ieee80211_rate_control_ops_get(const char *name)
/* try built-in one if specific alg requested but not found */
if (!ops && strlen(CONFIG_MAC80211_RC_DEFAULT))
ops = ieee80211_try_rate_control_ops_get(CONFIG_MAC80211_RC_DEFAULT);
+ kparam_unblock_sysfs_write(ieee80211_default_rc_algo);
return ops;
}
@@ -205,7 +207,7 @@ static bool rc_no_data_or_no_ack(struct ieee80211_tx_rate_control *txrc)
fc = hdr->frame_control;
- return ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc));
+ return (info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc);
}
static void rc_send_low_broadcast(s8 *idx, u32 basic_rates, u8 max_rate_idx)
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 4f8ddba48011..0c043b6ce65e 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -40,6 +40,7 @@
#include <net/udp.h>
#include <net/icmp.h> /* for icmp_send */
#include <net/route.h>
+#include <net/ip6_checksum.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
@@ -637,10 +638,12 @@ void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
}
/* And finally the ICMP checksum */
- icmph->icmp6_cksum = 0;
- /* TODO IPv6: is this correct for ICMPv6? */
- ip_vs_checksum_complete(skb, icmp_offset);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ icmph->icmp6_cksum = ~csum_ipv6_magic(&iph->saddr, &iph->daddr,
+ skb->len - icmp_offset,
+ IPPROTO_ICMPV6, 0);
+ skb->csum_start = skb_network_header(skb) - skb->head + icmp_offset;
+ skb->csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
+ skb->ip_summed = CHECKSUM_PARTIAL;
if (inout)
IP_VS_DBG_PKT(11, pp, skb, (void *)ciph - (void *)iph,
@@ -924,6 +927,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
ip_vs_out_stats(cp, skb);
ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pp);
+ ip_vs_update_conntrack(skb, cp, 0);
ip_vs_conn_put(cp);
skb->ipvs_property = 1;
@@ -1380,8 +1384,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
if (af == AF_INET && (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
cp->protocol == IPPROTO_SCTP) {
if ((cp->state == IP_VS_SCTP_S_ESTABLISHED &&
- (atomic_read(&cp->in_pkts) %
- sysctl_ip_vs_sync_threshold[1]
+ (pkts % sysctl_ip_vs_sync_threshold[1]
== sysctl_ip_vs_sync_threshold[0])) ||
(cp->old_state != cp->state &&
((cp->state == IP_VS_SCTP_S_CLOSED) ||
@@ -1392,7 +1395,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
}
}
- if (af == AF_INET &&
+ /* Keep this block last: TCP and others with pp->num_states <= 1 */
+ else if (af == AF_INET &&
(ip_vs_sync_state & IP_VS_STATE_MASTER) &&
(((cp->protocol != IPPROTO_TCP ||
cp->state == IP_VS_TCP_S_ESTABLISHED) &&
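The ip_vs_nat_icmp_v6() hunk above recomputes the ICMPv6 checksum properly: it seeds icmp6_cksum with the IPv6 pseudo-header sum (csum_ipv6_magic) and marks the skb CHECKSUM_PARTIAL so the remainder is folded in later from csum_start/csum_offset. For reference, the full software computation looks roughly like this userspace sketch (explicit byte handling instead of the kernel's csum helpers; addresses and payload are made up):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t sum16(const uint8_t *p, size_t len, uint32_t sum)
{
        while (len > 1) {
                sum += (uint32_t)(p[0] << 8 | p[1]);
                p += 2;
                len -= 2;
        }
        if (len)
                sum += (uint32_t)(p[0] << 8);
        return sum;
}

/* One's-complement checksum of the IPv6 pseudo-header + ICMPv6 message.
 * msg must have its checksum field (bytes 2-3) zeroed by the caller.
 * The result is a host-order 16-bit value; store it back big-endian. */
static uint16_t icmp6_checksum(const struct in6_addr *src,
                               const struct in6_addr *dst,
                               const uint8_t *msg, size_t len)
{
        uint8_t ph[40];
        uint32_t sum;

        memcpy(ph, src, 16);
        memcpy(ph + 16, dst, 16);
        ph[32] = len >> 24; ph[33] = len >> 16; ph[34] = len >> 8; ph[35] = len;
        ph[36] = ph[37] = ph[38] = 0;
        ph[39] = IPPROTO_ICMPV6;        /* next header = 58 */

        sum = sum16(ph, sizeof(ph), 0);
        sum = sum16(msg, len, sum);
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        struct in6_addr src, dst;
        uint8_t echo[8] = { 128, 0, 0, 0, 0, 1, 0, 1 }; /* echo request, csum 0 */

        inet_pton(AF_INET6, "2001:db8::1", &src);
        inet_pton(AF_INET6, "2001:db8::2", &dst);
        printf("csum=0x%04x\n", icmp6_checksum(&src, &dst, echo, sizeof(echo)));
        return 0;
}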
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 0f0c079c422a..ca8ec8c4f311 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -61,7 +61,7 @@ static DEFINE_RWLOCK(__ip_vs_svc_lock);
static DEFINE_RWLOCK(__ip_vs_rs_lock);
/* lock for state and timeout tables */
-static DEFINE_RWLOCK(__ip_vs_securetcp_lock);
+static DEFINE_SPINLOCK(ip_vs_securetcp_lock);
/* lock for drop entry handling */
static DEFINE_SPINLOCK(__ip_vs_dropentry_lock);
@@ -204,7 +204,7 @@ static void update_defense_level(void)
spin_unlock(&__ip_vs_droppacket_lock);
/* secure_tcp */
- write_lock(&__ip_vs_securetcp_lock);
+ spin_lock(&ip_vs_securetcp_lock);
switch (sysctl_ip_vs_secure_tcp) {
case 0:
if (old_secure_tcp >= 2)
@@ -238,7 +238,7 @@ static void update_defense_level(void)
old_secure_tcp = sysctl_ip_vs_secure_tcp;
if (to_change >= 0)
ip_vs_protocol_timeout_change(sysctl_ip_vs_secure_tcp>1);
- write_unlock(&__ip_vs_securetcp_lock);
+ spin_unlock(&ip_vs_securetcp_lock);
local_bh_enable();
}
@@ -843,7 +843,7 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
return -EINVAL;
}
- dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC);
+ dest = kzalloc(sizeof(struct ip_vs_dest), GFP_KERNEL);
if (dest == NULL) {
pr_err("%s(): no memory.\n", __func__);
return -ENOMEM;
@@ -1177,7 +1177,7 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
}
#endif
- svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC);
+ svc = kzalloc(sizeof(struct ip_vs_service), GFP_KERNEL);
if (svc == NULL) {
IP_VS_DBG(1, "%s(): no memory\n", __func__);
ret = -ENOMEM;
@@ -2155,7 +2155,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
if (cmd != IP_VS_SO_SET_ADD
&& (svc == NULL || svc->protocol != usvc.protocol)) {
ret = -ESRCH;
- goto out_unlock;
+ goto out_drop_service;
}
switch (cmd) {
@@ -2189,6 +2189,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
ret = -EINVAL;
}
+out_drop_service:
if (svc)
ip_vs_service_put(svc);
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index f228a17ec649..7e9af5b76d9e 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -45,6 +45,7 @@
#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_helper.h>
#include <linux/gfp.h>
#include <net/protocol.h>
@@ -359,7 +360,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
buf_len = strlen(buf);
ct = nf_ct_get(skb, &ctinfo);
- if (ct && !nf_ct_is_untracked(ct)) {
+ if (ct && !nf_ct_is_untracked(ct) && nfct_nat(ct)) {
/* If mangling fails this function will return 0
* which will cause the packet to be dropped.
* Mangling can only fail under memory pressure,
@@ -409,7 +410,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
union nf_inet_addr to;
__be16 port;
struct ip_vs_conn *n_cp;
- struct nf_conn *ct;
#ifdef CONFIG_IP_VS_IPV6
/* This application helper doesn't work with IPv6 yet,
@@ -496,11 +496,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
ip_vs_control_add(n_cp, cp);
}
- ct = (struct nf_conn *)skb->nfct;
- if (ct && ct != &nf_conntrack_untracked)
- ip_vs_expect_related(skb, ct, n_cp,
- IPPROTO_TCP, &n_cp->dport, 1);
-
/*
* Move tunnel to listen state
*/
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 4c0855cb006e..2f982a4c4770 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -61,6 +61,7 @@ sctp_snat_handler(struct sk_buff *skb,
{
sctp_sctphdr_t *sctph;
unsigned int sctphoff;
+ struct sk_buff *iter;
__be32 crc32;
#ifdef CONFIG_IP_VS_IPV6
@@ -89,8 +90,8 @@ sctp_snat_handler(struct sk_buff *skb,
/* Calculate the checksum */
crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff);
- for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next)
- crc32 = sctp_update_cksum((u8 *) skb->data, skb_headlen(skb),
+ skb_walk_frags(skb, iter)
+ crc32 = sctp_update_cksum((u8 *) iter->data, skb_headlen(iter),
crc32);
crc32 = sctp_end_cksum(crc32);
sctph->checksum = crc32;
@@ -102,9 +103,9 @@ static int
sctp_dnat_handler(struct sk_buff *skb,
struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
{
-
sctp_sctphdr_t *sctph;
unsigned int sctphoff;
+ struct sk_buff *iter;
__be32 crc32;
#ifdef CONFIG_IP_VS_IPV6
@@ -133,8 +134,8 @@ sctp_dnat_handler(struct sk_buff *skb,
/* Calculate the checksum */
crc32 = sctp_start_cksum((u8 *) sctph, skb_headlen(skb) - sctphoff);
- for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next)
- crc32 = sctp_update_cksum((u8 *) skb->data, skb_headlen(skb),
+ skb_walk_frags(skb, iter)
+ crc32 = sctp_update_cksum((u8 *) iter->data, skb_headlen(iter),
crc32);
crc32 = sctp_end_cksum(crc32);
sctph->checksum = crc32;
@@ -145,9 +146,9 @@ sctp_dnat_handler(struct sk_buff *skb,
static int
sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
{
- struct sk_buff *list = skb_shinfo(skb)->frag_list;
unsigned int sctphoff;
struct sctphdr *sh, _sctph;
+ struct sk_buff *iter;
__le32 cmp;
__le32 val;
__u32 tmp;
@@ -166,9 +167,9 @@ sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
cmp = sh->checksum;
tmp = sctp_start_cksum((__u8 *) sh, skb_headlen(skb));
- for (; list; list = list->next)
- tmp = sctp_update_cksum((__u8 *) list->data,
- skb_headlen(list), tmp);
+ skb_walk_frags(skb, iter)
+ tmp = sctp_update_cksum((__u8 *) iter->data,
+ skb_headlen(iter), tmp);
val = sctp_end_cksum(tmp);
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index bbc1ac795952..727e45b66953 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -35,7 +35,7 @@
static LIST_HEAD(ip_vs_schedulers);
/* lock for service table */
-static DEFINE_RWLOCK(__ip_vs_sched_lock);
+static DEFINE_SPINLOCK(ip_vs_sched_lock);
/*
@@ -108,7 +108,7 @@ static struct ip_vs_scheduler *ip_vs_sched_getbyname(const char *sched_name)
IP_VS_DBG(2, "%s(): sched_name \"%s\"\n", __func__, sched_name);
- read_lock_bh(&__ip_vs_sched_lock);
+ spin_lock_bh(&ip_vs_sched_lock);
list_for_each_entry(sched, &ip_vs_schedulers, n_list) {
/*
@@ -122,14 +122,14 @@ static struct ip_vs_scheduler *ip_vs_sched_getbyname(const char *sched_name)
}
if (strcmp(sched_name, sched->name)==0) {
/* HIT */
- read_unlock_bh(&__ip_vs_sched_lock);
+ spin_unlock_bh(&ip_vs_sched_lock);
return sched;
}
if (sched->module)
module_put(sched->module);
}
- read_unlock_bh(&__ip_vs_sched_lock);
+ spin_unlock_bh(&ip_vs_sched_lock);
return NULL;
}
@@ -184,10 +184,10 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
/* increase the module use count */
ip_vs_use_count_inc();
- write_lock_bh(&__ip_vs_sched_lock);
+ spin_lock_bh(&ip_vs_sched_lock);
if (!list_empty(&scheduler->n_list)) {
- write_unlock_bh(&__ip_vs_sched_lock);
+ spin_unlock_bh(&ip_vs_sched_lock);
ip_vs_use_count_dec();
pr_err("%s(): [%s] scheduler already linked\n",
__func__, scheduler->name);
@@ -200,7 +200,7 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
*/
list_for_each_entry(sched, &ip_vs_schedulers, n_list) {
if (strcmp(scheduler->name, sched->name) == 0) {
- write_unlock_bh(&__ip_vs_sched_lock);
+ spin_unlock_bh(&ip_vs_sched_lock);
ip_vs_use_count_dec();
pr_err("%s(): [%s] scheduler already existed "
"in the system\n", __func__, scheduler->name);
@@ -211,7 +211,7 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
* Add it into the d-linked scheduler list
*/
list_add(&scheduler->n_list, &ip_vs_schedulers);
- write_unlock_bh(&__ip_vs_sched_lock);
+ spin_unlock_bh(&ip_vs_sched_lock);
pr_info("[%s] scheduler registered.\n", scheduler->name);
@@ -229,9 +229,9 @@ int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
return -EINVAL;
}
- write_lock_bh(&__ip_vs_sched_lock);
+ spin_lock_bh(&ip_vs_sched_lock);
if (list_empty(&scheduler->n_list)) {
- write_unlock_bh(&__ip_vs_sched_lock);
+ spin_unlock_bh(&ip_vs_sched_lock);
pr_err("%s(): [%s] scheduler is not in the list. failed\n",
__func__, scheduler->name);
return -EINVAL;
@@ -241,7 +241,7 @@ int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
* Remove it from the d-linked scheduler list
*/
list_del(&scheduler->n_list);
- write_unlock_bh(&__ip_vs_sched_lock);
+ spin_unlock_bh(&ip_vs_sched_lock);
/* decrease the module use count */
ip_vs_use_count_dec();
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 21e1a5e9b9d3..49df6bea6a2d 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -349,8 +349,8 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
}
#endif
-static void
-ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp)
+void
+ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin)
{
struct nf_conn *ct = (struct nf_conn *)skb->nfct;
struct nf_conntrack_tuple new_tuple;
@@ -365,11 +365,17 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp)
* real-server we will see RIP->DIP.
*/
new_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
- new_tuple.src.u3 = cp->daddr;
+ if (outin)
+ new_tuple.src.u3 = cp->daddr;
+ else
+ new_tuple.dst.u3 = cp->vaddr;
/*
* This will also take care of UDP and other protocols.
*/
- new_tuple.src.u.tcp.port = cp->dport;
+ if (outin)
+ new_tuple.src.u.tcp.port = cp->dport;
+ else
+ new_tuple.dst.u.tcp.port = cp->vport;
nf_conntrack_alter_reply(ct, &new_tuple);
}
@@ -428,7 +434,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
- ip_vs_update_conntrack(skb, cp);
+ ip_vs_update_conntrack(skb, cp, 1);
/* FIXME: when application helper enlarges the packet and the length
is larger than the MTU of outgoing device, there will be still
@@ -506,7 +512,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
- ip_vs_update_conntrack(skb, cp);
+ ip_vs_update_conntrack(skb, cp, 1);
/* FIXME: when application helper enlarges the packet and the length
is larger than the MTU of outgoing device, there will be still
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 7dcf7a404190..8d9e4c949b96 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -48,15 +48,17 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
{
unsigned int off, len;
struct nf_ct_ext_type *t;
+ size_t alloc_size;
rcu_read_lock();
t = rcu_dereference(nf_ct_ext_types[id]);
BUG_ON(t == NULL);
off = ALIGN(sizeof(struct nf_ct_ext), t->align);
len = off + t->len;
+ alloc_size = t->alloc_size;
rcu_read_unlock();
- *ext = kzalloc(t->alloc_size, gfp);
+ *ext = kzalloc(alloc_size, gfp);
if (!*ext)
return NULL;
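Editor's note: the hunk above copies t->alloc_size into a local while still inside rcu_read_lock(), so the (potentially sleeping) allocation no longer dereferences the RCU-protected type after the read-side critical section ends. A generic sketch of that pattern, with hypothetical names rather than the conntrack API:

	struct ext_cfg {
		size_t alloc_size;
	};

	static struct ext_cfg __rcu *live_cfg;	/* hypothetical RCU-managed pointer */

	static void *alloc_by_cfg(gfp_t gfp)
	{
		size_t sz;

		rcu_read_lock();
		sz = rcu_dereference(live_cfg)->alloc_size;	/* copy while protected */
		rcu_read_unlock();

		return kzalloc(sz, gfp);	/* may sleep; the RCU object is no longer touched */
	}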
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 53d892210a04..f64de9544866 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1376,7 +1376,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
unsigned int msglen, origlen;
const char *dptr, *end;
s16 diff, tdiff = 0;
- int ret;
+ int ret = NF_ACCEPT;
typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
if (ctinfo != IP_CT_ESTABLISHED &&
diff --git a/net/netfilter/nf_tproxy_core.c b/net/netfilter/nf_tproxy_core.c
index 5490fc37c92d..daab8c4a903c 100644
--- a/net/netfilter/nf_tproxy_core.c
+++ b/net/netfilter/nf_tproxy_core.c
@@ -70,7 +70,11 @@ nf_tproxy_destructor(struct sk_buff *skb)
int
nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
{
- if (inet_sk(sk)->transparent) {
+ bool transparent = (sk->sk_state == TCP_TIME_WAIT) ?
+ inet_twsk(sk)->tw_transparent :
+ inet_sk(sk)->transparent;
+
+ if (transparent) {
skb_orphan(skb);
skb->sk = sk;
skb->destructor = nf_tproxy_destructor;
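Editor's note: the fix above accounts for time-wait sockets, which are represented by struct inet_timewait_sock rather than a full struct inet_sock, so the transparent flag must be read from tw_transparent. Roughly, as a hedged helper (not an API this patch adds):

	static bool sk_is_transparent(const struct sock *sk)
	{
		/* time-wait sockets carry the flag in the timewait mini-socket */
		if (sk->sk_state == TCP_TIME_WAIT)
			return inet_twsk(sk)->tw_transparent;
		return inet_sk(sk)->transparent;
	}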
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index b46a8390896d..9228ee0dc11a 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -448,6 +448,7 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
{
__be16 _ports[2], *ports;
u8 nexthdr;
+ int poff;
memset(dst, 0, sizeof(*dst));
@@ -492,19 +493,13 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
return 0;
}
- switch (nexthdr) {
- case IPPROTO_TCP:
- case IPPROTO_UDP:
- case IPPROTO_UDPLITE:
- case IPPROTO_SCTP:
- case IPPROTO_DCCP:
- ports = skb_header_pointer(skb, protoff, sizeof(_ports),
+ poff = proto_ports_offset(nexthdr);
+ if (poff >= 0) {
+ ports = skb_header_pointer(skb, protoff + poff, sizeof(_ports),
&_ports);
- break;
- default:
+ } else {
_ports[0] = _ports[1] = 0;
ports = _ports;
- break;
}
if (!ports)
return -1;
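Editor's note: proto_ports_offset() returns the offset of the port (or SPI) field within the transport header for protocols that carry one, and a negative value otherwise, which is what lets the hunk above drop the protocol switch. A hedged sketch of the usage pattern, with illustrative names:

	/* sketch: pull the port pair for any port-carrying protocol, else zero it */
	static void example_get_ports(const struct sk_buff *skb, unsigned int thoff,
				      u8 proto, __be16 ports[2])
	{
		__be16 _ports[2];
		const __be16 *p = NULL;
		int poff = proto_ports_offset(proto);

		if (poff >= 0)
			p = skb_header_pointer(skb, thoff + poff,
					       sizeof(_ports), _ports);
		if (!p) {
			ports[0] = ports[1] = 0;
			return;
		}
		ports[0] = p[0];
		ports[1] = p[1];
	}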
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 2cbf380377d5..cd96ed3ccee4 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1406,7 +1406,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
struct netlink_sock *nlk = nlk_sk(sk);
int noblock = flags&MSG_DONTWAIT;
size_t copied;
- struct sk_buff *skb;
+ struct sk_buff *skb, *data_skb;
int err;
if (flags&MSG_OOB)
@@ -1418,59 +1418,35 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
if (skb == NULL)
goto out;
+ data_skb = skb;
+
#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
if (unlikely(skb_shinfo(skb)->frag_list)) {
- bool need_compat = !!(flags & MSG_CMSG_COMPAT);
-
/*
- * If this skb has a frag_list, then here that means that
- * we will have to use the frag_list skb for compat tasks
- * and the regular skb for non-compat tasks.
+	 * If this skb has a frag_list, then that means that we

+ * will have to use the frag_list skb's data for compat tasks
+ * and the regular skb's data for normal (non-compat) tasks.
*
- * The skb might (and likely will) be cloned, so we can't
- * just reset frag_list and go on with things -- we need to
- * keep that. For the compat case that's easy -- simply get
- * a reference to the compat skb and free the regular one
- * including the frag. For the non-compat case, we need to
- * avoid sending the frag to the user -- so assign NULL but
- * restore it below before freeing the skb.
+ * If we need to send the compat skb, assign it to the
+ * 'data_skb' variable so that it will be used below for data
+ * copying. We keep 'skb' for everything else, including
+ * freeing both later.
*/
- if (need_compat) {
- struct sk_buff *compskb = skb_shinfo(skb)->frag_list;
- skb_get(compskb);
- kfree_skb(skb);
- skb = compskb;
- } else {
- /*
- * Before setting frag_list to NULL, we must get a
- * private copy of skb if shared (because of MSG_PEEK)
- */
- if (skb_shared(skb)) {
- struct sk_buff *nskb;
-
- nskb = pskb_copy(skb, GFP_KERNEL);
- kfree_skb(skb);
- skb = nskb;
- err = -ENOMEM;
- if (!skb)
- goto out;
- }
- kfree_skb(skb_shinfo(skb)->frag_list);
- skb_shinfo(skb)->frag_list = NULL;
- }
+ if (flags & MSG_CMSG_COMPAT)
+ data_skb = skb_shinfo(skb)->frag_list;
}
#endif
msg->msg_namelen = 0;
- copied = skb->len;
+ copied = data_skb->len;
if (len < copied) {
msg->msg_flags |= MSG_TRUNC;
copied = len;
}
- skb_reset_transport_header(skb);
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ skb_reset_transport_header(data_skb);
+ err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);
if (msg->msg_name) {
struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
@@ -1490,7 +1466,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
}
siocb->scm->creds = *NETLINK_CREDS(skb);
if (flags & MSG_TRUNC)
- copied = skb->len;
+ copied = data_skb->len;
skb_free_datagram(sk, skb);
@@ -2126,6 +2102,26 @@ static void __net_exit netlink_net_exit(struct net *net)
#endif
}
+static void __init netlink_add_usersock_entry(void)
+{
+ unsigned long *listeners;
+ int groups = 32;
+
+ listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head),
+ GFP_KERNEL);
+ if (!listeners)
+		panic("netlink_add_usersock_entry: Cannot allocate listeners\n");
+
+ netlink_table_grab();
+
+ nl_table[NETLINK_USERSOCK].groups = groups;
+ nl_table[NETLINK_USERSOCK].listeners = listeners;
+ nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
+ nl_table[NETLINK_USERSOCK].registered = 1;
+
+ netlink_table_ungrab();
+}
+
static struct pernet_operations __net_initdata netlink_net_ops = {
.init = netlink_net_init,
.exit = netlink_net_exit,
@@ -2174,6 +2170,8 @@ static int __init netlink_proto_init(void)
hash->rehash_time = jiffies;
}
+ netlink_add_usersock_entry();
+
sock_register(&netlink_family_ops);
register_pernet_subsys(&netlink_net_ops);
/* The netlink device handler may be needed early. */
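Editor's note: two things change in af_netlink.c above. First, the recvmsg() compat path now only selects which skb's data to copy instead of freeing or cloning the queued skb, which keeps MSG_PEEK (shared skb) readers safe. Second, netlink_add_usersock_entry() pre-registers NETLINK_USERSOCK so it has a listeners bitmap from boot. A minimal sketch of the selection logic now done inline in netlink_recvmsg() (helper name is illustrative):

	static struct sk_buff *pick_data_skb(struct sk_buff *skb, int flags)
	{
	#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
		/* compat readers get the payload carried on the frag_list skb */
		if ((flags & MSG_CMSG_COMPAT) && skb_shinfo(skb)->frag_list)
			return skb_shinfo(skb)->frag_list;
	#endif
		return skb;	/* everyone else reads the regular skb's data */
	}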
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 9a17f28b1253..3616f27b9d46 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -488,7 +488,7 @@ retry:
skb->dev = dev;
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
- err = sock_tx_timestamp(msg, sk, skb_tx(skb));
+ err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
if (err < 0)
goto out_unlock;
@@ -1209,7 +1209,7 @@ static int packet_snd(struct socket *sock,
err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
if (err)
goto out_free;
- err = sock_tx_timestamp(msg, sk, skb_tx(skb));
+ err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
if (err < 0)
goto out_free;
diff --git a/net/phonet/Kconfig b/net/phonet/Kconfig
index 6ec7d55b1769..901956ada9c8 100644
--- a/net/phonet/Kconfig
+++ b/net/phonet/Kconfig
@@ -14,3 +14,14 @@ config PHONET
To compile this driver as a module, choose M here: the module
will be called phonet. If unsure, say N.
+
+config PHONET_PIPECTRLR
+ bool "Phonet Pipe Controller"
+ depends on PHONET
+	default n
+ help
+	  The Pipe Controller implementation in the Phonet stack, to support
+	  pipe data with Nokia Slim modems such as the WG2.5 used on the
+	  ST-Ericsson U8500 platform.
+
+ If unsure, say N.
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index 73aee7f2fcdc..fd95beb72f5d 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -251,6 +251,16 @@ int pn_skb_send(struct sock *sk, struct sk_buff *skb,
else if (phonet_address_lookup(net, daddr) == 0) {
dev = phonet_device_get(net);
skb->pkt_type = PACKET_LOOPBACK;
+ } else if (pn_sockaddr_get_object(target) == 0) {
+ /* Resource routing (small race until phonet_rcv()) */
+ struct sock *sk = pn_find_sock_by_res(net,
+ target->spn_resource);
+ if (sk) {
+ sock_put(sk);
+ dev = phonet_device_get(net);
+ skb->pkt_type = PACKET_LOOPBACK;
+ } else
+ dev = phonet_route_output(net, daddr);
} else
dev = phonet_route_output(net, daddr);
@@ -383,6 +393,13 @@ static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
goto out;
}
+ /* resource routing */
+ if (pn_sockaddr_get_object(&sa) == 0) {
+ struct sock *sk = pn_find_sock_by_res(net, sa.spn_resource);
+ if (sk)
+ return sk_receive_skb(sk, skb, 0);
+ }
+
/* check if we are the destination */
if (phonet_address_lookup(net, pn_sockaddr_get_addr(&sa)) == 0) {
/* Phonet packet input */
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
index 1bd38db4fe1e..2f032381bd45 100644
--- a/net/phonet/datagram.c
+++ b/net/phonet/datagram.c
@@ -52,6 +52,19 @@ static int pn_ioctl(struct sock *sk, int cmd, unsigned long arg)
answ = skb ? skb->len : 0;
release_sock(sk);
return put_user(answ, (int __user *)arg);
+
+ case SIOCPNADDRESOURCE:
+ case SIOCPNDELRESOURCE: {
+ u32 res;
+ if (get_user(res, (u32 __user *)arg))
+ return -EFAULT;
+ if (res >= 256)
+ return -EINVAL;
+ if (cmd == SIOCPNADDRESOURCE)
+ return pn_sock_bind_res(sk, res);
+ else
+ return pn_sock_unbind_res(sk, res);
+ }
}
return -ENOIOCTLCMD;
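Editor's note: the new ioctls let a Phonet datagram socket claim or release one of the 256 resource numbers. A hedged user-space sketch, assuming SIOCPNADDRESOURCE/SIOCPNDELRESOURCE are exported through <linux/phonet.h> as this series intends (note pn_sock_bind_res() also requires CAP_SYS_ADMIN):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <linux/phonet.h>

	static int phonet_claim_resource(int fd, uint32_t res)
	{
		/* res must be < 256; the kernel returns -EINVAL otherwise */
		return ioctl(fd, SIOCPNADDRESOURCE, &res);	/* 0 on success */
	}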
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index b2a3ae6cad78..aa3d8700d213 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -88,6 +88,15 @@ static int pep_reply(struct sock *sk, struct sk_buff *oskb,
const struct pnpipehdr *oph = pnp_hdr(oskb);
struct pnpipehdr *ph;
struct sk_buff *skb;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ const struct phonethdr *hdr = pn_hdr(oskb);
+ struct sockaddr_pn spn = {
+ .spn_family = AF_PHONET,
+ .spn_resource = 0xD9,
+ .spn_dev = hdr->pn_sdev,
+ .spn_obj = hdr->pn_sobj,
+ };
+#endif
skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
if (!skb)
@@ -105,10 +114,271 @@ static int pep_reply(struct sock *sk, struct sk_buff *oskb,
ph->pipe_handle = oph->pipe_handle;
ph->error_code = code;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ return pn_skb_send(sk, skb, &spn);
+#else
return pn_skb_send(sk, skb, &pipe_srv);
+#endif
}
#define PAD 0x00
+
+#ifdef CONFIG_PHONET_PIPECTRLR
+static u8 pipe_negotiate_fc(u8 *host_fc, u8 *remote_fc, int len)
+{
+ int i, j;
+ u8 base_fc, final_fc;
+
+ for (i = 0; i < len; i++) {
+ base_fc = host_fc[i];
+ for (j = 0; j < len; j++) {
+ if (remote_fc[j] == base_fc) {
+ final_fc = base_fc;
+ goto done;
+ }
+ }
+ }
+ return -EINVAL;
+
+done:
+ return final_fc;
+
+}
+
+static int pipe_get_flow_info(struct sock *sk, struct sk_buff *skb,
+ u8 *pref_rx_fc, u8 *req_tx_fc)
+{
+ struct pnpipehdr *hdr;
+ u8 n_sb;
+
+ if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
+ return -EINVAL;
+
+ hdr = pnp_hdr(skb);
+ n_sb = hdr->data[4];
+
+ __skb_pull(skb, sizeof(*hdr) + 4);
+ while (n_sb > 0) {
+ u8 type, buf[3], len = sizeof(buf);
+ u8 *data = pep_get_sb(skb, &type, &len, buf);
+
+ if (data == NULL)
+ return -EINVAL;
+
+ switch (type) {
+ case PN_PIPE_SB_REQUIRED_FC_TX:
+ if (len < 3 || (data[2] | data[3] | data[4]) > 3)
+ break;
+ req_tx_fc[0] = data[2];
+ req_tx_fc[1] = data[3];
+ req_tx_fc[2] = data[4];
+ break;
+
+ case PN_PIPE_SB_PREFERRED_FC_RX:
+ if (len < 3 || (data[2] | data[3] | data[4]) > 3)
+ break;
+ pref_rx_fc[0] = data[2];
+ pref_rx_fc[1] = data[3];
+ pref_rx_fc[2] = data[4];
+ break;
+
+ }
+ n_sb--;
+ }
+ return 0;
+}
+
+static int pipe_handler_send_req(struct sock *sk, u16 dobj, u8 utid,
+ u8 msg_id, u8 p_handle, gfp_t priority)
+{
+ int len;
+ struct pnpipehdr *ph;
+ struct sk_buff *skb;
+ struct sockaddr_pn spn = {
+ .spn_family = AF_PHONET,
+ .spn_resource = 0xD9,
+ .spn_dev = pn_dev(dobj),
+ .spn_obj = pn_obj(dobj),
+ };
+
+ static const u8 data[4] = {
+ PAD, PAD, PAD, PAD,
+ };
+
+ switch (msg_id) {
+ case PNS_PEP_CONNECT_REQ:
+ len = sizeof(data);
+ break;
+
+ case PNS_PEP_DISCONNECT_REQ:
+ case PNS_PEP_ENABLE_REQ:
+ case PNS_PEP_DISABLE_REQ:
+ len = 0;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
+ if (!skb)
+ return -ENOMEM;
+ skb_set_owner_w(skb, sk);
+
+ skb_reserve(skb, MAX_PNPIPE_HEADER);
+ if (len) {
+ __skb_put(skb, len);
+ skb_copy_to_linear_data(skb, data, len);
+ }
+ __skb_push(skb, sizeof(*ph));
+ skb_reset_transport_header(skb);
+ ph = pnp_hdr(skb);
+ ph->utid = utid;
+ ph->message_id = msg_id;
+ ph->pipe_handle = p_handle;
+ ph->error_code = PN_PIPE_NO_ERROR;
+
+ return pn_skb_send(sk, skb, &spn);
+}
+
+static int pipe_handler_send_created_ind(struct sock *sk, u16 dobj,
+ u8 utid, u8 p_handle, u8 msg_id, u8 tx_fc, u8 rx_fc)
+{
+ int err_code;
+ struct pnpipehdr *ph;
+ struct sk_buff *skb;
+ struct sockaddr_pn spn = {
+ .spn_family = AF_PHONET,
+ .spn_resource = 0xD9,
+ .spn_dev = pn_dev(dobj),
+ .spn_obj = pn_obj(dobj),
+ };
+
+ static u8 data[4] = {
+ 0x03, 0x04,
+ };
+ data[2] = tx_fc;
+ data[3] = rx_fc;
+
+ /*
+	 * Actually, what follows is the number of sub-blocks, not an error
+	 * code: the Pipe_created_ind message format has no error code
+	 * field. However, the Phonet stack always sends an error code as
+	 * part of pnpipehdr, so that err_code field is used to carry the
+	 * number of sub-blocks.
+ */
+ err_code = 0x01;
+
+ skb = alloc_skb(MAX_PNPIPE_HEADER + sizeof(data), GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+ skb_set_owner_w(skb, sk);
+
+ skb_reserve(skb, MAX_PNPIPE_HEADER);
+ __skb_put(skb, sizeof(data));
+ skb_copy_to_linear_data(skb, data, sizeof(data));
+ __skb_push(skb, sizeof(*ph));
+ skb_reset_transport_header(skb);
+ ph = pnp_hdr(skb);
+ ph->utid = utid;
+ ph->message_id = msg_id;
+ ph->pipe_handle = p_handle;
+ ph->error_code = err_code;
+
+ return pn_skb_send(sk, skb, &spn);
+}
+
+static int pipe_handler_send_ind(struct sock *sk, u16 dobj, u8 utid,
+ u8 p_handle, u8 msg_id)
+{
+ int err_code;
+ struct pnpipehdr *ph;
+ struct sk_buff *skb;
+ struct sockaddr_pn spn = {
+ .spn_family = AF_PHONET,
+ .spn_resource = 0xD9,
+ .spn_dev = pn_dev(dobj),
+ .spn_obj = pn_obj(dobj),
+ };
+
+ /*
+	 * Actually, what follows is only a filler: the
+	 * Pipe_enabled/disabled_ind message format has no error code
+	 * field. However, the Phonet stack always sends an error code as
+	 * part of pnpipehdr, so that err_code field carries the filler
+	 * value.
+ */
+ err_code = 0x0;
+
+ skb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+ skb_set_owner_w(skb, sk);
+
+ skb_reserve(skb, MAX_PNPIPE_HEADER);
+ __skb_push(skb, sizeof(*ph));
+ skb_reset_transport_header(skb);
+ ph = pnp_hdr(skb);
+ ph->utid = utid;
+ ph->message_id = msg_id;
+ ph->pipe_handle = p_handle;
+ ph->error_code = err_code;
+
+ return pn_skb_send(sk, skb, &spn);
+}
+
+static int pipe_handler_enable_pipe(struct sock *sk, int cmd)
+{
+ int ret;
+ struct pep_sock *pn = pep_sk(sk);
+
+ switch (cmd) {
+ case PNPIPE_ENABLE:
+ ret = pipe_handler_send_req(sk, pn->pn_sk.sobject,
+ PNS_PIPE_ENABLE_UTID, PNS_PEP_ENABLE_REQ,
+ pn->pipe_handle, GFP_ATOMIC);
+ break;
+
+ case PNPIPE_DISABLE:
+ ret = pipe_handler_send_req(sk, pn->pn_sk.sobject,
+ PNS_PIPE_DISABLE_UTID, PNS_PEP_DISABLE_REQ,
+ pn->pipe_handle, GFP_ATOMIC);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int pipe_handler_create_pipe(struct sock *sk, int pipe_handle, int cmd)
+{
+ int ret;
+ struct pep_sock *pn = pep_sk(sk);
+
+ switch (cmd) {
+ case PNPIPE_CREATE:
+ ret = pipe_handler_send_req(sk, pn->pn_sk.sobject,
+ PNS_PEP_CONNECT_UTID, PNS_PEP_CONNECT_REQ,
+ pipe_handle, GFP_ATOMIC);
+ break;
+
+ case PNPIPE_DESTROY:
+ ret = pipe_handler_send_req(sk, pn->remote_pep,
+ PNS_PEP_DISCONNECT_UTID,
+ PNS_PEP_DISCONNECT_REQ,
+ pn->pipe_handle, GFP_ATOMIC);
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+#endif
+
static int pep_accept_conn(struct sock *sk, struct sk_buff *skb)
{
static const u8 data[20] = {
@@ -173,6 +443,14 @@ static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority)
struct pep_sock *pn = pep_sk(sk);
struct pnpipehdr *ph;
struct sk_buff *skb;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ struct sockaddr_pn spn = {
+ .spn_family = AF_PHONET,
+ .spn_resource = 0xD9,
+ .spn_dev = pn_dev(pn->remote_pep),
+ .spn_obj = pn_obj(pn->remote_pep),
+ };
+#endif
skb = alloc_skb(MAX_PNPIPE_HEADER + 4, priority);
if (!skb)
@@ -192,7 +470,11 @@ static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority)
ph->data[3] = PAD;
ph->data[4] = status;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ return pn_skb_send(sk, skb, &spn);
+#else
return pn_skb_send(sk, skb, &pipe_srv);
+#endif
}
/* Send our RX flow control information to the sender.
@@ -225,12 +507,13 @@ static void pipe_grant_credits(struct sock *sk)
static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
{
struct pep_sock *pn = pep_sk(sk);
- struct pnpipehdr *hdr = pnp_hdr(skb);
+ struct pnpipehdr *hdr;
int wake = 0;
if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
return -EINVAL;
+ hdr = pnp_hdr(skb);
if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n",
(unsigned)hdr->data[0]);
@@ -308,6 +591,12 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
struct pnpipehdr *hdr = pnp_hdr(skb);
struct sk_buff_head *queue;
int err = 0;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ struct phonethdr *ph = pn_hdr(skb);
+ static u8 host_pref_rx_fc[3], host_req_tx_fc[3];
+ u8 remote_pref_rx_fc[3], remote_req_tx_fc[3];
+ u8 negotiated_rx_fc, negotiated_tx_fc;
+#endif
BUG_ON(sk->sk_state == TCP_CLOSE_WAIT);
@@ -316,6 +605,40 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE);
break;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ case PNS_PEP_CONNECT_RESP:
+ if ((ph->pn_sdev == pn_dev(pn->remote_pep)) &&
+ (ph->pn_sobj == pn_obj(pn->remote_pep))) {
+ pipe_get_flow_info(sk, skb, remote_pref_rx_fc,
+ remote_req_tx_fc);
+
+ negotiated_tx_fc = pipe_negotiate_fc(remote_req_tx_fc,
+ host_pref_rx_fc,
+ sizeof(host_pref_rx_fc));
+ negotiated_rx_fc = pipe_negotiate_fc(host_req_tx_fc,
+ remote_pref_rx_fc,
+ sizeof(host_pref_rx_fc));
+
+ pn->pipe_state = PIPE_DISABLED;
+ pipe_handler_send_created_ind(sk, pn->remote_pep,
+ PNS_PIPE_CREATED_IND_UTID,
+ pn->pipe_handle, PNS_PIPE_CREATED_IND,
+ negotiated_tx_fc, negotiated_rx_fc);
+ pipe_handler_send_created_ind(sk, pn->pn_sk.sobject,
+ PNS_PIPE_CREATED_IND_UTID,
+ pn->pipe_handle, PNS_PIPE_CREATED_IND,
+ negotiated_tx_fc, negotiated_rx_fc);
+ } else {
+ pipe_handler_send_req(sk, pn->remote_pep,
+ PNS_PEP_CONNECT_UTID,
+ PNS_PEP_CONNECT_REQ, pn->pipe_handle,
+ GFP_ATOMIC);
+ pipe_get_flow_info(sk, skb, host_pref_rx_fc,
+ host_req_tx_fc);
+ }
+ break;
+#endif
+
case PNS_PEP_DISCONNECT_REQ:
pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
sk->sk_state = TCP_CLOSE_WAIT;
@@ -323,11 +646,41 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
sk->sk_state_change(sk);
break;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ case PNS_PEP_DISCONNECT_RESP:
+ pn->pipe_state = PIPE_IDLE;
+ pipe_handler_send_req(sk, pn->pn_sk.sobject,
+ PNS_PEP_DISCONNECT_UTID,
+ PNS_PEP_DISCONNECT_REQ, pn->pipe_handle,
+ GFP_KERNEL);
+ break;
+#endif
+
case PNS_PEP_ENABLE_REQ:
/* Wait for PNS_PIPE_(ENABLED|REDIRECTED)_IND */
pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
break;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ case PNS_PEP_ENABLE_RESP:
+ if ((ph->pn_sdev == pn_dev(pn->remote_pep)) &&
+ (ph->pn_sobj == pn_obj(pn->remote_pep))) {
+ pn->pipe_state = PIPE_ENABLED;
+ pipe_handler_send_ind(sk, pn->remote_pep,
+ PNS_PIPE_ENABLED_IND_UTID,
+ pn->pipe_handle, PNS_PIPE_ENABLED_IND);
+ pipe_handler_send_ind(sk, pn->pn_sk.sobject,
+ PNS_PIPE_ENABLED_IND_UTID,
+ pn->pipe_handle, PNS_PIPE_ENABLED_IND);
+ } else
+ pipe_handler_send_req(sk, pn->remote_pep,
+ PNS_PIPE_ENABLE_UTID,
+ PNS_PEP_ENABLE_REQ, pn->pipe_handle,
+ GFP_KERNEL);
+
+ break;
+#endif
+
case PNS_PEP_RESET_REQ:
switch (hdr->state_after_reset) {
case PN_PIPE_DISABLE:
@@ -346,6 +699,27 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
break;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ case PNS_PEP_DISABLE_RESP:
+ if ((ph->pn_sdev == pn_dev(pn->remote_pep)) &&
+ (ph->pn_sobj == pn_obj(pn->remote_pep))) {
+ pn->pipe_state = PIPE_DISABLED;
+ pipe_handler_send_ind(sk, pn->remote_pep,
+ PNS_PIPE_DISABLED_IND_UTID,
+ pn->pipe_handle,
+ PNS_PIPE_DISABLED_IND);
+ pipe_handler_send_ind(sk, pn->pn_sk.sobject,
+ PNS_PIPE_DISABLED_IND_UTID,
+ pn->pipe_handle,
+ PNS_PIPE_DISABLED_IND);
+ } else
+ pipe_handler_send_req(sk, pn->remote_pep,
+ PNS_PIPE_DISABLE_UTID,
+ PNS_PEP_DISABLE_REQ, pn->pipe_handle,
+ GFP_KERNEL);
+ break;
+#endif
+
case PNS_PEP_CTRL_REQ:
if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
atomic_inc(&sk->sk_drops);
@@ -519,6 +893,9 @@ static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
newpn->init_enable = enabled;
newpn->aligned = aligned;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ newpn->remote_pep = pn->remote_pep;
+#endif
BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue));
skb_queue_head(&newsk->sk_receive_queue, skb);
@@ -620,6 +997,28 @@ drop:
return err;
}
+static int pipe_do_remove(struct sock *sk)
+{
+ struct pep_sock *pn = pep_sk(sk);
+ struct pnpipehdr *ph;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(MAX_PNPIPE_HEADER, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_reserve(skb, MAX_PNPIPE_HEADER);
+ __skb_push(skb, sizeof(*ph));
+ skb_reset_transport_header(skb);
+ ph = pnp_hdr(skb);
+ ph->utid = 0;
+ ph->message_id = PNS_PIPE_REMOVE_REQ;
+ ph->pipe_handle = pn->pipe_handle;
+ ph->data[0] = PAD;
+
+ return pn_skb_send(sk, skb, &pipe_srv);
+}
+
/* associated socket ceases to exist */
static void pep_sock_close(struct sock *sk, long timeout)
{
@@ -638,7 +1037,10 @@ static void pep_sock_close(struct sock *sk, long timeout)
sk_for_each_safe(sknode, p, n, &pn->ackq)
sk_del_node_init(sknode);
sk->sk_state = TCP_CLOSE;
- }
+ } else if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
+ /* Forcefully remove dangling Phonet pipe */
+ pipe_do_remove(sk);
+
ifindex = pn->ifindex;
pn->ifindex = 0;
release_sock(sk);
@@ -756,6 +1158,10 @@ static int pep_setsockopt(struct sock *sk, int level, int optname,
{
struct pep_sock *pn = pep_sk(sk);
int val = 0, err = 0;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ int remote_pep;
+ int pipe_handle;
+#endif
if (level != SOL_PNPIPE)
return -ENOPROTOOPT;
@@ -766,6 +1172,48 @@ static int pep_setsockopt(struct sock *sk, int level, int optname,
lock_sock(sk);
switch (optname) {
+#ifdef CONFIG_PHONET_PIPECTRLR
+ case PNPIPE_CREATE:
+ if (val) {
+ if (pn->pipe_state > PIPE_IDLE) {
+ err = -EFAULT;
+ break;
+ }
+ remote_pep = val & 0xFFFF;
+ pipe_handle = (val >> 16) & 0xFF;
+ pn->remote_pep = remote_pep;
+ err = pipe_handler_create_pipe(sk, pipe_handle,
+ PNPIPE_CREATE);
+ break;
+ }
+
+ case PNPIPE_ENABLE:
+ if (pn->pipe_state != PIPE_DISABLED) {
+ err = -EFAULT;
+ break;
+ }
+ err = pipe_handler_enable_pipe(sk, PNPIPE_ENABLE);
+ break;
+
+ case PNPIPE_DISABLE:
+ if (pn->pipe_state != PIPE_ENABLED) {
+ err = -EFAULT;
+ break;
+ }
+
+ err = pipe_handler_enable_pipe(sk, PNPIPE_DISABLE);
+ break;
+
+ case PNPIPE_DESTROY:
+ if (pn->pipe_state < PIPE_DISABLED) {
+ err = -EFAULT;
+ break;
+ }
+
+ err = pipe_handler_create_pipe(sk, 0x0, PNPIPE_DESTROY);
+ break;
+#endif
+
case PNPIPE_ENCAP:
if (val && val != PNPIPE_ENCAP_IP) {
err = -EINVAL;
@@ -815,6 +1263,13 @@ static int pep_getsockopt(struct sock *sk, int level, int optname,
case PNPIPE_ENCAP:
val = pn->ifindex ? PNPIPE_ENCAP_IP : PNPIPE_ENCAP_NONE;
break;
+
+#ifdef CONFIG_PHONET_PIPECTRLR
+ case PNPIPE_INQ:
+ val = pn->pipe_state;
+ break;
+#endif
+
case PNPIPE_IFINDEX:
val = pn->ifindex;
break;
@@ -834,6 +1289,15 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
{
struct pep_sock *pn = pep_sk(sk);
struct pnpipehdr *ph;
+ int err;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ struct sockaddr_pn spn = {
+ .spn_family = AF_PHONET,
+ .spn_resource = 0xD9,
+ .spn_dev = pn_dev(pn->remote_pep),
+ .spn_obj = pn_obj(pn->remote_pep),
+ };
+#endif
if (pn_flow_safe(pn->tx_fc) &&
!atomic_add_unless(&pn->tx_credits, -1, 0)) {
@@ -851,8 +1315,16 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
} else
ph->message_id = PNS_PIPE_DATA;
ph->pipe_handle = pn->pipe_handle;
+#ifdef CONFIG_PHONET_PIPECTRLR
+ err = pn_skb_send(sk, skb, &spn);
+#else
+ err = pn_skb_send(sk, skb, &pipe_srv);
+#endif
+
+ if (err && pn_flow_safe(pn->tx_fc))
+ atomic_inc(&pn->tx_credits);
+ return err;
- return pn_skb_send(sk, skb, &pipe_srv);
}
static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
@@ -872,7 +1344,7 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
flags & MSG_DONTWAIT, &err);
if (!skb)
- return -ENOBUFS;
+ return err;
skb_reserve(skb, MAX_PHONET_HEADER + 3);
err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
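Editor's note: with CONFIG_PHONET_PIPECTRLR the pipe state machine is driven from setsockopt(): PNPIPE_CREATE packs the remote PEP object into the low 16 bits of the value and the pipe handle into bits 16-23, and PNPIPE_ENABLE/DISABLE/DESTROY then step the pipe through its states. A hedged user-space sketch, assuming the PNPIPE_* constants and SOL_PNPIPE are visible via <linux/phonet.h> in this tree:

	#include <stdint.h>
	#include <sys/socket.h>
	#include <linux/phonet.h>

	static int phonet_pipe_create(int fd, uint16_t remote_pep, uint8_t handle)
	{
		/* encoding mirrors pep_setsockopt(): handle in bits 16-23, PEP object below */
		int val = ((int)handle << 16) | remote_pep;

		return setsockopt(fd, SOL_PNPIPE, PNPIPE_CREATE, &val, sizeof(val));
	}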
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index b18e48fae975..947038ddd04c 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -292,8 +292,7 @@ static void phonet_route_autodel(struct net_device *dev)
if (bitmap_empty(deleted, 64))
return; /* short-circuit RCU */
synchronize_rcu();
- for (i = find_first_bit(deleted, 64); i < 64;
- i = find_next_bit(deleted, 64, i + 1)) {
+ for_each_set_bit(i, deleted, 64) {
rtm_phonet_notify(RTM_DELROUTE, dev, i);
dev_put(dev);
}
@@ -374,6 +373,7 @@ int __init phonet_device_init(void)
if (err)
return err;
+ proc_net_fops_create(&init_net, "pnresource", 0, &pn_res_seq_fops);
register_netdevice_notifier(&phonet_device_notifier);
err = phonet_netlink_register();
if (err)
@@ -386,6 +386,7 @@ void phonet_device_exit(void)
rtnl_unregister_all(PF_PHONET);
unregister_netdevice_notifier(&phonet_device_notifier);
unregister_pernet_device(&phonet_net_ops);
+ proc_net_remove(&init_net, "pnresource");
}
int phonet_route_add(struct net_device *dev, u8 daddr)
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 6e9848bf0370..aca8fba099e9 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -158,6 +158,7 @@ void pn_sock_unhash(struct sock *sk)
spin_lock_bh(&pnsocks.lock);
sk_del_node_init(sk);
spin_unlock_bh(&pnsocks.lock);
+ pn_sock_unbind_all_res(sk);
}
EXPORT_SYMBOL(pn_sock_unhash);
@@ -281,7 +282,9 @@ static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
return POLLHUP;
- if (sk->sk_state == TCP_ESTABLISHED && atomic_read(&pn->tx_credits))
+ if (sk->sk_state == TCP_ESTABLISHED &&
+ atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
+ atomic_read(&pn->tx_credits))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
return mask;
@@ -563,3 +566,188 @@ const struct file_operations pn_sock_seq_fops = {
.release = seq_release_net,
};
#endif
+
+static struct {
+ struct sock *sk[256];
+} pnres;
+
+/*
+ * Find and hold socket based on resource.
+ */
+struct sock *pn_find_sock_by_res(struct net *net, u8 res)
+{
+ struct sock *sk;
+
+ if (!net_eq(net, &init_net))
+ return NULL;
+
+ rcu_read_lock();
+ sk = rcu_dereference(pnres.sk[res]);
+ if (sk)
+ sock_hold(sk);
+ rcu_read_unlock();
+ return sk;
+}
+
+static DEFINE_MUTEX(resource_mutex);
+
+int pn_sock_bind_res(struct sock *sk, u8 res)
+{
+ int ret = -EADDRINUSE;
+
+ if (!net_eq(sock_net(sk), &init_net))
+ return -ENOIOCTLCMD;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (pn_socket_autobind(sk->sk_socket))
+ return -EAGAIN;
+
+ mutex_lock(&resource_mutex);
+ if (pnres.sk[res] == NULL) {
+ sock_hold(sk);
+ rcu_assign_pointer(pnres.sk[res], sk);
+ ret = 0;
+ }
+ mutex_unlock(&resource_mutex);
+ return ret;
+}
+
+int pn_sock_unbind_res(struct sock *sk, u8 res)
+{
+ int ret = -ENOENT;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ mutex_lock(&resource_mutex);
+ if (pnres.sk[res] == sk) {
+ rcu_assign_pointer(pnres.sk[res], NULL);
+ ret = 0;
+ }
+ mutex_unlock(&resource_mutex);
+
+ if (ret == 0) {
+ synchronize_rcu();
+ sock_put(sk);
+ }
+ return ret;
+}
+
+void pn_sock_unbind_all_res(struct sock *sk)
+{
+ unsigned res, match = 0;
+
+ mutex_lock(&resource_mutex);
+ for (res = 0; res < 256; res++) {
+ if (pnres.sk[res] == sk) {
+ rcu_assign_pointer(pnres.sk[res], NULL);
+ match++;
+ }
+ }
+ mutex_unlock(&resource_mutex);
+
+ if (match == 0)
+ return;
+ synchronize_rcu();
+ while (match > 0) {
+ sock_put(sk);
+ match--;
+ }
+}
+
+#ifdef CONFIG_PROC_FS
+static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
+{
+ struct net *net = seq_file_net(seq);
+ unsigned i;
+
+ if (!net_eq(net, &init_net))
+ return NULL;
+
+ for (i = 0; i < 256; i++) {
+ if (pnres.sk[i] == NULL)
+ continue;
+ if (!pos)
+ return pnres.sk + i;
+ pos--;
+ }
+ return NULL;
+}
+
+static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
+{
+ struct net *net = seq_file_net(seq);
+ unsigned i;
+
+ BUG_ON(!net_eq(net, &init_net));
+
+ for (i = (sk - pnres.sk) + 1; i < 256; i++)
+ if (pnres.sk[i])
+ return pnres.sk + i;
+ return NULL;
+}
+
+static void *pn_res_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(resource_mutex)
+{
+ mutex_lock(&resource_mutex);
+ return *pos ? pn_res_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+}
+
+static void *pn_res_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct sock **sk;
+
+ if (v == SEQ_START_TOKEN)
+ sk = pn_res_get_idx(seq, 0);
+ else
+ sk = pn_res_get_next(seq, v);
+ (*pos)++;
+ return sk;
+}
+
+static void pn_res_seq_stop(struct seq_file *seq, void *v)
+ __releases(resource_mutex)
+{
+ mutex_unlock(&resource_mutex);
+}
+
+static int pn_res_seq_show(struct seq_file *seq, void *v)
+{
+ int len;
+
+ if (v == SEQ_START_TOKEN)
+ seq_printf(seq, "%s%n", "rs uid inode", &len);
+ else {
+ struct sock **psk = v;
+ struct sock *sk = *psk;
+
+ seq_printf(seq, "%02X %5d %lu%n",
+ (int) (psk - pnres.sk), sock_i_uid(sk),
+ sock_i_ino(sk), &len);
+ }
+ seq_printf(seq, "%*s\n", 63 - len, "");
+ return 0;
+}
+
+static const struct seq_operations pn_res_seq_ops = {
+ .start = pn_res_seq_start,
+ .next = pn_res_seq_next,
+ .stop = pn_res_seq_stop,
+ .show = pn_res_seq_show,
+};
+
+static int pn_res_open(struct inode *inode, struct file *file)
+{
+ return seq_open_net(inode, file, &pn_res_seq_ops,
+ sizeof(struct seq_net_private));
+}
+
+const struct file_operations pn_res_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = pn_res_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_net,
+};
+#endif
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index aebfecbdb841..bb6ad81b671d 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -39,7 +39,15 @@
#include <net/sock.h>
#include "rds.h"
-#include "rdma.h"
+
+char *rds_str_array(char **array, size_t elements, size_t index)
+{
+ if ((index < elements) && array[index])
+ return array[index];
+ else
+ return "unknown";
+}
+EXPORT_SYMBOL(rds_str_array);
/* this is just used for stats gathering :/ */
static DEFINE_SPINLOCK(rds_sock_lock);
@@ -62,7 +70,7 @@ static int rds_release(struct socket *sock)
struct rds_sock *rs;
unsigned long flags;
- if (sk == NULL)
+ if (!sk)
goto out;
rs = rds_sk_to_rs(sk);
@@ -73,7 +81,15 @@ static int rds_release(struct socket *sock)
* with the socket. */
rds_clear_recv_queue(rs);
rds_cong_remove_socket(rs);
+
+ /*
+	 * the binding lookup hash uses RCU; we need to make sure we
+	 * synchronize_rcu() before we free our entry
+ */
rds_remove_bound(rs);
+ synchronize_rcu();
+
rds_send_drop_to(rs, NULL);
rds_rdma_drop_keys(rs);
rds_notify_queue_get(rs, NULL);
@@ -83,6 +99,8 @@ static int rds_release(struct socket *sock)
rds_sock_count--;
spin_unlock_irqrestore(&rds_sock_lock, flags);
+ rds_trans_put(rs->rs_transport);
+
sock->sk = NULL;
sock_put(sk);
out:
@@ -514,7 +532,7 @@ out:
spin_unlock_irqrestore(&rds_sock_lock, flags);
}
-static void __exit rds_exit(void)
+static void rds_exit(void)
{
sock_unregister(rds_family_ops.family);
proto_unregister(&rds_proto);
@@ -529,7 +547,7 @@ static void __exit rds_exit(void)
}
module_exit(rds_exit);
-static int __init rds_init(void)
+static int rds_init(void)
{
int ret;
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 5d95fc007f1a..2f6b3fcc79f8 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -34,45 +34,52 @@
#include <net/sock.h>
#include <linux/in.h>
#include <linux/if_arp.h>
+#include <linux/jhash.h>
#include "rds.h"
-/*
- * XXX this probably still needs more work.. no INADDR_ANY, and rbtrees aren't
- * particularly zippy.
- *
- * This is now called for every incoming frame so we arguably care much more
- * about it than we used to.
- */
+#define BIND_HASH_SIZE 1024
+static struct hlist_head bind_hash_table[BIND_HASH_SIZE];
static DEFINE_SPINLOCK(rds_bind_lock);
-static struct rb_root rds_bind_tree = RB_ROOT;
-static struct rds_sock *rds_bind_tree_walk(__be32 addr, __be16 port,
- struct rds_sock *insert)
+static struct hlist_head *hash_to_bucket(__be32 addr, __be16 port)
+{
+ return bind_hash_table + (jhash_2words((u32)addr, (u32)port, 0) &
+ (BIND_HASH_SIZE - 1));
+}
+
+static struct rds_sock *rds_bind_lookup(__be32 addr, __be16 port,
+ struct rds_sock *insert)
{
- struct rb_node **p = &rds_bind_tree.rb_node;
- struct rb_node *parent = NULL;
struct rds_sock *rs;
+ struct hlist_node *node;
+ struct hlist_head *head = hash_to_bucket(addr, port);
u64 cmp;
u64 needle = ((u64)be32_to_cpu(addr) << 32) | be16_to_cpu(port);
- while (*p) {
- parent = *p;
- rs = rb_entry(parent, struct rds_sock, rs_bound_node);
-
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(rs, node, head, rs_bound_node) {
cmp = ((u64)be32_to_cpu(rs->rs_bound_addr) << 32) |
be16_to_cpu(rs->rs_bound_port);
- if (needle < cmp)
- p = &(*p)->rb_left;
- else if (needle > cmp)
- p = &(*p)->rb_right;
- else
+ if (cmp == needle) {
+ rcu_read_unlock();
return rs;
+ }
}
+ rcu_read_unlock();
if (insert) {
- rb_link_node(&insert->rs_bound_node, parent, p);
- rb_insert_color(&insert->rs_bound_node, &rds_bind_tree);
+ /*
+		 * make sure our addr and port are set before we are
+		 * added to the list; other RCU readers will find us as
+		 * soon as the hlist_add_head_rcu() is done
+ */
+ insert->rs_bound_addr = addr;
+ insert->rs_bound_port = port;
+ rds_sock_addref(insert);
+
+ hlist_add_head_rcu(&insert->rs_bound_node, head);
}
return NULL;
}
@@ -86,15 +93,13 @@ static struct rds_sock *rds_bind_tree_walk(__be32 addr, __be16 port,
struct rds_sock *rds_find_bound(__be32 addr, __be16 port)
{
struct rds_sock *rs;
- unsigned long flags;
- spin_lock_irqsave(&rds_bind_lock, flags);
- rs = rds_bind_tree_walk(addr, port, NULL);
+ rs = rds_bind_lookup(addr, port, NULL);
+
if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
rds_sock_addref(rs);
else
rs = NULL;
- spin_unlock_irqrestore(&rds_bind_lock, flags);
rdsdebug("returning rs %p for %pI4:%u\n", rs, &addr,
ntohs(port));
@@ -121,22 +126,15 @@ static int rds_add_bound(struct rds_sock *rs, __be32 addr, __be16 *port)
do {
if (rover == 0)
rover++;
- if (rds_bind_tree_walk(addr, cpu_to_be16(rover), rs) == NULL) {
- *port = cpu_to_be16(rover);
+ if (!rds_bind_lookup(addr, cpu_to_be16(rover), rs)) {
+ *port = rs->rs_bound_port;
ret = 0;
+ rdsdebug("rs %p binding to %pI4:%d\n",
+ rs, &addr, (int)ntohs(*port));
break;
}
} while (rover++ != last);
- if (ret == 0) {
- rs->rs_bound_addr = addr;
- rs->rs_bound_port = *port;
- rds_sock_addref(rs);
-
- rdsdebug("rs %p binding to %pI4:%d\n",
- rs, &addr, (int)ntohs(*port));
- }
-
spin_unlock_irqrestore(&rds_bind_lock, flags);
return ret;
@@ -153,7 +151,7 @@ void rds_remove_bound(struct rds_sock *rs)
rs, &rs->rs_bound_addr,
ntohs(rs->rs_bound_port));
- rb_erase(&rs->rs_bound_node, &rds_bind_tree);
+ hlist_del_init_rcu(&rs->rs_bound_node);
rds_sock_put(rs);
rs->rs_bound_addr = 0;
}
@@ -184,7 +182,7 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
trans = rds_trans_get_preferred(sin->sin_addr.s_addr);
- if (trans == NULL) {
+ if (!trans) {
ret = -EADDRNOTAVAIL;
rds_remove_bound(rs);
if (printk_ratelimit())
@@ -198,5 +196,9 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
out:
release_sock(sk);
+
+ /* we might have called rds_remove_bound on error */
+ if (ret)
+ synchronize_rcu();
return ret;
}
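Editor's note: the bind table moves from an rbtree under a spinlock to an RCU hash table. The socket's address and port are filled in before hlist_add_head_rcu() publishes the entry, so lockless readers iterating with hlist_for_each_entry_rcu() always see fully initialised fields. A generic sketch of that publish ordering (struct and names are hypothetical):

	struct bound_entry {
		u32 key;
		struct hlist_node node;
	};

	static void publish_entry(struct bound_entry *e, u32 key,
				  struct hlist_head *head)
	{
		e->key = key;				/* initialise first ...          */
		hlist_add_head_rcu(&e->node, head);	/* ... then publish; readers
							 * under rcu_read_lock() now
							 * see a complete entry         */
	}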
diff --git a/net/rds/cong.c b/net/rds/cong.c
index 0871a29f0780..75ea686f27d5 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -141,7 +141,7 @@ static struct rds_cong_map *rds_cong_from_addr(__be32 addr)
unsigned long flags;
map = kzalloc(sizeof(struct rds_cong_map), GFP_KERNEL);
- if (map == NULL)
+ if (!map)
return NULL;
map->m_addr = addr;
@@ -159,7 +159,7 @@ static struct rds_cong_map *rds_cong_from_addr(__be32 addr)
ret = rds_cong_tree_walk(addr, map);
spin_unlock_irqrestore(&rds_cong_lock, flags);
- if (ret == NULL) {
+ if (!ret) {
ret = map;
map = NULL;
}
@@ -205,7 +205,7 @@ int rds_cong_get_maps(struct rds_connection *conn)
conn->c_lcong = rds_cong_from_addr(conn->c_laddr);
conn->c_fcong = rds_cong_from_addr(conn->c_faddr);
- if (conn->c_lcong == NULL || conn->c_fcong == NULL)
+ if (!(conn->c_lcong && conn->c_fcong))
return -ENOMEM;
return 0;
@@ -221,7 +221,7 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
if (!test_and_set_bit(0, &conn->c_map_queued)) {
rds_stats_inc(s_cong_update_queued);
- queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+ rds_send_xmit(conn);
}
}
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 7619b671ca28..870992e08cae 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -37,7 +37,6 @@
#include "rds.h"
#include "loop.h"
-#include "rdma.h"
#define RDS_CONNECTION_HASH_BITS 12
#define RDS_CONNECTION_HASH_ENTRIES (1 << RDS_CONNECTION_HASH_BITS)
@@ -63,18 +62,7 @@ static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
var |= RDS_INFO_CONNECTION_FLAG_##suffix; \
} while (0)
-static inline int rds_conn_is_sending(struct rds_connection *conn)
-{
- int ret = 0;
-
- if (!mutex_trylock(&conn->c_send_lock))
- ret = 1;
- else
- mutex_unlock(&conn->c_send_lock);
-
- return ret;
-}
-
+/* rcu read lock must be held or the connection spinlock */
static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
__be32 laddr, __be32 faddr,
struct rds_transport *trans)
@@ -82,7 +70,7 @@ static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
struct rds_connection *conn, *ret = NULL;
struct hlist_node *pos;
- hlist_for_each_entry(conn, pos, head, c_hash_node) {
+ hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
conn->c_trans == trans) {
ret = conn;
@@ -129,10 +117,11 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
{
struct rds_connection *conn, *parent = NULL;
struct hlist_head *head = rds_conn_bucket(laddr, faddr);
+ struct rds_transport *loop_trans;
unsigned long flags;
int ret;
- spin_lock_irqsave(&rds_conn_lock, flags);
+ rcu_read_lock();
conn = rds_conn_lookup(head, laddr, faddr, trans);
if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
!is_outgoing) {
@@ -143,12 +132,12 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
parent = conn;
conn = parent->c_passive;
}
- spin_unlock_irqrestore(&rds_conn_lock, flags);
+ rcu_read_unlock();
if (conn)
goto out;
conn = kmem_cache_zalloc(rds_conn_slab, gfp);
- if (conn == NULL) {
+ if (!conn) {
conn = ERR_PTR(-ENOMEM);
goto out;
}
@@ -159,7 +148,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
spin_lock_init(&conn->c_lock);
conn->c_next_tx_seq = 1;
- mutex_init(&conn->c_send_lock);
+ init_waitqueue_head(&conn->c_waitq);
INIT_LIST_HEAD(&conn->c_send_queue);
INIT_LIST_HEAD(&conn->c_retrans);
@@ -175,7 +164,9 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
* can bind to the destination address then we'd rather the messages
* flow through loopback rather than either transport.
*/
- if (rds_trans_get_preferred(faddr)) {
+ loop_trans = rds_trans_get_preferred(faddr);
+ if (loop_trans) {
+ rds_trans_put(loop_trans);
conn->c_loopback = 1;
if (is_outgoing && trans->t_prefer_loopback) {
/* "outgoing" connection - and the transport
@@ -238,7 +229,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
kmem_cache_free(rds_conn_slab, conn);
conn = found;
} else {
- hlist_add_head(&conn->c_hash_node, head);
+ hlist_add_head_rcu(&conn->c_hash_node, head);
rds_cong_add_conn(conn);
rds_conn_count++;
}
@@ -263,21 +254,91 @@ struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
}
EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);
+void rds_conn_shutdown(struct rds_connection *conn)
+{
+ /* shut it down unless it's down already */
+ if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
+ /*
+ * Quiesce the connection mgmt handlers before we start tearing
+ * things down. We don't hold the mutex for the entire
+		 * duration of the shutdown operation, else we may deadlock
+		 * with the CM handler. Instead, the CM event handler is
+		 * supposed to check for state DISCONNECTING.
+ */
+ mutex_lock(&conn->c_cm_lock);
+ if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING)
+ && !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
+ rds_conn_error(conn, "shutdown called in state %d\n",
+ atomic_read(&conn->c_state));
+ mutex_unlock(&conn->c_cm_lock);
+ return;
+ }
+ mutex_unlock(&conn->c_cm_lock);
+
+ wait_event(conn->c_waitq,
+ !test_bit(RDS_IN_XMIT, &conn->c_flags));
+
+ conn->c_trans->conn_shutdown(conn);
+ rds_conn_reset(conn);
+
+ if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
+ /* This can happen - eg when we're in the middle of tearing
+ * down the connection, and someone unloads the rds module.
+			 * Quite reproducible with loopback connections.
+ * Mostly harmless.
+ */
+ rds_conn_error(conn,
+ "%s: failed to transition to state DOWN, "
+ "current state is %d\n",
+ __func__,
+ atomic_read(&conn->c_state));
+ return;
+ }
+ }
+
+ /* Then reconnect if it's still live.
+ * The passive side of an IB loopback connection is never added
+ * to the conn hash, so we never trigger a reconnect on this
+ * conn - the reconnect is always triggered by the active peer. */
+ cancel_delayed_work_sync(&conn->c_conn_w);
+ rcu_read_lock();
+ if (!hlist_unhashed(&conn->c_hash_node)) {
+ rcu_read_unlock();
+ rds_queue_reconnect(conn);
+ } else {
+ rcu_read_unlock();
+ }
+}
+
+/*
+ * Stop and free a connection.
+ *
+ * This can only be used in very limited circumstances. It assumes that once
+ * the conn has been shut down, no one else is referencing the connection.
+ * We can only ensure this in the rmmod path in the current code.
+ */
void rds_conn_destroy(struct rds_connection *conn)
{
struct rds_message *rm, *rtmp;
+ unsigned long flags;
rdsdebug("freeing conn %p for %pI4 -> "
"%pI4\n", conn, &conn->c_laddr,
&conn->c_faddr);
- hlist_del_init(&conn->c_hash_node);
+ /* Ensure conn will not be scheduled for reconnect */
+ spin_lock_irq(&rds_conn_lock);
+ hlist_del_init_rcu(&conn->c_hash_node);
+ spin_unlock_irq(&rds_conn_lock);
+ synchronize_rcu();
- /* wait for the rds thread to shut it down */
- atomic_set(&conn->c_state, RDS_CONN_ERROR);
- cancel_delayed_work(&conn->c_conn_w);
- queue_work(rds_wq, &conn->c_down_w);
- flush_workqueue(rds_wq);
+ /* shut the connection down */
+ rds_conn_drop(conn);
+ flush_work(&conn->c_down_w);
+
+ /* make sure lingering queued work won't try to ref the conn */
+ cancel_delayed_work_sync(&conn->c_send_w);
+ cancel_delayed_work_sync(&conn->c_recv_w);
/* tear down queued messages */
list_for_each_entry_safe(rm, rtmp,
@@ -302,7 +363,9 @@ void rds_conn_destroy(struct rds_connection *conn)
BUG_ON(!list_empty(&conn->c_retrans));
kmem_cache_free(rds_conn_slab, conn);
+ spin_lock_irqsave(&rds_conn_lock, flags);
rds_conn_count--;
+ spin_unlock_irqrestore(&rds_conn_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_conn_destroy);
@@ -316,23 +379,23 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
struct list_head *list;
struct rds_connection *conn;
struct rds_message *rm;
- unsigned long flags;
unsigned int total = 0;
+ unsigned long flags;
size_t i;
len /= sizeof(struct rds_info_message);
- spin_lock_irqsave(&rds_conn_lock, flags);
+ rcu_read_lock();
for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
i++, head++) {
- hlist_for_each_entry(conn, pos, head, c_hash_node) {
+ hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
if (want_send)
list = &conn->c_send_queue;
else
list = &conn->c_retrans;
- spin_lock(&conn->c_lock);
+ spin_lock_irqsave(&conn->c_lock, flags);
/* XXX too lazy to maintain counts.. */
list_for_each_entry(rm, list, m_conn_item) {
@@ -343,11 +406,10 @@ static void rds_conn_message_info(struct socket *sock, unsigned int len,
conn->c_faddr, 0);
}
- spin_unlock(&conn->c_lock);
+ spin_unlock_irqrestore(&conn->c_lock, flags);
}
}
-
- spin_unlock_irqrestore(&rds_conn_lock, flags);
+ rcu_read_unlock();
lens->nr = total;
lens->each = sizeof(struct rds_info_message);
@@ -377,19 +439,17 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
uint64_t buffer[(item_len + 7) / 8];
struct hlist_head *head;
struct hlist_node *pos;
- struct hlist_node *tmp;
struct rds_connection *conn;
- unsigned long flags;
size_t i;
- spin_lock_irqsave(&rds_conn_lock, flags);
+ rcu_read_lock();
lens->nr = 0;
lens->each = item_len;
for (i = 0, head = rds_conn_hash; i < ARRAY_SIZE(rds_conn_hash);
i++, head++) {
- hlist_for_each_entry_safe(conn, pos, tmp, head, c_hash_node) {
+ hlist_for_each_entry_rcu(conn, pos, head, c_hash_node) {
/* XXX no c_lock usage.. */
if (!visitor(conn, buffer))
@@ -405,8 +465,7 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
lens->nr++;
}
}
-
- spin_unlock_irqrestore(&rds_conn_lock, flags);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_for_each_conn_info);
@@ -423,8 +482,8 @@ static int rds_conn_info_visitor(struct rds_connection *conn,
sizeof(cinfo->transport));
cinfo->flags = 0;
- rds_conn_info_set(cinfo->flags,
- rds_conn_is_sending(conn), SENDING);
+ rds_conn_info_set(cinfo->flags, test_bit(RDS_IN_XMIT, &conn->c_flags),
+ SENDING);
/* XXX Future: return the state rather than these funky bits */
rds_conn_info_set(cinfo->flags,
atomic_read(&conn->c_state) == RDS_CONN_CONNECTING,
@@ -444,12 +503,12 @@ static void rds_conn_info(struct socket *sock, unsigned int len,
sizeof(struct rds_info_connection));
}
-int __init rds_conn_init(void)
+int rds_conn_init(void)
{
rds_conn_slab = kmem_cache_create("rds_connection",
sizeof(struct rds_connection),
0, 0, NULL);
- if (rds_conn_slab == NULL)
+ if (!rds_conn_slab)
return -ENOMEM;
rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
@@ -487,6 +546,18 @@ void rds_conn_drop(struct rds_connection *conn)
EXPORT_SYMBOL_GPL(rds_conn_drop);
/*
+ * If the connection is down, trigger a connect. We may have scheduled a
+ * delayed reconnect however - in this case we should not interfere.
+ */
+void rds_conn_connect_if_down(struct rds_connection *conn)
+{
+ if (rds_conn_state(conn) == RDS_CONN_DOWN &&
+ !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
+ queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
+}
+EXPORT_SYMBOL_GPL(rds_conn_connect_if_down);
+
+/*
* An error occurred on the connection
*/
void
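Editor's note: the conversion above replaces the old c_send_lock mutex with an RDS_IN_XMIT flag bit plus a wait queue; the transmit path owns the bit while sending, and rds_conn_shutdown() quiesces by waiting for it to clear. A generic sketch of that flag-plus-waitqueue idiom (the struct and bit name are illustrative; the real set/clear lives in the RDS send path, which this diff does not show):

	struct conn_like {
		unsigned long flags;
		wait_queue_head_t waitq;
	};
	#define IN_XMIT_BIT 0

	static void xmit_path(struct conn_like *c)
	{
		if (test_and_set_bit(IN_XMIT_BIT, &c->flags))
			return;				/* someone else is already transmitting */
		/* ... push queued messages to the transport ... */
		clear_bit_unlock(IN_XMIT_BIT, &c->flags);
		wake_up_all(&c->waitq);			/* let a waiting shutdown proceed */
	}

	static void shutdown_path(struct conn_like *c)
	{
		wait_event(c->waitq, !test_bit(IN_XMIT_BIT, &c->flags));
		/* transport teardown is now safe: no transmit in flight */
	}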
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 8f2d6dd7700a..b12a3951167d 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -53,12 +53,71 @@ MODULE_PARM_DESC(fmr_message_size, " Max size of a RDMA transfer");
module_param(rds_ib_retry_count, int, 0444);
MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error");
+/*
+ * We have a clumsy combination of RCU and an rwsem protecting this list
+ * because it is used both in the get_mr fast path and while blocking in
+ * the FMR flushing path.
+ */
+DECLARE_RWSEM(rds_ib_devices_lock);
struct list_head rds_ib_devices;
/* NOTE: if also grabbing ibdev lock, grab this first */
DEFINE_SPINLOCK(ib_nodev_conns_lock);
LIST_HEAD(ib_nodev_conns);
+void rds_ib_nodev_connect(void)
+{
+ struct rds_ib_connection *ic;
+
+ spin_lock(&ib_nodev_conns_lock);
+ list_for_each_entry(ic, &ib_nodev_conns, ib_node)
+ rds_conn_connect_if_down(ic->conn);
+ spin_unlock(&ib_nodev_conns_lock);
+}
+
+void rds_ib_dev_shutdown(struct rds_ib_device *rds_ibdev)
+{
+ struct rds_ib_connection *ic;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rds_ibdev->spinlock, flags);
+ list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node)
+ rds_conn_drop(ic->conn);
+ spin_unlock_irqrestore(&rds_ibdev->spinlock, flags);
+}
+
+/*
+ * rds_ib_destroy_mr_pool() blocks on a few things, and MRs drop references
+ * from interrupt context, so we push freeing off into a work struct in krdsd.
+ */
+static void rds_ib_dev_free(struct work_struct *work)
+{
+ struct rds_ib_ipaddr *i_ipaddr, *i_next;
+ struct rds_ib_device *rds_ibdev = container_of(work,
+ struct rds_ib_device, free_work);
+
+ if (rds_ibdev->mr_pool)
+ rds_ib_destroy_mr_pool(rds_ibdev->mr_pool);
+ if (rds_ibdev->mr)
+ ib_dereg_mr(rds_ibdev->mr);
+ if (rds_ibdev->pd)
+ ib_dealloc_pd(rds_ibdev->pd);
+
+ list_for_each_entry_safe(i_ipaddr, i_next, &rds_ibdev->ipaddr_list, list) {
+ list_del(&i_ipaddr->list);
+ kfree(i_ipaddr);
+ }
+
+ kfree(rds_ibdev);
+}
+
+void rds_ib_dev_put(struct rds_ib_device *rds_ibdev)
+{
+ BUG_ON(atomic_read(&rds_ibdev->refcount) <= 0);
+ if (atomic_dec_and_test(&rds_ibdev->refcount))
+ queue_work(rds_wq, &rds_ibdev->free_work);
+}
+
void rds_ib_add_one(struct ib_device *device)
{
struct rds_ib_device *rds_ibdev;
@@ -77,11 +136,14 @@ void rds_ib_add_one(struct ib_device *device)
goto free_attr;
}
- rds_ibdev = kmalloc(sizeof *rds_ibdev, GFP_KERNEL);
+ rds_ibdev = kzalloc_node(sizeof(struct rds_ib_device), GFP_KERNEL,
+ ibdev_to_node(device));
if (!rds_ibdev)
goto free_attr;
spin_lock_init(&rds_ibdev->spinlock);
+ atomic_set(&rds_ibdev->refcount, 1);
+ INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);
rds_ibdev->max_wrs = dev_attr->max_qp_wr;
rds_ibdev->max_sge = min(dev_attr->max_sge, RDS_IB_MAX_SGE);
@@ -91,68 +153,107 @@ void rds_ib_add_one(struct ib_device *device)
min_t(unsigned int, dev_attr->max_fmr, fmr_pool_size) :
fmr_pool_size;
+ rds_ibdev->max_initiator_depth = dev_attr->max_qp_init_rd_atom;
+ rds_ibdev->max_responder_resources = dev_attr->max_qp_rd_atom;
+
rds_ibdev->dev = device;
rds_ibdev->pd = ib_alloc_pd(device);
- if (IS_ERR(rds_ibdev->pd))
- goto free_dev;
+ if (IS_ERR(rds_ibdev->pd)) {
+ rds_ibdev->pd = NULL;
+ goto put_dev;
+ }
- rds_ibdev->mr = ib_get_dma_mr(rds_ibdev->pd,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(rds_ibdev->mr))
- goto err_pd;
+ rds_ibdev->mr = ib_get_dma_mr(rds_ibdev->pd, IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(rds_ibdev->mr)) {
+ rds_ibdev->mr = NULL;
+ goto put_dev;
+ }
rds_ibdev->mr_pool = rds_ib_create_mr_pool(rds_ibdev);
if (IS_ERR(rds_ibdev->mr_pool)) {
rds_ibdev->mr_pool = NULL;
- goto err_mr;
+ goto put_dev;
}
INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
INIT_LIST_HEAD(&rds_ibdev->conn_list);
- list_add_tail(&rds_ibdev->list, &rds_ib_devices);
+
+ down_write(&rds_ib_devices_lock);
+ list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
+ up_write(&rds_ib_devices_lock);
+ atomic_inc(&rds_ibdev->refcount);
ib_set_client_data(device, &rds_ib_client, rds_ibdev);
+ atomic_inc(&rds_ibdev->refcount);
- goto free_attr;
+ rds_ib_nodev_connect();
-err_mr:
- ib_dereg_mr(rds_ibdev->mr);
-err_pd:
- ib_dealloc_pd(rds_ibdev->pd);
-free_dev:
- kfree(rds_ibdev);
+put_dev:
+ rds_ib_dev_put(rds_ibdev);
free_attr:
kfree(dev_attr);
}
+/*
+ * New connections use this to find the device to associate with the
+ * connection. It's not in the fast path so we're not concerned about the
+ * performance of the IB call. (As of this writing, it uses an interrupt
+ * blocking spinlock to serialize walking a per-device list of all registered
+ * clients.)
+ *
+ * RCU is used to handle incoming connections racing with device teardown.
+ * Rather than use a lock to serialize removal from the client_data and
+ * getting a new reference, we use an RCU grace period. The destruction
+ * path removes the device from client_data and then waits for all RCU
+ * readers to finish.
+ *
+ * A new connection can get NULL from this if it's arriving on a
+ * device that is in the process of being removed.
+ */
+struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device)
+{
+ struct rds_ib_device *rds_ibdev;
+
+ rcu_read_lock();
+ rds_ibdev = ib_get_client_data(device, &rds_ib_client);
+ if (rds_ibdev)
+ atomic_inc(&rds_ibdev->refcount);
+ rcu_read_unlock();
+ return rds_ibdev;
+}
+
+/*
+ * The IB stack is letting us know that a device is going away. This can
+ * happen if the underlying HCA driver is removed or if PCI hotplug is removing
+ * the pci function, for example.
+ *
+ * This can be called at any time and can be racing with any other RDS path.
+ */
void rds_ib_remove_one(struct ib_device *device)
{
struct rds_ib_device *rds_ibdev;
- struct rds_ib_ipaddr *i_ipaddr, *i_next;
rds_ibdev = ib_get_client_data(device, &rds_ib_client);
if (!rds_ibdev)
return;
- list_for_each_entry_safe(i_ipaddr, i_next, &rds_ibdev->ipaddr_list, list) {
- list_del(&i_ipaddr->list);
- kfree(i_ipaddr);
- }
+ rds_ib_dev_shutdown(rds_ibdev);
- rds_ib_destroy_conns(rds_ibdev);
+ /* stop connection attempts from getting a reference to this device. */
+ ib_set_client_data(device, &rds_ib_client, NULL);
- if (rds_ibdev->mr_pool)
- rds_ib_destroy_mr_pool(rds_ibdev->mr_pool);
-
- ib_dereg_mr(rds_ibdev->mr);
-
- while (ib_dealloc_pd(rds_ibdev->pd)) {
- rdsdebug("Failed to dealloc pd %p\n", rds_ibdev->pd);
- msleep(1);
- }
+ down_write(&rds_ib_devices_lock);
+ list_del_rcu(&rds_ibdev->list);
+ up_write(&rds_ib_devices_lock);
- list_del(&rds_ibdev->list);
- kfree(rds_ibdev);
+ /*
+	 * This synchronize_rcu() waits for readers of both the ib
+ * client data and the devices list to finish before we drop
+ * both of those references.
+ */
+ synchronize_rcu();
+ rds_ib_dev_put(rds_ibdev);
+ rds_ib_dev_put(rds_ibdev);
}
struct ib_client rds_ib_client = {
@@ -186,7 +287,7 @@ static int rds_ib_conn_info_visitor(struct rds_connection *conn,
rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);
- rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
+ rds_ibdev = ic->rds_ibdev;
iinfo->max_send_wr = ic->i_send_ring.w_nr;
iinfo->max_recv_wr = ic->i_recv_ring.w_nr;
iinfo->max_send_sge = rds_ibdev->max_sge;
@@ -248,29 +349,36 @@ static int rds_ib_laddr_check(__be32 addr)
return ret;
}
+static void rds_ib_unregister_client(void)
+{
+ ib_unregister_client(&rds_ib_client);
+ /* wait for rds_ib_dev_free() to complete */
+ flush_workqueue(rds_wq);
+}
+
void rds_ib_exit(void)
{
rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
+ rds_ib_unregister_client();
rds_ib_destroy_nodev_conns();
- ib_unregister_client(&rds_ib_client);
rds_ib_sysctl_exit();
rds_ib_recv_exit();
rds_trans_unregister(&rds_ib_transport);
+ rds_ib_fmr_exit();
}
struct rds_transport rds_ib_transport = {
.laddr_check = rds_ib_laddr_check,
.xmit_complete = rds_ib_xmit_complete,
.xmit = rds_ib_xmit,
- .xmit_cong_map = NULL,
.xmit_rdma = rds_ib_xmit_rdma,
+ .xmit_atomic = rds_ib_xmit_atomic,
.recv = rds_ib_recv,
.conn_alloc = rds_ib_conn_alloc,
.conn_free = rds_ib_conn_free,
.conn_connect = rds_ib_conn_connect,
.conn_shutdown = rds_ib_conn_shutdown,
.inc_copy_to_user = rds_ib_inc_copy_to_user,
- .inc_purge = rds_ib_inc_purge,
.inc_free = rds_ib_inc_free,
.cm_initiate_connect = rds_ib_cm_initiate_connect,
.cm_handle_connect = rds_ib_cm_handle_connect,
@@ -286,16 +394,20 @@ struct rds_transport rds_ib_transport = {
.t_type = RDS_TRANS_IB
};
-int __init rds_ib_init(void)
+int rds_ib_init(void)
{
int ret;
INIT_LIST_HEAD(&rds_ib_devices);
- ret = ib_register_client(&rds_ib_client);
+ ret = rds_ib_fmr_init();
if (ret)
goto out;
+ ret = ib_register_client(&rds_ib_client);
+ if (ret)
+ goto out_fmr_exit;
+
ret = rds_ib_sysctl_init();
if (ret)
goto out_ibreg;
@@ -317,7 +429,9 @@ out_recv:
out_sysctl:
rds_ib_sysctl_exit();
out_ibreg:
- ib_unregister_client(&rds_ib_client);
+ rds_ib_unregister_client();
+out_fmr_exit:
+ rds_ib_fmr_exit();
out:
return ret;
}
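
The lookup/teardown pattern this file now relies on -- take a reference under rcu_read_lock(), unpublish and synchronize_rcu() before dropping the published reference -- can be reduced to a small standalone sketch. This is illustrative only and not part of the patch: the demo_* names are invented, and it is simplified (the real code also holds a reference for the rds_ib_devices list and frees the device from a work item rather than directly).

#include <linux/rcupdate.h>
#include <linux/atomic.h>
#include <linux/slab.h>

struct demo_dev {
        atomic_t refcount;
        /* ... device state ... */
};

static struct demo_dev __rcu *demo_ptr;         /* stands in for the ib client_data */

/* Reader side: take a reference under rcu_read_lock(), as
 * rds_ib_get_client_data() does. */
static struct demo_dev *demo_get(void)
{
        struct demo_dev *d;

        rcu_read_lock();
        d = rcu_dereference(demo_ptr);
        if (d)
                atomic_inc(&d->refcount);
        rcu_read_unlock();
        return d;
}

static void demo_put(struct demo_dev *d)
{
        if (atomic_dec_and_test(&d->refcount))
                kfree(d);
}

/* Teardown side: unpublish the pointer, wait out the readers, then drop
 * the reference the publication held, as rds_ib_remove_one() does. */
static void demo_remove(struct demo_dev *d)
{
        rcu_assign_pointer(demo_ptr, NULL);
        synchronize_rcu();
        demo_put(d);
}
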
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 64df4e79b29f..7ad3d57e06a5 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -3,11 +3,13 @@
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
#include "rds.h"
#include "rdma_transport.h"
#define RDS_FMR_SIZE 256
-#define RDS_FMR_POOL_SIZE 4096
+#define RDS_FMR_POOL_SIZE 8192
#define RDS_IB_MAX_SGE 8
#define RDS_IB_RECV_SGE 2
@@ -19,6 +21,9 @@
#define RDS_IB_SUPPORTED_PROTOCOLS 0x00000003 /* minor versions supported */
+#define RDS_IB_RECYCLE_BATCH_COUNT 32
+
+extern struct rw_semaphore rds_ib_devices_lock;
extern struct list_head rds_ib_devices;
/*
@@ -26,20 +31,29 @@ extern struct list_head rds_ib_devices;
 * try and minimize the amount of memory tied up in both the device and
* socket receive queues.
*/
-/* page offset of the final full frag that fits in the page */
-#define RDS_PAGE_LAST_OFF (((PAGE_SIZE / RDS_FRAG_SIZE) - 1) * RDS_FRAG_SIZE)
struct rds_page_frag {
struct list_head f_item;
- struct page *f_page;
- unsigned long f_offset;
- dma_addr_t f_mapped;
+ struct list_head f_cache_entry;
+ struct scatterlist f_sg;
};
struct rds_ib_incoming {
struct list_head ii_frags;
+ struct list_head ii_cache_entry;
struct rds_incoming ii_inc;
};
+struct rds_ib_cache_head {
+ struct list_head *first;
+ unsigned long count;
+};
+
+struct rds_ib_refill_cache {
+ struct rds_ib_cache_head *percpu;
+ struct list_head *xfer;
+ struct list_head *ready;
+};
+
struct rds_ib_connect_private {
/* Add new fields at the end, and don't permute existing fields. */
__be32 dp_saddr;
@@ -53,8 +67,7 @@ struct rds_ib_connect_private {
};
struct rds_ib_send_work {
- struct rds_message *s_rm;
- struct rds_rdma_op *s_op;
+ void *s_op;
struct ib_send_wr s_wr;
struct ib_sge s_sge[RDS_IB_MAX_SGE];
unsigned long s_queued;
@@ -92,10 +105,11 @@ struct rds_ib_connection {
/* tx */
struct rds_ib_work_ring i_send_ring;
- struct rds_message *i_rm;
+ struct rm_data_op *i_data_op;
struct rds_header *i_send_hdrs;
u64 i_send_hdrs_dma;
struct rds_ib_send_work *i_sends;
+ atomic_t i_signaled_sends;
/* rx */
struct tasklet_struct i_recv_tasklet;
@@ -106,8 +120,9 @@ struct rds_ib_connection {
struct rds_header *i_recv_hdrs;
u64 i_recv_hdrs_dma;
struct rds_ib_recv_work *i_recvs;
- struct rds_page_frag i_frag;
u64 i_ack_recv; /* last ACK received */
+ struct rds_ib_refill_cache i_cache_incs;
+ struct rds_ib_refill_cache i_cache_frags;
/* sending acks */
unsigned long i_ack_flags;
@@ -138,7 +153,6 @@ struct rds_ib_connection {
/* Batched completions */
unsigned int i_unsignaled_wrs;
- long i_unsignaled_bytes;
};
/* This assumes that atomic_t is at least 32 bits */
@@ -164,9 +178,17 @@ struct rds_ib_device {
unsigned int max_fmrs;
int max_sge;
unsigned int max_wrs;
+ unsigned int max_initiator_depth;
+ unsigned int max_responder_resources;
spinlock_t spinlock; /* protect the above */
+ atomic_t refcount;
+ struct work_struct free_work;
};
+#define pcidev_to_node(pcidev) pcibus_to_node(pcidev->bus)
+#define ibdev_to_node(ibdev) pcidev_to_node(to_pci_dev(ibdev->dma_device))
+#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)
+
/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT 0
#define IB_ACK_REQUESTED 1
@@ -202,6 +224,8 @@ struct rds_ib_statistics {
uint64_t s_ib_rdma_mr_pool_flush;
uint64_t s_ib_rdma_mr_pool_wait;
uint64_t s_ib_rdma_mr_pool_depleted;
+ uint64_t s_ib_atomic_cswp;
+ uint64_t s_ib_atomic_fadd;
};
extern struct workqueue_struct *rds_ib_wq;
@@ -243,6 +267,8 @@ static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
extern struct rds_transport rds_ib_transport;
extern void rds_ib_add_one(struct ib_device *device);
extern void rds_ib_remove_one(struct ib_device *device);
+struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device);
+void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
extern struct ib_client rds_ib_client;
extern unsigned int fmr_pool_size;
@@ -258,7 +284,7 @@ void rds_ib_conn_free(void *arg);
int rds_ib_conn_connect(struct rds_connection *conn);
void rds_ib_conn_shutdown(struct rds_connection *conn);
void rds_ib_state_change(struct sock *sk);
-int __init rds_ib_listen_init(void);
+int rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
@@ -275,15 +301,7 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn,
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
-void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock);
-static inline void rds_ib_destroy_nodev_conns(void)
-{
- __rds_ib_destroy_conns(&ib_nodev_conns, &ib_nodev_conns_lock);
-}
-static inline void rds_ib_destroy_conns(struct rds_ib_device *rds_ibdev)
-{
- __rds_ib_destroy_conns(&rds_ibdev->conn_list, &rds_ibdev->spinlock);
-}
+void rds_ib_destroy_nodev_conns(void);
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *);
void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo);
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
@@ -292,14 +310,16 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
void rds_ib_sync_mr(void *trans_private, int dir);
void rds_ib_free_mr(void *trans_private, int invalidate);
void rds_ib_flush_mrs(void);
+int rds_ib_fmr_init(void);
+void rds_ib_fmr_exit(void);
/* ib_recv.c */
-int __init rds_ib_recv_init(void);
+int rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv(struct rds_connection *conn);
-int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
- gfp_t page_gfp, int prefill);
-void rds_ib_inc_purge(struct rds_incoming *inc);
+int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
+void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
+void rds_ib_recv_refill(struct rds_connection *conn, int prefill);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
size_t size);
@@ -325,17 +345,19 @@ u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;
/* ib_send.c */
+char *rds_ib_wc_status_str(enum ib_wc_status status);
void rds_ib_xmit_complete(struct rds_connection *conn);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
-int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op);
+int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
u32 *adv_credits, int need_posted, int max_posted);
+int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);
/* ib_stats.c */
DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
@@ -344,7 +366,7 @@ unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
unsigned int avail);
/* ib_sysctl.c */
-int __init rds_ib_sysctl_init(void);
+int rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
@@ -354,28 +376,4 @@ extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;
extern ctl_table rds_ib_sysctl_table[];
-/*
- * Helper functions for getting/setting the header and data SGEs in
- * RDS packets (not RDMA)
- *
- * From version 3.1 onwards, header is in front of data in the sge.
- */
-static inline struct ib_sge *
-rds_ib_header_sge(struct rds_ib_connection *ic, struct ib_sge *sge)
-{
- if (ic->conn->c_version > RDS_PROTOCOL_3_0)
- return &sge[0];
- else
- return &sge[1];
-}
-
-static inline struct ib_sge *
-rds_ib_data_sge(struct rds_ib_connection *ic, struct ib_sge *sge)
-{
- if (ic->conn->c_version > RDS_PROTOCOL_3_0)
- return &sge[1];
- else
- return &sge[0];
-}
-
#endif
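
The pcidev_to_node()/ibdev_to_node()/rdsibdev_to_node() macros added above resolve an IB device to the NUMA node of its PCI parent so that per-connection memory can be allocated close to the HCA. A minimal sketch of the intended use, with invented demo_* wrappers around the real vmalloc_node() and kzalloc_node() allocators:

#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

/* Ring-sized allocation placed on the HCA's node (cf. rds_ib_setup_qp()). */
static void *demo_alloc_ring(struct ib_device *dev, size_t bytes)
{
        return vmalloc_node(bytes, ibdev_to_node(dev));
}

/* Small control structure placed on the HCA's node (cf. rds_ib_alloc_fmr()). */
static void *demo_alloc_ctrl(struct ib_device *dev, size_t bytes, gfp_t gfp)
{
        return kzalloc_node(bytes, gfp, ibdev_to_node(dev));
}
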
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index f68832798db2..ee369d201a65 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -38,6 +38,36 @@
#include "rds.h"
#include "ib.h"
+static char *rds_ib_event_type_strings[] = {
+#define RDS_IB_EVENT_STRING(foo) \
+ [IB_EVENT_##foo] = __stringify(IB_EVENT_##foo)
+ RDS_IB_EVENT_STRING(CQ_ERR),
+ RDS_IB_EVENT_STRING(QP_FATAL),
+ RDS_IB_EVENT_STRING(QP_REQ_ERR),
+ RDS_IB_EVENT_STRING(QP_ACCESS_ERR),
+ RDS_IB_EVENT_STRING(COMM_EST),
+ RDS_IB_EVENT_STRING(SQ_DRAINED),
+ RDS_IB_EVENT_STRING(PATH_MIG),
+ RDS_IB_EVENT_STRING(PATH_MIG_ERR),
+ RDS_IB_EVENT_STRING(DEVICE_FATAL),
+ RDS_IB_EVENT_STRING(PORT_ACTIVE),
+ RDS_IB_EVENT_STRING(PORT_ERR),
+ RDS_IB_EVENT_STRING(LID_CHANGE),
+ RDS_IB_EVENT_STRING(PKEY_CHANGE),
+ RDS_IB_EVENT_STRING(SM_CHANGE),
+ RDS_IB_EVENT_STRING(SRQ_ERR),
+ RDS_IB_EVENT_STRING(SRQ_LIMIT_REACHED),
+ RDS_IB_EVENT_STRING(QP_LAST_WQE_REACHED),
+ RDS_IB_EVENT_STRING(CLIENT_REREGISTER),
+#undef RDS_IB_EVENT_STRING
+};
+
+static char *rds_ib_event_str(enum ib_event_type type)
+{
+ return rds_str_array(rds_ib_event_type_strings,
+ ARRAY_SIZE(rds_ib_event_type_strings), type);
+};
+
/*
* Set the selected protocol version
*/
@@ -95,7 +125,6 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
{
const struct rds_ib_connect_private *dp = NULL;
struct rds_ib_connection *ic = conn->c_transport_data;
- struct rds_ib_device *rds_ibdev;
struct ib_qp_attr qp_attr;
int err;
@@ -111,11 +140,21 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
}
}
- printk(KERN_NOTICE "RDS/IB: connected to %pI4 version %u.%u%s\n",
- &conn->c_faddr,
- RDS_PROTOCOL_MAJOR(conn->c_version),
- RDS_PROTOCOL_MINOR(conn->c_version),
- ic->i_flowctl ? ", flow control" : "");
+ if (conn->c_version < RDS_PROTOCOL(3,1)) {
+ printk(KERN_NOTICE "RDS/IB: Connection to %pI4 version %u.%u failed,"
+ " no longer supported\n",
+ &conn->c_faddr,
+ RDS_PROTOCOL_MAJOR(conn->c_version),
+ RDS_PROTOCOL_MINOR(conn->c_version));
+ rds_conn_destroy(conn);
+ return;
+ } else {
+ printk(KERN_NOTICE "RDS/IB: connected to %pI4 version %u.%u%s\n",
+ &conn->c_faddr,
+ RDS_PROTOCOL_MAJOR(conn->c_version),
+ RDS_PROTOCOL_MINOR(conn->c_version),
+ ic->i_flowctl ? ", flow control" : "");
+ }
/*
* Init rings and fill recv. this needs to wait until protocol negotiation
@@ -125,7 +164,7 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
rds_ib_recv_init_ring(ic);
/* Post receive buffers - as a side effect, this will update
* the posted credit count. */
- rds_ib_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 1);
+ rds_ib_recv_refill(conn, 1);
/* Tune RNR behavior */
rds_ib_tune_rnr(ic, &qp_attr);
@@ -135,12 +174,11 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
if (err)
printk(KERN_NOTICE "ib_modify_qp(IB_QP_STATE, RTS): err=%d\n", err);
- /* update ib_device with this local ipaddr & conn */
- rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
- err = rds_ib_update_ipaddr(rds_ibdev, conn->c_laddr);
+ /* update ib_device with this local ipaddr */
+ err = rds_ib_update_ipaddr(ic->rds_ibdev, conn->c_laddr);
if (err)
- printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n", err);
- rds_ib_add_conn(rds_ibdev, conn);
+ printk(KERN_ERR "rds_ib_update_ipaddr failed (%d)\n",
+ err);
/* If the peer gave us the last packet it saw, process this as if
* we had received a regular ACK. */
@@ -153,18 +191,23 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
struct rdma_conn_param *conn_param,
struct rds_ib_connect_private *dp,
- u32 protocol_version)
+ u32 protocol_version,
+ u32 max_responder_resources,
+ u32 max_initiator_depth)
{
+ struct rds_ib_connection *ic = conn->c_transport_data;
+ struct rds_ib_device *rds_ibdev = ic->rds_ibdev;
+
memset(conn_param, 0, sizeof(struct rdma_conn_param));
- /* XXX tune these? */
- conn_param->responder_resources = 1;
- conn_param->initiator_depth = 1;
+
+ conn_param->responder_resources =
+ min_t(u32, rds_ibdev->max_responder_resources, max_responder_resources);
+ conn_param->initiator_depth =
+ min_t(u32, rds_ibdev->max_initiator_depth, max_initiator_depth);
conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7);
conn_param->rnr_retry_count = 7;
if (dp) {
- struct rds_ib_connection *ic = conn->c_transport_data;
-
memset(dp, 0, sizeof(*dp));
dp->dp_saddr = conn->c_laddr;
dp->dp_daddr = conn->c_faddr;
@@ -189,7 +232,8 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
static void rds_ib_cq_event_handler(struct ib_event *event, void *data)
{
- rdsdebug("event %u data %p\n", event->event, data);
+ rdsdebug("event %u (%s) data %p\n",
+ event->event, rds_ib_event_str(event->event), data);
}
static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
@@ -197,16 +241,18 @@ static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
struct rds_connection *conn = data;
struct rds_ib_connection *ic = conn->c_transport_data;
- rdsdebug("conn %p ic %p event %u\n", conn, ic, event->event);
+ rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event,
+ rds_ib_event_str(event->event));
switch (event->event) {
case IB_EVENT_COMM_EST:
rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
break;
default:
- rdsdebug("Fatal QP Event %u "
+ rdsdebug("Fatal QP Event %u (%s) "
"- connection %pI4->%pI4, reconnecting\n",
- event->event, &conn->c_laddr, &conn->c_faddr);
+ event->event, rds_ib_event_str(event->event),
+ &conn->c_laddr, &conn->c_faddr);
rds_conn_drop(conn);
break;
}
@@ -224,18 +270,16 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
struct rds_ib_device *rds_ibdev;
int ret;
- /* rds_ib_add_one creates a rds_ib_device object per IB device,
- * and allocates a protection domain, memory range and FMR pool
- * for each. If that fails for any reason, it will not register
- * the rds_ibdev at all.
+ /*
+ * It's normal to see a null device if an incoming connection races
+ * with device removal, so we don't print a warning.
*/
- rds_ibdev = ib_get_client_data(dev, &rds_ib_client);
- if (rds_ibdev == NULL) {
- if (printk_ratelimit())
- printk(KERN_NOTICE "RDS/IB: No client_data for device %s\n",
- dev->name);
+ rds_ibdev = rds_ib_get_client_data(dev);
+ if (!rds_ibdev)
return -EOPNOTSUPP;
- }
+
+ /* add the conn now so that connection establishment has the dev */
+ rds_ib_add_conn(rds_ibdev, conn);
if (rds_ibdev->max_wrs < ic->i_send_ring.w_nr + 1)
rds_ib_ring_resize(&ic->i_send_ring, rds_ibdev->max_wrs - 1);
@@ -306,7 +350,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
ic->i_send_ring.w_nr *
sizeof(struct rds_header),
&ic->i_send_hdrs_dma, GFP_KERNEL);
- if (ic->i_send_hdrs == NULL) {
+ if (!ic->i_send_hdrs) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent send failed\n");
goto out;
@@ -316,7 +360,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
ic->i_recv_ring.w_nr *
sizeof(struct rds_header),
&ic->i_recv_hdrs_dma, GFP_KERNEL);
- if (ic->i_recv_hdrs == NULL) {
+ if (!ic->i_recv_hdrs) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent recv failed\n");
goto out;
@@ -324,22 +368,24 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
&ic->i_ack_dma, GFP_KERNEL);
- if (ic->i_ack == NULL) {
+ if (!ic->i_ack) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent ack failed\n");
goto out;
}
- ic->i_sends = vmalloc(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work));
- if (ic->i_sends == NULL) {
+ ic->i_sends = vmalloc_node(ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work),
+ ibdev_to_node(dev));
+ if (!ic->i_sends) {
ret = -ENOMEM;
rdsdebug("send allocation failed\n");
goto out;
}
memset(ic->i_sends, 0, ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work));
- ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work));
- if (ic->i_recvs == NULL) {
+ ic->i_recvs = vmalloc_node(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work),
+ ibdev_to_node(dev));
+ if (!ic->i_recvs) {
ret = -ENOMEM;
rdsdebug("recv allocation failed\n");
goto out;
@@ -352,6 +398,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
ic->i_send_cq, ic->i_recv_cq);
out:
+ rds_ib_dev_put(rds_ibdev);
return ret;
}
@@ -409,7 +456,7 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
struct rds_ib_connection *ic = NULL;
struct rdma_conn_param conn_param;
u32 version;
- int err, destroy = 1;
+ int err = 1, destroy = 1;
/* Check whether the remote protocol version matches ours. */
version = rds_ib_protocol_compatible(event);
@@ -448,7 +495,6 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
/* Wait and see - our connect may still be succeeding */
rds_ib_stats_inc(s_ib_connect_raced);
}
- mutex_unlock(&conn->c_cm_lock);
goto out;
}
@@ -475,24 +521,23 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
err = rds_ib_setup_qp(conn);
if (err) {
rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
- mutex_unlock(&conn->c_cm_lock);
goto out;
}
- rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version);
+ rds_ib_cm_fill_conn_param(conn, &conn_param, &dp_rep, version,
+ event->param.conn.responder_resources,
+ event->param.conn.initiator_depth);
/* rdma_accept() calls rdma_reject() internally if it fails */
err = rdma_accept(cm_id, &conn_param);
- mutex_unlock(&conn->c_cm_lock);
- if (err) {
+ if (err)
rds_ib_conn_error(conn, "rdma_accept failed (%d)\n", err);
- goto out;
- }
-
- return 0;
out:
- rdma_reject(cm_id, NULL, 0);
+ if (conn)
+ mutex_unlock(&conn->c_cm_lock);
+ if (err)
+ rdma_reject(cm_id, NULL, 0);
return destroy;
}
@@ -516,8 +561,8 @@ int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id)
goto out;
}
- rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION);
-
+ rds_ib_cm_fill_conn_param(conn, &conn_param, &dp, RDS_PROTOCOL_VERSION,
+ UINT_MAX, UINT_MAX);
ret = rdma_connect(cm_id, &conn_param);
if (ret)
rds_ib_conn_error(conn, "rdma_connect failed (%d)\n", ret);
@@ -601,9 +646,19 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
ic->i_cm_id, err);
}
+ /*
+ * We want to wait for tx and rx completion to finish
+ * before we tear down the connection, but we have to be
+ * careful not to get stuck waiting on a send ring that
+ * only has unsignaled sends in it. We've shut down new
+ * sends before getting here, so by waiting for signaled
+ * sends to complete we ensure that there will be no
+ * more tx processing.
+ */
wait_event(rds_ib_ring_empty_wait,
- rds_ib_ring_empty(&ic->i_send_ring) &&
- rds_ib_ring_empty(&ic->i_recv_ring));
+ rds_ib_ring_empty(&ic->i_recv_ring) &&
+ (atomic_read(&ic->i_signaled_sends) == 0));
+ tasklet_kill(&ic->i_recv_tasklet);
if (ic->i_send_hdrs)
ib_dma_free_coherent(dev,
@@ -654,9 +709,12 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
BUG_ON(ic->rds_ibdev);
/* Clear pending transmit */
- if (ic->i_rm) {
- rds_message_put(ic->i_rm);
- ic->i_rm = NULL;
+ if (ic->i_data_op) {
+ struct rds_message *rm;
+
+ rm = container_of(ic->i_data_op, struct rds_message, data);
+ rds_message_put(rm);
+ ic->i_data_op = NULL;
}
/* Clear the ACK state */
@@ -690,12 +748,19 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
struct rds_ib_connection *ic;
unsigned long flags;
+ int ret;
/* XXX too lazy? */
ic = kzalloc(sizeof(struct rds_ib_connection), GFP_KERNEL);
- if (ic == NULL)
+ if (!ic)
return -ENOMEM;
+ ret = rds_ib_recv_alloc_caches(ic);
+ if (ret) {
+ kfree(ic);
+ return ret;
+ }
+
INIT_LIST_HEAD(&ic->ib_node);
tasklet_init(&ic->i_recv_tasklet, rds_ib_recv_tasklet_fn,
(unsigned long) ic);
@@ -703,6 +768,7 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
#ifndef KERNEL_HAS_ATOMIC64
spin_lock_init(&ic->i_ack_lock);
#endif
+ atomic_set(&ic->i_signaled_sends, 0);
/*
* rds_ib_conn_shutdown() waits for these to be emptied so they
@@ -744,6 +810,8 @@ void rds_ib_conn_free(void *arg)
list_del(&ic->ib_node);
spin_unlock_irq(lock_ptr);
+ rds_ib_recv_free_caches(ic);
+
kfree(ic);
}
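
The shutdown ordering described in the comment above the wait_event() in rds_ib_conn_shutdown() boils down to a count-and-wait on signaled sends: bump a counter when a signaled work request is posted, drop it when its completion arrives, and block teardown until the counter drains. A standalone sketch under those assumptions (the demo_* names are invented; wait_event(), wake_up() and the atomic ops are the real primitives):

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/types.h>

static atomic_t demo_signaled_sends = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(demo_empty_wait);

/* Post path: only sends that will generate a completion are counted. */
static void demo_send_posted(bool signaled)
{
        if (signaled)
                atomic_inc(&demo_signaled_sends);
}

/* Completion handler: drop the count and kick the waiter when it drains. */
static void demo_send_completed(bool signaled)
{
        if (signaled && atomic_dec_and_test(&demo_signaled_sends))
                wake_up(&demo_empty_wait);
}

/* Teardown: new sends are already blocked, so once the signaled sends
 * have completed there can be no further tx processing. */
static void demo_wait_for_sends(void)
{
        wait_event(demo_empty_wait,
                   atomic_read(&demo_signaled_sends) == 0);
}
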
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index a54cd63f9e35..b5a88415a18e 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -32,11 +32,16 @@
*/
#include <linux/kernel.h>
#include <linux/slab.h>
+#include <linux/rculist.h>
#include "rds.h"
-#include "rdma.h"
#include "ib.h"
+#include "xlist.h"
+struct workqueue_struct *rds_ib_fmr_wq;
+
+static DEFINE_PER_CPU(unsigned long, clean_list_grace);
+#define CLEAN_LIST_BUSY_BIT 0
/*
* This is stored as mr->r_trans_private.
@@ -45,7 +50,11 @@ struct rds_ib_mr {
struct rds_ib_device *device;
struct rds_ib_mr_pool *pool;
struct ib_fmr *fmr;
- struct list_head list;
+
+ struct xlist_head xlist;
+
+ /* unmap_list is for freeing */
+ struct list_head unmap_list;
unsigned int remap_count;
struct scatterlist *sg;
@@ -59,14 +68,16 @@ struct rds_ib_mr {
*/
struct rds_ib_mr_pool {
struct mutex flush_lock; /* serialize fmr invalidate */
- struct work_struct flush_worker; /* flush worker */
+ struct delayed_work flush_worker; /* flush worker */
- spinlock_t list_lock; /* protect variables below */
atomic_t item_count; /* total # of MRs */
atomic_t dirty_count; /* # dirty of MRs */
- struct list_head drop_list; /* MRs that have reached their max_maps limit */
- struct list_head free_list; /* unused MRs */
- struct list_head clean_list; /* unused & unamapped MRs */
+
+ struct xlist_head drop_list; /* MRs that have reached their max_maps limit */
+ struct xlist_head free_list; /* unused MRs */
+ struct xlist_head clean_list; /* global unused & unmapped MRs */
+ wait_queue_head_t flush_wait;
+
atomic_t free_pinned; /* memory pinned by free MRs */
unsigned long max_items;
unsigned long max_items_soft;
@@ -74,7 +85,7 @@ struct rds_ib_mr_pool {
struct ib_fmr_attr fmr_attr;
};
-static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all);
+static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);
@@ -83,16 +94,17 @@ static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
struct rds_ib_device *rds_ibdev;
struct rds_ib_ipaddr *i_ipaddr;
- list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
- spin_lock_irq(&rds_ibdev->spinlock);
- list_for_each_entry(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(rds_ibdev, &rds_ib_devices, list) {
+ list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
if (i_ipaddr->ipaddr == ipaddr) {
- spin_unlock_irq(&rds_ibdev->spinlock);
+ atomic_inc(&rds_ibdev->refcount);
+ rcu_read_unlock();
return rds_ibdev;
}
}
- spin_unlock_irq(&rds_ibdev->spinlock);
}
+ rcu_read_unlock();
return NULL;
}
@@ -108,7 +120,7 @@ static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
i_ipaddr->ipaddr = ipaddr;
spin_lock_irq(&rds_ibdev->spinlock);
- list_add_tail(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
+ list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
spin_unlock_irq(&rds_ibdev->spinlock);
return 0;
@@ -116,17 +128,24 @@ static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
- struct rds_ib_ipaddr *i_ipaddr, *next;
+ struct rds_ib_ipaddr *i_ipaddr;
+ struct rds_ib_ipaddr *to_free = NULL;
+
spin_lock_irq(&rds_ibdev->spinlock);
- list_for_each_entry_safe(i_ipaddr, next, &rds_ibdev->ipaddr_list, list) {
+ list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
if (i_ipaddr->ipaddr == ipaddr) {
- list_del(&i_ipaddr->list);
- kfree(i_ipaddr);
+ list_del_rcu(&i_ipaddr->list);
+ to_free = i_ipaddr;
break;
}
}
spin_unlock_irq(&rds_ibdev->spinlock);
+
+ if (to_free) {
+ synchronize_rcu();
+ kfree(to_free);
+ }
}
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
@@ -134,8 +153,10 @@ int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
struct rds_ib_device *rds_ibdev_old;
rds_ibdev_old = rds_ib_get_device(ipaddr);
- if (rds_ibdev_old)
+ if (rds_ibdev_old) {
rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
+ rds_ib_dev_put(rds_ibdev_old);
+ }
return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
}
@@ -150,12 +171,13 @@ void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *con
BUG_ON(list_empty(&ic->ib_node));
list_del(&ic->ib_node);
- spin_lock_irq(&rds_ibdev->spinlock);
+ spin_lock(&rds_ibdev->spinlock);
list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
- spin_unlock_irq(&rds_ibdev->spinlock);
+ spin_unlock(&rds_ibdev->spinlock);
spin_unlock_irq(&ib_nodev_conns_lock);
ic->rds_ibdev = rds_ibdev;
+ atomic_inc(&rds_ibdev->refcount);
}
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
@@ -175,18 +197,18 @@ void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *
spin_unlock(&ib_nodev_conns_lock);
ic->rds_ibdev = NULL;
+ rds_ib_dev_put(rds_ibdev);
}
-void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock)
+void rds_ib_destroy_nodev_conns(void)
{
struct rds_ib_connection *ic, *_ic;
LIST_HEAD(tmp_list);
/* avoid calling conn_destroy with irqs off */
- spin_lock_irq(list_lock);
- list_splice(list, &tmp_list);
- INIT_LIST_HEAD(list);
- spin_unlock_irq(list_lock);
+ spin_lock_irq(&ib_nodev_conns_lock);
+ list_splice(&ib_nodev_conns, &tmp_list);
+ spin_unlock_irq(&ib_nodev_conns_lock);
list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
rds_conn_destroy(ic->conn);
@@ -200,12 +222,12 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
if (!pool)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&pool->free_list);
- INIT_LIST_HEAD(&pool->drop_list);
- INIT_LIST_HEAD(&pool->clean_list);
+ INIT_XLIST_HEAD(&pool->free_list);
+ INIT_XLIST_HEAD(&pool->drop_list);
+ INIT_XLIST_HEAD(&pool->clean_list);
mutex_init(&pool->flush_lock);
- spin_lock_init(&pool->list_lock);
- INIT_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
+ init_waitqueue_head(&pool->flush_wait);
+ INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
pool->fmr_attr.max_pages = fmr_message_size;
pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
@@ -233,34 +255,60 @@ void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_co
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
- flush_workqueue(rds_wq);
- rds_ib_flush_mr_pool(pool, 1);
+ cancel_delayed_work_sync(&pool->flush_worker);
+ rds_ib_flush_mr_pool(pool, 1, NULL);
WARN_ON(atomic_read(&pool->item_count));
WARN_ON(atomic_read(&pool->free_pinned));
kfree(pool);
}
+static void refill_local(struct rds_ib_mr_pool *pool, struct xlist_head *xl,
+ struct rds_ib_mr **ibmr_ret)
+{
+ struct xlist_head *ibmr_xl;
+ ibmr_xl = xlist_del_head_fast(xl);
+ *ibmr_ret = list_entry(ibmr_xl, struct rds_ib_mr, xlist);
+}
+
static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
struct rds_ib_mr *ibmr = NULL;
- unsigned long flags;
+ struct xlist_head *ret;
+ unsigned long *flag;
- spin_lock_irqsave(&pool->list_lock, flags);
- if (!list_empty(&pool->clean_list)) {
- ibmr = list_entry(pool->clean_list.next, struct rds_ib_mr, list);
- list_del_init(&ibmr->list);
- }
- spin_unlock_irqrestore(&pool->list_lock, flags);
+ preempt_disable();
+ flag = &__get_cpu_var(clean_list_grace);
+ set_bit(CLEAN_LIST_BUSY_BIT, flag);
+ ret = xlist_del_head(&pool->clean_list);
+ if (ret)
+ ibmr = list_entry(ret, struct rds_ib_mr, xlist);
+ clear_bit(CLEAN_LIST_BUSY_BIT, flag);
+ preempt_enable();
return ibmr;
}
+static inline void wait_clean_list_grace(void)
+{
+ int cpu;
+ unsigned long *flag;
+
+ for_each_online_cpu(cpu) {
+ flag = &per_cpu(clean_list_grace, cpu);
+ while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
+ cpu_relax();
+ }
+}
+
static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
{
struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
struct rds_ib_mr *ibmr = NULL;
int err = 0, iter = 0;
+ if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
+ queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
+
while (1) {
ibmr = rds_ib_reuse_fmr(pool);
if (ibmr)
@@ -287,19 +335,24 @@ static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
/* We do have some empty MRs. Flush them out. */
rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
- rds_ib_flush_mr_pool(pool, 0);
+ rds_ib_flush_mr_pool(pool, 0, &ibmr);
+ if (ibmr)
+ return ibmr;
}
- ibmr = kzalloc(sizeof(*ibmr), GFP_KERNEL);
+ ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, rdsibdev_to_node(rds_ibdev));
if (!ibmr) {
err = -ENOMEM;
goto out_no_cigar;
}
+ memset(ibmr, 0, sizeof(*ibmr));
+
ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
(IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_READ |
- IB_ACCESS_REMOTE_WRITE),
+ IB_ACCESS_REMOTE_WRITE|
+ IB_ACCESS_REMOTE_ATOMIC),
&pool->fmr_attr);
if (IS_ERR(ibmr->fmr)) {
err = PTR_ERR(ibmr->fmr);
@@ -367,7 +420,8 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm
if (page_cnt > fmr_message_size)
return -EINVAL;
- dma_pages = kmalloc(sizeof(u64) * page_cnt, GFP_ATOMIC);
+ dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
+ rdsibdev_to_node(rds_ibdev));
if (!dma_pages)
return -ENOMEM;
@@ -441,7 +495,7 @@ static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
/* FIXME we need a way to tell a r/w MR
* from a r/o MR */
- BUG_ON(in_interrupt());
+ BUG_ON(irqs_disabled());
set_page_dirty(page);
put_page(page);
}
@@ -477,33 +531,109 @@ static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int fr
}
/*
+ * given an xlist of mrs, put them all into the list_head for more processing
+ */
+static void xlist_append_to_list(struct xlist_head *xlist, struct list_head *list)
+{
+ struct rds_ib_mr *ibmr;
+ struct xlist_head splice;
+ struct xlist_head *cur;
+ struct xlist_head *next;
+
+ splice.next = NULL;
+ xlist_splice(xlist, &splice);
+ cur = splice.next;
+ while (cur) {
+ next = cur->next;
+ ibmr = list_entry(cur, struct rds_ib_mr, xlist);
+ list_add_tail(&ibmr->unmap_list, list);
+ cur = next;
+ }
+}
+
+/*
+ * this takes a list head of mrs and turns it into an xlist of clusters.
+ * each cluster has an xlist of MR_CLUSTER_SIZE mrs that are ready for
+ * reuse.
+ */
+static void list_append_to_xlist(struct rds_ib_mr_pool *pool,
+ struct list_head *list, struct xlist_head *xlist,
+ struct xlist_head **tail_ret)
+{
+ struct rds_ib_mr *ibmr;
+ struct xlist_head *cur_mr = xlist;
+ struct xlist_head *tail_mr = NULL;
+
+ list_for_each_entry(ibmr, list, unmap_list) {
+ tail_mr = &ibmr->xlist;
+ tail_mr->next = NULL;
+ cur_mr->next = tail_mr;
+ cur_mr = tail_mr;
+ }
+ *tail_ret = tail_mr;
+}
+
+/*
* Flush our pool of MRs.
* At a minimum, all currently unused MRs are unmapped.
* If the number of MRs allocated exceeds the limit, we also try
* to free as many MRs as needed to get back to this limit.
*/
-static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
+static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
+ int free_all, struct rds_ib_mr **ibmr_ret)
{
struct rds_ib_mr *ibmr, *next;
+ struct xlist_head clean_xlist;
+ struct xlist_head *clean_tail;
LIST_HEAD(unmap_list);
LIST_HEAD(fmr_list);
unsigned long unpinned = 0;
- unsigned long flags;
unsigned int nfreed = 0, ncleaned = 0, free_goal;
int ret = 0;
rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);
- mutex_lock(&pool->flush_lock);
+ if (ibmr_ret) {
+ DEFINE_WAIT(wait);
+ while (!mutex_trylock(&pool->flush_lock)) {
+ ibmr = rds_ib_reuse_fmr(pool);
+ if (ibmr) {
+ *ibmr_ret = ibmr;
+ finish_wait(&pool->flush_wait, &wait);
+ goto out_nolock;
+ }
+
+ prepare_to_wait(&pool->flush_wait, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (xlist_empty(&pool->clean_list))
+ schedule();
+
+ ibmr = rds_ib_reuse_fmr(pool);
+ if (ibmr) {
+ *ibmr_ret = ibmr;
+ finish_wait(&pool->flush_wait, &wait);
+ goto out_nolock;
+ }
+ }
+ finish_wait(&pool->flush_wait, &wait);
+ } else
+ mutex_lock(&pool->flush_lock);
+
+ if (ibmr_ret) {
+ ibmr = rds_ib_reuse_fmr(pool);
+ if (ibmr) {
+ *ibmr_ret = ibmr;
+ goto out;
+ }
+ }
- spin_lock_irqsave(&pool->list_lock, flags);
/* Get the list of all MRs to be dropped. Ordering matters -
- * we want to put drop_list ahead of free_list. */
- list_splice_init(&pool->free_list, &unmap_list);
- list_splice_init(&pool->drop_list, &unmap_list);
+ * we want to put drop_list ahead of free_list.
+ */
+ xlist_append_to_list(&pool->drop_list, &unmap_list);
+ xlist_append_to_list(&pool->free_list, &unmap_list);
if (free_all)
- list_splice_init(&pool->clean_list, &unmap_list);
- spin_unlock_irqrestore(&pool->list_lock, flags);
+ xlist_append_to_list(&pool->clean_list, &unmap_list);
free_goal = rds_ib_flush_goal(pool, free_all);
@@ -511,19 +641,20 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
goto out;
/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
- list_for_each_entry(ibmr, &unmap_list, list)
+ list_for_each_entry(ibmr, &unmap_list, unmap_list)
list_add(&ibmr->fmr->list, &fmr_list);
+
ret = ib_unmap_fmr(&fmr_list);
if (ret)
printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);
/* Now we can destroy the DMA mapping and unpin any pages */
- list_for_each_entry_safe(ibmr, next, &unmap_list, list) {
+ list_for_each_entry_safe(ibmr, next, &unmap_list, unmap_list) {
unpinned += ibmr->sg_len;
__rds_ib_teardown_mr(ibmr);
if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
rds_ib_stats_inc(s_ib_rdma_mr_free);
- list_del(&ibmr->list);
+ list_del(&ibmr->unmap_list);
ib_dealloc_fmr(ibmr->fmr);
kfree(ibmr);
nfreed++;
@@ -531,9 +662,27 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
ncleaned++;
}
- spin_lock_irqsave(&pool->list_lock, flags);
- list_splice(&unmap_list, &pool->clean_list);
- spin_unlock_irqrestore(&pool->list_lock, flags);
+ if (!list_empty(&unmap_list)) {
+ /* we have to make sure that none of the things we're about
+ * to put on the clean list would race with other cpus trying
+ * to pull items off. The xlist would explode if we managed to
+ * remove something from the clean list and then add it back again
+ * while another CPU was spinning on that same item in xlist_del_head.
+ *
+ * This is pretty unlikely, but just in case wait for an xlist grace period
+ * here before adding anything back into the clean list.
+ */
+ wait_clean_list_grace();
+
+ list_append_to_xlist(pool, &unmap_list, &clean_xlist, &clean_tail);
+ if (ibmr_ret)
+ refill_local(pool, &clean_xlist, ibmr_ret);
+
+ /* refill_local may have emptied our list */
+ if (!xlist_empty(&clean_xlist))
+ xlist_add(clean_xlist.next, clean_tail, &pool->clean_list);
+
+ }
atomic_sub(unpinned, &pool->free_pinned);
atomic_sub(ncleaned, &pool->dirty_count);
@@ -541,14 +690,35 @@ static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all)
out:
mutex_unlock(&pool->flush_lock);
+ if (waitqueue_active(&pool->flush_wait))
+ wake_up(&pool->flush_wait);
+out_nolock:
return ret;
}
+int rds_ib_fmr_init(void)
+{
+ rds_ib_fmr_wq = create_workqueue("rds_fmr_flushd");
+ if (!rds_ib_fmr_wq)
+ return -ENOMEM;
+ return 0;
+}
+
+/*
+ * By the time this is called all the IB devices should have been torn down and
+ * had their pools freed. As each pool is freed its work struct is waited on,
+ * so the pool flushing work queue should be idle by the time we get here.
+ */
+void rds_ib_fmr_exit(void)
+{
+ destroy_workqueue(rds_ib_fmr_wq);
+}
+
static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
- struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker);
+ struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);
- rds_ib_flush_mr_pool(pool, 0);
+ rds_ib_flush_mr_pool(pool, 0, NULL);
}
void rds_ib_free_mr(void *trans_private, int invalidate)
@@ -556,47 +726,49 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
struct rds_ib_mr *ibmr = trans_private;
struct rds_ib_device *rds_ibdev = ibmr->device;
struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
- unsigned long flags;
rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);
/* Return it to the pool's free list */
- spin_lock_irqsave(&pool->list_lock, flags);
if (ibmr->remap_count >= pool->fmr_attr.max_maps)
- list_add(&ibmr->list, &pool->drop_list);
+ xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->drop_list);
else
- list_add(&ibmr->list, &pool->free_list);
+ xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->free_list);
atomic_add(ibmr->sg_len, &pool->free_pinned);
atomic_inc(&pool->dirty_count);
- spin_unlock_irqrestore(&pool->list_lock, flags);
/* If we've pinned too many pages, request a flush */
if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
atomic_read(&pool->dirty_count) >= pool->max_items / 10)
- queue_work(rds_wq, &pool->flush_worker);
+ queue_delayed_work(rds_ib_fmr_wq, &pool->flush_worker, 10);
if (invalidate) {
if (likely(!in_interrupt())) {
- rds_ib_flush_mr_pool(pool, 0);
+ rds_ib_flush_mr_pool(pool, 0, NULL);
} else {
/* We get here if the user created a MR marked
* as use_once and invalidate at the same time. */
- queue_work(rds_wq, &pool->flush_worker);
+ queue_delayed_work(rds_ib_fmr_wq,
+ &pool->flush_worker, 10);
}
}
+
+ rds_ib_dev_put(rds_ibdev);
}
void rds_ib_flush_mrs(void)
{
struct rds_ib_device *rds_ibdev;
+ down_read(&rds_ib_devices_lock);
list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
if (pool)
- rds_ib_flush_mr_pool(pool, 0);
+ rds_ib_flush_mr_pool(pool, 0, NULL);
}
+ up_read(&rds_ib_devices_lock);
}
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
@@ -628,6 +800,7 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);
ibmr->device = rds_ibdev;
+ rds_ibdev = NULL;
out:
if (ret) {
@@ -635,5 +808,8 @@ void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
rds_ib_free_mr(ibmr, 0);
ibmr = ERR_PTR(ret);
}
+ if (rds_ibdev)
+ rds_ib_dev_put(rds_ibdev);
return ibmr;
}
+
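
The clean_list handling above leans on a small hand-rolled grace period: a CPU sets a per-cpu busy bit while it might be spinning inside xlist_del_head(), and the flush path spins until every CPU's bit is clear before recycling entries back onto the list. Stripped of the MR details, the pattern is roughly the following sketch (demo_* names are invented; the per-cpu, bitop and cpu_relax() primitives are real):

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/bitops.h>
#include <asm/processor.h>              /* cpu_relax() */

#define DEMO_BUSY_BIT   0

static DEFINE_PER_CPU(unsigned long, demo_grace);

/* Consumer: bracket the lock-free pop so the busy bit covers exactly
 * the window in which this CPU may spin on a list head. */
static void *demo_pop(void)
{
        unsigned long *flag;
        void *item;

        preempt_disable();
        flag = &__get_cpu_var(demo_grace);
        set_bit(DEMO_BUSY_BIT, flag);
        item = NULL;    /* here: the lock-free pop, e.g. xlist_del_head(&clean_list) */
        clear_bit(DEMO_BUSY_BIT, flag);
        preempt_enable();
        return item;
}

/* Recycler: before re-adding entries, wait until no CPU is inside the
 * window above, which is what wait_clean_list_grace() does. */
static void demo_wait_grace(void)
{
        int cpu;

        for_each_online_cpu(cpu)
                while (test_bit(DEMO_BUSY_BIT, &per_cpu(demo_grace, cpu)))
                        cpu_relax();
}
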
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index c74e9904a6b2..e29e0ca32f74 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -43,42 +43,6 @@ static struct kmem_cache *rds_ib_incoming_slab;
static struct kmem_cache *rds_ib_frag_slab;
static atomic_t rds_ib_allocation = ATOMIC_INIT(0);
-static void rds_ib_frag_drop_page(struct rds_page_frag *frag)
-{
- rdsdebug("frag %p page %p\n", frag, frag->f_page);
- __free_page(frag->f_page);
- frag->f_page = NULL;
-}
-
-static void rds_ib_frag_free(struct rds_page_frag *frag)
-{
- rdsdebug("frag %p page %p\n", frag, frag->f_page);
- BUG_ON(frag->f_page != NULL);
- kmem_cache_free(rds_ib_frag_slab, frag);
-}
-
-/*
- * We map a page at a time. Its fragments are posted in order. This
- * is called in fragment order as the fragments get send completion events.
- * Only the last frag in the page performs the unmapping.
- *
- * It's OK for ring cleanup to call this in whatever order it likes because
- * DMA is not in flight and so we can unmap while other ring entries still
- * hold page references in their frags.
- */
-static void rds_ib_recv_unmap_page(struct rds_ib_connection *ic,
- struct rds_ib_recv_work *recv)
-{
- struct rds_page_frag *frag = recv->r_frag;
-
- rdsdebug("recv %p frag %p page %p\n", recv, frag, frag->f_page);
- if (frag->f_mapped)
- ib_dma_unmap_page(ic->i_cm_id->device,
- frag->f_mapped,
- RDS_FRAG_SIZE, DMA_FROM_DEVICE);
- frag->f_mapped = 0;
-}
-
void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
{
struct rds_ib_recv_work *recv;
@@ -95,16 +59,161 @@ void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
recv->r_wr.sg_list = recv->r_sge;
recv->r_wr.num_sge = RDS_IB_RECV_SGE;
- sge = rds_ib_data_sge(ic, recv->r_sge);
+ sge = &recv->r_sge[0];
+ sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
+ sge->length = sizeof(struct rds_header);
+ sge->lkey = ic->i_mr->lkey;
+
+ sge = &recv->r_sge[1];
sge->addr = 0;
sge->length = RDS_FRAG_SIZE;
sge->lkey = ic->i_mr->lkey;
+ }
+}
- sge = rds_ib_header_sge(ic, recv->r_sge);
- sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
- sge->length = sizeof(struct rds_header);
- sge->lkey = ic->i_mr->lkey;
+/*
+ * The entire 'from' list, including the from element itself, is put on
+ * to the tail of the 'to' list.
+ */
+static void list_splice_entire_tail(struct list_head *from,
+ struct list_head *to)
+{
+ struct list_head *from_last = from->prev;
+
+ list_splice_tail(from_last, to);
+ list_add_tail(from_last, to);
+}
+
+static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache)
+{
+ struct list_head *tmp;
+
+ tmp = xchg(&cache->xfer, NULL);
+ if (tmp) {
+ if (cache->ready)
+ list_splice_entire_tail(tmp, cache->ready);
+ else
+ cache->ready = tmp;
+ }
+}
+
+static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache)
+{
+ struct rds_ib_cache_head *head;
+ int cpu;
+
+ cache->percpu = alloc_percpu(struct rds_ib_cache_head);
+ if (!cache->percpu)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu) {
+ head = per_cpu_ptr(cache->percpu, cpu);
+ head->first = NULL;
+ head->count = 0;
+ }
+ cache->xfer = NULL;
+ cache->ready = NULL;
+
+ return 0;
+}
+
+int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic)
+{
+ int ret;
+
+ ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs);
+ if (!ret) {
+ ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags);
+ if (ret)
+ free_percpu(ic->i_cache_incs.percpu);
}
+
+ return ret;
+}
+
+static void rds_ib_cache_splice_all_lists(struct rds_ib_refill_cache *cache,
+ struct list_head *caller_list)
+{
+ struct rds_ib_cache_head *head;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ head = per_cpu_ptr(cache->percpu, cpu);
+ if (head->first) {
+ list_splice_entire_tail(head->first, caller_list);
+ head->first = NULL;
+ }
+ }
+
+ if (cache->ready) {
+ list_splice_entire_tail(cache->ready, caller_list);
+ cache->ready = NULL;
+ }
+}
+
+void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
+{
+ struct rds_ib_incoming *inc;
+ struct rds_ib_incoming *inc_tmp;
+ struct rds_page_frag *frag;
+ struct rds_page_frag *frag_tmp;
+ LIST_HEAD(list);
+
+ rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
+ rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list);
+ free_percpu(ic->i_cache_incs.percpu);
+
+ list_for_each_entry_safe(inc, inc_tmp, &list, ii_cache_entry) {
+ list_del(&inc->ii_cache_entry);
+ WARN_ON(!list_empty(&inc->ii_frags));
+ kmem_cache_free(rds_ib_incoming_slab, inc);
+ }
+
+ rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
+ rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list);
+ free_percpu(ic->i_cache_frags.percpu);
+
+ list_for_each_entry_safe(frag, frag_tmp, &list, f_cache_entry) {
+ list_del(&frag->f_cache_entry);
+ WARN_ON(!list_empty(&frag->f_item));
+ kmem_cache_free(rds_ib_frag_slab, frag);
+ }
+}
+
+/* fwd decl */
+static void rds_ib_recv_cache_put(struct list_head *new_item,
+ struct rds_ib_refill_cache *cache);
+static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache);
+
+
+/* Recycle frag and attached recv buffer f_sg */
+static void rds_ib_frag_free(struct rds_ib_connection *ic,
+ struct rds_page_frag *frag)
+{
+ rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg));
+
+ rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
+}
+
+/* Recycle inc after freeing attached frags */
+void rds_ib_inc_free(struct rds_incoming *inc)
+{
+ struct rds_ib_incoming *ibinc;
+ struct rds_page_frag *frag;
+ struct rds_page_frag *pos;
+ struct rds_ib_connection *ic = inc->i_conn->c_transport_data;
+
+ ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
+
+ /* Free attached frags */
+ list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
+ list_del_init(&frag->f_item);
+ rds_ib_frag_free(ic, frag);
+ }
+ BUG_ON(!list_empty(&ibinc->ii_frags));
+
+ rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
+ rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs);
}
static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
@@ -115,10 +224,8 @@ static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
recv->r_ibinc = NULL;
}
if (recv->r_frag) {
- rds_ib_recv_unmap_page(ic, recv);
- if (recv->r_frag->f_page)
- rds_ib_frag_drop_page(recv->r_frag);
- rds_ib_frag_free(recv->r_frag);
+ ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
+ rds_ib_frag_free(ic, recv->r_frag);
recv->r_frag = NULL;
}
}
@@ -129,84 +236,111 @@ void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
for (i = 0; i < ic->i_recv_ring.w_nr; i++)
rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
-
- if (ic->i_frag.f_page)
- rds_ib_frag_drop_page(&ic->i_frag);
}
-static int rds_ib_recv_refill_one(struct rds_connection *conn,
- struct rds_ib_recv_work *recv,
- gfp_t kptr_gfp, gfp_t page_gfp)
+static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic,
+ gfp_t slab_mask)
{
- struct rds_ib_connection *ic = conn->c_transport_data;
- dma_addr_t dma_addr;
- struct ib_sge *sge;
- int ret = -ENOMEM;
+ struct rds_ib_incoming *ibinc;
+ struct list_head *cache_item;
+ int avail_allocs;
- if (recv->r_ibinc == NULL) {
- if (!atomic_add_unless(&rds_ib_allocation, 1, rds_ib_sysctl_max_recv_allocation)) {
+ cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
+ if (cache_item) {
+ ibinc = container_of(cache_item, struct rds_ib_incoming, ii_cache_entry);
+ } else {
+ avail_allocs = atomic_add_unless(&rds_ib_allocation,
+ 1, rds_ib_sysctl_max_recv_allocation);
+ if (!avail_allocs) {
rds_ib_stats_inc(s_ib_rx_alloc_limit);
- goto out;
+ return NULL;
}
- recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab,
- kptr_gfp);
- if (recv->r_ibinc == NULL) {
+ ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask);
+ if (!ibinc) {
atomic_dec(&rds_ib_allocation);
- goto out;
+ return NULL;
}
- INIT_LIST_HEAD(&recv->r_ibinc->ii_frags);
- rds_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr);
}
+ INIT_LIST_HEAD(&ibinc->ii_frags);
+ rds_inc_init(&ibinc->ii_inc, ic->conn, ic->conn->c_faddr);
- if (recv->r_frag == NULL) {
- recv->r_frag = kmem_cache_alloc(rds_ib_frag_slab, kptr_gfp);
- if (recv->r_frag == NULL)
- goto out;
- INIT_LIST_HEAD(&recv->r_frag->f_item);
- recv->r_frag->f_page = NULL;
+ return ibinc;
+}
+
+static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
+ gfp_t slab_mask, gfp_t page_mask)
+{
+ struct rds_page_frag *frag;
+ struct list_head *cache_item;
+ int ret;
+
+ cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
+ if (cache_item) {
+ frag = container_of(cache_item, struct rds_page_frag, f_cache_entry);
+ } else {
+ frag = kmem_cache_alloc(rds_ib_frag_slab, slab_mask);
+ if (!frag)
+ return NULL;
+
+ sg_init_table(&frag->f_sg, 1);
+ ret = rds_page_remainder_alloc(&frag->f_sg,
+ RDS_FRAG_SIZE, page_mask);
+ if (ret) {
+ kmem_cache_free(rds_ib_frag_slab, frag);
+ return NULL;
+ }
}
- if (ic->i_frag.f_page == NULL) {
- ic->i_frag.f_page = alloc_page(page_gfp);
- if (ic->i_frag.f_page == NULL)
- goto out;
- ic->i_frag.f_offset = 0;
+ INIT_LIST_HEAD(&frag->f_item);
+
+ return frag;
+}
+
+static int rds_ib_recv_refill_one(struct rds_connection *conn,
+ struct rds_ib_recv_work *recv, int prefill)
+{
+ struct rds_ib_connection *ic = conn->c_transport_data;
+ struct ib_sge *sge;
+ int ret = -ENOMEM;
+ gfp_t slab_mask = GFP_NOWAIT;
+ gfp_t page_mask = GFP_NOWAIT;
+
+ if (prefill) {
+ slab_mask = GFP_KERNEL;
+ page_mask = GFP_HIGHUSER;
}
- dma_addr = ib_dma_map_page(ic->i_cm_id->device,
- ic->i_frag.f_page,
- ic->i_frag.f_offset,
- RDS_FRAG_SIZE,
- DMA_FROM_DEVICE);
- if (ib_dma_mapping_error(ic->i_cm_id->device, dma_addr))
- goto out;
+ if (!ic->i_cache_incs.ready)
+ rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
+ if (!ic->i_cache_frags.ready)
+ rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
/*
- * Once we get the RDS_PAGE_LAST_OFF frag then rds_ib_frag_unmap()
- * must be called on this recv. This happens as completions hit
- * in order or on connection shutdown.
+ * ibinc was taken from recv if recv contained the start of a message.
+ * recvs that were continuations will still have this allocated.
*/
- recv->r_frag->f_page = ic->i_frag.f_page;
- recv->r_frag->f_offset = ic->i_frag.f_offset;
- recv->r_frag->f_mapped = dma_addr;
+ if (!recv->r_ibinc) {
+ recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
+ if (!recv->r_ibinc)
+ goto out;
+ }
- sge = rds_ib_data_sge(ic, recv->r_sge);
- sge->addr = dma_addr;
- sge->length = RDS_FRAG_SIZE;
+ WARN_ON(recv->r_frag); /* leak! */
+ recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
+ if (!recv->r_frag)
+ goto out;
+
+ ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
+ 1, DMA_FROM_DEVICE);
+ WARN_ON(ret != 1);
- sge = rds_ib_header_sge(ic, recv->r_sge);
+ sge = &recv->r_sge[0];
sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
sge->length = sizeof(struct rds_header);
- get_page(recv->r_frag->f_page);
-
- if (ic->i_frag.f_offset < RDS_PAGE_LAST_OFF) {
- ic->i_frag.f_offset += RDS_FRAG_SIZE;
- } else {
- put_page(ic->i_frag.f_page);
- ic->i_frag.f_page = NULL;
- ic->i_frag.f_offset = 0;
- }
+ sge = &recv->r_sge[1];
+ sge->addr = sg_dma_address(&recv->r_frag->f_sg);
+ sge->length = sg_dma_len(&recv->r_frag->f_sg);
ret = 0;
out:
@@ -216,13 +350,11 @@ out:
/*
* This tries to allocate and post unused work requests after making sure that
* they have all the allocations they need to queue received fragments into
- * sockets. The i_recv_mutex is held here so that ring_alloc and _unalloc
- * pairs don't go unmatched.
+ * sockets.
*
* -1 is returned if posting fails due to temporary resource exhaustion.
*/
-int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
- gfp_t page_gfp, int prefill)
+void rds_ib_recv_refill(struct rds_connection *conn, int prefill)
{
struct rds_ib_connection *ic = conn->c_transport_data;
struct rds_ib_recv_work *recv;
@@ -236,28 +368,25 @@ int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
if (pos >= ic->i_recv_ring.w_nr) {
printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
pos);
- ret = -EINVAL;
break;
}
recv = &ic->i_recvs[pos];
- ret = rds_ib_recv_refill_one(conn, recv, kptr_gfp, page_gfp);
+ ret = rds_ib_recv_refill_one(conn, recv, prefill);
if (ret) {
- ret = -1;
break;
}
/* XXX when can this fail? */
ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
- recv->r_ibinc, recv->r_frag->f_page,
- (long) recv->r_frag->f_mapped, ret);
+ recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
+ (long) sg_dma_address(&recv->r_frag->f_sg), ret);
if (ret) {
rds_ib_conn_error(conn, "recv post on "
"%pI4 returned %d, disconnecting and "
"reconnecting\n", &conn->c_faddr,
ret);
- ret = -1;
break;
}
@@ -270,37 +399,73 @@ int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
if (ret)
rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
- return ret;
}
-void rds_ib_inc_purge(struct rds_incoming *inc)
+/*
+ * We want to recycle several types of recv allocations, like incs and frags.
+ * To use this, the *_free() function passes in the ptr to a list_head within
+ * the recyclee, as well as the cache to put it on.
+ *
+ * First, we put the memory on a percpu list. When this reaches a certain size,
+ * We move it to an intermediate non-percpu list in a lockless manner, with some
+ * xchg/compxchg wizardry.
+ *
+ * N.B. Instead of a list_head as the anchor, we use a single pointer, which can
+ * be NULL and xchg'd. The list is actually empty when the pointer is NULL, and
+ * list_empty() will return true with one element is actually present.
+ */
+static void rds_ib_recv_cache_put(struct list_head *new_item,
+ struct rds_ib_refill_cache *cache)
{
- struct rds_ib_incoming *ibinc;
- struct rds_page_frag *frag;
- struct rds_page_frag *pos;
+ unsigned long flags;
+ struct rds_ib_cache_head *chp;
+ struct list_head *old;
- ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
- rdsdebug("purging ibinc %p inc %p\n", ibinc, inc);
+ local_irq_save(flags);
- list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
- list_del_init(&frag->f_item);
- rds_ib_frag_drop_page(frag);
- rds_ib_frag_free(frag);
- }
+ chp = per_cpu_ptr(cache->percpu, smp_processor_id());
+ if (!chp->first)
+ INIT_LIST_HEAD(new_item);
+ else /* put on front */
+ list_add_tail(new_item, chp->first);
+ chp->first = new_item;
+ chp->count++;
+
+ if (chp->count < RDS_IB_RECYCLE_BATCH_COUNT)
+ goto end;
+
+ /*
+ * Return our per-cpu first list to the cache's xfer by atomically
+ * grabbing the current xfer list, appending it to our per-cpu list,
+ * and then atomically returning that entire list back to the
+ * cache's xfer list as long as it's still empty.
+ */
+ do {
+ old = xchg(&cache->xfer, NULL);
+ if (old)
+ list_splice_entire_tail(old, chp->first);
+ old = cmpxchg(&cache->xfer, NULL, chp->first);
+ } while (old);
+
+ chp->first = NULL;
+ chp->count = 0;
+end:
+ local_irq_restore(flags);
}
-void rds_ib_inc_free(struct rds_incoming *inc)
+static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache)
{
- struct rds_ib_incoming *ibinc;
-
- ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
+ struct list_head *head = cache->ready;
+
+ if (head) {
+ if (!list_empty(head)) {
+ cache->ready = head->next;
+ list_del_init(head);
+ } else
+ cache->ready = NULL;
+ }
- rds_ib_inc_purge(inc);
- rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
- BUG_ON(!list_empty(&ibinc->ii_frags));
- kmem_cache_free(rds_ib_incoming_slab, ibinc);
- atomic_dec(&rds_ib_allocation);
- BUG_ON(atomic_read(&rds_ib_allocation) < 0);
+ return head;
}
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
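
The cache_put path in the hunk above hands a full per-cpu batch to the shared 'xfer' slot without locking: it steals the slot's current contents with xchg(), folds them into its own batch, and installs the merged batch with cmpxchg() only if the slot was still observed empty. A condensed sketch of just that hand-off (demo_* names are invented; xchg()/cmpxchg() and the list helpers are the real ones, and demo_merge() mirrors list_splice_entire_tail()):

#include <linux/list.h>
#include <linux/atomic.h>

static struct list_head *demo_xfer;     /* shared slot, NULL means empty */

/* Move the entire 'from' list, anchor included, to the tail of 'to'. */
static void demo_merge(struct list_head *from, struct list_head *to)
{
        struct list_head *from_last = from->prev;

        list_splice_tail(from_last, to);
        list_add_tail(from_last, to);
}

/* Publish a private, non-empty batch into the shared slot lock-free. */
static void demo_publish(struct list_head *batch)
{
        struct list_head *old;

        do {
                old = xchg(&demo_xfer, NULL);           /* steal current contents */
                if (old)
                        demo_merge(old, batch);         /* fold them into our batch */
                old = cmpxchg(&demo_xfer, NULL, batch); /* install if still empty */
        } while (old);                                  /* lost a race: retry */
}
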
@@ -336,13 +501,13 @@ int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
to_copy = min_t(unsigned long, to_copy, len - copied);
rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
- "[%p, %lu] + %lu\n",
+ "[%p, %u] + %lu\n",
to_copy, iov->iov_base, iov->iov_len, iov_off,
- frag->f_page, frag->f_offset, frag_off);
+ sg_page(&frag->f_sg), frag->f_sg.offset, frag_off);
/* XXX needs + offset for multiple recvs per page */
- ret = rds_page_copy_to_user(frag->f_page,
- frag->f_offset + frag_off,
+ ret = rds_page_copy_to_user(sg_page(&frag->f_sg),
+ frag->f_sg.offset + frag_off,
iov->iov_base + iov_off,
to_copy);
if (ret) {
@@ -557,47 +722,6 @@ u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
return rds_ib_get_ack(ic);
}
-static struct rds_header *rds_ib_get_header(struct rds_connection *conn,
- struct rds_ib_recv_work *recv,
- u32 data_len)
-{
- struct rds_ib_connection *ic = conn->c_transport_data;
- void *hdr_buff = &ic->i_recv_hdrs[recv - ic->i_recvs];
- void *addr;
- u32 misplaced_hdr_bytes;
-
- /*
- * Support header at the front (RDS 3.1+) as well as header-at-end.
- *
- * Cases:
- * 1) header all in header buff (great!)
- * 2) header all in data page (copy all to header buff)
- * 3) header split across hdr buf + data page
- * (move bit in hdr buff to end before copying other bit from data page)
- */
- if (conn->c_version > RDS_PROTOCOL_3_0 || data_len == RDS_FRAG_SIZE)
- return hdr_buff;
-
- if (data_len <= (RDS_FRAG_SIZE - sizeof(struct rds_header))) {
- addr = kmap_atomic(recv->r_frag->f_page, KM_SOFTIRQ0);
- memcpy(hdr_buff,
- addr + recv->r_frag->f_offset + data_len,
- sizeof(struct rds_header));
- kunmap_atomic(addr, KM_SOFTIRQ0);
- return hdr_buff;
- }
-
- misplaced_hdr_bytes = (sizeof(struct rds_header) - (RDS_FRAG_SIZE - data_len));
-
- memmove(hdr_buff + misplaced_hdr_bytes, hdr_buff, misplaced_hdr_bytes);
-
- addr = kmap_atomic(recv->r_frag->f_page, KM_SOFTIRQ0);
- memcpy(hdr_buff, addr + recv->r_frag->f_offset + data_len,
- sizeof(struct rds_header) - misplaced_hdr_bytes);
- kunmap_atomic(addr, KM_SOFTIRQ0);
- return hdr_buff;
-}
-
/*
* It's kind of lame that we're copying from the posted receive pages into
* long-lived bitmaps. We could have posted the bitmaps and rdma written into
@@ -639,7 +763,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
BUG_ON(to_copy & 7); /* Must be 64bit aligned. */
- addr = kmap_atomic(frag->f_page, KM_SOFTIRQ0);
+ addr = kmap_atomic(sg_page(&frag->f_sg), KM_SOFTIRQ0);
src = addr + frag_off;
dst = (void *)map->m_page_addrs[map_page] + map_off;
@@ -710,7 +834,7 @@ static void rds_ib_process_recv(struct rds_connection *conn,
}
data_len -= sizeof(struct rds_header);
- ihdr = rds_ib_get_header(conn, recv, data_len);
+ ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];
/* Validate the checksum. */
if (!rds_message_verify_checksum(ihdr)) {
@@ -742,12 +866,12 @@ static void rds_ib_process_recv(struct rds_connection *conn,
* the inc is freed. We don't go that route, so we have to drop the
* page ref ourselves. We can't just leave the page on the recv
* because that confuses the dma mapping of pages and each recv's use
- * of a partial page. We can leave the frag, though, it will be
- * reused.
+ * of a partial page.
*
* FIXME: Fold this into the code path below.
*/
- rds_ib_frag_drop_page(recv->r_frag);
+ rds_ib_frag_free(ic, recv->r_frag);
+ recv->r_frag = NULL;
return;
}
@@ -757,7 +881,7 @@ static void rds_ib_process_recv(struct rds_connection *conn,
* into the inc and save the inc so we can hang upcoming fragments
* off its list.
*/
- if (ibinc == NULL) {
+ if (!ibinc) {
ibinc = recv->r_ibinc;
recv->r_ibinc = NULL;
ic->i_ibinc = ibinc;
@@ -842,32 +966,38 @@ static inline void rds_poll_cq(struct rds_ib_connection *ic,
struct rds_ib_recv_work *recv;
while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
- rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
- (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
+ rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
+ (unsigned long long)wc.wr_id, wc.status,
+ rds_ib_wc_status_str(wc.status), wc.byte_len,
be32_to_cpu(wc.ex.imm_data));
rds_ib_stats_inc(s_ib_rx_cq_event);
recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
- rds_ib_recv_unmap_page(ic, recv);
+ ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
/*
* Also process recvs in connecting state because it is possible
* to get a recv completion _before_ the rdmacm ESTABLISHED
* event is processed.
*/
- if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
+ if (wc.status == IB_WC_SUCCESS) {
+ rds_ib_process_recv(conn, recv, wc.byte_len, state);
+ } else {
/* We expect errors as the qp is drained during shutdown */
- if (wc.status == IB_WC_SUCCESS) {
- rds_ib_process_recv(conn, recv, wc.byte_len, state);
- } else {
- rds_ib_conn_error(conn, "recv completion on "
- "%pI4 had status %u, disconnecting and "
- "reconnecting\n", &conn->c_faddr,
- wc.status);
- }
+ if (rds_conn_up(conn) || rds_conn_connecting(conn))
+ rds_ib_conn_error(conn, "recv completion on %pI4 had "
+ "status %u (%s), disconnecting and "
+ "reconnecting\n", &conn->c_faddr,
+ wc.status,
+ rds_ib_wc_status_str(wc.status));
}
+ /*
+ * It's very important that we only free this ring entry if we've truly
+ * freed the resources allocated to the entry. The refilling path can
+ * leak if we don't.
+ */
rds_ib_ring_free(&ic->i_recv_ring, 1);
}
}
@@ -897,11 +1027,8 @@ void rds_ib_recv_tasklet_fn(unsigned long data)
if (rds_ib_ring_empty(&ic->i_recv_ring))
rds_ib_stats_inc(s_ib_rx_ring_empty);
- /*
- * If the ring is running low, then schedule the thread to refill.
- */
if (rds_ib_ring_low(&ic->i_recv_ring))
- queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
+ rds_ib_recv_refill(conn, 0);
}
int rds_ib_recv(struct rds_connection *conn)
@@ -910,25 +1037,13 @@ int rds_ib_recv(struct rds_connection *conn)
int ret = 0;
rdsdebug("conn %p\n", conn);
-
- /*
- * If we get a temporary posting failure in this context then
- * we're really low and we want the caller to back off for a bit.
- */
- mutex_lock(&ic->i_recv_mutex);
- if (rds_ib_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 0))
- ret = -ENOMEM;
- else
- rds_ib_stats_inc(s_ib_rx_refill_from_thread);
- mutex_unlock(&ic->i_recv_mutex);
-
if (rds_conn_up(conn))
rds_ib_attempt_ack(ic);
return ret;
}
-int __init rds_ib_recv_init(void)
+int rds_ib_recv_init(void)
{
struct sysinfo si;
int ret = -ENOMEM;
@@ -939,14 +1054,14 @@ int __init rds_ib_recv_init(void)
rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
sizeof(struct rds_ib_incoming),
- 0, 0, NULL);
- if (rds_ib_incoming_slab == NULL)
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!rds_ib_incoming_slab)
goto out;
rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
sizeof(struct rds_page_frag),
- 0, 0, NULL);
- if (rds_ib_frag_slab == NULL)
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!rds_ib_frag_slab)
kmem_cache_destroy(rds_ib_incoming_slab);
else
ret = 0;
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 17fa80803ab0..71f373c421bc 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -36,11 +36,49 @@
#include <linux/dmapool.h>
#include "rds.h"
-#include "rdma.h"
#include "ib.h"
-static void rds_ib_send_rdma_complete(struct rds_message *rm,
- int wc_status)
+static char *rds_ib_wc_status_strings[] = {
+#define RDS_IB_WC_STATUS_STR(foo) \
+ [IB_WC_##foo] = __stringify(IB_WC_##foo)
+ RDS_IB_WC_STATUS_STR(SUCCESS),
+ RDS_IB_WC_STATUS_STR(LOC_LEN_ERR),
+ RDS_IB_WC_STATUS_STR(LOC_QP_OP_ERR),
+ RDS_IB_WC_STATUS_STR(LOC_EEC_OP_ERR),
+ RDS_IB_WC_STATUS_STR(LOC_PROT_ERR),
+ RDS_IB_WC_STATUS_STR(WR_FLUSH_ERR),
+ RDS_IB_WC_STATUS_STR(MW_BIND_ERR),
+ RDS_IB_WC_STATUS_STR(BAD_RESP_ERR),
+ RDS_IB_WC_STATUS_STR(LOC_ACCESS_ERR),
+ RDS_IB_WC_STATUS_STR(REM_INV_REQ_ERR),
+ RDS_IB_WC_STATUS_STR(REM_ACCESS_ERR),
+ RDS_IB_WC_STATUS_STR(REM_OP_ERR),
+ RDS_IB_WC_STATUS_STR(RETRY_EXC_ERR),
+ RDS_IB_WC_STATUS_STR(RNR_RETRY_EXC_ERR),
+ RDS_IB_WC_STATUS_STR(LOC_RDD_VIOL_ERR),
+ RDS_IB_WC_STATUS_STR(REM_INV_RD_REQ_ERR),
+ RDS_IB_WC_STATUS_STR(REM_ABORT_ERR),
+ RDS_IB_WC_STATUS_STR(INV_EECN_ERR),
+ RDS_IB_WC_STATUS_STR(INV_EEC_STATE_ERR),
+ RDS_IB_WC_STATUS_STR(FATAL_ERR),
+ RDS_IB_WC_STATUS_STR(RESP_TIMEOUT_ERR),
+ RDS_IB_WC_STATUS_STR(GENERAL_ERR),
+#undef RDS_IB_WC_STATUS_STR
+};
+
+char *rds_ib_wc_status_str(enum ib_wc_status status)
+{
+ return rds_str_array(rds_ib_wc_status_strings,
+ ARRAY_SIZE(rds_ib_wc_status_strings), status);
+}
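rds_ib_wc_status_str() leans on rds_str_array(), which is introduced elsewhere in this patch series rather than shown here. A minimal sketch of the lookup it is assumed to perform, bounds-checking the index and falling back to a fixed string for holes or out-of-range values, might look like this; the name rds_str_array_sketch and the "unknown" fallback text are assumptions for illustration only.

#include <stddef.h>
#include <stdio.h>

/* assumed shape of the helper: bounds-checked lookup with a fixed fallback */
static char *rds_str_array_sketch(char **array, size_t elements, size_t index)
{
	if (index < elements && array[index])
		return array[index];
	return "unknown";
}

int main(void)
{
	char *names[] = { [0] = "IB_WC_SUCCESS", [5] = "IB_WC_MW_BIND_ERR" };
	size_t n = sizeof(names) / sizeof(names[0]);

	/* a hole in the table and an out-of-range status both map to "unknown" */
	printf("%s %s %s\n", rds_str_array_sketch(names, n, 0),
	       rds_str_array_sketch(names, n, 3),
	       rds_str_array_sketch(names, n, 42));
	return 0;
}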
+
+/*
+ * Convert IB-specific error message to RDS error message and call core
+ * completion handler.
+ */
+static void rds_ib_send_complete(struct rds_message *rm,
+ int wc_status,
+ void (*complete)(struct rds_message *rm, int status))
{
int notify_status;
@@ -60,69 +98,125 @@ static void rds_ib_send_rdma_complete(struct rds_message *rm,
notify_status = RDS_RDMA_OTHER_ERROR;
break;
}
- rds_rdma_send_complete(rm, notify_status);
+ complete(rm, notify_status);
+}
+
+static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
+ struct rm_data_op *op,
+ int wc_status)
+{
+ if (op->op_nents)
+ ib_dma_unmap_sg(ic->i_cm_id->device,
+ op->op_sg, op->op_nents,
+ DMA_TO_DEVICE);
}
static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
- struct rds_rdma_op *op)
+ struct rm_rdma_op *op,
+ int wc_status)
{
- if (op->r_mapped) {
+ if (op->op_mapped) {
ib_dma_unmap_sg(ic->i_cm_id->device,
- op->r_sg, op->r_nents,
- op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- op->r_mapped = 0;
+ op->op_sg, op->op_nents,
+ op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ op->op_mapped = 0;
}
+
+ /* If the user asked for a completion notification on this
+ * message, we can implement three different semantics:
+ * 1. Notify when we received the ACK on the RDS message
+ * that was queued with the RDMA. This provides reliable
+ * notification of RDMA status at the expense of a one-way
+ * packet delay.
+ * 2. Notify when the IB stack gives us the completion event for
+ * the RDMA operation.
+ * 3. Notify when the IB stack gives us the completion event for
+ * the accompanying RDS messages.
+ * Here, we implement approach #3. To implement approach #2,
+ * we would need to take an event for the rdma WR. To implement #1,
+ * don't call rds_rdma_send_complete at all, and fall back to the notify
+ * handling in the ACK processing code.
+ *
+ * Note: There's no need to explicitly sync any RDMA buffers using
+ * ib_dma_sync_sg_for_cpu - the completion for the RDMA
+ * operation itself unmapped the RDMA buffers, which takes care
+ * of synching.
+ */
+ rds_ib_send_complete(container_of(op, struct rds_message, rdma),
+ wc_status, rds_rdma_send_complete);
+
+ if (op->op_write)
+ rds_stats_add(s_send_rdma_bytes, op->op_bytes);
+ else
+ rds_stats_add(s_recv_rdma_bytes, op->op_bytes);
}
-static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
- struct rds_ib_send_work *send,
- int wc_status)
+static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
+ struct rm_atomic_op *op,
+ int wc_status)
{
- struct rds_message *rm = send->s_rm;
-
- rdsdebug("ic %p send %p rm %p\n", ic, send, rm);
-
- ib_dma_unmap_sg(ic->i_cm_id->device,
- rm->m_sg, rm->m_nents,
- DMA_TO_DEVICE);
-
- if (rm->m_rdma_op != NULL) {
- rds_ib_send_unmap_rdma(ic, rm->m_rdma_op);
-
- /* If the user asked for a completion notification on this
- * message, we can implement three different semantics:
- * 1. Notify when we received the ACK on the RDS message
- * that was queued with the RDMA. This provides reliable
- * notification of RDMA status at the expense of a one-way
- * packet delay.
- * 2. Notify when the IB stack gives us the completion event for
- * the RDMA operation.
- * 3. Notify when the IB stack gives us the completion event for
- * the accompanying RDS messages.
- * Here, we implement approach #3. To implement approach #2,
- * call rds_rdma_send_complete from the cq_handler. To implement #1,
- * don't call rds_rdma_send_complete at all, and fall back to the notify
- * handling in the ACK processing code.
- *
- * Note: There's no need to explicitly sync any RDMA buffers using
- * ib_dma_sync_sg_for_cpu - the completion for the RDMA
- * operation itself unmapped the RDMA buffers, which takes care
- * of synching.
- */
- rds_ib_send_rdma_complete(rm, wc_status);
+ /* unmap atomic recvbuf */
+ if (op->op_mapped) {
+ ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
+ DMA_FROM_DEVICE);
+ op->op_mapped = 0;
+ }
- if (rm->m_rdma_op->r_write)
- rds_stats_add(s_send_rdma_bytes, rm->m_rdma_op->r_bytes);
- else
- rds_stats_add(s_recv_rdma_bytes, rm->m_rdma_op->r_bytes);
+ rds_ib_send_complete(container_of(op, struct rds_message, atomic),
+ wc_status, rds_atomic_send_complete);
+
+ if (op->op_type == RDS_ATOMIC_TYPE_CSWP)
+ rds_ib_stats_inc(s_ib_atomic_cswp);
+ else
+ rds_ib_stats_inc(s_ib_atomic_fadd);
+}
+
+/*
+ * Unmap the resources associated with a struct send_work.
+ *
+ * Returns the rm because the caller (the event handler) needs it, and
+ * at present the rm cannot be recovered any other way than by switching
+ * on wr.opcode.
+ */
+static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
+ struct rds_ib_send_work *send,
+ int wc_status)
+{
+ struct rds_message *rm = NULL;
+
+ /* In the error case, wc.opcode sometimes contains garbage */
+ switch (send->s_wr.opcode) {
+ case IB_WR_SEND:
+ if (send->s_op) {
+ rm = container_of(send->s_op, struct rds_message, data);
+ rds_ib_send_unmap_data(ic, send->s_op, wc_status);
+ }
+ break;
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_READ:
+ if (send->s_op) {
+ rm = container_of(send->s_op, struct rds_message, rdma);
+ rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
+ }
+ break;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ if (send->s_op) {
+ rm = container_of(send->s_op, struct rds_message, atomic);
+ rds_ib_send_unmap_atomic(ic, send->s_op, wc_status);
+ }
+ break;
+ default:
+ if (printk_ratelimit())
+ printk(KERN_NOTICE
+ "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
+ __func__, send->s_wr.opcode);
+ break;
}
- /* If anyone waited for this message to get flushed out, wake
- * them up now */
- rds_message_unmapped(rm);
+ send->s_wr.opcode = 0xdead;
- rds_message_put(rm);
- send->s_rm = NULL;
+ return rm;
}
void rds_ib_send_init_ring(struct rds_ib_connection *ic)
@@ -133,23 +227,18 @@ void rds_ib_send_init_ring(struct rds_ib_connection *ic)
for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
struct ib_sge *sge;
- send->s_rm = NULL;
send->s_op = NULL;
send->s_wr.wr_id = i;
send->s_wr.sg_list = send->s_sge;
- send->s_wr.num_sge = 1;
- send->s_wr.opcode = IB_WR_SEND;
- send->s_wr.send_flags = 0;
send->s_wr.ex.imm_data = 0;
- sge = rds_ib_data_sge(ic, send->s_sge);
- sge->lkey = ic->i_mr->lkey;
-
- sge = rds_ib_header_sge(ic, send->s_sge);
+ sge = &send->s_sge[0];
sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
sge->length = sizeof(struct rds_header);
sge->lkey = ic->i_mr->lkey;
+
+ send->s_sge[1].lkey = ic->i_mr->lkey;
}
}
@@ -159,16 +248,24 @@ void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
u32 i;
for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
- if (send->s_wr.opcode == 0xdead)
- continue;
- if (send->s_rm)
- rds_ib_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR);
- if (send->s_op)
- rds_ib_send_unmap_rdma(ic, send->s_op);
+ if (send->s_op && send->s_wr.opcode != 0xdead)
+ rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR);
}
}
/*
+ * The only fast path caller always has a non-zero nr, so we don't
+ * bother testing nr before performing the atomic sub.
+ */
+static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr)
+{
+ if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&
+ waitqueue_active(&rds_ib_ring_empty_wait))
+ wake_up(&rds_ib_ring_empty_wait);
+ BUG_ON(atomic_read(&ic->i_signaled_sends) < 0);
+}
+
+/*
* The _oldest/_free ring operations here race cleanly with the alloc/unalloc
* operations performed in the send path. As the sender allocs and potentially
* unallocs the next free entry in the ring it doesn't alter which is
@@ -178,12 +275,14 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
{
struct rds_connection *conn = context;
struct rds_ib_connection *ic = conn->c_transport_data;
+ struct rds_message *rm = NULL;
struct ib_wc wc;
struct rds_ib_send_work *send;
u32 completed;
u32 oldest;
u32 i = 0;
int ret;
+ int nr_sig = 0;
rdsdebug("cq %p conn %p\n", cq, conn);
rds_ib_stats_inc(s_ib_tx_cq_call);
@@ -192,8 +291,9 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
rdsdebug("ib_req_notify_cq send failed: %d\n", ret);
while (ib_poll_cq(cq, 1, &wc) > 0) {
- rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
- (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
+ rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
+ (unsigned long long)wc.wr_id, wc.status,
+ rds_ib_wc_status_str(wc.status), wc.byte_len,
be32_to_cpu(wc.ex.imm_data));
rds_ib_stats_inc(s_ib_tx_cq_event);
@@ -210,51 +310,30 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
for (i = 0; i < completed; i++) {
send = &ic->i_sends[oldest];
+ if (send->s_wr.send_flags & IB_SEND_SIGNALED)
+ nr_sig++;
- /* In the error case, wc.opcode sometimes contains garbage */
- switch (send->s_wr.opcode) {
- case IB_WR_SEND:
- if (send->s_rm)
- rds_ib_send_unmap_rm(ic, send, wc.status);
- break;
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_READ:
- /* Nothing to be done - the SG list will be unmapped
- * when the SEND completes. */
- break;
- default:
- if (printk_ratelimit())
- printk(KERN_NOTICE
- "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
- __func__, send->s_wr.opcode);
- break;
- }
+ rm = rds_ib_send_unmap_op(ic, send, wc.status);
- send->s_wr.opcode = 0xdead;
- send->s_wr.num_sge = 1;
if (send->s_queued + HZ/2 < jiffies)
rds_ib_stats_inc(s_ib_tx_stalled);
- /* If a RDMA operation produced an error, signal this right
- * away. If we don't, the subsequent SEND that goes with this
- * RDMA will be canceled with ERR_WFLUSH, and the application
- * never learn that the RDMA failed. */
- if (unlikely(wc.status == IB_WC_REM_ACCESS_ERR && send->s_op)) {
- struct rds_message *rm;
-
- rm = rds_send_get_message(conn, send->s_op);
- if (rm) {
- if (rm->m_rdma_op)
- rds_ib_send_unmap_rdma(ic, rm->m_rdma_op);
- rds_ib_send_rdma_complete(rm, wc.status);
- rds_message_put(rm);
+ if (send->s_op) {
+ if (send->s_op == rm->m_final_op) {
+ /* If anyone waited for this message to get flushed out, wake
+ * them up now */
+ rds_message_unmapped(rm);
}
+ rds_message_put(rm);
+ send->s_op = NULL;
}
oldest = (oldest + 1) % ic->i_send_ring.w_nr;
}
rds_ib_ring_free(&ic->i_send_ring, completed);
+ rds_ib_sub_signaled(ic, nr_sig);
+ nr_sig = 0;
if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
test_bit(0, &conn->c_map_queued))
@@ -262,10 +341,10 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
/* We expect errors as the qp is drained during shutdown */
if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
- rds_ib_conn_error(conn,
- "send completion on %pI4 "
- "had status %u, disconnecting and reconnecting\n",
- &conn->c_faddr, wc.status);
+ rds_ib_conn_error(conn, "send completion on %pI4 had status "
+ "%u (%s), disconnecting and reconnecting\n",
+ &conn->c_faddr, wc.status,
+ rds_ib_wc_status_str(wc.status));
}
}
}
@@ -294,7 +373,7 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
* credits (see rds_ib_send_add_credits below).
*
* The RDS send code is essentially single-threaded; rds_send_xmit
- * grabs c_send_lock to ensure exclusive access to the send ring.
+ * sets RDS_IN_XMIT to ensure exclusive access to the send ring.
* However, the ACK sending code is independent and can race with
* message SENDs.
*
@@ -413,40 +492,21 @@ void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}
-static inline void
-rds_ib_xmit_populate_wr(struct rds_ib_connection *ic,
- struct rds_ib_send_work *send, unsigned int pos,
- unsigned long buffer, unsigned int length,
- int send_flags)
+static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic,
+ struct rds_ib_send_work *send,
+ bool notify)
{
- struct ib_sge *sge;
-
- WARN_ON(pos != send - ic->i_sends);
-
- send->s_wr.send_flags = send_flags;
- send->s_wr.opcode = IB_WR_SEND;
- send->s_wr.num_sge = 2;
- send->s_wr.next = NULL;
- send->s_queued = jiffies;
- send->s_op = NULL;
-
- if (length != 0) {
- sge = rds_ib_data_sge(ic, send->s_sge);
- sge->addr = buffer;
- sge->length = length;
- sge->lkey = ic->i_mr->lkey;
-
- sge = rds_ib_header_sge(ic, send->s_sge);
- } else {
- /* We're sending a packet with no payload. There is only
- * one SGE */
- send->s_wr.num_sge = 1;
- sge = &send->s_sge[0];
+ /*
+ * We want to delay signaling completions just enough to get
+ * the batching benefits but not so much that we create dead time
+ * on the wire.
+ */
+ if (ic->i_unsignaled_wrs-- == 0 || notify) {
+ ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
+ send->s_wr.send_flags |= IB_SEND_SIGNALED;
+ return 1;
}
-
- sge->addr = ic->i_send_hdrs_dma + (pos * sizeof(struct rds_header));
- sge->length = sizeof(struct rds_header);
- sge->lkey = ic->i_mr->lkey;
+ return 0;
}
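rds_ib_set_wr_signal_state() and rds_ib_sub_signaled() earlier in this file form a pair: the post path signals only every max_unsig_wrs-th work request (or when the caller explicitly asks for notification) and counts how many signaled WRs it queued, and the completion path subtracts that count and wakes anyone waiting for the ring to drain once it reaches zero. The user-space sketch below models just that accounting; the constant, the names, and the printf standing in for wake_up() are illustrative assumptions rather than the driver's behaviour verbatim.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_UNSIG_WRS 16		/* stands in for rds_ib_sysctl_max_unsig_wrs */

static int unsignaled_wrs = MAX_UNSIG_WRS;	/* per-connection countdown     */
static atomic_int signaled_sends;		/* signaled WRs still in flight */

/* post path: decide whether this WR should request a completion */
static bool set_wr_signal_state(bool notify)
{
	if (unsignaled_wrs-- == 0 || notify) {
		unsignaled_wrs = MAX_UNSIG_WRS;
		return true;			/* would set IB_SEND_SIGNALED */
	}
	return false;
}

/* completion path: retire nr signaled WRs, "wake" waiters at zero */
static void sub_signaled(int nr)
{
	if (atomic_fetch_sub(&signaled_sends, nr) - nr == 0)
		printf("send ring drained, wake waiters\n");
}

int main(void)
{
	int posted = 40, nr_sig = 0;

	for (int i = 0; i < posted; i++)
		if (set_wr_signal_state(false))
			nr_sig++;
	atomic_fetch_add(&signaled_sends, nr_sig);
	printf("signaled %d of %d WRs\n", nr_sig, posted);

	sub_signaled(nr_sig);			/* pretend they all completed */
	return 0;
}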
/*
@@ -475,13 +535,14 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
u32 pos;
u32 i;
u32 work_alloc;
- u32 credit_alloc;
+ u32 credit_alloc = 0;
u32 posted;
u32 adv_credits = 0;
int send_flags = 0;
- int sent;
+ int bytes_sent = 0;
int ret;
int flow_controlled = 0;
+ int nr_sig = 0;
BUG_ON(off % RDS_FRAG_SIZE);
BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
@@ -507,14 +568,13 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
goto out;
}
- credit_alloc = work_alloc;
if (ic->i_flowctl) {
credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
adv_credits += posted;
if (credit_alloc < work_alloc) {
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
work_alloc = credit_alloc;
- flow_controlled++;
+ flow_controlled = 1;
}
if (work_alloc == 0) {
set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
@@ -525,31 +585,25 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
}
/* map the message the first time we see it */
- if (ic->i_rm == NULL) {
- /*
- printk(KERN_NOTICE "rds_ib_xmit prep msg dport=%u flags=0x%x len=%d\n",
- be16_to_cpu(rm->m_inc.i_hdr.h_dport),
- rm->m_inc.i_hdr.h_flags,
- be32_to_cpu(rm->m_inc.i_hdr.h_len));
- */
- if (rm->m_nents) {
- rm->m_count = ib_dma_map_sg(dev,
- rm->m_sg, rm->m_nents, DMA_TO_DEVICE);
- rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->m_count);
- if (rm->m_count == 0) {
+ if (!ic->i_data_op) {
+ if (rm->data.op_nents) {
+ rm->data.op_count = ib_dma_map_sg(dev,
+ rm->data.op_sg,
+ rm->data.op_nents,
+ DMA_TO_DEVICE);
+ rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
+ if (rm->data.op_count == 0) {
rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
ret = -ENOMEM; /* XXX ? */
goto out;
}
} else {
- rm->m_count = 0;
+ rm->data.op_count = 0;
}
- ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
- ic->i_unsignaled_bytes = rds_ib_sysctl_max_unsig_bytes;
rds_message_addref(rm);
- ic->i_rm = rm;
+ ic->i_data_op = &rm->data;
/* Finalize the header */
if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
@@ -559,10 +613,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
/* If it has a RDMA op, tell the peer we did it. This is
* used by the peer to release use-once RDMA MRs. */
- if (rm->m_rdma_op) {
+ if (rm->rdma.op_active) {
struct rds_ext_header_rdma ext_hdr;
- ext_hdr.h_rdma_rkey = cpu_to_be32(rm->m_rdma_op->r_key);
+ ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
rds_message_add_extension(&rm->m_inc.i_hdr,
RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
}
@@ -582,99 +636,77 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
/*
* Update adv_credits since we reset the ACK_REQUIRED bit.
*/
- rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
- adv_credits += posted;
- BUG_ON(adv_credits > 255);
+ if (ic->i_flowctl) {
+ rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
+ adv_credits += posted;
+ BUG_ON(adv_credits > 255);
+ }
}
- send = &ic->i_sends[pos];
- first = send;
- prev = NULL;
- scat = &rm->m_sg[sg];
- sent = 0;
- i = 0;
-
/* Sometimes you want to put a fence between an RDMA
* READ and the following SEND.
* We could either do this all the time
* or when requested by the user. Right now, we let
* the application choose.
*/
- if (rm->m_rdma_op && rm->m_rdma_op->r_fence)
+ if (rm->rdma.op_active && rm->rdma.op_fence)
send_flags = IB_SEND_FENCE;
- /*
- * We could be copying the header into the unused tail of the page.
- * That would need to be changed in the future when those pages might
- * be mapped userspace pages or page cache pages. So instead we always
- * use a second sge and our long-lived ring of mapped headers. We send
- * the header after the data so that the data payload can be aligned on
- * the receiver.
- */
+ /* Each frag gets a header. Msgs may be 0 bytes */
+ send = &ic->i_sends[pos];
+ first = send;
+ prev = NULL;
+ scat = &ic->i_data_op->op_sg[sg];
+ i = 0;
+ do {
+ unsigned int len = 0;
- /* handle a 0-len message */
- if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0) {
- rds_ib_xmit_populate_wr(ic, send, pos, 0, 0, send_flags);
- goto add_header;
- }
+ /* Set up the header */
+ send->s_wr.send_flags = send_flags;
+ send->s_wr.opcode = IB_WR_SEND;
+ send->s_wr.num_sge = 1;
+ send->s_wr.next = NULL;
+ send->s_queued = jiffies;
+ send->s_op = NULL;
- /* if there's data reference it with a chain of work reqs */
- for (; i < work_alloc && scat != &rm->m_sg[rm->m_count]; i++) {
- unsigned int len;
+ send->s_sge[0].addr = ic->i_send_hdrs_dma
+ + (pos * sizeof(struct rds_header));
+ send->s_sge[0].length = sizeof(struct rds_header);
- send = &ic->i_sends[pos];
+ memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));
- len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
- rds_ib_xmit_populate_wr(ic, send, pos,
- ib_sg_dma_address(dev, scat) + off, len,
- send_flags);
+ /* Set up the data, if present */
+ if (i < work_alloc
+ && scat != &rm->data.op_sg[rm->data.op_count]) {
+ len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
+ send->s_wr.num_sge = 2;
- /*
- * We want to delay signaling completions just enough to get
- * the batching benefits but not so much that we create dead time
- * on the wire.
- */
- if (ic->i_unsignaled_wrs-- == 0) {
- ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
- send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
- }
+ send->s_sge[1].addr = ib_sg_dma_address(dev, scat) + off;
+ send->s_sge[1].length = len;
- ic->i_unsignaled_bytes -= len;
- if (ic->i_unsignaled_bytes <= 0) {
- ic->i_unsignaled_bytes = rds_ib_sysctl_max_unsig_bytes;
- send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
+ bytes_sent += len;
+ off += len;
+ if (off == ib_sg_dma_len(dev, scat)) {
+ scat++;
+ off = 0;
+ }
}
+ rds_ib_set_wr_signal_state(ic, send, 0);
+
/*
* Always signal the last one if we're stopping due to flow control.
*/
- if (flow_controlled && i == (work_alloc-1))
+ if (ic->i_flowctl && flow_controlled && i == (work_alloc-1))
send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
+ if (send->s_wr.send_flags & IB_SEND_SIGNALED)
+ nr_sig++;
+
rdsdebug("send %p wr %p num_sge %u next %p\n", send,
&send->s_wr, send->s_wr.num_sge, send->s_wr.next);
- sent += len;
- off += len;
- if (off == ib_sg_dma_len(dev, scat)) {
- scat++;
- off = 0;
- }
-
-add_header:
- /* Tack on the header after the data. The header SGE should already
- * have been set up to point to the right header buffer. */
- memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));
-
- if (0) {
- struct rds_header *hdr = &ic->i_send_hdrs[pos];
-
- printk(KERN_NOTICE "send WR dport=%u flags=0x%x len=%d\n",
- be16_to_cpu(hdr->h_dport),
- hdr->h_flags,
- be32_to_cpu(hdr->h_len));
- }
- if (adv_credits) {
+ if (ic->i_flowctl && adv_credits) {
struct rds_header *hdr = &ic->i_send_hdrs[pos];
/* add credit and redo the header checksum */
@@ -689,20 +721,25 @@ add_header:
prev = send;
pos = (pos + 1) % ic->i_send_ring.w_nr;
- }
+ send = &ic->i_sends[pos];
+ i++;
+
+ } while (i < work_alloc
+ && scat != &rm->data.op_sg[rm->data.op_count]);
/* Account the RDS header in the number of bytes we sent, but just once.
* The caller has no concept of fragmentation. */
if (hdr_off == 0)
- sent += sizeof(struct rds_header);
+ bytes_sent += sizeof(struct rds_header);
/* if we finished the message then send completion owns it */
- if (scat == &rm->m_sg[rm->m_count]) {
- prev->s_rm = ic->i_rm;
- prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
- ic->i_rm = NULL;
+ if (scat == &rm->data.op_sg[rm->data.op_count]) {
+ prev->s_op = ic->i_data_op;
+ prev->s_wr.send_flags |= IB_SEND_SOLICITED;
+ ic->i_data_op = NULL;
}
+ /* Put back wrs & credits we didn't use */
if (i < work_alloc) {
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
work_alloc = i;
@@ -710,6 +747,9 @@ add_header:
if (ic->i_flowctl && i < credit_alloc)
rds_ib_send_add_credits(conn, credit_alloc - i);
+ if (nr_sig)
+ atomic_add(nr_sig, &ic->i_signaled_sends);
+
/* XXX need to worry about failed_wr and partial sends. */
failed_wr = &first->s_wr;
ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
@@ -720,32 +760,127 @@ add_header:
printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
"returned %d\n", &conn->c_faddr, ret);
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
- if (prev->s_rm) {
- ic->i_rm = prev->s_rm;
- prev->s_rm = NULL;
+ rds_ib_sub_signaled(ic, nr_sig);
+ if (prev->s_op) {
+ ic->i_data_op = prev->s_op;
+ prev->s_op = NULL;
}
rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
goto out;
}
- ret = sent;
+ ret = bytes_sent;
out:
BUG_ON(adv_credits);
return ret;
}
-int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
+/*
+ * Issue atomic operation.
+ * A simplified version of the rdma case: we always map a single SG of
+ * just 8 bytes, for the return value from the atomic operation.
+ */
+int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
+{
+ struct rds_ib_connection *ic = conn->c_transport_data;
+ struct rds_ib_send_work *send = NULL;
+ struct ib_send_wr *failed_wr;
+ struct rds_ib_device *rds_ibdev;
+ u32 pos;
+ u32 work_alloc;
+ int ret;
+ int nr_sig = 0;
+
+ rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
+
+ work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
+ if (work_alloc != 1) {
+ rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
+ rds_ib_stats_inc(s_ib_tx_ring_full);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* address of send request in ring */
+ send = &ic->i_sends[pos];
+ send->s_queued = jiffies;
+
+ if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
+ send->s_wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
+ send->s_wr.wr.atomic.compare_add = op->op_m_cswp.compare;
+ send->s_wr.wr.atomic.swap = op->op_m_cswp.swap;
+ send->s_wr.wr.atomic.compare_add_mask = op->op_m_cswp.compare_mask;
+ send->s_wr.wr.atomic.swap_mask = op->op_m_cswp.swap_mask;
+ } else { /* FADD */
+ send->s_wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD;
+ send->s_wr.wr.atomic.compare_add = op->op_m_fadd.add;
+ send->s_wr.wr.atomic.swap = 0;
+ send->s_wr.wr.atomic.compare_add_mask = op->op_m_fadd.nocarry_mask;
+ send->s_wr.wr.atomic.swap_mask = 0;
+ }
+ nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
+ send->s_wr.num_sge = 1;
+ send->s_wr.next = NULL;
+ send->s_wr.wr.atomic.remote_addr = op->op_remote_addr;
+ send->s_wr.wr.atomic.rkey = op->op_rkey;
+ send->s_op = op;
+ rds_message_addref(container_of(send->s_op, struct rds_message, atomic));
+
+ /* map 8 byte retval buffer to the device */
+ ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
+ rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
+ if (ret != 1) {
+ rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
+ rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
+ ret = -ENOMEM; /* XXX ? */
+ goto out;
+ }
+
+ /* Convert our struct scatterlist to struct ib_sge */
+ send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg);
+ send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg);
+ send->s_sge[0].lkey = ic->i_mr->lkey;
+
+ rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
+ send->s_sge[0].addr, send->s_sge[0].length);
+
+ if (nr_sig)
+ atomic_add(nr_sig, &ic->i_signaled_sends);
+
+ failed_wr = &send->s_wr;
+ ret = ib_post_send(ic->i_cm_id->qp, &send->s_wr, &failed_wr);
+ rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
+ send, &send->s_wr, ret, failed_wr);
+ BUG_ON(failed_wr != &send->s_wr);
+ if (ret) {
+ printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 "
+ "returned %d\n", &conn->c_faddr, ret);
+ rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
+ rds_ib_sub_signaled(ic, nr_sig);
+ goto out;
+ }
+
+ if (unlikely(failed_wr != &send->s_wr)) {
+ printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
+ BUG_ON(failed_wr != &send->s_wr);
+ }
+
+out:
+ return ret;
+}
+
+int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
{
struct rds_ib_connection *ic = conn->c_transport_data;
struct rds_ib_send_work *send = NULL;
struct rds_ib_send_work *first;
struct rds_ib_send_work *prev;
struct ib_send_wr *failed_wr;
- struct rds_ib_device *rds_ibdev;
struct scatterlist *scat;
unsigned long len;
- u64 remote_addr = op->r_remote_addr;
+ u64 remote_addr = op->op_remote_addr;
+ u32 max_sge = ic->rds_ibdev->max_sge;
u32 pos;
u32 work_alloc;
u32 i;
@@ -753,29 +888,28 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
int sent;
int ret;
int num_sge;
-
- rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);
-
- /* map the message the first time we see it */
- if (!op->r_mapped) {
- op->r_count = ib_dma_map_sg(ic->i_cm_id->device,
- op->r_sg, op->r_nents, (op->r_write) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count);
- if (op->r_count == 0) {
+ int nr_sig = 0;
+
+ /* map the op the first time we see it */
+ if (!op->op_mapped) {
+ op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
+ op->op_sg, op->op_nents, (op->op_write) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
+ if (op->op_count == 0) {
rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
ret = -ENOMEM; /* XXX ? */
goto out;
}
- op->r_mapped = 1;
+ op->op_mapped = 1;
}
/*
* Instead of knowing how to return a partial rdma read/write we insist that there
* be enough work requests to send the entire message.
*/
- i = ceil(op->r_count, rds_ibdev->max_sge);
+ i = ceil(op->op_count, max_sge);
work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
if (work_alloc != i) {
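As a worked example with hypothetical numbers: an op_count of 10 mapped scatterlist entries on a device advertising max_sge = 4 gives ceil(10, 4) = 3 work requests, so rds_ib_ring_alloc() must hand back all 3 send-ring slots here or the transmit is backed off; the first two WRs would each carry 4 SGEs and the last one the remaining 2.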
@@ -788,30 +922,24 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
send = &ic->i_sends[pos];
first = send;
prev = NULL;
- scat = &op->r_sg[0];
+ scat = &op->op_sg[0];
sent = 0;
- num_sge = op->r_count;
+ num_sge = op->op_count;
- for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
+ for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
send->s_wr.send_flags = 0;
send->s_queued = jiffies;
- /*
- * We want to delay signaling completions just enough to get
- * the batching benefits but not so much that we create dead time on the wire.
- */
- if (ic->i_unsignaled_wrs-- == 0) {
- ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
- send->s_wr.send_flags = IB_SEND_SIGNALED;
- }
+ send->s_op = NULL;
+
+ nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify);
- send->s_wr.opcode = op->r_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
+ send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
send->s_wr.wr.rdma.remote_addr = remote_addr;
- send->s_wr.wr.rdma.rkey = op->r_key;
- send->s_op = op;
+ send->s_wr.wr.rdma.rkey = op->op_rkey;
- if (num_sge > rds_ibdev->max_sge) {
- send->s_wr.num_sge = rds_ibdev->max_sge;
- num_sge -= rds_ibdev->max_sge;
+ if (num_sge > max_sge) {
+ send->s_wr.num_sge = max_sge;
+ num_sge -= max_sge;
} else {
send->s_wr.num_sge = num_sge;
}
@@ -821,7 +949,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
if (prev)
prev->s_wr.next = &send->s_wr;
- for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) {
+ for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) {
len = ib_sg_dma_len(ic->i_cm_id->device, scat);
send->s_sge[j].addr =
ib_sg_dma_address(ic->i_cm_id->device, scat);
@@ -843,15 +971,20 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
send = ic->i_sends;
}
- /* if we finished the message then send completion owns it */
- if (scat == &op->r_sg[op->r_count])
- prev->s_wr.send_flags = IB_SEND_SIGNALED;
+ /* give a reference to the last op */
+ if (scat == &op->op_sg[op->op_count]) {
+ prev->s_op = op;
+ rds_message_addref(container_of(op, struct rds_message, rdma));
+ }
if (i < work_alloc) {
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
work_alloc = i;
}
+ if (nr_sig)
+ atomic_add(nr_sig, &ic->i_signaled_sends);
+
failed_wr = &first->s_wr;
ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
@@ -861,6 +994,7 @@ int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
"returned %d\n", &conn->c_faddr, ret);
rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
+ rds_ib_sub_signaled(ic, nr_sig);
goto out;
}
diff --git a/net/rds/ib_stats.c b/net/rds/ib_stats.c
index d2c904dd6fbc..2d5965d6e97c 100644
--- a/net/rds/ib_stats.c
+++ b/net/rds/ib_stats.c
@@ -67,6 +67,8 @@ static const char *const rds_ib_stat_names[] = {
"ib_rdma_mr_pool_flush",
"ib_rdma_mr_pool_wait",
"ib_rdma_mr_pool_depleted",
+ "ib_atomic_cswp",
+ "ib_atomic_fadd",
};
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
diff --git a/net/rds/ib_sysctl.c b/net/rds/ib_sysctl.c
index 03f01cb4e0fe..fc3da37220fd 100644
--- a/net/rds/ib_sysctl.c
+++ b/net/rds/ib_sysctl.c
@@ -49,10 +49,6 @@ unsigned long rds_ib_sysctl_max_unsig_wrs = 16;
static unsigned long rds_ib_sysctl_max_unsig_wr_min = 1;
static unsigned long rds_ib_sysctl_max_unsig_wr_max = 64;
-unsigned long rds_ib_sysctl_max_unsig_bytes = (16 << 20);
-static unsigned long rds_ib_sysctl_max_unsig_bytes_min = 1;
-static unsigned long rds_ib_sysctl_max_unsig_bytes_max = ~0UL;
-
/*
* This sysctl does nothing.
*
@@ -94,15 +90,6 @@ ctl_table rds_ib_sysctl_table[] = {
.extra2 = &rds_ib_sysctl_max_unsig_wr_max,
},
{
- .procname = "max_unsignaled_bytes",
- .data = &rds_ib_sysctl_max_unsig_bytes,
- .maxlen = sizeof(unsigned long),
- .mode = 0644,
- .proc_handler = proc_doulongvec_minmax,
- .extra1 = &rds_ib_sysctl_max_unsig_bytes_min,
- .extra2 = &rds_ib_sysctl_max_unsig_bytes_max,
- },
- {
.procname = "max_recv_allocation",
.data = &rds_ib_sysctl_max_recv_allocation,
.maxlen = sizeof(unsigned long),
@@ -132,10 +119,10 @@ void rds_ib_sysctl_exit(void)
unregister_sysctl_table(rds_ib_sysctl_hdr);
}
-int __init rds_ib_sysctl_init(void)
+int rds_ib_sysctl_init(void)
{
rds_ib_sysctl_hdr = register_sysctl_paths(rds_ib_sysctl_path, rds_ib_sysctl_table);
- if (rds_ib_sysctl_hdr == NULL)
+ if (!rds_ib_sysctl_hdr)
return -ENOMEM;
return 0;
}
diff --git a/net/rds/info.c b/net/rds/info.c
index c45c4173a44d..4fdf1b6e84ff 100644
--- a/net/rds/info.c
+++ b/net/rds/info.c
@@ -76,7 +76,7 @@ void rds_info_register_func(int optname, rds_info_func func)
BUG_ON(optname < RDS_INFO_FIRST || optname > RDS_INFO_LAST);
spin_lock(&rds_info_lock);
- BUG_ON(rds_info_funcs[offset] != NULL);
+ BUG_ON(rds_info_funcs[offset]);
rds_info_funcs[offset] = func;
spin_unlock(&rds_info_lock);
}
@@ -102,7 +102,7 @@ EXPORT_SYMBOL_GPL(rds_info_deregister_func);
*/
void rds_info_iter_unmap(struct rds_info_iterator *iter)
{
- if (iter->addr != NULL) {
+ if (iter->addr) {
kunmap_atomic(iter->addr, KM_USER0);
iter->addr = NULL;
}
@@ -117,7 +117,7 @@ void rds_info_copy(struct rds_info_iterator *iter, void *data,
unsigned long this;
while (bytes) {
- if (iter->addr == NULL)
+ if (!iter->addr)
iter->addr = kmap_atomic(*iter->pages, KM_USER0);
this = min(bytes, PAGE_SIZE - iter->offset);
@@ -188,7 +188,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
>> PAGE_SHIFT;
pages = kmalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
- if (pages == NULL) {
+ if (!pages) {
ret = -ENOMEM;
goto out;
}
@@ -206,7 +206,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
call_func:
func = rds_info_funcs[optname - RDS_INFO_FIRST];
- if (func == NULL) {
+ if (!func) {
ret = -ENOPROTOOPT;
goto out;
}
@@ -234,7 +234,7 @@ call_func:
ret = -EFAULT;
out:
- for (i = 0; pages != NULL && i < nr_pages; i++)
+ for (i = 0; pages && i < nr_pages; i++)
put_page(pages[i]);
kfree(pages);
diff --git a/net/rds/iw.c b/net/rds/iw.c
index c8f3d3525cb9..56808cac0fc7 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -264,7 +264,6 @@ struct rds_transport rds_iw_transport = {
.laddr_check = rds_iw_laddr_check,
.xmit_complete = rds_iw_xmit_complete,
.xmit = rds_iw_xmit,
- .xmit_cong_map = NULL,
.xmit_rdma = rds_iw_xmit_rdma,
.recv = rds_iw_recv,
.conn_alloc = rds_iw_conn_alloc,
@@ -272,7 +271,6 @@ struct rds_transport rds_iw_transport = {
.conn_connect = rds_iw_conn_connect,
.conn_shutdown = rds_iw_conn_shutdown,
.inc_copy_to_user = rds_iw_inc_copy_to_user,
- .inc_purge = rds_iw_inc_purge,
.inc_free = rds_iw_inc_free,
.cm_initiate_connect = rds_iw_cm_initiate_connect,
.cm_handle_connect = rds_iw_cm_handle_connect,
@@ -289,7 +287,7 @@ struct rds_transport rds_iw_transport = {
.t_prefer_loopback = 1,
};
-int __init rds_iw_init(void)
+int rds_iw_init(void)
{
int ret;
diff --git a/net/rds/iw.h b/net/rds/iw.h
index eef2f0c28476..543e665fafe3 100644
--- a/net/rds/iw.h
+++ b/net/rds/iw.h
@@ -70,7 +70,7 @@ struct rds_iw_send_work {
struct rds_message *s_rm;
/* We should really put these into a union: */
- struct rds_rdma_op *s_op;
+ struct rm_rdma_op *s_op;
struct rds_iw_mapping *s_mapping;
struct ib_mr *s_mr;
struct ib_fast_reg_page_list *s_page_list;
@@ -284,7 +284,7 @@ void rds_iw_conn_free(void *arg);
int rds_iw_conn_connect(struct rds_connection *conn);
void rds_iw_conn_shutdown(struct rds_connection *conn);
void rds_iw_state_change(struct sock *sk);
-int __init rds_iw_listen_init(void);
+int rds_iw_listen_init(void);
void rds_iw_listen_stop(void);
void __rds_iw_conn_error(struct rds_connection *conn, const char *, ...);
int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
@@ -321,12 +321,11 @@ void rds_iw_flush_mrs(void);
void rds_iw_remove_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id);
/* ib_recv.c */
-int __init rds_iw_recv_init(void);
+int rds_iw_recv_init(void);
void rds_iw_recv_exit(void);
int rds_iw_recv(struct rds_connection *conn);
int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
gfp_t page_gfp, int prefill);
-void rds_iw_inc_purge(struct rds_incoming *inc);
void rds_iw_inc_free(struct rds_incoming *inc);
int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
size_t size);
@@ -358,7 +357,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_iw_send_init_ring(struct rds_iw_connection *ic);
void rds_iw_send_clear_ring(struct rds_iw_connection *ic);
-int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op);
+int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_iw_send_grab_credits(struct rds_iw_connection *ic, u32 wanted,
@@ -371,7 +370,7 @@ unsigned int rds_iw_stats_info_copy(struct rds_info_iterator *iter,
unsigned int avail);
/* ib_sysctl.c */
-int __init rds_iw_sysctl_init(void);
+int rds_iw_sysctl_init(void);
void rds_iw_sysctl_exit(void);
extern unsigned long rds_iw_sysctl_max_send_wr;
extern unsigned long rds_iw_sysctl_max_recv_wr;
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index b5dd6ac39be8..712cf2d1f28e 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -257,7 +257,7 @@ static int rds_iw_setup_qp(struct rds_connection *conn)
* the rds_iwdev at all.
*/
rds_iwdev = ib_get_client_data(dev, &rds_iw_client);
- if (rds_iwdev == NULL) {
+ if (!rds_iwdev) {
if (printk_ratelimit())
printk(KERN_NOTICE "RDS/IW: No client_data for device %s\n",
dev->name);
@@ -292,7 +292,7 @@ static int rds_iw_setup_qp(struct rds_connection *conn)
ic->i_send_ring.w_nr *
sizeof(struct rds_header),
&ic->i_send_hdrs_dma, GFP_KERNEL);
- if (ic->i_send_hdrs == NULL) {
+ if (!ic->i_send_hdrs) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent send failed\n");
goto out;
@@ -302,7 +302,7 @@ static int rds_iw_setup_qp(struct rds_connection *conn)
ic->i_recv_ring.w_nr *
sizeof(struct rds_header),
&ic->i_recv_hdrs_dma, GFP_KERNEL);
- if (ic->i_recv_hdrs == NULL) {
+ if (!ic->i_recv_hdrs) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent recv failed\n");
goto out;
@@ -310,14 +310,14 @@ static int rds_iw_setup_qp(struct rds_connection *conn)
ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header),
&ic->i_ack_dma, GFP_KERNEL);
- if (ic->i_ack == NULL) {
+ if (!ic->i_ack) {
ret = -ENOMEM;
rdsdebug("ib_dma_alloc_coherent ack failed\n");
goto out;
}
ic->i_sends = vmalloc(ic->i_send_ring.w_nr * sizeof(struct rds_iw_send_work));
- if (ic->i_sends == NULL) {
+ if (!ic->i_sends) {
ret = -ENOMEM;
rdsdebug("send allocation failed\n");
goto out;
@@ -325,7 +325,7 @@ static int rds_iw_setup_qp(struct rds_connection *conn)
rds_iw_send_init_ring(ic);
ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr * sizeof(struct rds_iw_recv_work));
- if (ic->i_recvs == NULL) {
+ if (!ic->i_recvs) {
ret = -ENOMEM;
rdsdebug("recv allocation failed\n");
goto out;
@@ -696,7 +696,7 @@ int rds_iw_conn_alloc(struct rds_connection *conn, gfp_t gfp)
/* XXX too lazy? */
ic = kzalloc(sizeof(struct rds_iw_connection), GFP_KERNEL);
- if (ic == NULL)
+ if (!ic)
return -ENOMEM;
INIT_LIST_HEAD(&ic->iw_node);
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index 13dc1862d862..0e7accc23ee2 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -34,7 +34,6 @@
#include <linux/slab.h>
#include "rds.h"
-#include "rdma.h"
#include "iw.h"
@@ -207,9 +206,9 @@ void rds_iw_add_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *con
BUG_ON(list_empty(&ic->iw_node));
list_del(&ic->iw_node);
- spin_lock_irq(&rds_iwdev->spinlock);
+ spin_lock(&rds_iwdev->spinlock);
list_add_tail(&ic->iw_node, &rds_iwdev->conn_list);
- spin_unlock_irq(&rds_iwdev->spinlock);
+ spin_unlock(&rds_iwdev->spinlock);
spin_unlock_irq(&iw_nodev_conns_lock);
ic->rds_iwdev = rds_iwdev;
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index 3d479067d54d..5e57347f49ff 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -53,7 +53,7 @@ static void rds_iw_frag_drop_page(struct rds_page_frag *frag)
static void rds_iw_frag_free(struct rds_page_frag *frag)
{
rdsdebug("frag %p page %p\n", frag, frag->f_page);
- BUG_ON(frag->f_page != NULL);
+ BUG_ON(frag->f_page);
kmem_cache_free(rds_iw_frag_slab, frag);
}
@@ -143,14 +143,14 @@ static int rds_iw_recv_refill_one(struct rds_connection *conn,
struct ib_sge *sge;
int ret = -ENOMEM;
- if (recv->r_iwinc == NULL) {
+ if (!recv->r_iwinc) {
if (!atomic_add_unless(&rds_iw_allocation, 1, rds_iw_sysctl_max_recv_allocation)) {
rds_iw_stats_inc(s_iw_rx_alloc_limit);
goto out;
}
recv->r_iwinc = kmem_cache_alloc(rds_iw_incoming_slab,
kptr_gfp);
- if (recv->r_iwinc == NULL) {
+ if (!recv->r_iwinc) {
atomic_dec(&rds_iw_allocation);
goto out;
}
@@ -158,17 +158,17 @@ static int rds_iw_recv_refill_one(struct rds_connection *conn,
rds_inc_init(&recv->r_iwinc->ii_inc, conn, conn->c_faddr);
}
- if (recv->r_frag == NULL) {
+ if (!recv->r_frag) {
recv->r_frag = kmem_cache_alloc(rds_iw_frag_slab, kptr_gfp);
- if (recv->r_frag == NULL)
+ if (!recv->r_frag)
goto out;
INIT_LIST_HEAD(&recv->r_frag->f_item);
recv->r_frag->f_page = NULL;
}
- if (ic->i_frag.f_page == NULL) {
+ if (!ic->i_frag.f_page) {
ic->i_frag.f_page = alloc_page(page_gfp);
- if (ic->i_frag.f_page == NULL)
+ if (!ic->i_frag.f_page)
goto out;
ic->i_frag.f_offset = 0;
}
@@ -273,7 +273,7 @@ int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
return ret;
}
-void rds_iw_inc_purge(struct rds_incoming *inc)
+static void rds_iw_inc_purge(struct rds_incoming *inc)
{
struct rds_iw_incoming *iwinc;
struct rds_page_frag *frag;
@@ -716,7 +716,7 @@ static void rds_iw_process_recv(struct rds_connection *conn,
* into the inc and save the inc so we can hang upcoming fragments
* off its list.
*/
- if (iwinc == NULL) {
+ if (!iwinc) {
iwinc = recv->r_iwinc;
recv->r_iwinc = NULL;
ic->i_iwinc = iwinc;
@@ -887,7 +887,7 @@ int rds_iw_recv(struct rds_connection *conn)
return ret;
}
-int __init rds_iw_recv_init(void)
+int rds_iw_recv_init(void)
{
struct sysinfo si;
int ret = -ENOMEM;
@@ -899,13 +899,13 @@ int __init rds_iw_recv_init(void)
rds_iw_incoming_slab = kmem_cache_create("rds_iw_incoming",
sizeof(struct rds_iw_incoming),
0, 0, NULL);
- if (rds_iw_incoming_slab == NULL)
+ if (!rds_iw_incoming_slab)
goto out;
rds_iw_frag_slab = kmem_cache_create("rds_iw_frag",
sizeof(struct rds_page_frag),
0, 0, NULL);
- if (rds_iw_frag_slab == NULL)
+ if (!rds_iw_frag_slab)
kmem_cache_destroy(rds_iw_incoming_slab);
else
ret = 0;
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 52182ff7519e..6280ea020d4e 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -36,7 +36,6 @@
#include <linux/dmapool.h>
#include "rds.h"
-#include "rdma.h"
#include "iw.h"
static void rds_iw_send_rdma_complete(struct rds_message *rm,
@@ -64,13 +63,13 @@ static void rds_iw_send_rdma_complete(struct rds_message *rm,
}
static void rds_iw_send_unmap_rdma(struct rds_iw_connection *ic,
- struct rds_rdma_op *op)
+ struct rm_rdma_op *op)
{
- if (op->r_mapped) {
+ if (op->op_mapped) {
ib_dma_unmap_sg(ic->i_cm_id->device,
- op->r_sg, op->r_nents,
- op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
- op->r_mapped = 0;
+ op->op_sg, op->op_nents,
+ op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ op->op_mapped = 0;
}
}
@@ -83,11 +82,11 @@ static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
rdsdebug("ic %p send %p rm %p\n", ic, send, rm);
ib_dma_unmap_sg(ic->i_cm_id->device,
- rm->m_sg, rm->m_nents,
+ rm->data.op_sg, rm->data.op_nents,
DMA_TO_DEVICE);
- if (rm->m_rdma_op != NULL) {
- rds_iw_send_unmap_rdma(ic, rm->m_rdma_op);
+ if (rm->rdma.op_active) {
+ rds_iw_send_unmap_rdma(ic, &rm->rdma);
/* If the user asked for a completion notification on this
* message, we can implement three different semantics:
@@ -111,10 +110,10 @@ static void rds_iw_send_unmap_rm(struct rds_iw_connection *ic,
*/
rds_iw_send_rdma_complete(rm, wc_status);
- if (rm->m_rdma_op->r_write)
- rds_stats_add(s_send_rdma_bytes, rm->m_rdma_op->r_bytes);
+ if (rm->rdma.op_write)
+ rds_stats_add(s_send_rdma_bytes, rm->rdma.op_bytes);
else
- rds_stats_add(s_recv_rdma_bytes, rm->m_rdma_op->r_bytes);
+ rds_stats_add(s_recv_rdma_bytes, rm->rdma.op_bytes);
}
/* If anyone waited for this message to get flushed out, wake
@@ -556,25 +555,27 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
}
/* map the message the first time we see it */
- if (ic->i_rm == NULL) {
+ if (!ic->i_rm) {
/*
printk(KERN_NOTICE "rds_iw_xmit prep msg dport=%u flags=0x%x len=%d\n",
be16_to_cpu(rm->m_inc.i_hdr.h_dport),
rm->m_inc.i_hdr.h_flags,
be32_to_cpu(rm->m_inc.i_hdr.h_len));
*/
- if (rm->m_nents) {
- rm->m_count = ib_dma_map_sg(dev,
- rm->m_sg, rm->m_nents, DMA_TO_DEVICE);
- rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->m_count);
- if (rm->m_count == 0) {
+ if (rm->data.op_nents) {
+ rm->data.op_count = ib_dma_map_sg(dev,
+ rm->data.op_sg,
+ rm->data.op_nents,
+ DMA_TO_DEVICE);
+ rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
+ if (rm->data.op_count == 0) {
rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
rds_iw_ring_unalloc(&ic->i_send_ring, work_alloc);
ret = -ENOMEM; /* XXX ? */
goto out;
}
} else {
- rm->m_count = 0;
+ rm->data.op_count = 0;
}
ic->i_unsignaled_wrs = rds_iw_sysctl_max_unsig_wrs;
@@ -590,10 +591,10 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
/* If it has a RDMA op, tell the peer we did it. This is
* used by the peer to release use-once RDMA MRs. */
- if (rm->m_rdma_op) {
+ if (rm->rdma.op_active) {
struct rds_ext_header_rdma ext_hdr;
- ext_hdr.h_rdma_rkey = cpu_to_be32(rm->m_rdma_op->r_key);
+ ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
rds_message_add_extension(&rm->m_inc.i_hdr,
RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
}
@@ -621,7 +622,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
send = &ic->i_sends[pos];
first = send;
prev = NULL;
- scat = &rm->m_sg[sg];
+ scat = &rm->data.op_sg[sg];
sent = 0;
i = 0;
@@ -631,7 +632,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
* or when requested by the user. Right now, we let
* the application choose.
*/
- if (rm->m_rdma_op && rm->m_rdma_op->r_fence)
+ if (rm->rdma.op_active && rm->rdma.op_fence)
send_flags = IB_SEND_FENCE;
/*
@@ -650,7 +651,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
}
/* if there's data reference it with a chain of work reqs */
- for (; i < work_alloc && scat != &rm->m_sg[rm->m_count]; i++) {
+ for (; i < work_alloc && scat != &rm->data.op_sg[rm->data.op_count]; i++) {
unsigned int len;
send = &ic->i_sends[pos];
@@ -728,7 +729,7 @@ add_header:
sent += sizeof(struct rds_header);
/* if we finished the message then send completion owns it */
- if (scat == &rm->m_sg[rm->m_count]) {
+ if (scat == &rm->data.op_sg[rm->data.op_count]) {
prev->s_rm = ic->i_rm;
prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
ic->i_rm = NULL;
@@ -784,7 +785,7 @@ static void rds_iw_build_send_fastreg(struct rds_iw_device *rds_iwdev, struct rd
ib_update_fast_reg_key(send->s_mr, send->s_remap_count++);
}
-int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
+int rds_iw_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
{
struct rds_iw_connection *ic = conn->c_transport_data;
struct rds_iw_send_work *send = NULL;
@@ -794,7 +795,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
struct rds_iw_device *rds_iwdev;
struct scatterlist *scat;
unsigned long len;
- u64 remote_addr = op->r_remote_addr;
+ u64 remote_addr = op->op_remote_addr;
u32 pos, fr_pos;
u32 work_alloc;
u32 i;
@@ -806,21 +807,21 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
rds_iwdev = ib_get_client_data(ic->i_cm_id->device, &rds_iw_client);
/* map the message the first time we see it */
- if (!op->r_mapped) {
- op->r_count = ib_dma_map_sg(ic->i_cm_id->device,
- op->r_sg, op->r_nents, (op->r_write) ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count);
- if (op->r_count == 0) {
+ if (!op->op_mapped) {
+ op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
+ op->op_sg, op->op_nents, (op->op_write) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
+ if (op->op_count == 0) {
rds_iw_stats_inc(s_iw_tx_sg_mapping_failure);
ret = -ENOMEM; /* XXX ? */
goto out;
}
- op->r_mapped = 1;
+ op->op_mapped = 1;
}
- if (!op->r_write) {
+ if (!op->op_write) {
/* Alloc space on the send queue for the fastreg */
work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, 1, &fr_pos);
if (work_alloc != 1) {
@@ -835,7 +836,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
* Instead of knowing how to return a partial rdma read/write we insist that there
* be enough work requests to send the entire message.
*/
- i = ceil(op->r_count, rds_iwdev->max_sge);
+ i = ceil(op->op_count, rds_iwdev->max_sge);
work_alloc = rds_iw_ring_alloc(&ic->i_send_ring, i, &pos);
if (work_alloc != i) {
@@ -846,17 +847,17 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
}
send = &ic->i_sends[pos];
- if (!op->r_write) {
+ if (!op->op_write) {
first = prev = &ic->i_sends[fr_pos];
} else {
first = send;
prev = NULL;
}
- scat = &op->r_sg[0];
+ scat = &op->op_sg[0];
sent = 0;
- num_sge = op->r_count;
+ num_sge = op->op_count;
- for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
+ for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
send->s_wr.send_flags = 0;
send->s_queued = jiffies;
@@ -873,13 +874,13 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
* for local access after RDS is finished with it, using
* IB_WR_RDMA_READ_WITH_INV will invalidate it after the read has completed.
*/
- if (op->r_write)
+ if (op->op_write)
send->s_wr.opcode = IB_WR_RDMA_WRITE;
else
send->s_wr.opcode = IB_WR_RDMA_READ_WITH_INV;
send->s_wr.wr.rdma.remote_addr = remote_addr;
- send->s_wr.wr.rdma.rkey = op->r_key;
+ send->s_wr.wr.rdma.rkey = op->op_rkey;
send->s_op = op;
if (num_sge > rds_iwdev->max_sge) {
@@ -893,7 +894,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
if (prev)
prev->s_wr.next = &send->s_wr;
- for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) {
+ for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) {
len = ib_sg_dma_len(ic->i_cm_id->device, scat);
if (send->s_wr.opcode == IB_WR_RDMA_READ_WITH_INV)
@@ -927,7 +928,7 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
}
/* if we finished the message then send completion owns it */
- if (scat == &op->r_sg[op->r_count])
+ if (scat == &op->op_sg[op->op_count])
first->s_wr.send_flags = IB_SEND_SIGNALED;
if (i < work_alloc) {
@@ -941,9 +942,9 @@ int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
* adapters do not allow using the lkey for this at all. To bypass this use a
* fastreg_mr (or possibly a dma_mr)
*/
- if (!op->r_write) {
+ if (!op->op_write) {
rds_iw_build_send_fastreg(rds_iwdev, ic, &ic->i_sends[fr_pos],
- op->r_count, sent, conn->c_xmit_rm->m_rs->rs_user_addr);
+ op->op_count, sent, conn->c_xmit_rm->m_rs->rs_user_addr);
work_alloc++;
}
diff --git a/net/rds/iw_sysctl.c b/net/rds/iw_sysctl.c
index 1c4428a61a02..23e3a9a26aaf 100644
--- a/net/rds/iw_sysctl.c
+++ b/net/rds/iw_sysctl.c
@@ -122,10 +122,10 @@ void rds_iw_sysctl_exit(void)
unregister_sysctl_table(rds_iw_sysctl_hdr);
}
-int __init rds_iw_sysctl_init(void)
+int rds_iw_sysctl_init(void)
{
rds_iw_sysctl_hdr = register_sysctl_paths(rds_iw_sysctl_path, rds_iw_sysctl_table);
- if (rds_iw_sysctl_hdr == NULL)
+ if (!rds_iw_sysctl_hdr)
return -ENOMEM;
return 0;
}
diff --git a/net/rds/loop.c b/net/rds/loop.c
index dd9879379457..c390156b426f 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -61,10 +61,17 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
unsigned int hdr_off, unsigned int sg,
unsigned int off)
{
+ /* Do not send cong updates to loopback */
+ if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
+ rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
+ return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+ }
+
BUG_ON(hdr_off || sg || off);
rds_inc_init(&rm->m_inc, conn, conn->c_laddr);
- rds_message_addref(rm); /* for the inc */
+ /* For the embedded inc. Matching put is in loop_inc_free() */
+ rds_message_addref(rm);
rds_recv_incoming(conn, conn->c_laddr, conn->c_faddr, &rm->m_inc,
GFP_KERNEL, KM_USER0);
@@ -77,16 +84,14 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
return sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len);
}
-static int rds_loop_xmit_cong_map(struct rds_connection *conn,
- struct rds_cong_map *map,
- unsigned long offset)
+/*
+ * See rds_loop_xmit(). Since our inc is embedded in the rm, we
+ * make sure the rm lives at least until the inc is done.
+ */
+static void rds_loop_inc_free(struct rds_incoming *inc)
{
- BUG_ON(offset);
- BUG_ON(map != conn->c_lcong);
-
- rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
-
- return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+ struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
+ rds_message_put(rm);
}
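rds_loop_inc_free() recovers the rds_message from its embedded rds_incoming with container_of(). A plain userspace sketch of that step, using the usual portable definition rather than the kernel macro:

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct incoming { int dummy; };				/* stand-in for rds_incoming */
struct message { long refcount; struct incoming inc; };	/* stand-in for rds_message */

/* Step back from the embedded member to the enclosing structure. */
static struct message *msg_from_inc(struct incoming *inc)
{
	return container_of(inc, struct message, inc);
}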
/* we need to at least give the thread something to succeed */
@@ -112,7 +117,7 @@ static int rds_loop_conn_alloc(struct rds_connection *conn, gfp_t gfp)
unsigned long flags;
lc = kzalloc(sizeof(struct rds_loop_connection), GFP_KERNEL);
- if (lc == NULL)
+ if (!lc)
return -ENOMEM;
INIT_LIST_HEAD(&lc->loop_node);
@@ -169,14 +174,12 @@ void rds_loop_exit(void)
*/
struct rds_transport rds_loop_transport = {
.xmit = rds_loop_xmit,
- .xmit_cong_map = rds_loop_xmit_cong_map,
.recv = rds_loop_recv,
.conn_alloc = rds_loop_conn_alloc,
.conn_free = rds_loop_conn_free,
.conn_connect = rds_loop_conn_connect,
.conn_shutdown = rds_loop_conn_shutdown,
.inc_copy_to_user = rds_message_inc_copy_to_user,
- .inc_purge = rds_message_inc_purge,
- .inc_free = rds_message_inc_free,
+ .inc_free = rds_loop_inc_free,
.t_name = "loopback",
};
diff --git a/net/rds/message.c b/net/rds/message.c
index 9a1d67e001ba..84f937f11d47 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -34,9 +34,6 @@
#include <linux/slab.h>
#include "rds.h"
-#include "rdma.h"
-
-static DECLARE_WAIT_QUEUE_HEAD(rds_message_flush_waitq);
static unsigned int rds_exthdr_size[__RDS_EXTHDR_MAX] = {
[RDS_EXTHDR_NONE] = 0,
@@ -63,29 +60,31 @@ static void rds_message_purge(struct rds_message *rm)
if (unlikely(test_bit(RDS_MSG_PAGEVEC, &rm->m_flags)))
return;
- for (i = 0; i < rm->m_nents; i++) {
- rdsdebug("putting data page %p\n", (void *)sg_page(&rm->m_sg[i]));
+ for (i = 0; i < rm->data.op_nents; i++) {
+ rdsdebug("putting data page %p\n", (void *)sg_page(&rm->data.op_sg[i]));
/* XXX will have to put_page for page refs */
- __free_page(sg_page(&rm->m_sg[i]));
+ __free_page(sg_page(&rm->data.op_sg[i]));
}
- rm->m_nents = 0;
+ rm->data.op_nents = 0;
- if (rm->m_rdma_op)
- rds_rdma_free_op(rm->m_rdma_op);
- if (rm->m_rdma_mr)
- rds_mr_put(rm->m_rdma_mr);
-}
+ if (rm->rdma.op_active)
+ rds_rdma_free_op(&rm->rdma);
+ if (rm->rdma.op_rdma_mr)
+ rds_mr_put(rm->rdma.op_rdma_mr);
-void rds_message_inc_purge(struct rds_incoming *inc)
-{
- struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
- rds_message_purge(rm);
+ if (rm->atomic.op_active)
+ rds_atomic_free_op(&rm->atomic);
+ if (rm->atomic.op_rdma_mr)
+ rds_mr_put(rm->atomic.op_rdma_mr);
}
void rds_message_put(struct rds_message *rm)
{
rdsdebug("put rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
-
+ if (atomic_read(&rm->m_refcount) == 0) {
+printk(KERN_CRIT "danger refcount zero on %p\n", rm);
+WARN_ON(1);
+ }
if (atomic_dec_and_test(&rm->m_refcount)) {
BUG_ON(!list_empty(&rm->m_sock_item));
BUG_ON(!list_empty(&rm->m_conn_item));
@@ -96,12 +95,6 @@ void rds_message_put(struct rds_message *rm)
}
EXPORT_SYMBOL_GPL(rds_message_put);
-void rds_message_inc_free(struct rds_incoming *inc)
-{
- struct rds_message *rm = container_of(inc, struct rds_message, m_inc);
- rds_message_put(rm);
-}
-
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
__be16 dport, u64 seq)
{
@@ -214,41 +207,68 @@ int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 o
}
EXPORT_SYMBOL_GPL(rds_message_add_rdma_dest_extension);
-struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp)
+/*
+ * Each rds_message is allocated with extra space for the scatterlist entries
+ * rds ops will need. This is to minimize memory allocation count. Then, each rds op
+ * can grab SGs when initializing its part of the rds_message.
+ */
+struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
{
struct rds_message *rm;
- rm = kzalloc(sizeof(struct rds_message) +
- (nents * sizeof(struct scatterlist)), gfp);
+ rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
if (!rm)
goto out;
- if (nents)
- sg_init_table(rm->m_sg, nents);
+ rm->m_used_sgs = 0;
+ rm->m_total_sgs = extra_len / sizeof(struct scatterlist);
+
atomic_set(&rm->m_refcount, 1);
INIT_LIST_HEAD(&rm->m_sock_item);
INIT_LIST_HEAD(&rm->m_conn_item);
spin_lock_init(&rm->m_rs_lock);
+ init_waitqueue_head(&rm->m_flush_wait);
out:
return rm;
}
+/*
+ * RDS ops use this to grab SG entries from the rm's sg pool.
+ */
+struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
+{
+ struct scatterlist *sg_first = (struct scatterlist *) &rm[1];
+ struct scatterlist *sg_ret;
+
+ WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs);
+ WARN_ON(!nents);
+
+ sg_ret = &sg_first[rm->m_used_sgs];
+ sg_init_table(sg_ret, nents);
+ rm->m_used_sgs += nents;
+
+ return sg_ret;
+}
+
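rds_message_alloc() now sizes a single allocation to hold the message plus all the scatterlist entries its ops will need, and rds_message_alloc_sgs() hands slices of that trailing pool out with a bump counter. A self-contained sketch of the pattern (fake types, not the real rds_message layout):

#include <stdlib.h>
#include <assert.h>

struct fake_sg { void *page; unsigned int len; };

struct fake_msg {
	unsigned int used_sgs;
	unsigned int total_sgs;
	/* the SG pool lives in the same allocation, right after the struct */
};

static struct fake_msg *msg_alloc(unsigned int extra_len)
{
	struct fake_msg *m = calloc(1, sizeof(*m) + extra_len);

	if (!m)
		return NULL;
	m->total_sgs = extra_len / sizeof(struct fake_sg);
	return m;
}

/* Hand out nents entries from the trailing pool, as rds_message_alloc_sgs() does. */
static struct fake_sg *msg_alloc_sgs(struct fake_msg *m, unsigned int nents)
{
	struct fake_sg *pool = (struct fake_sg *)&m[1];
	struct fake_sg *ret = &pool[m->used_sgs];

	assert(m->used_sgs + nents <= m->total_sgs);
	m->used_sgs += nents;
	return ret;
}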
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len)
{
struct rds_message *rm;
unsigned int i;
+ int num_sgs = ceil(total_len, PAGE_SIZE);
+ int extra_bytes = num_sgs * sizeof(struct scatterlist);
- rm = rds_message_alloc(ceil(total_len, PAGE_SIZE), GFP_KERNEL);
- if (rm == NULL)
+ rm = rds_message_alloc(extra_bytes, GFP_NOWAIT);
+ if (!rm)
return ERR_PTR(-ENOMEM);
set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
- rm->m_nents = ceil(total_len, PAGE_SIZE);
+ rm->data.op_nents = ceil(total_len, PAGE_SIZE);
+ rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
- for (i = 0; i < rm->m_nents; ++i) {
- sg_set_page(&rm->m_sg[i],
+ for (i = 0; i < rm->data.op_nents; ++i) {
+ sg_set_page(&rm->data.op_sg[i],
virt_to_page(page_addrs[i]),
PAGE_SIZE, 0);
}
@@ -256,40 +276,33 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in
return rm;
}
-struct rds_message *rds_message_copy_from_user(struct iovec *first_iov,
+int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov,
size_t total_len)
{
unsigned long to_copy;
unsigned long iov_off;
unsigned long sg_off;
- struct rds_message *rm;
struct iovec *iov;
struct scatterlist *sg;
- int ret;
-
- rm = rds_message_alloc(ceil(total_len, PAGE_SIZE), GFP_KERNEL);
- if (rm == NULL) {
- ret = -ENOMEM;
- goto out;
- }
+ int ret = 0;
rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
/*
* now allocate and copy in the data payload.
*/
- sg = rm->m_sg;
+ sg = rm->data.op_sg;
iov = first_iov;
iov_off = 0;
sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. */
while (total_len) {
- if (sg_page(sg) == NULL) {
+ if (!sg_page(sg)) {
ret = rds_page_remainder_alloc(sg, total_len,
GFP_HIGHUSER);
if (ret)
goto out;
- rm->m_nents++;
+ rm->data.op_nents++;
sg_off = 0;
}
@@ -320,14 +333,8 @@ struct rds_message *rds_message_copy_from_user(struct iovec *first_iov,
sg++;
}
- ret = 0;
out:
- if (ret) {
- if (rm)
- rds_message_put(rm);
- rm = ERR_PTR(ret);
- }
- return rm;
+ return ret;
}
int rds_message_inc_copy_to_user(struct rds_incoming *inc,
@@ -348,7 +355,7 @@ int rds_message_inc_copy_to_user(struct rds_incoming *inc,
iov = first_iov;
iov_off = 0;
- sg = rm->m_sg;
+ sg = rm->data.op_sg;
vec_off = 0;
copied = 0;
@@ -394,15 +401,14 @@ int rds_message_inc_copy_to_user(struct rds_incoming *inc,
*/
void rds_message_wait(struct rds_message *rm)
{
- wait_event(rds_message_flush_waitq,
+ wait_event_interruptible(rm->m_flush_wait,
!test_bit(RDS_MSG_MAPPED, &rm->m_flags));
}
void rds_message_unmapped(struct rds_message *rm)
{
clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
- if (waitqueue_active(&rds_message_flush_waitq))
- wake_up(&rds_message_flush_waitq);
+ wake_up_interruptible(&rm->m_flush_wait);
}
EXPORT_SYMBOL_GPL(rds_message_unmapped);
diff --git a/net/rds/page.c b/net/rds/page.c
index 595a952d4b17..5e44f5ae7898 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -116,7 +116,7 @@ int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
/* jump straight to allocation if we're trying for a huge page */
if (bytes >= PAGE_SIZE) {
page = alloc_page(gfp);
- if (page == NULL) {
+ if (!page) {
ret = -ENOMEM;
} else {
sg_set_page(scat, page, PAGE_SIZE, 0);
@@ -162,7 +162,7 @@ int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
rem = &per_cpu(rds_page_remainders, get_cpu());
local_irq_save(flags);
- if (page == NULL) {
+ if (!page) {
ret = -ENOMEM;
break;
}
@@ -186,6 +186,7 @@ out:
ret ? 0 : scat->length);
return ret;
}
+EXPORT_SYMBOL_GPL(rds_page_remainder_alloc);
static int rds_page_remainder_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 75fd13bb631b..1a41debca1ce 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -35,7 +35,7 @@
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */
-#include "rdma.h"
+#include "rds.h"
/*
* XXX
@@ -130,14 +130,22 @@ void rds_rdma_drop_keys(struct rds_sock *rs)
{
struct rds_mr *mr;
struct rb_node *node;
+ unsigned long flags;
/* Release any MRs associated with this socket */
+ spin_lock_irqsave(&rs->rs_rdma_lock, flags);
while ((node = rb_first(&rs->rs_rdma_keys))) {
mr = container_of(node, struct rds_mr, r_rb_node);
if (mr->r_trans == rs->rs_transport)
mr->r_invalidate = 0;
+ rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
+ RB_CLEAR_NODE(&mr->r_rb_node);
+ spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
+ rds_destroy_mr(mr);
rds_mr_put(mr);
+ spin_lock_irqsave(&rs->rs_rdma_lock, flags);
}
+ spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
if (rs->rs_transport && rs->rs_transport->flush_mrs)
rs->rs_transport->flush_mrs();
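The rds_rdma_drop_keys() loop above unlinks one MR under rs_rdma_lock, drops the lock to run the teardown (assumed here to be work that must not run under a spinlock), then re-takes the lock before touching the tree again. A userspace sketch of the same shape over a simple list:

#include <pthread.h>
#include <stdlib.h>

struct node { struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

/* Placeholder for per-entry teardown that must not run under the lock. */
static void destroy(struct node *n) { free(n); }

static void drop_all(void)
{
	struct node *n;

	pthread_mutex_lock(&lock);
	while ((n = head) != NULL) {
		head = n->next;			/* unlink while protected */
		pthread_mutex_unlock(&lock);
		destroy(n);			/* heavy work, lock dropped */
		pthread_mutex_lock(&lock);	/* re-take before the next lookup */
	}
	pthread_mutex_unlock(&lock);
}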
@@ -181,7 +189,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
goto out;
}
- if (rs->rs_transport->get_mr == NULL) {
+ if (!rs->rs_transport->get_mr) {
ret = -EOPNOTSUPP;
goto out;
}
@@ -197,13 +205,13 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
/* XXX clamp nr_pages to limit the size of this alloc? */
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
- if (pages == NULL) {
+ if (!pages) {
ret = -ENOMEM;
goto out;
}
mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
- if (mr == NULL) {
+ if (!mr) {
ret = -ENOMEM;
goto out;
}
@@ -230,13 +238,13 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
* r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
* the zero page.
*/
- ret = rds_pin_pages(args->vec.addr & PAGE_MASK, nr_pages, pages, 1);
+ ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
if (ret < 0)
goto out;
nents = ret;
sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
- if (sg == NULL) {
+ if (!sg) {
ret = -ENOMEM;
goto out;
}
@@ -406,68 +414,127 @@ void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
- if (mr && (mr->r_use_once || force)) {
+ if (!mr) {
+ printk(KERN_ERR "rds: trying to unuse MR with unknown r_key %u!\n", r_key);
+ spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
+ return;
+ }
+
+ if (mr->r_use_once || force) {
rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
RB_CLEAR_NODE(&mr->r_rb_node);
zot_me = 1;
- } else if (mr)
- atomic_inc(&mr->r_refcount);
+ }
spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
/* May have to issue a dma_sync on this memory region.
* Note we could avoid this if the operation was a RDMA READ,
* but at this point we can't tell. */
- if (mr != NULL) {
- if (mr->r_trans->sync_mr)
- mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);
-
- /* If the MR was marked as invalidate, this will
- * trigger an async flush. */
- if (zot_me)
- rds_destroy_mr(mr);
- rds_mr_put(mr);
- }
+ if (mr->r_trans->sync_mr)
+ mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);
+
+ /* If the MR was marked as invalidate, this will
+ * trigger an async flush. */
+ if (zot_me)
+ rds_destroy_mr(mr);
+ rds_mr_put(mr);
}
-void rds_rdma_free_op(struct rds_rdma_op *ro)
+void rds_rdma_free_op(struct rm_rdma_op *ro)
{
unsigned int i;
- for (i = 0; i < ro->r_nents; i++) {
- struct page *page = sg_page(&ro->r_sg[i]);
+ for (i = 0; i < ro->op_nents; i++) {
+ struct page *page = sg_page(&ro->op_sg[i]);
/* Mark page dirty if it was possibly modified, which
* is the case for a RDMA_READ which copies from remote
* to local memory */
- if (!ro->r_write) {
- BUG_ON(in_interrupt());
+ if (!ro->op_write) {
+ BUG_ON(irqs_disabled());
set_page_dirty(page);
}
put_page(page);
}
- kfree(ro->r_notifier);
- kfree(ro);
+ kfree(ro->op_notifier);
+ ro->op_notifier = NULL;
+ ro->op_active = 0;
+}
+
+void rds_atomic_free_op(struct rm_atomic_op *ao)
+{
+ struct page *page = sg_page(ao->op_sg);
+
+ /* Mark page dirty if it was possibly modified, which
+ * is the case for a RDMA_READ which copies from remote
+ * to local memory */
+ set_page_dirty(page);
+ put_page(page);
+
+ kfree(ao->op_notifier);
+ ao->op_notifier = NULL;
+ ao->op_active = 0;
}
+
/*
- * args is a pointer to an in-kernel copy in the sendmsg cmsg.
+ * Count the number of pages needed to describe an incoming iovec.
*/
-static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
- struct rds_rdma_args *args)
+static int rds_rdma_pages(struct rds_rdma_args *args)
{
struct rds_iovec vec;
- struct rds_rdma_op *op = NULL;
+ struct rds_iovec __user *local_vec;
+ unsigned int tot_pages = 0;
unsigned int nr_pages;
- unsigned int max_pages;
+ unsigned int i;
+
+ local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
+
+ /* figure out the number of pages in the vector */
+ for (i = 0; i < args->nr_local; i++) {
+ if (copy_from_user(&vec, &local_vec[i],
+ sizeof(struct rds_iovec)))
+ return -EFAULT;
+
+ nr_pages = rds_pages_in_vec(&vec);
+ if (nr_pages == 0)
+ return -EINVAL;
+
+ tot_pages += nr_pages;
+ }
+
+ return tot_pages;
+}
+
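rds_rdma_pages() sums, per rds_iovec, the number of pages the user buffer touches. The per-vector arithmetic that rds_pages_in_vec() is assumed to perform looks like this; for example, a 5000-byte vector starting 100 bytes into a 4096-byte page spans two pages:

/* Pages spanned by a user buffer (addr, bytes). */
static unsigned int pages_in_vec(unsigned long addr, unsigned long bytes,
				 unsigned long page_size)
{
	unsigned long first, last;

	if (bytes == 0)
		return 0;
	first = addr / page_size;
	last = (addr + bytes - 1) / page_size;
	return last - first + 1;
}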
+int rds_rdma_extra_size(struct rds_rdma_args *args)
+{
+ return rds_rdma_pages(args) * sizeof(struct scatterlist);
+}
+
+/*
+ * The application asks for a RDMA transfer.
+ * Extract all arguments and set up the rdma_op
+ */
+int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
+ struct cmsghdr *cmsg)
+{
+ struct rds_rdma_args *args;
+ struct rds_iovec vec;
+ struct rm_rdma_op *op = &rm->rdma;
+ int nr_pages;
unsigned int nr_bytes;
struct page **pages = NULL;
struct rds_iovec __user *local_vec;
- struct scatterlist *sg;
unsigned int nr;
unsigned int i, j;
- int ret;
+ int ret = 0;
+ if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
+ || rm->rdma.op_active)
+ return -EINVAL;
+
+ args = CMSG_DATA(cmsg);
if (rs->rs_bound_addr == 0) {
ret = -ENOTCONN; /* XXX not a great errno */
@@ -479,61 +546,38 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
goto out;
}
- nr_pages = 0;
- max_pages = 0;
-
- local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
-
- /* figure out the number of pages in the vector */
- for (i = 0; i < args->nr_local; i++) {
- if (copy_from_user(&vec, &local_vec[i],
- sizeof(struct rds_iovec))) {
- ret = -EFAULT;
- goto out;
- }
-
- nr = rds_pages_in_vec(&vec);
- if (nr == 0) {
- ret = -EINVAL;
- goto out;
- }
-
- max_pages = max(nr, max_pages);
- nr_pages += nr;
- }
-
- pages = kcalloc(max_pages, sizeof(struct page *), GFP_KERNEL);
- if (pages == NULL) {
- ret = -ENOMEM;
+ nr_pages = rds_rdma_pages(args);
+ if (nr_pages < 0)
goto out;
- }
- op = kzalloc(offsetof(struct rds_rdma_op, r_sg[nr_pages]), GFP_KERNEL);
- if (op == NULL) {
+ pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
+ if (!pages) {
ret = -ENOMEM;
goto out;
}
- op->r_write = !!(args->flags & RDS_RDMA_READWRITE);
- op->r_fence = !!(args->flags & RDS_RDMA_FENCE);
- op->r_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
- op->r_recverr = rs->rs_recverr;
+ op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
+ op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
+ op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
+ op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
+ op->op_active = 1;
+ op->op_recverr = rs->rs_recverr;
WARN_ON(!nr_pages);
- sg_init_table(op->r_sg, nr_pages);
+ op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
- if (op->r_notify || op->r_recverr) {
+ if (op->op_notify || op->op_recverr) {
/* We allocate an uninitialized notifier here, because
* we don't want to do that in the completion handler. We
* would have to use GFP_ATOMIC there, and don't want to deal
* with failed allocations.
*/
- op->r_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
- if (!op->r_notifier) {
+ op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
+ if (!op->op_notifier) {
ret = -ENOMEM;
goto out;
}
- op->r_notifier->n_user_token = args->user_token;
- op->r_notifier->n_status = RDS_RDMA_SUCCESS;
+ op->op_notifier->n_user_token = args->user_token;
+ op->op_notifier->n_status = RDS_RDMA_SUCCESS;
}
/* The cookie contains the R_Key of the remote memory region, and
@@ -543,15 +587,17 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
* destination address (which is really an offset into the MR)
* FIXME: We may want to move this into ib_rdma.c
*/
- op->r_key = rds_rdma_cookie_key(args->cookie);
- op->r_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);
+ op->op_rkey = rds_rdma_cookie_key(args->cookie);
+ op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);
nr_bytes = 0;
rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
(unsigned long long)args->nr_local,
(unsigned long long)args->remote_vec.addr,
- op->r_key);
+ op->op_rkey);
+
+ local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;
for (i = 0; i < args->nr_local; i++) {
if (copy_from_user(&vec, &local_vec[i],
@@ -569,15 +615,10 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
rs->rs_user_addr = vec.addr;
rs->rs_user_bytes = vec.bytes;
- /* did the user change the vec under us? */
- if (nr > max_pages || op->r_nents + nr > nr_pages) {
- ret = -EINVAL;
- goto out;
- }
/* If it's a WRITE operation, we want to pin the pages for reading.
* If it's a READ operation, we need to pin the pages for writing.
*/
- ret = rds_pin_pages(vec.addr & PAGE_MASK, nr, pages, !op->r_write);
+ ret = rds_pin_pages(vec.addr, nr, pages, !op->op_write);
if (ret < 0)
goto out;
@@ -588,8 +629,9 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
for (j = 0; j < nr; j++) {
unsigned int offset = vec.addr & ~PAGE_MASK;
+ struct scatterlist *sg;
- sg = &op->r_sg[op->r_nents + j];
+ sg = &op->op_sg[op->op_nents + j];
sg_set_page(sg, pages[j],
min_t(unsigned int, vec.bytes, PAGE_SIZE - offset),
offset);
@@ -601,10 +643,9 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
vec.bytes -= sg->length;
}
- op->r_nents += nr;
+ op->op_nents += nr;
}
-
if (nr_bytes > args->remote_vec.bytes) {
rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
nr_bytes,
@@ -612,38 +653,17 @@ static struct rds_rdma_op *rds_rdma_prepare(struct rds_sock *rs,
ret = -EINVAL;
goto out;
}
- op->r_bytes = nr_bytes;
+ op->op_bytes = nr_bytes;
ret = 0;
out:
kfree(pages);
- if (ret) {
- if (op)
- rds_rdma_free_op(op);
- op = ERR_PTR(ret);
- }
- return op;
-}
-
-/*
- * The application asks for a RDMA transfer.
- * Extract all arguments and set up the rdma_op
- */
-int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
- struct cmsghdr *cmsg)
-{
- struct rds_rdma_op *op;
-
- if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args)) ||
- rm->m_rdma_op != NULL)
- return -EINVAL;
+ if (ret)
+ rds_rdma_free_op(op);
- op = rds_rdma_prepare(rs, CMSG_DATA(cmsg));
- if (IS_ERR(op))
- return PTR_ERR(op);
rds_stats_inc(s_send_rdma);
- rm->m_rdma_op = op;
- return 0;
+
+ return ret;
}
/*
@@ -673,7 +693,7 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
spin_lock_irqsave(&rs->rs_rdma_lock, flags);
mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
- if (mr == NULL)
+ if (!mr)
err = -EINVAL; /* invalid r_key */
else
atomic_inc(&mr->r_refcount);
@@ -681,7 +701,7 @@ int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
if (mr) {
mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
- rm->m_rdma_mr = mr;
+ rm->rdma.op_rdma_mr = mr;
}
return err;
}
@@ -699,5 +719,98 @@ int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
rm->m_rdma_cookie != 0)
return -EINVAL;
- return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->m_rdma_mr);
+ return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr);
+}
+
+/*
+ * Fill in rds_message for an atomic request.
+ */
+int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
+ struct cmsghdr *cmsg)
+{
+ struct page *page = NULL;
+ struct rds_atomic_args *args;
+ int ret = 0;
+
+ if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
+ || rm->atomic.op_active)
+ return -EINVAL;
+
+ args = CMSG_DATA(cmsg);
+
+ /* Nonmasked & masked cmsg ops converted to masked hw ops */
+ switch (cmsg->cmsg_type) {
+ case RDS_CMSG_ATOMIC_FADD:
+ rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
+ rm->atomic.op_m_fadd.add = args->fadd.add;
+ rm->atomic.op_m_fadd.nocarry_mask = 0;
+ break;
+ case RDS_CMSG_MASKED_ATOMIC_FADD:
+ rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
+ rm->atomic.op_m_fadd.add = args->m_fadd.add;
+ rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
+ break;
+ case RDS_CMSG_ATOMIC_CSWP:
+ rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
+ rm->atomic.op_m_cswp.compare = args->cswp.compare;
+ rm->atomic.op_m_cswp.swap = args->cswp.swap;
+ rm->atomic.op_m_cswp.compare_mask = ~0;
+ rm->atomic.op_m_cswp.swap_mask = ~0;
+ break;
+ case RDS_CMSG_MASKED_ATOMIC_CSWP:
+ rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
+ rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
+ rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
+ rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
+ rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
+ break;
+ default:
+ BUG(); /* should never happen */
+ }
+
+ rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
+ rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
+ rm->atomic.op_active = 1;
+ rm->atomic.op_recverr = rs->rs_recverr;
+ rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
+
+ /* verify 8 byte-aligned */
+ if (args->local_addr & 0x7) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ ret = rds_pin_pages(args->local_addr, 1, &page, 1);
+ if (ret != 1)
+ goto err;
+ ret = 0;
+
+ sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));
+
+ if (rm->atomic.op_notify || rm->atomic.op_recverr) {
+ /* We allocate an uninitialized notifier here, because
+ * we don't want to do that in the completion handler. We
+ * would have to use GFP_ATOMIC there, and don't want to deal
+ * with failed allocations.
+ */
+ rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
+ if (!rm->atomic.op_notifier) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ rm->atomic.op_notifier->n_user_token = args->user_token;
+ rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
+ }
+
+ rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
+ rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);
+
+ return ret;
+err:
+ if (page)
+ put_page(page);
+ kfree(rm->atomic.op_notifier);
+
+ return ret;
}
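What the masks set up in rds_cmsg_atomic() mean, modeled locally in plain C purely as an illustration (the real operation is executed remotely by the adapter): only bits under compare_mask are compared and only bits under swap_mask are replaced, so the non-masked cmsg types reduce to compare_mask = swap_mask = ~0 as in the switch above. The FADD nocarry_mask, which stops carries at the marked bit positions, is not modeled here.

#include <stdint.h>

static uint64_t masked_cswp(uint64_t *target, uint64_t compare, uint64_t swap,
			    uint64_t compare_mask, uint64_t swap_mask)
{
	uint64_t old = *target;

	/* compare only the masked bits ... */
	if ((old & compare_mask) == (compare & compare_mask))
		/* ... and replace only the masked bits */
		*target = (old & ~swap_mask) | (swap & swap_mask);
	return old;
}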
diff --git a/net/rds/rdma.h b/net/rds/rdma.h
deleted file mode 100644
index 909c39835a5d..000000000000
--- a/net/rds/rdma.h
+++ /dev/null
@@ -1,85 +0,0 @@
-#ifndef _RDS_RDMA_H
-#define _RDS_RDMA_H
-
-#include <linux/rbtree.h>
-#include <linux/spinlock.h>
-#include <linux/scatterlist.h>
-
-#include "rds.h"
-
-struct rds_mr {
- struct rb_node r_rb_node;
- atomic_t r_refcount;
- u32 r_key;
-
- /* A copy of the creation flags */
- unsigned int r_use_once:1;
- unsigned int r_invalidate:1;
- unsigned int r_write:1;
-
- /* This is for RDS_MR_DEAD.
- * It would be nice & consistent to make this part of the above
- * bit field here, but we need to use test_and_set_bit.
- */
- unsigned long r_state;
- struct rds_sock *r_sock; /* back pointer to the socket that owns us */
- struct rds_transport *r_trans;
- void *r_trans_private;
-};
-
-/* Flags for mr->r_state */
-#define RDS_MR_DEAD 0
-
-struct rds_rdma_op {
- u32 r_key;
- u64 r_remote_addr;
- unsigned int r_write:1;
- unsigned int r_fence:1;
- unsigned int r_notify:1;
- unsigned int r_recverr:1;
- unsigned int r_mapped:1;
- struct rds_notifier *r_notifier;
- unsigned int r_bytes;
- unsigned int r_nents;
- unsigned int r_count;
- struct scatterlist r_sg[0];
-};
-
-static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
-{
- return r_key | (((u64) offset) << 32);
-}
-
-static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
-{
- return cookie;
-}
-
-static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
-{
- return cookie >> 32;
-}
-
-int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
-int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
-int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
-void rds_rdma_drop_keys(struct rds_sock *rs);
-int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
- struct cmsghdr *cmsg);
-int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
- struct cmsghdr *cmsg);
-int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
- struct cmsghdr *cmsg);
-int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
- struct cmsghdr *cmsg);
-void rds_rdma_free_op(struct rds_rdma_op *ro);
-void rds_rdma_send_complete(struct rds_message *rm, int);
-
-extern void __rds_put_mr_final(struct rds_mr *mr);
-static inline void rds_mr_put(struct rds_mr *mr)
-{
- if (atomic_dec_and_test(&mr->r_refcount))
- __rds_put_mr_final(mr);
-}
-
-#endif
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index e599ba2f950d..e6ed10aee190 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -36,6 +36,34 @@
static struct rdma_cm_id *rds_rdma_listen_id;
+static char *rds_cm_event_strings[] = {
+#define RDS_CM_EVENT_STRING(foo) \
+ [RDMA_CM_EVENT_##foo] = __stringify(RDMA_CM_EVENT_##foo)
+ RDS_CM_EVENT_STRING(ADDR_RESOLVED),
+ RDS_CM_EVENT_STRING(ADDR_ERROR),
+ RDS_CM_EVENT_STRING(ROUTE_RESOLVED),
+ RDS_CM_EVENT_STRING(ROUTE_ERROR),
+ RDS_CM_EVENT_STRING(CONNECT_REQUEST),
+ RDS_CM_EVENT_STRING(CONNECT_RESPONSE),
+ RDS_CM_EVENT_STRING(CONNECT_ERROR),
+ RDS_CM_EVENT_STRING(UNREACHABLE),
+ RDS_CM_EVENT_STRING(REJECTED),
+ RDS_CM_EVENT_STRING(ESTABLISHED),
+ RDS_CM_EVENT_STRING(DISCONNECTED),
+ RDS_CM_EVENT_STRING(DEVICE_REMOVAL),
+ RDS_CM_EVENT_STRING(MULTICAST_JOIN),
+ RDS_CM_EVENT_STRING(MULTICAST_ERROR),
+ RDS_CM_EVENT_STRING(ADDR_CHANGE),
+ RDS_CM_EVENT_STRING(TIMEWAIT_EXIT),
+#undef RDS_CM_EVENT_STRING
+};
+
+static char *rds_cm_event_str(enum rdma_cm_event_type type)
+{
+ return rds_str_array(rds_cm_event_strings,
+ ARRAY_SIZE(rds_cm_event_strings), type);
+};
+
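rds_cm_event_str() leans on rds_str_array() (declared in rds.h later in this patch); the lookup it is assumed to provide is simply a bounds-checked array index with an "unknown" fallback, so rdsdebug() never prints a NULL string:

#include <stddef.h>

static char *str_array(char **array, size_t elements, size_t index)
{
	if (index < elements && array[index])
		return array[index];
	return "unknown";
}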
int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event)
{
@@ -44,8 +72,8 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
struct rds_transport *trans;
int ret = 0;
- rdsdebug("conn %p id %p handling event %u\n", conn, cm_id,
- event->event);
+ rdsdebug("conn %p id %p handling event %u (%s)\n", conn, cm_id,
+ event->event, rds_cm_event_str(event->event));
if (cm_id->device->node_type == RDMA_NODE_RNIC)
trans = &rds_iw_transport;
@@ -109,7 +137,8 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
default:
/* things like device disconnect? */
- printk(KERN_ERR "RDS: unknown event %u!\n", event->event);
+ printk(KERN_ERR "RDS: unknown event %u (%s)!\n",
+ event->event, rds_cm_event_str(event->event));
break;
}
@@ -117,12 +146,13 @@ out:
if (conn)
mutex_unlock(&conn->c_cm_lock);
- rdsdebug("id %p event %u handling ret %d\n", cm_id, event->event, ret);
+ rdsdebug("id %p event %u (%s) handling ret %d\n", cm_id, event->event,
+ rds_cm_event_str(event->event), ret);
return ret;
}
-static int __init rds_rdma_listen_init(void)
+static int rds_rdma_listen_init(void)
{
struct sockaddr_in sin;
struct rdma_cm_id *cm_id;
@@ -177,7 +207,7 @@ static void rds_rdma_listen_stop(void)
}
}
-int __init rds_rdma_init(void)
+int rds_rdma_init(void)
{
int ret;
diff --git a/net/rds/rds.h b/net/rds/rds.h
index c224b5bb3ba9..8103dcf8b976 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -80,6 +80,7 @@ enum {
/* Bits for c_flags */
#define RDS_LL_SEND_FULL 0
#define RDS_RECONNECT_PENDING 1
+#define RDS_IN_XMIT 2
struct rds_connection {
struct hlist_node c_hash_node;
@@ -91,12 +92,13 @@ struct rds_connection {
struct rds_cong_map *c_lcong;
struct rds_cong_map *c_fcong;
- struct mutex c_send_lock; /* protect send ring */
struct rds_message *c_xmit_rm;
unsigned long c_xmit_sg;
unsigned int c_xmit_hdr_off;
unsigned int c_xmit_data_off;
+ unsigned int c_xmit_atomic_sent;
unsigned int c_xmit_rdma_sent;
+ unsigned int c_xmit_data_sent;
spinlock_t c_lock; /* protect msg queues */
u64 c_next_tx_seq;
@@ -116,11 +118,10 @@ struct rds_connection {
struct delayed_work c_conn_w;
struct work_struct c_down_w;
struct mutex c_cm_lock; /* protect conn state & cm */
+ wait_queue_head_t c_waitq;
struct list_head c_map_item;
unsigned long c_map_queued;
- unsigned long c_map_offset;
- unsigned long c_map_bytes;
unsigned int c_unacked_packets;
unsigned int c_unacked_bytes;
@@ -206,6 +207,48 @@ struct rds_incoming {
rds_rdma_cookie_t i_rdma_cookie;
};
+struct rds_mr {
+ struct rb_node r_rb_node;
+ atomic_t r_refcount;
+ u32 r_key;
+
+ /* A copy of the creation flags */
+ unsigned int r_use_once:1;
+ unsigned int r_invalidate:1;
+ unsigned int r_write:1;
+
+ /* This is for RDS_MR_DEAD.
+ * It would be nice & consistent to make this part of the above
+ * bit field here, but we need to use test_and_set_bit.
+ */
+ unsigned long r_state;
+ struct rds_sock *r_sock; /* back pointer to the socket that owns us */
+ struct rds_transport *r_trans;
+ void *r_trans_private;
+};
+
+/* Flags for mr->r_state */
+#define RDS_MR_DEAD 0
+
+static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
+{
+ return r_key | (((u64) offset) << 32);
+}
+
+static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
+{
+ return cookie;
+}
+
+static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
+{
+ return cookie >> 32;
+}
+
+/* atomic operation types */
+#define RDS_ATOMIC_TYPE_CSWP 0
+#define RDS_ATOMIC_TYPE_FADD 1
+
/*
* m_sock_item and m_conn_item are on lists that are serialized under
* conn->c_lock. m_sock_item has additional meaning in that once it is empty
@@ -258,13 +301,71 @@ struct rds_message {
* -> rs->rs_lock
*/
spinlock_t m_rs_lock;
+ wait_queue_head_t m_flush_wait;
+
struct rds_sock *m_rs;
- struct rds_rdma_op *m_rdma_op;
+
+ /* cookie to send to remote, in rds header */
rds_rdma_cookie_t m_rdma_cookie;
- struct rds_mr *m_rdma_mr;
- unsigned int m_nents;
- unsigned int m_count;
- struct scatterlist m_sg[0];
+
+ unsigned int m_used_sgs;
+ unsigned int m_total_sgs;
+
+ void *m_final_op;
+
+ struct {
+ struct rm_atomic_op {
+ int op_type;
+ union {
+ struct {
+ uint64_t compare;
+ uint64_t swap;
+ uint64_t compare_mask;
+ uint64_t swap_mask;
+ } op_m_cswp;
+ struct {
+ uint64_t add;
+ uint64_t nocarry_mask;
+ } op_m_fadd;
+ };
+
+ u32 op_rkey;
+ u64 op_remote_addr;
+ unsigned int op_notify:1;
+ unsigned int op_recverr:1;
+ unsigned int op_mapped:1;
+ unsigned int op_silent:1;
+ unsigned int op_active:1;
+ struct scatterlist *op_sg;
+ struct rds_notifier *op_notifier;
+
+ struct rds_mr *op_rdma_mr;
+ } atomic;
+ struct rm_rdma_op {
+ u32 op_rkey;
+ u64 op_remote_addr;
+ unsigned int op_write:1;
+ unsigned int op_fence:1;
+ unsigned int op_notify:1;
+ unsigned int op_recverr:1;
+ unsigned int op_mapped:1;
+ unsigned int op_silent:1;
+ unsigned int op_active:1;
+ unsigned int op_bytes;
+ unsigned int op_nents;
+ unsigned int op_count;
+ struct scatterlist *op_sg;
+ struct rds_notifier *op_notifier;
+
+ struct rds_mr *op_rdma_mr;
+ } rdma;
+ struct rm_data_op {
+ unsigned int op_active:1;
+ unsigned int op_nents;
+ unsigned int op_count;
+ struct scatterlist *op_sg;
+ } data;
+ };
};
/*
@@ -305,10 +406,6 @@ struct rds_notifier {
* transport is responsible for other serialization, including
* rds_recv_incoming(). This is called in process context but
* should try hard not to block.
- *
- * @xmit_cong_map: This asks the transport to send the local bitmap down the
- * given connection. XXX get a better story about the bitmap
- * flag and header.
*/
#define RDS_TRANS_IB 0
@@ -332,13 +429,11 @@ struct rds_transport {
void (*xmit_complete)(struct rds_connection *conn);
int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
unsigned int hdr_off, unsigned int sg, unsigned int off);
- int (*xmit_cong_map)(struct rds_connection *conn,
- struct rds_cong_map *map, unsigned long offset);
- int (*xmit_rdma)(struct rds_connection *conn, struct rds_rdma_op *op);
+ int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
+ int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
int (*recv)(struct rds_connection *conn);
int (*inc_copy_to_user)(struct rds_incoming *inc, struct iovec *iov,
size_t size);
- void (*inc_purge)(struct rds_incoming *inc);
void (*inc_free)(struct rds_incoming *inc);
int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
@@ -367,17 +462,11 @@ struct rds_sock {
* bound_addr used for both incoming and outgoing, no INADDR_ANY
* support.
*/
- struct rb_node rs_bound_node;
+ struct hlist_node rs_bound_node;
__be32 rs_bound_addr;
__be32 rs_conn_addr;
__be16 rs_bound_port;
__be16 rs_conn_port;
-
- /*
- * This is only used to communicate the transport between bind and
- * initiating connections. All other trans use is referenced through
- * the connection.
- */
struct rds_transport *rs_transport;
/*
@@ -466,8 +555,8 @@ struct rds_statistics {
uint64_t s_recv_ping;
uint64_t s_send_queue_empty;
uint64_t s_send_queue_full;
- uint64_t s_send_sem_contention;
- uint64_t s_send_sem_queue_raced;
+ uint64_t s_send_lock_contention;
+ uint64_t s_send_lock_queue_raced;
uint64_t s_send_immediate_retry;
uint64_t s_send_delayed_retry;
uint64_t s_send_drop_acked;
@@ -487,6 +576,7 @@ struct rds_statistics {
};
/* af_rds.c */
+char *rds_str_array(char **array, size_t elements, size_t index);
void rds_sock_addref(struct rds_sock *rs);
void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
@@ -521,15 +611,17 @@ void rds_cong_exit(void);
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);
/* conn.c */
-int __init rds_conn_init(void);
+int rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
struct rds_transport *trans, gfp_t gfp);
struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
struct rds_transport *trans, gfp_t gfp);
+void rds_conn_shutdown(struct rds_connection *conn);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_reset(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
+void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens,
@@ -566,7 +658,8 @@ rds_conn_connecting(struct rds_connection *conn)
/* message.c */
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
-struct rds_message *rds_message_copy_from_user(struct iovec *first_iov,
+struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
+int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov,
size_t total_len);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
@@ -580,7 +673,6 @@ int rds_message_get_version_extension(struct rds_header *hdr, unsigned int *vers
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc,
struct iovec *first_iov, size_t size);
-void rds_message_inc_purge(struct rds_incoming *inc);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
@@ -636,14 +728,39 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
is_acked_func is_acked);
-int rds_send_acked_before(struct rds_connection *conn, u64 seq);
void rds_send_remove_from_sock(struct list_head *messages, int status);
int rds_send_pong(struct rds_connection *conn, __be16 dport);
struct rds_message *rds_send_get_message(struct rds_connection *,
- struct rds_rdma_op *);
+ struct rm_rdma_op *);
/* rdma.c */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
+int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
+int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
+int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
+void rds_rdma_drop_keys(struct rds_sock *rs);
+int rds_rdma_extra_size(struct rds_rdma_args *args);
+int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
+ struct cmsghdr *cmsg);
+int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
+ struct cmsghdr *cmsg);
+int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
+ struct cmsghdr *cmsg);
+int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
+ struct cmsghdr *cmsg);
+void rds_rdma_free_op(struct rm_rdma_op *ro);
+void rds_atomic_free_op(struct rm_atomic_op *ao);
+void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
+void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
+int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
+ struct cmsghdr *cmsg);
+
+extern void __rds_put_mr_final(struct rds_mr *mr);
+static inline void rds_mr_put(struct rds_mr *mr)
+{
+ if (atomic_dec_and_test(&mr->r_refcount))
+ __rds_put_mr_final(mr);
+}
/* stats.c */
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
@@ -657,14 +774,14 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
put_cpu(); \
} while (0)
#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
-int __init rds_stats_init(void);
+int rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
uint64_t *values, const char *const *names,
size_t nr);
/* sysctl.c */
-int __init rds_sysctl_init(void);
+int rds_sysctl_init(void);
void rds_sysctl_exit(void);
extern unsigned long rds_sysctl_sndbuf_min;
extern unsigned long rds_sysctl_sndbuf_default;
@@ -678,9 +795,10 @@ extern unsigned long rds_sysctl_trace_flags;
extern unsigned int rds_sysctl_trace_level;
/* threads.c */
-int __init rds_threads_init(void);
+int rds_threads_init(void);
void rds_threads_exit(void);
extern struct workqueue_struct *rds_wq;
+void rds_queue_reconnect(struct rds_connection *conn);
void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
@@ -691,9 +809,10 @@ void rds_connect_complete(struct rds_connection *conn);
int rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(__be32 addr);
+void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
unsigned int avail);
-int __init rds_trans_init(void);
+int rds_trans_init(void);
void rds_trans_exit(void);
#endif
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 795a00b7f2cb..68800f02aa30 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -36,7 +36,6 @@
#include <linux/in.h>
#include "rds.h"
-#include "rdma.h"
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
__be32 saddr)
@@ -210,7 +209,7 @@ void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
}
rs = rds_find_bound(daddr, inc->i_hdr.h_dport);
- if (rs == NULL) {
+ if (!rs) {
rds_stats_inc(s_recv_drop_no_sock);
goto out;
}
@@ -251,7 +250,7 @@ static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc)
{
unsigned long flags;
- if (*inc == NULL) {
+ if (!*inc) {
read_lock_irqsave(&rs->rs_recv_lock, flags);
if (!list_empty(&rs->rs_recv_queue)) {
*inc = list_entry(rs->rs_recv_queue.next,
@@ -297,7 +296,7 @@ static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc,
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
{
struct rds_notifier *notifier;
- struct rds_rdma_notify cmsg;
+ struct rds_rdma_notify cmsg = { 0 }; /* fill holes with zero */
unsigned int count = 0, max_messages = ~0U;
unsigned long flags;
LIST_HEAD(copy);
@@ -334,10 +333,10 @@ int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr)
if (msghdr) {
cmsg.user_token = notifier->n_user_token;
- cmsg.status = notifier->n_status;
+ cmsg.status = notifier->n_status;
err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS,
- sizeof(cmsg), &cmsg);
+ sizeof(cmsg), &cmsg);
if (err)
break;
}
diff --git a/net/rds/send.c b/net/rds/send.c
index 9c1c6bcaa6c9..9b951a0ab6b7 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -37,7 +37,6 @@
#include <linux/list.h>
#include "rds.h"
-#include "rdma.h"
/* When transmitting messages in rds_send_xmit, we need to emerge from
* time to time and briefly release the CPU. Otherwise the softlock watchdog
@@ -54,7 +53,8 @@ module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
/*
- * Reset the send state. Caller must hold c_send_lock when calling here.
+ * Reset the send state. Callers must ensure that this doesn't race with
+ * rds_send_xmit().
*/
void rds_send_reset(struct rds_connection *conn)
{
@@ -62,18 +62,22 @@ void rds_send_reset(struct rds_connection *conn)
unsigned long flags;
if (conn->c_xmit_rm) {
+ rm = conn->c_xmit_rm;
+ conn->c_xmit_rm = NULL;
/* Tell the user the RDMA op is no longer mapped by the
* transport. This isn't entirely true (it's flushed out
* independently) but as the connection is down, there's
* no ongoing RDMA to/from that memory */
- rds_message_unmapped(conn->c_xmit_rm);
- rds_message_put(conn->c_xmit_rm);
- conn->c_xmit_rm = NULL;
+ rds_message_unmapped(rm);
+ rds_message_put(rm);
}
+
conn->c_xmit_sg = 0;
conn->c_xmit_hdr_off = 0;
conn->c_xmit_data_off = 0;
+ conn->c_xmit_atomic_sent = 0;
conn->c_xmit_rdma_sent = 0;
+ conn->c_xmit_data_sent = 0;
conn->c_map_queued = 0;
@@ -90,6 +94,25 @@ void rds_send_reset(struct rds_connection *conn)
spin_unlock_irqrestore(&conn->c_lock, flags);
}
+static int acquire_in_xmit(struct rds_connection *conn)
+{
+ return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
+}
+
+static void release_in_xmit(struct rds_connection *conn)
+{
+ clear_bit(RDS_IN_XMIT, &conn->c_flags);
+ smp_mb__after_clear_bit();
+ /*
+ * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
+ * hot path and finding waiters is very rare. We don't want to walk
+ * the system-wide hashed waitqueue buckets in the fast path only to
+ * almost never find waiters.
+ */
+ if (waitqueue_active(&conn->c_waitq))
+ wake_up_all(&conn->c_waitq);
+}
+
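acquire_in_xmit()/release_in_xmit() replace the old c_send_lock mutex with a single flag taken by an atomic test-and-set, so a second sender backs off immediately instead of blocking. A userspace sketch of the same pattern with C11 atomics (the kernel version additionally issues a barrier and wakes conn->c_waitq on release):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag in_xmit = ATOMIC_FLAG_INIT;

static bool try_acquire_in_xmit(void)
{
	/* true means this thread now owns the single transmit path */
	return !atomic_flag_test_and_set(&in_xmit);
}

static void release_in_xmit_flag(void)
{
	atomic_flag_clear(&in_xmit);
}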
/*
* We're making the conscious trade-off here to only send one message
* down the connection at a time.
@@ -109,102 +132,69 @@ int rds_send_xmit(struct rds_connection *conn)
struct rds_message *rm;
unsigned long flags;
unsigned int tmp;
- unsigned int send_quota = send_batch_count;
struct scatterlist *sg;
int ret = 0;
- int was_empty = 0;
LIST_HEAD(to_be_dropped);
+restart:
+
/*
* sendmsg calls here after having queued its message on the send
* queue. We only have one task feeding the connection at a time. If
* another thread is already feeding the queue then we back off. This
* avoids blocking the caller and trading per-connection data between
* caches per message.
- *
- * The sem holder will issue a retry if they notice that someone queued
- * a message after they stopped walking the send queue but before they
- * dropped the sem.
*/
- if (!mutex_trylock(&conn->c_send_lock)) {
- rds_stats_inc(s_send_sem_contention);
+ if (!acquire_in_xmit(conn)) {
+ rds_stats_inc(s_send_lock_contention);
ret = -ENOMEM;
goto out;
}
+ /*
+ * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
+ * we do the opposite to avoid races.
+ */
+ if (!rds_conn_up(conn)) {
+ release_in_xmit(conn);
+ ret = 0;
+ goto out;
+ }
+
if (conn->c_trans->xmit_prepare)
conn->c_trans->xmit_prepare(conn);
/*
* spin trying to push headers and data down the connection until
- * the connection doens't make forward progress.
+ * the connection doesn't make forward progress.
*/
- while (--send_quota) {
- /*
- * See if need to send a congestion map update if we're
- * between sending messages. The send_sem protects our sole
- * use of c_map_offset and _bytes.
- * Note this is used only by transports that define a special
- * xmit_cong_map function. For all others, we create allocate
- * a cong_map message and treat it just like any other send.
- */
- if (conn->c_map_bytes) {
- ret = conn->c_trans->xmit_cong_map(conn, conn->c_lcong,
- conn->c_map_offset);
- if (ret <= 0)
- break;
+ while (1) {
- conn->c_map_offset += ret;
- conn->c_map_bytes -= ret;
- if (conn->c_map_bytes)
- continue;
- }
-
- /* If we're done sending the current message, clear the
- * offset and S/G temporaries.
- */
rm = conn->c_xmit_rm;
- if (rm != NULL &&
- conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
- conn->c_xmit_sg == rm->m_nents) {
- conn->c_xmit_rm = NULL;
- conn->c_xmit_sg = 0;
- conn->c_xmit_hdr_off = 0;
- conn->c_xmit_data_off = 0;
- conn->c_xmit_rdma_sent = 0;
- /* Release the reference to the previous message. */
- rds_message_put(rm);
- rm = NULL;
- }
-
- /* If we're asked to send a cong map update, do so.
+ /*
+ * If between sending messages, we can send a pending congestion
+ * map update.
*/
- if (rm == NULL && test_and_clear_bit(0, &conn->c_map_queued)) {
- if (conn->c_trans->xmit_cong_map != NULL) {
- conn->c_map_offset = 0;
- conn->c_map_bytes = sizeof(struct rds_header) +
- RDS_CONG_MAP_BYTES;
- continue;
- }
-
+ if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
rm = rds_cong_update_alloc(conn);
if (IS_ERR(rm)) {
ret = PTR_ERR(rm);
break;
}
+ rm->data.op_active = 1;
conn->c_xmit_rm = rm;
}
/*
- * Grab the next message from the send queue, if there is one.
+ * If not already working on one, grab the next message.
*
* c_xmit_rm holds a ref while we're sending this message down
* the connection. We can use this ref while holding the
* send_sem.. rds_send_reset() is serialized with it.
*/
- if (rm == NULL) {
+ if (!rm) {
unsigned int len;
spin_lock_irqsave(&conn->c_lock, flags);
@@ -224,10 +214,8 @@ int rds_send_xmit(struct rds_connection *conn)
spin_unlock_irqrestore(&conn->c_lock, flags);
- if (rm == NULL) {
- was_empty = 1;
+ if (!rm)
break;
- }
/* Unfortunately, the way Infiniband deals with
* RDMA to a bad MR key is by moving the entire
@@ -236,13 +224,12 @@ int rds_send_xmit(struct rds_connection *conn)
* connection.
* Therefore, we never retransmit messages with RDMA ops.
*/
- if (rm->m_rdma_op &&
+ if (rm->rdma.op_active &&
test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
spin_lock_irqsave(&conn->c_lock, flags);
if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
list_move(&rm->m_conn_item, &to_be_dropped);
spin_unlock_irqrestore(&conn->c_lock, flags);
- rds_message_put(rm);
continue;
}
@@ -263,23 +250,55 @@ int rds_send_xmit(struct rds_connection *conn)
conn->c_xmit_rm = rm;
}
- /*
- * Try and send an rdma message. Let's see if we can
- * keep this simple and require that the transport either
- * send the whole rdma or none of it.
- */
- if (rm->m_rdma_op && !conn->c_xmit_rdma_sent) {
- ret = conn->c_trans->xmit_rdma(conn, rm->m_rdma_op);
+ /* The transport either sends the whole rdma or none of it */
+ if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
+ rm->m_final_op = &rm->rdma;
+ ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
if (ret)
break;
conn->c_xmit_rdma_sent = 1;
+
/* The transport owns the mapped memory for now.
* You can't unmap it while it's on the send queue */
set_bit(RDS_MSG_MAPPED, &rm->m_flags);
}
- if (conn->c_xmit_hdr_off < sizeof(struct rds_header) ||
- conn->c_xmit_sg < rm->m_nents) {
+ if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
+ rm->m_final_op = &rm->atomic;
+ ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
+ if (ret)
+ break;
+ conn->c_xmit_atomic_sent = 1;
+
+ /* The transport owns the mapped memory for now.
+ * You can't unmap it while it's on the send queue */
+ set_bit(RDS_MSG_MAPPED, &rm->m_flags);
+ }
+
+ /*
+ * A number of cases require an RDS header to be sent
+ * even if there is no data.
+ * We permit 0-byte sends; rds-ping depends on this.
+ * However, if there are exclusively attached silent ops,
+ * we skip the hdr/data send, to enable silent operation.
+ */
+ if (rm->data.op_nents == 0) {
+ int ops_present;
+ int all_ops_are_silent = 1;
+
+ ops_present = (rm->atomic.op_active || rm->rdma.op_active);
+ if (rm->atomic.op_active && !rm->atomic.op_silent)
+ all_ops_are_silent = 0;
+ if (rm->rdma.op_active && !rm->rdma.op_silent)
+ all_ops_are_silent = 0;
+
+ if (ops_present && all_ops_are_silent
+ && !rm->m_rdma_cookie)
+ rm->data.op_active = 0;
+ }
+
+ if (rm->data.op_active && !conn->c_xmit_data_sent) {
+ rm->m_final_op = &rm->data;
ret = conn->c_trans->xmit(conn, rm,
conn->c_xmit_hdr_off,
conn->c_xmit_sg,
@@ -295,7 +314,7 @@ int rds_send_xmit(struct rds_connection *conn)
ret -= tmp;
}
- sg = &rm->m_sg[conn->c_xmit_sg];
+ sg = &rm->data.op_sg[conn->c_xmit_sg];
while (ret) {
tmp = min_t(int, ret, sg->length -
conn->c_xmit_data_off);
@@ -306,49 +325,63 @@ int rds_send_xmit(struct rds_connection *conn)
sg++;
conn->c_xmit_sg++;
BUG_ON(ret != 0 &&
- conn->c_xmit_sg == rm->m_nents);
+ conn->c_xmit_sg == rm->data.op_nents);
}
}
+
+ if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
+ (conn->c_xmit_sg == rm->data.op_nents))
+ conn->c_xmit_data_sent = 1;
}
- }
- /* Nuke any messages we decided not to retransmit. */
- if (!list_empty(&to_be_dropped))
- rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
+ /*
+ * An rm will only take multiple passes through this loop
+ * if there is a data op. Thus, if the data is sent (or there was
+ * none), then we're done with the rm.
+ */
+ if (!rm->data.op_active || conn->c_xmit_data_sent) {
+ conn->c_xmit_rm = NULL;
+ conn->c_xmit_sg = 0;
+ conn->c_xmit_hdr_off = 0;
+ conn->c_xmit_data_off = 0;
+ conn->c_xmit_rdma_sent = 0;
+ conn->c_xmit_atomic_sent = 0;
+ conn->c_xmit_data_sent = 0;
+
+ rds_message_put(rm);
+ }
+ }
if (conn->c_trans->xmit_complete)
conn->c_trans->xmit_complete(conn);
- /*
- * We might be racing with another sender who queued a message but
- * backed off on noticing that we held the c_send_lock. If we check
- * for queued messages after dropping the sem then either we'll
- * see the queued message or the queuer will get the sem. If we
- * notice the queued message then we trigger an immediate retry.
- *
- * We need to be careful only to do this when we stopped processing
- * the send queue because it was empty. It's the only way we
- * stop processing the loop when the transport hasn't taken
- * responsibility for forward progress.
- */
- mutex_unlock(&conn->c_send_lock);
+ release_in_xmit(conn);
- if (conn->c_map_bytes || (send_quota == 0 && !was_empty)) {
- /* We exhausted the send quota, but there's work left to
- * do. Return and (re-)schedule the send worker.
- */
- ret = -EAGAIN;
+ /* Nuke any messages we decided not to retransmit. */
+ if (!list_empty(&to_be_dropped)) {
+ /* irqs on here, so we can put(), unlike above */
+ list_for_each_entry(rm, &to_be_dropped, m_conn_item)
+ rds_message_put(rm);
+ rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
}
- if (ret == 0 && was_empty) {
- /* A simple bit test would be way faster than taking the
- * spin lock */
- spin_lock_irqsave(&conn->c_lock, flags);
+ /*
+ * Other senders can queue a message after we last test the send queue
+ * but before we clear RDS_IN_XMIT. In that case they'd back off and
+ * not try and send their newly queued message. We need to check the
+ * send queue after having cleared RDS_IN_XMIT so that their message
+ * doesn't get stuck on the send queue.
+ *
+ * If the transport cannot continue (i.e. ret != 0), then it must
+ * call us when more room is available, such as from the tx
+ * completion handler.
+ */
+ if (ret == 0) {
+ smp_mb();
if (!list_empty(&conn->c_send_queue)) {
- rds_stats_inc(s_send_sem_queue_raced);
- ret = -EAGAIN;
+ rds_stats_inc(s_send_lock_queue_raced);
+ goto restart;
}
- spin_unlock_irqrestore(&conn->c_lock, flags);
}
out:
return ret;
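The tail of rds_send_xmit() closes the race spelled out in the comment above: a sender that lost the flag relies on the current owner to notice its newly queued message, so the owner must re-check the queue after clearing RDS_IN_XMIT and loop back if anything slipped in. A compact userspace model of that shape:

#include <stdatomic.h>

static atomic_flag busy = ATOMIC_FLAG_INIT;
static atomic_int queued;		/* stand-in for conn->c_send_queue */

static void drain(void)
{
restart:
	if (atomic_flag_test_and_set(&busy))
		return;				/* another thread is transmitting */

	while (atomic_load(&queued) > 0)
		atomic_fetch_sub(&queued, 1);	/* "send" one message */

	atomic_flag_clear(&busy);

	/* a producer may have queued between the last check and the clear;
	 * without this re-check that message could sit until the next send */
	if (atomic_load(&queued) > 0)
		goto restart;
}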
@@ -376,52 +409,60 @@ static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
}
/*
- * Returns true if there are no messages on the send and retransmit queues
- * which have a sequence number greater than or equal to the given sequence
- * number.
+ * This is pretty similar to what happens below in the ACK
+ * handling code - except that we call here as soon as we get
+ * the IB send completion on the RDMA op and the accompanying
+ * message.
*/
-int rds_send_acked_before(struct rds_connection *conn, u64 seq)
+void rds_rdma_send_complete(struct rds_message *rm, int status)
{
- struct rds_message *rm, *tmp;
- int ret = 1;
+ struct rds_sock *rs = NULL;
+ struct rm_rdma_op *ro;
+ struct rds_notifier *notifier;
+ unsigned long flags;
- spin_lock(&conn->c_lock);
+ spin_lock_irqsave(&rm->m_rs_lock, flags);
- list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
- if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
- ret = 0;
- break;
- }
+ ro = &rm->rdma;
+ if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
+ ro->op_active && ro->op_notify && ro->op_notifier) {
+ notifier = ro->op_notifier;
+ rs = rm->m_rs;
+ sock_hold(rds_rs_to_sk(rs));
- list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
- if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
- ret = 0;
- break;
+ notifier->n_status = status;
+ spin_lock(&rs->rs_lock);
+ list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
+ spin_unlock(&rs->rs_lock);
+
+ ro->op_notifier = NULL;
}
- spin_unlock(&conn->c_lock);
+ spin_unlock_irqrestore(&rm->m_rs_lock, flags);
- return ret;
+ if (rs) {
+ rds_wake_sk_sleep(rs);
+ sock_put(rds_rs_to_sk(rs));
+ }
}
+EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
/*
- * This is pretty similar to what happens below in the ACK
- * handling code - except that we call here as soon as we get
- * the IB send completion on the RDMA op and the accompanying
- * message.
+ * Just like above, except it looks at the atomic op
*/
-void rds_rdma_send_complete(struct rds_message *rm, int status)
+void rds_atomic_send_complete(struct rds_message *rm, int status)
{
struct rds_sock *rs = NULL;
- struct rds_rdma_op *ro;
+ struct rm_atomic_op *ao;
struct rds_notifier *notifier;
+ unsigned long flags;
- spin_lock(&rm->m_rs_lock);
+ spin_lock_irqsave(&rm->m_rs_lock, flags);
- ro = rm->m_rdma_op;
- if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
- ro && ro->r_notify && ro->r_notifier) {
- notifier = ro->r_notifier;
+ ao = &rm->atomic;
+ if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
+ && ao->op_active && ao->op_notify && ao->op_notifier) {
+ notifier = ao->op_notifier;
rs = rm->m_rs;
sock_hold(rds_rs_to_sk(rs));
@@ -430,17 +471,17 @@ void rds_rdma_send_complete(struct rds_message *rm, int status)
list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
spin_unlock(&rs->rs_lock);
- ro->r_notifier = NULL;
+ ao->op_notifier = NULL;
}
- spin_unlock(&rm->m_rs_lock);
+ spin_unlock_irqrestore(&rm->m_rs_lock, flags);
if (rs) {
rds_wake_sk_sleep(rs);
sock_put(rds_rs_to_sk(rs));
}
}
-EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
+EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
/*
* This is the same as rds_rdma_send_complete except we
@@ -448,15 +489,23 @@ EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
* socket, socket lock) and can just move the notifier.
*/
static inline void
-__rds_rdma_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
+__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
- struct rds_rdma_op *ro;
+ struct rm_rdma_op *ro;
+ struct rm_atomic_op *ao;
+
+ ro = &rm->rdma;
+ if (ro->op_active && ro->op_notify && ro->op_notifier) {
+ ro->op_notifier->n_status = status;
+ list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
+ ro->op_notifier = NULL;
+ }
- ro = rm->m_rdma_op;
- if (ro && ro->r_notify && ro->r_notifier) {
- ro->r_notifier->n_status = status;
- list_add_tail(&ro->r_notifier->n_list, &rs->rs_notify_queue);
- ro->r_notifier = NULL;
+ ao = &rm->atomic;
+ if (ao->op_active && ao->op_notify && ao->op_notifier) {
+ ao->op_notifier->n_status = status;
+ list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
+ ao->op_notifier = NULL;
}
/* No need to wake the app - caller does this */
@@ -468,7 +517,7 @@ __rds_rdma_send_complete(struct rds_sock *rs, struct rds_message *rm, int status
* So speed is not an issue here.
*/
struct rds_message *rds_send_get_message(struct rds_connection *conn,
- struct rds_rdma_op *op)
+ struct rm_rdma_op *op)
{
struct rds_message *rm, *tmp, *found = NULL;
unsigned long flags;
@@ -476,7 +525,7 @@ struct rds_message *rds_send_get_message(struct rds_connection *conn,
spin_lock_irqsave(&conn->c_lock, flags);
list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
- if (rm->m_rdma_op == op) {
+ if (&rm->rdma == op) {
atomic_inc(&rm->m_refcount);
found = rm;
goto out;
@@ -484,7 +533,7 @@ struct rds_message *rds_send_get_message(struct rds_connection *conn,
}
list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
- if (rm->m_rdma_op == op) {
+ if (&rm->rdma == op) {
atomic_inc(&rm->m_refcount);
found = rm;
break;
@@ -544,19 +593,20 @@ void rds_send_remove_from_sock(struct list_head *messages, int status)
spin_lock(&rs->rs_lock);
if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
- struct rds_rdma_op *ro = rm->m_rdma_op;
+ struct rm_rdma_op *ro = &rm->rdma;
struct rds_notifier *notifier;
list_del_init(&rm->m_sock_item);
rds_send_sndbuf_remove(rs, rm);
- if (ro && ro->r_notifier && (status || ro->r_notify)) {
- notifier = ro->r_notifier;
+ if (ro->op_active && ro->op_notifier &&
+ (ro->op_notify || (ro->op_recverr && status))) {
+ notifier = ro->op_notifier;
list_add_tail(&notifier->n_list,
&rs->rs_notify_queue);
if (!notifier->n_status)
notifier->n_status = status;
- rm->m_rdma_op->r_notifier = NULL;
+ rm->rdma.op_notifier = NULL;
}
was_on_sock = 1;
rm->m_rs = NULL;
@@ -619,9 +669,8 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
struct rds_message *rm, *tmp;
struct rds_connection *conn;
- unsigned long flags, flags2;
+ unsigned long flags;
LIST_HEAD(list);
- int wake = 0;
/* get all the messages we're dropping under the rs lock */
spin_lock_irqsave(&rs->rs_lock, flags);
@@ -631,59 +680,54 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
dest->sin_port != rm->m_inc.i_hdr.h_dport))
continue;
- wake = 1;
list_move(&rm->m_sock_item, &list);
rds_send_sndbuf_remove(rs, rm);
clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
}
/* order flag updates with the rs lock */
- if (wake)
- smp_mb__after_clear_bit();
+ smp_mb__after_clear_bit();
spin_unlock_irqrestore(&rs->rs_lock, flags);
- conn = NULL;
+ if (list_empty(&list))
+ return;
- /* now remove the messages from the conn list as needed */
+ /* Remove the messages from the conn */
list_for_each_entry(rm, &list, m_sock_item) {
- /* We do this here rather than in the loop above, so that
- * we don't have to nest m_rs_lock under rs->rs_lock */
- spin_lock_irqsave(&rm->m_rs_lock, flags2);
- /* If this is a RDMA operation, notify the app. */
- spin_lock(&rs->rs_lock);
- __rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED);
- spin_unlock(&rs->rs_lock);
- rm->m_rs = NULL;
- spin_unlock_irqrestore(&rm->m_rs_lock, flags2);
+ conn = rm->m_inc.i_conn;
+
+ spin_lock_irqsave(&conn->c_lock, flags);
/*
- * If we see this flag cleared then we're *sure* that someone
- * else beat us to removing it from the conn. If we race
- * with their flag update we'll get the lock and then really
- * see that the flag has been cleared.
+ * Maybe someone else beat us to removing rm from the conn.
+ * If we race with their flag update we'll get the lock and
+ * then really see that the flag has been cleared.
*/
- if (!test_bit(RDS_MSG_ON_CONN, &rm->m_flags))
+ if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
+ spin_unlock_irqrestore(&conn->c_lock, flags);
continue;
-
- if (conn != rm->m_inc.i_conn) {
- if (conn)
- spin_unlock_irqrestore(&conn->c_lock, flags);
- conn = rm->m_inc.i_conn;
- spin_lock_irqsave(&conn->c_lock, flags);
}
+ list_del_init(&rm->m_conn_item);
+ spin_unlock_irqrestore(&conn->c_lock, flags);
- if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
- list_del_init(&rm->m_conn_item);
- rds_message_put(rm);
- }
- }
+ /*
+ * Couldn't grab m_rs_lock in top loop (lock ordering),
+ * but we can now.
+ */
+ spin_lock_irqsave(&rm->m_rs_lock, flags);
- if (conn)
- spin_unlock_irqrestore(&conn->c_lock, flags);
+ spin_lock(&rs->rs_lock);
+ __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
+ spin_unlock(&rs->rs_lock);
- if (wake)
- rds_wake_sk_sleep(rs);
+ rm->m_rs = NULL;
+ spin_unlock_irqrestore(&rm->m_rs_lock, flags);
+
+ rds_message_put(rm);
+ }
+
+ rds_wake_sk_sleep(rs);
while (!list_empty(&list)) {
rm = list_entry(list.next, struct rds_message, m_sock_item);
@@ -763,6 +807,63 @@ out:
return *queued;
}
+/*
+ * rds_message is getting to be quite complicated, and we'd like to allocate
+ * it all in one go. This figures out how big it needs to be up front.
+ */
+static int rds_rm_size(struct msghdr *msg, int data_len)
+{
+ struct cmsghdr *cmsg;
+ int size = 0;
+ int cmsg_groups = 0;
+ int retval;
+
+ for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+ if (!CMSG_OK(msg, cmsg))
+ return -EINVAL;
+
+ if (cmsg->cmsg_level != SOL_RDS)
+ continue;
+
+ switch (cmsg->cmsg_type) {
+ case RDS_CMSG_RDMA_ARGS:
+ cmsg_groups |= 1;
+ retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
+ if (retval < 0)
+ return retval;
+ size += retval;
+
+ break;
+
+ case RDS_CMSG_RDMA_DEST:
+ case RDS_CMSG_RDMA_MAP:
+ cmsg_groups |= 2;
+ /* these are valid but do not add any size */
+ break;
+
+ case RDS_CMSG_ATOMIC_CSWP:
+ case RDS_CMSG_ATOMIC_FADD:
+ case RDS_CMSG_MASKED_ATOMIC_CSWP:
+ case RDS_CMSG_MASKED_ATOMIC_FADD:
+ cmsg_groups |= 1;
+ size += sizeof(struct scatterlist);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ }
+
+ size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);
+
+ /* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
+ if (cmsg_groups == 3)
+ return -EINVAL;
+
+ return size;
+}
+
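
rds_rm_size() above sizes the message up front: one scatterlist entry per page of payload (via ceil()), one per atomic op, plus whatever rds_rdma_extra_size() reports for RDMA args. Below is a minimal userspace sketch of just the page-count arithmetic, assuming ceil() is plain round-up integer division and using a made-up entry size in place of sizeof(struct scatterlist):

	#include <stddef.h>
	#include <stdio.h>

	#define FAKE_PAGE_SIZE     4096u
	#define FAKE_SG_ENTRY_SIZE 32u	/* stand-in for sizeof(struct scatterlist) */

	static size_t ceil_div(size_t len, size_t unit)
	{
		return (len + unit - 1) / unit;
	}

	/* data_len bytes of payload plus n_atomic atomic ops -> extra bytes to allocate */
	static size_t fake_rm_size(size_t data_len, unsigned int n_atomic)
	{
		size_t size = 0;

		size += n_atomic * FAKE_SG_ENTRY_SIZE;		/* one sg per atomic op */
		size += ceil_div(data_len, FAKE_PAGE_SIZE) * FAKE_SG_ENTRY_SIZE; /* data sgs */
		return size;
	}

	int main(void)
	{
		/* 10000 bytes spans 3 pages -> 3 data sg entries, plus 1 atomic op */
		printf("%zu\n", fake_rm_size(10000, 1));	/* prints 128 */
		return 0;
	}
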
static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
struct msghdr *msg, int *allocated_mr)
{
@@ -777,7 +878,7 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
continue;
/* As a side effect, RDMA_DEST and RDMA_MAP will set
- * rm->m_rdma_cookie and rm->m_rdma_mr.
+ * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
*/
switch (cmsg->cmsg_type) {
case RDS_CMSG_RDMA_ARGS:
@@ -793,6 +894,12 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
if (!ret)
*allocated_mr = 1;
break;
+ case RDS_CMSG_ATOMIC_CSWP:
+ case RDS_CMSG_ATOMIC_FADD:
+ case RDS_CMSG_MASKED_ATOMIC_CSWP:
+ case RDS_CMSG_MASKED_ATOMIC_FADD:
+ ret = rds_cmsg_atomic(rs, rm, cmsg);
+ break;
default:
return -EINVAL;
@@ -850,13 +957,26 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
goto out;
}
- rm = rds_message_copy_from_user(msg->msg_iov, payload_len);
- if (IS_ERR(rm)) {
- ret = PTR_ERR(rm);
- rm = NULL;
+ /* size of rm including all sgs */
+ ret = rds_rm_size(msg, payload_len);
+ if (ret < 0)
+ goto out;
+
+ rm = rds_message_alloc(ret, GFP_KERNEL);
+ if (!rm) {
+ ret = -ENOMEM;
goto out;
}
+ /* Attach data to the rm */
+ if (payload_len) {
+ rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
+ ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
+ if (ret)
+ goto out;
+ }
+ rm->data.op_active = 1;
+
rm->m_daddr = daddr;
/* rds_conn_create has a spinlock that runs with IRQ off.
@@ -879,22 +999,23 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
if (ret)
goto out;
- if ((rm->m_rdma_cookie || rm->m_rdma_op) &&
- conn->c_trans->xmit_rdma == NULL) {
+ if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
if (printk_ratelimit())
printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
- rm->m_rdma_op, conn->c_trans->xmit_rdma);
+ &rm->rdma, conn->c_trans->xmit_rdma);
ret = -EOPNOTSUPP;
goto out;
}
- /* If the connection is down, trigger a connect. We may
- * have scheduled a delayed reconnect however - in this case
- * we should not interfere.
- */
- if (rds_conn_state(conn) == RDS_CONN_DOWN &&
- !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
- queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
+ if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
+ if (printk_ratelimit())
+ printk(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
+ &rm->atomic, conn->c_trans->xmit_atomic);
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ rds_conn_connect_if_down(conn);
ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
if (ret) {
@@ -938,7 +1059,7 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
rds_stats_inc(s_send_queued);
if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
- rds_send_worker(&conn->c_send_w.work);
+ rds_send_xmit(conn);
rds_message_put(rm);
return payload_len;
@@ -966,20 +1087,15 @@ rds_send_pong(struct rds_connection *conn, __be16 dport)
int ret = 0;
rm = rds_message_alloc(0, GFP_ATOMIC);
- if (rm == NULL) {
+ if (!rm) {
ret = -ENOMEM;
goto out;
}
rm->m_daddr = conn->c_faddr;
+ rm->data.op_active = 1;
- /* If the connection is down, trigger a connect. We may
- * have scheduled a delayed reconnect however - in this case
- * we should not interfere.
- */
- if (rds_conn_state(conn) == RDS_CONN_DOWN &&
- !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
- queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
+ rds_conn_connect_if_down(conn);
ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
if (ret)
@@ -999,7 +1115,9 @@ rds_send_pong(struct rds_connection *conn, __be16 dport)
rds_stats_inc(s_send_queued);
rds_stats_inc(s_send_pong);
- queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+ if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
+ rds_send_xmit(conn);
+
rds_message_put(rm);
return 0;
diff --git a/net/rds/stats.c b/net/rds/stats.c
index 7598eb07cfb1..10c759ccac0c 100644
--- a/net/rds/stats.c
+++ b/net/rds/stats.c
@@ -57,8 +57,8 @@ static const char *const rds_stat_names[] = {
"recv_ping",
"send_queue_empty",
"send_queue_full",
- "send_sem_contention",
- "send_sem_queue_raced",
+ "send_lock_contention",
+ "send_lock_queue_raced",
"send_immediate_retry",
"send_delayed_retry",
"send_drop_acked",
@@ -143,7 +143,7 @@ void rds_stats_exit(void)
rds_info_deregister_func(RDS_INFO_COUNTERS, rds_stats_info);
}
-int __init rds_stats_init(void)
+int rds_stats_init(void)
{
rds_info_register_func(RDS_INFO_COUNTERS, rds_stats_info);
return 0;
diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c
index 7829a20325d3..25ad0c77a26c 100644
--- a/net/rds/sysctl.c
+++ b/net/rds/sysctl.c
@@ -105,13 +105,13 @@ void rds_sysctl_exit(void)
unregister_sysctl_table(rds_sysctl_reg_table);
}
-int __init rds_sysctl_init(void)
+int rds_sysctl_init(void)
{
rds_sysctl_reconnect_min = msecs_to_jiffies(1);
rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min;
rds_sysctl_reg_table = register_sysctl_paths(rds_sysctl_path, rds_sysctl_rds_table);
- if (rds_sysctl_reg_table == NULL)
+ if (!rds_sysctl_reg_table)
return -ENOMEM;
return 0;
}
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index babf4577ff7d..eeb08e6ab96b 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -200,7 +200,7 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
struct rds_tcp_connection *tc;
tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
- if (tc == NULL)
+ if (!tc)
return -ENOMEM;
tc->t_sock = NULL;
@@ -258,7 +258,6 @@ struct rds_transport rds_tcp_transport = {
.laddr_check = rds_tcp_laddr_check,
.xmit_prepare = rds_tcp_xmit_prepare,
.xmit_complete = rds_tcp_xmit_complete,
- .xmit_cong_map = rds_tcp_xmit_cong_map,
.xmit = rds_tcp_xmit,
.recv = rds_tcp_recv,
.conn_alloc = rds_tcp_conn_alloc,
@@ -266,7 +265,6 @@ struct rds_transport rds_tcp_transport = {
.conn_connect = rds_tcp_conn_connect,
.conn_shutdown = rds_tcp_conn_shutdown,
.inc_copy_to_user = rds_tcp_inc_copy_to_user,
- .inc_purge = rds_tcp_inc_purge,
.inc_free = rds_tcp_inc_free,
.stats_info_copy = rds_tcp_stats_info_copy,
.exit = rds_tcp_exit,
@@ -276,14 +274,14 @@ struct rds_transport rds_tcp_transport = {
.t_prefer_loopback = 1,
};
-int __init rds_tcp_init(void)
+int rds_tcp_init(void)
{
int ret;
rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection",
sizeof(struct rds_tcp_connection),
0, 0, NULL);
- if (rds_tcp_conn_slab == NULL) {
+ if (!rds_tcp_conn_slab) {
ret = -ENOMEM;
goto out;
}
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
index 844fa6b9cf5a..f5e6f7bebb50 100644
--- a/net/rds/tcp.h
+++ b/net/rds/tcp.h
@@ -43,7 +43,7 @@ struct rds_tcp_statistics {
};
/* tcp.c */
-int __init rds_tcp_init(void);
+int rds_tcp_init(void);
void rds_tcp_exit(void);
void rds_tcp_tune(struct socket *sock);
void rds_tcp_nonagle(struct socket *sock);
@@ -61,16 +61,15 @@ void rds_tcp_conn_shutdown(struct rds_connection *conn);
void rds_tcp_state_change(struct sock *sk);
/* tcp_listen.c */
-int __init rds_tcp_listen_init(void);
+int rds_tcp_listen_init(void);
void rds_tcp_listen_stop(void);
void rds_tcp_listen_data_ready(struct sock *sk, int bytes);
/* tcp_recv.c */
-int __init rds_tcp_recv_init(void);
+int rds_tcp_recv_init(void);
void rds_tcp_recv_exit(void);
void rds_tcp_data_ready(struct sock *sk, int bytes);
int rds_tcp_recv(struct rds_connection *conn);
-void rds_tcp_inc_purge(struct rds_incoming *inc);
void rds_tcp_inc_free(struct rds_incoming *inc);
int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
size_t size);
@@ -81,8 +80,6 @@ void rds_tcp_xmit_complete(struct rds_connection *conn);
int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_tcp_write_space(struct sock *sk);
-int rds_tcp_xmit_cong_map(struct rds_connection *conn,
- struct rds_cong_map *map, unsigned long offset);
/* tcp_stats.c */
DECLARE_PER_CPU(struct rds_tcp_statistics, rds_tcp_stats);
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
index c397524c039c..af95c8e058fc 100644
--- a/net/rds/tcp_connect.c
+++ b/net/rds/tcp_connect.c
@@ -43,9 +43,9 @@ void rds_tcp_state_change(struct sock *sk)
struct rds_connection *conn;
struct rds_tcp_connection *tc;
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
conn = sk->sk_user_data;
- if (conn == NULL) {
+ if (!conn) {
state_change = sk->sk_state_change;
goto out;
}
@@ -68,7 +68,7 @@ void rds_tcp_state_change(struct sock *sk)
break;
}
out:
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
state_change(sk);
}
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 975183fe6950..8b5cc4aa8868 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -114,9 +114,9 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
rdsdebug("listen data ready sk %p\n", sk);
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
ready = sk->sk_user_data;
- if (ready == NULL) { /* check for teardown race */
+ if (!ready) { /* check for teardown race */
ready = sk->sk_data_ready;
goto out;
}
@@ -131,11 +131,11 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
queue_work(rds_wq, &rds_tcp_listen_work);
out:
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
ready(sk, bytes);
}
-int __init rds_tcp_listen_init(void)
+int rds_tcp_listen_init(void)
{
struct sockaddr_in sin;
struct socket *sock = NULL;
@@ -178,7 +178,7 @@ void rds_tcp_listen_stop(void)
struct socket *sock = rds_tcp_listen_sock;
struct sock *sk;
- if (sock == NULL)
+ if (!sock)
return;
sk = sock->sk;
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index 1aba6878fa5d..67263fbee623 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -39,7 +39,7 @@
static struct kmem_cache *rds_tcp_incoming_slab;
-void rds_tcp_inc_purge(struct rds_incoming *inc)
+static void rds_tcp_inc_purge(struct rds_incoming *inc)
{
struct rds_tcp_incoming *tinc;
tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
@@ -190,10 +190,10 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
* processing.
*/
while (left) {
- if (tinc == NULL) {
+ if (!tinc) {
tinc = kmem_cache_alloc(rds_tcp_incoming_slab,
arg->gfp);
- if (tinc == NULL) {
+ if (!tinc) {
desc->error = -ENOMEM;
goto out;
}
@@ -229,7 +229,7 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
if (left && tc->t_tinc_data_rem) {
clone = skb_clone(skb, arg->gfp);
- if (clone == NULL) {
+ if (!clone) {
desc->error = -ENOMEM;
goto out;
}
@@ -324,9 +324,9 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
rdsdebug("data ready sk %p bytes %d\n", sk, bytes);
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
conn = sk->sk_user_data;
- if (conn == NULL) { /* check for teardown race */
+ if (!conn) { /* check for teardown race */
ready = sk->sk_data_ready;
goto out;
}
@@ -338,16 +338,16 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM)
queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
out:
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
ready(sk, bytes);
}
-int __init rds_tcp_recv_init(void)
+int rds_tcp_recv_init(void)
{
rds_tcp_incoming_slab = kmem_cache_create("rds_tcp_incoming",
sizeof(struct rds_tcp_incoming),
0, 0, NULL);
- if (rds_tcp_incoming_slab == NULL)
+ if (!rds_tcp_incoming_slab)
return -ENOMEM;
return 0;
}
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index a28b895ff0d1..aa16841afbdf 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -77,56 +77,6 @@ int rds_tcp_sendmsg(struct socket *sock, void *data, unsigned int len)
}
/* the core send_sem serializes this with other xmit and shutdown */
-int rds_tcp_xmit_cong_map(struct rds_connection *conn,
- struct rds_cong_map *map, unsigned long offset)
-{
- static struct rds_header rds_tcp_map_header = {
- .h_flags = RDS_FLAG_CONG_BITMAP,
- };
- struct rds_tcp_connection *tc = conn->c_transport_data;
- unsigned long i;
- int ret;
- int copied = 0;
-
- /* Some problem claims cpu_to_be32(constant) isn't a constant. */
- rds_tcp_map_header.h_len = cpu_to_be32(RDS_CONG_MAP_BYTES);
-
- if (offset < sizeof(struct rds_header)) {
- ret = rds_tcp_sendmsg(tc->t_sock,
- (void *)&rds_tcp_map_header + offset,
- sizeof(struct rds_header) - offset);
- if (ret <= 0)
- return ret;
- offset += ret;
- copied = ret;
- if (offset < sizeof(struct rds_header))
- return ret;
- }
-
- offset -= sizeof(struct rds_header);
- i = offset / PAGE_SIZE;
- offset = offset % PAGE_SIZE;
- BUG_ON(i >= RDS_CONG_MAP_PAGES);
-
- do {
- ret = tc->t_sock->ops->sendpage(tc->t_sock,
- virt_to_page(map->m_page_addrs[i]),
- offset, PAGE_SIZE - offset,
- MSG_DONTWAIT);
- if (ret <= 0)
- break;
- copied += ret;
- offset += ret;
- if (offset == PAGE_SIZE) {
- offset = 0;
- i++;
- }
- } while (i < RDS_CONG_MAP_PAGES);
-
- return copied ? copied : ret;
-}
-
-/* the core send_sem serializes this with other xmit and shutdown */
int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
unsigned int hdr_off, unsigned int sg, unsigned int off)
{
@@ -166,21 +116,21 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
goto out;
}
- while (sg < rm->m_nents) {
+ while (sg < rm->data.op_nents) {
ret = tc->t_sock->ops->sendpage(tc->t_sock,
- sg_page(&rm->m_sg[sg]),
- rm->m_sg[sg].offset + off,
- rm->m_sg[sg].length - off,
+ sg_page(&rm->data.op_sg[sg]),
+ rm->data.op_sg[sg].offset + off,
+ rm->data.op_sg[sg].length - off,
MSG_DONTWAIT|MSG_NOSIGNAL);
- rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->m_sg[sg]),
- rm->m_sg[sg].offset + off, rm->m_sg[sg].length - off,
+ rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->data.op_sg[sg]),
+ rm->data.op_sg[sg].offset + off, rm->data.op_sg[sg].length - off,
ret);
if (ret <= 0)
break;
off += ret;
done += ret;
- if (off == rm->m_sg[sg].length) {
+ if (off == rm->data.op_sg[sg].length) {
off = 0;
sg++;
}
@@ -224,9 +174,9 @@ void rds_tcp_write_space(struct sock *sk)
struct rds_connection *conn;
struct rds_tcp_connection *tc;
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
conn = sk->sk_user_data;
- if (conn == NULL) {
+ if (!conn) {
write_space = sk->sk_write_space;
goto out;
}
@@ -244,7 +194,7 @@ void rds_tcp_write_space(struct sock *sk)
queue_delayed_work(rds_wq, &conn->c_send_w, 0);
out:
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
/*
* write_space is only called when data leaves tcp's send queue if
diff --git a/net/rds/threads.c b/net/rds/threads.c
index 786c20eaaf5e..0fd90f8c5f59 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -61,7 +61,7 @@
*
* Transition to state DISCONNECTING/DOWN:
* - Inside the shutdown worker; synchronizes with xmit path
- * through c_send_lock, and with connection management callbacks
+ * through RDS_IN_XMIT, and with connection management callbacks
* via c_cm_lock.
*
* For receive callbacks, we rely on the underlying transport
@@ -110,7 +110,7 @@ EXPORT_SYMBOL_GPL(rds_connect_complete);
* We should *always* start with a random backoff; otherwise a broken connection
* will always take several iterations to be re-established.
*/
-static void rds_queue_reconnect(struct rds_connection *conn)
+void rds_queue_reconnect(struct rds_connection *conn)
{
unsigned long rand;
@@ -156,58 +156,6 @@ void rds_connect_worker(struct work_struct *work)
}
}
-void rds_shutdown_worker(struct work_struct *work)
-{
- struct rds_connection *conn = container_of(work, struct rds_connection, c_down_w);
-
- /* shut it down unless it's down already */
- if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_DOWN)) {
- /*
- * Quiesce the connection mgmt handlers before we start tearing
- * things down. We don't hold the mutex for the entire
- * duration of the shutdown operation, else we may be
- * deadlocking with the CM handler. Instead, the CM event
- * handler is supposed to check for state DISCONNECTING
- */
- mutex_lock(&conn->c_cm_lock);
- if (!rds_conn_transition(conn, RDS_CONN_UP, RDS_CONN_DISCONNECTING) &&
- !rds_conn_transition(conn, RDS_CONN_ERROR, RDS_CONN_DISCONNECTING)) {
- rds_conn_error(conn, "shutdown called in state %d\n",
- atomic_read(&conn->c_state));
- mutex_unlock(&conn->c_cm_lock);
- return;
- }
- mutex_unlock(&conn->c_cm_lock);
-
- mutex_lock(&conn->c_send_lock);
- conn->c_trans->conn_shutdown(conn);
- rds_conn_reset(conn);
- mutex_unlock(&conn->c_send_lock);
-
- if (!rds_conn_transition(conn, RDS_CONN_DISCONNECTING, RDS_CONN_DOWN)) {
- /* This can happen - eg when we're in the middle of tearing
- * down the connection, and someone unloads the rds module.
- * Quite reproduceable with loopback connections.
- * Mostly harmless.
- */
- rds_conn_error(conn,
- "%s: failed to transition to state DOWN, "
- "current state is %d\n",
- __func__,
- atomic_read(&conn->c_state));
- return;
- }
- }
-
- /* Then reconnect if it's still live.
- * The passive side of an IB loopback connection is never added
- * to the conn hash, so we never trigger a reconnect on this
- * conn - the reconnect is always triggered by the active peer. */
- cancel_delayed_work(&conn->c_conn_w);
- if (!hlist_unhashed(&conn->c_hash_node))
- rds_queue_reconnect(conn);
-}
-
void rds_send_worker(struct work_struct *work)
{
struct rds_connection *conn = container_of(work, struct rds_connection, c_send_w.work);
@@ -252,15 +200,22 @@ void rds_recv_worker(struct work_struct *work)
}
}
+void rds_shutdown_worker(struct work_struct *work)
+{
+ struct rds_connection *conn = container_of(work, struct rds_connection, c_down_w);
+
+ rds_conn_shutdown(conn);
+}
+
void rds_threads_exit(void)
{
destroy_workqueue(rds_wq);
}
-int __init rds_threads_init(void)
+int rds_threads_init(void)
{
- rds_wq = create_workqueue("krdsd");
- if (rds_wq == NULL)
+ rds_wq = create_singlethread_workqueue("krdsd");
+ if (!rds_wq)
return -ENOMEM;
return 0;
diff --git a/net/rds/transport.c b/net/rds/transport.c
index 7e1067901353..7f2ac4fec367 100644
--- a/net/rds/transport.c
+++ b/net/rds/transport.c
@@ -71,19 +71,28 @@ void rds_trans_unregister(struct rds_transport *trans)
}
EXPORT_SYMBOL_GPL(rds_trans_unregister);
+void rds_trans_put(struct rds_transport *trans)
+{
+ if (trans && trans->t_owner)
+ module_put(trans->t_owner);
+}
+
struct rds_transport *rds_trans_get_preferred(__be32 addr)
{
struct rds_transport *ret = NULL;
- int i;
+ struct rds_transport *trans;
+ unsigned int i;
if (IN_LOOPBACK(ntohl(addr)))
return &rds_loop_transport;
down_read(&rds_trans_sem);
- for (i = 0; i < RDS_TRANS_COUNT; i++)
- {
- if (transports[i] && (transports[i]->laddr_check(addr) == 0)) {
- ret = transports[i];
+ for (i = 0; i < RDS_TRANS_COUNT; i++) {
+ trans = transports[i];
+
+ if (trans && (trans->laddr_check(addr) == 0) &&
+ (!trans->t_owner || try_module_get(trans->t_owner))) {
+ ret = trans;
break;
}
}
diff --git a/net/rds/xlist.h b/net/rds/xlist.h
new file mode 100644
index 000000000000..e6b5190daddd
--- /dev/null
+++ b/net/rds/xlist.h
@@ -0,0 +1,80 @@
+#ifndef _LINUX_XLIST_H
+#define _LINUX_XLIST_H
+
+#include <linux/stddef.h>
+#include <linux/poison.h>
+#include <linux/prefetch.h>
+#include <asm/system.h>
+
+struct xlist_head {
+ struct xlist_head *next;
+};
+
+static inline void INIT_XLIST_HEAD(struct xlist_head *list)
+{
+ list->next = NULL;
+}
+
+static inline int xlist_empty(struct xlist_head *head)
+{
+ return head->next == NULL;
+}
+
+static inline void xlist_add(struct xlist_head *new, struct xlist_head *tail,
+ struct xlist_head *head)
+{
+ struct xlist_head *cur;
+ struct xlist_head *check;
+
+ while (1) {
+ cur = head->next;
+ tail->next = cur;
+ check = cmpxchg(&head->next, cur, new);
+ if (check == cur)
+ break;
+ }
+}
+
+static inline struct xlist_head *xlist_del_head(struct xlist_head *head)
+{
+ struct xlist_head *cur;
+ struct xlist_head *check;
+ struct xlist_head *next;
+
+ while (1) {
+ cur = head->next;
+ if (!cur)
+ goto out;
+
+ next = cur->next;
+ check = cmpxchg(&head->next, cur, next);
+ if (check == cur)
+ goto out;
+ }
+out:
+ return cur;
+}
+
+static inline struct xlist_head *xlist_del_head_fast(struct xlist_head *head)
+{
+ struct xlist_head *cur;
+
+ cur = head->next;
+ if (!cur)
+ return NULL;
+
+ head->next = cur->next;
+ return cur;
+}
+
+static inline void xlist_splice(struct xlist_head *list,
+ struct xlist_head *head)
+{
+ struct xlist_head *cur;
+
+ WARN_ON(head->next);
+ cur = xchg(&list->next, NULL);
+ head->next = cur;
+}
+
+#endif
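
The new xlist.h is a minimal lock-free LIFO: xlist_add() pushes by retrying cmpxchg() on the head, and xlist_del_head() pops the same way. The following userspace sketch shows the same push/pop retry loops using C11 compare-and-swap, with invented names (xnode, x_push, x_pop); it illustrates only the retry pattern and ignores the ABA caveats the kernel callers have to respect.

	#include <stdatomic.h>
	#include <stddef.h>
	#include <stdio.h>

	struct xnode {
		struct xnode *next;
	};

	struct xhead {
		_Atomic(struct xnode *) next;
	};

	static void x_push(struct xhead *head, struct xnode *new)
	{
		struct xnode *cur = atomic_load(&head->next);

		do {
			new->next = cur;	/* link behind the current head */
		} while (!atomic_compare_exchange_weak(&head->next, &cur, new));
	}

	static struct xnode *x_pop(struct xhead *head)
	{
		struct xnode *cur = atomic_load(&head->next);

		while (cur && !atomic_compare_exchange_weak(&head->next, &cur, cur->next))
			;			/* cur is reloaded on failure */
		return cur;
	}

	int main(void)
	{
		struct xhead head = { NULL };
		struct xnode a, b;

		x_push(&head, &a);
		x_push(&head, &b);
		printf("popped b? %d\n", x_pop(&head) == &b);	/* LIFO order */
		printf("popped a? %d\n", x_pop(&head) == &a);
		return 0;
	}
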
diff --git a/net/rfkill/input.c b/net/rfkill/input.c
index 3713d7ecab96..1bca6d49ec96 100644
--- a/net/rfkill/input.c
+++ b/net/rfkill/input.c
@@ -142,7 +142,7 @@ static unsigned long rfkill_last_scheduled;
static unsigned long rfkill_ratelimit(const unsigned long last)
{
const unsigned long delay = msecs_to_jiffies(RFKILL_OPS_DELAY);
- return (time_after(jiffies, last + delay)) ? 0 : delay;
+ return time_after(jiffies, last + delay) ? 0 : delay;
}
static void rfkill_schedule_ratelimited(void)
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 8e45e76a95f5..d952e7eac188 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -679,7 +679,7 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
return -EINVAL;
- if (addr->srose_ndigis > ROSE_MAX_DIGIS)
+ if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
return -EINVAL;
if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) {
@@ -739,7 +739,7 @@ static int rose_connect(struct socket *sock, struct sockaddr *uaddr, int addr_le
if (addr_len == sizeof(struct sockaddr_rose) && addr->srose_ndigis > 1)
return -EINVAL;
- if (addr->srose_ndigis > ROSE_MAX_DIGIS)
+ if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
return -EINVAL;
/* Source + Destination digis should not exceed ROSE_MAX_DIGIS */
diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c
index a750a28e0221..fa5f5641a2c2 100644
--- a/net/rose/rose_link.c
+++ b/net/rose/rose_link.c
@@ -114,7 +114,7 @@ static int rose_send_frame(struct sk_buff *skb, struct rose_neigh *neigh)
if (ax25s)
ax25_cb_put(ax25s);
- return (neigh->ax25 != NULL);
+ return neigh->ax25 != NULL;
}
/*
@@ -137,7 +137,7 @@ static int rose_link_up(struct rose_neigh *neigh)
if (ax25s)
ax25_cb_put(ax25s);
- return (neigh->ax25 != NULL);
+ return neigh->ax25 != NULL;
}
/*
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 7043b294bb67..8e22bd345e71 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -597,12 +597,6 @@ extern unsigned rxrpc_debug;
#define dbgprintk(FMT,...) \
printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)
-/* make sure we maintain the format strings, even when debugging is disabled */
-static inline __attribute__((format(printf,1,2)))
-void _dbprintk(const char *fmt, ...)
-{
-}
-
#define kenter(FMT,...) dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
#define kleave(FMT,...) dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
#define kdebug(FMT,...) dbgprintk(" "FMT ,##__VA_ARGS__)
@@ -655,11 +649,11 @@ do { \
} while (0)
#else
-#define _enter(FMT,...) _dbprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
-#define _leave(FMT,...) _dbprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
-#define _debug(FMT,...) _dbprintk(" "FMT ,##__VA_ARGS__)
-#define _proto(FMT,...) _dbprintk("### "FMT ,##__VA_ARGS__)
-#define _net(FMT,...) _dbprintk("@@@ "FMT ,##__VA_ARGS__)
+#define _enter(FMT,...) no_printk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
+#define _leave(FMT,...) no_printk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
+#define _debug(FMT,...) no_printk(" "FMT ,##__VA_ARGS__)
+#define _proto(FMT,...) no_printk("### "FMT ,##__VA_ARGS__)
+#define _net(FMT,...) no_printk("@@@ "FMT ,##__VA_ARGS__)
#endif
/*
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 2f691fb180d1..a36270a994d7 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -518,6 +518,16 @@ config NET_ACT_SKBEDIT
To compile this code as a module, choose M here: the
module will be called act_skbedit.
+config NET_ACT_CSUM
+ tristate "Checksum Updating"
+ depends on NET_CLS_ACT && INET
+ ---help---
+ Say Y here to update some common checksums after some direct
+ packet alterations.
+
+ To compile this code as a module, choose M here: the
+ module will be called act_csum.
+
config NET_CLS_IND
bool "Incoming device classification"
depends on NET_CLS_U32 || NET_CLS_FW
diff --git a/net/sched/Makefile b/net/sched/Makefile
index f14e71bfa58f..960f5dba6304 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_NET_ACT_NAT) += act_nat.o
obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o
obj-$(CONFIG_NET_ACT_SIMP) += act_simple.o
obj-$(CONFIG_NET_ACT_SKBEDIT) += act_skbedit.o
+obj-$(CONFIG_NET_ACT_CSUM) += act_csum.o
obj-$(CONFIG_NET_SCH_FIFO) += sch_fifo.o
obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o
obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
new file mode 100644
index 000000000000..67dc7ce9b63a
--- /dev/null
+++ b/net/sched/act_csum.c
@@ -0,0 +1,595 @@
+/*
+ * Checksum updating actions
+ *
+ * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+
+#include <linux/netlink.h>
+#include <net/netlink.h>
+#include <linux/rtnetlink.h>
+
+#include <linux/skbuff.h>
+
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/igmp.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+#include <net/ip6_checksum.h>
+
+#include <net/act_api.h>
+
+#include <linux/tc_act/tc_csum.h>
+#include <net/tc_act/tc_csum.h>
+
+#define CSUM_TAB_MASK 15
+static struct tcf_common *tcf_csum_ht[CSUM_TAB_MASK + 1];
+static u32 csum_idx_gen;
+static DEFINE_RWLOCK(csum_lock);
+
+static struct tcf_hashinfo csum_hash_info = {
+ .htab = tcf_csum_ht,
+ .hmask = CSUM_TAB_MASK,
+ .lock = &csum_lock,
+};
+
+static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
+ [TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
+};
+
+static int tcf_csum_init(struct nlattr *nla, struct nlattr *est,
+ struct tc_action *a, int ovr, int bind)
+{
+ struct nlattr *tb[TCA_CSUM_MAX + 1];
+ struct tc_csum *parm;
+ struct tcf_common *pc;
+ struct tcf_csum *p;
+ int ret = 0, err;
+
+ if (nla == NULL)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy);
+ if (err < 0)
+ return err;
+
+ if (tb[TCA_CSUM_PARMS] == NULL)
+ return -EINVAL;
+ parm = nla_data(tb[TCA_CSUM_PARMS]);
+
+ pc = tcf_hash_check(parm->index, a, bind, &csum_hash_info);
+ if (!pc) {
+ pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
+ &csum_idx_gen, &csum_hash_info);
+ if (IS_ERR(pc))
+ return PTR_ERR(pc);
+ p = to_tcf_csum(pc);
+ ret = ACT_P_CREATED;
+ } else {
+ p = to_tcf_csum(pc);
+ if (!ovr) {
+ tcf_hash_release(pc, bind, &csum_hash_info);
+ return -EEXIST;
+ }
+ }
+
+ spin_lock_bh(&p->tcf_lock);
+ p->tcf_action = parm->action;
+ p->update_flags = parm->update_flags;
+ spin_unlock_bh(&p->tcf_lock);
+
+ if (ret == ACT_P_CREATED)
+ tcf_hash_insert(pc, &csum_hash_info);
+
+ return ret;
+}
+
+static int tcf_csum_cleanup(struct tc_action *a, int bind)
+{
+ struct tcf_csum *p = a->priv;
+ return tcf_hash_release(&p->common, bind, &csum_hash_info);
+}
+
+/**
+ * tcf_csum_skb_nextlayer - Get next layer pointer
+ * @skb: sk_buff to use
+ * @ihl: previous summed headers length
+ * @ipl: complete packet length
+ * @jhl: next header length
+ *
+ * Check that the expected next layer is available in the specified sk_buff.
+ * Return the next layer pointer if the check passes, NULL otherwise.
+ */
+static void *tcf_csum_skb_nextlayer(struct sk_buff *skb,
+ unsigned int ihl, unsigned int ipl,
+ unsigned int jhl)
+{
+ int ntkoff = skb_network_offset(skb);
+ int hl = ihl + jhl;
+
+ if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) ||
+ (skb_cloned(skb) &&
+ !skb_clone_writable(skb, hl + ntkoff) &&
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+ return NULL;
+ else
+ return (void *)(skb_network_header(skb) + ihl);
+}
+
+static int tcf_csum_ipv4_icmp(struct sk_buff *skb,
+ unsigned int ihl, unsigned int ipl)
+{
+ struct icmphdr *icmph;
+
+ icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph));
+ if (icmph == NULL)
+ return 0;
+
+ icmph->checksum = 0;
+ skb->csum = csum_partial(icmph, ipl - ihl, 0);
+ icmph->checksum = csum_fold(skb->csum);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ return 1;
+}
+
+static int tcf_csum_ipv4_igmp(struct sk_buff *skb,
+ unsigned int ihl, unsigned int ipl)
+{
+ struct igmphdr *igmph;
+
+ igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph));
+ if (igmph == NULL)
+ return 0;
+
+ igmph->csum = 0;
+ skb->csum = csum_partial(igmph, ipl - ihl, 0);
+ igmph->csum = csum_fold(skb->csum);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ return 1;
+}
+
+static int tcf_csum_ipv6_icmp(struct sk_buff *skb, struct ipv6hdr *ip6h,
+ unsigned int ihl, unsigned int ipl)
+{
+ struct icmp6hdr *icmp6h;
+
+ icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h));
+ if (icmp6h == NULL)
+ return 0;
+
+ icmp6h->icmp6_cksum = 0;
+ skb->csum = csum_partial(icmp6h, ipl - ihl, 0);
+ icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+ ipl - ihl, IPPROTO_ICMPV6,
+ skb->csum);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ return 1;
+}
+
+static int tcf_csum_ipv4_tcp(struct sk_buff *skb, struct iphdr *iph,
+ unsigned int ihl, unsigned int ipl)
+{
+ struct tcphdr *tcph;
+
+ tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
+ if (tcph == NULL)
+ return 0;
+
+ tcph->check = 0;
+ skb->csum = csum_partial(tcph, ipl - ihl, 0);
+ tcph->check = tcp_v4_check(ipl - ihl,
+ iph->saddr, iph->daddr, skb->csum);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ return 1;
+}
+
+static int tcf_csum_ipv6_tcp(struct sk_buff *skb, struct ipv6hdr *ip6h,
+ unsigned int ihl, unsigned int ipl)
+{
+ struct tcphdr *tcph;
+
+ tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph));
+ if (tcph == NULL)
+ return 0;
+
+ tcph->check = 0;
+ skb->csum = csum_partial(tcph, ipl - ihl, 0);
+ tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+ ipl - ihl, IPPROTO_TCP,
+ skb->csum);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ return 1;
+}
+
+static int tcf_csum_ipv4_udp(struct sk_buff *skb, struct iphdr *iph,
+ unsigned int ihl, unsigned int ipl, int udplite)
+{
+ struct udphdr *udph;
+ u16 ul;
+
+ /*
+ * Support both UDP and UDPLITE checksum algorithms. Don't use
+ * udph->len to get the real length without a protocol check;
+ * UDPLITE uses udph->len for something else. Use iph->tot_len,
+ * or just ipl.
+ */
+
+ udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
+ if (udph == NULL)
+ return 0;
+
+ ul = ntohs(udph->len);
+
+ if (udplite || udph->check) {
+
+ udph->check = 0;
+
+ if (udplite) {
+ if (ul == 0)
+ skb->csum = csum_partial(udph, ipl - ihl, 0);
+ else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
+ skb->csum = csum_partial(udph, ul, 0);
+ else
+ goto ignore_obscure_skb;
+ } else {
+ if (ul != ipl - ihl)
+ goto ignore_obscure_skb;
+
+ skb->csum = csum_partial(udph, ul, 0);
+ }
+
+ udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+ ul, iph->protocol,
+ skb->csum);
+
+ if (!udph->check)
+ udph->check = CSUM_MANGLED_0;
+ }
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ignore_obscure_skb:
+ return 1;
+}
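
The tcf_csum_ipv4_*/tcf_csum_ipv6_* helpers above all follow the same recipe: zero the checksum field, run csum_partial() over the header plus payload, then fold the result (and, for TCP/UDP, mix in the pseudo-header via csum_tcpudp_magic() or csum_ipv6_magic()). Below is a standalone sketch of the underlying RFC 1071 one's-complement sum and fold, without the pseudo-header and without the kernel's optimized helpers, purely for illustration:

	#include <stdint.h>
	#include <stddef.h>
	#include <stdio.h>

	static uint16_t inet_checksum(const uint8_t *data, size_t len)
	{
		uint32_t sum = 0;
		size_t i;

		for (i = 0; i + 1 < len; i += 2)	/* sum 16-bit words */
			sum += (uint32_t)(data[i] << 8) | data[i + 1];
		if (len & 1)				/* pad an odd trailing byte */
			sum += (uint32_t)data[len - 1] << 8;

		while (sum >> 16)			/* fold carries back in */
			sum = (sum & 0xffff) + (sum >> 16);

		return (uint16_t)~sum;			/* one's complement */
	}

	int main(void)
	{
		/* RFC 1071 example bytes; summing data plus its checksum yields 0 */
		uint8_t buf[] = { 0x00, 0x01, 0xf2, 0x03, 0xf4, 0xf5, 0xf6, 0xf7 };

		printf("checksum = 0x%04x\n", inet_checksum(buf, sizeof(buf)));
		return 0;
	}
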
+
+static int tcf_csum_ipv6_udp(struct sk_buff *skb, struct ipv6hdr *ip6h,
+ unsigned int ihl, unsigned int ipl, int udplite)
+{
+ struct udphdr *udph;
+ u16 ul;
+
+ /*
+ * Support both UDP and UDPLITE checksum algorithms. Don't use
+ * udph->len to get the real length without a protocol check;
+ * UDPLITE uses udph->len for something else. Use
+ * ip6h->payload_len + sizeof(*ip6h) ... , or just ipl.
+ */
+
+ udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph));
+ if (udph == NULL)
+ return 0;
+
+ ul = ntohs(udph->len);
+
+ udph->check = 0;
+
+ if (udplite) {
+ if (ul == 0)
+ skb->csum = csum_partial(udph, ipl - ihl, 0);
+
+ else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
+ skb->csum = csum_partial(udph, ul, 0);
+
+ else
+ goto ignore_obscure_skb;
+ } else {
+ if (ul != ipl - ihl)
+ goto ignore_obscure_skb;
+
+ skb->csum = csum_partial(udph, ul, 0);
+ }
+
+ udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
+ udplite ? IPPROTO_UDPLITE : IPPROTO_UDP,
+ skb->csum);
+
+ if (!udph->check)
+ udph->check = CSUM_MANGLED_0;
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ignore_obscure_skb:
+ return 1;
+}
+
+static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
+{
+ struct iphdr *iph;
+ int ntkoff;
+
+ ntkoff = skb_network_offset(skb);
+
+ if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
+ goto fail;
+
+ iph = ip_hdr(skb);
+
+ switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
+ case IPPROTO_ICMP:
+ if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
+ if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
+ ntohs(iph->tot_len)))
+ goto fail;
+ break;
+ case IPPROTO_IGMP:
+ if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
+ if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
+ ntohs(iph->tot_len)))
+ goto fail;
+ break;
+ case IPPROTO_TCP:
+ if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
+ if (!tcf_csum_ipv4_tcp(skb, iph, iph->ihl * 4,
+ ntohs(iph->tot_len)))
+ goto fail;
+ break;
+ case IPPROTO_UDP:
+ if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
+ if (!tcf_csum_ipv4_udp(skb, iph, iph->ihl * 4,
+ ntohs(iph->tot_len), 0))
+ goto fail;
+ break;
+ case IPPROTO_UDPLITE:
+ if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
+ if (!tcf_csum_ipv4_udp(skb, iph, iph->ihl * 4,
+ ntohs(iph->tot_len), 1))
+ goto fail;
+ break;
+ }
+
+ if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
+ if (skb_cloned(skb) &&
+ !skb_clone_writable(skb, sizeof(*iph) + ntkoff) &&
+ pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ goto fail;
+
+ ip_send_check(iph);
+ }
+
+ return 1;
+
+fail:
+ return 0;
+}
+
+static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh,
+ unsigned int ixhl, unsigned int *pl)
+{
+ int off, len, optlen;
+ unsigned char *xh = (void *)ip6xh;
+
+ off = sizeof(*ip6xh);
+ len = ixhl - off;
+
+ while (len > 1) {
+ switch (xh[off]) {
+ case IPV6_TLV_PAD0:
+ optlen = 1;
+ break;
+ case IPV6_TLV_JUMBO:
+ optlen = xh[off + 1] + 2;
+ if (optlen != 6 || len < 6 || (off & 3) != 2)
+ /* wrong jumbo option length/alignment */
+ return 0;
+ *pl = ntohl(*(__be32 *)(xh + off + 2));
+ goto done;
+ default:
+ optlen = xh[off + 1] + 2;
+ if (optlen > len)
+ /* ignore obscure options */
+ goto done;
+ break;
+ }
+ off += optlen;
+ len -= optlen;
+ }
+
+done:
+ return 1;
+}
+
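
tcf_csum_ipv6_hopopts() above walks the hop-by-hop TLV options looking for a well-formed jumbo payload option. A small sketch of the same TLV walk in isolation, with hypothetical helper and constant names, skipping Pad1 and stepping over every other option by its length byte:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	#define OPT_PAD1  0x00
	#define OPT_JUMBO 0xc2		/* IPV6_TLV_JUMBO, illustrative */

	/* Returns the offset of the wanted option type, or -1 if not present. */
	static int find_tlv(const uint8_t *opts, size_t len, uint8_t wanted)
	{
		size_t off = 0;

		while (off + 1 < len) {
			if (opts[off] == wanted)
				return (int)off;
			if (opts[off] == OPT_PAD1)
				off += 1;			/* Pad1 has no length byte */
			else
				off += 2 + opts[off + 1];	/* type + len + payload */
		}
		return -1;
	}

	int main(void)
	{
		/* a 2-byte PadN, then a jumbo option carrying a 4-byte length */
		uint8_t opts[] = { 0x01, 0x00, OPT_JUMBO, 0x04, 0x00, 0x01, 0x00, 0x00 };

		printf("jumbo option at offset %d\n",
		       find_tlv(opts, sizeof(opts), OPT_JUMBO));
		return 0;
	}
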
+static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags)
+{
+ struct ipv6hdr *ip6h;
+ struct ipv6_opt_hdr *ip6xh;
+ unsigned int hl, ixhl;
+ unsigned int pl;
+ int ntkoff;
+ u8 nexthdr;
+
+ ntkoff = skb_network_offset(skb);
+
+ hl = sizeof(*ip6h);
+
+ if (!pskb_may_pull(skb, hl + ntkoff))
+ goto fail;
+
+ ip6h = ipv6_hdr(skb);
+
+ pl = ntohs(ip6h->payload_len);
+ nexthdr = ip6h->nexthdr;
+
+ do {
+ switch (nexthdr) {
+ case NEXTHDR_FRAGMENT:
+ goto ignore_skb;
+ case NEXTHDR_ROUTING:
+ case NEXTHDR_HOP:
+ case NEXTHDR_DEST:
+ if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff))
+ goto fail;
+ ip6xh = (void *)(skb_network_header(skb) + hl);
+ ixhl = ipv6_optlen(ip6xh);
+ if (!pskb_may_pull(skb, hl + ixhl + ntkoff))
+ goto fail;
+ if ((nexthdr == NEXTHDR_HOP) &&
+ !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl)))
+ goto fail;
+ nexthdr = ip6xh->nexthdr;
+ hl += ixhl;
+ break;
+ case IPPROTO_ICMPV6:
+ if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
+ if (!tcf_csum_ipv6_icmp(skb, ip6h,
+ hl, pl + sizeof(*ip6h)))
+ goto fail;
+ goto done;
+ case IPPROTO_TCP:
+ if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
+ if (!tcf_csum_ipv6_tcp(skb, ip6h,
+ hl, pl + sizeof(*ip6h)))
+ goto fail;
+ goto done;
+ case IPPROTO_UDP:
+ if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
+ if (!tcf_csum_ipv6_udp(skb, ip6h, hl,
+ pl + sizeof(*ip6h), 0))
+ goto fail;
+ goto done;
+ case IPPROTO_UDPLITE:
+ if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
+ if (!tcf_csum_ipv6_udp(skb, ip6h, hl,
+ pl + sizeof(*ip6h), 1))
+ goto fail;
+ goto done;
+ default:
+ goto ignore_skb;
+ }
+ } while (pskb_may_pull(skb, hl + 1 + ntkoff));
+
+done:
+ignore_skb:
+ return 1;
+
+fail:
+ return 0;
+}
+
+static int tcf_csum(struct sk_buff *skb,
+ struct tc_action *a, struct tcf_result *res)
+{
+ struct tcf_csum *p = a->priv;
+ int action;
+ u32 update_flags;
+
+ spin_lock(&p->tcf_lock);
+ p->tcf_tm.lastuse = jiffies;
+ p->tcf_bstats.bytes += qdisc_pkt_len(skb);
+ p->tcf_bstats.packets++;
+ action = p->tcf_action;
+ update_flags = p->update_flags;
+ spin_unlock(&p->tcf_lock);
+
+ if (unlikely(action == TC_ACT_SHOT))
+ goto drop;
+
+ switch (skb->protocol) {
+ case cpu_to_be16(ETH_P_IP):
+ if (!tcf_csum_ipv4(skb, update_flags))
+ goto drop;
+ break;
+ case cpu_to_be16(ETH_P_IPV6):
+ if (!tcf_csum_ipv6(skb, update_flags))
+ goto drop;
+ break;
+ }
+
+ return action;
+
+drop:
+ spin_lock(&p->tcf_lock);
+ p->tcf_qstats.drops++;
+ spin_unlock(&p->tcf_lock);
+ return TC_ACT_SHOT;
+}
+
+static int tcf_csum_dump(struct sk_buff *skb,
+ struct tc_action *a, int bind, int ref)
+{
+ unsigned char *b = skb_tail_pointer(skb);
+ struct tcf_csum *p = a->priv;
+ struct tc_csum opt = {
+ .update_flags = p->update_flags,
+ .index = p->tcf_index,
+ .action = p->tcf_action,
+ .refcnt = p->tcf_refcnt - ref,
+ .bindcnt = p->tcf_bindcnt - bind,
+ };
+ struct tcf_t t;
+
+ NLA_PUT(skb, TCA_CSUM_PARMS, sizeof(opt), &opt);
+ t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
+ t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
+ t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
+ NLA_PUT(skb, TCA_CSUM_TM, sizeof(t), &t);
+
+ return skb->len;
+
+nla_put_failure:
+ nlmsg_trim(skb, b);
+ return -1;
+}
+
+static struct tc_action_ops act_csum_ops = {
+ .kind = "csum",
+ .hinfo = &csum_hash_info,
+ .type = TCA_ACT_CSUM,
+ .capab = TCA_CAP_NONE,
+ .owner = THIS_MODULE,
+ .act = tcf_csum,
+ .dump = tcf_csum_dump,
+ .cleanup = tcf_csum_cleanup,
+ .lookup = tcf_hash_search,
+ .init = tcf_csum_init,
+ .walk = tcf_generic_walker
+};
+
+MODULE_DESCRIPTION("Checksum updating actions");
+MODULE_LICENSE("GPL");
+
+static int __init csum_init_module(void)
+{
+ return tcf_register_action(&act_csum_ops);
+}
+
+static void __exit csum_cleanup_module(void)
+{
+ tcf_unregister_action(&act_csum_ops);
+}
+
+module_init(csum_init_module);
+module_exit(csum_cleanup_module);
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 8406c6654990..c2ed90a4c0b4 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -152,21 +152,24 @@ static int tcf_gact(struct sk_buff *skb, struct tc_action *a, struct tcf_result
static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
unsigned char *b = skb_tail_pointer(skb);
- struct tc_gact opt;
struct tcf_gact *gact = a->priv;
+ struct tc_gact opt = {
+ .index = gact->tcf_index,
+ .refcnt = gact->tcf_refcnt - ref,
+ .bindcnt = gact->tcf_bindcnt - bind,
+ .action = gact->tcf_action,
+ };
struct tcf_t t;
- opt.index = gact->tcf_index;
- opt.refcnt = gact->tcf_refcnt - ref;
- opt.bindcnt = gact->tcf_bindcnt - bind;
- opt.action = gact->tcf_action;
NLA_PUT(skb, TCA_GACT_PARMS, sizeof(opt), &opt);
#ifdef CONFIG_GACT_PROB
if (gact->tcfg_ptype) {
- struct tc_gact_p p_opt;
- p_opt.paction = gact->tcfg_paction;
- p_opt.pval = gact->tcfg_pval;
- p_opt.ptype = gact->tcfg_ptype;
+ struct tc_gact_p p_opt = {
+ .paction = gact->tcfg_paction,
+ .pval = gact->tcfg_pval,
+ .ptype = gact->tcfg_ptype,
+ };
+
NLA_PUT(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt);
}
#endif
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 11f195af2da0..0c311be92827 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -219,15 +219,16 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, i
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_mirred *m = a->priv;
- struct tc_mirred opt;
+ struct tc_mirred opt = {
+ .index = m->tcf_index,
+ .action = m->tcf_action,
+ .refcnt = m->tcf_refcnt - ref,
+ .bindcnt = m->tcf_bindcnt - bind,
+ .eaction = m->tcfm_eaction,
+ .ifindex = m->tcfm_ifindex,
+ };
struct tcf_t t;
- opt.index = m->tcf_index;
- opt.action = m->tcf_action;
- opt.refcnt = m->tcf_refcnt - ref;
- opt.bindcnt = m->tcf_bindcnt - bind;
- opt.eaction = m->tcfm_eaction;
- opt.ifindex = m->tcfm_ifindex;
NLA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt);
t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install);
t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse);
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 509a2d53a99d..186eb837e600 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -272,19 +272,19 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_nat *p = a->priv;
- struct tc_nat opt;
+ struct tc_nat opt = {
+ .old_addr = p->old_addr,
+ .new_addr = p->new_addr,
+ .mask = p->mask,
+ .flags = p->flags,
+
+ .index = p->tcf_index,
+ .action = p->tcf_action,
+ .refcnt = p->tcf_refcnt - ref,
+ .bindcnt = p->tcf_bindcnt - bind,
+ };
struct tcf_t t;
- opt.old_addr = p->old_addr;
- opt.new_addr = p->new_addr;
- opt.mask = p->mask;
- opt.flags = p->flags;
-
- opt.index = p->tcf_index;
- opt.action = p->tcf_action;
- opt.refcnt = p->tcf_refcnt - ref;
- opt.bindcnt = p->tcf_bindcnt - bind;
-
NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt);
t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 537a48732e9e..7ebf7439b478 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -350,22 +350,19 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_police *police = a->priv;
- struct tc_police opt;
-
- opt.index = police->tcf_index;
- opt.action = police->tcf_action;
- opt.mtu = police->tcfp_mtu;
- opt.burst = police->tcfp_burst;
- opt.refcnt = police->tcf_refcnt - ref;
- opt.bindcnt = police->tcf_bindcnt - bind;
+ struct tc_police opt = {
+ .index = police->tcf_index,
+ .action = police->tcf_action,
+ .mtu = police->tcfp_mtu,
+ .burst = police->tcfp_burst,
+ .refcnt = police->tcf_refcnt - ref,
+ .bindcnt = police->tcf_bindcnt - bind,
+ };
+
if (police->tcfp_R_tab)
opt.rate = police->tcfp_R_tab->rate;
- else
- memset(&opt.rate, 0, sizeof(opt.rate));
if (police->tcfp_P_tab)
opt.peakrate = police->tcfp_P_tab->rate;
- else
- memset(&opt.peakrate, 0, sizeof(opt.peakrate));
NLA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
if (police->tcfp_result)
NLA_PUT_U32(skb, TCA_POLICE_RESULT, police->tcfp_result);
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 4a1d640b0cf1..97e84f3ee775 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -164,13 +164,14 @@ static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_defact *d = a->priv;
- struct tc_defact opt;
+ struct tc_defact opt = {
+ .index = d->tcf_index,
+ .refcnt = d->tcf_refcnt - ref,
+ .bindcnt = d->tcf_bindcnt - bind,
+ .action = d->tcf_action,
+ };
struct tcf_t t;
- opt.index = d->tcf_index;
- opt.refcnt = d->tcf_refcnt - ref;
- opt.bindcnt = d->tcf_bindcnt - bind;
- opt.action = d->tcf_action;
NLA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt);
NLA_PUT_STRING(skb, TCA_DEF_DATA, d->tcfd_defdata);
t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index e9607fe55b58..66cbf4eb8855 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -159,13 +159,14 @@ static inline int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
{
unsigned char *b = skb_tail_pointer(skb);
struct tcf_skbedit *d = a->priv;
- struct tc_skbedit opt;
+ struct tc_skbedit opt = {
+ .index = d->tcf_index,
+ .refcnt = d->tcf_refcnt - ref,
+ .bindcnt = d->tcf_bindcnt - bind,
+ .action = d->tcf_action,
+ };
struct tcf_t t;
- opt.index = d->tcf_index;
- opt.refcnt = d->tcf_refcnt - ref;
- opt.bindcnt = d->tcf_bindcnt - bind;
- opt.action = d->tcf_action;
NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt);
if (d->flags & SKBEDIT_F_PRIORITY)
NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority),
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index e17096e3913c..5b271a18bc3a 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -111,44 +111,41 @@ static u32 flow_get_proto(struct sk_buff *skb)
}
}
-static int has_ports(u8 protocol)
-{
- switch (protocol) {
- case IPPROTO_TCP:
- case IPPROTO_UDP:
- case IPPROTO_UDPLITE:
- case IPPROTO_SCTP:
- case IPPROTO_DCCP:
- case IPPROTO_ESP:
- return 1;
- default:
- return 0;
- }
-}
-
static u32 flow_get_proto_src(struct sk_buff *skb)
{
switch (skb->protocol) {
case htons(ETH_P_IP): {
struct iphdr *iph;
+ int poff;
if (!pskb_network_may_pull(skb, sizeof(*iph)))
break;
iph = ip_hdr(skb);
- if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
- has_ports(iph->protocol) &&
- pskb_network_may_pull(skb, iph->ihl * 4 + 2))
- return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4));
+ if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+ break;
+ poff = proto_ports_offset(iph->protocol);
+ if (poff >= 0 &&
+ pskb_network_may_pull(skb, iph->ihl * 4 + 2 + poff)) {
+ iph = ip_hdr(skb);
+ return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 +
+ poff));
+ }
break;
}
case htons(ETH_P_IPV6): {
struct ipv6hdr *iph;
+ int poff;
- if (!pskb_network_may_pull(skb, sizeof(*iph) + 2))
+ if (!pskb_network_may_pull(skb, sizeof(*iph)))
break;
iph = ipv6_hdr(skb);
- if (has_ports(iph->nexthdr))
- return ntohs(*(__be16 *)&iph[1]);
+ poff = proto_ports_offset(iph->nexthdr);
+ if (poff >= 0 &&
+ pskb_network_may_pull(skb, sizeof(*iph) + poff + 2)) {
+ iph = ipv6_hdr(skb);
+ return ntohs(*(__be16 *)((void *)iph + sizeof(*iph) +
+ poff));
+ }
break;
}
}
@@ -161,24 +158,36 @@ static u32 flow_get_proto_dst(struct sk_buff *skb)
switch (skb->protocol) {
case htons(ETH_P_IP): {
struct iphdr *iph;
+ int poff;
if (!pskb_network_may_pull(skb, sizeof(*iph)))
break;
iph = ip_hdr(skb);
- if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
- has_ports(iph->protocol) &&
- pskb_network_may_pull(skb, iph->ihl * 4 + 4))
- return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 + 2));
+ if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+ break;
+ poff = proto_ports_offset(iph->protocol);
+ if (poff >= 0 &&
+ pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
+ iph = ip_hdr(skb);
+ return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 +
+ 2 + poff));
+ }
break;
}
case htons(ETH_P_IPV6): {
struct ipv6hdr *iph;
+ int poff;
- if (!pskb_network_may_pull(skb, sizeof(*iph) + 4))
+ if (!pskb_network_may_pull(skb, sizeof(*iph)))
break;
iph = ipv6_hdr(skb);
- if (has_ports(iph->nexthdr))
- return ntohs(*(__be16 *)((void *)&iph[1] + 2));
+ poff = proto_ports_offset(iph->nexthdr);
+ if (poff >= 0 &&
+ pskb_network_may_pull(skb, sizeof(*iph) + poff + 4)) {
+ iph = ipv6_hdr(skb);
+ return ntohs(*(__be16 *)((void *)iph + sizeof(*iph) +
+ poff + 2));
+ }
break;
}
}
@@ -297,6 +306,11 @@ static u32 flow_get_vlan_tag(const struct sk_buff *skb)
return tag & VLAN_VID_MASK;
}
+static u32 flow_get_rxhash(struct sk_buff *skb)
+{
+ return skb_get_rxhash(skb);
+}
+
static u32 flow_key_get(struct sk_buff *skb, int key)
{
switch (key) {
@@ -334,6 +348,8 @@ static u32 flow_key_get(struct sk_buff *skb, int key)
return flow_get_skgid(skb);
case FLOW_KEY_VLAN_TAG:
return flow_get_vlan_tag(skb);
+ case FLOW_KEY_RXHASH:
+ return flow_get_rxhash(skb);
default:
WARN_ON(1);
return 0;
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 3bcac8aa333c..34da5e29ea1a 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -223,6 +223,11 @@ META_COLLECTOR(int_maclen)
dst->value = skb->mac_len;
}
+META_COLLECTOR(int_rxhash)
+{
+ dst->value = skb_get_rxhash(skb);
+}
+
/**************************************************************************
* Netfilter
**************************************************************************/
@@ -541,6 +546,7 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX+1][TCF_META_ID_MAX+1] = {
[META_ID(SK_SENDMSG_OFF)] = META_FUNC(int_sk_sendmsg_off),
[META_ID(SK_WRITE_PENDING)] = META_FUNC(int_sk_write_pend),
[META_ID(VLAN_TAG)] = META_FUNC(int_vlan_tag),
+ [META_ID(RXHASH)] = META_FUNC(int_rxhash),
}
};
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b9e8c3b7d406..b22ca2d1cebc 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -150,22 +150,34 @@ int register_qdisc(struct Qdisc_ops *qops)
if (qops->enqueue == NULL)
qops->enqueue = noop_qdisc_ops.enqueue;
if (qops->peek == NULL) {
- if (qops->dequeue == NULL) {
+ if (qops->dequeue == NULL)
qops->peek = noop_qdisc_ops.peek;
- } else {
- rc = -EINVAL;
- goto out;
- }
+ else
+ goto out_einval;
}
if (qops->dequeue == NULL)
qops->dequeue = noop_qdisc_ops.dequeue;
+ if (qops->cl_ops) {
+ const struct Qdisc_class_ops *cops = qops->cl_ops;
+
+ if (!(cops->get && cops->put && cops->walk && cops->leaf))
+ goto out_einval;
+
+ if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
+ goto out_einval;
+ }
+
qops->next = NULL;
*qp = qops;
rc = 0;
out:
write_unlock(&qdisc_mod_lock);
return rc;
+
+out_einval:
+ rc = -EINVAL;
+ goto out;
}
EXPORT_SYMBOL(register_qdisc);
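
The new checks in register_qdisc() reject classful qdiscs whose Qdisc_class_ops table is incomplete: get, put, walk and leaf must all be present, and a tcf_chain hook is only accepted together with bind_tcf/unbind_tcf (the sch_sfq hunk below adds exactly those missing stubs). Here is a stand-alone sketch of the same predicate over a stubbed ops structure; only the field names mirror the kernel struct, everything else is illustrative.

#include <stdbool.h>
#include <stdio.h>

struct class_ops_stub {
	void *get, *put, *walk, *leaf;		/* mandatory as a group */
	void *tcf_chain, *bind_tcf, *unbind_tcf;
};

static bool class_ops_complete(const struct class_ops_stub *c)
{
	if (!(c->get && c->put && c->walk && c->leaf))
		return false;			/* core ops missing */
	if (c->tcf_chain && !(c->bind_tcf && c->unbind_tcf))
		return false;			/* filters without (un)bind */
	return true;
}

int main(void)
{
	struct class_ops_stub incomplete = { .get = (void *)1 };

	printf("incomplete ops accepted: %s\n",
	       class_ops_complete(&incomplete) ? "yes" : "no");
	return 0;
}
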
@@ -228,7 +240,10 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
if (q)
goto out;
- q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
+ if (dev_ingress_queue(dev))
+ q = qdisc_match_from_root(
+ dev_ingress_queue(dev)->qdisc_sleeping,
+ handle);
out:
return q;
}
@@ -348,7 +363,7 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
}
- if (!s || tsize != s->tsize || (!tab && tsize > 0))
+ if (tsize != s->tsize || (!tab && tsize > 0))
return ERR_PTR(-EINVAL);
spin_lock(&qdisc_stab_lock);
@@ -678,6 +693,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
(new && new->flags & TCQ_F_INGRESS)) {
num_q = 1;
ingress = 1;
+ if (!dev_ingress_queue(dev))
+ return -ENOENT;
}
if (dev->flags & IFF_UP)
@@ -689,7 +706,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
}
for (i = 0; i < num_q; i++) {
- struct netdev_queue *dev_queue = &dev->rx_queue;
+ struct netdev_queue *dev_queue = dev_ingress_queue(dev);
if (!ingress)
dev_queue = netdev_get_tx_queue(dev, i);
@@ -967,7 +984,8 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
return -ENOENT;
q = qdisc_leaf(p, clid);
} else { /* ingress */
- q = dev->rx_queue.qdisc_sleeping;
+ if (dev_ingress_queue(dev))
+ q = dev_ingress_queue(dev)->qdisc_sleeping;
}
} else {
q = dev->qdisc;
@@ -1031,8 +1049,9 @@ replay:
if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
return -ENOENT;
q = qdisc_leaf(p, clid);
- } else { /*ingress */
- q = dev->rx_queue.qdisc_sleeping;
+ } else { /* ingress */
+ if (dev_ingress_queue_create(dev))
+ q = dev_ingress_queue(dev)->qdisc_sleeping;
}
} else {
q = dev->qdisc;
@@ -1111,11 +1130,14 @@ replay:
create_n_graft:
if (!(n->nlmsg_flags&NLM_F_CREATE))
return -ENOENT;
- if (clid == TC_H_INGRESS)
- q = qdisc_create(dev, &dev->rx_queue, p,
- tcm->tcm_parent, tcm->tcm_parent,
- tca, &err);
- else {
+ if (clid == TC_H_INGRESS) {
+ if (dev_ingress_queue(dev))
+ q = qdisc_create(dev, dev_ingress_queue(dev), p,
+ tcm->tcm_parent, tcm->tcm_parent,
+ tca, &err);
+ else
+ err = -ENOENT;
+ } else {
struct netdev_queue *dev_queue;
if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
@@ -1292,8 +1314,10 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
goto done;
- dev_queue = &dev->rx_queue;
- if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0)
+ dev_queue = dev_ingress_queue(dev);
+ if (dev_queue &&
+ tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
+ &q_idx, s_q_idx) < 0)
goto done;
cont:
@@ -1583,8 +1607,10 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
goto done;
- dev_queue = &dev->rx_queue;
- if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
+ dev_queue = dev_ingress_queue(dev);
+ if (dev_queue &&
+ tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
+ &t, s_t) < 0)
goto done;
done:
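
From here on the fixed &dev->rx_queue is gone: the ingress netdev_queue is allocated on demand and reached through dev_ingress_queue()/dev_ingress_queue_create(), so every caller has to cope with a NULL return. A toy illustration of that calling convention, with stub types standing in for the kernel ones:

#include <stdio.h>

struct queue { int qlen; };
struct device { struct queue *ingress; /* NULL until first needed */ };

/* Accessor in the style of dev_ingress_queue(): may return NULL. */
static struct queue *ingress_queue(struct device *d)
{
	return d->ingress;
}

int main(void)
{
	struct device dev = { .ingress = NULL };
	struct queue *q = ingress_queue(&dev);

	if (q)
		printf("ingress qlen %d\n", q->qlen);
	else
		printf("no ingress queue allocated\n");
	return 0;
}
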
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index e114f23d5eae..6318e1136b83 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -255,10 +255,6 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
error = -EINVAL;
goto err_out;
}
- if (!list_empty(&flow->list)) {
- error = -EEXIST;
- goto err_out;
- }
} else {
int i;
unsigned long cl;
@@ -418,7 +414,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
}
ret = qdisc_enqueue(skb, flow->q);
- if (ret != 0) {
+ if (ret != NET_XMIT_SUCCESS) {
drop: __maybe_unused
if (net_xmit_drop_count(ret)) {
sch->qstats.drops++;
@@ -442,7 +438,7 @@ drop: __maybe_unused
*/
if (flow == &p->link) {
sch->q.qlen++;
- return 0;
+ return NET_XMIT_SUCCESS;
}
tasklet_schedule(&p->task);
return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 2aeb3a4386a1..3d57681bdb76 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -753,7 +753,8 @@ void dev_activate(struct net_device *dev)
need_watchdog = 0;
netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
- transition_one_qdisc(dev, &dev->rx_queue, NULL);
+ if (dev_ingress_queue(dev))
+ transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);
if (need_watchdog) {
dev->trans_start = jiffies;
@@ -812,7 +813,8 @@ static bool some_qdisc_is_busy(struct net_device *dev)
void dev_deactivate(struct net_device *dev)
{
netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
- dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc);
+ if (dev_ingress_queue(dev))
+ dev_deactivate_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
dev_watchdog_down(dev);
@@ -838,7 +840,8 @@ void dev_init_scheduler(struct net_device *dev)
{
dev->qdisc = &noop_qdisc;
netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
- dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
+ if (dev_ingress_queue(dev))
+ dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}
@@ -861,7 +864,8 @@ static void shutdown_scheduler_queue(struct net_device *dev,
void dev_shutdown(struct net_device *dev)
{
netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
- shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
+ if (dev_ingress_queue(dev))
+ shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
qdisc_destroy(dev->qdisc);
dev->qdisc = &noop_qdisc;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index abd904be4287..47496098d35c 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -761,8 +761,8 @@ init_vf(struct hfsc_class *cl, unsigned int len)
if (f != cl->cl_f) {
cl->cl_f = f;
cftree_update(cl);
- update_cfmin(cl->cl_parent);
}
+ update_cfmin(cl->cl_parent);
}
}
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 534f33231c17..3cf478d012dd 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -123,40 +123,39 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
case htons(ETH_P_IP):
{
const struct iphdr *iph;
+ int poff;
if (!pskb_network_may_pull(skb, sizeof(*iph)))
goto err;
iph = ip_hdr(skb);
h = (__force u32)iph->daddr;
h2 = (__force u32)iph->saddr ^ iph->protocol;
- if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
- (iph->protocol == IPPROTO_TCP ||
- iph->protocol == IPPROTO_UDP ||
- iph->protocol == IPPROTO_UDPLITE ||
- iph->protocol == IPPROTO_SCTP ||
- iph->protocol == IPPROTO_DCCP ||
- iph->protocol == IPPROTO_ESP) &&
- pskb_network_may_pull(skb, iph->ihl * 4 + 4))
- h2 ^= *(((u32*)iph) + iph->ihl);
+ if (iph->frag_off & htons(IP_MF|IP_OFFSET))
+ break;
+ poff = proto_ports_offset(iph->protocol);
+ if (poff >= 0 &&
+ pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
+ iph = ip_hdr(skb);
+ h2 ^= *(u32*)((void *)iph + iph->ihl * 4 + poff);
+ }
break;
}
case htons(ETH_P_IPV6):
{
struct ipv6hdr *iph;
+ int poff;
if (!pskb_network_may_pull(skb, sizeof(*iph)))
goto err;
iph = ipv6_hdr(skb);
h = (__force u32)iph->daddr.s6_addr32[3];
h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
- if ((iph->nexthdr == IPPROTO_TCP ||
- iph->nexthdr == IPPROTO_UDP ||
- iph->nexthdr == IPPROTO_UDPLITE ||
- iph->nexthdr == IPPROTO_SCTP ||
- iph->nexthdr == IPPROTO_DCCP ||
- iph->nexthdr == IPPROTO_ESP) &&
- pskb_network_may_pull(skb, sizeof(*iph) + 4))
- h2 ^= *(u32*)&iph[1];
+ poff = proto_ports_offset(iph->nexthdr);
+ if (poff >= 0 &&
+ pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
+ iph = ipv6_hdr(skb);
+ h2 ^= *(u32*)((void *)iph + sizeof(*iph) + poff);
+ }
break;
}
default:
@@ -334,7 +333,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (++sch->q.qlen <= q->limit) {
sch->bstats.bytes += qdisc_pkt_len(skb);
sch->bstats.packets++;
- return 0;
+ return NET_XMIT_SUCCESS;
}
sfq_drop(sch);
@@ -508,6 +507,11 @@ nla_put_failure:
return -1;
}
+static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
+{
+ return NULL;
+}
+
static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
{
return 0;
@@ -519,6 +523,10 @@ static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
return 0;
}
+static void sfq_put(struct Qdisc *q, unsigned long cl)
+{
+}
+
static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
{
struct sfq_sched_data *q = qdisc_priv(sch);
@@ -571,9 +579,12 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
}
static const struct Qdisc_class_ops sfq_class_ops = {
+ .leaf = sfq_leaf,
.get = sfq_get,
+ .put = sfq_put,
.tcf_chain = sfq_find_tcf,
.bind_tcf = sfq_bind,
+ .unbind_tcf = sfq_put,
.dump = sfq_dump_class,
.dump_stats = sfq_dump_class_stats,
.walk = sfq_walk,
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 0991c640cd3e..641a30d64635 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -127,7 +127,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
return qdisc_reshape_fail(skb, sch);
ret = qdisc_enqueue(skb, q->qdisc);
- if (ret != 0) {
+ if (ret != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret))
sch->qstats.drops++;
return ret;
@@ -136,7 +136,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
sch->q.qlen++;
sch->bstats.bytes += qdisc_pkt_len(skb);
sch->bstats.packets++;
- return 0;
+ return NET_XMIT_SUCCESS;
}
static unsigned int tbf_drop(struct Qdisc* sch)
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 807643bdcbac..feaabc103ce6 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -85,7 +85,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
__skb_queue_tail(&q->q, skb);
sch->bstats.bytes += qdisc_pkt_len(skb);
sch->bstats.packets++;
- return 0;
+ return NET_XMIT_SUCCESS;
}
kfree_skb(skb);
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 0b85e5256434..5f1fb8bd862d 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -48,6 +48,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
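
The pr_fmt() definition added at the top of each SCTP file makes every later pr_err()/pr_warn()/pr_info() call prefix its message with the module name, which is why the explicit "SCTP: " strings disappear from the converted printk()s in the following hunks; it has to be defined before the include block because the pr_* macros in printk.h expand it. A user-space imitation of the mechanism (KBUILD_MODNAME is normally supplied by kbuild and is hard-coded here for the sketch):

#include <stdio.h>

#define KBUILD_MODNAME "sctp"			/* kbuild defines this per object */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must precede the pr_* macros */
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)
#define pr_err(fmt, ...)  fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_info("Hash tables configured (established %d bind %d)\n", 64, 1024);
	pr_err("Failed to create the SCTP control socket\n");
	return 0;
}
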
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 86366390038a..ddbbf7c81fa1 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -543,16 +543,20 @@ struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc)
id = ntohs(hmacs->hmac_ids[i]);
/* Check the id is in the supported range */
- if (id > SCTP_AUTH_HMAC_ID_MAX)
+ if (id > SCTP_AUTH_HMAC_ID_MAX) {
+ id = 0;
continue;
+ }
/* See if we support the id. Supported IDs have name and
* length fields set, so that we can allocate and use
* them. We can safely just check for name, for without the
* name, we can't allocate the TFM.
*/
- if (!sctp_hmac_list[id].hmac_name)
+ if (!sctp_hmac_list[id].hmac_name) {
+ id = 0;
continue;
+ }
break;
}
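
The id = 0 resets above matter because the code after this loop (outside the hunk) treats a non-zero id as "usable HMAC found" and indexes the supported-algorithm table with it; without the reset, a rejected identifier from the last iteration could leak out and be used as an out-of-range index. A stripped-down illustration of the pattern, with made-up data in place of the SCTP structures:

#include <stdio.h>

#define SUPPORTED_MAX 3
static const char *supported[SUPPORTED_MAX + 1] = {
	NULL, "hmac-sha1", NULL, "hmac-sha256"	/* id 0 means "none" */
};

/* Pick the first usable id from the peer's preference list. */
static int pick_hmac(const int *peer_ids, int n)
{
	int id = 0, i;

	for (i = 0; i < n; i++) {
		id = peer_ids[i];
		if (id < 0 || id > SUPPORTED_MAX) {
			id = 0;		/* reset, or a bogus id survives */
			continue;
		}
		if (!supported[id]) {
			id = 0;		/* ditto for unimplemented ids */
			continue;
		}
		break;
	}
	return id;			/* 0 == nothing usable */
}

int main(void)
{
	int peer[] = { 99, 2 };		/* both unusable */

	printf("chosen id: %d\n", pick_hmac(peer, 2));
	return 0;
}
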
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 476caaf100ed..6c8556459a75 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -37,6 +37,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/net.h>
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index ccb6dc48d15b..397296fb156f 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -43,6 +43,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <linux/interrupt.h>
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 732689140fb8..95e0c8eda1a0 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -47,6 +47,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
@@ -336,7 +338,7 @@ static void sctp_v6_get_saddr(struct sctp_sock *sk,
memcpy(saddr, baddr, sizeof(union sctp_addr));
SCTP_DEBUG_PRINTK("saddr: %pI6\n", &saddr->v6.sin6_addr);
} else {
- printk(KERN_ERR "%s: asoc:%p Could not find a valid source "
+ pr_err("%s: asoc:%p Could not find a valid source "
"address for the dest:%pI6\n",
__func__, asoc, &daddr->v6.sin6_addr);
}
diff --git a/net/sctp/objcnt.c b/net/sctp/objcnt.c
index f73ec0ea93ba..8ef8e7d9eb61 100644
--- a/net/sctp/objcnt.c
+++ b/net/sctp/objcnt.c
@@ -38,6 +38,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <net/sctp/sctp.h>
@@ -134,8 +136,7 @@ void sctp_dbg_objcnt_init(void)
ent = proc_create("sctp_dbg_objcnt", 0,
proc_net_sctp, &sctp_objcnt_ops);
if (!ent)
- printk(KERN_WARNING
- "sctp_dbg_objcnt: Unable to create /proc entry.\n");
+ pr_warn("sctp_dbg_objcnt: Unable to create /proc entry.\n");
}
/* Cleanup the objcount entry in the proc filesystem. */
diff --git a/net/sctp/output.c b/net/sctp/output.c
index a646681f5acd..60600d337a3a 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -41,6 +41,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
@@ -92,7 +94,6 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__,
packet, vtag);
- sctp_packet_reset(packet);
packet->vtag = vtag;
if (ecn_capable && sctp_packet_empty(packet)) {
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index c04b2eb59186..8c6d379b4bb6 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -46,6 +46,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/list.h> /* For struct list_head */
#include <linux/socket.h>
@@ -1463,23 +1465,23 @@ static void sctp_check_transmitted(struct sctp_outq *q,
/* Display the end of the
* current range.
*/
- SCTP_DEBUG_PRINTK("-%08x",
- dbg_last_ack_tsn);
+ SCTP_DEBUG_PRINTK_CONT("-%08x",
+ dbg_last_ack_tsn);
}
/* Start a new range. */
- SCTP_DEBUG_PRINTK(",%08x", tsn);
+ SCTP_DEBUG_PRINTK_CONT(",%08x", tsn);
dbg_ack_tsn = tsn;
break;
case 1: /* The last TSN was NOT ACKed. */
if (dbg_last_kept_tsn != dbg_kept_tsn) {
/* Display the end of current range. */
- SCTP_DEBUG_PRINTK("-%08x",
- dbg_last_kept_tsn);
+ SCTP_DEBUG_PRINTK_CONT("-%08x",
+ dbg_last_kept_tsn);
}
- SCTP_DEBUG_PRINTK("\n");
+ SCTP_DEBUG_PRINTK_CONT("\n");
/* FALL THROUGH... */
default:
@@ -1526,18 +1528,18 @@ static void sctp_check_transmitted(struct sctp_outq *q,
break;
if (dbg_last_kept_tsn != dbg_kept_tsn)
- SCTP_DEBUG_PRINTK("-%08x",
- dbg_last_kept_tsn);
+ SCTP_DEBUG_PRINTK_CONT("-%08x",
+ dbg_last_kept_tsn);
- SCTP_DEBUG_PRINTK(",%08x", tsn);
+ SCTP_DEBUG_PRINTK_CONT(",%08x", tsn);
dbg_kept_tsn = tsn;
break;
case 0:
if (dbg_last_ack_tsn != dbg_ack_tsn)
- SCTP_DEBUG_PRINTK("-%08x",
- dbg_last_ack_tsn);
- SCTP_DEBUG_PRINTK("\n");
+ SCTP_DEBUG_PRINTK_CONT("-%08x",
+ dbg_last_ack_tsn);
+ SCTP_DEBUG_PRINTK_CONT("\n");
/* FALL THROUGH... */
default:
@@ -1556,17 +1558,17 @@ static void sctp_check_transmitted(struct sctp_outq *q,
switch (dbg_prt_state) {
case 0:
if (dbg_last_ack_tsn != dbg_ack_tsn) {
- SCTP_DEBUG_PRINTK("-%08x\n", dbg_last_ack_tsn);
+ SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_ack_tsn);
} else {
- SCTP_DEBUG_PRINTK("\n");
+ SCTP_DEBUG_PRINTK_CONT("\n");
}
break;
case 1:
if (dbg_last_kept_tsn != dbg_kept_tsn) {
- SCTP_DEBUG_PRINTK("-%08x\n", dbg_last_kept_tsn);
+ SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_kept_tsn);
} else {
- SCTP_DEBUG_PRINTK("\n");
+ SCTP_DEBUG_PRINTK_CONT("\n");
}
}
#endif /* SCTP_DEBUG */
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
index db3a42b8b349..2e63e9dc010e 100644
--- a/net/sctp/probe.c
+++ b/net/sctp/probe.c
@@ -22,6 +22,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/socket.h>
@@ -192,7 +194,7 @@ static __init int sctpprobe_init(void)
if (ret)
goto remove_proc;
- pr_info("SCTP probe registered (port=%d)\n", port);
+ pr_info("probe registered (port=%d)\n", port);
return 0;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 5027b83f1cc0..1ef29c74d85e 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -46,6 +46,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
@@ -707,8 +709,7 @@ static int sctp_ctl_sock_init(void)
&init_net);
if (err < 0) {
- printk(KERN_ERR
- "SCTP: Failed to create the SCTP control socket.\n");
+ pr_err("Failed to create the SCTP control socket\n");
return err;
}
return 0;
@@ -798,7 +799,7 @@ static void sctp_inet_skb_msgname(struct sk_buff *skb, char *msgname, int *len)
static int sctp_inet_af_supported(sa_family_t family, struct sctp_sock *sp)
{
/* PF_INET only supports AF_INET addresses. */
- return (AF_INET == family);
+ return AF_INET == family;
}
/* Address matching with wildcards allowed. */
@@ -1206,7 +1207,7 @@ SCTP_STATIC __init int sctp_init(void)
__get_free_pages(GFP_ATOMIC, order);
} while (!sctp_assoc_hashtable && --order > 0);
if (!sctp_assoc_hashtable) {
- printk(KERN_ERR "SCTP: Failed association hash alloc.\n");
+ pr_err("Failed association hash alloc\n");
status = -ENOMEM;
goto err_ahash_alloc;
}
@@ -1220,7 +1221,7 @@ SCTP_STATIC __init int sctp_init(void)
sctp_ep_hashtable = (struct sctp_hashbucket *)
kmalloc(64 * sizeof(struct sctp_hashbucket), GFP_KERNEL);
if (!sctp_ep_hashtable) {
- printk(KERN_ERR "SCTP: Failed endpoint_hash alloc.\n");
+ pr_err("Failed endpoint_hash alloc\n");
status = -ENOMEM;
goto err_ehash_alloc;
}
@@ -1239,7 +1240,7 @@ SCTP_STATIC __init int sctp_init(void)
__get_free_pages(GFP_ATOMIC, order);
} while (!sctp_port_hashtable && --order > 0);
if (!sctp_port_hashtable) {
- printk(KERN_ERR "SCTP: Failed bind hash alloc.");
+ pr_err("Failed bind hash alloc\n");
status = -ENOMEM;
goto err_bhash_alloc;
}
@@ -1248,8 +1249,7 @@ SCTP_STATIC __init int sctp_init(void)
INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
}
- printk(KERN_INFO "SCTP: Hash tables configured "
- "(established %d bind %d)\n",
+ pr_info("Hash tables configured (established %d bind %d)\n",
sctp_assoc_hashsize, sctp_port_hashsize);
/* Disable ADDIP by default. */
@@ -1290,8 +1290,7 @@ SCTP_STATIC __init int sctp_init(void)
/* Initialize the control inode/socket for handling OOTB packets. */
if ((status = sctp_ctl_sock_init())) {
- printk (KERN_ERR
- "SCTP: Failed to initialize the SCTP control sock.\n");
+ pr_err("Failed to initialize the SCTP control sock\n");
goto err_ctl_sock_init;
}
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 246f92924658..2cc46f0962ca 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -50,6 +50,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ip.h>
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index f5e5e27cac5e..b21b218d564f 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -47,6 +47,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/socket.h>
@@ -1146,26 +1148,23 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
case SCTP_DISPOSITION_VIOLATION:
if (net_ratelimit())
- printk(KERN_ERR "sctp protocol violation state %d "
- "chunkid %d\n", state, subtype.chunk);
+ pr_err("protocol violation state %d chunkid %d\n",
+ state, subtype.chunk);
break;
case SCTP_DISPOSITION_NOT_IMPL:
- printk(KERN_WARNING "sctp unimplemented feature in state %d, "
- "event_type %d, event_id %d\n",
- state, event_type, subtype.chunk);
+ pr_warn("unimplemented feature in state %d, event_type %d, event_id %d\n",
+ state, event_type, subtype.chunk);
break;
case SCTP_DISPOSITION_BUG:
- printk(KERN_ERR "sctp bug in state %d, "
- "event_type %d, event_id %d\n",
+ pr_err("bug in state %d, event_type %d, event_id %d\n",
state, event_type, subtype.chunk);
BUG();
break;
default:
- printk(KERN_ERR "sctp impossible disposition %d "
- "in state %d, event_type %d, event_id %d\n",
+ pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n",
status, state, event_type, subtype.chunk);
BUG();
break;
@@ -1679,8 +1678,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
sctp_cmd_send_asconf(asoc);
break;
default:
- printk(KERN_WARNING "Impossible command: %u, %p\n",
- cmd->verb, cmd->obj.ptr);
+ pr_warn("Impossible command: %u, %p\n",
+ cmd->verb, cmd->obj.ptr);
break;
}
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 24b2cd555637..4b4eb7c96bbd 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -50,6 +50,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ip.h>
@@ -1138,18 +1140,16 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
if (unlikely(!link)) {
if (from_addr.sa.sa_family == AF_INET6) {
if (net_ratelimit())
- printk(KERN_WARNING
- "%s association %p could not find address %pI6\n",
- __func__,
- asoc,
- &from_addr.v6.sin6_addr);
+ pr_warn("%s association %p could not find address %pI6\n",
+ __func__,
+ asoc,
+ &from_addr.v6.sin6_addr);
} else {
if (net_ratelimit())
- printk(KERN_WARNING
- "%s association %p could not find address %pI4\n",
- __func__,
- asoc,
- &from_addr.v4.sin_addr.s_addr);
+ pr_warn("%s association %p could not find address %pI4\n",
+ __func__,
+ asoc,
+ &from_addr.v4.sin_addr.s_addr);
}
return SCTP_DISPOSITION_DISCARD;
}
@@ -1232,6 +1232,18 @@ out:
return 0;
}
+static bool list_has_sctp_addr(const struct list_head *list,
+ union sctp_addr *ipaddr)
+{
+ struct sctp_transport *addr;
+
+ list_for_each_entry(addr, list, transports) {
+ if (sctp_cmp_addr_exact(ipaddr, &addr->ipaddr))
+ return true;
+ }
+
+ return false;
+}
/* A restart is occurring, check to make sure no new addresses
* are being added as we may be under a takeover attack.
*/
@@ -1240,10 +1252,10 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
struct sctp_chunk *init,
sctp_cmd_seq_t *commands)
{
- struct sctp_transport *new_addr, *addr;
- int found;
+ struct sctp_transport *new_addr;
+ int ret = 1;
- /* Implementor's Guide - Sectin 5.2.2
+ /* Implementor's Guide - Section 5.2.2
* ...
* Before responding the endpoint MUST check to see if the
* unexpected INIT adds new addresses to the association. If new
@@ -1254,31 +1266,19 @@ static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
/* Search through all current addresses and make sure
* we aren't adding any new ones.
*/
- new_addr = NULL;
- found = 0;
-
list_for_each_entry(new_addr, &new_asoc->peer.transport_addr_list,
- transports) {
- found = 0;
- list_for_each_entry(addr, &asoc->peer.transport_addr_list,
- transports) {
- if (sctp_cmp_addr_exact(&new_addr->ipaddr,
- &addr->ipaddr)) {
- found = 1;
- break;
- }
- }
- if (!found)
+ transports) {
+ if (!list_has_sctp_addr(&asoc->peer.transport_addr_list,
+ &new_addr->ipaddr)) {
+ sctp_sf_send_restart_abort(&new_addr->ipaddr, init,
+ commands);
+ ret = 0;
break;
- }
-
- /* If a new address was added, ABORT the sender. */
- if (!found && new_addr) {
- sctp_sf_send_restart_abort(&new_addr->ipaddr, init, commands);
+ }
}
/* Return success if all addresses were found. */
- return found;
+ return ret;
}
/* Populate the verification/tie tags based on overlapping INIT
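
The rewrite just above pulls the inner "is this address already known?" search out into list_has_sctp_addr() and aborts as soon as one genuinely new address is seen, replacing the found/new_addr bookkeeping of the old double loop. A plain-C illustration of the same refactor over arrays; sctp_cmp_addr_exact() and the kernel list iterators are replaced by trivial stand-ins:

#include <stdbool.h>
#include <stdio.h>

static bool list_has_addr(const int *list, int n, int addr)
{
	for (int i = 0; i < n; i++)
		if (list[i] == addr)
			return true;
	return false;
}

/* Return 1 if every restart address already exists in the old set. */
static int check_restart_addrs(const int *old, int n_old,
			       const int *restart, int n_restart)
{
	for (int i = 0; i < n_restart; i++)
		if (!list_has_addr(old, n_old, restart[i]))
			return 0;	/* new address added: caller aborts */
	return 1;
}

int main(void)
{
	int old[] = { 1, 2, 3 }, restart[] = { 3, 4 };

	printf("restart ok: %d\n", check_restart_addrs(old, 3, restart, 2));
	return 0;
}
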
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 6d9b3aafcc5d..546d4387fb3c 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -46,6 +46,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/skbuff.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
@@ -66,15 +68,19 @@ static const sctp_sm_table_entry_t bug = {
.name = "sctp_sf_bug"
};
-#define DO_LOOKUP(_max, _type, _table) \
- if ((event_subtype._type > (_max))) { \
- printk(KERN_WARNING \
- "sctp table %p possible attack:" \
- " event %d exceeds max %d\n", \
- _table, event_subtype._type, _max); \
- return &bug; \
- } \
- return &_table[event_subtype._type][(int)state];
+#define DO_LOOKUP(_max, _type, _table) \
+({ \
+ const sctp_sm_table_entry_t *rtn; \
+ \
+ if ((event_subtype._type > (_max))) { \
+ pr_warn("table %p possible attack: event %d exceeds max %d\n", \
+ _table, event_subtype._type, _max); \
+ rtn = &bug; \
+ } else \
+ rtn = &_table[event_subtype._type][(int)state]; \
+ \
+ rtn; \
+})
const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
sctp_state_t state,
@@ -83,21 +89,15 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
switch (event_type) {
case SCTP_EVENT_T_CHUNK:
return sctp_chunk_event_lookup(event_subtype.chunk, state);
- break;
case SCTP_EVENT_T_TIMEOUT:
- DO_LOOKUP(SCTP_EVENT_TIMEOUT_MAX, timeout,
- timeout_event_table);
- break;
-
+ return DO_LOOKUP(SCTP_EVENT_TIMEOUT_MAX, timeout,
+ timeout_event_table);
case SCTP_EVENT_T_OTHER:
- DO_LOOKUP(SCTP_EVENT_OTHER_MAX, other, other_event_table);
- break;
-
+ return DO_LOOKUP(SCTP_EVENT_OTHER_MAX, other,
+ other_event_table);
case SCTP_EVENT_T_PRIMITIVE:
- DO_LOOKUP(SCTP_EVENT_PRIMITIVE_MAX, primitive,
- primitive_event_table);
- break;
-
+ return DO_LOOKUP(SCTP_EVENT_PRIMITIVE_MAX, primitive,
+ primitive_event_table);
default:
/* Yikes! We got an illegal event type. */
return &bug;
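
The reworked DO_LOOKUP() is a GCC statement expression, ({ ... }), whose last statement yields the macro's value, so callers can write return DO_LOOKUP(...) instead of having the old macro hide a return inside itself. A small self-contained example of the construct (a GCC/Clang extension):

#include <stdio.h>

/* Clamp with a statement expression: the block evaluates to __v. */
#define CLAMP_MAX(val, max)				\
({							\
	int __v = (val);				\
	if (__v > (max)) {				\
		fprintf(stderr, "clamped %d to %d\n",	\
			__v, (max));			\
		__v = (max);				\
	}						\
	__v;						\
})

int main(void)
{
	printf("%d\n", CLAMP_MAX(5, 10));	/* 5 */
	printf("%d\n", CLAMP_MAX(42, 10));	/* warns, then 10 */
	return 0;
}
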
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index ca44917872d2..e34ca9cc1167 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -57,6 +57,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
@@ -916,6 +918,11 @@ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
/* Walk through the addrs buffer and count the number of addresses. */
addr_buf = kaddrs;
while (walk_size < addrs_size) {
+ if (walk_size + sizeof(sa_family_t) > addrs_size) {
+ kfree(kaddrs);
+ return -EINVAL;
+ }
+
sa_addr = (struct sockaddr *)addr_buf;
af = sctp_get_af_specific(sa_addr->sa_family);
@@ -1002,9 +1009,13 @@ static int __sctp_connect(struct sock* sk,
/* Walk through the addrs buffer and count the number of addresses. */
addr_buf = kaddrs;
while (walk_size < addrs_size) {
+ if (walk_size + sizeof(sa_family_t) > addrs_size) {
+ err = -EINVAL;
+ goto out_free;
+ }
+
sa_addr = (union sctp_addr *)addr_buf;
af = sctp_get_af_specific(sa_addr->sa.sa_family);
- port = ntohs(sa_addr->v4.sin_port);
/* If the address family is not supported or if this address
* causes the address buffer to overflow return EINVAL.
@@ -1014,6 +1025,8 @@ static int __sctp_connect(struct sock* sk,
goto out_free;
}
+ port = ntohs(sa_addr->v4.sin_port);
+
/* Save current address so we can work with it */
memcpy(&to, sa_addr, af->sockaddr_len);
@@ -2458,9 +2471,8 @@ static int sctp_setsockopt_delayed_ack(struct sock *sk,
if (params.sack_delay == 0 && params.sack_freq == 0)
return 0;
} else if (optlen == sizeof(struct sctp_assoc_value)) {
- printk(KERN_WARNING "SCTP: Use of struct sctp_assoc_value "
- "in delayed_ack socket option deprecated\n");
- printk(KERN_WARNING "SCTP: Use struct sctp_sack_info instead\n");
+ pr_warn("Use of struct sctp_assoc_value in delayed_ack socket option deprecated\n");
+ pr_warn("Use struct sctp_sack_info instead\n");
if (copy_from_user(&params, optval, optlen))
return -EFAULT;
@@ -2868,10 +2880,8 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, unsigned
int val;
if (optlen == sizeof(int)) {
- printk(KERN_WARNING
- "SCTP: Use of int in maxseg socket option deprecated\n");
- printk(KERN_WARNING
- "SCTP: Use struct sctp_assoc_value instead\n");
+ pr_warn("Use of int in maxseg socket option deprecated\n");
+ pr_warn("Use struct sctp_assoc_value instead\n");
if (copy_from_user(&val, optval, optlen))
return -EFAULT;
params.assoc_id = 0;
@@ -3121,10 +3131,8 @@ static int sctp_setsockopt_maxburst(struct sock *sk,
int assoc_id = 0;
if (optlen == sizeof(int)) {
- printk(KERN_WARNING
- "SCTP: Use of int in max_burst socket option deprecated\n");
- printk(KERN_WARNING
- "SCTP: Use struct sctp_assoc_value instead\n");
+ pr_warn("Use of int in max_burst socket option deprecated\n");
+ pr_warn("Use struct sctp_assoc_value instead\n");
if (copy_from_user(&val, optval, optlen))
return -EFAULT;
} else if (optlen == sizeof(struct sctp_assoc_value)) {
@@ -3595,7 +3603,40 @@ out:
/* The SCTP ioctl handler. */
SCTP_STATIC int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
- return -ENOIOCTLCMD;
+ int rc = -ENOTCONN;
+
+ sctp_lock_sock(sk);
+
+ /*
+ * SEQPACKET-style sockets in LISTENING state are valid, for
+ * SCTP, so only discard TCP-style sockets in LISTENING state.
+ */
+ if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))
+ goto out;
+
+ switch (cmd) {
+ case SIOCINQ: {
+ struct sk_buff *skb;
+ unsigned int amount = 0;
+
+ skb = skb_peek(&sk->sk_receive_queue);
+ if (skb != NULL) {
+ /*
+ * We will only return the amount of this packet since
+ * that is all that will be read.
+ */
+ amount = skb->len;
+ }
+ rc = put_user(amount, (int __user *)arg);
+ break;
+ }
+ default:
+ rc = -ENOIOCTLCMD;
+ break;
+ }
+out:
+ sctp_release_sock(sk);
+ return rc;
}
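
With this hunk SCTP sockets gain a working SIOCINQ: instead of -ENOIOCTLCMD the ioctl now reports the length of the message at the head of the receive queue. A user-space sketch of querying it; the socket setup is illustrative and assumes the kernel has SCTP support available:

#include <stdio.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <netinet/in.h>
#include <linux/sockios.h>	/* SIOCINQ */

int main(void)
{
	int fd, avail = 0;

	fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
	if (fd < 0) {
		perror("socket");	/* e.g. SCTP module not loaded */
		return 1;
	}
	/* ... bind/listen and receive some traffic here ... */
	if (ioctl(fd, SIOCINQ, &avail) < 0)
		perror("SIOCINQ");
	else
		printf("next message holds %d bytes\n", avail);
	return 0;
}
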
/* This is the function which gets called during socket creation to
@@ -3854,7 +3895,7 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
}
out:
- return (retval);
+ return retval;
}
@@ -3910,7 +3951,7 @@ static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len,
}
out:
- return (retval);
+ return retval;
}
/* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS)
@@ -4281,9 +4322,8 @@ static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
if (copy_from_user(&params, optval, len))
return -EFAULT;
} else if (len == sizeof(struct sctp_assoc_value)) {
- printk(KERN_WARNING "SCTP: Use of struct sctp_assoc_value "
- "in delayed_ack socket option deprecated\n");
- printk(KERN_WARNING "SCTP: Use struct sctp_sack_info instead\n");
+ pr_warn("Use of struct sctp_assoc_value in delayed_ack socket option deprecated\n");
+ pr_warn("Use struct sctp_sack_info instead\n");
if (copy_from_user(&params, optval, len))
return -EFAULT;
} else
@@ -4929,10 +4969,8 @@ static int sctp_getsockopt_maxseg(struct sock *sk, int len,
struct sctp_association *asoc;
if (len == sizeof(int)) {
- printk(KERN_WARNING
- "SCTP: Use of int in maxseg socket option deprecated\n");
- printk(KERN_WARNING
- "SCTP: Use struct sctp_assoc_value instead\n");
+ pr_warn("Use of int in maxseg socket option deprecated\n");
+ pr_warn("Use struct sctp_assoc_value instead\n");
params.assoc_id = 0;
} else if (len >= sizeof(struct sctp_assoc_value)) {
len = sizeof(struct sctp_assoc_value);
@@ -5023,10 +5061,8 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
struct sctp_association *asoc;
if (len == sizeof(int)) {
- printk(KERN_WARNING
- "SCTP: Use of int in max_burst socket option deprecated\n");
- printk(KERN_WARNING
- "SCTP: Use struct sctp_assoc_value instead\n");
+ pr_warn("Use of int in max_burst socket option deprecated\n");
+ pr_warn("Use struct sctp_assoc_value instead\n");
params.assoc_id = 0;
} else if (len >= sizeof(struct sctp_assoc_value)) {
len = sizeof(struct sctp_assoc_value);
@@ -5569,7 +5605,7 @@ static int sctp_get_port(struct sock *sk, unsigned short snum)
/* Note: sk->sk_num gets filled in if ephemeral port request. */
ret = sctp_get_port_local(sk, &addr);
- return (ret ? 1 : 0);
+ return ret ? 1 : 0;
}
/*
@@ -5586,8 +5622,7 @@ SCTP_STATIC int sctp_listen_start(struct sock *sk, int backlog)
tfm = crypto_alloc_hash(sctp_hmac_alg, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm)) {
if (net_ratelimit()) {
- printk(KERN_INFO
- "SCTP: failed to load transform for %s: %ld\n",
+ pr_info("failed to load transform for %s: %ld\n",
sctp_hmac_alg, PTR_ERR(tfm));
}
return -ENOSYS;
@@ -5716,13 +5751,12 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
mask |= POLLERR;
if (sk->sk_shutdown & RCV_SHUTDOWN)
- mask |= POLLRDHUP;
+ mask |= POLLRDHUP | POLLIN | POLLRDNORM;
if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
/* Is it readable? Reconsider this code with TCP-style support. */
- if (!skb_queue_empty(&sk->sk_receive_queue) ||
- (sk->sk_shutdown & RCV_SHUTDOWN))
+ if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
/* The association is either gone or not ready. */
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 132046cb82fc..d3ae493d234a 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -48,6 +48,8 @@
* be incorporated into the next SCTP release.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/random.h>
@@ -244,10 +246,9 @@ void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
struct dst_entry *dst;
if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
- printk(KERN_WARNING "%s: Reported pmtu %d too low, "
- "using default minimum of %d\n",
- __func__, pmtu,
- SCTP_DEFAULT_MINSEGMENT);
+ pr_warn("%s: Reported pmtu %d too low, using default minimum of %d\n",
+ __func__, pmtu,
+ SCTP_DEFAULT_MINSEGMENT);
/* Use default minimum segment size and disable
* pmtu discovery on this transport.
*/
diff --git a/net/socket.c b/net/socket.c
index 2270b941bcc7..717a5f1c8792 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -535,14 +535,13 @@ void sock_release(struct socket *sock)
}
EXPORT_SYMBOL(sock_release);
-int sock_tx_timestamp(struct msghdr *msg, struct sock *sk,
- union skb_shared_tx *shtx)
+int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags)
{
- shtx->flags = 0;
+ *tx_flags = 0;
if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
- shtx->hardware = 1;
+ *tx_flags |= SKBTX_HW_TSTAMP;
if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
- shtx->software = 1;
+ *tx_flags |= SKBTX_SW_TSTAMP;
return 0;
}
EXPORT_SYMBOL(sock_tx_timestamp);
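
sock_tx_timestamp() now fills a plain flag byte, one bit per requested timestamp type, instead of the old union skb_shared_tx bitfields. A tiny sketch of the flag handling with locally defined bit values; the real SKBTX_* constants live in linux/skbuff.h and the exact values used here are an assumption for illustration only:

#include <stdio.h>

#define SKBTX_HW_TSTAMP	(1 << 0)	/* assumed values, illustrative */
#define SKBTX_SW_TSTAMP	(1 << 1)

static void fill_tx_flags(int want_hw, int want_sw, unsigned char *tx_flags)
{
	*tx_flags = 0;
	if (want_hw)
		*tx_flags |= SKBTX_HW_TSTAMP;
	if (want_sw)
		*tx_flags |= SKBTX_SW_TSTAMP;
}

int main(void)
{
	unsigned char flags;

	fill_tx_flags(1, 0, &flags);
	printf("tx_flags = 0x%02x\n", flags);
	return 0;
}
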
@@ -1919,7 +1918,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
* Afterwards, it will be a kernel pointer. Thus the compiler-assisted
* checking falls down on this.
*/
- if (copy_from_user(ctl_buf, (void __user *)msg_sys.msg_control,
+ if (copy_from_user(ctl_buf,
+ (void __user __force *)msg_sys.msg_control,
ctl_len))
goto out_freectl;
msg_sys.msg_control = ctl_buf;
@@ -3054,14 +3054,19 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
char *optval, int *optlen)
{
mm_segment_t oldfs = get_fs();
+ char __user *uoptval;
+ int __user *uoptlen;
int err;
+ uoptval = (char __user __force *) optval;
+ uoptlen = (int __user __force *) optlen;
+
set_fs(KERNEL_DS);
if (level == SOL_SOCKET)
- err = sock_getsockopt(sock, level, optname, optval, optlen);
+ err = sock_getsockopt(sock, level, optname, uoptval, uoptlen);
else
- err = sock->ops->getsockopt(sock, level, optname, optval,
- optlen);
+ err = sock->ops->getsockopt(sock, level, optname, uoptval,
+ uoptlen);
set_fs(oldfs);
return err;
}
@@ -3071,13 +3076,16 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
char *optval, unsigned int optlen)
{
mm_segment_t oldfs = get_fs();
+ char __user *uoptval;
int err;
+ uoptval = (char __user __force *) optval;
+
set_fs(KERNEL_DS);
if (level == SOL_SOCKET)
- err = sock_setsockopt(sock, level, optname, optval, optlen);
+ err = sock_setsockopt(sock, level, optname, uoptval, optlen);
else
- err = sock->ops->setsockopt(sock, level, optname, optval,
+ err = sock->ops->setsockopt(sock, level, optname, uoptval,
optlen);
set_fs(oldfs);
return err;
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index 443c161eb8bd..3376d7657185 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -18,10 +18,11 @@ config SUNRPC_XPRT_RDMA
If unsure, say N.
config RPCSEC_GSS_KRB5
- tristate "Secure RPC: Kerberos V mechanism (EXPERIMENTAL)"
- depends on SUNRPC && EXPERIMENTAL
+ tristate
+ depends on SUNRPC && CRYPTO
+ prompt "Secure RPC: Kerberos V mechanism" if !(NFS_V4 || NFSD_V4)
+ default y
select SUNRPC_GSS
- select CRYPTO
select CRYPTO_MD5
select CRYPTO_DES
select CRYPTO_CBC
@@ -34,7 +35,7 @@ config RPCSEC_GSS_KRB5
available from http://linux-nfs.org/. In addition, user-space
Kerberos support should be installed.
- If unsure, say N.
+ If unsure, say Y.
config RPCSEC_GSS_SPKM3
tristate "Secure RPC: SPKM3 mechanism (EXPERIMENTAL)"
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 8dc47f1d0001..36cb66022a27 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -19,6 +19,15 @@
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
+#define RPC_CREDCACHE_DEFAULT_HASHBITS (4)
+struct rpc_cred_cache {
+ struct hlist_head *hashtable;
+ unsigned int hashbits;
+ spinlock_t lock;
+};
+
+static unsigned int auth_hashbits = RPC_CREDCACHE_DEFAULT_HASHBITS;
+
static DEFINE_SPINLOCK(rpc_authflavor_lock);
static const struct rpc_authops *auth_flavors[RPC_AUTH_MAXFLAVOR] = {
&authnull_ops, /* AUTH_NULL */
@@ -29,6 +38,47 @@ static const struct rpc_authops *auth_flavors[RPC_AUTH_MAXFLAVOR] = {
static LIST_HEAD(cred_unused);
static unsigned long number_cred_unused;
+#define MAX_HASHTABLE_BITS (10)
+static int param_set_hashtbl_sz(const char *val, const struct kernel_param *kp)
+{
+ unsigned long num;
+ unsigned int nbits;
+ int ret;
+
+ if (!val)
+ goto out_inval;
+ ret = strict_strtoul(val, 0, &num);
+ if (ret == -EINVAL)
+ goto out_inval;
+ nbits = fls(num);
+ if (num > (1U << nbits))
+ nbits++;
+ if (nbits > MAX_HASHTABLE_BITS || nbits < 2)
+ goto out_inval;
+ *(unsigned int *)kp->arg = nbits;
+ return 0;
+out_inval:
+ return -EINVAL;
+}
+
+static int param_get_hashtbl_sz(char *buffer, const struct kernel_param *kp)
+{
+ unsigned int nbits;
+
+ nbits = *(unsigned int *)kp->arg;
+ return sprintf(buffer, "%u", 1U << nbits);
+}
+
+#define param_check_hashtbl_sz(name, p) __param_check(name, p, unsigned int);
+
+static struct kernel_param_ops param_ops_hashtbl_sz = {
+ .set = param_set_hashtbl_sz,
+ .get = param_get_hashtbl_sz,
+};
+
+module_param_named(auth_hashtable_size, auth_hashbits, hashtbl_sz, 0644);
+MODULE_PARM_DESC(auth_hashtable_size, "RPC credential cache hashtable size");
+
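
The auth_hashtable_size parameter stores a bit count rather than a bucket count: param_set_hashtbl_sz() turns the number written by the administrator into hash bits with fls() and bounds the result by MAX_HASHTABLE_BITS. A user-space mirror of that arithmetic, with fls() emulated via __builtin_clzl; it follows the hunk above and implies nothing beyond it:

#include <stdio.h>

/* fls(): 1-based index of the most significant set bit, 0 for 0. */
static unsigned int fls_emul(unsigned long x)
{
	return x ? (unsigned int)(8 * sizeof(long) - __builtin_clzl(x)) : 0;
}

/* Mirror of the nbits computation in param_set_hashtbl_sz(). */
static int hashtbl_nbits(unsigned long num)
{
	unsigned int nbits = fls_emul(num);

	if (num > (1UL << nbits))
		nbits++;
	if (nbits > 10 || nbits < 2)	/* MAX_HASHTABLE_BITS is 10 */
		return -1;		/* rejected, as with -EINVAL */
	return (int)nbits;
}

int main(void)
{
	unsigned long sizes[] = { 3, 15, 16, 100, 5000 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("requested %lu -> %d hash bits\n",
		       sizes[i], hashtbl_nbits(sizes[i]));
	return 0;
}
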
static u32
pseudoflavor_to_flavor(u32 flavor) {
if (flavor >= RPC_AUTH_MAXFLAVOR)
@@ -145,16 +195,23 @@ int
rpcauth_init_credcache(struct rpc_auth *auth)
{
struct rpc_cred_cache *new;
- int i;
+ unsigned int hashsize;
new = kmalloc(sizeof(*new), GFP_KERNEL);
if (!new)
- return -ENOMEM;
- for (i = 0; i < RPC_CREDCACHE_NR; i++)
- INIT_HLIST_HEAD(&new->hashtable[i]);
+ goto out_nocache;
+ new->hashbits = auth_hashbits;
+ hashsize = 1U << new->hashbits;
+ new->hashtable = kcalloc(hashsize, sizeof(new->hashtable[0]), GFP_KERNEL);
+ if (!new->hashtable)
+ goto out_nohashtbl;
spin_lock_init(&new->lock);
auth->au_credcache = new;
return 0;
+out_nohashtbl:
+ kfree(new);
+out_nocache:
+ return -ENOMEM;
}
EXPORT_SYMBOL_GPL(rpcauth_init_credcache);
@@ -183,11 +240,12 @@ rpcauth_clear_credcache(struct rpc_cred_cache *cache)
LIST_HEAD(free);
struct hlist_head *head;
struct rpc_cred *cred;
+ unsigned int hashsize = 1U << cache->hashbits;
int i;
spin_lock(&rpc_credcache_lock);
spin_lock(&cache->lock);
- for (i = 0; i < RPC_CREDCACHE_NR; i++) {
+ for (i = 0; i < hashsize; i++) {
head = &cache->hashtable[i];
while (!hlist_empty(head)) {
cred = hlist_entry(head->first, struct rpc_cred, cr_hash);
@@ -216,6 +274,7 @@ rpcauth_destroy_credcache(struct rpc_auth *auth)
if (cache) {
auth->au_credcache = NULL;
rpcauth_clear_credcache(cache);
+ kfree(cache->hashtable);
kfree(cache);
}
}
@@ -297,7 +356,7 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
*entry, *new;
unsigned int nr;
- nr = hash_long(acred->uid, RPC_CREDCACHE_HASHBITS);
+ nr = hash_long(acred->uid, cache->hashbits);
rcu_read_lock();
hlist_for_each_entry_rcu(entry, pos, &cache->hashtable[nr], cr_hash) {
@@ -390,16 +449,16 @@ rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred,
}
EXPORT_SYMBOL_GPL(rpcauth_init_cred);
-void
+struct rpc_cred *
rpcauth_generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred, int lookupflags)
{
- task->tk_msg.rpc_cred = get_rpccred(cred);
dprintk("RPC: %5u holding %s cred %p\n", task->tk_pid,
cred->cr_auth->au_ops->au_name, cred);
+ return get_rpccred(cred);
}
EXPORT_SYMBOL_GPL(rpcauth_generic_bind_cred);
-static void
+static struct rpc_cred *
rpcauth_bind_root_cred(struct rpc_task *task, int lookupflags)
{
struct rpc_auth *auth = task->tk_client->cl_auth;
@@ -407,45 +466,43 @@ rpcauth_bind_root_cred(struct rpc_task *task, int lookupflags)
.uid = 0,
.gid = 0,
};
- struct rpc_cred *ret;
dprintk("RPC: %5u looking up %s cred\n",
task->tk_pid, task->tk_client->cl_auth->au_ops->au_name);
- ret = auth->au_ops->lookup_cred(auth, &acred, lookupflags);
- if (!IS_ERR(ret))
- task->tk_msg.rpc_cred = ret;
- else
- task->tk_status = PTR_ERR(ret);
+ return auth->au_ops->lookup_cred(auth, &acred, lookupflags);
}
-static void
+static struct rpc_cred *
rpcauth_bind_new_cred(struct rpc_task *task, int lookupflags)
{
struct rpc_auth *auth = task->tk_client->cl_auth;
- struct rpc_cred *ret;
dprintk("RPC: %5u looking up %s cred\n",
task->tk_pid, auth->au_ops->au_name);
- ret = rpcauth_lookupcred(auth, lookupflags);
- if (!IS_ERR(ret))
- task->tk_msg.rpc_cred = ret;
- else
- task->tk_status = PTR_ERR(ret);
+ return rpcauth_lookupcred(auth, lookupflags);
}
-void
+static int
rpcauth_bindcred(struct rpc_task *task, struct rpc_cred *cred, int flags)
{
+ struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_cred *new;
int lookupflags = 0;
if (flags & RPC_TASK_ASYNC)
lookupflags |= RPCAUTH_LOOKUP_NEW;
if (cred != NULL)
- cred->cr_ops->crbind(task, cred, lookupflags);
+ new = cred->cr_ops->crbind(task, cred, lookupflags);
else if (flags & RPC_TASK_ROOTCREDS)
- rpcauth_bind_root_cred(task, lookupflags);
+ new = rpcauth_bind_root_cred(task, lookupflags);
else
- rpcauth_bind_new_cred(task, lookupflags);
+ new = rpcauth_bind_new_cred(task, lookupflags);
+ if (IS_ERR(new))
+ return PTR_ERR(new);
+ if (req->rq_cred != NULL)
+ put_rpccred(req->rq_cred);
+ req->rq_cred = new;
+ return 0;
}
void
@@ -484,22 +541,10 @@ out_nodestroy:
}
EXPORT_SYMBOL_GPL(put_rpccred);
-void
-rpcauth_unbindcred(struct rpc_task *task)
-{
- struct rpc_cred *cred = task->tk_msg.rpc_cred;
-
- dprintk("RPC: %5u releasing %s cred %p\n",
- task->tk_pid, cred->cr_auth->au_ops->au_name, cred);
-
- put_rpccred(cred);
- task->tk_msg.rpc_cred = NULL;
-}
-
__be32 *
rpcauth_marshcred(struct rpc_task *task, __be32 *p)
{
- struct rpc_cred *cred = task->tk_msg.rpc_cred;
+ struct rpc_cred *cred = task->tk_rqstp->rq_cred;
dprintk("RPC: %5u marshaling %s cred %p\n",
task->tk_pid, cred->cr_auth->au_ops->au_name, cred);
@@ -510,7 +555,7 @@ rpcauth_marshcred(struct rpc_task *task, __be32 *p)
__be32 *
rpcauth_checkverf(struct rpc_task *task, __be32 *p)
{
- struct rpc_cred *cred = task->tk_msg.rpc_cred;
+ struct rpc_cred *cred = task->tk_rqstp->rq_cred;
dprintk("RPC: %5u validating %s cred %p\n",
task->tk_pid, cred->cr_auth->au_ops->au_name, cred);
@@ -522,7 +567,7 @@ int
rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp,
__be32 *data, void *obj)
{
- struct rpc_cred *cred = task->tk_msg.rpc_cred;
+ struct rpc_cred *cred = task->tk_rqstp->rq_cred;
dprintk("RPC: %5u using %s cred %p to wrap rpc data\n",
task->tk_pid, cred->cr_ops->cr_name, cred);
@@ -536,7 +581,7 @@ int
rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp,
__be32 *data, void *obj)
{
- struct rpc_cred *cred = task->tk_msg.rpc_cred;
+ struct rpc_cred *cred = task->tk_rqstp->rq_cred;
dprintk("RPC: %5u using %s cred %p to unwrap rpc data\n",
task->tk_pid, cred->cr_ops->cr_name, cred);
@@ -550,13 +595,21 @@ rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp,
int
rpcauth_refreshcred(struct rpc_task *task)
{
- struct rpc_cred *cred = task->tk_msg.rpc_cred;
+ struct rpc_cred *cred = task->tk_rqstp->rq_cred;
int err;
+ cred = task->tk_rqstp->rq_cred;
+ if (cred == NULL) {
+ err = rpcauth_bindcred(task, task->tk_msg.rpc_cred, task->tk_flags);
+ if (err < 0)
+ goto out;
+ cred = task->tk_rqstp->rq_cred;
+ };
dprintk("RPC: %5u refreshing %s cred %p\n",
task->tk_pid, cred->cr_auth->au_ops->au_name, cred);
err = cred->cr_ops->crrefresh(task);
+out:
if (err < 0)
task->tk_status = err;
return err;
@@ -565,7 +618,7 @@ rpcauth_refreshcred(struct rpc_task *task)
void
rpcauth_invalcred(struct rpc_task *task)
{
- struct rpc_cred *cred = task->tk_msg.rpc_cred;
+ struct rpc_cred *cred = task->tk_rqstp->rq_cred;
dprintk("RPC: %5u invalidating %s cred %p\n",
task->tk_pid, cred->cr_auth->au_ops->au_name, cred);
@@ -576,7 +629,7 @@ rpcauth_invalcred(struct rpc_task *task)
int
rpcauth_uptodatecred(struct rpc_task *task)
{
- struct rpc_cred *cred = task->tk_msg.rpc_cred;
+ struct rpc_cred *cred = task->tk_rqstp->rq_cred;
return cred == NULL ||
test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0;
@@ -587,14 +640,27 @@ static struct shrinker rpc_cred_shrinker = {
.seeks = DEFAULT_SEEKS,
};
-void __init rpcauth_init_module(void)
+int __init rpcauth_init_module(void)
{
- rpc_init_authunix();
- rpc_init_generic_auth();
+ int err;
+
+ err = rpc_init_authunix();
+ if (err < 0)
+ goto out1;
+ err = rpc_init_generic_auth();
+ if (err < 0)
+ goto out2;
register_shrinker(&rpc_cred_shrinker);
+ return 0;
+out2:
+ rpc_destroy_authunix();
+out1:
+ return err;
}
void __exit rpcauth_remove_module(void)
{
+ rpc_destroy_authunix();
+ rpc_destroy_generic_auth();
unregister_shrinker(&rpc_cred_shrinker);
}
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index 8f623b0f03dd..43162bb3b78f 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -27,7 +27,6 @@ struct generic_cred {
};
static struct rpc_auth generic_auth;
-static struct rpc_cred_cache generic_cred_cache;
static const struct rpc_credops generic_credops;
/*
@@ -55,18 +54,13 @@ struct rpc_cred *rpc_lookup_machine_cred(void)
}
EXPORT_SYMBOL_GPL(rpc_lookup_machine_cred);
-static void
-generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred, int lookupflags)
+static struct rpc_cred *generic_bind_cred(struct rpc_task *task,
+ struct rpc_cred *cred, int lookupflags)
{
struct rpc_auth *auth = task->tk_client->cl_auth;
struct auth_cred *acred = &container_of(cred, struct generic_cred, gc_base)->acred;
- struct rpc_cred *ret;
- ret = auth->au_ops->lookup_cred(auth, acred, lookupflags);
- if (!IS_ERR(ret))
- task->tk_msg.rpc_cred = ret;
- else
- task->tk_status = PTR_ERR(ret);
+ return auth->au_ops->lookup_cred(auth, acred, lookupflags);
}
/*
@@ -159,20 +153,16 @@ out_nomatch:
return 0;
}
-void __init rpc_init_generic_auth(void)
+int __init rpc_init_generic_auth(void)
{
- spin_lock_init(&generic_cred_cache.lock);
+ return rpcauth_init_credcache(&generic_auth);
}
void __exit rpc_destroy_generic_auth(void)
{
- rpcauth_clear_credcache(&generic_cred_cache);
+ rpcauth_destroy_credcache(&generic_auth);
}
-static struct rpc_cred_cache generic_cred_cache = {
- {{ NULL, },},
-};
-
static const struct rpc_authops generic_auth_ops = {
.owner = THIS_MODULE,
.au_name = "Generic",
@@ -183,7 +173,6 @@ static const struct rpc_authops generic_auth_ops = {
static struct rpc_auth generic_auth = {
.au_ops = &generic_auth_ops,
.au_count = ATOMIC_INIT(0),
- .au_credcache = &generic_cred_cache,
};
static const struct rpc_credops generic_credops = {
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 8da2a0e68574..597c493392ad 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -373,7 +373,7 @@ gss_handle_downcall_result(struct gss_cred *gss_cred, struct gss_upcall_msg *gss
static void
gss_upcall_callback(struct rpc_task *task)
{
- struct gss_cred *gss_cred = container_of(task->tk_msg.rpc_cred,
+ struct gss_cred *gss_cred = container_of(task->tk_rqstp->rq_cred,
struct gss_cred, gc_base);
struct gss_upcall_msg *gss_msg = gss_cred->gc_upcall;
struct inode *inode = &gss_msg->inode->vfs_inode;
@@ -502,7 +502,7 @@ static void warn_gssd(void)
static inline int
gss_refresh_upcall(struct rpc_task *task)
{
- struct rpc_cred *cred = task->tk_msg.rpc_cred;
+ struct rpc_cred *cred = task->tk_rqstp->rq_cred;
struct gss_auth *gss_auth = container_of(cred->cr_auth,
struct gss_auth, rpc_auth);
struct gss_cred *gss_cred = container_of(cred,
@@ -928,6 +928,7 @@ gss_do_free_ctx(struct gss_cl_ctx *ctx)
{
dprintk("RPC: gss_free_ctx\n");
+ gss_delete_sec_context(&ctx->gc_gss_ctx);
kfree(ctx->gc_wire_ctx.data);
kfree(ctx);
}
@@ -942,13 +943,7 @@ gss_free_ctx_callback(struct rcu_head *head)
static void
gss_free_ctx(struct gss_cl_ctx *ctx)
{
- struct gss_ctx *gc_gss_ctx;
-
- gc_gss_ctx = rcu_dereference(ctx->gc_gss_ctx);
- rcu_assign_pointer(ctx->gc_gss_ctx, NULL);
call_rcu(&ctx->gc_rcu, gss_free_ctx_callback);
- if (gc_gss_ctx)
- gss_delete_sec_context(&gc_gss_ctx);
}
static void
@@ -1054,7 +1049,7 @@ gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
out:
if (acred->machine_cred != gss_cred->gc_machine_cred)
return 0;
- return (rc->cr_uid == acred->uid);
+ return rc->cr_uid == acred->uid;
}
/*
@@ -1064,12 +1059,12 @@ out:
static __be32 *
gss_marshal(struct rpc_task *task, __be32 *p)
{
- struct rpc_cred *cred = task->tk_msg.rpc_cred;
+ struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_cred *cred = req->rq_cred;
struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
gc_base);
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
__be32 *cred_len;
- struct rpc_rqst *req = task->tk_rqstp;
u32 maj_stat = 0;
struct xdr_netobj mic;
struct kvec iov;
@@ -1119,7 +1114,7 @@ out_put_ctx:
static int gss_renew_cred(struct rpc_task *task)
{
- struct rpc_cred *oldcred = task->tk_msg.rpc_cred;
+ struct rpc_cred *oldcred = task->tk_rqstp->rq_cred;
struct gss_cred *gss_cred = container_of(oldcred,
struct gss_cred,
gc_base);
@@ -1133,7 +1128,7 @@ static int gss_renew_cred(struct rpc_task *task)
new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW);
if (IS_ERR(new))
return PTR_ERR(new);
- task->tk_msg.rpc_cred = new;
+ task->tk_rqstp->rq_cred = new;
put_rpccred(oldcred);
return 0;
}
@@ -1161,7 +1156,7 @@ static int gss_cred_is_negative_entry(struct rpc_cred *cred)
static int
gss_refresh(struct rpc_task *task)
{
- struct rpc_cred *cred = task->tk_msg.rpc_cred;
+ struct rpc_cred *cred = task->tk_rqstp->rq_cred;
int ret = 0;
if (gss_cred_is_negative_entry(cred))
@@ -1172,7 +1167,7 @@ gss_refresh(struct rpc_task *task)
ret = gss_renew_cred(task);
if (ret < 0)
goto out;
- cred = task->tk_msg.rpc_cred;
+ cred = task->tk_rqstp->rq_cred;
}
if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
@@ -1191,7 +1186,7 @@ gss_refresh_null(struct rpc_task *task)
static __be32 *
gss_validate(struct rpc_task *task, __be32 *p)
{
- struct rpc_cred *cred = task->tk_msg.rpc_cred;
+ struct rpc_cred *cred = task->tk_rqstp->rq_cred;
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
__be32 seq;
struct kvec iov;
@@ -1400,7 +1395,7 @@ static int
gss_wrap_req(struct rpc_task *task,
kxdrproc_t encode, void *rqstp, __be32 *p, void *obj)
{
- struct rpc_cred *cred = task->tk_msg.rpc_cred;
+ struct rpc_cred *cred = task->tk_rqstp->rq_cred;
struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
gc_base);
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
@@ -1503,7 +1498,7 @@ static int
gss_unwrap_resp(struct rpc_task *task,
kxdrproc_t decode, void *rqstp, __be32 *p, void *obj)
{
- struct rpc_cred *cred = task->tk_msg.rpc_cred;
+ struct rpc_cred *cred = task->tk_rqstp->rq_cred;
struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
gc_base);
struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
diff --git a/net/sunrpc/auth_gss/gss_generic_token.c b/net/sunrpc/auth_gss/gss_generic_token.c
index 310b78e99456..c586e92bcf76 100644
--- a/net/sunrpc/auth_gss/gss_generic_token.c
+++ b/net/sunrpc/auth_gss/gss_generic_token.c
@@ -76,19 +76,19 @@ static int
der_length_size( int length)
{
if (length < (1<<7))
- return(1);
+ return 1;
else if (length < (1<<8))
- return(2);
+ return 2;
#if (SIZEOF_INT == 2)
else
- return(3);
+ return 3;
#else
else if (length < (1<<16))
- return(3);
+ return 3;
else if (length < (1<<24))
- return(4);
+ return 4;
else
- return(5);
+ return 5;
#endif
}
@@ -121,14 +121,14 @@ der_read_length(unsigned char **buf, int *bufsize)
int ret;
if (*bufsize < 1)
- return(-1);
+ return -1;
sf = *(*buf)++;
(*bufsize)--;
if (sf & 0x80) {
if ((sf &= 0x7f) > ((*bufsize)-1))
- return(-1);
+ return -1;
if (sf > SIZEOF_INT)
- return (-1);
+ return -1;
ret = 0;
for (; sf; sf--) {
ret = (ret<<8) + (*(*buf)++);
@@ -138,7 +138,7 @@ der_read_length(unsigned char **buf, int *bufsize)
ret = sf;
}
- return(ret);
+ return ret;
}
/* returns the length of a token, given the mech oid and the body size */
@@ -148,7 +148,7 @@ g_token_size(struct xdr_netobj *mech, unsigned int body_size)
{
/* set body_size to sequence contents size */
body_size += 2 + (int) mech->len; /* NEED overflow check */
- return(1 + der_length_size(body_size) + body_size);
+ return 1 + der_length_size(body_size) + body_size;
}
EXPORT_SYMBOL_GPL(g_token_size);
@@ -186,27 +186,27 @@ g_verify_token_header(struct xdr_netobj *mech, int *body_size,
int ret = 0;
if ((toksize-=1) < 0)
- return(G_BAD_TOK_HEADER);
+ return G_BAD_TOK_HEADER;
if (*buf++ != 0x60)
- return(G_BAD_TOK_HEADER);
+ return G_BAD_TOK_HEADER;
if ((seqsize = der_read_length(&buf, &toksize)) < 0)
- return(G_BAD_TOK_HEADER);
+ return G_BAD_TOK_HEADER;
if (seqsize != toksize)
- return(G_BAD_TOK_HEADER);
+ return G_BAD_TOK_HEADER;
if ((toksize-=1) < 0)
- return(G_BAD_TOK_HEADER);
+ return G_BAD_TOK_HEADER;
if (*buf++ != 0x06)
- return(G_BAD_TOK_HEADER);
+ return G_BAD_TOK_HEADER;
if ((toksize-=1) < 0)
- return(G_BAD_TOK_HEADER);
+ return G_BAD_TOK_HEADER;
toid.len = *buf++;
if ((toksize-=toid.len) < 0)
- return(G_BAD_TOK_HEADER);
+ return G_BAD_TOK_HEADER;
toid.data = buf;
buf+=toid.len;
@@ -217,17 +217,17 @@ g_verify_token_header(struct xdr_netobj *mech, int *body_size,
to return G_BAD_TOK_HEADER if the token header is in fact bad */
if ((toksize-=2) < 0)
- return(G_BAD_TOK_HEADER);
+ return G_BAD_TOK_HEADER;
if (ret)
- return(ret);
+ return ret;
if (!ret) {
*buf_in = buf;
*body_size = toksize;
}
- return(ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(g_verify_token_header);
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index 415c013ba382..62ac90c62cb1 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -162,5 +162,5 @@ krb5_get_seq_num(struct krb5_ctx *kctx,
*seqnum = ((plain[0]) |
(plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24));
- return (0);
+ return 0;
}
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 2689de39dc78..8b4061049d76 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -331,7 +331,7 @@ gss_delete_sec_context(struct gss_ctx **context_handle)
*context_handle);
if (!*context_handle)
- return(GSS_S_NO_CONTEXT);
+ return GSS_S_NO_CONTEXT;
if ((*context_handle)->internal_ctx_id)
(*context_handle)->mech_type->gm_ops
->gss_delete_sec_context((*context_handle)
diff --git a/net/sunrpc/auth_null.c b/net/sunrpc/auth_null.c
index 1db618f56ecb..a5c36c01707b 100644
--- a/net/sunrpc/auth_null.c
+++ b/net/sunrpc/auth_null.c
@@ -75,7 +75,7 @@ nul_marshal(struct rpc_task *task, __be32 *p)
static int
nul_refresh(struct rpc_task *task)
{
- set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_msg.rpc_cred->cr_flags);
+ set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_rqstp->rq_cred->cr_flags);
return 0;
}
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index aac2f8b4ee21..4cb70dc6e7ad 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -29,7 +29,6 @@ struct unx_cred {
#endif
static struct rpc_auth unix_auth;
-static struct rpc_cred_cache unix_cred_cache;
static const struct rpc_credops unix_credops;
static struct rpc_auth *
@@ -141,7 +140,7 @@ static __be32 *
unx_marshal(struct rpc_task *task, __be32 *p)
{
struct rpc_clnt *clnt = task->tk_client;
- struct unx_cred *cred = container_of(task->tk_msg.rpc_cred, struct unx_cred, uc_base);
+ struct unx_cred *cred = container_of(task->tk_rqstp->rq_cred, struct unx_cred, uc_base);
__be32 *base, *hold;
int i;
@@ -174,7 +173,7 @@ unx_marshal(struct rpc_task *task, __be32 *p)
static int
unx_refresh(struct rpc_task *task)
{
- set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_msg.rpc_cred->cr_flags);
+ set_bit(RPCAUTH_CRED_UPTODATE, &task->tk_rqstp->rq_cred->cr_flags);
return 0;
}
@@ -197,15 +196,20 @@ unx_validate(struct rpc_task *task, __be32 *p)
printk("RPC: giant verf size: %u\n", size);
return NULL;
}
- task->tk_msg.rpc_cred->cr_auth->au_rslack = (size >> 2) + 2;
+ task->tk_rqstp->rq_cred->cr_auth->au_rslack = (size >> 2) + 2;
p += (size >> 2);
return p;
}
-void __init rpc_init_authunix(void)
+int __init rpc_init_authunix(void)
{
- spin_lock_init(&unix_cred_cache.lock);
+ return rpcauth_init_credcache(&unix_auth);
+}
+
+void rpc_destroy_authunix(void)
+{
+ rpcauth_destroy_credcache(&unix_auth);
}
const struct rpc_authops authunix_ops = {
@@ -219,17 +223,12 @@ const struct rpc_authops authunix_ops = {
};
static
-struct rpc_cred_cache unix_cred_cache = {
-};
-
-static
struct rpc_auth unix_auth = {
.au_cslack = UNX_WRITESLACK,
.au_rslack = 2, /* assume AUTH_NULL verf */
.au_ops = &authunix_ops,
.au_flavor = RPC_AUTH_UNIX,
.au_count = ATOMIC_INIT(0),
- .au_credcache = &unix_cred_cache,
};
static
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 58de76c8540c..2b06410e584e 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -34,7 +34,6 @@
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
-#include <linux/smp_lock.h>
#define RPCDBG_FACILITY RPCDBG_CACHE
@@ -320,7 +319,7 @@ static struct cache_detail *current_detail;
static int current_index;
static void do_cache_clean(struct work_struct *work);
-static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean);
+static struct delayed_work cache_cleaner;
static void sunrpc_init_cache_detail(struct cache_detail *cd)
{
@@ -1504,6 +1503,11 @@ static int create_cache_proc_entries(struct cache_detail *cd)
}
#endif
+void __init cache_initialize(void)
+{
+ INIT_DELAYED_WORK_DEFERRABLE(&cache_cleaner, do_cache_clean);
+}
+
int cache_register(struct cache_detail *cd)
{
int ret;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 756fc324db9e..2388d83b68ff 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -414,6 +414,35 @@ out_no_clnt:
EXPORT_SYMBOL_GPL(rpc_clone_client);
/*
+ * Kill all tasks for the given client.
+ * XXX: kill their descendants as well?
+ */
+void rpc_killall_tasks(struct rpc_clnt *clnt)
+{
+ struct rpc_task *rovr;
+
+
+ if (list_empty(&clnt->cl_tasks))
+ return;
+ dprintk("RPC: killing all tasks for client %p\n", clnt);
+ /*
+ * Spin lock all_tasks to prevent changes...
+ */
+ spin_lock(&clnt->cl_lock);
+ list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
+ if (!RPC_IS_ACTIVATED(rovr))
+ continue;
+ if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
+ rovr->tk_flags |= RPC_TASK_KILLED;
+ rpc_exit(rovr, -EIO);
+ rpc_wake_up_queued_task(rovr->tk_waitqueue, rovr);
+ }
+ }
+ spin_unlock(&clnt->cl_lock);
+}
+EXPORT_SYMBOL_GPL(rpc_killall_tasks);
+
+/*
* Properly shut down an RPC client, terminating all outstanding
* requests.
*/
@@ -538,6 +567,49 @@ out:
}
EXPORT_SYMBOL_GPL(rpc_bind_new_program);
+void rpc_task_release_client(struct rpc_task *task)
+{
+ struct rpc_clnt *clnt = task->tk_client;
+
+ if (clnt != NULL) {
+ /* Remove from client task list */
+ spin_lock(&clnt->cl_lock);
+ list_del(&task->tk_task);
+ spin_unlock(&clnt->cl_lock);
+ task->tk_client = NULL;
+
+ rpc_release_client(clnt);
+ }
+}
+
+static
+void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
+{
+ if (clnt != NULL) {
+ rpc_task_release_client(task);
+ task->tk_client = clnt;
+ kref_get(&clnt->cl_kref);
+ if (clnt->cl_softrtry)
+ task->tk_flags |= RPC_TASK_SOFT;
+ /* Add to the client's list of all tasks */
+ spin_lock(&clnt->cl_lock);
+ list_add_tail(&task->tk_task, &clnt->cl_tasks);
+ spin_unlock(&clnt->cl_lock);
+ }
+}
+
+static void
+rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
+{
+ if (msg != NULL) {
+ task->tk_msg.rpc_proc = msg->rpc_proc;
+ task->tk_msg.rpc_argp = msg->rpc_argp;
+ task->tk_msg.rpc_resp = msg->rpc_resp;
+ if (msg->rpc_cred != NULL)
+ task->tk_msg.rpc_cred = get_rpccred(msg->rpc_cred);
+ }
+}
+
/*
* Default callback for async RPC calls
*/
@@ -562,6 +634,18 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
if (IS_ERR(task))
goto out;
+ rpc_task_set_client(task, task_setup_data->rpc_client);
+ rpc_task_set_rpc_message(task, task_setup_data->rpc_message);
+
+ if (task->tk_status != 0) {
+ int ret = task->tk_status;
+ rpc_put_task(task);
+ return ERR_PTR(ret);
+ }
+
+ if (task->tk_action == NULL)
+ rpc_call_start(task);
+
atomic_inc(&task->tk_count);
rpc_execute(task);
out:
@@ -756,12 +840,13 @@ EXPORT_SYMBOL_GPL(rpc_force_rebind);
* Restart an (async) RPC call from the call_prepare state.
* Usually called from within the exit handler.
*/
-void
+int
rpc_restart_call_prepare(struct rpc_task *task)
{
if (RPC_ASSASSINATED(task))
- return;
+ return 0;
task->tk_action = rpc_prepare_task;
+ return 1;
}
EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
@@ -769,13 +854,13 @@ EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
* Restart an (async) RPC call. Usually called from within the
* exit handler.
*/
-void
+int
rpc_restart_call(struct rpc_task *task)
{
if (RPC_ASSASSINATED(task))
- return;
-
+ return 0;
task->tk_action = call_start;
+ return 1;
}
EXPORT_SYMBOL_GPL(rpc_restart_call);
@@ -824,11 +909,6 @@ call_reserve(struct rpc_task *task)
{
dprint_status(task);
- if (!rpcauth_uptodatecred(task)) {
- task->tk_action = call_refresh;
- return;
- }
-
task->tk_status = 0;
task->tk_action = call_reserveresult;
xprt_reserve(task);
@@ -892,7 +972,7 @@ call_reserveresult(struct rpc_task *task)
static void
call_allocate(struct rpc_task *task)
{
- unsigned int slack = task->tk_msg.rpc_cred->cr_auth->au_cslack;
+ unsigned int slack = task->tk_client->cl_auth->au_cslack;
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = task->tk_xprt;
struct rpc_procinfo *proc = task->tk_msg.rpc_proc;
@@ -900,7 +980,7 @@ call_allocate(struct rpc_task *task)
dprint_status(task);
task->tk_status = 0;
- task->tk_action = call_bind;
+ task->tk_action = call_refresh;
if (req->rq_buffer)
return;
@@ -937,6 +1017,47 @@ call_allocate(struct rpc_task *task)
rpc_exit(task, -ERESTARTSYS);
}
+/*
+ * 2a. Bind and/or refresh the credentials
+ */
+static void
+call_refresh(struct rpc_task *task)
+{
+ dprint_status(task);
+
+ task->tk_action = call_refreshresult;
+ task->tk_status = 0;
+ task->tk_client->cl_stats->rpcauthrefresh++;
+ rpcauth_refreshcred(task);
+}
+
+/*
+ * 2b. Process the results of a credential refresh
+ */
+static void
+call_refreshresult(struct rpc_task *task)
+{
+ int status = task->tk_status;
+
+ dprint_status(task);
+
+ task->tk_status = 0;
+ task->tk_action = call_bind;
+ if (status >= 0 && rpcauth_uptodatecred(task))
+ return;
+ switch (status) {
+ case -EACCES:
+ rpc_exit(task, -EACCES);
+ return;
+ case -ENOMEM:
+ rpc_exit(task, -ENOMEM);
+ return;
+ case -ETIMEDOUT:
+ rpc_delay(task, 3*HZ);
+ }
+ task->tk_action = call_refresh;
+}
+
static inline int
rpc_task_need_encode(struct rpc_task *task)
{
@@ -1472,43 +1593,6 @@ out_retry:
}
}
-/*
- * 8. Refresh the credentials if rejected by the server
- */
-static void
-call_refresh(struct rpc_task *task)
-{
- dprint_status(task);
-
- task->tk_action = call_refreshresult;
- task->tk_status = 0;
- task->tk_client->cl_stats->rpcauthrefresh++;
- rpcauth_refreshcred(task);
-}
-
-/*
- * 8a. Process the results of a credential refresh
- */
-static void
-call_refreshresult(struct rpc_task *task)
-{
- int status = task->tk_status;
-
- dprint_status(task);
-
- task->tk_status = 0;
- task->tk_action = call_reserve;
- if (status >= 0 && rpcauth_uptodatecred(task))
- return;
- if (status == -EACCES) {
- rpc_exit(task, -EACCES);
- return;
- }
- task->tk_action = call_refresh;
- if (status != -ETIMEDOUT)
- rpc_delay(task, 3*HZ);
-}
-
static __be32 *
rpc_encode_header(struct rpc_task *task)
{
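The clnt.c hunks above renumber credential refresh from step 8 to steps 2a/2b: call_allocate now hands off to call_refresh, and only an up-to-date credential proceeds to call_bind. A small user-space model of the new call_refreshresult() policy (the enum, helper and main() are illustrative; only the status handling and transitions come from the hunk):

#include <errno.h>
#include <stdio.h>

enum next_step { NEXT_BIND, NEXT_RETRY_REFRESH, NEXT_EXIT };

/* "cred_ok" stands in for rpcauth_uptodatecred(); "delay_secs" for the
 * rpc_delay(task, 3*HZ) back-off taken on a timed-out refresh. */
static enum next_step refresh_result(int status, int cred_ok, int *delay_secs)
{
	*delay_secs = 0;
	if (status >= 0 && cred_ok)
		return NEXT_BIND;		/* task->tk_action = call_bind */
	switch (status) {
	case -EACCES:
	case -ENOMEM:
		return NEXT_EXIT;		/* rpc_exit(task, status) */
	case -ETIMEDOUT:
		*delay_secs = 3;		/* rpc_delay(task, 3*HZ) */
	}
	return NEXT_RETRY_REFRESH;		/* task->tk_action = call_refresh */
}

int main(void)
{
	int delay;

	printf("%d\n", refresh_result(-ETIMEDOUT, 0, &delay));	/* retry, delay=3 */
	printf("%d\n", refresh_result(0, 1, &delay));		/* proceed to bind */
	return 0;
}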
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 4a843b883b89..aa5dbda6608c 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -246,17 +246,8 @@ static inline void rpc_task_set_debuginfo(struct rpc_task *task)
static void rpc_set_active(struct rpc_task *task)
{
- struct rpc_clnt *clnt;
- if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0)
- return;
rpc_task_set_debuginfo(task);
- /* Add to global list of all tasks */
- clnt = task->tk_client;
- if (clnt != NULL) {
- spin_lock(&clnt->cl_lock);
- list_add_tail(&task->tk_task, &clnt->cl_tasks);
- spin_unlock(&clnt->cl_lock);
- }
+ set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
}
/*
@@ -319,11 +310,6 @@ static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
task->tk_pid, rpc_qname(q), jiffies);
- if (!RPC_IS_ASYNC(task) && !RPC_IS_ACTIVATED(task)) {
- printk(KERN_ERR "RPC: Inactive synchronous task put to sleep!\n");
- return;
- }
-
__rpc_add_wait_queue(q, task);
BUG_ON(task->tk_callback != NULL);
@@ -334,8 +320,8 @@ static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
rpc_action action)
{
- /* Mark the task as being activated if so needed */
- rpc_set_active(task);
+ /* We shouldn't ever put an inactive task to sleep */
+ BUG_ON(!RPC_IS_ACTIVATED(task));
/*
* Protect the queue operations.
@@ -390,7 +376,7 @@ int rpc_queue_empty(struct rpc_wait_queue *queue)
spin_lock_bh(&queue->lock);
res = queue->qlen;
spin_unlock_bh(&queue->lock);
- return (res == 0);
+ return res == 0;
}
EXPORT_SYMBOL_GPL(rpc_queue_empty);
@@ -406,14 +392,6 @@ void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
/*
- * Wake up the specified task
- */
-static void rpc_wake_up_task(struct rpc_task *task)
-{
- rpc_wake_up_queued_task(task->tk_waitqueue, task);
-}
-
-/*
* Wake up the next task on a priority queue.
*/
static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
@@ -600,7 +578,15 @@ void rpc_exit_task(struct rpc_task *task)
}
}
}
-EXPORT_SYMBOL_GPL(rpc_exit_task);
+
+void rpc_exit(struct rpc_task *task, int status)
+{
+ task->tk_status = status;
+ task->tk_action = rpc_exit_task;
+ if (RPC_IS_QUEUED(task))
+ rpc_wake_up_queued_task(task->tk_waitqueue, task);
+}
+EXPORT_SYMBOL_GPL(rpc_exit);
void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
@@ -690,7 +676,6 @@ static void __rpc_execute(struct rpc_task *task)
dprintk("RPC: %5u got signal\n", task->tk_pid);
task->tk_flags |= RPC_TASK_KILLED;
rpc_exit(task, -ERESTARTSYS);
- rpc_wake_up_task(task);
}
rpc_set_running(task);
dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
@@ -714,8 +699,9 @@ static void __rpc_execute(struct rpc_task *task)
void rpc_execute(struct rpc_task *task)
{
rpc_set_active(task);
- rpc_set_running(task);
- __rpc_execute(task);
+ rpc_make_runnable(task);
+ if (!RPC_IS_ASYNC(task))
+ __rpc_execute(task);
}
static void rpc_async_schedule(struct work_struct *work)
@@ -808,26 +794,9 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
/* Initialize workqueue for async tasks */
task->tk_workqueue = task_setup_data->workqueue;
- task->tk_client = task_setup_data->rpc_client;
- if (task->tk_client != NULL) {
- kref_get(&task->tk_client->cl_kref);
- if (task->tk_client->cl_softrtry)
- task->tk_flags |= RPC_TASK_SOFT;
- }
-
if (task->tk_ops->rpc_call_prepare != NULL)
task->tk_action = rpc_prepare_task;
- if (task_setup_data->rpc_message != NULL) {
- task->tk_msg.rpc_proc = task_setup_data->rpc_message->rpc_proc;
- task->tk_msg.rpc_argp = task_setup_data->rpc_message->rpc_argp;
- task->tk_msg.rpc_resp = task_setup_data->rpc_message->rpc_resp;
- /* Bind the user cred */
- rpcauth_bindcred(task, task_setup_data->rpc_message->rpc_cred, task_setup_data->flags);
- if (task->tk_action == NULL)
- rpc_call_start(task);
- }
-
/* starting timestamp */
task->tk_start = ktime_get();
@@ -896,11 +865,8 @@ void rpc_put_task(struct rpc_task *task)
if (task->tk_rqstp)
xprt_release(task);
if (task->tk_msg.rpc_cred)
- rpcauth_unbindcred(task);
- if (task->tk_client) {
- rpc_release_client(task->tk_client);
- task->tk_client = NULL;
- }
+ put_rpccred(task->tk_msg.rpc_cred);
+ rpc_task_release_client(task);
if (task->tk_workqueue != NULL) {
INIT_WORK(&task->u.tk_work, rpc_async_release);
queue_work(task->tk_workqueue, &task->u.tk_work);
@@ -913,13 +879,6 @@ static void rpc_release_task(struct rpc_task *task)
{
dprintk("RPC: %5u release task\n", task->tk_pid);
- if (!list_empty(&task->tk_task)) {
- struct rpc_clnt *clnt = task->tk_client;
- /* Remove from client task list */
- spin_lock(&clnt->cl_lock);
- list_del(&task->tk_task);
- spin_unlock(&clnt->cl_lock);
- }
BUG_ON (RPC_IS_QUEUED(task));
/* Wake up anyone who is waiting for task completion */
@@ -928,35 +887,6 @@ static void rpc_release_task(struct rpc_task *task)
rpc_put_task(task);
}
-/*
- * Kill all tasks for the given client.
- * XXX: kill their descendants as well?
- */
-void rpc_killall_tasks(struct rpc_clnt *clnt)
-{
- struct rpc_task *rovr;
-
-
- if (list_empty(&clnt->cl_tasks))
- return;
- dprintk("RPC: killing all tasks for client %p\n", clnt);
- /*
- * Spin lock all_tasks to prevent changes...
- */
- spin_lock(&clnt->cl_lock);
- list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
- if (! RPC_IS_ACTIVATED(rovr))
- continue;
- if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
- rovr->tk_flags |= RPC_TASK_KILLED;
- rpc_exit(rovr, -EIO);
- rpc_wake_up_task(rovr);
- }
- }
- spin_unlock(&clnt->cl_lock);
-}
-EXPORT_SYMBOL_GPL(rpc_killall_tasks);
-
int rpciod_up(void)
{
return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index f438347d817b..c0d085013a2b 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -33,21 +33,27 @@ init_sunrpc(void)
if (err)
goto out;
err = rpc_init_mempool();
- if (err) {
- unregister_rpc_pipefs();
- goto out;
- }
+ if (err)
+ goto out2;
+ err = rpcauth_init_module();
+ if (err)
+ goto out3;
#ifdef RPC_DEBUG
rpc_register_sysctl();
#endif
#ifdef CONFIG_PROC_FS
rpc_proc_init();
#endif
+ cache_initialize();
cache_register(&ip_map_cache);
cache_register(&unix_gid_cache);
svc_init_xprt_sock(); /* svc sock transport */
init_socket_xprt(); /* clnt sock transport */
- rpcauth_init_module();
+ return 0;
+out3:
+ rpc_destroy_mempool();
+out2:
+ unregister_rpc_pipefs();
out:
return err;
}
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index dcd0132396ba..970fb00f388c 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1032,6 +1032,8 @@ void xprt_release(struct rpc_task *task)
spin_unlock_bh(&xprt->transport_lock);
if (req->rq_buffer)
xprt->ops->buf_free(req->rq_buffer);
+ if (req->rq_cred != NULL)
+ put_rpccred(req->rq_cred);
task->tk_rqstp = NULL;
if (req->rq_release_snd_buf)
req->rq_release_snd_buf(req);
@@ -1129,6 +1131,7 @@ static void xprt_destroy(struct kref *kref)
rpc_destroy_wait_queue(&xprt->sending);
rpc_destroy_wait_queue(&xprt->resend);
rpc_destroy_wait_queue(&xprt->backlog);
+ cancel_work_sync(&xprt->task_cleanup);
/*
* Tear down transport state and free the rpc_xprt
*/
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index e5e28d1946a4..2ac3f6e8adff 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -249,6 +249,8 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
req->rl_nchunks = nchunks;
BUG_ON(nchunks == 0);
+ BUG_ON((r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR)
+ && (nchunks > 3));
/*
* finish off header. If write, marshal discrim and nchunks.
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 27015c6d8eb5..5f4c7b3bc711 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -650,10 +650,22 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
ep->rep_attr.cap.max_send_wr = cdata->max_requests;
switch (ia->ri_memreg_strategy) {
case RPCRDMA_FRMR:
- /* Add room for frmr register and invalidate WRs */
- ep->rep_attr.cap.max_send_wr *= 3;
- if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr)
- return -EINVAL;
+ /* Add room for frmr register and invalidate WRs.
+ * 1. FRMR reg WR for head
+ * 2. FRMR invalidate WR for head
+ * 3. FRMR reg WR for pagelist
+ * 4. FRMR invalidate WR for pagelist
+ * 5. FRMR reg WR for tail
+ * 6. FRMR invalidate WR for tail
+ * 7. The RDMA_SEND WR
+ */
+ ep->rep_attr.cap.max_send_wr *= 7;
+ if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr) {
+ cdata->max_requests = devattr.max_qp_wr / 7;
+ if (!cdata->max_requests)
+ return -EINVAL;
+ ep->rep_attr.cap.max_send_wr = cdata->max_requests * 7;
+ }
break;
case RPCRDMA_MEMWINDOWS_ASYNC:
case RPCRDMA_MEMWINDOWS:
@@ -1490,7 +1502,7 @@ rpcrdma_register_frmr_external(struct rpcrdma_mr_seg *seg,
memset(&frmr_wr, 0, sizeof frmr_wr);
frmr_wr.opcode = IB_WR_FAST_REG_MR;
frmr_wr.send_flags = 0; /* unsignaled */
- frmr_wr.wr.fast_reg.iova_start = (unsigned long)seg1->mr_dma;
+ frmr_wr.wr.fast_reg.iova_start = seg1->mr_dma;
frmr_wr.wr.fast_reg.page_list = seg1->mr_chunk.rl_mw->r.frmr.fr_pgl;
frmr_wr.wr.fast_reg.page_list_len = i;
frmr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
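The verbs.c change above budgets seven send work requests per RPC when FRMR is in use (register plus invalidate for head, pagelist and tail, plus the RDMA_SEND itself) and shrinks cdata->max_requests when the device's max_qp_wr cannot cover that. A worked example with assumed numbers (16384 for devattr.max_qp_wr, 4096 for the requested max_requests):

#include <stdio.h>

int main(void)
{
	unsigned int max_qp_wr    = 16384;		/* assumed device limit */
	unsigned int max_requests = 4096;		/* assumed cdata->max_requests */
	unsigned int max_send_wr  = max_requests * 7;	/* 28672: exceeds the limit */

	if (max_send_wr > max_qp_wr) {
		max_requests = max_qp_wr / 7;		/* 2340 */
		if (!max_requests)
			return 1;			/* -EINVAL in the driver */
		max_send_wr = max_requests * 7;		/* 16380, now fits */
	}
	printf("max_requests=%u max_send_wr=%u\n", max_requests, max_send_wr);
	return 0;
}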
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 7ca65c7005ea..fe9306bf10cc 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -800,7 +800,7 @@ static void xs_udp_data_ready(struct sock *sk, int len)
u32 _xid;
__be32 *xp;
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
dprintk("RPC: xs_udp_data_ready...\n");
if (!(xprt = xprt_from_sock(sk)))
goto out;
@@ -852,7 +852,7 @@ static void xs_udp_data_ready(struct sock *sk, int len)
dropit:
skb_free_datagram(sk, skb);
out:
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
}
static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
@@ -1229,7 +1229,7 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes)
dprintk("RPC: xs_tcp_data_ready...\n");
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
if (!(xprt = xprt_from_sock(sk)))
goto out;
if (xprt->shutdown)
@@ -1248,7 +1248,7 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes)
read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
} while (read > 0);
out:
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
}
/*
@@ -1301,18 +1301,19 @@ static void xs_tcp_state_change(struct sock *sk)
{
struct rpc_xprt *xprt;
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
if (!(xprt = xprt_from_sock(sk)))
goto out;
dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
- dprintk("RPC: state %x conn %d dead %d zapped %d\n",
+ dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n",
sk->sk_state, xprt_connected(xprt),
sock_flag(sk, SOCK_DEAD),
- sock_flag(sk, SOCK_ZAPPED));
+ sock_flag(sk, SOCK_ZAPPED),
+ sk->sk_shutdown);
switch (sk->sk_state) {
case TCP_ESTABLISHED:
- spin_lock_bh(&xprt->transport_lock);
+ spin_lock(&xprt->transport_lock);
if (!xprt_test_and_set_connected(xprt)) {
struct sock_xprt *transport = container_of(xprt,
struct sock_xprt, xprt);
@@ -1326,7 +1327,7 @@ static void xs_tcp_state_change(struct sock *sk)
xprt_wake_pending_tasks(xprt, -EAGAIN);
}
- spin_unlock_bh(&xprt->transport_lock);
+ spin_unlock(&xprt->transport_lock);
break;
case TCP_FIN_WAIT1:
/* The client initiated a shutdown of the socket */
@@ -1364,7 +1365,7 @@ static void xs_tcp_state_change(struct sock *sk)
xs_sock_mark_closed(xprt);
}
out:
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
}
/**
@@ -1375,7 +1376,7 @@ static void xs_error_report(struct sock *sk)
{
struct rpc_xprt *xprt;
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
if (!(xprt = xprt_from_sock(sk)))
goto out;
dprintk("RPC: %s client %p...\n"
@@ -1383,7 +1384,7 @@ static void xs_error_report(struct sock *sk)
__func__, xprt, sk->sk_err);
xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
}
static void xs_write_space(struct sock *sk)
@@ -1415,13 +1416,13 @@ static void xs_write_space(struct sock *sk)
*/
static void xs_udp_write_space(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
/* from net/core/sock.c:sock_def_write_space */
if (sock_writeable(sk))
xs_write_space(sk);
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
}
/**
@@ -1436,13 +1437,13 @@ static void xs_udp_write_space(struct sock *sk)
*/
static void xs_tcp_write_space(struct sock *sk)
{
- read_lock(&sk->sk_callback_lock);
+ read_lock_bh(&sk->sk_callback_lock);
/* from net/core/stream.c:sk_stream_write_space */
if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
xs_write_space(sk);
- read_unlock(&sk->sk_callback_lock);
+ read_unlock_bh(&sk->sk_callback_lock);
}
static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
@@ -1779,10 +1780,25 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt, struct sock_xprt *tra
{
unsigned int state = transport->inet->sk_state;
- if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED)
- return;
- if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT))
- return;
+ if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED) {
+ /* we don't need to abort the connection if the socket
+ * hasn't undergone a shutdown
+ */
+ if (transport->inet->sk_shutdown == 0)
+ return;
+ dprintk("RPC: %s: TCP_CLOSEd and sk_shutdown set to %d\n",
+ __func__, transport->inet->sk_shutdown);
+ }
+ if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT)) {
+ /* we don't need to abort the connection if the socket
+ * hasn't undergone a shutdown
+ */
+ if (transport->inet->sk_shutdown == 0)
+ return;
+ dprintk("RPC: %s: ESTABLISHED/SYN_SENT "
+ "sk_shutdown set to %d\n",
+ __func__, transport->inet->sk_shutdown);
+ }
xs_abort_connection(xprt, transport);
}
@@ -2577,7 +2593,8 @@ void cleanup_socket_xprt(void)
xprt_unregister_transport(&xs_bc_tcp_transport);
}
-static int param_set_uint_minmax(const char *val, struct kernel_param *kp,
+static int param_set_uint_minmax(const char *val,
+ const struct kernel_param *kp,
unsigned int min, unsigned int max)
{
unsigned long num;
@@ -2592,34 +2609,37 @@ static int param_set_uint_minmax(const char *val, struct kernel_param *kp,
return 0;
}
-static int param_set_portnr(const char *val, struct kernel_param *kp)
+static int param_set_portnr(const char *val, const struct kernel_param *kp)
{
return param_set_uint_minmax(val, kp,
RPC_MIN_RESVPORT,
RPC_MAX_RESVPORT);
}
-static int param_get_portnr(char *buffer, struct kernel_param *kp)
-{
- return param_get_uint(buffer, kp);
-}
+static struct kernel_param_ops param_ops_portnr = {
+ .set = param_set_portnr,
+ .get = param_get_uint,
+};
+
#define param_check_portnr(name, p) \
__param_check(name, p, unsigned int);
module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
-static int param_set_slot_table_size(const char *val, struct kernel_param *kp)
+static int param_set_slot_table_size(const char *val,
+ const struct kernel_param *kp)
{
return param_set_uint_minmax(val, kp,
RPC_MIN_SLOT_TABLE,
RPC_MAX_SLOT_TABLE);
}
-static int param_get_slot_table_size(char *buffer, struct kernel_param *kp)
-{
- return param_get_uint(buffer, kp);
-}
+static struct kernel_param_ops param_ops_slot_table_size = {
+ .set = param_set_slot_table_size,
+ .get = param_get_uint,
+};
+
#define param_check_slot_table_size(name, p) \
__param_check(name, p, unsigned int);
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index c048543ffbeb..2ddc351b3be9 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -89,7 +89,7 @@ int tipc_addr_domain_valid(u32 addr)
int tipc_addr_node_valid(u32 addr)
{
- return (tipc_addr_domain_valid(addr) && tipc_node(addr));
+ return tipc_addr_domain_valid(addr) && tipc_node(addr);
}
int tipc_in_scope(u32 domain, u32 addr)
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index a008c6689305..ecfaac10d0b4 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -143,6 +143,19 @@ static void bcbuf_decr_acks(struct sk_buff *buf)
}
+static void bclink_set_last_sent(void)
+{
+ if (bcl->next_out)
+ bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
+ else
+ bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
+}
+
+u32 tipc_bclink_get_last_sent(void)
+{
+ return bcl->fsm_msg_cnt;
+}
+
/**
* bclink_set_gap - set gap according to contents of current deferred pkt queue
*
@@ -171,7 +184,7 @@ static void bclink_set_gap(struct tipc_node *n_ptr)
static int bclink_ack_allowed(u32 n)
{
- return((n % TIPC_MIN_LINK_WIN) == tipc_own_tag);
+ return (n % TIPC_MIN_LINK_WIN) == tipc_own_tag;
}
@@ -237,8 +250,10 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
/* Try resolving broadcast link congestion, if necessary */
- if (unlikely(bcl->next_out))
+ if (unlikely(bcl->next_out)) {
tipc_link_push_queue(bcl);
+ bclink_set_last_sent();
+ }
if (unlikely(released && !list_empty(&bcl->waiting_ports)))
tipc_link_wakeup_ports(bcl, 0);
spin_unlock_bh(&bc_lock);
@@ -395,7 +410,7 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
if (unlikely(res == -ELINKCONG))
buf_discard(buf);
else
- bcl->stats.sent_info++;
+ bclink_set_last_sent();
if (bcl->out_queue_size > bcl->stats.max_queue_sz)
bcl->stats.max_queue_sz = bcl->out_queue_size;
@@ -529,15 +544,6 @@ receive:
tipc_node_unlock(node);
}
-u32 tipc_bclink_get_last_sent(void)
-{
- u32 last_sent = mod(bcl->next_out_no - 1);
-
- if (bcl->next_out)
- last_sent = mod(buf_seqno(bcl->next_out) - 1);
- return last_sent;
-}
-
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
return (n_ptr->bclink.supported &&
@@ -570,6 +576,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
msg = buf_msg(buf);
msg_set_non_seq(msg, 1);
msg_set_mc_netid(msg, tipc_net_id);
+ bcl->stats.sent_info++;
}
/* Send buffer over bearers until all targets reached */
@@ -609,11 +616,13 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
bcbearer->remains = bcbearer->remains_new;
}
- /* Unable to reach all targets */
+ /*
+ * Unable to reach all targets (indicate success, since currently
+ * there isn't code in place to properly block & unblock the
+ * pseudo-bearer used by the broadcast link)
+ */
- bcbearer->bearer.publ.blocked = 1;
- bcl->stats.bearer_congs++;
- return 1;
+ return TIPC_OK;
}
/**
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 52ae17b2583e..9c10c6b7c12b 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -63,7 +63,7 @@ static int media_name_valid(const char *name)
len = strlen(name);
if ((len + 1) > TIPC_MAX_MEDIA_NAME)
return 0;
- return (strspn(name, tipc_alphabet) == len);
+ return strspn(name, tipc_alphabet) == len;
}
/**
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 696468117985..466b861dab91 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -169,6 +169,7 @@ void tipc_core_stop(void)
tipc_nametbl_stop();
tipc_ref_table_stop();
tipc_socket_stop();
+ tipc_log_resize(0);
}
/**
@@ -203,7 +204,9 @@ static int __init tipc_init(void)
{
int res;
- tipc_log_resize(CONFIG_TIPC_LOG);
+ if (tipc_log_resize(CONFIG_TIPC_LOG) != 0)
+ warn("Unable to create log buffer\n");
+
info("Activated (version " TIPC_MOD_VER
" compiled " __DATE__ " " __TIME__ ")\n");
@@ -230,7 +233,6 @@ static void __exit tipc_exit(void)
tipc_core_stop_net();
tipc_core_stop();
info("Deactivated\n");
- tipc_log_resize(0);
}
module_init(tipc_init);
diff --git a/net/tipc/dbg.c b/net/tipc/dbg.c
index 1885a7edb0c8..6569d45bfb9a 100644
--- a/net/tipc/dbg.c
+++ b/net/tipc/dbg.c
@@ -134,7 +134,7 @@ void tipc_printbuf_reset(struct print_buf *pb)
int tipc_printbuf_empty(struct print_buf *pb)
{
- return (!pb->buf || (pb->crs == pb->buf));
+ return !pb->buf || (pb->crs == pb->buf);
}
/**
@@ -169,7 +169,7 @@ int tipc_printbuf_validate(struct print_buf *pb)
tipc_printf(pb, err);
}
}
- return (pb->crs - pb->buf + 1);
+ return pb->crs - pb->buf + 1;
}
/**
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index fc1fcf5e6b53..f28d1ae93125 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -203,6 +203,14 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
return;
}
spin_lock_bh(&n_ptr->lock);
+
+ /* Don't talk to neighbor during cleanup after last session */
+
+ if (n_ptr->cleanup_required) {
+ spin_unlock_bh(&n_ptr->lock);
+ return;
+ }
+
link = n_ptr->links[b_ptr->identity];
if (!link) {
dbg("creating link\n");
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 6230d16020c4..6e988ba485fd 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -72,17 +72,26 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
{
struct sk_buff *clone;
struct net_device *dev;
+ int delta;
clone = skb_clone(buf, GFP_ATOMIC);
- if (clone) {
- skb_reset_network_header(clone);
- dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev;
- clone->dev = dev;
- dev_hard_header(clone, dev, ETH_P_TIPC,
- &dest->dev_addr.eth_addr,
- dev->dev_addr, clone->len);
- dev_queue_xmit(clone);
+ if (!clone)
+ return 0;
+
+ dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev;
+ delta = dev->hard_header_len - skb_headroom(buf);
+
+ if ((delta > 0) &&
+ pskb_expand_head(clone, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC)) {
+ kfree_skb(clone);
+ return 0;
}
+
+ skb_reset_network_header(clone);
+ clone->dev = dev;
+ dev_hard_header(clone, dev, ETH_P_TIPC, &dest->dev_addr.eth_addr,
+ dev->dev_addr, clone->len);
+ dev_queue_xmit(clone);
return 0;
}
@@ -92,15 +101,12 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
* Accept only packets explicitly sent to this node, or broadcast packets;
* ignores packets sent using Ethernet multicast, and traffic sent to other
* nodes (which can happen if interface is running in promiscuous mode).
- * Routine truncates any Ethernet padding/CRC appended to the message,
- * and ensures message size matches actual length
*/
static int recv_msg(struct sk_buff *buf, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct eth_bearer *eb_ptr = (struct eth_bearer *)pt->af_packet_priv;
- u32 size;
if (!net_eq(dev_net(dev), &init_net)) {
kfree_skb(buf);
@@ -109,13 +115,9 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
if (likely(eb_ptr->bearer)) {
if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
- size = msg_size((struct tipc_msg *)buf->data);
- skb_trim(buf, size);
- if (likely(buf->len == size)) {
- buf->next = NULL;
- tipc_recv_msg(buf, eb_ptr->bearer);
- return 0;
- }
+ buf->next = NULL;
+ tipc_recv_msg(buf, eb_ptr->bearer);
+ return 0;
}
}
kfree_skb(buf);
@@ -133,6 +135,16 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
struct eth_bearer *eb_ptr = &eth_bearers[0];
struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
char *driver_name = strchr((const char *)tb_ptr->name, ':') + 1;
+ int pending_dev = 0;
+
+ /* Find unused Ethernet bearer structure */
+
+ while (eb_ptr->dev) {
+ if (!eb_ptr->bearer)
+ pending_dev++;
+ if (++eb_ptr == stop)
+ return pending_dev ? -EAGAIN : -EDQUOT;
+ }
/* Find device with specified name */
diff --git a/net/tipc/link.c b/net/tipc/link.c
index a3616b99529b..b8cf1e9d0b86 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -239,13 +239,13 @@ int tipc_link_is_up(struct link *l_ptr)
{
if (!l_ptr)
return 0;
- return (link_working_working(l_ptr) || link_working_unknown(l_ptr));
+ return link_working_working(l_ptr) || link_working_unknown(l_ptr);
}
int tipc_link_is_active(struct link *l_ptr)
{
- return ((l_ptr->owner->active_links[0] == l_ptr) ||
- (l_ptr->owner->active_links[1] == l_ptr));
+ return (l_ptr->owner->active_links[0] == l_ptr) ||
+ (l_ptr->owner->active_links[1] == l_ptr);
}
/**
@@ -1802,6 +1802,15 @@ static int link_recv_buf_validate(struct sk_buff *buf)
return pskb_may_pull(buf, hdr_size);
}
+/**
+ * tipc_recv_msg - process TIPC messages arriving from off-node
+ * @head: pointer to message buffer chain
+ * @tb_ptr: pointer to bearer message arrived on
+ *
+ * Invoked with no locks held. Bearer pointer must point to a valid bearer
+ * structure (i.e. cannot be NULL), but bearer can be inactive.
+ */
+
void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
{
read_lock_bh(&tipc_net_lock);
@@ -1819,6 +1828,11 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
head = head->next;
+ /* Ensure bearer is still enabled */
+
+ if (unlikely(!b_ptr->active))
+ goto cont;
+
/* Ensure message is well-formed */
if (unlikely(!link_recv_buf_validate(buf)))
@@ -1855,13 +1869,22 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
goto cont;
}
- /* Locate unicast link endpoint that should handle message */
+ /* Locate neighboring node that sent message */
n_ptr = tipc_node_find(msg_prevnode(msg));
if (unlikely(!n_ptr))
goto cont;
tipc_node_lock(n_ptr);
+ /* Don't talk to neighbor during cleanup after last session */
+
+ if (n_ptr->cleanup_required) {
+ tipc_node_unlock(n_ptr);
+ goto cont;
+ }
+
+ /* Locate unicast link endpoint that should handle message */
+
l_ptr = n_ptr->links[b_ptr->identity];
if (unlikely(!l_ptr)) {
tipc_node_unlock(n_ptr);
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 2e5385c47d30..26151d30589d 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -279,12 +279,12 @@ static inline int between(u32 lower, u32 upper, u32 n)
static inline int less_eq(u32 left, u32 right)
{
- return (mod(right - left) < 32768u);
+ return mod(right - left) < 32768u;
}
static inline int less(u32 left, u32 right)
{
- return (less_eq(left, right) && (mod(right) != mod(left)));
+ return less_eq(left, right) && (mod(right) != mod(left));
}
static inline u32 lesser(u32 left, u32 right)
@@ -299,32 +299,32 @@ static inline u32 lesser(u32 left, u32 right)
static inline int link_working_working(struct link *l_ptr)
{
- return (l_ptr->state == WORKING_WORKING);
+ return l_ptr->state == WORKING_WORKING;
}
static inline int link_working_unknown(struct link *l_ptr)
{
- return (l_ptr->state == WORKING_UNKNOWN);
+ return l_ptr->state == WORKING_UNKNOWN;
}
static inline int link_reset_unknown(struct link *l_ptr)
{
- return (l_ptr->state == RESET_UNKNOWN);
+ return l_ptr->state == RESET_UNKNOWN;
}
static inline int link_reset_reset(struct link *l_ptr)
{
- return (l_ptr->state == RESET_RESET);
+ return l_ptr->state == RESET_RESET;
}
static inline int link_blocked(struct link *l_ptr)
{
- return (l_ptr->exp_msg_count || l_ptr->blocked);
+ return l_ptr->exp_msg_count || l_ptr->blocked;
}
static inline int link_congested(struct link *l_ptr)
{
- return (l_ptr->out_queue_size >= l_ptr->queue_limit[0]);
+ return l_ptr->out_queue_size >= l_ptr->queue_limit[0];
}
#endif
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 995d2da35b01..031aad18efce 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -104,7 +104,7 @@ static inline u32 msg_user(struct tipc_msg *m)
static inline u32 msg_isdata(struct tipc_msg *m)
{
- return (msg_user(m) <= TIPC_CRITICAL_IMPORTANCE);
+ return msg_user(m) <= TIPC_CRITICAL_IMPORTANCE;
}
static inline void msg_set_user(struct tipc_msg *m, u32 n)
@@ -289,7 +289,7 @@ static inline void msg_set_destnode(struct tipc_msg *m, u32 a)
static inline int msg_is_dest(struct tipc_msg *m, u32 d)
{
- return(msg_short(m) || (msg_destnode(m) == d));
+ return msg_short(m) || (msg_destnode(m) == d);
}
static inline u32 msg_routed(struct tipc_msg *m)
@@ -632,7 +632,7 @@ static inline void msg_set_bcast_tag(struct tipc_msg *m, u32 n)
static inline u32 msg_max_pkt(struct tipc_msg *m)
{
- return (msg_bits(m, 9, 16, 0xffff) * 4);
+ return msg_bits(m, 9, 16, 0xffff) * 4;
}
static inline void msg_set_max_pkt(struct tipc_msg *m, u32 n)
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 8ba79620db3f..9ca4b0689237 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -116,7 +116,7 @@ DEFINE_RWLOCK(tipc_nametbl_lock);
static int hash(int x)
{
- return(x & (tipc_nametbl_size - 1));
+ return x & (tipc_nametbl_size - 1);
}
/**
@@ -613,8 +613,7 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
}
/*
- * tipc_nametbl_translate(): Translate tipc_name -> tipc_portid.
- * Very time-critical.
+ * tipc_nametbl_translate - translate name to port id
*
* Note: on entry 'destnode' is the search domain used during translation;
* on exit it passes back the node address of the matching port (if any)
@@ -685,7 +684,6 @@ found:
}
spin_unlock_bh(&seq->lock);
not_found:
- *destnode = 0;
read_unlock_bh(&tipc_nametbl_lock);
return 0;
}
@@ -877,7 +875,7 @@ static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
u32 index)
{
char portIdStr[27];
- char *scopeStr;
+ const char *scope_str[] = {"", " zone", " cluster", " node"};
struct publication *publ = sseq->zone_list;
tipc_printf(buf, "%-10u %-10u ", sseq->lower, sseq->upper);
@@ -893,15 +891,8 @@ static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
tipc_node(publ->node), publ->ref);
tipc_printf(buf, "%-26s ", portIdStr);
if (depth > 3) {
- if (publ->node != tipc_own_addr)
- scopeStr = "";
- else if (publ->scope == TIPC_NODE_SCOPE)
- scopeStr = "node";
- else if (publ->scope == TIPC_CLUSTER_SCOPE)
- scopeStr = "cluster";
- else
- scopeStr = "zone";
- tipc_printf(buf, "%-10u %s", publ->key, scopeStr);
+ tipc_printf(buf, "%-10u %s", publ->key,
+ scope_str[publ->scope]);
}
publ = publ->zone_list_next;
@@ -951,24 +942,19 @@ static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
static void nametbl_header(struct print_buf *buf, u32 depth)
{
- tipc_printf(buf, "Type ");
-
- if (depth > 1)
- tipc_printf(buf, "Lower Upper ");
- if (depth > 2)
- tipc_printf(buf, "Port Identity ");
- if (depth > 3)
- tipc_printf(buf, "Publication");
-
- tipc_printf(buf, "\n-----------");
-
- if (depth > 1)
- tipc_printf(buf, "--------------------- ");
- if (depth > 2)
- tipc_printf(buf, "-------------------------- ");
- if (depth > 3)
- tipc_printf(buf, "------------------");
-
+ const char *header[] = {
+ "Type ",
+ "Lower Upper ",
+ "Port Identity ",
+ "Publication Scope"
+ };
+
+ int i;
+
+ if (depth > 4)
+ depth = 4;
+ for (i = 0; i < depth; i++)
+ tipc_printf(buf, header[i]);
tipc_printf(buf, "\n");
}
diff --git a/net/tipc/net.c b/net/tipc/net.c
index f61b7694138b..7e05af47a196 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -248,6 +248,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
/* Handle message for another node */
msg_dbg(msg, "NET>SEND>: ");
+ skb_trim(buf, msg_size(msg));
tipc_link_send(buf, dnode, msg_link_selector(msg));
}
diff --git a/net/tipc/node.c b/net/tipc/node.c
index b634942caba5..7c49cd056df7 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -237,23 +237,22 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct link *l_ptr)
int tipc_node_has_active_links(struct tipc_node *n_ptr)
{
- return (n_ptr &&
- ((n_ptr->active_links[0]) || (n_ptr->active_links[1])));
+ return n_ptr->active_links[0] != NULL;
}
int tipc_node_has_redundant_links(struct tipc_node *n_ptr)
{
- return (n_ptr->working_links > 1);
+ return n_ptr->working_links > 1;
}
static int tipc_node_has_active_routes(struct tipc_node *n_ptr)
{
- return (n_ptr && (n_ptr->last_router >= 0));
+ return n_ptr && (n_ptr->last_router >= 0);
}
int tipc_node_is_up(struct tipc_node *n_ptr)
{
- return (tipc_node_has_active_links(n_ptr) || tipc_node_has_active_routes(n_ptr));
+ return tipc_node_has_active_links(n_ptr) || tipc_node_has_active_routes(n_ptr);
}
struct tipc_node *tipc_node_attach_link(struct link *l_ptr)
@@ -384,6 +383,20 @@ static void node_established_contact(struct tipc_node *n_ptr)
tipc_highest_allowed_slave);
}
+static void node_cleanup_finished(unsigned long node_addr)
+{
+ struct tipc_node *n_ptr;
+
+ read_lock_bh(&tipc_net_lock);
+ n_ptr = tipc_node_find(node_addr);
+ if (n_ptr) {
+ tipc_node_lock(n_ptr);
+ n_ptr->cleanup_required = 0;
+ tipc_node_unlock(n_ptr);
+ }
+ read_unlock_bh(&tipc_net_lock);
+}
+
static void node_lost_contact(struct tipc_node *n_ptr)
{
struct cluster *c_ptr;
@@ -458,6 +471,11 @@ static void node_lost_contact(struct tipc_node *n_ptr)
tipc_k_signal((Handler)ns->handle_node_down,
(unsigned long)ns->usr_handle);
}
+
+ /* Prevent re-contact with node until all cleanup is done */
+
+ n_ptr->cleanup_required = 1;
+ tipc_k_signal((Handler)node_cleanup_finished, n_ptr->addr);
}
/**
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 6f990da5d143..45f3db3a595d 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -52,6 +52,7 @@
* @active_links: pointers to active links to node
* @links: pointers to all links to node
* @working_links: number of working links to node (both active and standby)
+ * @cleanup_required: non-zero if cleaning up after a prior loss of contact
* @link_cnt: number of links to node
* @permit_changeover: non-zero if node has redundant links to this system
* @routers: bitmap (used for multicluster communication)
@@ -78,6 +79,7 @@ struct tipc_node {
struct link *links[MAX_BEARERS];
int link_cnt;
int working_links;
+ int cleanup_required;
int permit_changeover;
u32 routers[512/32];
int last_router;
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 0737680e9266..d760336f2ca8 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -588,19 +588,10 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
if (!p_ptr) {
err = TIPC_ERR_NO_PORT;
} else if (p_ptr->publ.connected) {
- if (port_peernode(p_ptr) != msg_orignode(msg))
+ if ((port_peernode(p_ptr) != msg_orignode(msg)) ||
+ (port_peerport(p_ptr) != msg_origport(msg))) {
err = TIPC_ERR_NO_PORT;
- if (port_peerport(p_ptr) != msg_origport(msg))
- err = TIPC_ERR_NO_PORT;
- if (!err && msg_routed(msg)) {
- u32 seqno = msg_transp_seqno(msg);
- u32 myno = ++p_ptr->last_in_seqno;
- if (seqno != myno) {
- err = TIPC_ERR_NO_PORT;
- abort_buf = port_build_self_abort_msg(p_ptr, err);
- }
- }
- if (msg_type(msg) == CONN_ACK) {
+ } else if (msg_type(msg) == CONN_ACK) {
int wakeup = tipc_port_congested(p_ptr) &&
p_ptr->publ.congested &&
p_ptr->wakeup;
@@ -1473,7 +1464,7 @@ int tipc_forward2name(u32 ref,
msg_set_destnode(msg, destnode);
msg_set_destport(msg, destport);
- if (likely(destport || destnode)) {
+ if (likely(destport)) {
p_ptr->sent++;
if (likely(destnode == tipc_own_addr))
return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
@@ -1551,7 +1542,7 @@ int tipc_forward_buf2name(u32 ref,
skb_push(buf, LONG_H_SIZE);
skb_copy_to_linear_data(buf, msg, LONG_H_SIZE);
msg_dbg(buf_msg(buf),"PREP:");
- if (likely(destport || destnode)) {
+ if (likely(destport)) {
p_ptr->sent++;
if (destnode == tipc_own_addr)
return tipc_port_recv_msg(buf);
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 8d1652aab298..e74bd9563739 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -157,7 +157,7 @@ static inline u32 tipc_peer_node(struct port *p_ptr)
static inline int tipc_port_congested(struct port *p_ptr)
{
- return((p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2));
+ return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2);
}
/**
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 66e889ba48fd..33217fc3d697 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -64,6 +64,7 @@ struct tipc_sock {
struct sock sk;
struct tipc_port *p;
struct tipc_portid peer_name;
+ long conn_timeout;
};
#define tipc_sk(sk) ((struct tipc_sock *)(sk))
@@ -240,9 +241,9 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
sock->state = state;
sock_init_data(sock, sk);
- sk->sk_rcvtimeo = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
sk->sk_backlog_rcv = backlog_rcv;
tipc_sk(sk)->p = tp_ptr;
+ tipc_sk(sk)->conn_timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
spin_unlock_bh(tp_ptr->lock);
@@ -429,36 +430,55 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
* to handle any preventable race conditions, so TIPC will do the same ...
*
* TIPC sets the returned events as follows:
- * a) POLLRDNORM and POLLIN are set if the socket's receive queue is non-empty
- * or if a connection-oriented socket is does not have an active connection
- * (i.e. a read operation will not block).
- * b) POLLOUT is set except when a socket's connection has been terminated
- * (i.e. a write operation will not block).
- * c) POLLHUP is set when a socket's connection has been terminated.
- *
- * IMPORTANT: The fact that a read or write operation will not block does NOT
- * imply that the operation will succeed!
+ *
+ * socket state flags set
+ * ------------ ---------
+ * unconnected no read flags
+ * no write flags
+ *
+ * connecting POLLIN/POLLRDNORM if ACK/NACK in rx queue
+ * no write flags
+ *
+ * connected POLLIN/POLLRDNORM if data in rx queue
+ * POLLOUT if port is not congested
+ *
+ * disconnecting POLLIN/POLLRDNORM/POLLHUP
+ * no write flags
+ *
+ * listening POLLIN if SYN in rx queue
+ * no write flags
+ *
+ * ready POLLIN/POLLRDNORM if data in rx queue
+ * [connectionless] POLLOUT (since port cannot be congested)
+ *
+ * IMPORTANT: The fact that a read or write operation is indicated does NOT
+ * imply that the operation will succeed, merely that it should be performed
+ * and will not block.
*/
static unsigned int poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
- u32 mask;
+ u32 mask = 0;
poll_wait(file, sk_sleep(sk), wait);
- if (!skb_queue_empty(&sk->sk_receive_queue) ||
- (sock->state == SS_UNCONNECTED) ||
- (sock->state == SS_DISCONNECTING))
- mask = (POLLRDNORM | POLLIN);
- else
- mask = 0;
-
- if (sock->state == SS_DISCONNECTING)
- mask |= POLLHUP;
- else
- mask |= POLLOUT;
+ switch ((int)sock->state) {
+ case SS_READY:
+ case SS_CONNECTED:
+ if (!tipc_sk_port(sk)->congested)
+ mask |= POLLOUT;
+ /* fall thru' */
+ case SS_CONNECTING:
+ case SS_LISTENING:
+ if (!skb_queue_empty(&sk->sk_receive_queue))
+ mask |= (POLLIN | POLLRDNORM);
+ break;
+ case SS_DISCONNECTING:
+ mask = (POLLIN | POLLRDNORM | POLLHUP);
+ break;
+ }
return mask;
}
@@ -1026,9 +1046,8 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
struct sk_buff *buf;
struct tipc_msg *msg;
unsigned int sz;
- int sz_to_copy;
+ int sz_to_copy, target, needed;
int sz_copied = 0;
- int needed;
char __user *crs = m->msg_iov->iov_base;
unsigned char *buf_crs;
u32 err;
@@ -1050,6 +1069,8 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
goto exit;
}
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
+
restart:
/* Look for a message in receive queue; wait if necessary */
@@ -1138,7 +1159,7 @@ restart:
if ((sz_copied < buf_len) && /* didn't get all requested data */
(!skb_queue_empty(&sk->sk_receive_queue) ||
- (flags & MSG_WAITALL)) && /* and more is ready or required */
+ (sz_copied < target)) && /* and more is ready or required */
(!(flags & MSG_PEEK)) && /* and aren't just peeking at data */
(!err)) /* and haven't reached a FIN */
goto restart;
@@ -1174,7 +1195,7 @@ static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base)
if (msg_connected(msg))
threshold *= 4;
- return (queue_size >= threshold);
+ return queue_size >= threshold;
}
/**
@@ -1365,6 +1386,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
struct msghdr m = {NULL,};
struct sk_buff *buf;
struct tipc_msg *msg;
+ long timeout;
int res;
lock_sock(sk);
@@ -1379,7 +1401,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
/* For now, TIPC does not support the non-blocking form of connect() */
if (flags & O_NONBLOCK) {
- res = -EWOULDBLOCK;
+ res = -EOPNOTSUPP;
goto exit;
}
@@ -1425,11 +1447,12 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
/* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
+ timeout = tipc_sk(sk)->conn_timeout;
release_sock(sk);
res = wait_event_interruptible_timeout(*sk_sleep(sk),
(!skb_queue_empty(&sk->sk_receive_queue) ||
(sock->state != SS_CONNECTING)),
- sk->sk_rcvtimeo);
+ timeout ? timeout : MAX_SCHEDULE_TIMEOUT);
lock_sock(sk);
if (res > 0) {
@@ -1692,7 +1715,7 @@ static int setsockopt(struct socket *sock,
res = tipc_set_portunreturnable(tport->ref, value);
break;
case TIPC_CONN_TIMEOUT:
- sk->sk_rcvtimeo = msecs_to_jiffies(value);
+ tipc_sk(sk)->conn_timeout = msecs_to_jiffies(value);
/* no need to set "res", since already 0 at this point */
break;
default:
@@ -1747,7 +1770,7 @@ static int getsockopt(struct socket *sock,
res = tipc_portunreturnable(tport->ref, &value);
break;
case TIPC_CONN_TIMEOUT:
- value = jiffies_to_msecs(sk->sk_rcvtimeo);
+ value = jiffies_to_msecs(tipc_sk(sk)->conn_timeout);
/* no need to set "res", since already 0 at this point */
break;
case TIPC_NODE_RECVQ_DEPTH:
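With the connect timeout now stored in tipc_sock->conn_timeout instead of sk_rcvtimeo, it is only reachable through the TIPC_CONN_TIMEOUT socket option (in milliseconds; a value of 0 makes connect() wait indefinitely, per the MAX_SCHEDULE_TIMEOUT fallback above). A minimal user-space sketch, assuming the toolchain's <linux/tipc.h> provides TIPC_CONN_TIMEOUT and using fallback definitions for AF_TIPC/SOL_TIPC where the libc headers lack them:

#include <stdio.h>
#include <sys/socket.h>
#include <linux/tipc.h>

#ifndef AF_TIPC
#define AF_TIPC 30		/* assumed fallback, matches <linux/socket.h> */
#endif
#ifndef SOL_TIPC
#define SOL_TIPC 271		/* assumed fallback, matches <linux/socket.h> */
#endif

int main(void)
{
	int fd = socket(AF_TIPC, SOCK_SEQPACKET, 0);
	unsigned int ms = 5000;		/* 5 second connect timeout */

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(fd, SOL_TIPC, TIPC_CONN_TIMEOUT, &ms, sizeof(ms)) < 0)
		perror("setsockopt TIPC_CONN_TIMEOUT");
	/* connect() on this socket now gives up after ~5s rather than using
	 * the old sk_rcvtimeo-derived default */
	return 0;
}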
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index ab6eab4c45e2..1a5b9a6bd128 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -604,6 +604,6 @@ int tipc_ispublished(struct tipc_name const *name)
{
u32 domain = 0;
- return(tipc_nametbl_translate(name->type, name->instance,&domain) != 0);
+ return tipc_nametbl_translate(name->type, name->instance, &domain) != 0;
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 4414a18c63b4..0ebc777a6660 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -692,6 +692,7 @@ static int unix_autobind(struct socket *sock)
static u32 ordernum = 1;
struct unix_address *addr;
int err;
+ unsigned int retries = 0;
mutex_lock(&u->readlock);
@@ -717,9 +718,17 @@ retry:
if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
addr->hash)) {
spin_unlock(&unix_table_lock);
- /* Sanity yield. It is unusual case, but yet... */
- if (!(ordernum&0xFF))
- yield();
+ /*
+ * __unix_find_socket_byname() may take long time if many names
+ * are already in use.
+ */
+ cond_resched();
+ /* Give up if all names seems to be in use. */
+ if (retries++ == 0xFFFFF) {
+ err = -ENOSPC;
+ kfree(addr);
+ goto out;
+ }
goto retry;
}
addr->hash ^= sk->sk_type;
@@ -1502,6 +1511,8 @@ restart:
goto restart;
}
+ if (sock_flag(other, SOCK_RCVTSTAMP))
+ __net_timestamp(skb);
skb_queue_tail(&other->sk_receive_queue, skb);
unix_state_unlock(other);
other->sk_data_ready(other, len);
@@ -1713,6 +1724,9 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
if (err)
goto out_free;
+ if (sock_flag(sk, SOCK_RCVTSTAMP))
+ __sock_recv_timestamp(msg, sk, skb);
+
if (!siocb->scm) {
siocb->scm = &tmp_scm;
memset(&tmp_scm, 0, sizeof(tmp_scm));
@@ -2024,11 +2038,10 @@ static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table
if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
if (sk->sk_shutdown & RCV_SHUTDOWN)
- mask |= POLLRDHUP;
+ mask |= POLLRDHUP | POLLIN | POLLRDNORM;
/* readable? */
- if (!skb_queue_empty(&sk->sk_receive_queue) ||
- (sk->sk_shutdown & RCV_SHUTDOWN))
+ if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
/* Connection-based need to check for termination and startup */
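With the two af_unix hunks above, AF_UNIX datagram sockets honour SOCK_RCVTSTAMP: the sender's skb is stamped at queue time and the timestamp is delivered to the receiver as ancillary data. A minimal receive-side sketch, assuming standard SO_TIMESTAMP/SCM_TIMESTAMP semantics (the socket path is only an example):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/time.h>

int main(void)
{
	int fd = socket(AF_UNIX, SOCK_DGRAM, 0), on = 1;
	struct sockaddr_un sun = { .sun_family = AF_UNIX };
	char data[256], cbuf[CMSG_SPACE(sizeof(struct timeval))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
			      .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };
	struct cmsghdr *cm;

	strcpy(sun.sun_path, "/tmp/ts.sock");	/* example path */
	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));
	bind(fd, (struct sockaddr *)&sun, sizeof(sun));

	if (recvmsg(fd, &msg, 0) < 0)
		return 1;
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
		if (cm->cmsg_level == SOL_SOCKET && cm->cmsg_type == SCM_TIMESTAMP) {
			struct timeval tv;

			memcpy(&tv, CMSG_DATA(cm), sizeof(tv));
			printf("queued at %ld.%06ld\n",
			       (long)tv.tv_sec, (long)tv.tv_usec);
		}
	return 0;
}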
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 2d1d4c70113c..6583cca0e2ee 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -86,7 +86,7 @@ struct cfg80211_registered_device *wiphy_to_dev(struct wiphy *wiphy)
static inline
bool wiphy_idx_valid(int wiphy_idx)
{
- return (wiphy_idx >= 0);
+ return wiphy_idx >= 0;
}
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 5e86d4e97dce..f7af98dff409 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -507,14 +507,14 @@ static int x25_listen(struct socket *sock, int backlog)
struct sock *sk = sock->sk;
int rc = -EOPNOTSUPP;
- lock_kernel();
+ lock_sock(sk);
if (sk->sk_state != TCP_LISTEN) {
memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN);
sk->sk_max_ack_backlog = backlog;
sk->sk_state = TCP_LISTEN;
rc = 0;
}
- unlock_kernel();
+ release_sock(sk);
return rc;
}
@@ -688,7 +688,6 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
int len, i, rc = 0;
- lock_kernel();
if (!sock_flag(sk, SOCK_ZAPPED) ||
addr_len != sizeof(struct sockaddr_x25) ||
addr->sx25_family != AF_X25) {
@@ -704,12 +703,13 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
}
}
+ lock_sock(sk);
x25_sk(sk)->source_addr = addr->sx25_addr;
x25_insert_socket(sk);
sock_reset_flag(sk, SOCK_ZAPPED);
+ release_sock(sk);
SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
out:
- unlock_kernel();
return rc;
}
@@ -751,7 +751,6 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
struct x25_route *rt;
int rc = 0;
- lock_kernel();
lock_sock(sk);
if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) {
sock->state = SS_CONNECTED;
@@ -829,7 +828,6 @@ out_put_route:
x25_route_put(rt);
out:
release_sock(sk);
- unlock_kernel();
return rc;
}
@@ -869,8 +867,7 @@ static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
struct sk_buff *skb;
int rc = -EINVAL;
- lock_kernel();
- if (!sk || sk->sk_state != TCP_LISTEN)
+ if (!sk)
goto out;
rc = -EOPNOTSUPP;
@@ -878,6 +875,10 @@ static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
goto out;
lock_sock(sk);
+ rc = -EINVAL;
+ if (sk->sk_state != TCP_LISTEN)
+ goto out2;
+
rc = x25_wait_for_data(sk, sk->sk_rcvtimeo);
if (rc)
goto out2;
@@ -897,7 +898,6 @@ static int x25_accept(struct socket *sock, struct socket *newsock, int flags)
out2:
release_sock(sk);
out:
- unlock_kernel();
return rc;
}
@@ -909,7 +909,6 @@ static int x25_getname(struct socket *sock, struct sockaddr *uaddr,
struct x25_sock *x25 = x25_sk(sk);
int rc = 0;
- lock_kernel();
if (peer) {
if (sk->sk_state != TCP_ESTABLISHED) {
rc = -ENOTCONN;
@@ -923,19 +922,6 @@ static int x25_getname(struct socket *sock, struct sockaddr *uaddr,
*uaddr_len = sizeof(*sx25);
out:
- unlock_kernel();
- return rc;
-}
-
-static unsigned int x25_datagram_poll(struct file *file, struct socket *sock,
- poll_table *wait)
-{
- int rc;
-
- lock_kernel();
- rc = datagram_poll(file, sock, wait);
- unlock_kernel();
-
return rc;
}
@@ -1746,7 +1732,7 @@ static const struct proto_ops x25_proto_ops = {
.socketpair = sock_no_socketpair,
.accept = x25_accept,
.getname = x25_getname,
- .poll = x25_datagram_poll,
+ .poll = datagram_poll,
.ioctl = x25_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_x25_ioctl,
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index a3cca0a94346..64f2ae1fdc15 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -101,7 +101,7 @@ resume:
err = -EHOSTUNREACH;
goto error_nolock;
}
- skb_dst_set_noref(skb, dst);
+ skb_dst_set(skb, dst_clone(dst));
x = dst->xfrm;
} while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 2b3ed7ad4933..cbab6e1a8c9c 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1175,9 +1175,8 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
tmpl->mode == XFRM_MODE_BEET) {
remote = &tmpl->id.daddr;
local = &tmpl->saddr;
- family = tmpl->encap_family;
- if (xfrm_addr_any(local, family)) {
- error = xfrm_get_saddr(net, &tmp, remote, family);
+ if (xfrm_addr_any(local, tmpl->encap_family)) {
+ error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
if (error)
goto fail;
local = &tmp;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 5208b12fbfb4..eb96ce52f178 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -656,15 +656,23 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
EXPORT_SYMBOL(xfrm_sad_getinfo);
static int
-xfrm_init_tempsel(struct xfrm_state *x, struct flowi *fl,
- struct xfrm_tmpl *tmpl,
- xfrm_address_t *daddr, xfrm_address_t *saddr,
- unsigned short family)
+xfrm_init_tempstate(struct xfrm_state *x, struct flowi *fl,
+ struct xfrm_tmpl *tmpl,
+ xfrm_address_t *daddr, xfrm_address_t *saddr,
+ unsigned short family)
{
struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
if (!afinfo)
return -1;
- afinfo->init_tempsel(x, fl, tmpl, daddr, saddr);
+ afinfo->init_tempsel(&x->sel, fl);
+
+ if (family != tmpl->encap_family) {
+ xfrm_state_put_afinfo(afinfo);
+ afinfo = xfrm_state_get_afinfo(tmpl->encap_family);
+ if (!afinfo)
+ return -1;
+ }
+ afinfo->init_temprop(x, tmpl, daddr, saddr);
xfrm_state_put_afinfo(afinfo);
return 0;
}
@@ -790,37 +798,38 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
int error = 0;
struct xfrm_state *best = NULL;
u32 mark = pol->mark.v & pol->mark.m;
+ unsigned short encap_family = tmpl->encap_family;
to_put = NULL;
spin_lock_bh(&xfrm_state_lock);
- h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, family);
+ h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h, bydst) {
- if (x->props.family == family &&
+ if (x->props.family == encap_family &&
x->props.reqid == tmpl->reqid &&
(mark & x->mark.m) == x->mark.v &&
!(x->props.flags & XFRM_STATE_WILDRECV) &&
- xfrm_state_addr_check(x, daddr, saddr, family) &&
+ xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
tmpl->mode == x->props.mode &&
tmpl->id.proto == x->id.proto &&
(tmpl->id.spi == x->id.spi || !tmpl->id.spi))
- xfrm_state_look_at(pol, x, fl, family, daddr, saddr,
+ xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr,
&best, &acquire_in_progress, &error);
}
if (best)
goto found;
- h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, family);
+ h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
hlist_for_each_entry(x, entry, net->xfrm.state_bydst+h_wildcard, bydst) {
- if (x->props.family == family &&
+ if (x->props.family == encap_family &&
x->props.reqid == tmpl->reqid &&
(mark & x->mark.m) == x->mark.v &&
!(x->props.flags & XFRM_STATE_WILDRECV) &&
- xfrm_state_addr_check(x, daddr, saddr, family) &&
+ xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
tmpl->mode == x->props.mode &&
tmpl->id.proto == x->id.proto &&
(tmpl->id.spi == x->id.spi || !tmpl->id.spi))
- xfrm_state_look_at(pol, x, fl, family, daddr, saddr,
+ xfrm_state_look_at(pol, x, fl, encap_family, daddr, saddr,
&best, &acquire_in_progress, &error);
}
@@ -829,7 +838,7 @@ found:
if (!x && !error && !acquire_in_progress) {
if (tmpl->id.spi &&
(x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
- tmpl->id.proto, family)) != NULL) {
+ tmpl->id.proto, encap_family)) != NULL) {
to_put = x0;
error = -EEXIST;
goto out;
@@ -839,9 +848,9 @@ found:
error = -ENOMEM;
goto out;
}
- /* Initialize temporary selector matching only
+ /* Initialize temporary state matching only
* to current session. */
- xfrm_init_tempsel(x, fl, tmpl, daddr, saddr, family);
+ xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
memcpy(&x->mark, &pol->mark, sizeof(x->mark));
error = security_xfrm_state_alloc_acquire(x, pol->security, fl->secid);
@@ -856,10 +865,10 @@ found:
x->km.state = XFRM_STATE_ACQ;
list_add(&x->km.all, &net->xfrm.state_all);
hlist_add_head(&x->bydst, net->xfrm.state_bydst+h);
- h = xfrm_src_hash(net, daddr, saddr, family);
+ h = xfrm_src_hash(net, daddr, saddr, encap_family);
hlist_add_head(&x->bysrc, net->xfrm.state_bysrc+h);
if (x->id.spi) {
- h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, family);
+ h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
hlist_add_head(&x->byspi, net->xfrm.state_byspi+h);
}
x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
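
[Editor's note] The xfrm_state.c changes split temporary-state setup in two: the selector is filled from the flow in the inner family, while the addresses, mode and other properties come from the afinfo table of the template's encap_family, looked up separately when the two families differ; the rest of xfrm_state_find() is then keyed on encap_family so acquires are hashed and matched in the outer family. Below is a condensed restatement of that dispatch under a hypothetical name; the hunk above is the authoritative version.

#include <net/xfrm.h>

/* Condensed restatement of the new xfrm_init_tempstate() flow. */
static int init_temp_state(struct xfrm_state *x, struct flowi *fl,
			   struct xfrm_tmpl *tmpl,
			   xfrm_address_t *daddr, xfrm_address_t *saddr,
			   unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);

	if (!afinfo)
		return -1;
	afinfo->init_tempsel(&x->sel, fl);		/* inner family */

	if (family != tmpl->encap_family) {
		xfrm_state_put_afinfo(afinfo);
		afinfo = xfrm_state_get_afinfo(tmpl->encap_family);
		if (!afinfo)
			return -1;
	}
	afinfo->init_temprop(x, tmpl, daddr, saddr);	/* outer family */
	xfrm_state_put_afinfo(afinfo);
	return 0;
}
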
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index ba59983aaffe..8bae6b22c846 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1801,7 +1801,7 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
struct xfrm_user_expire *ue = nlmsg_data(nlh);
struct xfrm_usersa_info *p = &ue->state;
struct xfrm_mark m;
- u32 mark = xfrm_mark_get(attrs, &m);;
+ u32 mark = xfrm_mark_get(attrs, &m);
x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);
@@ -2504,7 +2504,7 @@ static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
if (p->dir > XFRM_POLICY_OUT)
return NULL;
- xp = xfrm_policy_alloc(net, GFP_KERNEL);
+ xp = xfrm_policy_alloc(net, GFP_ATOMIC);
if (xp == NULL) {
*dir = -ENOBUFS;
return NULL;