Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_net.c')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.c | 267
1 file changed, 174 insertions(+), 93 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index dec92928a1cd..0195d361e5e3 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -20,31 +20,58 @@
static struct rxe_recv_sockets recv_sockets;
-int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
-{
- int err;
- unsigned char ll_addr[ETH_ALEN];
-
- ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
- err = dev_mc_add(rxe->ndev, ll_addr);
-
- return err;
-}
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/*
+ * lockdep can report false positive circular locking
+ * dependencies when user-space socket API users or
+ * in-kernel users switch between a tcp and an rdma
+ * transport. Switching between siw and rxe may also
+ * cause problems, because by default sockets are
+ * classified only by family, not by ip protocol,
+ * and the application and the low-level sockets
+ * may use different locks.
+ *
+ * Problems were seen with ksmbd.ko and cifs.ko
+ * when switching transports; use git blame to
+ * find more details.
+ */
+static struct lock_class_key rxe_recv_sk_key[2];
+static struct lock_class_key rxe_recv_slock_key[2];
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
-int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
+static inline void rxe_reclassify_recv_socket(struct socket *sock)
{
- int err;
- unsigned char ll_addr[ETH_ALEN];
-
- ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
- err = dev_mc_del(rxe->ndev, ll_addr);
-
- return err;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct sock *sk = sock->sk;
+
+ if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
+ return;
+
+ switch (sk->sk_family) {
+ case AF_INET:
+ sock_lock_init_class_and_name(sk,
+ "slock-AF_INET-RDMA-RXE-RECV",
+ &rxe_recv_slock_key[0],
+ "sk_lock-AF_INET-RDMA-RXE-RECV",
+ &rxe_recv_sk_key[0]);
+ break;
+ case AF_INET6:
+ sock_lock_init_class_and_name(sk,
+ "slock-AF_INET6-RDMA-RXE-RECV",
+ &rxe_recv_slock_key[1],
+ "sk_lock-AF_INET6-RDMA-RXE-RECV",
+ &rxe_recv_sk_key[1]);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
}
-static struct dst_entry *rxe_find_route4(struct net_device *ndev,
- struct in_addr *saddr,
- struct in_addr *daddr)
+static struct dst_entry *rxe_find_route4(struct rxe_qp *qp,
+ struct net_device *ndev,
+ struct in_addr *saddr,
+ struct in_addr *daddr)
{
struct rtable *rt;
struct flowi4 fl = { { 0 } };
@@ -57,7 +84,7 @@ static struct dst_entry *rxe_find_route4(struct net_device *ndev,
rt = ip_route_output_key(&init_net, &fl);
if (IS_ERR(rt)) {
- pr_err_ratelimited("no route to %pI4\n", &daddr->s_addr);
+ rxe_dbg_qp(qp, "no route to %pI4\n", &daddr->s_addr);
return NULL;
}
@@ -65,7 +92,8 @@ static struct dst_entry *rxe_find_route4(struct net_device *ndev,
}
#if IS_ENABLED(CONFIG_IPV6)
-static struct dst_entry *rxe_find_route6(struct net_device *ndev,
+static struct dst_entry *rxe_find_route6(struct rxe_qp *qp,
+ struct net_device *ndev,
struct in6_addr *saddr,
struct in6_addr *daddr)
{
@@ -82,12 +110,12 @@ static struct dst_entry *rxe_find_route6(struct net_device *ndev,
recv_sockets.sk6->sk, &fl6,
NULL);
if (IS_ERR(ndst)) {
- pr_err_ratelimited("no route to %pI6\n", daddr);
+ rxe_dbg_qp(qp, "no route to %pI6\n", daddr);
return NULL;
}
if (unlikely(ndst->error)) {
- pr_err("no route to %pI6\n", daddr);
+ rxe_dbg_qp(qp, "no route to %pI6\n", daddr);
goto put;
}
@@ -99,7 +127,8 @@ put:
#else
-static struct dst_entry *rxe_find_route6(struct net_device *ndev,
+static struct dst_entry *rxe_find_route6(struct rxe_qp *qp,
+ struct net_device *ndev,
struct in6_addr *saddr,
struct in6_addr *daddr)
{
@@ -127,14 +156,14 @@ static struct dst_entry *rxe_find_route(struct net_device *ndev,
saddr = &av->sgid_addr._sockaddr_in.sin_addr;
daddr = &av->dgid_addr._sockaddr_in.sin_addr;
- dst = rxe_find_route4(ndev, saddr, daddr);
+ dst = rxe_find_route4(qp, ndev, saddr, daddr);
} else if (av->network_type == RXE_NETWORK_TYPE_IPV6) {
struct in6_addr *saddr6;
struct in6_addr *daddr6;
saddr6 = &av->sgid_addr._sockaddr_in6.sin6_addr;
daddr6 = &av->dgid_addr._sockaddr_in6.sin6_addr;
- dst = rxe_find_route6(ndev, saddr6, daddr6);
+ dst = rxe_find_route6(qp, ndev, saddr6, daddr6);
#if IS_ENABLED(CONFIG_IPV6)
if (dst)
qp->dst_cookie =
@@ -167,7 +196,6 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
goto drop;
if (skb_linearize(skb)) {
- pr_err("skb_linearize failed\n");
ib_device_put(&rxe->ib_dev);
goto drop;
}
@@ -179,6 +207,9 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
pkt->mask = RXE_GRH_MASK;
pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);
+ /* remove udp header */
+ skb_pull(skb, sizeof(struct udphdr));
+
rxe_rcv(skb);
return 0;
@@ -209,6 +240,7 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
err = udp_sock_create(net, &udp_cfg, &sock);
if (err < 0)
return ERR_PTR(err);
+ rxe_reclassify_recv_socket(sock);
tnl_cfg.encap_type = 1;
tnl_cfg.encap_rcv = rxe_udp_encap_recv;
@@ -259,6 +291,7 @@ static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
iph->version = IPVERSION;
iph->ihl = sizeof(struct iphdr) >> 2;
+ iph->tot_len = htons(skb->len);
iph->frag_off = df;
iph->protocol = proto;
iph->tos = tos;
@@ -292,19 +325,19 @@ static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
}
-static int prepare4(struct rxe_pkt_info *pkt, struct sk_buff *skb)
+static int prepare4(struct rxe_av *av, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb)
{
struct rxe_qp *qp = pkt->qp;
struct dst_entry *dst;
bool xnet = false;
__be16 df = htons(IP_DF);
- struct rxe_av *av = rxe_get_av(pkt);
struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;
dst = rxe_find_route(skb->dev, qp, av);
if (!dst) {
- pr_err("Host not reachable\n");
+ rxe_dbg_qp(qp, "Host not reachable\n");
return -EHOSTUNREACH;
}
@@ -318,17 +351,17 @@ static int prepare4(struct rxe_pkt_info *pkt, struct sk_buff *skb)
return 0;
}
-static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb)
+static int prepare6(struct rxe_av *av, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb)
{
struct rxe_qp *qp = pkt->qp;
struct dst_entry *dst;
- struct rxe_av *av = rxe_get_av(pkt);
struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;
dst = rxe_find_route(skb->dev, qp, av);
if (!dst) {
- pr_err("Host not reachable\n");
+ rxe_dbg_qp(qp, "Host not reachable\n");
return -EHOSTUNREACH;
}
@@ -343,18 +376,17 @@ static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb)
return 0;
}
-int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc)
+int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb)
{
int err = 0;
if (skb->protocol == htons(ETH_P_IP))
- err = prepare4(pkt, skb);
+ err = prepare4(av, pkt, skb);
else if (skb->protocol == htons(ETH_P_IPV6))
- err = prepare6(pkt, skb);
+ err = prepare6(av, pkt, skb);
- *crc = rxe_icrc_hdr(pkt, skb);
-
- if (ether_addr_equal(skb->dev->dev_addr, rxe_get_av(pkt)->dmac))
+ if (ether_addr_equal(skb->dev->dev_addr, av->dmac))
pkt->mask |= RXE_LOOPBACK_MASK;
return err;
@@ -362,63 +394,106 @@ int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc)
static void rxe_skb_tx_dtor(struct sk_buff *skb)
{
- struct sock *sk = skb->sk;
- struct rxe_qp *qp = sk->sk_user_data;
- int skb_out = atomic_dec_return(&qp->skb_out);
+ struct rxe_qp *qp = skb->sk->sk_user_data;
+ int skb_out;
+ skb_out = atomic_dec_return(&qp->skb_out);
if (unlikely(qp->need_req_skb &&
- skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
- rxe_run_task(&qp->req.task, 1);
+ skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
+ rxe_sched_task(&qp->send_task);
- rxe_drop_ref(qp);
+ rxe_put(qp);
+ sock_put(skb->sk);
}
-int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
+static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
{
int err;
+ struct sock *sk = pkt->qp->sk->sk;
+ sock_hold(sk);
+ skb->sk = sk;
skb->destructor = rxe_skb_tx_dtor;
- skb->sk = pkt->qp->sk->sk;
-
- rxe_add_ref(pkt->qp);
+ rxe_get(pkt->qp);
atomic_inc(&pkt->qp->skb_out);
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (skb->protocol == htons(ETH_P_IP))
err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ else
err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
- } else {
- pr_err("Unknown layer 3 protocol: %d\n", skb->protocol);
- atomic_dec(&pkt->qp->skb_out);
- rxe_drop_ref(pkt->qp);
- kfree_skb(skb);
- return -EINVAL;
- }
- if (unlikely(net_xmit_eval(err))) {
- pr_debug("error sending packet: %d\n", err);
- return -EAGAIN;
- }
-
- return 0;
+ return err;
}
/* fix up a send packet to match the packets
* received from UDP before looping them back
*/
-void rxe_loopback(struct sk_buff *skb)
+static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt)
{
- struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+ struct sock *sk = pkt->qp->sk->sk;
+
+ memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
+
+ sock_hold(sk);
+ skb->sk = sk;
+ skb->destructor = rxe_skb_tx_dtor;
+ rxe_get(pkt->qp);
+ atomic_inc(&pkt->qp->skb_out);
if (skb->protocol == htons(ETH_P_IP))
skb_pull(skb, sizeof(struct iphdr));
else
skb_pull(skb, sizeof(struct ipv6hdr));
- if (WARN_ON(!ib_device_try_get(&pkt->rxe->ib_dev)))
+ if (WARN_ON(!ib_device_try_get(&pkt->rxe->ib_dev))) {
kfree_skb(skb);
+ return -EIO;
+ }
+
+ /* remove udp header */
+ skb_pull(skb, sizeof(struct udphdr));
+
+ rxe_rcv(skb);
+
+ return 0;
+}
+
+int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
+ struct sk_buff *skb)
+{
+ int err;
+ int is_request = pkt->mask & RXE_REQ_MASK;
+ struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->state_lock, flags);
+ if ((is_request && (qp_state(qp) < IB_QPS_RTS)) ||
+ (!is_request && (qp_state(qp) < IB_QPS_RTR))) {
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+ rxe_dbg_qp(qp, "Packet dropped. QP is not in ready state\n");
+ goto drop;
+ }
+ spin_unlock_irqrestore(&qp->state_lock, flags);
+
+ rxe_icrc_generate(skb, pkt);
+
+ if (pkt->mask & RXE_LOOPBACK_MASK)
+ err = rxe_loopback(skb, pkt);
else
- rxe_rcv(skb);
+ err = rxe_send(skb, pkt);
+ if (err) {
+ rxe_counter_inc(rxe, RXE_CNT_SEND_ERR);
+ return err;
+ }
+
+ rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
+ goto done;
+
+drop:
+ kfree_skb(skb);
+ err = 0;
+done:
+ return err;
}
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
@@ -455,6 +530,9 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
goto out;
}
+ /* Add time stamp to skb. */
+ skb->tstamp = ktime_get();
+
skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev));
/* FIXME: hold reference to this netdev until life of this skb. */
@@ -482,7 +560,16 @@ out:
*/
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num)
{
- return rxe->ndev->name;
+ struct net_device *ndev;
+ char *ndev_name;
+
+ ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+ if (!ndev)
+ return NULL;
+ ndev_name = ndev->name;
+ dev_put(ndev);
+
+ return ndev_name;
}
int rxe_net_add(const char *ibdev_name, struct net_device *ndev)
@@ -494,9 +581,9 @@ int rxe_net_add(const char *ibdev_name, struct net_device *ndev)
if (!rxe)
return -ENOMEM;
- rxe->ndev = ndev;
+ ib_mark_name_assigned_by_user(&rxe->ib_dev);
- err = rxe_add(rxe, ndev->mtu, ibdev_name);
+ err = rxe_add(rxe, ndev->mtu, ibdev_name, ndev);
if (err) {
ib_dealloc_device(&rxe->ib_dev);
return err;
@@ -520,11 +607,6 @@ static void rxe_port_event(struct rxe_dev *rxe,
/* Caller must hold net_info_lock */
void rxe_port_up(struct rxe_dev *rxe)
{
- struct rxe_port *port;
-
- port = &rxe->port;
- port->attr.state = IB_PORT_ACTIVE;
-
rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
dev_info(&rxe->ib_dev.dev, "set active\n");
}
@@ -532,11 +614,6 @@ void rxe_port_up(struct rxe_dev *rxe)
/* Caller must hold net_info_lock */
void rxe_port_down(struct rxe_dev *rxe)
{
- struct rxe_port *port;
-
- port = &rxe->port;
- port->attr.state = IB_PORT_DOWN;
-
rxe_port_event(rxe, IB_EVENT_PORT_ERR);
rxe_counter_inc(rxe, RXE_CNT_LINK_DOWNED);
dev_info(&rxe->ib_dev.dev, "set down\n");
@@ -544,10 +621,18 @@ void rxe_port_down(struct rxe_dev *rxe)
void rxe_set_port_state(struct rxe_dev *rxe)
{
- if (netif_running(rxe->ndev) && netif_carrier_ok(rxe->ndev))
+ struct net_device *ndev;
+
+ ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+ if (!ndev)
+ return;
+
+ if (ib_get_curr_port_state(ndev) == IB_PORT_ACTIVE)
rxe_port_up(rxe);
else
rxe_port_down(rxe);
+
+ dev_put(ndev);
}
static int rxe_notify(struct notifier_block *not_blk,
@@ -564,18 +649,14 @@ static int rxe_notify(struct notifier_block *not_blk,
case NETDEV_UNREGISTER:
ib_unregister_device_queued(&rxe->ib_dev);
break;
- case NETDEV_UP:
- rxe_port_up(rxe);
- break;
- case NETDEV_DOWN:
- rxe_port_down(rxe);
- break;
case NETDEV_CHANGEMTU:
- pr_info("%s changed mtu to %d\n", ndev->name, ndev->mtu);
+ rxe_dbg_dev(rxe, "%s changed mtu to %d\n", ndev->name, ndev->mtu);
rxe_set_mtu(rxe, ndev->mtu);
break;
+ case NETDEV_DOWN:
case NETDEV_CHANGE:
- rxe_set_port_state(rxe);
+ if (ib_get_curr_port_state(ndev) == IB_PORT_DOWN)
+ rxe_counter_inc(rxe, RXE_CNT_LINK_DOWNED);
break;
case NETDEV_REBOOT:
case NETDEV_GOING_DOWN:
@@ -583,7 +664,7 @@ static int rxe_notify(struct notifier_block *not_blk,
case NETDEV_CHANGENAME:
case NETDEV_FEAT_CHANGE:
default:
- pr_info("ignoring netdev event = %ld for %s\n",
+ rxe_dbg_dev(rxe, "ignoring netdev event = %ld for %s\n",
event, ndev->name);
break;
}