Diffstat (limited to 'drivers/infiniband/sw/rxe/rxe_net.c')
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_net.c | 550
1 file changed, 285 insertions(+), 265 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 08f3f90d2912..0195d361e5e3 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /*
  * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
  * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- *	- Redistributions of source code must retain the above
- *	  copyright notice, this list of conditions and the following
- *	  disclaimer.
- *
- *	- Redistributions in binary form must reproduce the above
- *	  copyright notice, this list of conditions and the following
- *	  disclaimer in the documentation and/or other materials
- *	  provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
  */
 
 #include <linux/skbuff.h>
@@ -45,82 +18,60 @@
 #include "rxe_net.h"
 #include "rxe_loc.h"
 
-static LIST_HEAD(rxe_dev_list);
-static DEFINE_SPINLOCK(dev_list_lock); /* spinlock for device list */
-
-struct rxe_dev *net_to_rxe(struct net_device *ndev)
-{
-	struct rxe_dev *rxe;
-	struct rxe_dev *found = NULL;
-
-	spin_lock_bh(&dev_list_lock);
-	list_for_each_entry(rxe, &rxe_dev_list, list) {
-		if (rxe->ndev == ndev) {
-			found = rxe;
-			break;
-		}
-	}
-	spin_unlock_bh(&dev_list_lock);
-
-	return found;
-}
-
-struct rxe_dev *get_rxe_by_name(const char *name)
-{
-	struct rxe_dev *rxe;
-	struct rxe_dev *found = NULL;
-
-	spin_lock_bh(&dev_list_lock);
-	list_for_each_entry(rxe, &rxe_dev_list, list) {
-		if (!strcmp(name, rxe->ib_dev.name)) {
-			found = rxe;
-			break;
-		}
-	}
-	spin_unlock_bh(&dev_list_lock);
-	return found;
-}
-
-
-struct rxe_recv_sockets recv_sockets;
-
-struct device *rxe_dma_device(struct rxe_dev *rxe)
-{
-	struct net_device *ndev;
-
-	ndev = rxe->ndev;
-
-	if (is_vlan_dev(ndev))
-		ndev = vlan_dev_real_dev(ndev);
-
-	return ndev->dev.parent;
-}
-
-int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
-{
-	int err;
-	unsigned char ll_addr[ETH_ALEN];
-
-	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
-	err = dev_mc_add(rxe->ndev, ll_addr);
+static struct rxe_recv_sockets recv_sockets;
 
-	return err;
-}
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+/*
+ * lockdep can detect false-positive circular dependencies
+ * when there are user-space socket API users, or in-kernel
+ * users switching between a TCP and an RDMA transport.
+ * Switching between siw and rxe may also cause problems,
+ * since by default sockets are classified only by family
+ * and not by IP protocol, and different locks may be used
+ * between the application and the low-level sockets.
+ *
+ * Problems were seen with ksmbd.ko and cifs.ko switching
+ * transports; use git blame to find more details.
+ */
+static struct lock_class_key rxe_recv_sk_key[2];
+static struct lock_class_key rxe_recv_slock_key[2];
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
 
-int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
+static inline void rxe_reclassify_recv_socket(struct socket *sock)
 {
-	int err;
-	unsigned char ll_addr[ETH_ALEN];
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct sock *sk = sock->sk;
 
-	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
-	err = dev_mc_del(rxe->ndev, ll_addr);
+	if (WARN_ON_ONCE(!sock_allow_reclassification(sk)))
+		return;
 
-	return err;
+	switch (sk->sk_family) {
+	case AF_INET:
+		sock_lock_init_class_and_name(sk,
+					      "slock-AF_INET-RDMA-RXE-RECV",
+					      &rxe_recv_slock_key[0],
+					      "sk_lock-AF_INET-RDMA-RXE-RECV",
+					      &rxe_recv_sk_key[0]);
+		break;
+	case AF_INET6:
+		sock_lock_init_class_and_name(sk,
+					      "slock-AF_INET6-RDMA-RXE-RECV",
+					      &rxe_recv_slock_key[1],
+					      "sk_lock-AF_INET6-RDMA-RXE-RECV",
+					      &rxe_recv_sk_key[1]);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+	}
+#endif /* CONFIG_DEBUG_LOCK_ALLOC */
 }
 
-static struct dst_entry *rxe_find_route4(struct net_device *ndev,
-					 struct in_addr *saddr,
-					 struct in_addr *daddr)
+static struct dst_entry *rxe_find_route4(struct rxe_qp *qp,
+					 struct net_device *ndev,
+					 struct in_addr *saddr,
+					 struct in_addr *daddr)
 {
 	struct rtable *rt;
 	struct flowi4 fl = { { 0 } };
@@ -133,7 +84,7 @@ static struct dst_entry *rxe_find_route4(struct net_device *ndev,
 
 	rt = ip_route_output_key(&init_net, &fl);
 	if (IS_ERR(rt)) {
-		pr_err_ratelimited("no route to %pI4\n", &daddr->s_addr);
+		rxe_dbg_qp(qp, "no route to %pI4\n", &daddr->s_addr);
 		return NULL;
 	}
 
@@ -141,7 +92,8 @@
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
-static struct dst_entry *rxe_find_route6(struct net_device *ndev,
+static struct dst_entry *rxe_find_route6(struct rxe_qp *qp,
+					 struct net_device *ndev,
 					 struct in6_addr *saddr,
 					 struct in6_addr *daddr)
 {
@@ -154,14 +106,16 @@ static struct dst_entry *rxe_find_route6(struct net_device *ndev,
 	memcpy(&fl6.daddr, daddr, sizeof(*daddr));
 	fl6.flowi6_proto = IPPROTO_UDP;
 
-	if (unlikely(ipv6_stub->ipv6_dst_lookup(sock_net(recv_sockets.sk6->sk),
-						recv_sockets.sk6->sk, &ndst, &fl6))) {
-		pr_err_ratelimited("no route to %pI6\n", daddr);
-		goto put;
+	ndst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk),
+					       recv_sockets.sk6->sk, &fl6,
+					       NULL);
+	if (IS_ERR(ndst)) {
+		rxe_dbg_qp(qp, "no route to %pI6\n", daddr);
+		return NULL;
 	}
 
 	if (unlikely(ndst->error)) {
-		pr_err("no route to %pI6\n", daddr);
+		rxe_dbg_qp(qp, "no route to %pI6\n", daddr);
 		goto put;
 	}
 
@@ -173,7 +127,8 @@ put:
 
 #else
 
-static struct dst_entry *rxe_find_route6(struct net_device *ndev,
+static struct dst_entry *rxe_find_route6(struct rxe_qp *qp,
+					 struct net_device *ndev,
 					 struct in6_addr *saddr,
 					 struct in6_addr *daddr)
 {
@@ -182,7 +137,7 @@ static struct dst_entry *rxe_find_route6(struct net_device *ndev,
 
 #endif
 
-static struct dst_entry *rxe_find_route(struct rxe_dev *rxe,
+static struct dst_entry *rxe_find_route(struct net_device *ndev,
 					struct rxe_qp *qp,
 					struct rxe_av *av)
 {
@@ -191,42 +146,57 @@ static struct dst_entry *rxe_find_route(struct rxe_dev *rxe,
 	if (qp_type(qp) == IB_QPT_RC)
 		dst = sk_dst_get(qp->sk->sk);
 
-	if (!dst || !(dst->obsolete && dst->ops->check(dst, 0))) {
+	if (!dst || !dst_check(dst, qp->dst_cookie)) {
 		if (dst)
 			dst_release(dst);
 
-		if (av->network_type == RDMA_NETWORK_IPV4) {
+		if (av->network_type == RXE_NETWORK_TYPE_IPV4) {
 			struct in_addr *saddr;
 			struct in_addr *daddr;
 
 			saddr = &av->sgid_addr._sockaddr_in.sin_addr;
 			daddr = &av->dgid_addr._sockaddr_in.sin_addr;
-			dst = rxe_find_route4(rxe->ndev, saddr, daddr);
-		} else if (av->network_type == RDMA_NETWORK_IPV6) {
+			dst = rxe_find_route4(qp, ndev, saddr, daddr);
+		} else if (av->network_type == RXE_NETWORK_TYPE_IPV6) {
 			struct in6_addr *saddr6;
 			struct in6_addr *daddr6;
 
 			saddr6 = &av->sgid_addr._sockaddr_in6.sin6_addr;
 			daddr6 = &av->dgid_addr._sockaddr_in6.sin6_addr;
-			dst = rxe_find_route6(rxe->ndev, saddr6, daddr6);
+			dst = rxe_find_route6(qp, ndev, saddr6, daddr6);
+#if IS_ENABLED(CONFIG_IPV6)
+			if (dst)
+				qp->dst_cookie =
+					rt6_get_cookie((struct rt6_info *)dst);
+#endif
 		}
-	}
 
+		if (dst && (qp_type(qp) == IB_QPT_RC)) {
+			dst_hold(dst);
+			sk_dst_set(qp->sk->sk, dst);
+		}
+	}
 	return dst;
 }
 
 static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 {
 	struct udphdr *udph;
+	struct rxe_dev *rxe;
 	struct net_device *ndev = skb->dev;
-	struct rxe_dev *rxe = net_to_rxe(ndev);
 	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
 
+	/* takes a reference on rxe->ib_dev
+	 * drop when skb is freed
+	 */
+	rxe = rxe_get_dev_from_net(ndev);
+	if (!rxe && is_vlan_dev(ndev))
+		rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev));
 	if (!rxe)
 		goto drop;
 
 	if (skb_linearize(skb)) {
-		pr_err("skb_linearize failed\n");
+		ib_device_put(&rxe->ib_dev);
 		goto drop;
 	}
 
@@ -237,9 +207,15 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 	pkt->mask = RXE_GRH_MASK;
 	pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);
 
-	return rxe_rcv(skb);
+	/* remove udp header */
+	skb_pull(skb, sizeof(struct udphdr));
+
+	rxe_rcv(skb);
+
+	return 0;
 drop:
 	kfree_skb(skb);
+
 	return 0;
 }
 
@@ -262,10 +238,9 @@
 
 	/* Create UDP socket */
 	err = udp_sock_create(net, &udp_cfg, &sock);
-	if (err < 0) {
-		pr_err("failed to create udp socket. err = %d\n", err);
+	if (err < 0)
 		return ERR_PTR(err);
-	}
 
+	rxe_reclassify_recv_socket(sock);
 	tnl_cfg.encap_type = 1;
 	tnl_cfg.encap_rcv = rxe_udp_encap_recv;
@@ -276,7 +251,7 @@
 	return sock;
 }
 
-void rxe_release_udp_tunnel(struct socket *sk)
+static void rxe_release_udp_tunnel(struct socket *sk)
 {
 	if (sk)
 		udp_tunnel_sock_release(sk);
@@ -316,6 +291,7 @@
 	iph->version = IPVERSION;
 	iph->ihl = sizeof(struct iphdr) >> 2;
+	iph->tot_len = htons(skb->len);
 	iph->frag_off = df;
 	iph->protocol = proto;
 	iph->tos = tos;
@@ -324,8 +300,6 @@
 	iph->ttl = ttl;
 	__ip_select_ident(dev_net(dst->dev), iph,
 			  skb_shinfo(skb)->gso_segs ?: 1);
-	iph->tot_len = htons(skb->len);
-	ip_send_check(iph);
 }
 
@@ -337,7 +311,7 @@ static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
 			      IPSKB_REROUTED);
-	skb_dst_set(skb, dst);
+	skb_dst_set(skb, dst_clone(dst));
 
 	__skb_push(skb, sizeof(*ip6h));
 	skb_reset_network_header(skb);
@@ -351,8 +325,8 @@ static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
 	ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
 }
 
-static int prepare4(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
-		    struct sk_buff *skb, struct rxe_av *av)
+static int prepare4(struct rxe_av *av, struct rxe_pkt_info *pkt,
+		    struct sk_buff *skb)
 {
 	struct rxe_qp *qp = pkt->qp;
 	struct dst_entry *dst;
@@ -361,170 +335,222 @@ static int prepare4(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
 	struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
 	struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;
 
-	dst = rxe_find_route(rxe, qp, av);
+	dst = rxe_find_route(skb->dev, qp, av);
 	if (!dst) {
-		pr_err("Host not reachable\n");
+		rxe_dbg_qp(qp, "Host not reachable\n");
 		return -EHOSTUNREACH;
 	}
 
-	if (!memcmp(saddr, daddr, sizeof(*daddr)))
-		pkt->mask |= RXE_LOOPBACK_MASK;
-
-	prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
-			htons(ROCE_V2_UDP_DPORT));
+	prepare_udp_hdr(skb, cpu_to_be16(qp->src_port),
+			cpu_to_be16(ROCE_V2_UDP_DPORT));
 
 	prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP,
 			 av->grh.traffic_class, av->grh.hop_limit, df, xnet);
 
-	if (qp_type(qp) == IB_QPT_RC)
-		sk_dst_set(qp->sk->sk, dst);
-	else
-		dst_release(dst);
-
+	dst_release(dst);
 	return 0;
 }
 
-static int prepare6(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
-		    struct sk_buff *skb, struct rxe_av *av)
+static int prepare6(struct rxe_av *av, struct rxe_pkt_info *pkt,
+		    struct sk_buff *skb)
 {
 	struct rxe_qp *qp = pkt->qp;
-	struct dst_entry *dst = NULL;
+	struct dst_entry *dst;
 	struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
 	struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;
 
-	dst = rxe_find_route(rxe, qp, av);
+	dst = rxe_find_route(skb->dev, qp, av);
 	if (!dst) {
-		pr_err("Host not reachable\n");
+		rxe_dbg_qp(qp, "Host not reachable\n");
 		return -EHOSTUNREACH;
 	}
 
-	if (!memcmp(saddr, daddr, sizeof(*daddr)))
-		pkt->mask |= RXE_LOOPBACK_MASK;
-
-	prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
-			htons(ROCE_V2_UDP_DPORT));
+	prepare_udp_hdr(skb, cpu_to_be16(qp->src_port),
+			cpu_to_be16(ROCE_V2_UDP_DPORT));
 
 	prepare_ipv6_hdr(dst, skb, saddr, daddr, IPPROTO_UDP,
 			 av->grh.traffic_class, av->grh.hop_limit);
 
-	if (qp_type(qp) == IB_QPT_RC)
-		sk_dst_set(qp->sk->sk, dst);
-	else
-		dst_release(dst);
-
+	dst_release(dst);
 	return 0;
 }
 
-int rxe_prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
-		struct sk_buff *skb, u32 *crc)
+int rxe_prepare(struct rxe_av *av, struct rxe_pkt_info *pkt,
+		struct sk_buff *skb)
 {
 	int err = 0;
-	struct rxe_av *av = rxe_get_av(pkt);
 
-	if (av->network_type == RDMA_NETWORK_IPV4)
-		err = prepare4(rxe, pkt, skb, av);
-	else if (av->network_type == RDMA_NETWORK_IPV6)
-		err = prepare6(rxe, pkt, skb, av);
+	if (skb->protocol == htons(ETH_P_IP))
+		err = prepare4(av, pkt, skb);
+	else if (skb->protocol == htons(ETH_P_IPV6))
+		err = prepare6(av, pkt, skb);
 
-	*crc = rxe_icrc_hdr(pkt, skb);
+	if (ether_addr_equal(skb->dev->dev_addr, av->dmac))
+		pkt->mask |= RXE_LOOPBACK_MASK;
 
 	return err;
 }
 
 static void rxe_skb_tx_dtor(struct sk_buff *skb)
 {
-	struct sock *sk = skb->sk;
-	struct rxe_qp *qp = sk->sk_user_data;
-	int skb_out = atomic_dec_return(&qp->skb_out);
+	struct rxe_qp *qp = skb->sk->sk_user_data;
+	int skb_out;
 
+	skb_out = atomic_dec_return(&qp->skb_out);
 	if (unlikely(qp->need_req_skb &&
 		     skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
-		rxe_run_task(&qp->req.task, 1);
+		rxe_sched_task(&qp->send_task);
 
-	rxe_drop_ref(qp);
+	rxe_put(qp);
+	sock_put(skb->sk);
 }
 
-int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb)
+static int rxe_send(struct sk_buff *skb, struct rxe_pkt_info *pkt)
 {
-	struct sk_buff *nskb;
-	struct rxe_av *av;
 	int err;
+	struct sock *sk = pkt->qp->sk->sk;
 
-	av = rxe_get_av(pkt);
+	sock_hold(sk);
+	skb->sk = sk;
+	skb->destructor = rxe_skb_tx_dtor;
+	rxe_get(pkt->qp);
+	atomic_inc(&pkt->qp->skb_out);
 
-	nskb = skb_clone(skb, GFP_ATOMIC);
-	if (!nskb)
-		return -ENOMEM;
+	if (skb->protocol == htons(ETH_P_IP))
+		err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
+	else
+		err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
 
-	nskb->destructor = rxe_skb_tx_dtor;
-	nskb->sk = pkt->qp->sk->sk;
+	return err;
+}
 
-	if (av->network_type == RDMA_NETWORK_IPV4) {
-		err = ip_local_out(dev_net(skb_dst(skb)->dev), nskb->sk, nskb);
-	} else if (av->network_type == RDMA_NETWORK_IPV6) {
-		err = ip6_local_out(dev_net(skb_dst(skb)->dev), nskb->sk, nskb);
-	} else {
-		pr_err("Unknown layer 3 protocol: %d\n", av->network_type);
-		kfree_skb(nskb);
-		return -EINVAL;
-	}
+/* fix up a send packet to match the packets
 * received from UDP before looping them back
+ */
+static int rxe_loopback(struct sk_buff *skb, struct rxe_pkt_info *pkt)
+{
+	struct sock *sk = pkt->qp->sk->sk;
 
-	if (unlikely(net_xmit_eval(err))) {
-		pr_debug("error sending packet: %d\n", err);
-		return -EAGAIN;
-	}
+	memcpy(SKB_TO_PKT(skb), pkt, sizeof(*pkt));
 
-	rxe_add_ref(pkt->qp);
+	sock_hold(sk);
+	skb->sk = sk;
+	skb->destructor = rxe_skb_tx_dtor;
+	rxe_get(pkt->qp);
 	atomic_inc(&pkt->qp->skb_out);
-	kfree_skb(skb);
+
+	if (skb->protocol == htons(ETH_P_IP))
+		skb_pull(skb, sizeof(struct iphdr));
+	else
+		skb_pull(skb, sizeof(struct ipv6hdr));
+
+	if (WARN_ON(!ib_device_try_get(&pkt->rxe->ib_dev))) {
+		kfree_skb(skb);
+		return -EIO;
+	}
+
+	/* remove udp header */
+	skb_pull(skb, sizeof(struct udphdr));
+
+	rxe_rcv(skb);
 
 	return 0;
 }
 
-int rxe_loopback(struct sk_buff *skb)
+int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
+		    struct sk_buff *skb)
 {
-	return rxe_rcv(skb);
-}
+	int err;
+	int is_request = pkt->mask & RXE_REQ_MASK;
+	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+	unsigned long flags;
+
+	spin_lock_irqsave(&qp->state_lock, flags);
+	if ((is_request && (qp_state(qp) < IB_QPS_RTS)) ||
+	    (!is_request && (qp_state(qp) < IB_QPS_RTR))) {
+		spin_unlock_irqrestore(&qp->state_lock, flags);
+		rxe_dbg_qp(qp, "Packet dropped. QP is not in ready state\n");
+		goto drop;
+	}
+	spin_unlock_irqrestore(&qp->state_lock, flags);
 
-static inline int addr_same(struct rxe_dev *rxe, struct rxe_av *av)
-{
-	return rxe->port.port_guid == av->grh.dgid.global.interface_id;
+	rxe_icrc_generate(skb, pkt);
+
+	if (pkt->mask & RXE_LOOPBACK_MASK)
+		err = rxe_loopback(skb, pkt);
+	else
+		err = rxe_send(skb, pkt);
+	if (err) {
+		rxe_counter_inc(rxe, RXE_CNT_SEND_ERR);
+		return err;
+	}
+
+	rxe_counter_inc(rxe, RXE_CNT_SENT_PKTS);
+	goto done;
+
+drop:
+	kfree_skb(skb);
+	err = 0;
+done:
+	return err;
 }
 
 struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
 				int paylen, struct rxe_pkt_info *pkt)
 {
 	unsigned int hdr_len;
-	struct sk_buff *skb;
+	struct sk_buff *skb = NULL;
+	struct net_device *ndev;
+	const struct ib_gid_attr *attr;
+	const int port_num = 1;
 
-	if (av->network_type == RDMA_NETWORK_IPV4)
+	attr = rdma_get_gid_attr(&rxe->ib_dev, port_num, av->grh.sgid_index);
+	if (IS_ERR(attr))
+		return NULL;
+
+	if (av->network_type == RXE_NETWORK_TYPE_IPV4)
 		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
 			sizeof(struct iphdr);
 	else
 		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
 			sizeof(struct ipv6hdr);
 
-	skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(rxe->ndev),
+	rcu_read_lock();
+	ndev = rdma_read_gid_attr_ndev_rcu(attr);
+	if (IS_ERR(ndev)) {
+		rcu_read_unlock();
+		goto out;
+	}
+	skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(ndev),
 			GFP_ATOMIC);
-	if (unlikely(!skb))
-		return NULL;
 
-	skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(rxe->ndev));
+	if (unlikely(!skb)) {
+		rcu_read_unlock();
+		goto out;
+	}
+
+	/* Add time stamp to skb. */
+	skb->tstamp = ktime_get();
+
+	skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev));
+
+	/* FIXME: hold a reference on this netdev for the lifetime of this skb. */
+	skb->dev = ndev;
+	rcu_read_unlock();
 
-	skb->dev = rxe->ndev;
-	if (av->network_type == RDMA_NETWORK_IPV4)
+	if (av->network_type == RXE_NETWORK_TYPE_IPV4)
 		skb->protocol = htons(ETH_P_IP);
 	else
 		skb->protocol = htons(ETH_P_IPV6);
 
 	pkt->rxe = rxe;
-	pkt->port_num = 1;
+	pkt->port_num = port_num;
 	pkt->hdr = skb_put(skb, paylen);
 	pkt->mask |= RXE_GRH_MASK;
 
-	memset(pkt->hdr, 0, paylen);
-
+out:
	rdma_put_gid_attr(attr);
 	return skb;
 }
 
@@ -534,52 +560,37 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
  */
 const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num)
 {
-	return rxe->ndev->name;
-}
+	struct net_device *ndev;
+	char *ndev_name;
 
-enum rdma_link_layer rxe_link_layer(struct rxe_dev *rxe, unsigned int port_num)
-{
-	return IB_LINK_LAYER_ETHERNET;
+	ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+	if (!ndev)
+		return NULL;
+	ndev_name = ndev->name;
+	dev_put(ndev);
+
+	return ndev_name;
 }
 
-struct rxe_dev *rxe_net_add(struct net_device *ndev)
+int rxe_net_add(const char *ibdev_name, struct net_device *ndev)
 {
 	int err;
 	struct rxe_dev *rxe = NULL;
 
-	rxe = (struct rxe_dev *)ib_alloc_device(sizeof(*rxe));
+	rxe = ib_alloc_device(rxe_dev, ib_dev);
 	if (!rxe)
-		return NULL;
+		return -ENOMEM;
 
-	rxe->ndev = ndev;
+	ib_mark_name_assigned_by_user(&rxe->ib_dev);
 
-	err = rxe_add(rxe, ndev->mtu);
+	err = rxe_add(rxe, ndev->mtu, ibdev_name, ndev);
 	if (err) {
 		ib_dealloc_device(&rxe->ib_dev);
-		return NULL;
+		return err;
 	}
 
-	spin_lock_bh(&dev_list_lock);
-	list_add_tail(&rxe->list, &rxe_dev_list);
-	spin_unlock_bh(&dev_list_lock);
-	return rxe;
-}
-
-void rxe_remove_all(void)
-{
-	spin_lock_bh(&dev_list_lock);
-	while (!list_empty(&rxe_dev_list)) {
-		struct rxe_dev *rxe =
-			list_first_entry(&rxe_dev_list, struct rxe_dev, list);
-
-		list_del(&rxe->list);
-		spin_unlock_bh(&dev_list_lock);
-		rxe_remove(rxe);
-		spin_lock_bh(&dev_list_lock);
-	}
-	spin_unlock_bh(&dev_list_lock);
+	return 0;
 }
-EXPORT_SYMBOL(rxe_remove_all);
 
 static void rxe_port_event(struct rxe_dev *rxe,
 			   enum ib_event_type event)
@@ -596,27 +607,32 @@
 /* Caller must hold net_info_lock */
 void rxe_port_up(struct rxe_dev *rxe)
 {
-	struct rxe_port *port;
-
-	port = &rxe->port;
-	port->attr.state = IB_PORT_ACTIVE;
-	port->attr.phys_state = IB_PHYS_STATE_LINK_UP;
-
 	rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
-	pr_info("set %s active\n", rxe->ib_dev.name);
+	dev_info(&rxe->ib_dev.dev, "set active\n");
 }
 
 /* Caller must hold net_info_lock */
 void rxe_port_down(struct rxe_dev *rxe)
 {
-	struct rxe_port *port;
+	rxe_port_event(rxe, IB_EVENT_PORT_ERR);
+	rxe_counter_inc(rxe, RXE_CNT_LINK_DOWNED);
+	dev_info(&rxe->ib_dev.dev, "set down\n");
+}
+
+void rxe_set_port_state(struct rxe_dev *rxe)
+{
+	struct net_device *ndev;
 
-	port = &rxe->port;
-	port->attr.state = IB_PORT_DOWN;
-	port->attr.phys_state = IB_PHYS_STATE_LINK_DOWN;
+	ndev = rxe_ib_device_get_netdev(&rxe->ib_dev);
+	if (!ndev)
+		return;
 
-	rxe_port_event(rxe, IB_EVENT_PORT_ERR);
-	pr_info("set %s down\n", rxe->ib_dev.name);
+	if (ib_get_curr_port_state(ndev) == IB_PORT_ACTIVE)
+		rxe_port_up(rxe);
+	else
+		rxe_port_down(rxe);
+
+	dev_put(ndev);
 }
 
@@ -624,42 +640,40 @@ static int rxe_notify(struct notifier_block *not_blk,
 			  unsigned long event,
 			  void *arg)
 {
 	struct net_device *ndev = netdev_notifier_info_to_dev(arg);
-	struct rxe_dev *rxe = net_to_rxe(ndev);
+	struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);
 
 	if (!rxe)
-		goto out;
+		return NOTIFY_OK;
 
 	switch (event) {
 	case NETDEV_UNREGISTER:
-		list_del(&rxe->list);
-		rxe_remove(rxe);
-		break;
-	case NETDEV_UP:
-		rxe_port_up(rxe);
-		break;
-	case NETDEV_DOWN:
-		rxe_port_down(rxe);
+		ib_unregister_device_queued(&rxe->ib_dev);
 		break;
 	case NETDEV_CHANGEMTU:
-		pr_info("%s changed mtu to %d\n", ndev->name, ndev->mtu);
+		rxe_dbg_dev(rxe, "%s changed mtu to %d\n", ndev->name, ndev->mtu);
 		rxe_set_mtu(rxe, ndev->mtu);
 		break;
-	case NETDEV_REBOOT:
+	case NETDEV_DOWN:
 	case NETDEV_CHANGE:
+		if (ib_get_curr_port_state(ndev) == IB_PORT_DOWN)
+			rxe_counter_inc(rxe, RXE_CNT_LINK_DOWNED);
+		break;
+	case NETDEV_REBOOT:
 	case NETDEV_GOING_DOWN:
 	case NETDEV_CHANGEADDR:
 	case NETDEV_CHANGENAME:
 	case NETDEV_FEAT_CHANGE:
 	default:
-		pr_info("ignoring netdev event = %ld for %s\n",
+		rxe_dbg_dev(rxe, "ignoring netdev event = %ld for %s\n",
 			event, ndev->name);
 		break;
 	}
-out:
+
+	ib_device_put(&rxe->ib_dev);
 	return NOTIFY_OK;
 }
 
-struct notifier_block rxe_net_notifier = {
+static struct notifier_block rxe_net_notifier = {
 	.notifier_call = rxe_notify,
 };
 
@@ -682,6 +696,12 @@
 
 	recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
 						htons(ROCE_V2_UDP_DPORT), true);
+	if (PTR_ERR(recv_sockets.sk6) == -EAFNOSUPPORT) {
+		recv_sockets.sk6 = NULL;
+		pr_warn("IPv6 is not supported, can not create a UDPv6 socket\n");
+		return 0;
+	}
+
 	if (IS_ERR(recv_sockets.sk6)) {
 		recv_sockets.sk6 = NULL;
 		pr_err("Failed to create IPv6 UDP tunnel\n");
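
For readers unfamiliar with the lockdep reclassification idiom this patch adds in rxe_setup_udp_tunnel(), here is a minimal, self-contained sketch of the same create-then-reclassify flow. It is an illustration only: the demo_* identifiers and the "DEMO" lock-class names are invented for this sketch and do not exist in the rxe sources.

/*
 * Sketch: create a UDP tunnel socket, then give it a private lockdep
 * class before any traffic can queue on it, as the patch does for the
 * rxe receive sockets. All demo_* names are hypothetical.
 */
#include <linux/err.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/udp_tunnel.h>

static struct lock_class_key demo_sk_key;
static struct lock_class_key demo_slock_key;

/* encap_rcv stub; a real user would hand the skb to its protocol code */
static int demo_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

static struct socket *demo_setup_udp_tunnel(struct net *net, __be16 port)
{
	struct udp_port_cfg udp_cfg = {
		.family = AF_INET,
		.local_udp_port = port,
	};
	struct udp_tunnel_sock_cfg tnl_cfg = {
		.encap_type = 1,
		.encap_rcv = demo_encap_recv,
	};
	struct socket *sock;
	int err;

	err = udp_sock_create(net, &udp_cfg, &sock);
	if (err < 0)
		return ERR_PTR(err);

	/* Reclassify while the socket is still pristine, so lockdep no
	 * longer lumps it in with every other AF_INET socket.
	 */
	if (!WARN_ON_ONCE(!sock_allow_reclassification(sock->sk)))
		sock_lock_init_class_and_name(sock->sk,
					      "slock-AF_INET-DEMO",
					      &demo_slock_key,
					      "sk_lock-AF_INET-DEMO",
					      &demo_sk_key);

	setup_udp_tunnel_sock(net, sock, &tnl_cfg);

	return sock;
}

The ordering is the point: reclassification must happen after udp_sock_create() but before the socket can carry packets, which is why the patch calls rxe_reclassify_recv_socket() immediately after the socket is created and before setup_udp_tunnel_sock() installs the encap handler.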
