Diffstat (limited to 'drivers/infiniband/sw/siw')
-rw-r--r--  drivers/infiniband/sw/siw/Kconfig     |  5
-rw-r--r--  drivers/infiniband/sw/siw/siw.h       | 54
-rw-r--r--  drivers/infiniband/sw/siw/siw_cm.c    | 27
-rw-r--r--  drivers/infiniband/sw/siw/siw_cq.c    |  2
-rw-r--r--  drivers/infiniband/sw/siw/siw_main.c  | 83
-rw-r--r--  drivers/infiniband/sw/siw/siw_mem.c   | 28
-rw-r--r--  drivers/infiniband/sw/siw/siw_mem.h   |  1
-rw-r--r--  drivers/infiniband/sw/siw/siw_qp.c    | 54
-rw-r--r--  drivers/infiniband/sw/siw/siw_qp_rx.c | 31
-rw-r--r--  drivers/infiniband/sw/siw/siw_qp_tx.c | 46
-rw-r--r--  drivers/infiniband/sw/siw/siw_verbs.c | 50
-rw-r--r--  drivers/infiniband/sw/siw/siw_verbs.h |  2
12 files changed, 135 insertions(+), 248 deletions(-)
diff --git a/drivers/infiniband/sw/siw/Kconfig b/drivers/infiniband/sw/siw/Kconfig
index 81b70a3eeb87..186f182b80e7 100644
--- a/drivers/infiniband/sw/siw/Kconfig
+++ b/drivers/infiniband/sw/siw/Kconfig
@@ -2,9 +2,8 @@ config RDMA_SIW
 	tristate "Software RDMA over TCP/IP (iWARP) driver"
 	depends on INET && INFINIBAND
 	depends on INFINIBAND_VIRT_DMA
-	select LIBCRC32C
-	select CRYPTO
-	select CRYPTO_CRC32C
+	select CRC32
+	select NET_CRC32C
 	help
 	  This driver implements the iWARP RDMA transport over
 	  the Linux TCP/IP network stack. It enables a system with a
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index 75253f2b3e3d..f5fd71717b80 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -10,9 +10,9 @@
 #include <rdma/restrack.h>
 #include <linux/socket.h>
 #include <linux/skbuff.h>
-#include <crypto/hash.h>
 #include <linux/crc32.h>
 #include <linux/crc32c.h>
+#include <linux/unaligned.h>
 
 #include <rdma/siw-abi.h>
 #include "iwarp.h"
@@ -46,6 +46,9 @@
  */
 #define SIW_IRQ_MAXBURST_SQ_ACTIVE 4
 
+/* There is always only a port 1 per siw device */
+#define SIW_PORT 1
+
 struct siw_dev_cap {
 	int max_qp;
 	int max_qp_wr;
@@ -69,16 +72,12 @@ struct siw_pd {
 
 struct siw_device {
 	struct ib_device base_dev;
-	struct net_device *netdev;
 	struct siw_dev_cap attrs;
 
 	u32 vendor_part_id;
 	int numa_node;
 	char raw_gid[ETH_ALEN];
 
-	/* physical port state (only one port per device) */
-	enum ib_port_state state;
-
 	spinlock_t lock;
 
 	struct xarray qp_xa;
@@ -94,8 +93,6 @@ struct siw_device {
 	atomic_t num_mr;
 	atomic_t num_srq;
 	atomic_t num_ctx;
-
-	struct work_struct netdev_down;
 };
 
 struct siw_ucontext {
@@ -292,7 +289,8 @@ struct siw_rx_stream {
 	union iwarp_hdr hdr;
 	struct mpa_trailer trailer;
 
-	struct shash_desc *mpa_crc_hd;
+	u32 mpa_crc;
+	bool mpa_crc_enabled;
 
 	/*
 	 * For each FPDU, main RX loop runs through 3 stages:
@@ -393,7 +391,8 @@ struct siw_iwarp_tx {
 	int burst;
 	int bytes_unsent; /* ddp payload bytes */
 
-	struct shash_desc *mpa_crc_hd;
+	u32 mpa_crc;
+	bool mpa_crc_enabled;
 
 	u8 do_crc : 1; /* do crc for segment */
 	u8 use_sendpage : 1; /* send w/o copy */
@@ -499,7 +498,6 @@ extern u_char mpa_version;
 extern const bool peer_to_peer;
 
 extern struct task_struct *siw_tx_thread[];
-extern struct crypto_shash *siw_crypto_shash;
 extern struct iwarp_msg_info iwarp_pktinfo[RDMAP_TERMINATE + 1];
 
 /* QP general functions */
@@ -671,29 +669,33 @@ static inline struct siw_sqe *irq_alloc_free(struct siw_qp *qp)
 	return NULL;
 }
 
-static inline __wsum siw_csum_update(const void *buff, int len, __wsum sum)
+static inline void siw_crc_init(u32 *crc)
 {
-	return (__force __wsum)crc32c((__force __u32)sum, buff, len);
+	*crc = ~0;
 }
 
-static inline __wsum siw_csum_combine(__wsum csum, __wsum csum2, int offset,
-				      int len)
+static inline void siw_crc_update(u32 *crc, const void *data, size_t len)
 {
-	return (__force __wsum)__crc32c_le_combine((__force __u32)csum,
-						   (__force __u32)csum2, len);
+	*crc = crc32c(*crc, data, len);
 }
 
-static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
+static inline void siw_crc_final(u32 *crc, u8 out[4])
 {
-	const struct skb_checksum_ops siw_cs_ops = {
-		.update = siw_csum_update,
-		.combine = siw_csum_combine,
-	};
-	__wsum crc = *(u32 *)shash_desc_ctx(srx->mpa_crc_hd);
+	put_unaligned_le32(~*crc, out);
+}
 
-	crc = __skb_checksum(srx->skb, srx->skb_offset, len, crc,
-			     &siw_cs_ops);
-	*(u32 *)shash_desc_ctx(srx->mpa_crc_hd) = crc;
+static inline void siw_crc_oneshot(const void *data, size_t len, u8 out[4])
+{
+	u32 crc;
+
+	siw_crc_init(&crc);
+	siw_crc_update(&crc, data, len);
+	return siw_crc_final(&crc, out);
+}
+
+static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
+{
+	srx->mpa_crc = skb_crc32c(srx->skb, srx->skb_offset, len, srx->mpa_crc);
 }
 
 #define siw_dbg(ibdev, fmt, ...)                                               \
@@ -716,7 +718,7 @@ static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
 		  "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)
 
 #define siw_dbg_cep(cep, fmt, ...)                                             \
-	ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt,                 \
+	ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%p] %s: " fmt,                  \
 		  cep, __func__, ##__VA_ARGS__)
 
 void siw_cq_flush(struct siw_cq *cq);
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 86323918a570..708b13993fdf 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -1759,6 +1759,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
 {
 	struct socket *s;
 	struct siw_cep *cep = NULL;
+	struct net_device *ndev = NULL;
 	struct siw_device *sdev = to_siw_dev(id->device);
 	int addr_family = id->local_addr.ss_family;
 	int rv = 0;
@@ -1779,9 +1780,15 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
 		struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);
 
 		/* For wildcard addr, limit binding to current device only */
-		if (ipv4_is_zeronet(laddr->sin_addr.s_addr))
-			s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
-
+		if (ipv4_is_zeronet(laddr->sin_addr.s_addr)) {
+			ndev = ib_device_get_netdev(id->device, SIW_PORT);
+			if (ndev) {
+				s->sk->sk_bound_dev_if = ndev->ifindex;
+			} else {
+				rv = -ENODEV;
+				goto error;
+			}
+		}
 		rv = s->ops->bind(s, (struct sockaddr *)laddr,
 				  sizeof(struct sockaddr_in));
 	} else {
@@ -1797,9 +1804,15 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
 		}
 
 		/* For wildcard addr, limit binding to current device only */
-		if (ipv6_addr_any(&laddr->sin6_addr))
-			s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
-
+		if (ipv6_addr_any(&laddr->sin6_addr)) {
+			ndev = ib_device_get_netdev(id->device, SIW_PORT);
+			if (ndev) {
+				s->sk->sk_bound_dev_if = ndev->ifindex;
+			} else {
+				rv = -ENODEV;
+				goto error;
+			}
+		}
 		rv = s->ops->bind(s, (struct sockaddr *)laddr,
 				  sizeof(struct sockaddr_in6));
 	}
@@ -1860,6 +1873,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
 	}
 	list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
 	cep->state = SIW_EPSTATE_LISTENING;
+	dev_put(ndev);
 
 	siw_dbg(id->device, "Listen at laddr %pISp\n", &id->local_addr);
 
@@ -1879,6 +1893,7 @@ error:
 		siw_cep_set_free_and_put(cep);
 	}
 	sock_release(s);
+	dev_put(ndev);
 
 	return rv;
 }
diff --git a/drivers/infiniband/sw/siw/siw_cq.c b/drivers/infiniband/sw/siw/siw_cq.c
index f3c2226aff94..25b3c741b66b 100644
--- a/drivers/infiniband/sw/siw/siw_cq.c
+++ b/drivers/infiniband/sw/siw/siw_cq.c
@@ -72,7 +72,7 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
 			wc->opcode = map_wc_opcode[cqe->opcode];
 			wc->status = map_cqe_status[cqe->status].ib;
 			siw_dbg_cq(cq,
-				   "idx %u, type %d, flags %2x, id 0x%pK\n",
+				   "idx %u, type %d, flags %2x, id 0x%p\n",
 				   cq->cq_get % cq->num_cqe, cqe->opcode,
 				   cqe->flags, (void *)(uintptr_t)cqe->id);
 		} else {
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index 61ad8ca3d1a2..5168307229a9 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -59,7 +59,6 @@ u_char mpa_version = MPA_REVISION_2;
 const bool peer_to_peer;
 
 struct task_struct *siw_tx_thread[NR_CPUS];
-struct crypto_shash *siw_crypto_shash;
 
 static int siw_device_register(struct siw_device *sdev, const char *name)
 {
@@ -287,7 +286,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
 		return NULL;
 
 	base_dev = &sdev->base_dev;
-	sdev->netdev = netdev;
 
 	if (netdev->addr_len) {
 		memcpy(sdev->raw_gid, netdev->dev_addr,
@@ -364,39 +362,6 @@ error:
 	return NULL;
 }
 
-/*
- * Network link becomes unavailable. Mark all
- * affected QP's accordingly.
- */
-static void siw_netdev_down(struct work_struct *work)
-{
-	struct siw_device *sdev =
-		container_of(work, struct siw_device, netdev_down);
-
-	struct siw_qp_attrs qp_attrs;
-	struct list_head *pos, *tmp;
-
-	memset(&qp_attrs, 0, sizeof(qp_attrs));
-	qp_attrs.state = SIW_QP_STATE_ERROR;
-
-	list_for_each_safe(pos, tmp, &sdev->qp_list) {
-		struct siw_qp *qp = list_entry(pos, struct siw_qp, devq);
-
-		down_write(&qp->state_lock);
-		WARN_ON(siw_qp_modify(qp, &qp_attrs, SIW_QP_ATTR_STATE));
-		up_write(&qp->state_lock);
-	}
-	ib_device_put(&sdev->base_dev);
-}
-
-static void siw_device_goes_down(struct siw_device *sdev)
-{
-	if (ib_device_try_get(&sdev->base_dev)) {
-		INIT_WORK(&sdev->netdev_down, siw_netdev_down);
-		schedule_work(&sdev->netdev_down);
-	}
-}
-
 static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
 			    void *arg)
 {
@@ -413,20 +378,6 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
 	sdev = to_siw_dev(base_dev);
 
 	switch (event) {
-	case NETDEV_UP:
-		sdev->state = IB_PORT_ACTIVE;
-		siw_port_event(sdev, 1, IB_EVENT_PORT_ACTIVE);
-		break;
-
-	case NETDEV_GOING_DOWN:
-		siw_device_goes_down(sdev);
-		break;
-
-	case NETDEV_DOWN:
-		sdev->state = IB_PORT_DOWN;
-		siw_port_event(sdev, 1, IB_EVENT_PORT_ERR);
-		break;
-
 	case NETDEV_REGISTER:
 		/*
 		 * Device registration now handled only by
@@ -444,12 +395,8 @@ static int siw_netdev_event(struct notifier_block *nb, unsigned long event,
 		siw_port_event(sdev, 1, IB_EVENT_LID_CHANGE);
 		break;
 	/*
-	 * Todo: Below netdev events are currently not handled.
+	 * All other events are not handled
 	 */
-	case NETDEV_CHANGEMTU:
-	case NETDEV_CHANGE:
-		break;
-
 	default:
 		break;
 	}
@@ -479,12 +426,7 @@ static int siw_newlink(const char *basedev_name, struct net_device *netdev)
 	sdev = siw_device_create(netdev);
 	if (sdev) {
 		dev_dbg(&netdev->dev, "siw: new device\n");
-
-		if (netif_running(netdev) && netif_carrier_ok(netdev))
-			sdev->state = IB_PORT_ACTIVE;
-		else
-			sdev->state = IB_PORT_DOWN;
-
+		ib_mark_name_assigned_by_user(&sdev->base_dev);
 		rv = siw_device_register(sdev, basedev_name);
 		if (rv)
 			ib_dealloc_device(&sdev->base_dev);
@@ -524,20 +466,7 @@ static __init int siw_init_module(void)
 		rv = -ENOMEM;
 		goto out_error;
 	}
-	/*
-	 * Locate CRC32 algorithm. If unsuccessful, fail
-	 * loading siw only, if CRC is required.
-	 */
-	siw_crypto_shash = crypto_alloc_shash("crc32c", 0, 0);
-	if (IS_ERR(siw_crypto_shash)) {
-		pr_info("siw: Loading CRC32c failed: %ld\n",
-			PTR_ERR(siw_crypto_shash));
-		siw_crypto_shash = NULL;
-		if (mpa_crc_required) {
-			rv = -EOPNOTSUPP;
-			goto out_error;
-		}
-	}
+
 	rv = register_netdevice_notifier(&siw_netdev_nb);
 	if (rv)
 		goto out_error;
@@ -550,9 +479,6 @@ out_error:
 	siw_stop_tx_threads();
 
-	if (siw_crypto_shash)
-		crypto_free_shash(siw_crypto_shash);
-
 	pr_info("SoftIWARP attach failed. Error: %d\n", rv);
 
 	siw_cm_exit();
 
@@ -573,9 +499,6 @@ static void __exit siw_exit_module(void)
 
 	siw_destroy_cpulist(siw_cpu_info.num_nodes);
 
-	if (siw_crypto_shash)
-		crypto_free_shash(siw_crypto_shash);
-
 	pr_info("SoftiWARP detached\n");
 }
 
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index dcb963607c8b..d5ddeb17bd22 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -18,30 +18,6 @@
 #define SIW_STAG_MAX_INDEX	0x00ffffff
 
 /*
- * The code avoids special Stag of zero and tries to randomize
- * STag values between 1 and SIW_STAG_MAX_INDEX.
- */
-int siw_mem_add(struct siw_device *sdev, struct siw_mem *m)
-{
-	struct xa_limit limit = XA_LIMIT(1, SIW_STAG_MAX_INDEX);
-	u32 id, next;
-
-	get_random_bytes(&next, 4);
-	next &= SIW_STAG_MAX_INDEX;
-
-	if (xa_alloc_cyclic(&sdev->mem_xa, &id, m, limit, &next,
-			    GFP_KERNEL) < 0)
-		return -ENOMEM;
-
-	/* Set the STag index part */
-	m->stag = id << 8;
-
-	siw_dbg_mem(m, "new MEM object\n");
-
-	return 0;
-}
-
-/*
  * siw_mem_id2obj()
  *
  * resolves memory from stag given by id. might be called from:
@@ -181,10 +157,10 @@ int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
 	 */
 	if (addr < mem->va || addr + len > mem->va + mem->len) {
 		siw_dbg_pd(pd, "MEM interval len %d\n", len);
-		siw_dbg_pd(pd, "[0x%pK, 0x%pK] out of bounds\n",
+		siw_dbg_pd(pd, "[0x%p, 0x%p] out of bounds\n",
 			   (void *)(uintptr_t)addr,
 			   (void *)(uintptr_t)(addr + len));
-		siw_dbg_pd(pd, "[0x%pK, 0x%pK] STag=0x%08x\n",
+		siw_dbg_pd(pd, "[0x%p, 0x%p] STag=0x%08x\n",
 			   (void *)(uintptr_t)mem->va,
 			   (void *)(uintptr_t)(mem->va + mem->len),
 			   mem->stag);
diff --git a/drivers/infiniband/sw/siw/siw_mem.h b/drivers/infiniband/sw/siw/siw_mem.h
index e74cfcd6dbc1..8e769d30e2ac 100644
--- a/drivers/infiniband/sw/siw/siw_mem.h
+++ b/drivers/infiniband/sw/siw/siw_mem.h
@@ -12,7 +12,6 @@ void siw_umem_release(struct siw_umem *umem);
 struct siw_pbl *siw_pbl_alloc(u32 num_buf);
 dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
 struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index);
-int siw_mem_add(struct siw_device *sdev, struct siw_mem *m);
 int siw_invalidate_stag(struct ib_pd *pd, u32 stag);
 int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
 		  enum ib_access_flags perms, int len);
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index da92cfa2073d..c1e6e7d6e32f 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -226,33 +226,6 @@ static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size)
 	return 0;
 }
 
-static int siw_qp_enable_crc(struct siw_qp *qp)
-{
-	struct siw_rx_stream *c_rx = &qp->rx_stream;
-	struct siw_iwarp_tx *c_tx = &qp->tx_ctx;
-	int size;
-
-	if (siw_crypto_shash == NULL)
-		return -ENOENT;
-
-	size = crypto_shash_descsize(siw_crypto_shash) +
-		sizeof(struct shash_desc);
-
-	c_tx->mpa_crc_hd = kzalloc(size, GFP_KERNEL);
-	c_rx->mpa_crc_hd = kzalloc(size, GFP_KERNEL);
-	if (!c_tx->mpa_crc_hd || !c_rx->mpa_crc_hd) {
-		kfree(c_tx->mpa_crc_hd);
-		kfree(c_rx->mpa_crc_hd);
-		c_tx->mpa_crc_hd = NULL;
-		c_rx->mpa_crc_hd = NULL;
-		return -ENOMEM;
-	}
-	c_tx->mpa_crc_hd->tfm = siw_crypto_shash;
-	c_rx->mpa_crc_hd->tfm = siw_crypto_shash;
-
-	return 0;
-}
-
 /*
  * Send a non signalled READ or WRITE to peer side as negotiated
  * with MPAv2 P2P setup protocol. The work request is only created
@@ -583,20 +556,15 @@ void siw_send_terminate(struct siw_qp *qp)
 	term->ctrl.mpa_len =
 		cpu_to_be16(len_terminate - (MPA_HDR_SIZE + MPA_CRC_SIZE));
 
-	if (qp->tx_ctx.mpa_crc_hd) {
-		crypto_shash_init(qp->tx_ctx.mpa_crc_hd);
-		if (crypto_shash_update(qp->tx_ctx.mpa_crc_hd,
-					(u8 *)iov[0].iov_base,
-					iov[0].iov_len))
-			goto out;
-
+	if (qp->tx_ctx.mpa_crc_enabled) {
+		siw_crc_init(&qp->tx_ctx.mpa_crc);
+		siw_crc_update(&qp->tx_ctx.mpa_crc,
+			       iov[0].iov_base, iov[0].iov_len);
 		if (num_frags == 3) {
-			if (crypto_shash_update(qp->tx_ctx.mpa_crc_hd,
-						(u8 *)iov[1].iov_base,
-						iov[1].iov_len))
-				goto out;
+			siw_crc_update(&qp->tx_ctx.mpa_crc,
+				       iov[1].iov_base, iov[1].iov_len);
 		}
-		crypto_shash_final(qp->tx_ctx.mpa_crc_hd, (u8 *)&crc);
+		siw_crc_final(&qp->tx_ctx.mpa_crc, (u8 *)&crc);
 	}
 
 	rv = kernel_sendmsg(s, &msg, iov, num_frags, len_terminate);
@@ -604,7 +572,6 @@
 		rv == len_terminate ? "success" : "failure",
 		__rdmap_term_layer(term), __rdmap_term_etype(term),
 		__rdmap_term_ecode(term), rv);
-out:
 	kfree(term);
 	kfree(err_hdr);
 }
@@ -643,9 +610,10 @@ static int siw_qp_nextstate_from_idle(struct siw_qp *qp,
 	switch (attrs->state) {
 	case SIW_QP_STATE_RTS:
 		if (attrs->flags & SIW_MPA_CRC) {
-			rv = siw_qp_enable_crc(qp);
-			if (rv)
-				break;
+			siw_crc_init(&qp->tx_ctx.mpa_crc);
+			qp->tx_ctx.mpa_crc_enabled = true;
+			siw_crc_init(&qp->rx_stream.mpa_crc);
+			qp->rx_stream.mpa_crc_enabled = true;
 		}
 		if (!(mask & SIW_QP_ATTR_LLP_HANDLE)) {
 			siw_dbg_qp(qp, "no socket\n");
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index ed4fc39718b4..a10820e33887 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -38,7 +38,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
 
 		p = siw_get_upage(umem, dest_addr);
 		if (unlikely(!p)) {
-			pr_warn("siw: %s: [QP %u]: bogus addr: %pK, %pK\n",
+			pr_warn("siw: %s: [QP %u]: bogus addr: %p, %p\n",
 				__func__, qp_id(rx_qp(srx)),
 				(void *)(uintptr_t)dest_addr,
 				(void *)(uintptr_t)umem->fp_addr);
@@ -51,7 +51,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
 		pg_off = dest_addr & ~PAGE_MASK;
 		bytes = min(len, (int)PAGE_SIZE - pg_off);
 
-		siw_dbg_qp(rx_qp(srx), "page %pK, bytes=%u\n", p, bytes);
+		siw_dbg_qp(rx_qp(srx), "page %p, bytes=%u\n", p, bytes);
 
 		dest = kmap_atomic(p);
 		rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off,
@@ -67,10 +67,10 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
 			return -EFAULT;
 		}
 
-		if (srx->mpa_crc_hd) {
+		if (srx->mpa_crc_enabled) {
 			if (rdma_is_kernel_res(&rx_qp(srx)->base_qp.res)) {
-				crypto_shash_update(srx->mpa_crc_hd,
-						    (u8 *)(dest + pg_off), bytes);
+				siw_crc_update(&srx->mpa_crc, dest + pg_off,
+					       bytes);
 				kunmap_atomic(dest);
 			} else {
 				kunmap_atomic(dest);
@@ -105,17 +105,17 @@ static int siw_rx_kva(struct siw_rx_stream *srx, void *kva, int len)
 {
 	int rv;
 
-	siw_dbg_qp(rx_qp(srx), "kva: 0x%pK, len: %u\n", kva, len);
+	siw_dbg_qp(rx_qp(srx), "kva: 0x%p, len: %u\n", kva, len);
 
 	rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len);
 	if (unlikely(rv)) {
-		pr_warn("siw: [QP %u]: %s, len %d, kva 0x%pK, rv %d\n",
+		pr_warn("siw: [QP %u]: %s, len %d, kva 0x%p, rv %d\n",
 			qp_id(rx_qp(srx)), __func__, len, kva, rv);
 
 		return rv;
 	}
-	if (srx->mpa_crc_hd)
-		crypto_shash_update(srx->mpa_crc_hd, (u8 *)kva, len);
+	if (srx->mpa_crc_enabled)
+		siw_crc_update(&srx->mpa_crc, kva, len);
 
 	srx->skb_offset += len;
 	srx->skb_copied += len;
@@ -966,16 +966,16 @@ static int siw_get_trailer(struct siw_qp *qp, struct siw_rx_stream *srx)
 	if (srx->fpdu_part_rem)
 		return -EAGAIN;
 
-	if (!srx->mpa_crc_hd)
+	if (!srx->mpa_crc_enabled)
 		return 0;
 
 	if (srx->pad)
-		crypto_shash_update(srx->mpa_crc_hd, tbuf, srx->pad);
+		siw_crc_update(&srx->mpa_crc, tbuf, srx->pad);
 	/*
 	 * CRC32 is computed, transmitted and received directly in NBO,
 	 * so there's never a reason to convert byte order.
 	 */
-	crypto_shash_final(srx->mpa_crc_hd, (u8 *)&crc_own);
+	siw_crc_final(&srx->mpa_crc, (u8 *)&crc_own);
 	crc_in = (__force __wsum)srx->trailer.crc;
 
 	if (unlikely(crc_in != crc_own)) {
@@ -1093,13 +1093,12 @@ static int siw_get_hdr(struct siw_rx_stream *srx)
 	 * (tagged/untagged). E.g., a WRITE can get intersected by a SEND,
 	 * but not by a READ RESPONSE etc.
	 */
-	if (srx->mpa_crc_hd) {
+	if (srx->mpa_crc_enabled) {
 		/*
 		 * Restart CRC computation
 		 */
-		crypto_shash_init(srx->mpa_crc_hd);
-		crypto_shash_update(srx->mpa_crc_hd, (u8 *)c_hdr,
-				    srx->fpdu_part_rcvd);
+		siw_crc_init(&srx->mpa_crc);
+		siw_crc_update(&srx->mpa_crc, c_hdr, srx->fpdu_part_rcvd);
 	}
 	if (frx->more_ddp_segs) {
 		frx->first_ddp_seg = 0;
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 64ad9e0895bd..6432bce7d083 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -248,10 +248,8 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
 		/*
 		 * Do complete CRC if enabled and short packet
 		 */
-		if (c_tx->mpa_crc_hd &&
-		    crypto_shash_digest(c_tx->mpa_crc_hd, (u8 *)&c_tx->pkt,
-					c_tx->ctrl_len, (u8 *)crc) != 0)
-			return -EINVAL;
+		if (c_tx->mpa_crc_enabled)
+			siw_crc_oneshot(&c_tx->pkt, c_tx->ctrl_len, (u8 *)crc);
 		c_tx->ctrl_len += MPA_CRC_SIZE;
 
 		return PKT_COMPLETE;
@@ -331,6 +329,8 @@ static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset,
 			msg.msg_flags &= ~MSG_MORE;
 
 		tcp_rate_check_app_limited(sk);
+		if (!sendpage_ok(page[i]))
+			msg.msg_flags &= ~MSG_SPLICE_PAGES;
 		bvec_set_page(&bvec, page[i], bytes, offset);
 		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, size);
 
@@ -480,9 +480,8 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 				iov[seg].iov_len = sge_len;
 
 				if (do_crc)
-					crypto_shash_update(c_tx->mpa_crc_hd,
-							    iov[seg].iov_base,
-							    sge_len);
+					siw_crc_update(&c_tx->mpa_crc,
+						       iov[seg].iov_base, sge_len);
 				sge_off += sge_len;
 				data_len -= sge_len;
 				seg++;
@@ -514,15 +513,14 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 					iov[seg].iov_len = plen;
 
 					if (do_crc)
-						crypto_shash_update(
-							c_tx->mpa_crc_hd,
+						siw_crc_update(
+							&c_tx->mpa_crc,
 							iov[seg].iov_base,
 							plen);
 				} else if (do_crc) {
 					kaddr = kmap_local_page(p);
-					crypto_shash_update(c_tx->mpa_crc_hd,
-							    kaddr + fp_off,
-							    plen);
+					siw_crc_update(&c_tx->mpa_crc,
+						       kaddr + fp_off, plen);
 					kunmap_local(kaddr);
 				}
 			} else {
@@ -534,10 +532,9 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 
 				page_array[seg] = ib_virt_dma_to_page(va);
 				if (do_crc)
-					crypto_shash_update(
-						c_tx->mpa_crc_hd,
-						ib_virt_dma_to_ptr(va),
-						plen);
+					siw_crc_update(&c_tx->mpa_crc,
+						       ib_virt_dma_to_ptr(va),
+						       plen);
 			}
 
 			sge_len -= plen;
@@ -574,14 +571,14 @@ sge_done:
 	if (c_tx->pad) {
 		*(u32 *)c_tx->trailer.pad = 0;
 		if (do_crc)
-			crypto_shash_update(c_tx->mpa_crc_hd,
-				(u8 *)&c_tx->trailer.crc - c_tx->pad,
-				c_tx->pad);
+			siw_crc_update(&c_tx->mpa_crc,
+				       (u8 *)&c_tx->trailer.crc - c_tx->pad,
+				       c_tx->pad);
 	}
-	if (!c_tx->mpa_crc_hd)
+	if (!c_tx->mpa_crc_enabled)
 		c_tx->trailer.crc = 0;
 	else if (do_crc)
-		crypto_shash_final(c_tx->mpa_crc_hd, (u8 *)&c_tx->trailer.crc);
+		siw_crc_final(&c_tx->mpa_crc, (u8 *)&c_tx->trailer.crc);
 
 	data_len = c_tx->bytes_unsent;
 
@@ -734,10 +731,9 @@ static void siw_prepare_fpdu(struct siw_qp *qp, struct siw_wqe *wqe)
 	/*
 	 * Init MPA CRC computation
 	 */
-	if (c_tx->mpa_crc_hd) {
-		crypto_shash_init(c_tx->mpa_crc_hd);
-		crypto_shash_update(c_tx->mpa_crc_hd, (u8 *)&c_tx->pkt,
-				    c_tx->ctrl_len);
+	if (c_tx->mpa_crc_enabled) {
+		siw_crc_init(&c_tx->mpa_crc);
+		siw_crc_update(&c_tx->mpa_crc, &c_tx->pkt, c_tx->ctrl_len);
 		c_tx->do_crc = 1;
 	}
 }
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index ecf0444666b4..2b2a7b8e93b0 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -171,21 +171,28 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
 int siw_query_port(struct ib_device *base_dev, u32 port,
 		   struct ib_port_attr *attr)
 {
-	struct siw_device *sdev = to_siw_dev(base_dev);
+	struct net_device *ndev;
 	int rv;
 
 	memset(attr, 0, sizeof(*attr));
 
 	rv = ib_get_eth_speed(base_dev, port, &attr->active_speed,
			 &attr->active_width);
+	if (rv)
+		return rv;
+
+	ndev = ib_device_get_netdev(base_dev, SIW_PORT);
+	if (!ndev)
+		return -ENODEV;
+
 	attr->gid_tbl_len = 1;
 	attr->max_msg_sz = -1;
-	attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
-	attr->active_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
-	attr->phys_state = sdev->state == IB_PORT_ACTIVE ?
+	attr->max_mtu = ib_mtu_int_to_enum(ndev->max_mtu);
+	attr->active_mtu = ib_mtu_int_to_enum(READ_ONCE(ndev->mtu));
+	attr->state = ib_get_curr_port_state(ndev);
+	attr->phys_state = attr->state == IB_PORT_ACTIVE ?
 		IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
 	attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
-	attr->state = sdev->state;
 	/*
 	 * All zero
 	 *
@@ -199,6 +206,7 @@ int siw_query_port(struct ib_device *base_dev, u32 port,
 	 * attr->subnet_timeout = 0;
 	 * attr->init_type_repy = 0;
 	 */
+	dev_put(ndev);
 	return rv;
 }
 
@@ -505,21 +513,24 @@ int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
 		 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
 {
 	struct siw_qp *qp;
-	struct siw_device *sdev;
+	struct net_device *ndev;
 
-	if (base_qp && qp_attr && qp_init_attr) {
+	if (base_qp && qp_attr && qp_init_attr)
 		qp = to_siw_qp(base_qp);
-		sdev = to_siw_dev(base_qp->device);
-	} else {
+	else
 		return -EINVAL;
-	}
+
+	ndev = ib_device_get_netdev(base_qp->device, SIW_PORT);
+	if (!ndev)
+		return -ENODEV;
+
 	qp_attr->qp_state = siw_qp_state_to_ib_qp_state[qp->attrs.state];
 	qp_attr->cap.max_inline_data = SIW_MAX_INLINE;
 	qp_attr->cap.max_send_wr = qp->attrs.sq_size;
 	qp_attr->cap.max_send_sge = qp->attrs.sq_max_sges;
 	qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
 	qp_attr->cap.max_recv_sge = qp->attrs.rq_max_sges;
-	qp_attr->path_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
+	qp_attr->path_mtu = ib_mtu_int_to_enum(READ_ONCE(ndev->mtu));
 	qp_attr->max_rd_atomic = qp->attrs.irq_size;
 	qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;
 
@@ -534,6 +545,7 @@ int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
 
 	qp_init_attr->cap = qp_attr->cap;
 
+	dev_put(ndev);
 	return 0;
 }
 
@@ -619,9 +631,6 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
 	}
 	up_write(&qp->state_lock);
 
-	kfree(qp->tx_ctx.mpa_crc_hd);
-	kfree(qp->rx_stream.mpa_crc_hd);
-
 	qp->scq = qp->rcq = NULL;
 
 	siw_qp_put(qp);
@@ -927,7 +936,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
 			rv = -EINVAL;
 			break;
 		}
-		siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
+		siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%p\n",
 			   sqe->opcode, sqe->flags,
 			   (void *)(uintptr_t)sqe->id);
 
@@ -1093,7 +1102,7 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
 		siw_dbg_qp(qp, "error %d\n", rv);
 		*bad_wr = wr;
 	}
-	return rv > 0 ? 0 : rv;
+	return rv;
 }
 
 int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
@@ -1124,12 +1133,13 @@ int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
 *
 * @base_cq: CQ as allocated by RDMA midlayer
 * @attr: Initial CQ attributes
- * @udata: relates to user context
+ * @attrs: uverbs bundle
 */
 
int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
-		  struct ib_udata *udata)
+		  struct uverbs_attr_bundle *attrs)
 {
+	struct ib_udata *udata = &attrs->driver_udata;
 	struct siw_device *sdev = to_siw_dev(base_cq->device);
 	struct siw_cq *cq = to_siw_cq(base_cq);
 	int rv, size = attr->cqe;
@@ -1322,7 +1332,7 @@ struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 	struct siw_device *sdev = to_siw_dev(pd->device);
 	int rv;
 
-	siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n",
+	siw_dbg_pd(pd, "start: 0x%p, va: 0x%p, len: %llu\n",
 		   (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
 		   (unsigned long long)len);
 
@@ -1515,7 +1525,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
 		mem->len = base_mr->length;
 		mem->va = base_mr->iova;
 		siw_dbg_mem(mem,
-			"%llu bytes, start 0x%pK, %u SLE to %u entries\n",
+			"%llu bytes, start 0x%p, %u SLE to %u entries\n",
 			mem->len, (void *)(uintptr_t)mem->va, num_sle,
 			pbl->num_buf);
 	}
diff --git a/drivers/infiniband/sw/siw/siw_verbs.h b/drivers/infiniband/sw/siw/siw_verbs.h
index 4b57a4fb7237..1f1a305540af 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.h
+++ b/drivers/infiniband/sw/siw/siw_verbs.h
@@ -43,7 +43,7 @@ int siw_get_port_immutable(struct ib_device *base_dev, u32 port,
 int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
 		     struct ib_udata *udata);
 int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
-		  struct ib_udata *udata);
+		  struct uverbs_attr_bundle *attrs);
 int siw_query_port(struct ib_device *base_dev, u32 port,
 		   struct ib_port_attr *attr);
 int siw_query_gid(struct ib_device *base_dev, u32 port, int idx,