Diffstat (limited to 'drivers/net/ethernet/qlogic/qed/qed_roce.c')
 -rw-r--r--   drivers/net/ethernet/qlogic/qed/qed_roce.c   390
1 file changed, 181 insertions(+), 209 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index fb7c2d1562ae..134ecfca96a3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -1,34 +1,9 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- *   copyright notice, this list of conditions and the following
- *   disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- *   copyright notice, this list of conditions and the following
- *   disclaimer in the documentation and /or other materials
- *   provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
+
 #include <linux/types.h>
 #include <asm/byteorder.h>
 #include <linux/bitops.h>
@@ -44,8 +19,10 @@
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
+#include <linux/if_vlan.h>
 #include "qed.h"
 #include "qed_cxt.h"
+#include "qed_dcbx.h"
 #include "qed_hsi.h"
 #include "qed_hw.h"
 #include "qed_init_ops.h"
@@ -60,26 +37,30 @@
 
 static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
 
-static int
-qed_roce_async_event(struct qed_hwfn *p_hwfn,
-                     u8 fw_event_code,
-                     u16 echo, union event_ring_data *data, u8 fw_return_code)
+static int qed_roce_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
+                                __le16 echo, union event_ring_data *data,
+                                u8 fw_return_code)
 {
+        struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
+        union rdma_eqe_data *rdata = &data->rdma_data;
+
         if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
-                u16 icid =
-                    (u16)le32_to_cpu(data->rdma_data.rdma_destroy_qp_data.cid);
+                u16 icid = (u16)le32_to_cpu(rdata->rdma_destroy_qp_data.cid);
 
                 /* icid release in this async event can occur only if the icid
                  * was offloaded to the FW. In case it wasn't offloaded this is
                  * handled in qed_roce_sp_destroy_qp.
                  */
                 qed_roce_free_real_icid(p_hwfn, icid);
-        } else {
-                struct qed_rdma_events *events = &p_hwfn->p_rdma_info->events;
+        } else if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY ||
+                   fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) {
+                u16 srq_id = (u16)le32_to_cpu(rdata->async_handle.lo);
 
-                events->affiliated_event(p_hwfn->p_rdma_info->events.context,
-                                         fw_event_code,
-                                         (void *)&data->rdma_data.async_handle);
+                events.affiliated_event(events.context, fw_event_code,
+                                        &srq_id);
+        } else {
+                events.affiliated_event(events.context, fw_event_code,
+                                        (void *)&rdata->async_handle);
         }
 
         return 0;
@@ -95,14 +76,21 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn)
         * We delay for a short while if an async destroy QP is still expected.
         * Beyond the added delay we clear the bitmap anyway.
         */
-        while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
+        while (!bitmap_empty(rcid_map->bitmap, rcid_map->max_count)) {
+                /* If the HW device is during recovery, all resources are
+                 * immediately reset without receiving a per-cid indication
+                 * from HW. In this case we don't expect the cid bitmap to be
+                 * cleared.
+                 */
+                if (p_hwfn->cdev->recov_in_prog)
+                        return;
+
                 msleep(100);
                 if (wait_count++ > 20) {
                         DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
                         break;
                 }
         }
-
         qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);
 }
 
@@ -129,26 +117,19 @@ static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
 
 static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
 {
-        enum roce_flavor flavor;
-
         switch (roce_mode) {
         case ROCE_V1:
-                flavor = PLAIN_ROCE;
-                break;
+                return PLAIN_ROCE;
         case ROCE_V2_IPV4:
-                flavor = RROCE_IPV4;
-                break;
+                return RROCE_IPV4;
         case ROCE_V2_IPV6:
-                flavor = ROCE_V2_IPV6;
-                break;
+                return RROCE_IPV6;
         default:
-                flavor = MAX_ROCE_MODE;
-                break;
+                return MAX_ROCE_FLAVOR;
         }
-        return flavor;
 }
 
-void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
+static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
 {
         spin_lock_bh(&p_hwfn->p_rdma_info->lock);
         qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
@@ -222,16 +203,36 @@ static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
         spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 }
 
+static u8 qed_roce_get_qp_tc(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+{
+        u8 pri, tc = 0;
+
+        if (qp->vlan_id) {
+                pri = (qp->vlan_id & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+                tc = qed_dcbx_get_priority_tc(p_hwfn, pri);
+        }
+
+        DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                   "qp icid %u tc: %u (vlan priority %s)\n",
+                   qp->icid, tc, qp->vlan_id ? "enabled" : "disabled");
+
+        return tc;
+}
+
 static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
                                         struct qed_rdma_qp *qp)
 {
         struct roce_create_qp_resp_ramrod_data *p_ramrod;
+        u16 regular_latency_queue, low_latency_queue;
         struct qed_sp_init_data init_data;
-        enum roce_flavor roce_flavor;
         struct qed_spq_entry *p_ent;
-        u16 regular_latency_queue;
         enum protocol_type proto;
+        u32 flags = 0;
         int rc;
+        u8 tc;
+
+        if (!qp->has_resp)
+                return 0;
 
         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
 
@@ -259,41 +260,34 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
         if (rc)
                 goto err;
 
-        p_ramrod = &p_ent->ramrod.roce_create_qp_resp;
-
-        p_ramrod->flags = 0;
+        SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR,
+                  qed_roce_mode_to_flavor(qp->roce_mode));
 
-        roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
-
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
+        SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
                   qp->incoming_rdma_read_en);
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
+        SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
                   qp->incoming_rdma_write_en);
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
+        SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
                   qp->incoming_atomic_en);
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
+        SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
                   qp->e2e_flow_control_en);
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
+        SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
+        SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
                   qp->fmr_and_reserved_lkey);
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
+        SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
                   qp->min_rnr_nak_timer);
 
+        SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG,
+                  qed_rdma_is_xrc_qp(qp));
+
+        p_ramrod = &p_ent->ramrod.roce_create_qp_resp;
+        p_ramrod->flags = cpu_to_le32(flags);
         p_ramrod->max_ird = qp->max_rd_atomic_resp;
         p_ramrod->traffic_class = qp->traffic_class_tos;
         p_ramrod->hop_limit = qp->hop_limit_ttl;
@@ -308,26 +302,32 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
         DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
         DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
         qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
-        p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
-        p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
-        p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
-        p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
+        p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi;
+        p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo;
+        p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
+        p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;
         p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
                                        qp->rq_cq_id);
-
-        regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
-
+        p_ramrod->xrc_domain = cpu_to_le16(qp->xrcd_id);
+
+        tc = qed_roce_get_qp_tc(p_hwfn, qp);
+        regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
+        low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
+        DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                   "qp icid %u pqs: regular_latency %u low_latency %u\n",
+                   qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
+                   low_latency_queue - CM_TX_PQ_BASE);
         p_ramrod->regular_latency_phy_queue =
             cpu_to_le16(regular_latency_queue);
         p_ramrod->low_latency_phy_queue =
-            cpu_to_le16(regular_latency_queue);
+            cpu_to_le16(low_latency_queue);
         p_ramrod->dpi = cpu_to_le16(qp->dpi);
 
         qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
         qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
 
-        p_ramrod->udp_src_port = qp->udp_src_port;
+        p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port);
         p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
         p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
         p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
@@ -336,11 +336,6 @@ static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
                                      qp->stats_queue;
 
         rc = qed_spq_post(p_hwfn, p_ent, NULL);
-
-        DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
-                   "rc = %d regular physical queue = 0x%x\n", rc,
-                   regular_latency_queue);
-
         if (rc)
                 goto err;
 
@@ -366,12 +361,16 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
                                         struct qed_rdma_qp *qp)
 {
         struct roce_create_qp_req_ramrod_data *p_ramrod;
+        u16 regular_latency_queue, low_latency_queue;
         struct qed_sp_init_data init_data;
-        enum roce_flavor roce_flavor;
         struct qed_spq_entry *p_ent;
-        u16 regular_latency_queue;
         enum protocol_type proto;
+        u16 flags = 0;
         int rc;
+        u8 tc;
+
+        if (!qp->has_req)
+                return 0;
 
         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
 
@@ -400,28 +399,30 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
         if (rc)
                 goto err;
 
-        p_ramrod = &p_ent->ramrod.roce_create_qp_req;
-
-        p_ramrod->flags = 0;
+        SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR,
+                  qed_roce_mode_to_flavor(qp->roce_mode));
 
-        roce_flavor = qed_roce_mode_to_flavor(qp->roce_mode);
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, roce_flavor);
-
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
+        SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
                   qp->fmr_and_reserved_lkey);
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);
+        SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP,
+                  qp->signal_all);
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
+        SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT,
+                  qp->retry_cnt);
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
+        SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
                   qp->rnr_retry_cnt);
 
+        SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG,
+                  qed_rdma_is_xrc_qp(qp));
+
+        p_ramrod = &p_ent->ramrod.roce_create_qp_req;
+        p_ramrod->flags = cpu_to_le16(flags);
+
+        SET_FIELD(p_ramrod->flags2, ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE,
+                  qp->edpm_mode);
+
         p_ramrod->max_ord = qp->max_rd_atomic_req;
         p_ramrod->traffic_class = qp->traffic_class_tos;
         p_ramrod->hop_limit = qp->hop_limit_ttl;
@@ -437,34 +438,36 @@ static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
         DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
         DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
         qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
-        p_ramrod->qp_handle_for_async.hi = cpu_to_le32(qp->qp_handle_async.hi);
-        p_ramrod->qp_handle_for_async.lo = cpu_to_le32(qp->qp_handle_async.lo);
-        p_ramrod->qp_handle_for_cqe.hi = cpu_to_le32(qp->qp_handle.hi);
-        p_ramrod->qp_handle_for_cqe.lo = cpu_to_le32(qp->qp_handle.lo);
+        p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi;
+        p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo;
+        p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
+        p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;
         p_ramrod->cq_cid =
             cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
 
-        regular_latency_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
-
+        tc = qed_roce_get_qp_tc(p_hwfn, qp);
+        regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
+        low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
+        DP_VERBOSE(p_hwfn, QED_MSG_SP,
+                   "qp icid %u pqs: regular_latency %u low_latency %u\n",
+                   qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
+                   low_latency_queue - CM_TX_PQ_BASE);
         p_ramrod->regular_latency_phy_queue =
             cpu_to_le16(regular_latency_queue);
         p_ramrod->low_latency_phy_queue =
-            cpu_to_le16(regular_latency_queue);
+            cpu_to_le16(low_latency_queue);
         p_ramrod->dpi = cpu_to_le16(qp->dpi);
 
         qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
         qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
 
-        p_ramrod->udp_src_port = qp->udp_src_port;
+        p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port);
         p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
         p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
                                      qp->stats_queue;
 
         rc = qed_spq_post(p_hwfn, p_ent, NULL);
-
-        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
-
         if (rc)
                 goto err;
 
@@ -491,8 +494,12 @@ static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
         struct roce_modify_qp_resp_ramrod_data *p_ramrod;
         struct qed_sp_init_data init_data;
         struct qed_spq_entry *p_ent;
+        u16 flags = 0;
         int rc;
 
+        if (!qp->has_resp)
+                return 0;
+
         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
 
         if (move_to_err && !qp->resp_offloaded)
@@ -512,53 +519,43 @@ static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
                 return rc;
         }
 
-        p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
-
-        p_ramrod->flags = 0;
+        SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG,
+                  !!move_to_err);
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
-
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
+        SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
                   qp->incoming_rdma_read_en);
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
+        SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
                   qp->incoming_rdma_write_en);
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
+        SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
                   qp->incoming_atomic_en);
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
+        SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
                   qp->e2e_flow_control_en);
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
+        SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
                   GET_FIELD(modify_flags,
                             QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
+        SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
                   GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
+        SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
                   GET_FIELD(modify_flags,
                             QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
+        SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
                   GET_FIELD(modify_flags,
                             QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
+        SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
                   GET_FIELD(modify_flags,
                             QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));
 
+        p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
+        p_ramrod->flags = cpu_to_le16(flags);
+
         p_ramrod->fields = 0;
         SET_FIELD(p_ramrod->fields,
                   ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
@@ -585,8 +582,12 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
         struct roce_modify_qp_req_ramrod_data *p_ramrod;
         struct qed_sp_init_data init_data;
         struct qed_spq_entry *p_ent;
+        u16 flags = 0;
         int rc;
 
+        if (!qp->has_req)
+                return 0;
+
         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
 
         if (move_to_err && !(qp->req_offloaded))
@@ -606,54 +607,44 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
                 return rc;
         }
 
-        p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
-
-        p_ramrod->flags = 0;
+        SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG,
+                  !!move_to_err);
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, move_to_err);
+        SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG,
+                  !!move_to_sqd);
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, move_to_sqd);
-
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
+        SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
                   qp->sqd_async);
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
+        SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
                   GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
+        SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
                   GET_FIELD(modify_flags,
                             QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
+        SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
                   GET_FIELD(modify_flags,
                            QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
+        SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
                   GET_FIELD(modify_flags,
                             QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
+        SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
                   GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));
 
-        SET_FIELD(p_ramrod->flags,
-                  ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
+        SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
                   GET_FIELD(modify_flags,
                             QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));
 
+        p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
+        p_ramrod->flags = cpu_to_le16(flags);
+
         p_ramrod->fields = 0;
         SET_FIELD(p_ramrod->fields,
                   ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
-
-        SET_FIELD(p_ramrod->fields,
-                  ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
+        SET_FIELD(p_ramrod->fields, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
                   qp->rnr_retry_cnt);
 
         p_ramrod->max_ord = qp->max_rd_atomic_req;
@@ -672,7 +663,6 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
 
 static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
                                             struct qed_rdma_qp *qp,
-                                            u32 *num_invalidated_mw,
                                             u32 *cq_prod)
 {
         struct roce_destroy_qp_resp_output_params *p_ramrod_res;
@@ -682,9 +672,12 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
         dma_addr_t ramrod_res_phys;
         int rc;
 
-        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+        if (!qp->has_resp) {
+                *cq_prod = 0;
+                return 0;
+        }
 
-        *num_invalidated_mw = 0;
+        DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
         *cq_prod = qp->cq_prod;
 
         if (!qp->resp_offloaded) {
@@ -715,15 +708,16 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
 
         p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;
 
-        p_ramrod_res = (struct roce_destroy_qp_resp_output_params *)
-            dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
-                               &ramrod_res_phys, GFP_KERNEL);
+        p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                          sizeof(*p_ramrod_res),
                                          &ramrod_res_phys, GFP_KERNEL);
 
         if (!p_ramrod_res) {
                 rc = -ENOMEM;
                 DP_NOTICE(p_hwfn,
                           "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
                           rc);
+                qed_sp_destroy_request(p_hwfn, p_ent);
                 return rc;
         }
 
@@ -733,7 +727,6 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
         if (rc)
                 goto err;
 
-        *num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
         *cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
         qp->cq_prod = *cq_prod;
 
@@ -755,8 +748,7 @@ err:
 }
 
 static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
-                                            struct qed_rdma_qp *qp,
-                                            u32 *num_bound_mw)
+                                            struct qed_rdma_qp *qp)
 {
         struct roce_destroy_qp_req_output_params *p_ramrod_res;
         struct roce_destroy_qp_req_ramrod_data *p_ramrod;
@@ -765,13 +757,15 @@ static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
         dma_addr_t ramrod_res_phys;
         int rc = -ENOMEM;
 
+        if (!qp->has_req)
+                return 0;
+
         DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
 
         if (!qp->req_offloaded)
                 return 0;
 
-        p_ramrod_res = (struct roce_destroy_qp_req_output_params *)
-            dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+        p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                sizeof(*p_ramrod_res),
                                &ramrod_res_phys, GFP_KERNEL);
         if (!p_ramrod_res) {
@@ -798,8 +792,6 @@ static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
         if (rc)
                 goto err;
 
-        *num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw);
-
         /* Free ORQ - only if ramrod succeeded, in case FW is still using it */
         dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                           qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
@@ -848,15 +840,15 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
 
         if (!(qp->resp_offloaded)) {
                 DP_NOTICE(p_hwfn,
-                          "The responder's qp should be offloded before requester's\n");
+                          "The responder's qp should be offloaded before requester's\n");
                 return -EINVAL;
         }
 
         /* Send a query responder ramrod to FW to get RQ-PSN and state */
-        p_resp_ramrod_res = (struct roce_query_qp_resp_output_params *)
-            dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
-                               sizeof(*p_resp_ramrod_res),
-                               &resp_ramrod_res_phys, GFP_KERNEL);
+        p_resp_ramrod_res =
+            dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                               sizeof(*p_resp_ramrod_res),
                               &resp_ramrod_res_phys, GFP_KERNEL);
         if (!p_resp_ramrod_res) {
                 DP_NOTICE(p_hwfn,
                           "qed query qp failed: cannot allocate memory (ramrod)\n");
@@ -881,7 +873,7 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
                 goto err_resp;
 
         out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
-        rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
+        rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->flags),
                                  ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
 
         dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
@@ -901,8 +893,7 @@
         }
 
         /* Send a query requester ramrod to FW to get SQ-PSN and state */
-        p_req_ramrod_res = (struct roce_query_qp_req_output_params *)
-            dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+        p_req_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                               sizeof(*p_req_ramrod_res),
                                               &req_ramrod_res_phys,
                                               GFP_KERNEL);
@@ -959,8 +950,6 @@ err_resp:
 
 int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 {
-        u32 num_invalidated_mw = 0;
-        u32 num_bound_mw = 0;
         u32 cq_prod;
         int rc;
 
@@ -975,22 +964,14 @@ int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 
         if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
                 rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
-                                                      &num_invalidated_mw,
                                                       &cq_prod);
                 if (rc)
                         return rc;
 
                 /* Send destroy requester ramrod */
-                rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
-                                                      &num_bound_mw);
+                rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
                 if (rc)
                         return rc;
-
-                if (num_invalidated_mw != num_bound_mw) {
-                        DP_NOTICE(p_hwfn,
-                                  "number of invalidate memory windows is different from bounded ones\n");
-                        return -EINVAL;
-                }
         }
 
         return 0;
@@ -1001,7 +982,6 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
                        enum qed_roce_qp_state prev_state,
                        struct qed_rdma_modify_qp_in_params *params)
 {
-        u32 num_invalidated_mw = 0, num_bound_mw = 0;
         int rc = 0;
 
         /* Perform additional operations according to the current state and the
@@ -1081,7 +1061,6 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
 
                 /* Send destroy responder ramrod */
                 rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
-                                                      &num_invalidated_mw,
                                                       &cq_prod);
 
                 if (rc)
@@ -1089,14 +1068,7 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
 
                 qp->cq_prod = cq_prod;
 
-                rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
-                                                      &num_bound_mw);
-
-                if (num_invalidated_mw != num_bound_mw) {
-                        DP_NOTICE(p_hwfn,
-                                  "number of invalidate memory windows is different from bounded ones\n");
-                        return -EINVAL;
-                }
+                rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
         } else {
                 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
         }
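Note on the recurring flags rework above: instead of OR-ing bits directly into the little-endian ramrod field, the patched code accumulates them in a CPU-byte-order local ("u16 flags = 0;" / "u32 flags = 0;") and converts once with cpu_to_le16()/cpu_to_le32() at the firmware boundary, which is correct on big-endian hosts as well. Below is a minimal userspace sketch of that pattern; the DEMO_* masks and the simplified SET_FIELD macro are stand-ins, not the driver's actual HSI definitions.

    /* build: gcc -Wall flags_demo.c -o flags_demo (glibc htole32) */
    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_ROCE_FLAVOR_MASK  0x3
    #define DEMO_ROCE_FLAVOR_SHIFT 0
    #define DEMO_RD_EN_MASK        0x1
    #define DEMO_RD_EN_SHIFT       2

    /* simplified stand-in for the driver's SET_FIELD() helper */
    #define SET_FIELD(val, name, field)                                  \
            do {                                                         \
                    (val) &= ~((uint32_t)(name##_MASK) << (name##_SHIFT)); \
                    (val) |= ((uint32_t)(field)) << (name##_SHIFT);      \
            } while (0)

    int main(void)
    {
            uint32_t flags = 0; /* accumulate in CPU byte order */

            SET_FIELD(flags, DEMO_ROCE_FLAVOR, 2); /* e.g. RRoCE over IPv4 */
            SET_FIELD(flags, DEMO_RD_EN, 1);

            /* single endianness conversion at the "firmware" boundary */
            uint32_t wire = htole32(flags);

            printf("cpu=0x%08x wire(le)=0x%08x\n",
                   (unsigned)flags, (unsigned)wire);
            return 0;
    }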
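The new qed_roce_get_qp_tc() helper maps the QP's VLAN Priority Code Point (the top three bits of the stored tag) to a traffic class via DCBX, which then selects the offload/low-latency physical queues. A sketch of the same lookup follows; the dcbx_prio2tc table is a made-up stand-in for qed_dcbx_get_priority_tc(), while VLAN_PRIO_MASK/VLAN_PRIO_SHIFT match linux/if_vlan.h.

    #include <stdint.h>
    #include <stdio.h>

    #define VLAN_PRIO_MASK  0xe000 /* PCP, bits 15:13 of the VLAN TCI */
    #define VLAN_PRIO_SHIFT 13

    /* hypothetical priority-to-TC table; in qed this comes from DCBX */
    static const uint8_t dcbx_prio2tc[8] = { 0, 0, 1, 1, 2, 2, 3, 3 };

    static uint8_t qp_tc(uint16_t vlan_tci)
    {
            uint8_t pri, tc = 0;

            if (vlan_tci) { /* PCP is only meaningful when a tag is set */
                    pri = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                    tc = dcbx_prio2tc[pri];
            }
            return tc;
    }

    int main(void)
    {
            /* VID 100 with PCP 5 -> TC 2 with the table above */
            printf("tc = %u\n", qp_tc((5 << VLAN_PRIO_SHIFT) | 100));
            return 0;
    }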
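Finally, the qed_roce_stop() hunk changes the cid drain loop in two ways: bitmap_weight() becomes the cheaper !bitmap_empty() test, and the wait bails out early when HW recovery is in progress, since no per-cid completions will arrive then. The bounded-wait shape, sketched in userspace with qed's helpers replaced by a plain flag and a simulated counter:

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static int busy_cids = 3;    /* stand-in for the rcid bitmap weight */
    static bool recov_in_prog;   /* stand-in for cdev->recov_in_prog */

    static void wait_for_cids(void)
    {
            int wait_count = 0;

            while (busy_cids > 0) {        /* i.e. !bitmap_empty(...) */
                    /* under recovery, resources are reset at once and
                     * the bitmap will never drain; give up immediately
                     */
                    if (recov_in_prog)
                            return;

                    usleep(100 * 1000);    /* msleep(100) */
                    busy_cids--;           /* simulate async completions */

                    if (wait_count++ > 20) {
                            fprintf(stderr, "cid bitmap wait timed out\n");
                            break;
                    }
            }
    }

    int main(void)
    {
            wait_for_cids();
            return 0;
    }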
