Diffstat (limited to 'drivers/net/ethernet/qlogic/qed/qed_cxt.c')
| -rw-r--r-- | drivers/net/ethernet/qlogic/qed/qed_cxt.c | 170 |
1 file changed, 92 insertions, 78 deletions
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 7b76667acaba..33f4f58ee51c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -1,33 +1,7 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
 /* QLogic qed NIC Driver
  * Copyright (c) 2015-2017 QLogic Corporation
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- *   copyright notice, this list of conditions and the following
- *   disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- *   copyright notice, this list of conditions and the following
- *   disclaimer in the documentation and /or other materials
- *   provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019-2020 Marvell International Ltd.
  */
 
 #include <linux/types.h>
@@ -80,27 +54,27 @@
 
 /* connection context union */
 union conn_context {
-	struct e4_core_conn_context core_ctx;
-	struct e4_eth_conn_context eth_ctx;
-	struct e4_iscsi_conn_context iscsi_ctx;
-	struct e4_fcoe_conn_context fcoe_ctx;
-	struct e4_roce_conn_context roce_ctx;
+	struct core_conn_context core_ctx;
+	struct eth_conn_context eth_ctx;
+	struct iscsi_conn_context iscsi_ctx;
+	struct fcoe_conn_context fcoe_ctx;
+	struct roce_conn_context roce_ctx;
 };
 
 /* TYPE-0 task context - iSCSI, FCOE */
 union type0_task_context {
-	struct e4_iscsi_task_context iscsi_ctx;
-	struct e4_fcoe_task_context fcoe_ctx;
+	struct iscsi_task_context iscsi_ctx;
+	struct fcoe_task_context fcoe_ctx;
 };
 
 /* TYPE-1 task context - ROCE */
 union type1_task_context {
-	struct e4_rdma_task_context roce_ctx;
+	struct rdma_task_context roce_ctx;
 };
 
 struct src_ent {
-	u8 opaque[56];
-	u64 next;
+	__u8 opaque[56];
+	__be64 next;
 };
 
 #define CDUT_SEG_ALIGNMET 3	/* in 4k chunks */
@@ -120,14 +94,14 @@ struct src_ent {
 
 static bool src_proto(enum protocol_type type)
 {
-	return type == PROTOCOLID_ISCSI ||
+	return type == PROTOCOLID_TCP_ULP ||
 	       type == PROTOCOLID_FCOE ||
 	       type == PROTOCOLID_IWARP;
 }
 
 static bool tm_cid_proto(enum protocol_type type)
 {
-	return type == PROTOCOLID_ISCSI ||
+	return type == PROTOCOLID_TCP_ULP ||
 	       type == PROTOCOLID_FCOE ||
 	       type == PROTOCOLID_ROCE ||
 	       type == PROTOCOLID_IWARP;
@@ -271,7 +245,7 @@ static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
 		vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
 	}
 
-	iids->vf_cids += vf_cids * p_mngr->vf_count;
+	iids->vf_cids = vf_cids;
 	iids->tids += vf_tids * p_mngr->vf_count;
 
 	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
@@ -465,6 +439,20 @@ static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk)
 	return p_blk;
 }
 
+static void qed_cxt_ilt_blk_reset(struct qed_hwfn *p_hwfn)
+{
+	struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
+	u32 cli_idx, blk_idx;
+
+	for (cli_idx = 0; cli_idx < MAX_ILT_CLIENTS; cli_idx++) {
+		for (blk_idx = 0; blk_idx < ILT_CLI_PF_BLOCKS; blk_idx++)
+			clients[cli_idx].pf_blks[blk_idx].total_size = 0;
+
+		for (blk_idx = 0; blk_idx < ILT_CLI_VF_BLOCKS; blk_idx++)
+			clients[cli_idx].vf_blks[blk_idx].total_size = 0;
+	}
+}
+
 int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
 {
 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
@@ -484,6 +472,11 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
 
 	p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
 
+	/* Reset all ILT blocks at the beginning of ILT computing in order
+	 * to prevent memory allocation for irrelevant blocks afterwards.
+	 */
+	qed_cxt_ilt_blk_reset(p_hwfn);
+
 	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
 		   "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
 		   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
@@ -940,6 +933,7 @@ static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
 		p_dma->virt_addr = NULL;
 	}
 	kfree(p_mngr->ilt_shadow);
+	p_mngr->ilt_shadow = NULL;
 }
 
 static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
@@ -1043,12 +1037,12 @@ static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
 	u32 type, vf;
 
 	for (type = 0; type < MAX_CONN_TYPES; type++) {
-		kfree(p_mngr->acquired[type].cid_map);
+		bitmap_free(p_mngr->acquired[type].cid_map);
 		p_mngr->acquired[type].max_count = 0;
 		p_mngr->acquired[type].start_cid = 0;
 
 		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
-			kfree(p_mngr->acquired_vf[type][vf].cid_map);
+			bitmap_free(p_mngr->acquired_vf[type][vf].cid_map);
 			p_mngr->acquired_vf[type][vf].max_count = 0;
 			p_mngr->acquired_vf[type][vf].start_cid = 0;
 		}
@@ -1061,15 +1055,10 @@
 qed_cid_map_alloc_single(struct qed_hwfn *p_hwfn, u32 cid_start, u32 cid_count,
 			 struct qed_cid_acquired_map *p_map)
 {
-	u32 size;
-
 	if (!cid_count)
 		return 0;
 
-	size = DIV_ROUND_UP(cid_count,
-			    sizeof(unsigned long) * BITS_PER_BYTE) *
-	       sizeof(unsigned long);
-	p_map->cid_map = kzalloc(size, GFP_KERNEL);
+	p_map->cid_map = bitmap_zalloc(cid_count, GFP_KERNEL);
 	if (!p_map->cid_map)
 		return -ENOMEM;
 
@@ -1223,7 +1212,6 @@ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
 	struct qed_cid_acquired_map *p_map;
 	struct qed_conn_type_cfg *p_cfg;
 	int type;
-	u32 len;
 
 	/* Reset acquired cids */
 	for (type = 0; type < MAX_CONN_TYPES; type++) {
@@ -1232,11 +1220,7 @@ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
 		p_cfg = &p_mngr->conn_cfg[type];
 		if (p_cfg->cid_count) {
 			p_map = &p_mngr->acquired[type];
-			len = DIV_ROUND_UP(p_map->max_count,
-					   sizeof(unsigned long) *
-					   BITS_PER_BYTE) *
-			      sizeof(unsigned long);
-			memset(p_map->cid_map, 0, len);
+			bitmap_zero(p_map->cid_map, p_map->max_count);
 		}
 
 		if (!p_cfg->cids_per_vf)
@@ -1244,11 +1228,7 @@ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
 
 		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
 			p_map = &p_mngr->acquired_vf[type][vf];
-			len = DIV_ROUND_UP(p_map->max_count,
-					   sizeof(unsigned long) *
-					   BITS_PER_BYTE) *
-			      sizeof(unsigned long);
-			memset(p_map->cid_map, 0, len);
+			bitmap_zero(p_map->cid_map, p_map->max_count);
 		}
 	}
 }
@@ -1654,9 +1634,9 @@ static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
 		     ilog2(rounded_conn_num));
 
 	STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
-			 p_hwfn->p_cxt_mngr->first_free);
+			 p_hwfn->p_cxt_mngr->src_t2.first_free);
 	STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
-			 p_hwfn->p_cxt_mngr->last_free);
+			 p_hwfn->p_cxt_mngr->src_t2.last_free);
 }
 
 /* Timers PF */
@@ -1989,8 +1969,8 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
 	enum protocol_type proto;
 
 	if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) {
-		DP_NOTICE(p_hwfn,
-			  "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
+		DP_VERBOSE(p_hwfn, QED_MSG_SP,
+			   "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
 		p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE;
 	}
 
@@ -2053,7 +2033,7 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
 				       rdma_tasks);
 		/* no need for break since RoCE coexist with Ethernet */
 	}
-	/* fall through */
+	fallthrough;
 	case QED_PCI_ETH:
 	{
 		struct qed_eth_pf_params *p_params =
@@ -2079,7 +2059,6 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
 						    PROTOCOLID_FCOE,
 						    p_params->num_cons,
 						    0);
-
 			qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE,
 						    QED_CXT_FCOE_TID_SEG, 0,
 						    p_params->num_tasks, true);
@@ -2097,13 +2076,12 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
 
 		if (p_params->num_cons && p_params->num_tasks) {
 			qed_cxt_set_proto_cid_count(p_hwfn,
-						    PROTOCOLID_ISCSI,
+						    PROTOCOLID_TCP_ULP,
 						    p_params->num_cons,
 						    0);
-
 			qed_cxt_set_proto_tid_count(p_hwfn,
-						    PROTOCOLID_ISCSI,
-						    QED_CXT_ISCSI_TID_SEG,
+						    PROTOCOLID_TCP_ULP,
+						    QED_CXT_TCP_ULP_TID_SEG,
 						    0,
 						    p_params->num_tasks,
 						    true);
@@ -2113,6 +2091,29 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
 		}
 		break;
 	}
+	case QED_PCI_NVMETCP:
+	{
+		struct qed_nvmetcp_pf_params *p_params;
+
+		p_params = &p_hwfn->pf_params.nvmetcp_pf_params;
+
+		if (p_params->num_cons && p_params->num_tasks) {
+			qed_cxt_set_proto_cid_count(p_hwfn,
+						    PROTOCOLID_TCP_ULP,
+						    p_params->num_cons,
+						    0);
+			qed_cxt_set_proto_tid_count(p_hwfn,
+						    PROTOCOLID_TCP_ULP,
+						    QED_CXT_TCP_ULP_TID_SEG,
+						    0,
+						    p_params->num_tasks,
+						    true);
+		} else {
+			DP_INFO(p_hwfn->cdev,
+				"NvmeTCP personality used without setting params!\n");
+		}
+		break;
+	}
 	default:
 		return -EINVAL;
 	}
@@ -2136,8 +2137,9 @@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
 		seg = QED_CXT_FCOE_TID_SEG;
 		break;
 	case QED_PCI_ISCSI:
-		proto = PROTOCOLID_ISCSI;
-		seg = QED_CXT_ISCSI_TID_SEG;
+	case QED_PCI_NVMETCP:
+		proto = PROTOCOLID_TCP_ULP;
+		seg = QED_CXT_TCP_ULP_TID_SEG;
 		break;
 	default:
 		return -EINVAL;
@@ -2177,12 +2179,14 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
 			  enum qed_cxt_elem_type elem_type, u32 iid)
 {
 	u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
+	struct tdif_task_context *tdif_context;
 	struct qed_ilt_client_cfg *p_cli;
 	struct qed_ilt_cli_blk *p_blk;
 	struct qed_ptt *p_ptt;
 	dma_addr_t p_phys;
 	u64 ilt_hw_entry;
 	void *p_virt;
+	u32 flags1;
 	int rc = 0;
 
 	switch (elem_type) {
@@ -2209,8 +2213,8 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
 		p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
 		break;
 	default:
-		DP_NOTICE(p_hwfn, "-EINVALID elem type = %d", elem_type);
-		return -EINVAL;
+		DP_NOTICE(p_hwfn, "-EOPNOTSUPP elem type = %d", elem_type);
+		return -EOPNOTSUPP;
 	}
 
 	/* Calculate line in ilt */
@@ -2259,8 +2263,12 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
 
 		for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
 			elem = (union type1_task_context *)elem_start;
-			SET_FIELD(elem->roce_ctx.tdif_context.flags1,
-				  TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
+			tdif_context = &elem->roce_ctx.tdif_context;
+
+			flags1 = le32_to_cpu(tdif_context->flags1);
+			SET_FIELD(flags1, TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
+			tdif_context->flags1 = cpu_to_le32(flags1);
+
 			elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
 		}
 	}
@@ -2336,6 +2344,11 @@ qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
 		elem_size = SRQ_CXT_SIZE;
 		p_blk = &p_cli->pf_blks[SRQ_BLK];
 		break;
+	case QED_ELEM_XRC_SRQ:
+		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
+		elem_size = XRC_SRQ_CXT_SIZE;
+		p_blk = &p_cli->pf_blks[SRQ_BLK];
+		break;
	case QED_ELEM_TASK:
 		p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
 		elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
@@ -2451,8 +2464,9 @@ int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
 		seg = QED_CXT_FCOE_TID_SEG;
 		break;
 	case QED_PCI_ISCSI:
-		proto = PROTOCOLID_ISCSI;
-		seg = QED_CXT_ISCSI_TID_SEG;
+	case QED_PCI_NVMETCP:
+		proto = PROTOCOLID_TCP_ULP;
+		seg = QED_CXT_TCP_ULP_TID_SEG;
 		break;
 	default:
 		return -EINVAL;
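
Note on the cid_map hunks: the diff replaces the driver's open-coded bitmap sizing (DIV_ROUND_UP over sizeof(unsigned long) * BITS_PER_BYTE, handed to kzalloc()) with the kernel's bitmap_zalloc()/bitmap_zero()/bitmap_free() helpers. As a minimal userspace sketch (not driver code; BITS_TO_LONGS() is re-declared locally here to mirror the kernel's definition, since bitmap_zalloc() allocates BITS_TO_LONGS(nbits) longs), the program below shows that both formulas request the same number of bytes, which is why the conversion does not change behavior:

#include <stdio.h>

#define BITS_PER_BYTE 8
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
/* Local stand-in for the kernel's BITS_TO_LONGS(): longs needed to hold nbits bits */
#define BITS_TO_LONGS(nbits) DIV_ROUND_UP(nbits, BITS_PER_BYTE * sizeof(long))

int main(void)
{
	unsigned int counts[] = { 1, 63, 64, 65, 4096, 100000 };
	size_t i;

	for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++) {
		unsigned int cid_count = counts[i];

		/* Size the removed code computed and passed to kzalloc() */
		size_t old_size = DIV_ROUND_UP(cid_count,
					       sizeof(unsigned long) * BITS_PER_BYTE) *
				  sizeof(unsigned long);

		/* Size bitmap_zalloc(cid_count, GFP_KERNEL) allocates internally */
		size_t new_size = BITS_TO_LONGS(cid_count) * sizeof(unsigned long);

		printf("cid_count=%6u old=%3zu new=%3zu %s\n",
		       cid_count, old_size, new_size,
		       old_size == new_size ? "match" : "MISMATCH");
	}

	return 0;
}

The related cleanups in qed_cxt_mngr_setup() and qed_cid_map_free() (bitmap_zero() instead of the manual length computation plus memset(), bitmap_free() instead of kfree()) rest on the same correspondence.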
