Diffstat (limited to 'drivers/net/ethernet/qlogic/qed/qed_mcp.c')
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 920
1 file changed, 794 insertions(+), 126 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index e7f18e34ff0d..c7f497c36f66 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1,33 +1,7 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and /or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * Copyright (c) 2019-2020 Marvell International Ltd. */ #include <linux/types.h> @@ -43,22 +17,25 @@ #include "qed_cxt.h" #include "qed_dcbx.h" #include "qed_hsi.h" +#include "qed_mfw_hsi.h" #include "qed_hw.h" #include "qed_mcp.h" #include "qed_reg_addr.h" #include "qed_sriov.h" +#define GRCBASE_MCP 0xe00000 + #define QED_MCP_RESP_ITER_US 10 #define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ #define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */ #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \ - qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \ + qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset)), \ _val) #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \ - qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset)) + qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset))) #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \ DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \ @@ -163,7 +140,7 @@ static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn, int qed_mcp_free(struct qed_hwfn *p_hwfn) { if (p_hwfn->mcp_info) { - struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp; + struct qed_mcp_cmd_elem *p_cmd_elem = NULL, *p_tmp; kfree(p_hwfn->mcp_info->mfw_mb_cur); kfree(p_hwfn->mcp_info->mfw_mb_shadow); @@ -272,6 +249,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) /* Initialize the MFW spinlock */ spin_lock_init(&p_info->cmd_lock); spin_lock_init(&p_info->link_lock); + spin_lock_init(&p_info->unload_lock); INIT_LIST_HEAD(&p_info->cmd_list); @@ -408,7 +386,7 @@ qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param); /* Get the union data */ - if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) { + if 
(p_mb_params->p_data_dst && p_mb_params->data_dst_size) { u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr + offsetof(struct public_drv_mb, union_data); @@ -434,7 +412,7 @@ static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, union_data_addr = p_hwfn->mcp_info->drv_mb_addr + offsetof(struct public_drv_mb, union_data); memset(&union_data, 0, sizeof(union_data)); - if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size) + if (p_mb_params->p_data_src && p_mb_params->data_src_size) memcpy(&union_data, p_mb_params->p_data_src, p_mb_params->data_src_size); qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data, @@ -481,12 +459,11 @@ static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn, static int _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, - struct qed_mcp_mb_params *p_mb_params, - u32 max_retries, u32 usecs) + struct qed_mcp_mb_params *p_mb_params) { - u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000); struct qed_mcp_cmd_elem *p_cmd_elem; u16 seq_num; + u32 cnt = 0; int rc = 0; /* Wait until the mailbox is non-occupied */ @@ -510,12 +487,13 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) - msleep(msecs); + usleep_range(QED_MCP_RESP_ITER_US, + QED_MCP_RESP_ITER_US * 2); else - udelay(usecs); - } while (++cnt < max_retries); + udelay(QED_MCP_RESP_ITER_US); + } while (++cnt < QED_DRV_MB_MAX_RETRIES); - if (cnt >= max_retries) { + if (cnt >= QED_DRV_MB_MAX_RETRIES) { DP_NOTICE(p_hwfn, "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n", p_mb_params->cmd, p_mb_params->param); @@ -542,9 +520,10 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, */ if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) - msleep(msecs); + usleep_range(QED_MCP_RESP_ITER_US, + QED_MCP_RESP_ITER_US * 2); else - udelay(usecs); + udelay(QED_MCP_RESP_ITER_US); spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); @@ -558,9 +537,9 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, goto err; spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); - } while (++cnt < max_retries); + } while (++cnt < QED_DRV_MB_MAX_RETRIES); - if (cnt >= max_retries) { + if (cnt >= QED_DRV_MB_MAX_RETRIES) { DP_NOTICE(p_hwfn, "The MFW failed to respond to command 0x%08x [param 0x%08x].\n", p_mb_params->cmd, p_mb_params->param); @@ -573,6 +552,8 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK)) qed_mcp_cmd_set_blocking(p_hwfn, true); + qed_hw_err_notify(p_hwfn, p_ptt, + QED_HW_ERR_MFW_RESP_FAIL, NULL); return -EAGAIN; } @@ -584,7 +565,8 @@ _qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n", p_mb_params->mcp_resp, p_mb_params->mcp_param, - (cnt * usecs) / 1000, (cnt * usecs) % 1000); + (cnt * QED_MCP_RESP_ITER_US) / 1000, + (cnt * QED_MCP_RESP_ITER_US) % 1000); /* Clear the sequence number from the MFW response */ p_mb_params->mcp_resp &= FW_MSG_CODE_MASK; @@ -601,8 +583,6 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, struct qed_mcp_mb_params *p_mb_params) { size_t union_data_size = sizeof(union drv_union_data); - u32 max_retries = QED_DRV_MB_MAX_RETRIES; - u32 usecs = QED_MCP_RESP_ITER_US; /* MCP not initialized */ if (!qed_mcp_is_init(p_hwfn)) { @@ -626,21 +606,16 @@ static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, return -EINVAL; } - if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) { - max_retries = DIV_ROUND_UP(max_retries, 1000); - 
usecs *= 1000; - } - - return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries, - usecs); + return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params); } -int qed_mcp_cmd(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 cmd, - u32 param, - u32 *o_mcp_resp, - u32 *o_mcp_param) +static int _qed_mcp_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param, + bool can_sleep) { struct qed_mcp_mb_params mb_params; int rc; @@ -648,6 +623,7 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn, memset(&mb_params, 0, sizeof(mb_params)); mb_params.cmd = cmd; mb_params.param = param; + mb_params.flags = can_sleep ? QED_MB_FLAG_CAN_SLEEP : 0; rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) @@ -659,6 +635,28 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn, return 0; } +int qed_mcp_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param) +{ + return (_qed_mcp_cmd(p_hwfn, p_ptt, cmd, param, + o_mcp_resp, o_mcp_param, true)); +} + +int qed_mcp_cmd_nosleep(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param) +{ + return (_qed_mcp_cmd(p_hwfn, p_ptt, cmd, param, + o_mcp_resp, o_mcp_param, false)); +} + static int qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -693,7 +691,8 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, u32 cmd, u32 param, u32 *o_mcp_resp, - u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf) + u32 *o_mcp_param, + u32 *o_txn_size, u32 *o_buf, bool b_can_sleep) { struct qed_mcp_mb_params mb_params; u8 raw_data[MCP_DRV_NVM_BUF_LEN]; @@ -706,6 +705,8 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, /* Use the maximal value since the actual one is part of the response */ mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN; + if (b_can_sleep) + mb_params.flags = QED_MB_FLAG_CAN_SLEEP; rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); if (rc) @@ -760,34 +761,34 @@ static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn, return rc; } -#define CONFIG_QEDE_BITMAP_IDX BIT(0) -#define CONFIG_QED_SRIOV_BITMAP_IDX BIT(1) -#define CONFIG_QEDR_BITMAP_IDX BIT(2) -#define CONFIG_QEDF_BITMAP_IDX BIT(4) -#define CONFIG_QEDI_BITMAP_IDX BIT(5) -#define CONFIG_QED_LL2_BITMAP_IDX BIT(6) +#define BITMAP_IDX_FOR_CONFIG_QEDE BIT(0) +#define BITMAP_IDX_FOR_CONFIG_QED_SRIOV BIT(1) +#define BITMAP_IDX_FOR_CONFIG_QEDR BIT(2) +#define BITMAP_IDX_FOR_CONFIG_QEDF BIT(4) +#define BITMAP_IDX_FOR_CONFIG_QEDI BIT(5) +#define BITMAP_IDX_FOR_CONFIG_QED_LL2 BIT(6) static u32 qed_get_config_bitmap(void) { u32 config_bitmap = 0x0; if (IS_ENABLED(CONFIG_QEDE)) - config_bitmap |= CONFIG_QEDE_BITMAP_IDX; + config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDE; if (IS_ENABLED(CONFIG_QED_SRIOV)) - config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX; + config_bitmap |= BITMAP_IDX_FOR_CONFIG_QED_SRIOV; if (IS_ENABLED(CONFIG_QED_RDMA)) - config_bitmap |= CONFIG_QEDR_BITMAP_IDX; + config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDR; if (IS_ENABLED(CONFIG_QED_FCOE)) - config_bitmap |= CONFIG_QEDF_BITMAP_IDX; + config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDF; if (IS_ENABLED(CONFIG_QED_ISCSI)) - config_bitmap |= CONFIG_QEDI_BITMAP_IDX; + config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDI; if (IS_ENABLED(CONFIG_QED_LL2)) - config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX; + config_bitmap |= BITMAP_IDX_FOR_CONFIG_QED_LL2; return config_bitmap; } @@ -938,7 +939,6 @@ enum qed_load_req_force { }; static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn, - enum 
qed_load_req_force force_cmd, u8 *p_mfw_force_cmd) { @@ -966,7 +966,6 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, memset(&in_params, 0, sizeof(in_params)); in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT; - in_params.drv_ver_0 = QED_VERSION; in_params.drv_ver_1 = qed_get_config_bitmap(); in_params.fw_ver = STORM_FW_VERSION; rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role); @@ -1070,10 +1069,36 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, return 0; } +int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 resp = 0, param = 0; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp, + ¶m); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed to send a LOAD_DONE command, rc = %d\n", rc); + return rc; + } + + /* Check if there is a DID mismatch between nvm-cfg/efuse */ + if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR) + DP_NOTICE(p_hwfn, + "warning: device configuration is not supported on this board type. The device may not function as expected.\n"); + + return 0; +} + +#define MFW_COMPLETION_MAX_ITER 5000 +#define MFW_COMPLETION_INTERVAL_MS 1 + int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_mcp_mb_params mb_params; + u32 cnt = MFW_COMPLETION_MAX_ITER; u32 wol_param; + int rc; switch (p_hwfn->cdev->wol_config) { case QED_OV_WOL_DISABLED: @@ -1086,7 +1111,7 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) DP_NOTICE(p_hwfn, "Unknown WoL configuration %02x\n", p_hwfn->cdev->wol_config); - /* Fallthrough */ + fallthrough; case QED_OV_WOL_DEFAULT: wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP; } @@ -1096,7 +1121,23 @@ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) mb_params.param = wol_param; mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK; - return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + spin_lock_bh(&p_hwfn->mcp_info->unload_lock); + set_bit(QED_MCP_BYPASS_PROC_BIT, + &p_hwfn->mcp_info->mcp_handling_status); + spin_unlock_bh(&p_hwfn->mcp_info->unload_lock); + + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + + while (test_bit(QED_MCP_IN_PROCESSING_BIT, + &p_hwfn->mcp_info->mcp_handling_status) && --cnt) + msleep(MFW_COMPLETION_INTERVAL_MS); + + if (!cnt) + DP_NOTICE(p_hwfn, + "Failed to wait MFW event completion after %d msec\n", + MFW_COMPLETION_MAX_ITER * MFW_COMPLETION_INTERVAL_MS); + + return rc; } int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) @@ -1366,7 +1407,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, break; case LINK_STATUS_SPEED_AND_DUPLEX_1000THD: p_link->full_duplex = false; - /* Fall-through */ + fallthrough; case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD: p_link->speed = 1000; break; @@ -1447,6 +1488,25 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link); + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) { + switch (status & LINK_STATUS_FEC_MODE_MASK) { + case LINK_STATUS_FEC_MODE_NONE: + p_link->fec_active = QED_FEC_MODE_NONE; + break; + case LINK_STATUS_FEC_MODE_FIRECODE_CL74: + p_link->fec_active = QED_FEC_MODE_FIRECODE; + break; + case LINK_STATUS_FEC_MODE_RS_CL91: + p_link->fec_active = QED_FEC_MODE_RS; + break; + default: + p_link->fec_active = QED_FEC_MODE_AUTO; + } + } else { + p_link->fec_active = QED_FEC_MODE_UNSUPPORTED; + } + qed_link_update(p_hwfn, p_ptt); out: 
spin_unlock_bh(&p_hwfn->mcp_info->link_lock); @@ -1457,8 +1517,9 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input; struct qed_mcp_mb_params mb_params; struct eth_phy_cfg phy_cfg; + u32 cmd, fec_bit = 0; + u32 val, ext_speed; int rc = 0; - u32 cmd; /* Set the shmem configuration according to params */ memset(&phy_cfg, 0, sizeof(phy_cfg)); @@ -1490,19 +1551,87 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) EEE_TX_TIMER_USEC_MASK; } + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) { + if (params->fec & QED_FEC_MODE_NONE) + fec_bit |= FEC_FORCE_MODE_NONE; + else if (params->fec & QED_FEC_MODE_FIRECODE) + fec_bit |= FEC_FORCE_MODE_FIRECODE; + else if (params->fec & QED_FEC_MODE_RS) + fec_bit |= FEC_FORCE_MODE_RS; + else if (params->fec & QED_FEC_MODE_AUTO) + fec_bit |= FEC_FORCE_MODE_AUTO; + + SET_MFW_FIELD(phy_cfg.fec_mode, FEC_FORCE_MODE, fec_bit); + } + + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) { + ext_speed = 0; + if (params->ext_speed.autoneg) + ext_speed |= ETH_EXT_SPEED_NONE; + + val = params->ext_speed.forced_speed; + if (val & QED_EXT_SPEED_1G) + ext_speed |= ETH_EXT_SPEED_1G; + if (val & QED_EXT_SPEED_10G) + ext_speed |= ETH_EXT_SPEED_10G; + if (val & QED_EXT_SPEED_25G) + ext_speed |= ETH_EXT_SPEED_25G; + if (val & QED_EXT_SPEED_40G) + ext_speed |= ETH_EXT_SPEED_40G; + if (val & QED_EXT_SPEED_50G_R) + ext_speed |= ETH_EXT_SPEED_50G_BASE_R; + if (val & QED_EXT_SPEED_50G_R2) + ext_speed |= ETH_EXT_SPEED_50G_BASE_R2; + if (val & QED_EXT_SPEED_100G_R2) + ext_speed |= ETH_EXT_SPEED_100G_BASE_R2; + if (val & QED_EXT_SPEED_100G_R4) + ext_speed |= ETH_EXT_SPEED_100G_BASE_R4; + if (val & QED_EXT_SPEED_100G_P4) + ext_speed |= ETH_EXT_SPEED_100G_BASE_P4; + + SET_MFW_FIELD(phy_cfg.extended_speed, ETH_EXT_SPEED, + ext_speed); + + ext_speed = 0; + + val = params->ext_speed.advertised_speeds; + if (val & QED_EXT_SPEED_MASK_1G) + ext_speed |= ETH_EXT_ADV_SPEED_1G; + if (val & QED_EXT_SPEED_MASK_10G) + ext_speed |= ETH_EXT_ADV_SPEED_10G; + if (val & QED_EXT_SPEED_MASK_25G) + ext_speed |= ETH_EXT_ADV_SPEED_25G; + if (val & QED_EXT_SPEED_MASK_40G) + ext_speed |= ETH_EXT_ADV_SPEED_40G; + if (val & QED_EXT_SPEED_MASK_50G_R) + ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R; + if (val & QED_EXT_SPEED_MASK_50G_R2) + ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R2; + if (val & QED_EXT_SPEED_MASK_100G_R2) + ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R2; + if (val & QED_EXT_SPEED_MASK_100G_R4) + ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R4; + if (val & QED_EXT_SPEED_MASK_100G_P4) + ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_P4; + + phy_cfg.extended_speed |= ext_speed; + + SET_MFW_FIELD(phy_cfg.fec_mode, FEC_EXTENDED_MODE, + params->ext_fec_mode); + } + p_hwfn->b_drv_link_init = b_up; if (b_up) { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, - "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n", - phy_cfg.speed, - phy_cfg.pause, - phy_cfg.adv_speed, - phy_cfg.loopback_mode, - phy_cfg.feature_config_flags); + "Configuring Link: Speed 0x%08x, Pause 0x%08x, Adv. Speed 0x%08x, Loopback 0x%08x, FEC 0x%08x, Ext. 
Speed 0x%08x\n", + phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed, + phy_cfg.loopback_mode, phy_cfg.fec_mode, + phy_cfg.extended_speed); } else { - DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, - "Resetting link\n"); + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link\n"); } memset(&mb_params, 0, sizeof(mb_params)); @@ -1528,6 +1657,60 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) return 0; } +u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt; + + if (IS_VF(p_hwfn->cdev)) + return -EINVAL; + + path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_PATH); + path_offsize = qed_rd(p_hwfn, p_ptt, path_offsize_addr); + path_addr = SECTION_ADDR(path_offsize, QED_PATH_ID(p_hwfn)); + + proc_kill_cnt = qed_rd(p_hwfn, p_ptt, + path_addr + + offsetof(struct public_path, process_kill)) & + PROCESS_KILL_COUNTER_MASK; + + return proc_kill_cnt; +} + +static void qed_mcp_handle_process_kill(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_dev *cdev = p_hwfn->cdev; + u32 proc_kill_cnt; + + /* Prevent possible attentions/interrupts during the recovery handling + * and till its load phase, during which they will be re-enabled. + */ + qed_int_igu_disable_int(p_hwfn, p_ptt); + + DP_NOTICE(p_hwfn, "Received a process kill indication\n"); + + /* The following operations should be done once, and thus in CMT mode + * are carried out by only the first HW function. + */ + if (p_hwfn != QED_LEADING_HWFN(cdev)) + return; + + if (cdev->recov_in_prog) { + DP_NOTICE(p_hwfn, + "Ignoring the indication since a recovery process is already in progress\n"); + return; + } + + cdev->recov_in_prog = true; + + proc_kill_cnt = qed_get_process_kill_counter(p_hwfn, p_ptt); + DP_NOTICE(p_hwfn, "Process kill counter: %d\n", proc_kill_cnt); + + qed_schedule_recovery_handler(p_hwfn); +} + static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum MFW_DRV_MSG_TYPE type) @@ -1585,8 +1768,8 @@ static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max); /* Acknowledge the MFW */ - qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp, - ¶m); + qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp, + ¶m); } static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) @@ -1623,8 +1806,129 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode); /* Acknowledge the MFW */ - qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0, - &resp, ¶m); + qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0, + &resp, ¶m); +} + +static void qed_mcp_handle_fan_failure(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + /* A single notification should be sent to upper driver in CMT mode */ + if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev)) + return; + + qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_FAN_FAIL, + "Fan failure was detected on the network interface card and it's going to be shut down.\n"); +} + +struct qed_mdump_cmd_params { + u32 cmd; + void *p_data_src; + u8 data_src_size; + void *p_data_dst; + u8 data_dst_size; + u32 mcp_resp; +}; + +static int +qed_mcp_mdump_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mdump_cmd_params *p_mdump_cmd_params) +{ + struct qed_mcp_mb_params mb_params; + int 
rc; + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD; + mb_params.param = p_mdump_cmd_params->cmd; + mb_params.p_data_src = p_mdump_cmd_params->p_data_src; + mb_params.data_src_size = p_mdump_cmd_params->data_src_size; + mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst; + mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) + return rc; + + p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp; + + if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) { + DP_INFO(p_hwfn, + "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n", + p_mdump_cmd_params->cmd); + rc = -EOPNOTSUPP; + } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) { + DP_INFO(p_hwfn, + "The mdump command is not supported by the MFW\n"); + rc = -EOPNOTSUPP; + } + + return rc; +} + +static int qed_mcp_mdump_ack(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_mdump_cmd_params mdump_cmd_params; + + memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params)); + mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK; + + return qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); +} + +int +qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct mdump_retain_data_stc *p_mdump_retain) +{ + struct qed_mdump_cmd_params mdump_cmd_params; + int rc; + + memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params)); + mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN; + mdump_cmd_params.p_data_dst = p_mdump_retain; + mdump_cmd_params.data_dst_size = sizeof(*p_mdump_retain); + + rc = qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); + if (rc) + return rc; + + if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) { + DP_INFO(p_hwfn, + "Failed to get the mdump retained data [mcp_resp 0x%x]\n", + mdump_cmd_params.mcp_resp); + return -EINVAL; + } + + return 0; +} + +static void qed_mcp_handle_critical_error(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct mdump_retain_data_stc mdump_retain; + int rc; + + /* In CMT mode - no need for more than a single acknowledgment to the + * MFW, and no more than a single notification to the upper driver. 
+ */ + if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev)) + return; + + rc = qed_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain); + if (rc == 0 && mdump_retain.valid) + DP_NOTICE(p_hwfn, + "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n", + mdump_retain.epoch, + mdump_retain.pf, mdump_retain.status); + else + DP_NOTICE(p_hwfn, + "The MFW notified that a critical error occurred in the device\n"); + + DP_NOTICE(p_hwfn, + "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n"); + qed_mcp_mdump_ack(p_hwfn, p_ptt); + + qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_HW_ATTN, NULL); } void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) @@ -1733,6 +2037,19 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n", i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]); + spin_lock_bh(&p_hwfn->mcp_info->unload_lock); + if (test_bit(QED_MCP_BYPASS_PROC_BIT, + &p_hwfn->mcp_info->mcp_handling_status)) { + spin_unlock_bh(&p_hwfn->mcp_info->unload_lock); + DP_INFO(p_hwfn, + "Msg [%d] is bypassed on unload flow\n", i); + continue; + } + + set_bit(QED_MCP_IN_PROCESSING_BIT, + &p_hwfn->mcp_info->mcp_handling_status); + spin_unlock_bh(&p_hwfn->mcp_info->unload_lock); + switch (i) { case MFW_DRV_MSG_LINK_CHANGE: qed_mcp_handle_link_change(p_hwfn, p_ptt, false); @@ -1758,6 +2075,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: qed_mcp_handle_transceiver_change(p_hwfn, p_ptt); break; + case MFW_DRV_MSG_ERROR_RECOVERY: + qed_mcp_handle_process_kill(p_hwfn, p_ptt); + break; case MFW_DRV_MSG_GET_LAN_STATS: case MFW_DRV_MSG_GET_FCOE_STATS: case MFW_DRV_MSG_GET_ISCSI_STATS: @@ -1770,6 +2090,12 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, case MFW_DRV_MSG_S_TAG_UPDATE: qed_mcp_update_stag(p_hwfn, p_ptt); break; + case MFW_DRV_MSG_FAILURE_DETECTED: + qed_mcp_handle_fan_failure(p_hwfn, p_ptt); + break; + case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED: + qed_mcp_handle_critical_error(p_hwfn, p_ptt); + break; case MFW_DRV_MSG_GET_TLV_REQ: qed_mfw_tlv_req(p_hwfn); break; @@ -1777,6 +2103,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i); rc = -EINVAL; } + + clear_bit(QED_MCP_IN_PROCESSING_BIT, + &p_hwfn->mcp_info->mcp_handling_status); } /* ACK everything */ @@ -1807,7 +2136,7 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_mfw_ver, u32 *p_running_bundle_id) { - u32 global_offsize; + u32 global_offsize, public_base; if (IS_VF(p_hwfn->cdev)) { if (p_hwfn->vf_iov_info) { @@ -1824,16 +2153,16 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, } } + public_base = p_hwfn->mcp_info->public_base; global_offsize = qed_rd(p_hwfn, p_ptt, - SECTION_OFFSIZE_ADDR(p_hwfn-> - mcp_info->public_base, + SECTION_OFFSIZE_ADDR(public_base, PUBLIC_GLOBAL)); *p_mfw_ver = qed_rd(p_hwfn, p_ptt, SECTION_ADDR(global_offsize, 0) + offsetof(struct public_global, mfw_ver)); - if (p_running_bundle_id != NULL) { + if (p_running_bundle_id) { *p_running_bundle_id = qed_rd(p_hwfn, p_ptt, SECTION_ADDR(global_offsize, 0) + offsetof(struct public_global, @@ -1935,6 +2264,7 @@ int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn, return 0; } + static bool qed_is_transceiver_ready(u32 transceiver_state, u32 transceiver_type) { @@ -2010,6 +2340,11 @@ int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn, NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | 
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; break; + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + break; case ETH_TRANSCEIVER_TYPE_40G_CR4: case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | @@ -2040,8 +2375,10 @@ int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn, *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; break; case ETH_TRANSCEIVER_TYPE_10G_BASET: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR: *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | - NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; break; default: DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n", @@ -2097,7 +2434,7 @@ qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "According to Legacy capabilities, L2 personality is %08x\n", - (u32) *p_proto); + (u32)*p_proto); } static int @@ -2142,7 +2479,7 @@ qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn, DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n", - (u32) *p_proto, resp, param); + (u32)*p_proto, resp, param); return 0; } @@ -2169,7 +2506,7 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn, break; case FUNC_MF_CFG_PROTOCOL_ROCE: DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n"); - /* Fallthrough */ + fallthrough; default: rc = -EINVAL; } @@ -2236,11 +2573,10 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, } DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP), - "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n", + "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %pM wwn port %llx node %llx ovlan %04x wol %02x\n", info->pause_on_host, info->protocol, info->bandwidth_min, info->bandwidth_max, - info->mac[0], info->mac[1], info->mac[2], - info->mac[3], info->mac[4], info->mac[5], + info->mac, info->wwn_port, info->wwn_node, info->ovlan, (u8)p_hwfn->hw_info.b_wol_support); @@ -2303,6 +2639,43 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, return 0; } +int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_dev *cdev = p_hwfn->cdev; + + if (cdev->recov_in_prog) { + DP_NOTICE(p_hwfn, + "Avoid triggering a recovery since such a process is already in progress\n"); + return -EAGAIN; + } + + DP_NOTICE(p_hwfn, "Triggering a recovery process\n"); + qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1); + + return 0; +} + +#define QED_RECOVERY_PROLOG_SLEEP_MS 100 + +int qed_recovery_prolog(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; + int rc; + + /* Allow ongoing PCIe transactions to complete */ + msleep(QED_RECOVERY_PROLOG_SLEEP_MS); + + /* Clear the PF's internal FID_enable in the PXP */ + rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false); + if (rc) + DP_NOTICE(p_hwfn, + "qed_pglueb_set_pfid_enable() failed. 
rc = %d.\n", + rc); + + return rc; +} + static int qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 vf_id, u8 num) @@ -2534,7 +2907,7 @@ int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn, } int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, u8 *mac) + struct qed_ptt *p_ptt, const u8 *mac) { struct qed_mcp_mb_params mb_params; u32 mfw_mac[2]; @@ -2706,20 +3079,13 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len) DRV_MB_PARAM_NVM_LEN_OFFSET), &resp, &resp_param, &read_len, - (u32 *)(p_buf + offset)); + (u32 *)(p_buf + offset), true); if (rc || (resp != FW_MSG_CODE_NVM_OK)) { DP_NOTICE(cdev, "MCP command rc = %d\n", rc); break; } - /* This can be a lengthy process, and it's possible scheduler - * isn't preemptable. Sleep a bit to prevent CPU hogging. - */ - if (bytes_left % 0x1000 < - (bytes_left - read_len) % 0x1000) - usleep_range(1000, 2000); - offset += read_len; bytes_left -= read_len; } @@ -2809,10 +3175,12 @@ int qed_mcp_nvm_write(struct qed_dev *cdev, * to be delivered to MFW. */ if (param && cmd == QED_PUT_FILE_DATA) { - buf_idx = QED_MFW_GET_FIELD(param, - FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET); - buf_size = QED_MFW_GET_FIELD(param, - FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE); + buf_idx = + QED_MFW_GET_FIELD(param, + FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET); + buf_size = + QED_MFW_GET_FIELD(param, + FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE); } else { buf_idx += buf_size; buf_size = min_t(u32, (len - buf_idx), @@ -2856,7 +3224,7 @@ int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_TRANSCEIVER_READ, nvm_offset, &resp, ¶m, &buf_size, - (u32 *)(p_buf + offset)); + (u32 *)(p_buf + offset), true); if (rc) { DP_NOTICE(p_hwfn, "Failed to send a transceiver read command to the MFW. 
rc = %d.\n", @@ -2933,7 +3301,9 @@ int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn, if (rc) return rc; - if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)) + if (((rsp & FW_MSG_CODE_MASK) == FW_MSG_CODE_UNSUPPORTED)) + rc = -EOPNOTSUPP; + else if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)) rc = -EINVAL; return rc; @@ -2955,7 +3325,7 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn, DRV_MSG_CODE_BIST_TEST, param, &resp, &resp_param, &buf_size, - (u32 *)p_image_att); + (u32 *)p_image_att, false); if (rc) return rc; @@ -2988,6 +3358,7 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn) p_ptt, &nvm_info.num_images); if (rc == -EOPNOTSUPP) { DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n"); + nvm_info.num_images = 0; goto out; } else if (rc || !nvm_info.num_images) { DP_ERR(p_hwfn, "Failed getting number of images\n"); @@ -3034,12 +3405,20 @@ err0: return rc; } +void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn) +{ + kfree(p_hwfn->nvm_info.image_att); + p_hwfn->nvm_info.image_att = NULL; + p_hwfn->nvm_info.valid = false; +} + int qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, enum qed_nvm_images image_id, struct qed_nvm_image_att *p_image_att) { enum nvm_image_type type; + int rc; u32 i; /* Translate image_id into MFW definitions */ @@ -3050,6 +3429,9 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, case QED_NVM_IMAGE_FCOE_CFG: type = NVM_TYPE_FCOE_CFG; break; + case QED_NVM_IMAGE_MDUMP: + type = NVM_TYPE_MDUMP; + break; case QED_NVM_IMAGE_NVM_CFG1: type = NVM_TYPE_NVM_CFG1; break; @@ -3057,7 +3439,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, type = NVM_TYPE_DEFAULT_CFG; break; case QED_NVM_IMAGE_NVM_META: - type = NVM_TYPE_META; + type = NVM_TYPE_NVM_META; break; default: DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n", @@ -3065,7 +3447,10 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, return -EINVAL; } - qed_mcp_nvm_info_populate(p_hwfn); + rc = qed_mcp_nvm_info_populate(p_hwfn); + if (rc) + return rc; + for (i = 0; i < p_hwfn->nvm_info.num_images; i++) if (type == p_hwfn->nvm_info.image_att[i].image_type) break; @@ -3146,9 +3531,12 @@ static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id) case QED_ILT: mfw_res_id = RESOURCE_ILT_E; break; - case QED_LL2_QUEUE: + case QED_LL2_RAM_QUEUE: mfw_res_id = RESOURCE_LL2_QUEUE_E; break; + case QED_LL2_CTX_QUEUE: + mfw_res_id = RESOURCE_LL2_CQS_E; + break; case QED_RDMA_CNQ_RAM: case QED_CMDQS_CQS: /* CNQ/CMDQS are the same resource */ @@ -3215,7 +3603,7 @@ qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn, switch (p_in_params->cmd) { case DRV_MSG_SET_RESOURCE_VALUE_MSG: mfw_resc_info.size = p_in_params->resc_max_val; - /* Fallthrough */ + fallthrough; case DRV_MSG_GET_RESOURCE_ALLOC_MSG: break; default: @@ -3339,8 +3727,8 @@ static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn, { int rc; - rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param, - p_mcp_resp, p_mcp_param); + rc = qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, + param, p_mcp_resp, p_mcp_param); if (rc) return rc; @@ -3492,7 +3880,7 @@ qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn, DP_INFO(p_hwfn, "Resource unlock request for an already released resource [%d]\n", p_params->resource); - /* Fallthrough */ + fallthrough; case RESOURCE_OPCODE_RELEASED: p_params->b_released = true; break; @@ -3539,6 +3927,12 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, } } +bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn) +{ + return 
!!(p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ); +} + int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 mcp_resp; @@ -3559,8 +3953,282 @@ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) u32 mcp_resp, mcp_param, features; features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE | - DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK; + DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK | + DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL; return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT, features, &mcp_resp, &mcp_param); } + +int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_mcp_mb_params mb_params = {0}; + struct qed_dev *cdev = p_hwfn->cdev; + u8 fir_valid, l2_valid; + int rc; + + mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) + return rc; + + if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) { + DP_INFO(p_hwfn, + "The get_engine_config command is unsupported by the MFW\n"); + return -EOPNOTSUPP; + } + + fir_valid = QED_MFW_GET_FIELD(mb_params.mcp_param, + FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID); + if (fir_valid) + cdev->fir_affin = + QED_MFW_GET_FIELD(mb_params.mcp_param, + FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE); + + l2_valid = QED_MFW_GET_FIELD(mb_params.mcp_param, + FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID); + if (l2_valid) + cdev->l2_affin_hint = + QED_MFW_GET_FIELD(mb_params.mcp_param, + FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE); + + DP_INFO(p_hwfn, + "Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n", + fir_valid, cdev->fir_affin, l2_valid, cdev->l2_affin_hint); + + return 0; +} + +int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_mcp_mb_params mb_params = {0}; + struct qed_dev *cdev = p_hwfn->cdev; + int rc; + + mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) + return rc; + + if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) { + DP_INFO(p_hwfn, + "The get_ppfid_bitmap command is unsupported by the MFW\n"); + return -EOPNOTSUPP; + } + + cdev->ppfid_bitmap = QED_MFW_GET_FIELD(mb_params.mcp_param, + FW_MB_PARAM_PPFID_BITMAP); + + DP_VERBOSE(p_hwfn, QED_MSG_SP, "PPFID bitmap 0x%hhx\n", + cdev->ppfid_bitmap); + + return 0; +} + +int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, + u32 *p_len) +{ + u32 mb_param = 0, resp, param; + int rc; + + QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id); + if (flags & QED_NVM_CFG_OPTION_INIT) + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1); + if (flags & QED_NVM_CFG_OPTION_FREE) + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1); + if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) { + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1); + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID, + entity_id); + } + + rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_GET_NVM_CFG_OPTION, + mb_param, &resp, ¶m, p_len, + (u32 *)p_buf, false); + + return rc; +} + +int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, + u32 len) +{ + u32 mb_param = 0, resp, param; + + QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id); + if (flags & QED_NVM_CFG_OPTION_ALL) + QED_MFW_SET_FIELD(mb_param, + 
DRV_MB_PARAM_NVM_CFG_OPTION_ALL, 1); + if (flags & QED_NVM_CFG_OPTION_INIT) + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1); + if (flags & QED_NVM_CFG_OPTION_COMMIT) + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT, 1); + if (flags & QED_NVM_CFG_OPTION_FREE) + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1); + if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) { + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1); + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID, + entity_id); + } + + return qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_SET_NVM_CFG_OPTION, + mb_param, &resp, ¶m, len, (u32 *)p_buf); +} + +#define QED_MCP_DBG_DATA_MAX_SIZE MCP_DRV_NVM_BUF_LEN +#define QED_MCP_DBG_DATA_MAX_HEADER_SIZE sizeof(u32) +#define QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE \ + (QED_MCP_DBG_DATA_MAX_SIZE - QED_MCP_DBG_DATA_MAX_HEADER_SIZE) + +static int +__qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 *p_buf, u8 size) +{ + struct qed_mcp_mb_params mb_params; + int rc; + + if (size > QED_MCP_DBG_DATA_MAX_SIZE) { + DP_ERR(p_hwfn, + "Debug data size is %d while it should not exceed %d\n", + size, QED_MCP_DBG_DATA_MAX_SIZE); + return -EINVAL; + } + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_DEBUG_DATA_SEND; + SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE, size); + mb_params.p_data_src = p_buf; + mb_params.data_src_size = size; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) + return rc; + + if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) { + DP_INFO(p_hwfn, + "The DEBUG_DATA_SEND command is unsupported by the MFW\n"); + return -EOPNOTSUPP; + } else if (mb_params.mcp_resp == (u32)FW_MSG_CODE_DEBUG_NOT_ENABLED) { + DP_INFO(p_hwfn, "The DEBUG_DATA_SEND command is not enabled\n"); + return -EBUSY; + } else if (mb_params.mcp_resp != (u32)FW_MSG_CODE_DEBUG_DATA_SEND_OK) { + DP_NOTICE(p_hwfn, + "Failed to send debug data to the MFW [resp 0x%08x]\n", + mb_params.mcp_resp); + return -EINVAL; + } + + return 0; +} + +enum qed_mcp_dbg_data_type { + QED_MCP_DBG_DATA_TYPE_RAW, +}; + +/* Header format: [31:28] PFID, [27:20] flags, [19:12] type, [11:0] S/N */ +#define QED_MCP_DBG_DATA_HDR_SN_OFFSET 0 +#define QED_MCP_DBG_DATA_HDR_SN_MASK 0x00000fff +#define QED_MCP_DBG_DATA_HDR_TYPE_OFFSET 12 +#define QED_MCP_DBG_DATA_HDR_TYPE_MASK 0x000ff000 +#define QED_MCP_DBG_DATA_HDR_FLAGS_OFFSET 20 +#define QED_MCP_DBG_DATA_HDR_FLAGS_MASK 0x0ff00000 +#define QED_MCP_DBG_DATA_HDR_PF_OFFSET 28 +#define QED_MCP_DBG_DATA_HDR_PF_MASK 0xf0000000 + +#define QED_MCP_DBG_DATA_HDR_FLAGS_FIRST 0x1 +#define QED_MCP_DBG_DATA_HDR_FLAGS_LAST 0x2 + +static int +qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_mcp_dbg_data_type type, u8 *p_buf, u32 size) +{ + u8 raw_data[QED_MCP_DBG_DATA_MAX_SIZE], *p_tmp_buf = p_buf; + u32 tmp_size = size, *p_header, *p_payload; + u8 flags = 0; + u16 seq; + int rc; + + p_header = (u32 *)raw_data; + p_payload = (u32 *)(raw_data + QED_MCP_DBG_DATA_MAX_HEADER_SIZE); + + seq = (u16)atomic_inc_return(&p_hwfn->mcp_info->dbg_data_seq); + + /* First chunk is marked as 'first' */ + flags |= QED_MCP_DBG_DATA_HDR_FLAGS_FIRST; + + *p_header = 0; + SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_SN, seq); + SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_TYPE, type); + SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags); + SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_PF, p_hwfn->abs_pf_id); + + 
while (tmp_size > QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE) { + memcpy(p_payload, p_tmp_buf, QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE); + rc = __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data, + QED_MCP_DBG_DATA_MAX_SIZE); + if (rc) + return rc; + + /* Clear the 'first' marking after sending the first chunk */ + if (p_tmp_buf == p_buf) { + flags &= ~QED_MCP_DBG_DATA_HDR_FLAGS_FIRST; + SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, + flags); + } + + p_tmp_buf += QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE; + tmp_size -= QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE; + } + + /* Last chunk is marked as 'last' */ + flags |= QED_MCP_DBG_DATA_HDR_FLAGS_LAST; + SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags); + memcpy(p_payload, p_tmp_buf, tmp_size); + + /* Casting the left size to u8 is ok since at this point it is <= 32 */ + return __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data, + (u8)(QED_MCP_DBG_DATA_MAX_HEADER_SIZE + + tmp_size)); +} + +int +qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 *p_buf, u32 size) +{ + return qed_mcp_send_debug_data(p_hwfn, p_ptt, + QED_MCP_DBG_DATA_TYPE_RAW, p_buf, size); +} + +bool qed_mcp_is_esl_supported(struct qed_hwfn *p_hwfn) +{ + return !!(p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_ENHANCED_SYS_LCK); +} + +int qed_mcp_get_esl_status(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool *active) +{ + u32 resp = 0, param = 0; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MANAGEMENT_STATUS, 0, &resp, ¶m); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to send ESL command, rc = %d\n", rc); + return rc; + } + + *active = !!(param & FW_MB_PARAM_MANAGEMENT_STATUS_LOCKDOWN_ENABLED); + + return 0; +} |
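
The unload flow in this patch coordinates with the MFW event handler through the new unload_lock and the QED_MCP_BYPASS_PROC_BIT / QED_MCP_IN_PROCESSING_BIT pair: qed_mcp_unload_req() raises the bypass bit and then waits up to 5000 x 1 ms iterations for any handler that is already in flight, while qed_mcp_handle_events() skips new messages once the bypass bit is set. Below is a rough user-space analogue of that handshake, assuming a pthread mutex and C11 atomics in place of the kernel spinlock and atomic bitops; all of the threading scaffolding here is invented purely for illustration.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define QED_MCP_BYPASS_PROC_BIT   0UL
#define QED_MCP_IN_PROCESSING_BIT 1UL

static pthread_mutex_t unload_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_ulong mcp_handling_status;

/* MFW event handler: skip the event once the unload flow has raised BYPASS,
 * otherwise mark it as in-processing for the duration of the handling. */
static void *handle_mfw_event(void *arg)
{
	(void)arg;

	pthread_mutex_lock(&unload_lock);
	if (atomic_load(&mcp_handling_status) & (1UL << QED_MCP_BYPASS_PROC_BIT)) {
		pthread_mutex_unlock(&unload_lock);
		puts("event bypassed on unload flow");
		return NULL;
	}
	atomic_fetch_or(&mcp_handling_status, 1UL << QED_MCP_IN_PROCESSING_BIT);
	pthread_mutex_unlock(&unload_lock);

	usleep(3000);	/* pretend to handle the MFW message */

	atomic_fetch_and(&mcp_handling_status,
			 ~(1UL << QED_MCP_IN_PROCESSING_BIT));
	return NULL;
}

int main(void)
{
	unsigned int cnt = 5000;	/* MFW_COMPLETION_MAX_ITER */
	pthread_t ev;

	pthread_create(&ev, NULL, handle_mfw_event, NULL);

	/* Unload flow: raise BYPASS under the lock, send UNLOAD_REQ (elided),
	 * then wait for any handler that was already past the bypass check. */
	pthread_mutex_lock(&unload_lock);
	atomic_fetch_or(&mcp_handling_status, 1UL << QED_MCP_BYPASS_PROC_BIT);
	pthread_mutex_unlock(&unload_lock);

	while ((atomic_load(&mcp_handling_status) &
		(1UL << QED_MCP_IN_PROCESSING_BIT)) && --cnt)
		usleep(1000);	/* MFW_COMPLETION_INTERVAL_MS */

	if (!cnt)
		puts("timed out waiting for MFW event completion");

	pthread_join(ev, NULL);
	return 0;
}

The same split motivates the new qed_mcp_cmd()/qed_mcp_cmd_nosleep() wrappers: acknowledgments issued from the event-handling path (BW_UPDATE_ACK, S_TAG_UPDATE_ACK, RESOURCE_CMD) use the non-sleeping variant, while ordinary configuration commands keep the sleeping default.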

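The DRV_MSG_CODE_DEBUG_DATA_SEND support added near the end of this patch prefixes every chunk sent to the MFW with a 32-bit header: [31:28] PFID, [27:20] flags, [19:12] type, [11:0] sequence number. The following is a minimal user-space sketch of that packing, reusing the mask/offset values from the patch; PUT_FIELD() is a hypothetical stand-in for the driver's SET_MFW_FIELD() macro.

#include <stdint.h>
#include <stdio.h>

#define QED_MCP_DBG_DATA_HDR_SN_MASK      0x00000fff
#define QED_MCP_DBG_DATA_HDR_SN_OFFSET    0
#define QED_MCP_DBG_DATA_HDR_TYPE_MASK    0x000ff000
#define QED_MCP_DBG_DATA_HDR_TYPE_OFFSET  12
#define QED_MCP_DBG_DATA_HDR_FLAGS_MASK   0x0ff00000
#define QED_MCP_DBG_DATA_HDR_FLAGS_OFFSET 20
#define QED_MCP_DBG_DATA_HDR_PF_MASK      0xf0000000
#define QED_MCP_DBG_DATA_HDR_PF_OFFSET    28

#define QED_MCP_DBG_DATA_HDR_FLAGS_FIRST  0x1
#define QED_MCP_DBG_DATA_HDR_FLAGS_LAST   0x2

/* Stand-in for SET_MFW_FIELD(): clear the field, then OR in the new value. */
#define PUT_FIELD(hdr, name, val)                                       \
	((hdr) = ((hdr) & ~name##_MASK) |                               \
		 (((uint32_t)(val) << name##_OFFSET) & name##_MASK))

int main(void)
{
	uint32_t header = 0;

	PUT_FIELD(header, QED_MCP_DBG_DATA_HDR_SN, 0x123);
	PUT_FIELD(header, QED_MCP_DBG_DATA_HDR_TYPE, 0);	/* RAW */
	PUT_FIELD(header, QED_MCP_DBG_DATA_HDR_FLAGS,
		  QED_MCP_DBG_DATA_HDR_FLAGS_FIRST |
		  QED_MCP_DBG_DATA_HDR_FLAGS_LAST);		/* single chunk */
	PUT_FIELD(header, QED_MCP_DBG_DATA_HDR_PF, 2);		/* abs_pf_id */

	printf("header = 0x%08x\n", header);
	return 0;
}

With a sequence number of 0x123, PF 2 and a single FIRST|LAST chunk, the sketch prints 0x20300123. In the driver itself the header occupies the first 4 bytes of each mailbox buffer and the remaining payload is sent in chunks, with the FIRST flag cleared after the first chunk and the LAST flag set only on the final one.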