Diffstat (limited to 'drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c')
-rw-r--r--	drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c	913
1 file changed, 577 insertions, 336 deletions
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index d80f5c981d90..be7deb9cc410 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -8,14 +8,17 @@
  * the Free Software Foundation.
  */
 
+#include <linux/ethtool.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
 #include <linux/interrupt.h>
 #include <linux/etherdevice.h>
-#include "bnxt_hsi.h"
+#include <net/dcbnl.h>
+#include <linux/bnxt/hsi.h>
 #include "bnxt.h"
+#include "bnxt_hwrm.h"
 #include "bnxt_ulp.h"
 #include "bnxt_sriov.h"
 #include "bnxt_vfr.h"
@@ -25,47 +28,34 @@ static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp,
 					  struct bnxt_vf_info *vf, u16 event_id)
 {
-	struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr;
-	struct hwrm_fwd_async_event_cmpl_input req = {0};
+	struct hwrm_fwd_async_event_cmpl_input *req;
 	struct hwrm_async_event_cmpl *async_cmpl;
 	int rc = 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_FWD_ASYNC_EVENT_CMPL);
+	if (rc)
+		goto exit;
+
 	if (vf)
-		req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
+		req->encap_async_event_target_id = cpu_to_le16(vf->fw_fid);
 	else
 		/* broadcast this async event to all VFs */
-		req.encap_async_event_target_id = cpu_to_le16(0xffff);
-	async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl;
+		req->encap_async_event_target_id = cpu_to_le16(0xffff);
+	async_cmpl =
+		(struct hwrm_async_event_cmpl *)req->encap_async_event_cmpl;
 	async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT);
 	async_cmpl->event_id = cpu_to_le16(event_id);
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-
-	if (rc) {
+	rc = hwrm_req_send(bp, req);
+exit:
+	if (rc)
 		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n",
 			   rc);
-		goto fwd_async_event_cmpl_exit;
-	}
-
-	if (resp->error_code) {
-		netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n",
-			   resp->error_code);
-		rc = -1;
-	}
-
-fwd_async_event_cmpl_exit:
-	mutex_unlock(&bp->hwrm_cmd_lock);
 	return rc;
 }
 
 static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
 {
-	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
-		netdev_err(bp->dev, "vf ndo called though PF is down\n");
-		return -EINVAL;
-	}
 	if (!bp->pf.active_vfs) {
 		netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
 		return -EINVAL;
@@ -79,10 +69,10 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
 
 int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
 {
-	struct hwrm_func_cfg_input req = {0};
 	struct bnxt *bp = netdev_priv(dev);
-	struct bnxt_vf_info *vf;
+	struct hwrm_func_cfg_input *req;
 	bool old_setting = false;
+	struct bnxt_vf_info *vf;
 	u32 func_flags;
 	int rc;
 
@@ -99,28 +89,76 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
 	if (old_setting == setting)
 		return 0;
 
-	func_flags = vf->func_flags;
 	if (setting)
-		func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
+		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_ENABLE;
 	else
-		func_flags |= FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
+		func_flags = FUNC_CFG_REQ_FLAGS_SRC_MAC_ADDR_CHECK_DISABLE;
 	/*TODO: if the driver supports VLAN filter on guest VLAN,
 	 * the spoof check should also include vlan anti-spoofing
 	 */
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-	req.fid = cpu_to_le16(vf->fw_fid);
-	req.flags = cpu_to_le32(func_flags);
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
 	if (!rc) {
-		vf->func_flags = func_flags;
-		if (setting)
-			vf->flags |= BNXT_VF_SPOOFCHK;
-		else
-			vf->flags &= ~BNXT_VF_SPOOFCHK;
+		req->fid = cpu_to_le16(vf->fw_fid);
+		req->flags = cpu_to_le32(func_flags);
+		rc = hwrm_req_send(bp, req);
+		if (!rc) {
+			if (setting)
+				vf->flags |= BNXT_VF_SPOOFCHK;
+			else
+				vf->flags &= ~BNXT_VF_SPOOFCHK;
+		}
 	}
 	return rc;
 }
 
+static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+	struct hwrm_func_qcfg_output *resp;
+	struct hwrm_func_qcfg_input *req;
+	int rc;
+
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
+	if (rc)
+		return rc;
+
+	req->fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
+	if (!rc)
+		vf->func_qcfg_flags = le16_to_cpu(resp->flags);
+	hwrm_req_drop(bp, req);
+	return rc;
+}
+
+bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+	if (BNXT_PF(bp) && !(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
+		return !!(vf->flags & BNXT_VF_TRUST);
+
+	bnxt_hwrm_func_qcfg_flags(bp, vf);
+	return !!(vf->func_qcfg_flags & FUNC_QCFG_RESP_FLAGS_TRUSTED_VF);
+}
+
+static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf)
+{
+	struct hwrm_func_cfg_input *req;
+	int rc;
+
+	if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF))
+		return 0;
+
+	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
+	if (rc)
+		return rc;
+
+	req->fid = cpu_to_le16(vf->fw_fid);
+	if (vf->flags & BNXT_VF_TRUST)
+		req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
+	else
+		req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE);
+	return hwrm_req_send(bp, req);
+}
+
 int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
 {
 	struct bnxt *bp = netdev_priv(dev);
@@ -135,6 +173,7 @@ int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted)
 	else
 		vf->flags &= ~BNXT_VF_TRUST;
 
+	bnxt_hwrm_set_trusted_vf(bp, vf);
 	return 0;
 }
 
@@ -158,13 +197,10 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
 	memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
 	ivi->max_tx_rate = vf->max_tx_rate;
 	ivi->min_tx_rate = vf->min_tx_rate;
-	ivi->vlan = vf->vlan;
-	if (vf->flags & BNXT_VF_QOS)
-		ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
-	else
-		ivi->qos = 0;
+	ivi->vlan = vf->vlan & VLAN_VID_MASK;
+	ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
 	ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
-	ivi->trusted = !!(vf->flags & BNXT_VF_TRUST);
+	ivi->trusted = bnxt_is_trusted_vf(bp, vf);
 	if (!(vf->flags & BNXT_VF_LINK_FORCED))
 		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
 	else if (vf->flags & BNXT_VF_LINK_UP)
@@ -177,8 +213,8 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
 
 int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
 {
-	struct hwrm_func_cfg_input req = {0};
 	struct bnxt *bp = netdev_priv(dev);
+	struct hwrm_func_cfg_input *req;
 	struct bnxt_vf_info *vf;
 	int rc;
 
@@ -194,20 +230,23 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
 	}
 	vf = &bp->pf.vf[vf_id];
 
+	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
+	if (rc)
+		return rc;
+
 	memcpy(vf->mac_addr, mac, ETH_ALEN);
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-	req.fid = cpu_to_le16(vf->fw_fid);
-	req.flags = cpu_to_le32(vf->func_flags);
-	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
-	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+
+	req->fid = cpu_to_le16(vf->fw_fid);
+	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+	memcpy(req->dflt_mac_addr, mac, ETH_ALEN);
+	return hwrm_req_send(bp, req);
 }
 
 int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
 		     __be16 vlan_proto)
 {
-	struct hwrm_func_cfg_input req = {0};
 	struct bnxt *bp = netdev_priv(dev);
+	struct hwrm_func_cfg_input *req;
 	struct bnxt_vf_info *vf;
 	u16 vlan_tag;
 	int rc;
@@ -215,40 +254,45 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
 	if (bp->hwrm_spec_code < 0x10201)
 		return -ENOTSUPP;
 
-	if (vlan_proto != htons(ETH_P_8021Q))
+	if (vlan_proto != htons(ETH_P_8021Q) &&
+	    (vlan_proto != htons(ETH_P_8021AD) ||
+	     !(bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP)))
 		return -EPROTONOSUPPORT;
 
 	rc = bnxt_vf_ndo_prep(bp, vf_id);
 	if (rc)
 		return rc;
 
-	/* TODO: needed to implement proper handling of user priority,
-	 * currently fail the command if there is valid priority
-	 */
-	if (vlan_id > 4095 || qos)
+	if (vlan_id >= VLAN_N_VID || qos >= IEEE_8021Q_MAX_PRIORITIES ||
+	    (!vlan_id && qos))
 		return -EINVAL;
 
 	vf = &bp->pf.vf[vf_id];
-	vlan_tag = vlan_id;
+	vlan_tag = vlan_id | (u16)qos << VLAN_PRIO_SHIFT;
 	if (vlan_tag == vf->vlan)
 		return 0;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-	req.fid = cpu_to_le16(vf->fw_fid);
-	req.flags = cpu_to_le32(vf->func_flags);
-	req.dflt_vlan = cpu_to_le16(vlan_tag);
-	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-	if (!rc)
-		vf->vlan = vlan_tag;
+	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
+	if (!rc) {
+		req->fid = cpu_to_le16(vf->fw_fid);
+		req->dflt_vlan = cpu_to_le16(vlan_tag);
+		req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
+		if (bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP) {
+			req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_TPID);
+			req->tpid = vlan_proto;
+		}
+		rc = hwrm_req_send(bp, req);
+		if (!rc)
+			vf->vlan = vlan_tag;
+	}
 	return rc;
 }
 
 int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
 		   int max_tx_rate)
 {
-	struct hwrm_func_cfg_input req = {0};
 	struct bnxt *bp = netdev_priv(dev);
+	struct hwrm_func_cfg_input *req;
 	struct bnxt_vf_info *vf;
 	u32 pf_link_speed;
 	int rc;
@@ -265,28 +309,61 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
 		return -EINVAL;
 	}
 
-	if (min_tx_rate > pf_link_speed || min_tx_rate > max_tx_rate) {
+	if (min_tx_rate > pf_link_speed) {
 		netdev_info(bp->dev, "min tx rate %d is invalid for VF %d\n",
 			    min_tx_rate, vf_id);
 		return -EINVAL;
 	}
 	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
 		return 0;
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-	req.fid = cpu_to_le16(vf->fw_fid);
-	req.flags = cpu_to_le32(vf->func_flags);
-	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
-	req.max_bw = cpu_to_le32(max_tx_rate);
-	req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW);
-	req.min_bw = cpu_to_le32(min_tx_rate);
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
 	if (!rc) {
-		vf->min_tx_rate = min_tx_rate;
-		vf->max_tx_rate = max_tx_rate;
+		req->fid = cpu_to_le16(vf->fw_fid);
+		req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW |
+					   FUNC_CFG_REQ_ENABLES_MIN_BW);
+		req->max_bw = cpu_to_le32(max_tx_rate);
+		req->min_bw = cpu_to_le32(min_tx_rate);
+		rc = hwrm_req_send(bp, req);
+		if (!rc) {
+			vf->min_tx_rate = min_tx_rate;
+			vf->max_tx_rate = max_tx_rate;
+		}
 	}
 	return rc;
 }
 
+static int bnxt_set_vf_link_admin_state(struct bnxt *bp, int vf_id)
+{
+	struct hwrm_func_cfg_input *req;
+	struct bnxt_vf_info *vf;
+	int rc;
+
+	if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN))
+		return 0;
+
+	vf = &bp->pf.vf[vf_id];
+
+	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
+	if (rc)
+		return rc;
+
+	req->fid = cpu_to_le16(vf->fw_fid);
+	switch (vf->flags & (BNXT_VF_LINK_FORCED | BNXT_VF_LINK_UP)) {
+	case BNXT_VF_LINK_FORCED:
+		req->options =
+			FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN;
+		break;
+	case (BNXT_VF_LINK_FORCED | BNXT_VF_LINK_UP):
+		req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_FORCED_UP;
+		break;
+	default:
+		req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO;
+		break;
+	}
+	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE);
+	return hwrm_req_send(bp, req);
+}
+
 int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
 {
 	struct bnxt *bp = netdev_priv(dev);
@@ -312,10 +389,11 @@ int bnxt_set_vf_link_state(struct net_device *dev, int vf_id, int link)
 		break;
 	default:
 		netdev_err(bp->dev, "Invalid link option\n");
-		rc = -EINVAL;
-		break;
+		return -EINVAL;
 	}
-	if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
+	if (bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN)
+		rc = bnxt_set_vf_link_admin_state(bp, vf_id);
+	else if (vf->flags & (BNXT_VF_LINK_UP | BNXT_VF_LINK_FORCED))
 		rc = bnxt_hwrm_fwd_async_event_cmpl(bp, vf,
 			ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
 	return rc;
@@ -335,21 +413,22 @@ static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs)
 
 static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs)
 {
-	int i, rc = 0;
+	struct hwrm_func_vf_resc_free_input *req;
 	struct bnxt_pf_info *pf = &bp->pf;
-	struct hwrm_func_vf_resc_free_input req = {0};
+	int i, rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESC_FREE);
+	if (rc)
+		return rc;
 
-	mutex_lock(&bp->hwrm_cmd_lock);
+	hwrm_req_hold(bp, req);
 	for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) {
-		req.vf_id = cpu_to_le16(i);
-		rc = _hwrm_send_message(bp, &req, sizeof(req),
-					HWRM_CMD_TIMEOUT);
+		req->vf_id = cpu_to_le16(i);
+		rc = hwrm_req_send(bp, req);
 		if (rc)
 			break;
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
@@ -370,6 +449,7 @@ static void bnxt_free_vf_resources(struct bnxt *bp)
 		}
 	}
 
+	bp->pf.active_vfs = 0;
 	kfree(bp->pf.vf);
 	bp->pf.vf = NULL;
 }
@@ -422,37 +502,133 @@ static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs)
 
 static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
 {
-	struct hwrm_func_buf_rgtr_input req = {0};
+	struct hwrm_func_buf_rgtr_input *req;
+	int rc;
+
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_BUF_RGTR);
+	if (rc)
+		return rc;
+
+	req->req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
+	req->req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
+	req->req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
+	req->req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
+	req->req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
+	req->req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
+	req->req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);
+
+	return hwrm_req_send(bp, req);
+}
+
+static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id)
+{
+	struct hwrm_func_cfg_input *req;
+	struct bnxt_vf_info *vf;
+	int rc;
+
+	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
+	if (rc)
+		return rc;
+
+	vf = &bp->pf.vf[vf_id];
+	req->fid = cpu_to_le16(vf->fw_fid);
+
+	if (is_valid_ether_addr(vf->mac_addr)) {
+		req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+		memcpy(req->dflt_mac_addr, vf->mac_addr, ETH_ALEN);
+	}
+	if (vf->vlan) {
+		req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
+		req->dflt_vlan = cpu_to_le16(vf->vlan);
+	}
+	if (vf->max_tx_rate) {
+		req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW |
+					    FUNC_CFG_REQ_ENABLES_MIN_BW);
+		req->max_bw = cpu_to_le32(vf->max_tx_rate);
+		req->min_bw = cpu_to_le32(vf->min_tx_rate);
+	}
+	if (vf->flags & BNXT_VF_TRUST)
+		req->flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE);
+
+	return hwrm_req_send(bp, req);
+}
+
+static void bnxt_hwrm_roce_sriov_cfg(struct bnxt *bp, int num_vfs)
+{
+	struct hwrm_func_qcaps_output *resp;
+	struct hwrm_func_cfg_input *cfg_req;
+	struct hwrm_func_qcaps_input *req;
+	int rc;
+
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
+	if (rc)
+		return;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1);
+	req->fid = cpu_to_le16(0xffff);
+	resp = hwrm_req_hold(bp, req);
+	rc = hwrm_req_send(bp, req);
+	if (rc)
+		goto err;
 
-	req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages);
-	req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT);
-	req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE);
-	req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]);
-	req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]);
-	req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]);
-	req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]);
+	rc = hwrm_req_init(bp, cfg_req, HWRM_FUNC_CFG);
+	if (rc)
+		goto err;
 
-	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+	/* In case of VF Dynamic resource allocation, driver will provision
+	 * maximum resources to all the VFs. FW will dynamically allocate
+	 * resources to VFs on the fly, so always divide the resources by 1.
+	 */
+	if (BNXT_ROCE_VF_DYN_ALLOC_CAP(bp))
+		num_vfs = 1;
+
+	cfg_req->fid = cpu_to_le16(0xffff);
+	cfg_req->enables2 =
+		cpu_to_le32(FUNC_CFG_REQ_ENABLES2_ROCE_MAX_AV_PER_VF |
+			    FUNC_CFG_REQ_ENABLES2_ROCE_MAX_CQ_PER_VF |
+			    FUNC_CFG_REQ_ENABLES2_ROCE_MAX_MRW_PER_VF |
+			    FUNC_CFG_REQ_ENABLES2_ROCE_MAX_QP_PER_VF |
+			    FUNC_CFG_REQ_ENABLES2_ROCE_MAX_SRQ_PER_VF |
+			    FUNC_CFG_REQ_ENABLES2_ROCE_MAX_GID_PER_VF);
+	cfg_req->roce_max_av_per_vf =
+		cpu_to_le32(le32_to_cpu(resp->roce_vf_max_av) / num_vfs);
+	cfg_req->roce_max_cq_per_vf =
+		cpu_to_le32(le32_to_cpu(resp->roce_vf_max_cq) / num_vfs);
+	cfg_req->roce_max_mrw_per_vf =
+		cpu_to_le32(le32_to_cpu(resp->roce_vf_max_mrw) / num_vfs);
+	cfg_req->roce_max_qp_per_vf =
+		cpu_to_le32(le32_to_cpu(resp->roce_vf_max_qp) / num_vfs);
+	cfg_req->roce_max_srq_per_vf =
+		cpu_to_le32(le32_to_cpu(resp->roce_vf_max_srq) / num_vfs);
+	cfg_req->roce_max_gid_per_vf =
+		cpu_to_le32(le32_to_cpu(resp->roce_vf_max_gid) / num_vfs);
+
+	rc = hwrm_req_send(bp, cfg_req);
+
+err:
+	hwrm_req_drop(bp, req);
+	if (rc)
+		netdev_err(bp->dev, "RoCE sriov configuration failed\n");
 }
 
 /* Only called by PF to reserve resources for VFs, returns actual number of
  * VFs configured, or < 0 on error.
  */
-static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
+static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
 {
-	struct hwrm_func_vf_resource_cfg_input req = {0};
+	struct hwrm_func_vf_resource_cfg_input *req;
 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings;
 	u16 vf_stat_ctx, vf_vnics, vf_ring_grps;
 	struct bnxt_pf_info *pf = &bp->pf;
 	int i, rc = 0, min = 1;
 	u16 vf_msix = 0;
+	u16 vf_rss;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1);
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESOURCE_CFG);
+	if (rc)
+		return rc;
 
-	if (bp->flags & BNXT_FLAG_CHIP_P5) {
+	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
 		vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp);
 		vf_ring_grps = 0;
 	} else {
@@ -466,79 +642,98 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
 	vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings;
 	vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings;
 	vf_vnics = hw_resc->max_vnics - bp->nr_vnics;
-	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
+	vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs;
 
-	req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
-	req.max_rsscos_ctx = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
+	req->min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX);
 	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
 		min = 0;
-		req.min_rsscos_ctx = cpu_to_le16(min);
+		req->min_rsscos_ctx = cpu_to_le16(min);
 	}
 	if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL ||
 	    pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
-		req.min_cmpl_rings = cpu_to_le16(min);
-		req.min_tx_rings = cpu_to_le16(min);
-		req.min_rx_rings = cpu_to_le16(min);
-		req.min_l2_ctxs = cpu_to_le16(min);
-		req.min_vnics = cpu_to_le16(min);
-		req.min_stat_ctx = cpu_to_le16(min);
-		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
-			req.min_hw_ring_grps = cpu_to_le16(min);
+		req->min_cmpl_rings = cpu_to_le16(min);
+		req->min_tx_rings = cpu_to_le16(min);
+		req->min_rx_rings = cpu_to_le16(min);
+		req->min_l2_ctxs = cpu_to_le16(min);
+		req->min_vnics = cpu_to_le16(min);
+		req->min_stat_ctx = cpu_to_le16(min);
+		if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
+			req->min_hw_ring_grps = cpu_to_le16(min);
 	} else {
 		vf_cp_rings /= num_vfs;
 		vf_tx_rings /= num_vfs;
 		vf_rx_rings /= num_vfs;
-		vf_vnics /= num_vfs;
+		if ((bp->fw_cap & BNXT_FW_CAP_PRE_RESV_VNICS) &&
+		    vf_vnics >= pf->max_vfs) {
+			/* Take into account that FW has pre-reserved 1 VNIC for
+			 * each pf->max_vfs.
+			 */
+			vf_vnics = (vf_vnics - pf->max_vfs + num_vfs) / num_vfs;
+		} else {
+			vf_vnics /= num_vfs;
+		}
 		vf_stat_ctx /= num_vfs;
 		vf_ring_grps /= num_vfs;
-
-		req.min_cmpl_rings = cpu_to_le16(vf_cp_rings);
-		req.min_tx_rings = cpu_to_le16(vf_tx_rings);
-		req.min_rx_rings = cpu_to_le16(vf_rx_rings);
-		req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
-		req.min_vnics = cpu_to_le16(vf_vnics);
-		req.min_stat_ctx = cpu_to_le16(vf_stat_ctx);
-		req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+		vf_rss /= num_vfs;
+
+		vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
+		req->min_cmpl_rings = cpu_to_le16(vf_cp_rings);
+		req->min_tx_rings = cpu_to_le16(vf_tx_rings);
+		req->min_rx_rings = cpu_to_le16(vf_rx_rings);
+		req->min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
+		req->min_vnics = cpu_to_le16(vf_vnics);
+		req->min_stat_ctx = cpu_to_le16(vf_stat_ctx);
+		req->min_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+		req->min_rsscos_ctx = cpu_to_le16(vf_rss);
 	}
-	req.max_cmpl_rings = cpu_to_le16(vf_cp_rings);
-	req.max_tx_rings = cpu_to_le16(vf_tx_rings);
-	req.max_rx_rings = cpu_to_le16(vf_rx_rings);
-	req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
-	req.max_vnics = cpu_to_le16(vf_vnics);
-	req.max_stat_ctx = cpu_to_le16(vf_stat_ctx);
-	req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
-	if (bp->flags & BNXT_FLAG_CHIP_P5)
-		req.max_msix = cpu_to_le16(vf_msix / num_vfs);
-
-	mutex_lock(&bp->hwrm_cmd_lock);
+	req->max_cmpl_rings = cpu_to_le16(vf_cp_rings);
+	req->max_tx_rings = cpu_to_le16(vf_tx_rings);
+	req->max_rx_rings = cpu_to_le16(vf_rx_rings);
+	req->max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
+	req->max_vnics = cpu_to_le16(vf_vnics);
+	req->max_stat_ctx = cpu_to_le16(vf_stat_ctx);
+	req->max_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+	req->max_rsscos_ctx = cpu_to_le16(vf_rss);
+	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+		req->max_msix = cpu_to_le16(vf_msix / num_vfs);
+
+	hwrm_req_hold(bp, req);
 	for (i = 0; i < num_vfs; i++) {
-		req.vf_id = cpu_to_le16(pf->first_vf_id + i);
-		rc = _hwrm_send_message(bp, &req, sizeof(req),
-					HWRM_CMD_TIMEOUT);
-		if (rc) {
-			rc = -ENOMEM;
+		struct bnxt_vf_info *vf = &pf->vf[i];
+
+		vf->fw_fid = pf->first_vf_id + i;
+		rc = bnxt_set_vf_link_admin_state(bp, i);
+		if (rc)
+			break;
+
+		if (reset)
+			__bnxt_set_vf_params(bp, i);
+
+		req->vf_id = cpu_to_le16(vf->fw_fid);
+		rc = hwrm_req_send(bp, req);
+		if (rc)
 			break;
-		}
 		pf->active_vfs = i + 1;
-		pf->vf[i].fw_fid = pf->first_vf_id + i;
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
+
 	if (pf->active_vfs) {
 		u16 n = pf->active_vfs;
 
-		hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n;
-		hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n;
-		hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) *
-					     n;
-		hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n;
-		hw_resc->max_rsscos_ctxs -= pf->active_vfs;
-		hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
-		hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
-		if (bp->flags & BNXT_FLAG_CHIP_P5)
-			hw_resc->max_irqs -= vf_msix * n;
+		hw_resc->max_tx_rings -= le16_to_cpu(req->min_tx_rings) * n;
+		hw_resc->max_rx_rings -= le16_to_cpu(req->min_rx_rings) * n;
+		hw_resc->max_hw_ring_grps -=
+			le16_to_cpu(req->min_hw_ring_grps) * n;
+		hw_resc->max_cp_rings -= le16_to_cpu(req->min_cmpl_rings) * n;
+		hw_resc->max_rsscos_ctxs -=
+			le16_to_cpu(req->min_rsscos_ctx) * n;
+		hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n;
+		hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n;
+		if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+			hw_resc->max_nqs -= vf_msix;
 
 		rc = pf->active_vfs;
 	}
+	hwrm_req_drop(bp, req);
 	return rc;
 }
 
@@ -547,15 +742,18 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs)
  */
 static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
 {
-	u32 rc = 0, mtu, i;
 	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
-	struct hwrm_func_cfg_input req = {0};
 	struct bnxt_pf_info *pf = &bp->pf;
+	struct hwrm_func_cfg_input *req;
 	int total_vf_tx_rings = 0;
 	u16 vf_ring_grps;
+	u32 mtu, i;
+	int rc;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
+	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
+	if (rc)
+		return rc;
 
 	/* Remaining rings are distributed equally amongs VF's for now */
 	vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs;
@@ -571,52 +769,55 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
 	vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs;
 	vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
 
-	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
-				  FUNC_CFG_REQ_ENABLES_MRU |
-				  FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
-				  FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
-				  FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
-				  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
-				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
-				  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
-				  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
-				  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
-
-	mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-	req.mru = cpu_to_le16(mtu);
-	req.mtu = cpu_to_le16(mtu);
-
-	req.num_rsscos_ctxs = cpu_to_le16(1);
-	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
-	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
-	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
-	req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
-	req.num_l2_ctxs = cpu_to_le16(4);
-
-	req.num_vnics = cpu_to_le16(vf_vnics);
+	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_MTU |
+				   FUNC_CFG_REQ_ENABLES_MRU |
+				   FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS |
+				   FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
+				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
+				   FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
+				   FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
+				   FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
+				   FUNC_CFG_REQ_ENABLES_NUM_VNICS |
+				   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);
+
+	if (bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN) {
+		req->options = FUNC_CFG_REQ_OPTIONS_LINK_ADMIN_STATE_AUTO;
+		req->enables |=
+			cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE);
+	}
+
+	mtu = bp->dev->mtu + VLAN_ETH_HLEN;
+	req->mru = cpu_to_le16(mtu);
+	req->admin_mtu = cpu_to_le16(mtu);
+
+	req->num_rsscos_ctxs = cpu_to_le16(1);
+	req->num_cmpl_rings = cpu_to_le16(vf_cp_rings);
+	req->num_tx_rings = cpu_to_le16(vf_tx_rings);
+	req->num_rx_rings = cpu_to_le16(vf_rx_rings);
+	req->num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
+	req->num_l2_ctxs = cpu_to_le16(4);
+
+	req->num_vnics = cpu_to_le16(vf_vnics);
 	/* FIXME spec currently uses 1 bit for stats ctx */
-	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
+	req->num_stat_ctxs = cpu_to_le16(vf_stat_ctx);
 
-	mutex_lock(&bp->hwrm_cmd_lock);
+	hwrm_req_hold(bp, req);
 	for (i = 0; i < num_vfs; i++) {
 		int vf_tx_rsvd = vf_tx_rings;
 
-		req.fid = cpu_to_le16(pf->first_vf_id + i);
-		rc = _hwrm_send_message(bp, &req, sizeof(req),
-					HWRM_CMD_TIMEOUT);
+		req->fid = cpu_to_le16(pf->first_vf_id + i);
+		rc = hwrm_req_send(bp, req);
 		if (rc)
 			break;
 		pf->active_vfs = i + 1;
-		pf->vf[i].fw_fid = le16_to_cpu(req.fid);
+		pf->vf[i].fw_fid = le16_to_cpu(req->fid);
 		rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
 					      &vf_tx_rsvd);
 		if (rc)
 			break;
 		total_vf_tx_rings += vf_tx_rsvd;
 	}
-	mutex_unlock(&bp->hwrm_cmd_lock);
-	if (rc)
-		rc = -ENOMEM;
+	hwrm_req_drop(bp, req);
 	if (pf->active_vfs) {
 		hw_resc->max_tx_rings -= total_vf_tx_rings;
 		hw_resc->max_rx_rings -= vf_rx_rings * num_vfs;
@@ -630,14 +831,42 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
 	return rc;
 }
 
-static int bnxt_func_cfg(struct bnxt *bp, int num_vfs)
+static int bnxt_func_cfg(struct bnxt *bp, int num_vfs, bool reset)
 {
 	if (BNXT_NEW_RM(bp))
-		return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs);
+		return bnxt_hwrm_func_vf_resc_cfg(bp, num_vfs, reset);
 	else
 		return bnxt_hwrm_func_cfg(bp, num_vfs);
 }
 
+int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
+{
+	int rc;
+
+	/* Register buffers for VFs */
+	rc = bnxt_hwrm_func_buf_rgtr(bp);
+	if (rc)
+		return rc;
+
+	/* Reserve resources for VFs */
+	rc = bnxt_func_cfg(bp, *num_vfs, reset);
+	if (rc != *num_vfs) {
+		if (rc <= 0) {
+			netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
+			*num_vfs = 0;
+			return rc;
+		}
+		netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n",
+			    rc);
+		*num_vfs = rc;
+	}
+
+	if (BNXT_RDMA_SRIOV_EN(bp) && BNXT_ROCE_VF_RESC_CAP(bp))
+		bnxt_hwrm_roce_sriov_cfg(bp, *num_vfs);
+
+	return 0;
+}
+
 static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 {
 	int rc = 0, vfs_supported;
@@ -646,7 +875,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 	int tx_ok = 0, rx_ok = 0, rss_ok = 0;
 	int avail_cp, avail_stat;
 
-	/* Check if we can enable requested num of vf's. At a mininum
+	/* Check if we can enable requested num of vf's. At a minimum
 	 * we require 1 RX 1 TX rings for each VF. In this minimum conf
 	 * features like TPA will not be available.
 	 */
@@ -703,42 +932,46 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 	if (rc)
 		goto err_out1;
 
-	/* Reserve resources for VFs */
-	rc = bnxt_func_cfg(bp, *num_vfs);
-	if (rc != *num_vfs) {
-		if (rc <= 0) {
-			netdev_warn(bp->dev, "Unable to reserve resources for SRIOV.\n");
-			*num_vfs = 0;
-			goto err_out2;
-		}
-		netdev_warn(bp->dev, "Only able to reserve resources for %d VFs.\n", rc);
-		*num_vfs = rc;
-	}
-
-	/* Register buffers for VFs */
-	rc = bnxt_hwrm_func_buf_rgtr(bp);
+	rc = bnxt_cfg_hw_sriov(bp, num_vfs, false);
 	if (rc)
 		goto err_out2;
 
-	bnxt_ulp_sriov_cfg(bp, *num_vfs);
-
 	rc = pci_enable_sriov(bp->pdev, *num_vfs);
 	if (rc)
 		goto err_out2;
 
+	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
+		return 0;
+
+	/* Create representors for VFs in switchdev mode */
+	devl_lock(bp->dl);
+	rc = bnxt_vf_reps_create(bp);
+	devl_unlock(bp->dl);
+	if (rc) {
+		netdev_info(bp->dev, "Cannot enable VFS as representors cannot be created\n");
+		goto err_out3;
+	}
+
 	return 0;
 
+err_out3:
+	/* Disable SR-IOV */
+	pci_disable_sriov(bp->pdev);
+
 err_out2:
 	/* Free the resources reserved for various VF's */
 	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs);
 
+	/* Restore the max resources */
+	bnxt_hwrm_func_qcaps(bp);
+
 err_out1:
 	bnxt_free_vf_resources(bp);
 
 	return rc;
 }
 
-void bnxt_sriov_disable(struct bnxt *bp)
+void __bnxt_sriov_disable(struct bnxt *bp)
 {
 	u16 num_vfs = pci_num_vf(bp->pdev);
 
@@ -746,7 +979,7 @@ void bnxt_sriov_disable(struct bnxt *bp)
 		return;
 
 	/* synchronize VF and VF-rep create and destroy */
-	mutex_lock(&bp->sriov_lock);
+	devl_lock(bp->dl);
 	bnxt_vf_reps_destroy(bp);
 
 	if (pci_vfs_assigned(bp->pdev)) {
@@ -759,17 +992,24 @@ void bnxt_sriov_disable(struct bnxt *bp)
 		/* Free the HW resources reserved for various VF's */
 		bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
 	}
-	mutex_unlock(&bp->sriov_lock);
+	devl_unlock(bp->dl);
 
 	bnxt_free_vf_resources(bp);
+}
+
+static void bnxt_sriov_disable(struct bnxt *bp)
+{
+	if (!pci_num_vf(bp->pdev))
+		return;
+
+	__bnxt_sriov_disable(bp);
 
-	bp->pf.active_vfs = 0;
 	/* Reclaim all resources for the PF. */
 	rtnl_lock();
+	netdev_lock(bp->dev);
 	bnxt_restore_pf_fw_resources(bp);
+	netdev_unlock(bp->dev);
 	rtnl_unlock();
-
-	bnxt_ulp_sriov_cfg(bp, 0);
 }
 
 int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
@@ -777,18 +1017,22 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct bnxt *bp = netdev_priv(dev);
 
-	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
-		netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n");
-		return 0;
-	}
-
 	rtnl_lock();
+	netdev_lock(dev);
 	if (!netif_running(dev)) {
 		netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
+		netdev_unlock(dev);
+		rtnl_unlock();
+		return 0;
+	}
+	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+		netdev_warn(dev, "Reject SRIOV config request when FW reset is in progress\n");
+		netdev_unlock(dev);
 		rtnl_unlock();
 		return 0;
 	}
 	bp->sriov_cfg = true;
+	netdev_unlock(dev);
 	rtnl_unlock();
 
 	if (pci_vfs_assigned(bp->pdev)) {
@@ -819,109 +1063,75 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
 			      void *encap_resp, __le64 encap_resp_addr,
 			      __le16 encap_resp_cpr, u32 msg_size)
 {
-	int rc = 0;
-	struct hwrm_fwd_resp_input req = {0};
-	struct hwrm_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_fwd_resp_input *req;
+	int rc;
 
-	if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
+	if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) {
+		netdev_warn_once(bp->dev, "HWRM fwd response too big (%d bytes)\n",
+				 msg_size);
 		return -EINVAL;
-
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1);
-
-	/* Set the new target id */
-	req.target_id = cpu_to_le16(vf->fw_fid);
-	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
-	req.encap_resp_len = cpu_to_le16(msg_size);
-	req.encap_resp_addr = encap_resp_addr;
-	req.encap_resp_cmpl_ring = encap_resp_cpr;
-	memcpy(req.encap_resp, encap_resp, msg_size);
-
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-
-	if (rc) {
-		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
-		goto fwd_resp_exit;
 	}
 
-	if (resp->error_code) {
-		netdev_err(bp->dev, "hwrm_fwd_resp error %d\n",
-			   resp->error_code);
-		rc = -1;
+	rc = hwrm_req_init(bp, req, HWRM_FWD_RESP);
+	if (!rc) {
+		/* Set the new target id */
+		req->target_id = cpu_to_le16(vf->fw_fid);
+		req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
+		req->encap_resp_len = cpu_to_le16(msg_size);
+		req->encap_resp_addr = encap_resp_addr;
+		req->encap_resp_cmpl_ring = encap_resp_cpr;
+		memcpy(req->encap_resp, encap_resp, msg_size);
+
+		rc = hwrm_req_send(bp, req);
 	}
-
-fwd_resp_exit:
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	if (rc)
+		netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc);
 	return rc;
 }
 
 static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
 				  u32 msg_size)
 {
-	int rc = 0;
-	struct hwrm_reject_fwd_resp_input req = {0};
-	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_reject_fwd_resp_input *req;
+	int rc;
 
 	if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size))
 		return -EINVAL;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
-	/* Set the new target id */
-	req.target_id = cpu_to_le16(vf->fw_fid);
-	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
-	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
-
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-
-	if (rc) {
-		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
-		goto fwd_err_resp_exit;
-	}
+	rc = hwrm_req_init(bp, req, HWRM_REJECT_FWD_RESP);
+	if (!rc) {
+		/* Set the new target id */
+		req->target_id = cpu_to_le16(vf->fw_fid);
+		req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
+		memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size);
 
-	if (resp->error_code) {
-		netdev_err(bp->dev, "hwrm_fwd_err_resp error %d\n",
-			   resp->error_code);
-		rc = -1;
+		rc = hwrm_req_send(bp, req);
 	}
-
-fwd_err_resp_exit:
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	if (rc)
+		netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc);
 	return rc;
 }
 
 static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
 				   u32 msg_size)
 {
-	int rc = 0;
-	struct hwrm_exec_fwd_resp_input req = {0};
-	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_exec_fwd_resp_input *req;
+	int rc;
 
 	if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size))
 		return -EINVAL;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
-	/* Set the new target id */
-	req.target_id = cpu_to_le16(vf->fw_fid);
-	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
-	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);
-
-	mutex_lock(&bp->hwrm_cmd_lock);
-	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-
-	if (rc) {
-		netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
-		goto exec_fwd_resp_exit;
-	}
+	rc = hwrm_req_init(bp, req, HWRM_EXEC_FWD_RESP);
	if (!rc) {
+		/* Set the new target id */
+		req->target_id = cpu_to_le16(vf->fw_fid);
+		req->encap_resp_target_id = cpu_to_le16(vf->fw_fid);
+		memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size);
 
-	if (resp->error_code) {
-		netdev_err(bp->dev, "hwrm_exec_fw_resp error %d\n",
-			   resp->error_code);
-		rc = -1;
+		rc = hwrm_req_send(bp, req);
 	}
-
-exec_fwd_resp_exit:
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	if (rc)
+		netdev_err(bp->dev, "hwrm_exec_fw_resp failed. rc:%d\n", rc);
 	return rc;
 }
 
@@ -935,9 +1145,10 @@ static int bnxt_vf_configure_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
 	 * if the PF assigned MAC address is zero
 	 */
 	if (req->enables & cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)) {
+		bool trust = bnxt_is_trusted_vf(bp, vf);
+
 		if (is_valid_ether_addr(req->dflt_mac_addr) &&
-		    ((vf->flags & BNXT_VF_TRUST) ||
-		     !is_valid_ether_addr(vf->mac_addr) ||
+		    (trust || !is_valid_ether_addr(vf->mac_addr) ||
 		     ether_addr_equal(req->dflt_mac_addr, vf->mac_addr))) {
 			ether_addr_copy(vf->vf_mac_addr, req->dflt_mac_addr);
 			return bnxt_hwrm_exec_fwd_resp(bp, vf, msg_size);
@@ -962,7 +1173,7 @@ static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
 	 * Otherwise, it must match the VF MAC address if firmware spec >=
 	 * 1.2.2
 	 */
-	if (vf->flags & BNXT_VF_TRUST) {
+	if (bnxt_is_trusted_vf(bp, vf)) {
 		mac_ok = true;
 	} else if (is_valid_ether_addr(vf->mac_addr)) {
 		if (ether_addr_equal((const u8 *)req->l2_addr, vf->mac_addr))
@@ -974,7 +1185,7 @@ static int bnxt_vf_validate_set_mac(struct bnxt *bp, struct bnxt_vf_info *vf)
 		/* There are two cases:
 		 * 1.If firmware spec < 0x10202,VF MAC address is not forwarded
 		 *   to the PF and so it doesn't have to match
-		 * 2.Allow VF to modify it's own MAC when PF has not assigned a
+		 * 2.Allow VF to modify its own MAC when PF has not assigned a
 		 *   valid MAC address and firmware spec >= 0x10202
 		 */
 		mac_ok = true;
@@ -993,17 +1204,22 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
 		rc = bnxt_hwrm_exec_fwd_resp(
 			bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
 	} else {
-		struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
+		struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {};
 		struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
 
 		phy_qcfg_req =
 		(struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr;
-		mutex_lock(&bp->hwrm_cmd_lock);
+		mutex_lock(&bp->link_lock);
 		memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp,
 		       sizeof(phy_qcfg_resp));
-		mutex_unlock(&bp->hwrm_cmd_lock);
+		mutex_unlock(&bp->link_lock);
 		phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
 		phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
+		/* New SPEEDS2 fields are beyond the legacy structure, so
+		 * clear the SPEEDS2_SUPPORTED flag.
+		 */
+		phy_qcfg_resp.option_flags &=
+			~PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED;
 		phy_qcfg_resp.valid = 1;
 
 		if (vf->flags & BNXT_VF_LINK_UP) {
@@ -1083,16 +1299,52 @@ void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
 	}
 }
 
+int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)
+{
+	struct hwrm_func_vf_cfg_input *req;
+	int rc = 0;
+
+	if (!BNXT_VF(bp))
+		return 0;
+
+	if (bp->hwrm_spec_code < 0x10202) {
+		if (is_valid_ether_addr(bp->vf.mac_addr))
+			rc = -EADDRNOTAVAIL;
+		goto mac_done;
+	}
+
+	rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
+	if (rc)
+		goto mac_done;
+
+	req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+	memcpy(req->dflt_mac_addr, mac, ETH_ALEN);
+	if (!strict)
+		hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT);
+	rc = hwrm_req_send(bp, req);
+mac_done:
+	if (rc && strict) {
+		rc = -EADDRNOTAVAIL;
+		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
+			    mac);
+		return rc;
+	}
+	return 0;
+}
+
 void bnxt_update_vf_mac(struct bnxt *bp)
 {
-	struct hwrm_func_qcaps_input req = {0};
-	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+	struct hwrm_func_qcaps_output *resp;
+	struct hwrm_func_qcaps_input *req;
+	bool inform_pf = false;
 
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
-	req.fid = cpu_to_le16(0xffff);
+	if (hwrm_req_init(bp, req, HWRM_FUNC_QCAPS))
+		return;
+
+	req->fid = cpu_to_le16(0xffff);
 
-	mutex_lock(&bp->hwrm_cmd_lock);
-	if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
+	resp = hwrm_req_hold(bp, req);
+	if (hwrm_req_send(bp, req))
 		goto update_vf_mac_exit;
 
 	/* Store MAC address from the firmware. There are 2 cases:
@@ -1102,45 +1354,34 @@ void bnxt_update_vf_mac(struct bnxt *bp)
 	 * default but the stored zero MAC will allow the VF user to change
 	 * the random MAC address using ndo_set_mac_address() if he wants.
 	 */
-	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
+	if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr)) {
 		memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);
+		/* This means we are now using our own MAC address, let
+		 * the PF know about this MAC address.
+		 */
+		if (!is_valid_ether_addr(bp->vf.mac_addr))
+			inform_pf = true;
+	}
 
 	/* overwrite netdev dev_addr with admin VF MAC */
 	if (is_valid_ether_addr(bp->vf.mac_addr))
-		memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
+		eth_hw_addr_set(bp->dev, bp->vf.mac_addr);
 update_vf_mac_exit:
-	mutex_unlock(&bp->hwrm_cmd_lock);
+	hwrm_req_drop(bp, req);
+	if (inform_pf)
+		bnxt_approve_mac(bp, bp->dev->dev_addr, false);
 }
 
-int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
-{
-	struct hwrm_func_vf_cfg_input req = {0};
-	int rc = 0;
-
-	if (!BNXT_VF(bp))
-		return 0;
+#else
 
-	if (bp->hwrm_spec_code < 0x10202) {
-		if (is_valid_ether_addr(bp->vf.mac_addr))
-			rc = -EADDRNOTAVAIL;
-		goto mac_done;
-	}
-	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
-	req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
-	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
-	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-mac_done:
-	if (rc && strict) {
-		rc = -EADDRNOTAVAIL;
-		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
-			    mac);
-		return rc;
-	}
+int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
+{
+	if (*num_vfs)
+		return -EOPNOTSUPP;
 	return 0;
 }
 
-#else
-
-void bnxt_sriov_disable(struct bnxt *bp)
+void __bnxt_sriov_disable(struct bnxt *bp)
 {
 }
 
@@ -1153,7 +1394,7 @@ void bnxt_update_vf_mac(struct bnxt *bp)
 {
 }
 
-int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
+int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)
 {
 	return 0;
 }
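The bulk of this diff is a mechanical conversion from the old stack-allocated request pattern (bnxt_hwrm_cmd_hdr_init() and _hwrm_send_message() under bp->hwrm_cmd_lock) to the managed request API from bnxt_hwrm.h. Below is a minimal sketch of the new lifecycle, using only the hwrm_req_* calls that appear in the diff; it mirrors the new bnxt_hwrm_func_qcfg_flags() almost line for line, but the function name bnxt_query_vf_flags() is a hypothetical example, not part of the driver:

static int bnxt_query_vf_flags(struct bnxt *bp, u16 fid, u16 *flags)
{
	struct hwrm_func_qcfg_output *resp;
	struct hwrm_func_qcfg_input *req;
	int rc;

	/* hwrm_req_init() allocates the request and fills in the HWRM
	 * header; it replaces bnxt_hwrm_cmd_hdr_init() on a stack req.
	 */
	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(fid);

	/* hwrm_req_hold() returns the response buffer and keeps it valid
	 * after the send completes; it replaces holding bp->hwrm_cmd_lock
	 * around _hwrm_send_message(). The by-hand resp->error_code checks
	 * the old code carried are gone because hwrm_req_send() already
	 * returns an error code on HWRM failure.
	 */
	resp = hwrm_req_hold(bp, req);
	rc = hwrm_req_send(bp, req);
	if (!rc)
		*flags = le16_to_cpu(resp->flags);

	/* Every hwrm_req_init() must be balanced by a send without a hold,
	 * or by hwrm_req_drop() once the response has been consumed.
	 */
	hwrm_req_drop(bp, req);
	return rc;
}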
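bnxt_set_vf_vlan() now packs the 3-bit 802.1p priority and the 12-bit VLAN ID into a single 16-bit tag (vlan_id | qos << VLAN_PRIO_SHIFT), and bnxt_get_vf_config() splits it back apart with VLAN_VID_MASK and VLAN_PRIO_SHIFT. A small standalone demonstration of that packing, with the constant values copied from linux/if_vlan.h:

#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_SHIFT	13		/* from linux/if_vlan.h */
#define VLAN_VID_MASK	0x0fff		/* from linux/if_vlan.h */

int main(void)
{
	uint16_t vlan_id = 100, qos = 5;

	/* Compose the tag the way bnxt_set_vf_vlan() does */
	uint16_t vlan_tag = vlan_id | (uint16_t)(qos << VLAN_PRIO_SHIFT);

	/* Decompose it the way bnxt_get_vf_config() reports it */
	printf("tag=0x%04x vid=%u prio=%u\n", vlan_tag,
	       vlan_tag & VLAN_VID_MASK, vlan_tag >> VLAN_PRIO_SHIFT);
	return 0;	/* prints: tag=0xa064 vid=100 prio=5 */
}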
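The VNIC split in bnxt_hwrm_func_vf_resc_cfg() is worth a worked example. When the firmware advertises BNXT_FW_CAP_PRE_RESV_VNICS it has already set aside one VNIC for each of the pf->max_vfs possible VFs, so only the remainder of the free pool is divided among the num_vfs actually being enabled, and each enabled VF gets its pre-reserved VNIC back (that is the + num_vfs term in the diff). The numbers below are made up purely for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int vf_vnics = 128;	/* free VNICs left after the PF's share */
	unsigned int max_vfs = 64;	/* FW pre-reserves 1 VNIC per possible VF */
	unsigned int num_vfs = 8;	/* VFs actually being enabled */

	/* Old formula: ignore the pre-reservation entirely */
	printf("plain split:        %u VNICs per VF\n", vf_vnics / num_vfs);

	/* New formula: divide only the non-pre-reserved VNICs, then each
	 * enabled VF keeps its own pre-reserved one.
	 */
	printf("pre-reserved split: %u VNICs per VF\n",
	       (vf_vnics - max_vfs + num_vfs) / num_vfs);
	return 0;	/* prints 16 vs. 9 */
}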
