Diffstat (limited to 'drivers/net/ethernet/intel/ice')
106 files changed, 19262 insertions, 6469 deletions
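The devlink.c rework in this diff registers several new driver-specific devlink parameters (tx_scheduling_layers, local_forwarding) plus generic MSI-X and RDMA parameters through devl_params_register(). As a reading aid only, here is a minimal sketch of that get/set/validate parameter pattern; it is not the driver's actual code (the real callbacks follow in the diff), and the my_drv_* names and hard-coded values are hypothetical.

    #include <linux/bits.h>
    #include <linux/kernel.h>
    #include <linux/netlink.h>
    #include <net/devlink.h>

    /* Driver-specific parameter IDs start above the generic ID space. */
    enum my_drv_param_id {
    	MY_DRV_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
    	MY_DRV_PARAM_ID_TX_SCHED_LAYERS,
    };

    static int my_drv_tx_sched_layers_get(struct devlink *devlink, u32 id,
    				       struct devlink_param_gset_ctx *ctx)
    {
    	ctx->val.vu8 = 9;	/* a real driver would read this back from NVM */
    	return 0;
    }

    static int my_drv_tx_sched_layers_set(struct devlink *devlink, u32 id,
    				       struct devlink_param_gset_ctx *ctx,
    				       struct netlink_ext_ack *extack)
    {
    	/* a real driver would persist ctx->val.vu8; takes effect after reset */
    	return 0;
    }

    static int my_drv_tx_sched_layers_validate(struct devlink *devlink, u32 id,
    					    union devlink_param_value val,
    					    struct netlink_ext_ack *extack)
    {
    	if (val.vu8 != 5 && val.vu8 != 9) {
    		NL_SET_ERR_MSG_MOD(extack, "Only 5 or 9 layers are supported");
    		return -EINVAL;
    	}
    	return 0;
    }

    static const struct devlink_param my_drv_params[] = {
    	DEVLINK_PARAM_DRIVER(MY_DRV_PARAM_ID_TX_SCHED_LAYERS,
    			     "tx_scheduling_layers", DEVLINK_PARAM_TYPE_U8,
    			     BIT(DEVLINK_PARAM_CMODE_PERMANENT),
    			     my_drv_tx_sched_layers_get,
    			     my_drv_tx_sched_layers_set,
    			     my_drv_tx_sched_layers_validate),
    };

    /* called with the devlink instance lock held, e.g. during probe */
    static int my_drv_register_params(struct devlink *devlink)
    {
    	return devl_params_register(devlink, my_drv_params,
    				    ARRAY_SIZE(my_drv_params));
    }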
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index cddd82d4ca0f..9e0d9f710441 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -5,6 +5,7 @@
 # Makefile for the Intel(R) Ethernet Connection E800 Series Linux Driver
 #
 
+subdir-ccflags-y += -I$(src)
 obj-$(CONFIG_ICE) += ice.o
 
 ice-y := ice_main.o \
@@ -27,8 +28,14 @@ ice-y := ice_main.o \
 	ice_vlan_mode.o \
 	ice_flex_pipe.o \
 	ice_flow.o \
+	ice_parser.o \
+	ice_parser_rt.o \
 	ice_idc.o \
-	ice_devlink.o \
+	devlink/devlink.o \
+	devlink/health.o \
+	devlink/port.o \
+	ice_sf_eth.o \
+	ice_sf_vsi_vlan_ops.o \
 	ice_ddp.o \
 	ice_fw_update.o \
 	ice_lag.o \
@@ -36,7 +43,8 @@ ice-y := ice_main.o \
 	ice_repr.o \
 	ice_tc_lib.o \
 	ice_fwlog.o \
-	ice_debugfs.o
+	ice_debugfs.o \
+	ice_adapter.o
 ice-$(CONFIG_PCI_IOV) += \
 	ice_sriov.o \
 	ice_virtchnl.o \
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index b516e42b41f0..4af60e2f37df 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -5,12 +5,12 @@
 #include "ice.h"
 #include "ice_lib.h"
-#include "ice_devlink.h"
+#include "devlink.h"
+#include "port.h"
 #include "ice_eswitch.h"
 #include "ice_fw_update.h"
 #include "ice_dcb_lib.h"
-
-static int ice_active_port_option = -1;
+#include "ice_sf_eth.h"
 
 /* context for devlink info version reporting */
 struct ice_info_ctx {
@@ -368,14 +368,18 @@ static int ice_devlink_info_get(struct devlink *devlink,
 			}
 			break;
 		case ICE_VERSION_RUNNING:
-			err = devlink_info_version_running_put(req, key, ctx->buf);
+			err = devlink_info_version_running_put_ext(req, key,
+								   ctx->buf,
+								   DEVLINK_INFO_VERSION_TYPE_COMPONENT);
 			if (err) {
 				NL_SET_ERR_MSG_MOD(extack, "Unable to set running version");
 				goto out_free_ctx;
 			}
 			break;
 		case ICE_VERSION_STORED:
-			err = devlink_info_version_stored_put(req, key, ctx->buf);
+			err = devlink_info_version_stored_put_ext(req, key,
+								  ctx->buf,
+								  DEVLINK_INFO_VERSION_TYPE_COMPONENT);
 			if (err) {
 				NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version");
 				goto out_free_ctx;
@@ -478,17 +482,17 @@ ice_devlink_reload_down(struct devlink *devlink, bool netns_change,
 	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
 		if (ice_is_eswitch_mode_switchdev(pf)) {
 			NL_SET_ERR_MSG_MOD(extack,
-					   "Go to legacy mode before doing reinit\n");
+					   "Go to legacy mode before doing reinit");
 			return -EOPNOTSUPP;
 		}
 		if (ice_is_adq_active(pf)) {
 			NL_SET_ERR_MSG_MOD(extack,
-					   "Turn off ADQ before doing reinit\n");
+					   "Turn off ADQ before doing reinit");
 			return -EOPNOTSUPP;
 		}
 		if (ice_has_vfs(pf)) {
 			NL_SET_ERR_MSG_MOD(extack,
-					   "Remove all VFs before doing reinit\n");
+					   "Remove all VFs before doing reinit");
 			return -EOPNOTSUPP;
 		}
 		ice_devlink_reinit_down(pf);
@@ -526,248 +530,153 @@ ice_devlink_reload_empr_finish(struct ice_pf *pf,
 }
 
 /**
- * ice_devlink_port_opt_speed_str - convert speed to a string
- * @speed: speed value
- */
-static const char *ice_devlink_port_opt_speed_str(u8 speed)
-{
-	switch (speed & ICE_AQC_PORT_OPT_MAX_LANE_M) {
-	case ICE_AQC_PORT_OPT_MAX_LANE_100M:
-		return "0.1";
-	case ICE_AQC_PORT_OPT_MAX_LANE_1G:
-		return "1";
-	case ICE_AQC_PORT_OPT_MAX_LANE_2500M:
-		return "2.5";
-	case ICE_AQC_PORT_OPT_MAX_LANE_5G:
-		return "5";
-	case ICE_AQC_PORT_OPT_MAX_LANE_10G:
-		return "10";
-	case ICE_AQC_PORT_OPT_MAX_LANE_25G:
-		return "25";
-	case ICE_AQC_PORT_OPT_MAX_LANE_50G:
-		return "50";
-	case ICE_AQC_PORT_OPT_MAX_LANE_100G:
-		return "100";
-	}
-
-	return "-";
-}
-
-#define 
ICE_PORT_OPT_DESC_LEN 50 -/** - * ice_devlink_port_options_print - Print available port split options - * @pf: the PF to print split port options + * ice_get_tx_topo_user_sel - Read user's choice from flash + * @pf: pointer to pf structure + * @layers: value read from flash will be saved here + * + * Reads user's preference for Tx Scheduler Topology Tree from PFA TLV. * - * Prints a table with available port split options and max port speeds + * Return: zero when read was successful, negative values otherwise. */ -static void ice_devlink_port_options_print(struct ice_pf *pf) +static int ice_get_tx_topo_user_sel(struct ice_pf *pf, uint8_t *layers) { - u8 i, j, options_count, cnt, speed, pending_idx, active_idx; - struct ice_aqc_get_port_options_elem *options, *opt; - struct device *dev = ice_pf_to_dev(pf); - bool active_valid, pending_valid; - char desc[ICE_PORT_OPT_DESC_LEN]; - const char *str; - int status; + struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {}; + struct ice_hw *hw = &pf->hw; + int err; - options = kcalloc(ICE_AQC_PORT_OPT_MAX * ICE_MAX_PORT_PER_PCI_DEV, - sizeof(*options), GFP_KERNEL); - if (!options) - return; + err = ice_acquire_nvm(hw, ICE_RES_READ); + if (err) + return err; - for (i = 0; i < ICE_MAX_PORT_PER_PCI_DEV; i++) { - opt = options + i * ICE_AQC_PORT_OPT_MAX; - options_count = ICE_AQC_PORT_OPT_MAX; - active_valid = 0; + err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0, + sizeof(usr_sel), &usr_sel, true, true, NULL); + if (err) + goto exit_release_res; - status = ice_aq_get_port_options(&pf->hw, opt, &options_count, - i, true, &active_idx, - &active_valid, &pending_idx, - &pending_valid); - if (status) { - dev_dbg(dev, "Couldn't read port option for port %d, err %d\n", - i, status); - goto err; - } - } + if (usr_sel.data & ICE_AQC_NVM_TX_TOPO_USER_SEL) + *layers = ICE_SCHED_5_LAYERS; + else + *layers = ICE_SCHED_9_LAYERS; - dev_dbg(dev, "Available port split options and max port speeds (Gbps):\n"); - dev_dbg(dev, "Status Split Quad 0 Quad 1\n"); - dev_dbg(dev, " count L0 L1 L2 L3 L4 L5 L6 L7\n"); +exit_release_res: + ice_release_nvm(hw); - for (i = 0; i < options_count; i++) { - cnt = 0; + return err; +} - if (i == ice_active_port_option) - str = "Active"; - else if ((i == pending_idx) && pending_valid) - str = "Pending"; - else - str = ""; +/** + * ice_update_tx_topo_user_sel - Save user's preference in flash + * @pf: pointer to pf structure + * @layers: value to be saved in flash + * + * Variable "layers" defines user's preference about number of layers in Tx + * Scheduler Topology Tree. This choice should be stored in PFA TLV field + * and be picked up by driver, next time during init. + * + * Return: zero when save was successful, negative values otherwise. 
+ */ +static int ice_update_tx_topo_user_sel(struct ice_pf *pf, int layers) +{ + struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {}; + struct ice_hw *hw = &pf->hw; + int err; - cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt, - "%-8s", str); + err = ice_acquire_nvm(hw, ICE_RES_WRITE); + if (err) + return err; - cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt, - "%-6u", options[i].pmd); + err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0, + sizeof(usr_sel), &usr_sel, true, true, NULL); + if (err) + goto exit_release_res; - for (j = 0; j < ICE_MAX_PORT_PER_PCI_DEV; ++j) { - speed = options[i + j * ICE_AQC_PORT_OPT_MAX].max_lane_speed; - str = ice_devlink_port_opt_speed_str(speed); - cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt, - "%3s ", str); - } + if (layers == ICE_SCHED_5_LAYERS) + usr_sel.data |= ICE_AQC_NVM_TX_TOPO_USER_SEL; + else + usr_sel.data &= ~ICE_AQC_NVM_TX_TOPO_USER_SEL; - dev_dbg(dev, "%s\n", desc); - } + err = ice_write_one_nvm_block(pf, ICE_AQC_NVM_TX_TOPO_MOD_ID, 2, + sizeof(usr_sel.data), &usr_sel.data, + true, NULL, NULL); +exit_release_res: + ice_release_nvm(hw); -err: - kfree(options); + return err; } /** - * ice_devlink_aq_set_port_option - Send set port option admin queue command - * @pf: the PF to print split port options - * @option_idx: selected port option - * @extack: extended netdev ack structure + * ice_devlink_tx_sched_layers_get - Get tx_scheduling_layers parameter + * @devlink: pointer to the devlink instance + * @id: the parameter ID to set + * @ctx: context to store the parameter value * - * Sends set port option admin queue command with selected port option and - * calls NVM write activate. + * Return: zero on success and negative value on failure. */ -static int -ice_devlink_aq_set_port_option(struct ice_pf *pf, u8 option_idx, - struct netlink_ext_ack *extack) +static int ice_devlink_tx_sched_layers_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) { - struct device *dev = ice_pf_to_dev(pf); - int status; - - status = ice_aq_set_port_option(&pf->hw, 0, true, option_idx); - if (status) { - dev_dbg(dev, "ice_aq_set_port_option, err %d aq_err %d\n", - status, pf->hw.adminq.sq_last_status); - NL_SET_ERR_MSG_MOD(extack, "Port split request failed"); - return -EIO; - } - - status = ice_acquire_nvm(&pf->hw, ICE_RES_WRITE); - if (status) { - dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n", - status, pf->hw.adminq.sq_last_status); - NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); - return -EIO; - } - - status = ice_nvm_write_activate(&pf->hw, ICE_AQC_NVM_ACTIV_REQ_EMPR, NULL); - if (status) { - dev_dbg(dev, "ice_nvm_write_activate failed, err %d aq_err %d\n", - status, pf->hw.adminq.sq_last_status); - NL_SET_ERR_MSG_MOD(extack, "Port split request failed to save data"); - ice_release_nvm(&pf->hw); - return -EIO; - } + struct ice_pf *pf = devlink_priv(devlink); + int err; - ice_release_nvm(&pf->hw); + err = ice_get_tx_topo_user_sel(pf, &ctx->val.vu8); + if (err) + return err; - NL_SET_ERR_MSG_MOD(extack, "Reboot required to finish port split"); return 0; } /** - * ice_devlink_port_split - .port_split devlink handler - * @devlink: devlink instance structure - * @port: devlink port structure - * @count: number of ports to split to - * @extack: extended netdev ack structure - * - * Callback for the devlink .port_split operation. 
- * - * Unfortunately, the devlink expression of available options is limited - * to just a number, so search for an FW port option which supports - * the specified number. As there could be multiple FW port options with - * the same port split count, allow switching between them. When the same - * port split count request is issued again, switch to the next FW port - * option with the same port split count. + * ice_devlink_tx_sched_layers_set - Set tx_scheduling_layers parameter + * @devlink: pointer to the devlink instance + * @id: the parameter ID to set + * @ctx: context to get the parameter value + * @extack: netlink extended ACK structure * - * Return: zero on success or an error code on failure. + * Return: zero on success and negative value on failure. */ -static int -ice_devlink_port_split(struct devlink *devlink, struct devlink_port *port, - unsigned int count, struct netlink_ext_ack *extack) +static int ice_devlink_tx_sched_layers_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { - struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX]; - u8 i, j, active_idx, pending_idx, new_option; struct ice_pf *pf = devlink_priv(devlink); - u8 option_count = ICE_AQC_PORT_OPT_MAX; - struct device *dev = ice_pf_to_dev(pf); - bool active_valid, pending_valid; - int status; - - status = ice_aq_get_port_options(&pf->hw, options, &option_count, - 0, true, &active_idx, &active_valid, - &pending_idx, &pending_valid); - if (status) { - dev_dbg(dev, "Couldn't read port split options, err = %d\n", - status); - NL_SET_ERR_MSG_MOD(extack, "Failed to get available port split options"); - return -EIO; - } - - new_option = ICE_AQC_PORT_OPT_MAX; - active_idx = pending_valid ? pending_idx : active_idx; - for (i = 1; i <= option_count; i++) { - /* In order to allow switching between FW port options with - * the same port split count, search for a new option starting - * from the active/pending option (with array wrap around). - */ - j = (active_idx + i) % option_count; - - if (count == options[j].pmd) { - new_option = j; - break; - } - } - - if (new_option == active_idx) { - dev_dbg(dev, "request to split: count: %u is already set and there are no other options\n", - count); - NL_SET_ERR_MSG_MOD(extack, "Requested split count is already set"); - ice_devlink_port_options_print(pf); - return -EINVAL; - } - - if (new_option == ICE_AQC_PORT_OPT_MAX) { - dev_dbg(dev, "request to split: count: %u not found\n", count); - NL_SET_ERR_MSG_MOD(extack, "Port split requested unsupported port config"); - ice_devlink_port_options_print(pf); - return -EINVAL; - } + int err; - status = ice_devlink_aq_set_port_option(pf, new_option, extack); - if (status) - return status; + err = ice_update_tx_topo_user_sel(pf, ctx->val.vu8); + if (err) + return err; - ice_devlink_port_options_print(pf); + NL_SET_ERR_MSG_MOD(extack, + "Tx scheduling layers have been changed on this device. You must do the PCI slot powercycle for the change to take effect."); return 0; } /** - * ice_devlink_port_unsplit - .port_unsplit devlink handler - * @devlink: devlink instance structure - * @port: devlink port structure - * @extack: extended netdev ack structure + * ice_devlink_tx_sched_layers_validate - Validate passed tx_scheduling_layers + * parameter value + * @devlink: unused pointer to devlink instance + * @id: the parameter ID to validate + * @val: value to validate + * @extack: netlink extended ACK structure * - * Callback for the devlink .port_unsplit operation. 
- * Calls ice_devlink_port_split with split count set to 1. - * There could be no FW option available with split count 1. + * Supported values are: + * - 5 - five layers Tx Scheduler Topology Tree + * - 9 - nine layers Tx Scheduler Topology Tree * - * Return: zero on success or an error code on failure. + * Return: zero when passed parameter value is supported. Negative value on + * error. */ -static int -ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port, - struct netlink_ext_ack *extack) +static int ice_devlink_tx_sched_layers_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) { - return ice_devlink_port_split(devlink, port, 1, extack); + if (val.vu8 != ICE_SCHED_5_LAYERS && val.vu8 != ICE_SCHED_9_LAYERS) { + NL_SET_ERR_MSG_MOD(extack, + "Wrong number of tx scheduler layers provided."); + return -EINVAL; + } + + return 0; } /** @@ -841,6 +750,7 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node struct ice_sched_node *tc_node, struct ice_pf *pf) { struct devlink_rate *rate_node = NULL; + struct ice_dynamic_port *sf; struct ice_vf *vf; int i; @@ -852,6 +762,7 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node /* create root node */ rate_node = devl_rate_node_create(devlink, node, node->name, NULL); } else if (node->vsi_handle && + pf->vsi[node->vsi_handle]->type == ICE_VSI_VF && pf->vsi[node->vsi_handle]->vf) { vf = pf->vsi[node->vsi_handle]->vf; if (!vf->devlink_port.devlink_rate) @@ -860,6 +771,16 @@ static void ice_traverse_tx_tree(struct devlink *devlink, struct ice_sched_node */ devl_rate_leaf_create(&vf->devlink_port, node, node->parent->rate_node); + } else if (node->vsi_handle && + pf->vsi[node->vsi_handle]->type == ICE_VSI_SF && + pf->vsi[node->vsi_handle]->sf) { + sf = pf->vsi[node->vsi_handle]->sf; + if (!sf->devlink_port.devlink_rate) + /* leaf nodes doesn't have children + * so we don't set rate_node + */ + devl_rate_leaf_create(&sf->devlink_port, node, + node->parent->rate_node); } else if (node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF && node->parent->rate_node) { rate_node = devl_rate_node_create(devlink, node, node->name, @@ -891,10 +812,8 @@ int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *v tc_node = pi->root->children[0]; mutex_lock(&pi->sched_lock); - devl_lock(devlink); for (i = 0; i < tc_node->num_children; i++) ice_traverse_tx_tree(devlink, tc_node->children[i], tc_node, pf); - devl_unlock(devlink); mutex_unlock(&pi->sched_lock); return 0; @@ -1062,6 +981,9 @@ static int ice_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv /* preallocate memory for ice_sched_node */ node = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + *priv = node; return 0; @@ -1283,6 +1205,25 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate, return status; } +static void ice_set_min_max_msix(struct ice_pf *pf) +{ + struct devlink *devlink = priv_to_devlink(pf); + union devlink_param_value val; + int err; + + err = devl_param_driverinit_value_get(devlink, + DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN, + &val); + if (!err) + pf->msix.min = val.vu32; + + err = devl_param_driverinit_value_get(devlink, + DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX, + &val); + if (!err) + pf->msix.max = val.vu32; +} + /** * ice_devlink_reinit_up - do reinit of the given PF * @pf: pointer to the PF struct @@ -1290,18 +1231,25 @@ static int 
ice_devlink_set_parent(struct devlink_rate *devlink_rate, static int ice_devlink_reinit_up(struct ice_pf *pf) { struct ice_vsi *vsi = ice_get_main_vsi(pf); - struct ice_vsi_cfg_params params; int err; + err = ice_init_hw(&pf->hw); + if (err) { + dev_err(ice_pf_to_dev(pf), "ice_init_hw failed: %d\n", err); + return err; + } + + /* load MSI-X values */ + ice_set_min_max_msix(pf); + err = ice_init_dev(pf); if (err) - return err; + goto unroll_hw_init; - params = ice_vsi_to_params(vsi); - params.flags = ICE_VSI_FLAG_INIT; + vsi->flags = ICE_VSI_FLAG_INIT; rtnl_lock(); - err = ice_vsi_cfg(vsi, ¶ms); + err = ice_vsi_cfg(vsi); rtnl_unlock(); if (err) goto err_vsi_cfg; @@ -1319,6 +1267,8 @@ err_load: rtnl_unlock(); err_vsi_cfg: ice_deinit_dev(pf); +unroll_hw_init: + ice_deinit_hw(&pf->hw); return err; } @@ -1378,37 +1328,51 @@ static const struct devlink_ops ice_devlink_ops = { .rate_leaf_parent_set = ice_devlink_set_parent, .rate_node_parent_set = ice_devlink_set_parent, + + .port_new = ice_devlink_port_new, }; +static const struct devlink_ops ice_sf_devlink_ops; + static int ice_devlink_enable_roce_get(struct devlink *devlink, u32 id, struct devlink_param_gset_ctx *ctx) { struct ice_pf *pf = devlink_priv(devlink); + struct iidc_rdma_core_dev_info *cdev; + + cdev = pf->cdev_info; + if (!cdev) + return -ENODEV; - ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? true : false; + ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2); return 0; } -static int -ice_devlink_enable_roce_set(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) +static int ice_devlink_enable_roce_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct ice_pf *pf = devlink_priv(devlink); + struct iidc_rdma_core_dev_info *cdev; bool roce_ena = ctx->val.vbool; int ret; + cdev = pf->cdev_info; + if (!cdev) + return -ENODEV; + if (!roce_ena) { ice_unplug_aux_dev(pf); - pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2; + cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_ROCEV2; return 0; } - pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2; + cdev->rdma_protocol |= IIDC_RDMA_PROTOCOL_ROCEV2; ret = ice_plug_aux_dev(pf); if (ret) - pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2; + cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_ROCEV2; return ret; } @@ -1419,11 +1383,16 @@ ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id, struct netlink_ext_ack *extack) { struct ice_pf *pf = devlink_priv(devlink); + struct iidc_rdma_core_dev_info *cdev; + + cdev = pf->cdev_info; + if (!cdev) + return -ENODEV; if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) return -EOPNOTSUPP; - if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP) { + if (cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP) { NL_SET_ERR_MSG_MOD(extack, "iWARP is currently enabled. 
This device cannot enable iWARP and RoCEv2 simultaneously"); return -EOPNOTSUPP; } @@ -1436,30 +1405,40 @@ ice_devlink_enable_iw_get(struct devlink *devlink, u32 id, struct devlink_param_gset_ctx *ctx) { struct ice_pf *pf = devlink_priv(devlink); + struct iidc_rdma_core_dev_info *cdev; + + cdev = pf->cdev_info; + if (!cdev) + return -ENODEV; - ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP; + ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP); return 0; } -static int -ice_devlink_enable_iw_set(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) +static int ice_devlink_enable_iw_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct ice_pf *pf = devlink_priv(devlink); + struct iidc_rdma_core_dev_info *cdev; bool iw_ena = ctx->val.vbool; int ret; + cdev = pf->cdev_info; + if (!cdev) + return -ENODEV; + if (!iw_ena) { ice_unplug_aux_dev(pf); - pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP; + cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_IWARP; return 0; } - pf->rdma_mode |= IIDC_RDMA_PROTOCOL_IWARP; + cdev->rdma_protocol |= IIDC_RDMA_PROTOCOL_IWARP; ret = ice_plug_aux_dev(pf); if (ret) - pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP; + cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_IWARP; return ret; } @@ -1474,7 +1453,7 @@ ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id, if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) return -EOPNOTSUPP; - if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2) { + if (pf->cdev_info->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2) { NL_SET_ERR_MSG_MOD(extack, "RoCEv2 is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously"); return -EOPNOTSUPP; } @@ -1482,260 +1461,349 @@ ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id, return 0; } -static const struct devlink_param ice_devlink_params[] = { - DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME), - ice_devlink_enable_roce_get, - ice_devlink_enable_roce_set, - ice_devlink_enable_roce_validate), - DEVLINK_PARAM_GENERIC(ENABLE_IWARP, BIT(DEVLINK_PARAM_CMODE_RUNTIME), - ice_devlink_enable_iw_get, - ice_devlink_enable_iw_set, - ice_devlink_enable_iw_validate), +#define DEVLINK_LOCAL_FWD_DISABLED_STR "disabled" +#define DEVLINK_LOCAL_FWD_ENABLED_STR "enabled" +#define DEVLINK_LOCAL_FWD_PRIORITIZED_STR "prioritized" -}; +/** + * ice_devlink_local_fwd_mode_to_str - Get string for local_fwd mode. + * @mode: local forwarding for mode used in port_info struct. + * + * Return: Mode respective string or "Invalid". + */ +static const char * +ice_devlink_local_fwd_mode_to_str(enum ice_local_fwd_mode mode) +{ + switch (mode) { + case ICE_LOCAL_FWD_MODE_ENABLED: + return DEVLINK_LOCAL_FWD_ENABLED_STR; + case ICE_LOCAL_FWD_MODE_PRIORITIZED: + return DEVLINK_LOCAL_FWD_PRIORITIZED_STR; + case ICE_LOCAL_FWD_MODE_DISABLED: + return DEVLINK_LOCAL_FWD_DISABLED_STR; + } -static void ice_devlink_free(void *devlink_ptr) -{ - devlink_free((struct devlink *)devlink_ptr); + return "Invalid"; } /** - * ice_allocate_pf - Allocate devlink and return PF structure pointer - * @dev: the device to allocate for + * ice_devlink_local_fwd_str_to_mode - Get local_fwd mode from string name. + * @mode_str: local forwarding mode string. * - * Allocate a devlink instance for this device and return the private area as - * the PF structure. The devlink memory is kept track of through devres by - * adding an action to remove it when unwinding. + * Return: Mode value or negative number if invalid. 
*/ -struct ice_pf *ice_allocate_pf(struct device *dev) +static int ice_devlink_local_fwd_str_to_mode(const char *mode_str) { - struct devlink *devlink; - - devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf), dev); - if (!devlink) - return NULL; - - /* Add an action to teardown the devlink when unwinding the driver */ - if (devm_add_action_or_reset(dev, ice_devlink_free, devlink)) - return NULL; + if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_ENABLED_STR)) + return ICE_LOCAL_FWD_MODE_ENABLED; + else if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_PRIORITIZED_STR)) + return ICE_LOCAL_FWD_MODE_PRIORITIZED; + else if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_DISABLED_STR)) + return ICE_LOCAL_FWD_MODE_DISABLED; - return devlink_priv(devlink); + return -EINVAL; } /** - * ice_devlink_register - Register devlink interface for this PF - * @pf: the PF to register the devlink for. - * - * Register the devlink instance associated with this physical function. + * ice_devlink_local_fwd_get - Get local_fwd parameter. + * @devlink: Pointer to the devlink instance. + * @id: The parameter ID to set. + * @ctx: Context to store the parameter value. * - * Return: zero on success or an error code on failure. + * Return: Zero. */ -void ice_devlink_register(struct ice_pf *pf) +static int ice_devlink_local_fwd_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) { - struct devlink *devlink = priv_to_devlink(pf); + struct ice_pf *pf = devlink_priv(devlink); + struct ice_port_info *pi; + const char *mode_str; - devlink_register(devlink); + pi = pf->hw.port_info; + mode_str = ice_devlink_local_fwd_mode_to_str(pi->local_fwd_mode); + snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s", mode_str); + + return 0; } /** - * ice_devlink_unregister - Unregister devlink resources for this PF. - * @pf: the PF structure to cleanup + * ice_devlink_local_fwd_set - Set local_fwd parameter. + * @devlink: Pointer to the devlink instance. + * @id: The parameter ID to set. + * @ctx: Context to get the parameter value. + * @extack: Netlink extended ACK structure. * - * Releases resources used by devlink and cleans up associated memory. + * Return: Zero. */ -void ice_devlink_unregister(struct ice_pf *pf) +static int ice_devlink_local_fwd_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { - devlink_unregister(priv_to_devlink(pf)); + int new_local_fwd_mode = ice_devlink_local_fwd_str_to_mode(ctx->val.vstr); + struct ice_pf *pf = devlink_priv(devlink); + struct device *dev = ice_pf_to_dev(pf); + struct ice_port_info *pi; + + pi = pf->hw.port_info; + if (pi->local_fwd_mode != new_local_fwd_mode) { + pi->local_fwd_mode = new_local_fwd_mode; + dev_info(dev, "Setting local_fwd to %s\n", ctx->val.vstr); + ice_schedule_reset(pf, ICE_RESET_CORER); + } + + return 0; } /** - * ice_devlink_set_switch_id - Set unique switch id based on pci dsn - * @pf: the PF to create a devlink port for - * @ppid: struct with switch id information + * ice_devlink_local_fwd_validate - Validate passed local_fwd parameter value. + * @devlink: Unused pointer to devlink instance. + * @id: The parameter ID to validate. + * @val: Value to validate. + * @extack: Netlink extended ACK structure. + * + * Supported values are: + * "enabled" - local_fwd is enabled, "disabled" - local_fwd is disabled + * "prioritized" - local_fwd traffic is prioritized in scheduling. + * + * Return: Zero when passed parameter value is supported. Negative value on + * error. 
*/ -static void -ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid) +static int ice_devlink_local_fwd_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + if (ice_devlink_local_fwd_str_to_mode(val.vstr) < 0) { + NL_SET_ERR_MSG_MOD(extack, "Error: Requested value is not supported."); + return -EINVAL; + } + + return 0; +} + +static int +ice_devlink_msix_max_pf_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) { - struct pci_dev *pdev = pf->pdev; - u64 id; + struct ice_pf *pf = devlink_priv(devlink); - id = pci_get_dsn(pdev); + if (val.vu32 > pf->hw.func_caps.common_cap.num_msix_vectors) + return -EINVAL; - ppid->id_len = sizeof(id); - put_unaligned_be64(id, &ppid->id); + return 0; } -int ice_devlink_register_params(struct ice_pf *pf) +static int +ice_devlink_msix_min_pf_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) { - struct devlink *devlink = priv_to_devlink(pf); + if (val.vu32 < ICE_MIN_MSIX) + return -EINVAL; - return devlink_params_register(devlink, ice_devlink_params, - ARRAY_SIZE(ice_devlink_params)); + return 0; } -void ice_devlink_unregister_params(struct ice_pf *pf) +static int ice_devlink_enable_rdma_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_priv(devlink); + bool new_state = val.vbool; + + if (new_state && !test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) + return -EOPNOTSUPP; + + return 0; +} + +enum ice_param_id { + ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS, + ICE_DEVLINK_PARAM_ID_LOCAL_FWD, +}; + +static const struct devlink_param ice_dvl_rdma_params[] = { + DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME), + ice_devlink_enable_roce_get, + ice_devlink_enable_roce_set, + ice_devlink_enable_roce_validate), + DEVLINK_PARAM_GENERIC(ENABLE_IWARP, BIT(DEVLINK_PARAM_CMODE_RUNTIME), + ice_devlink_enable_iw_get, + ice_devlink_enable_iw_set, + ice_devlink_enable_iw_validate), + DEVLINK_PARAM_GENERIC(ENABLE_RDMA, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, ice_devlink_enable_rdma_validate), +}; + +static const struct devlink_param ice_dvl_msix_params[] = { + DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX, + BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, ice_devlink_msix_max_pf_validate), + DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MIN, + BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, ice_devlink_msix_min_pf_validate), +}; + +static const struct devlink_param ice_dvl_sched_params[] = { + DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS, + "tx_scheduling_layers", + DEVLINK_PARAM_TYPE_U8, + BIT(DEVLINK_PARAM_CMODE_PERMANENT), + ice_devlink_tx_sched_layers_get, + ice_devlink_tx_sched_layers_set, + ice_devlink_tx_sched_layers_validate), + DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_LOCAL_FWD, + "local_forwarding", DEVLINK_PARAM_TYPE_STRING, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + ice_devlink_local_fwd_get, + ice_devlink_local_fwd_set, + ice_devlink_local_fwd_validate), +}; + +static void ice_devlink_free(void *devlink_ptr) { - devlink_params_unregister(priv_to_devlink(pf), ice_devlink_params, - ARRAY_SIZE(ice_devlink_params)); + devlink_free((struct devlink *)devlink_ptr); } /** - * ice_devlink_set_port_split_options - Set port split options - * @pf: the PF to set port split options - * @attrs: devlink attributes 
+ * ice_allocate_pf - Allocate devlink and return PF structure pointer + * @dev: the device to allocate for * - * Sets devlink port split options based on available FW port options + * Allocate a devlink instance for this device and return the private area as + * the PF structure. The devlink memory is kept track of through devres by + * adding an action to remove it when unwinding. */ -static void -ice_devlink_set_port_split_options(struct ice_pf *pf, - struct devlink_port_attrs *attrs) +struct ice_pf *ice_allocate_pf(struct device *dev) { - struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX]; - u8 i, active_idx, pending_idx, option_count = ICE_AQC_PORT_OPT_MAX; - bool active_valid, pending_valid; - int status; + struct devlink *devlink; - status = ice_aq_get_port_options(&pf->hw, options, &option_count, - 0, true, &active_idx, &active_valid, - &pending_idx, &pending_valid); - if (status) { - dev_dbg(ice_pf_to_dev(pf), "Couldn't read port split options, err = %d\n", - status); - return; - } + devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf), dev); + if (!devlink) + return NULL; - /* find the biggest available port split count */ - for (i = 0; i < option_count; i++) - attrs->lanes = max_t(int, attrs->lanes, options[i].pmd); + /* Add an action to teardown the devlink when unwinding the driver */ + if (devm_add_action_or_reset(dev, ice_devlink_free, devlink)) + return NULL; - attrs->splittable = attrs->lanes ? 1 : 0; - ice_active_port_option = active_idx; + return devlink_priv(devlink); } -static const struct devlink_port_ops ice_devlink_port_ops = { - .port_split = ice_devlink_port_split, - .port_unsplit = ice_devlink_port_unsplit, -}; - /** - * ice_devlink_create_pf_port - Create a devlink port for this PF - * @pf: the PF to create a devlink port for + * ice_allocate_sf - Allocate devlink and return SF structure pointer + * @dev: the device to allocate for + * @pf: pointer to the PF structure * - * Create and register a devlink_port for this PF. - * This function has to be called under devl_lock. + * Allocate a devlink instance for SF. * - * Return: zero on success or an error code on failure. + * Return: ice_sf_priv pointer to allocated memory or ERR_PTR in case of error */ -int ice_devlink_create_pf_port(struct ice_pf *pf) +struct ice_sf_priv *ice_allocate_sf(struct device *dev, struct ice_pf *pf) { - struct devlink_port_attrs attrs = {}; - struct devlink_port *devlink_port; struct devlink *devlink; - struct ice_vsi *vsi; - struct device *dev; int err; - devlink = priv_to_devlink(pf); - - dev = ice_pf_to_dev(pf); - - devlink_port = &pf->devlink_port; - - vsi = ice_get_main_vsi(pf); - if (!vsi) - return -EIO; - - attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; - attrs.phys.port_number = pf->hw.bus.func; - - /* As FW supports only port split options for whole device, - * set port split options only for first PF. 
- */ - if (pf->hw.pf_id == 0) - ice_devlink_set_port_split_options(pf, &attrs); - - ice_devlink_set_switch_id(pf, &attrs.switch_id); - - devlink_port_attrs_set(devlink_port, &attrs); + devlink = devlink_alloc(&ice_sf_devlink_ops, sizeof(struct ice_sf_priv), + dev); + if (!devlink) + return ERR_PTR(-ENOMEM); - err = devl_port_register_with_ops(devlink, devlink_port, vsi->idx, - &ice_devlink_port_ops); + err = devl_nested_devlink_set(priv_to_devlink(pf), devlink); if (err) { - dev_err(dev, "Failed to create devlink port for PF %d, error %d\n", - pf->hw.pf_id, err); - return err; + devlink_free(devlink); + return ERR_PTR(err); } - return 0; + return devlink_priv(devlink); } /** - * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF - * @pf: the PF to cleanup + * ice_devlink_register - Register devlink interface for this PF + * @pf: the PF to register the devlink for. + * + * Register the devlink instance associated with this physical function. * - * Unregisters the devlink_port structure associated with this PF. - * This function has to be called under devl_lock. + * Return: zero on success or an error code on failure. */ -void ice_devlink_destroy_pf_port(struct ice_pf *pf) +void ice_devlink_register(struct ice_pf *pf) { - devl_port_unregister(&pf->devlink_port); + struct devlink *devlink = priv_to_devlink(pf); + + devl_register(devlink); } /** - * ice_devlink_create_vf_port - Create a devlink port for this VF - * @vf: the VF to create a port for - * - * Create and register a devlink_port for this VF. + * ice_devlink_unregister - Unregister devlink resources for this PF. + * @pf: the PF structure to cleanup * - * Return: zero on success or an error code on failure. + * Releases resources used by devlink and cleans up associated memory. 
*/ -int ice_devlink_create_vf_port(struct ice_vf *vf) +void ice_devlink_unregister(struct ice_pf *pf) { - struct devlink_port_attrs attrs = {}; - struct devlink_port *devlink_port; - struct devlink *devlink; - struct ice_vsi *vsi; - struct device *dev; - struct ice_pf *pf; - int err; - - pf = vf->pf; - dev = ice_pf_to_dev(pf); - devlink_port = &vf->devlink_port; - - vsi = ice_get_vf_vsi(vf); - if (!vsi) - return -EINVAL; + devl_unregister(priv_to_devlink(pf)); +} - attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF; - attrs.pci_vf.pf = pf->hw.bus.func; - attrs.pci_vf.vf = vf->vf_id; +int ice_devlink_register_params(struct ice_pf *pf) +{ + struct devlink *devlink = priv_to_devlink(pf); + union devlink_param_value value; + struct ice_hw *hw = &pf->hw; + int status; - ice_devlink_set_switch_id(pf, &attrs.switch_id); + status = devl_params_register(devlink, ice_dvl_rdma_params, + ARRAY_SIZE(ice_dvl_rdma_params)); + if (status) + return status; - devlink_port_attrs_set(devlink_port, &attrs); - devlink = priv_to_devlink(pf); + status = devl_params_register(devlink, ice_dvl_msix_params, + ARRAY_SIZE(ice_dvl_msix_params)); + if (status) + goto unregister_rdma_params; - err = devlink_port_register(devlink, devlink_port, vsi->idx); - if (err) { - dev_err(dev, "Failed to create devlink port for VF %d, error %d\n", - vf->vf_id, err); - return err; - } + if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) + status = devl_params_register(devlink, ice_dvl_sched_params, + ARRAY_SIZE(ice_dvl_sched_params)); + if (status) + goto unregister_msix_params; + + value.vu32 = pf->msix.max; + devl_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX, + value); + value.vu32 = pf->msix.min; + devl_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN, + value); + + value.vbool = test_bit(ICE_FLAG_RDMA_ENA, pf->flags); + devl_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA, + value); return 0; + +unregister_msix_params: + devl_params_unregister(devlink, ice_dvl_msix_params, + ARRAY_SIZE(ice_dvl_msix_params)); +unregister_rdma_params: + devl_params_unregister(devlink, ice_dvl_rdma_params, + ARRAY_SIZE(ice_dvl_rdma_params)); + return status; } -/** - * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF - * @vf: the VF to cleanup - * - * Unregisters the devlink_port structure associated with this VF. 
- */ -void ice_devlink_destroy_vf_port(struct ice_vf *vf) +void ice_devlink_unregister_params(struct ice_pf *pf) { - devl_rate_leaf_destroy(&vf->devlink_port); - devlink_port_unregister(&vf->devlink_port); + struct devlink *devlink = priv_to_devlink(pf); + struct ice_hw *hw = &pf->hw; + + devl_params_unregister(devlink, ice_dvl_rdma_params, + ARRAY_SIZE(ice_dvl_rdma_params)); + devl_params_unregister(devlink, ice_dvl_msix_params, + ARRAY_SIZE(ice_dvl_msix_params)); + + if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) + devl_params_unregister(devlink, ice_dvl_sched_params, + ARRAY_SIZE(ice_dvl_sched_params)); } #define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024) @@ -1976,8 +2044,8 @@ void ice_devlink_init_regions(struct ice_pf *pf) u64 nvm_size, sram_size; nvm_size = pf->hw.flash.flash_size; - pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1, - nvm_size); + pf->nvm_region = devl_region_create(devlink, &ice_nvm_region_ops, 1, + nvm_size); if (IS_ERR(pf->nvm_region)) { dev_err(dev, "failed to create NVM devlink region, err %ld\n", PTR_ERR(pf->nvm_region)); @@ -1985,17 +2053,17 @@ void ice_devlink_init_regions(struct ice_pf *pf) } sram_size = pf->hw.flash.sr_words * 2u; - pf->sram_region = devlink_region_create(devlink, &ice_sram_region_ops, - 1, sram_size); + pf->sram_region = devl_region_create(devlink, &ice_sram_region_ops, + 1, sram_size); if (IS_ERR(pf->sram_region)) { dev_err(dev, "failed to create shadow-ram devlink region, err %ld\n", PTR_ERR(pf->sram_region)); pf->sram_region = NULL; } - pf->devcaps_region = devlink_region_create(devlink, - &ice_devcaps_region_ops, 10, - ICE_AQ_MAX_BUF_LEN); + pf->devcaps_region = devl_region_create(devlink, + &ice_devcaps_region_ops, 10, + ICE_AQ_MAX_BUF_LEN); if (IS_ERR(pf->devcaps_region)) { dev_err(dev, "failed to create device-caps devlink region, err %ld\n", PTR_ERR(pf->devcaps_region)); @@ -2012,11 +2080,11 @@ void ice_devlink_init_regions(struct ice_pf *pf) void ice_devlink_destroy_regions(struct ice_pf *pf) { if (pf->nvm_region) - devlink_region_destroy(pf->nvm_region); + devl_region_destroy(pf->nvm_region); if (pf->sram_region) - devlink_region_destroy(pf->sram_region); + devl_region_destroy(pf->sram_region); if (pf->devcaps_region) - devlink_region_destroy(pf->devcaps_region); + devl_region_destroy(pf->devcaps_region); } diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/devlink/devlink.h index d291c0e2e17b..1af3b0763fbb 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.h +++ b/drivers/net/ethernet/intel/ice/devlink/devlink.h @@ -5,6 +5,7 @@ #define _ICE_DEVLINK_H_ struct ice_pf *ice_allocate_pf(struct device *dev); +struct ice_sf_priv *ice_allocate_sf(struct device *dev, struct ice_pf *pf); void ice_devlink_register(struct ice_pf *pf); void ice_devlink_unregister(struct ice_pf *pf); diff --git a/drivers/net/ethernet/intel/ice/devlink/health.c b/drivers/net/ethernet/intel/ice/devlink/health.c new file mode 100644 index 000000000000..19c3d37aa768 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/devlink/health.c @@ -0,0 +1,550 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024, Intel Corporation. 
*/ + +#include "ice.h" +#include "ice_adminq_cmd.h" /* for enum ice_aqc_health_status_elem */ +#include "health.h" + +#define ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, obj, name) \ + devlink_fmsg_put(fmsg, #name, (obj)->name) + +#define ICE_HEALTH_STATUS_DATA_SIZE 2 + +struct ice_health_status { + enum ice_aqc_health_status code; + const char *description; + const char *solution; + const char *data_label[ICE_HEALTH_STATUS_DATA_SIZE]; +}; + +/* + * In addition to the health status codes provided below, the firmware might + * generate Health Status Codes that are not pertinent to the end-user. + * For instance, Health Code 0x1002 is triggered when the command fails. + * Such codes should be disregarded by the end-user. + * The below lookup requires to be sorted by code. + */ + +static const char ice_common_port_solutions[] = + "Check your cable connection. Change or replace the module or cable. Manually set speed and duplex."; +static const char ice_port_number_label[] = "Port Number"; +static const char ice_update_nvm_solution[] = "Update to the latest NVM image."; + +static const struct ice_health_status ice_health_status_lookup[] = { + {ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT, "An unsupported module was detected.", + ice_common_port_solutions, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE, "Module type is not supported.", + "Change or replace the module or cable.", {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL, "Module is not qualified.", + ice_common_port_solutions, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM, + "Device cannot communicate with the module.", + "Check your cable connection. Change or replace the module or cable. Manually set speed and duplex.", + {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT, "Unresolved module conflict.", + "Manually set speed/duplex or change the port option. If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.", + {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT, "Module is not present.", + "Check that the module is inserted correctly. If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.", + {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED, "Underutilized module.", + "Change or replace the module or cable. Change the port option.", + {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT, "An unsupported module was detected.", + ice_common_port_solutions, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG, "Invalid link configuration.", + NULL, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS, "Port hardware access error.", + ice_update_nvm_solution, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE, "A port is unreachable.", + "Change the port option. Update to the latest NVM image."}, + {ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED, "Port speed is limited due to module.", + "Change the module or configure the port option to match the current module speed. Change the port option.", + {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT, + "All configured link modes were attempted but failed to establish link. 
The device will restart the process to establish link.", + "Check link partner connection and configuration.", + {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED, + "Port speed is limited by PHY capabilities.", + "Change the module to align to port option.", {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO, "LOM topology netlist is corrupted.", + ice_update_nvm_solution, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_NETLIST, "Unrecoverable netlist error.", + ice_update_nvm_solution, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT, "Port topology conflict.", + "Change the port option. Update to the latest NVM image."}, + {ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS, "Unrecoverable hardware access error.", + ice_update_nvm_solution, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME, "Unrecoverable runtime error.", + ice_update_nvm_solution, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT, "Link management engine failed to initialize.", + ice_update_nvm_solution, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_ERR_PHY_FW_LOAD, + "Failed to load the firmware image in the external PHY.", + ice_update_nvm_solution, {ice_port_number_label}}, + {ICE_AQC_HEALTH_STATUS_INFO_RECOVERY, "The device is in firmware recovery mode.", + ice_update_nvm_solution, {"Extended Error"}}, + {ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS, "The flash chip cannot be accessed.", + "If issue persists, call customer support.", {"Access Type"}}, + {ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH, "NVM authentication failed.", + ice_update_nvm_solution}, + {ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH, "Option ROM authentication failed.", + ice_update_nvm_solution}, + {ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH, "DDP package authentication failed.", + "Update to latest base driver and DDP package."}, + {ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT, "NVM image is incompatible.", + ice_update_nvm_solution}, + {ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT, "Option ROM is incompatible.", + ice_update_nvm_solution, {"Expected PCI Device ID", "Expected Module ID"}}, + {ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB, + "Supplied MIB file is invalid. 
DCB reverted to default configuration.", + "Disable FW-LLDP and check DCBx system configuration.", + {ice_port_number_label, "MIB ID"}}, +}; + +static int ice_health_status_lookup_compare(const void *a, const void *b) +{ + return ((struct ice_health_status *)a)->code - ((struct ice_health_status *)b)->code; +} + +static const struct ice_health_status *ice_get_health_status(u16 code) +{ + struct ice_health_status key = { .code = code }; + + return bsearch(&key, ice_health_status_lookup, ARRAY_SIZE(ice_health_status_lookup), + sizeof(struct ice_health_status), ice_health_status_lookup_compare); +} + +static void ice_describe_status_code(struct devlink_fmsg *fmsg, + struct ice_aqc_health_status_elem *hse) +{ + static const char *const aux_label[] = { "Aux Data 1", "Aux Data 2" }; + const struct ice_health_status *health_code; + u32 internal_data[2]; + u16 status_code; + + status_code = le16_to_cpu(hse->health_status_code); + + devlink_fmsg_put(fmsg, "Syndrome", status_code); + if (status_code) { + internal_data[0] = le32_to_cpu(hse->internal_data1); + internal_data[1] = le32_to_cpu(hse->internal_data2); + + health_code = ice_get_health_status(status_code); + if (!health_code) + return; + + devlink_fmsg_string_pair_put(fmsg, "Description", health_code->description); + if (health_code->solution) + devlink_fmsg_string_pair_put(fmsg, "Possible Solution", + health_code->solution); + + for (size_t i = 0; i < ICE_HEALTH_STATUS_DATA_SIZE; i++) { + if (internal_data[i] != ICE_AQC_HEALTH_STATUS_UNDEFINED_DATA) + devlink_fmsg_u32_pair_put(fmsg, + health_code->data_label[i] ? + health_code->data_label[i] : + aux_label[i], + internal_data[i]); + } + } +} + +static int +ice_port_reporter_diagnose(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_health_reporter_priv(reporter); + + ice_describe_status_code(fmsg, &pf->health_reporters.port_status); + return 0; +} + +static int +ice_port_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, + void *priv_ctx, struct netlink_ext_ack __always_unused *extack) +{ + struct ice_pf *pf = devlink_health_reporter_priv(reporter); + + ice_describe_status_code(fmsg, &pf->health_reporters.port_status); + return 0; +} + +static int +ice_fw_reporter_diagnose(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, + struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_health_reporter_priv(reporter); + + ice_describe_status_code(fmsg, &pf->health_reporters.fw_status); + return 0; +} + +static int +ice_fw_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg, + void *priv_ctx, struct netlink_ext_ack *extack) +{ + struct ice_pf *pf = devlink_health_reporter_priv(reporter); + + ice_describe_status_code(fmsg, &pf->health_reporters.fw_status); + return 0; +} + +static void ice_config_health_events(struct ice_pf *pf, bool enable) +{ + u8 enable_bits = 0; + int ret; + + if (enable) + enable_bits = ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK | + ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK; + + ret = ice_aq_set_health_status_cfg(&pf->hw, enable_bits); + if (ret) + dev_err(ice_pf_to_dev(pf), "Failed to %s firmware health events, err %d aq_err %s\n", + str_enable_disable(enable), ret, + ice_aq_str(pf->hw.adminq.sq_last_status)); +} + +/** + * ice_process_health_status_event - Process the health status event from FW + * @pf: pointer to the PF structure + * @event: event structure containing the Health Status Event opcode + * + * 
Decode the Health Status Events and print the associated messages + */ +void ice_process_health_status_event(struct ice_pf *pf, struct ice_rq_event_info *event) +{ + const struct ice_aqc_health_status_elem *health_info; + u16 count; + + health_info = (struct ice_aqc_health_status_elem *)event->msg_buf; + count = le16_to_cpu(event->desc.params.get_health_status.health_status_count); + + if (count > (event->buf_len / sizeof(*health_info))) { + dev_err(ice_pf_to_dev(pf), "Received a health status event with invalid element count\n"); + return; + } + + for (size_t i = 0; i < count; i++) { + const struct ice_health_status *health_code; + u16 status_code; + + status_code = le16_to_cpu(health_info->health_status_code); + health_code = ice_get_health_status(status_code); + + if (health_code) { + switch (le16_to_cpu(health_info->event_source)) { + case ICE_AQC_HEALTH_STATUS_GLOBAL: + pf->health_reporters.fw_status = *health_info; + devlink_health_report(pf->health_reporters.fw, + "FW syndrome reported", NULL); + break; + case ICE_AQC_HEALTH_STATUS_PF: + case ICE_AQC_HEALTH_STATUS_PORT: + pf->health_reporters.port_status = *health_info; + devlink_health_report(pf->health_reporters.port, + "Port syndrome reported", NULL); + break; + default: + dev_err(ice_pf_to_dev(pf), "Health code with unknown source\n"); + } + } else { + u32 data1, data2; + u16 source; + + source = le16_to_cpu(health_info->event_source); + data1 = le32_to_cpu(health_info->internal_data1); + data2 = le32_to_cpu(health_info->internal_data2); + dev_dbg(ice_pf_to_dev(pf), + "Received internal health status code 0x%08x, source: 0x%08x, data1: 0x%08x, data2: 0x%08x", + status_code, source, data1, data2); + } + health_info++; + } +} + +/** + * ice_devlink_health_report - boilerplate to call given @reporter + * + * @reporter: devlink health reporter to call, do nothing on NULL + * @msg: message to pass up, "event name" is fine + * @priv_ctx: typically some event struct + */ +static void ice_devlink_health_report(struct devlink_health_reporter *reporter, + const char *msg, void *priv_ctx) +{ + if (!reporter) + return; + + /* We do not do auto recovering, so return value of the below function + * will always be 0, thus we do ignore it. 
+ */ + devlink_health_report(reporter, msg, priv_ctx); +} + +struct ice_mdd_event { + enum ice_mdd_src src; + u16 vf_num; + u16 queue; + u8 pf_num; + u8 event; +}; + +static const char *ice_mdd_src_to_str(enum ice_mdd_src src) +{ + switch (src) { + case ICE_MDD_SRC_TX_PQM: + return "tx_pqm"; + case ICE_MDD_SRC_TX_TCLAN: + return "tx_tclan"; + case ICE_MDD_SRC_TX_TDPU: + return "tx_tdpu"; + case ICE_MDD_SRC_RX: + return "rx"; + default: + return "invalid"; + } +} + +static int +ice_mdd_reporter_dump(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, void *priv_ctx, + struct netlink_ext_ack *extack) +{ + struct ice_mdd_event *mdd_event = priv_ctx; + const char *src; + + if (!mdd_event) + return 0; + + src = ice_mdd_src_to_str(mdd_event->src); + + devlink_fmsg_obj_nest_start(fmsg); + devlink_fmsg_put(fmsg, "src", src); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, pf_num); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, vf_num); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, event); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, queue); + devlink_fmsg_obj_nest_end(fmsg); + + return 0; +} + +/** + * ice_report_mdd_event - Report an MDD event through devlink health + * @pf: the PF device structure + * @src: the HW block that was the source of this MDD event + * @pf_num: the pf_num on which the MDD event occurred + * @vf_num: the vf_num on which the MDD event occurred + * @event: the event type of the MDD event + * @queue: the queue on which the MDD event occurred + * + * Report an MDD event that has occurred on this PF. + */ +void ice_report_mdd_event(struct ice_pf *pf, enum ice_mdd_src src, u8 pf_num, + u16 vf_num, u8 event, u16 queue) +{ + struct ice_mdd_event ev = { + .src = src, + .pf_num = pf_num, + .vf_num = vf_num, + .event = event, + .queue = queue, + }; + + ice_devlink_health_report(pf->health_reporters.mdd, "MDD event", &ev); +} + +/** + * ice_fmsg_put_ptr - put hex value of pointer into fmsg + * + * @fmsg: devlink fmsg under construction + * @name: name to pass + * @ptr: 64 bit value to print as hex and put into fmsg + */ +static void ice_fmsg_put_ptr(struct devlink_fmsg *fmsg, const char *name, + void *ptr) +{ + char buf[sizeof(ptr) * 3]; + + sprintf(buf, "%p", ptr); + devlink_fmsg_put(fmsg, name, buf); +} + +struct ice_tx_hang_event { + u32 head; + u32 intr; + u16 vsi_num; + u16 queue; + u16 next_to_clean; + u16 next_to_use; + struct ice_tx_ring *tx_ring; +}; + +static int ice_tx_hang_reporter_dump(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, void *priv_ctx, + struct netlink_ext_ack *extack) +{ + struct ice_tx_hang_event *event = priv_ctx; + struct sk_buff *skb; + + if (!event) + return 0; + + skb = event->tx_ring->tx_buf->skb; + devlink_fmsg_obj_nest_start(fmsg); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, head); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, intr); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, vsi_num); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, queue); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, next_to_clean); + ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, next_to_use); + devlink_fmsg_put(fmsg, "irq-mapping", event->tx_ring->q_vector->name); + ice_fmsg_put_ptr(fmsg, "desc-ptr", event->tx_ring->desc); + ice_fmsg_put_ptr(fmsg, "dma-ptr", (void *)(long)event->tx_ring->dma); + ice_fmsg_put_ptr(fmsg, "skb-ptr", skb); + devlink_fmsg_binary_pair_put(fmsg, "desc", event->tx_ring->desc, + event->tx_ring->count * sizeof(struct ice_tx_desc)); + devlink_fmsg_dump_skb(fmsg, skb); + devlink_fmsg_obj_nest_end(fmsg); + + return 0; +} + +void 
ice_prep_tx_hang_report(struct ice_pf *pf, struct ice_tx_ring *tx_ring, + u16 vsi_num, u32 head, u32 intr) +{ + struct ice_health_tx_hang_buf *buf = &pf->health_reporters.tx_hang_buf; + + buf->tx_ring = tx_ring; + buf->vsi_num = vsi_num; + buf->head = head; + buf->intr = intr; +} + +void ice_report_tx_hang(struct ice_pf *pf) +{ + struct ice_health_tx_hang_buf *buf = &pf->health_reporters.tx_hang_buf; + struct ice_tx_ring *tx_ring = buf->tx_ring; + + struct ice_tx_hang_event ev = { + .head = buf->head, + .intr = buf->intr, + .vsi_num = buf->vsi_num, + .queue = tx_ring->q_index, + .next_to_clean = tx_ring->next_to_clean, + .next_to_use = tx_ring->next_to_use, + .tx_ring = tx_ring, + }; + + ice_devlink_health_report(pf->health_reporters.tx_hang, "Tx hang", &ev); +} + +static struct devlink_health_reporter * +ice_init_devlink_rep(struct ice_pf *pf, + const struct devlink_health_reporter_ops *ops) +{ + struct devlink *devlink = priv_to_devlink(pf); + struct devlink_health_reporter *rep; + const u64 graceful_period = 0; + + rep = devl_health_reporter_create(devlink, ops, graceful_period, pf); + if (IS_ERR(rep)) { + struct device *dev = ice_pf_to_dev(pf); + + dev_err(dev, "failed to create devlink %s health report er", + ops->name); + return NULL; + } + return rep; +} + +#define ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field) \ + ._field = ice_##_name##_reporter_##_field, + +#define ICE_DEFINE_HEALTH_REPORTER_OPS_1(_name, _field1) \ + static const struct devlink_health_reporter_ops ice_##_name##_reporter_ops = { \ + .name = #_name, \ + ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field1) \ + } + +#define ICE_DEFINE_HEALTH_REPORTER_OPS_2(_name, _field1, _field2) \ + static const struct devlink_health_reporter_ops ice_##_name##_reporter_ops = { \ + .name = #_name, \ + ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field1) \ + ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field2) \ + } + +ICE_DEFINE_HEALTH_REPORTER_OPS_1(mdd, dump); +ICE_DEFINE_HEALTH_REPORTER_OPS_1(tx_hang, dump); +ICE_DEFINE_HEALTH_REPORTER_OPS_2(fw, dump, diagnose); +ICE_DEFINE_HEALTH_REPORTER_OPS_2(port, dump, diagnose); + +/** + * ice_health_init - allocate and init all ice devlink health reporters and + * accompanied data + * + * @pf: PF struct + */ +void ice_health_init(struct ice_pf *pf) +{ + struct ice_health *reps = &pf->health_reporters; + + reps->mdd = ice_init_devlink_rep(pf, &ice_mdd_reporter_ops); + reps->tx_hang = ice_init_devlink_rep(pf, &ice_tx_hang_reporter_ops); + + if (ice_is_fw_health_report_supported(&pf->hw)) { + reps->fw = ice_init_devlink_rep(pf, &ice_fw_reporter_ops); + reps->port = ice_init_devlink_rep(pf, &ice_port_reporter_ops); + ice_config_health_events(pf, true); + } +} + +/** + * ice_deinit_devl_reporter - destroy given devlink health reporter + * @reporter: reporter to destroy + */ +static void ice_deinit_devl_reporter(struct devlink_health_reporter *reporter) +{ + if (reporter) + devl_health_reporter_destroy(reporter); +} + +/** + * ice_health_deinit - deallocate all ice devlink health reporters and + * accompanied data + * + * @pf: PF struct + */ +void ice_health_deinit(struct ice_pf *pf) +{ + ice_deinit_devl_reporter(pf->health_reporters.mdd); + ice_deinit_devl_reporter(pf->health_reporters.tx_hang); + if (ice_is_fw_health_report_supported(&pf->hw)) { + ice_deinit_devl_reporter(pf->health_reporters.fw); + ice_deinit_devl_reporter(pf->health_reporters.port); + ice_config_health_events(pf, false); + } +} + +static +void ice_health_assign_healthy_state(struct devlink_health_reporter *reporter) +{ + if (reporter) + 
devlink_health_reporter_state_update(reporter, + DEVLINK_HEALTH_REPORTER_STATE_HEALTHY); +} + +/** + * ice_health_clear - clear devlink health issues after a reset + * @pf: the PF device structure + * + * Mark the PF in healthy state again after a reset has completed. + */ +void ice_health_clear(struct ice_pf *pf) +{ + ice_health_assign_healthy_state(pf->health_reporters.mdd); + ice_health_assign_healthy_state(pf->health_reporters.tx_hang); +} diff --git a/drivers/net/ethernet/intel/ice/devlink/health.h b/drivers/net/ethernet/intel/ice/devlink/health.h new file mode 100644 index 000000000000..5edfc4d2adce --- /dev/null +++ b/drivers/net/ethernet/intel/ice/devlink/health.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2024, Intel Corporation. */ + +#ifndef _HEALTH_H_ +#define _HEALTH_H_ + +#include <linux/types.h> + +/** + * DOC: health.h + * + * This header file stores everything that is needed for broadly understood + * devlink health mechanism for ice driver. + */ + +struct ice_aqc_health_status_elem; +struct ice_pf; +struct ice_tx_ring; +struct ice_rq_event_info; + +enum ice_mdd_src { + ICE_MDD_SRC_TX_PQM, + ICE_MDD_SRC_TX_TCLAN, + ICE_MDD_SRC_TX_TDPU, + ICE_MDD_SRC_RX, +}; + +/** + * struct ice_health - stores ice devlink health reporters and accompanied data + * @fw: devlink health reporter for FW Health Status events + * @mdd: devlink health reporter for MDD detection event + * @port: devlink health reporter for Port Health Status events + * @tx_hang: devlink health reporter for tx_hang event + * @tx_hang_buf: pre-allocated place to put info for Tx hang reporter from + * non-sleeping context + * @tx_ring: ring that the hang occurred on + * @head: descriptor head + * @intr: interrupt register value + * @vsi_num: VSI owning the queue that the hang occurred on + * @fw_status: buffer for last received FW Status event + * @port_status: buffer for last received Port Status event + */ +struct ice_health { + struct devlink_health_reporter *fw; + struct devlink_health_reporter *mdd; + struct devlink_health_reporter *port; + struct devlink_health_reporter *tx_hang; + struct_group_tagged(ice_health_tx_hang_buf, tx_hang_buf, + struct ice_tx_ring *tx_ring; + u32 head; + u32 intr; + u16 vsi_num; + ); + struct ice_aqc_health_status_elem fw_status; + struct ice_aqc_health_status_elem port_status; +}; + +void ice_process_health_status_event(struct ice_pf *pf, + struct ice_rq_event_info *event); + +void ice_health_init(struct ice_pf *pf); +void ice_health_deinit(struct ice_pf *pf); +void ice_health_clear(struct ice_pf *pf); + +void ice_prep_tx_hang_report(struct ice_pf *pf, struct ice_tx_ring *tx_ring, + u16 vsi_num, u32 head, u32 intr); +void ice_report_mdd_event(struct ice_pf *pf, enum ice_mdd_src src, u8 pf_num, + u16 vf_num, u8 event, u16 queue); +void ice_report_tx_hang(struct ice_pf *pf); + +#endif /* _HEALTH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/devlink/port.c b/drivers/net/ethernet/intel/ice/devlink/port.c new file mode 100644 index 000000000000..767419a67fef --- /dev/null +++ b/drivers/net/ethernet/intel/ice/devlink/port.c @@ -0,0 +1,999 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024, Intel Corporation. 
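The tx_hang_buf staging area declared in health.h above exists so the hang details can be captured from a non-sleeping context and only later, from a context that may sleep, be turned into a devlink health report. A minimal usage sketch, assuming the prep call runs on the Tx timeout path and the report is sent from the PF service task; the real call sites are in other hunks of this series and the example_* functions below are illustrative only:

static void example_tx_timeout_path(struct ice_pf *pf,
				    struct ice_tx_ring *tx_ring,
				    u16 vsi_num, u32 head, u32 intr)
{
	/* Non-sleeping context: only stash the details in the
	 * pre-allocated tx_hang_buf; no devlink calls here.
	 */
	ice_prep_tx_hang_report(pf, tx_ring, vsi_num, head, intr);
}

static void example_service_task(struct ice_pf *pf)
{
	/* Process context: build the event from tx_hang_buf and hand it
	 * to the tx_hang health reporter.
	 */
	ice_report_tx_hang(pf);
}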
*/ + +#include <linux/vmalloc.h> + +#include "ice.h" +#include "devlink.h" +#include "port.h" +#include "ice_lib.h" +#include "ice_fltr.h" + +static int ice_active_port_option = -1; + +/** + * ice_devlink_port_opt_speed_str - convert speed to a string + * @speed: speed value + */ +static const char *ice_devlink_port_opt_speed_str(u8 speed) +{ + switch (speed & ICE_AQC_PORT_OPT_MAX_LANE_M) { + case ICE_AQC_PORT_OPT_MAX_LANE_100M: + return "0.1"; + case ICE_AQC_PORT_OPT_MAX_LANE_1G: + return "1"; + case ICE_AQC_PORT_OPT_MAX_LANE_2500M: + return "2.5"; + case ICE_AQC_PORT_OPT_MAX_LANE_5G: + return "5"; + case ICE_AQC_PORT_OPT_MAX_LANE_10G: + return "10"; + case ICE_AQC_PORT_OPT_MAX_LANE_25G: + return "25"; + case ICE_AQC_PORT_OPT_MAX_LANE_50G: + return "50"; + case ICE_AQC_PORT_OPT_MAX_LANE_100G: + return "100"; + } + + return "-"; +} + +#define ICE_PORT_OPT_DESC_LEN 50 +/** + * ice_devlink_port_options_print - Print available port split options + * @pf: the PF to print split port options + * + * Prints a table with available port split options and max port speeds + */ +static void ice_devlink_port_options_print(struct ice_pf *pf) +{ + u8 i, j, options_count, cnt, speed, pending_idx, active_idx; + struct ice_aqc_get_port_options_elem *options, *opt; + struct device *dev = ice_pf_to_dev(pf); + bool active_valid, pending_valid; + char desc[ICE_PORT_OPT_DESC_LEN]; + const char *str; + int status; + + options = kcalloc(ICE_AQC_PORT_OPT_MAX * ICE_MAX_PORT_PER_PCI_DEV, + sizeof(*options), GFP_KERNEL); + if (!options) + return; + + for (i = 0; i < ICE_MAX_PORT_PER_PCI_DEV; i++) { + opt = options + i * ICE_AQC_PORT_OPT_MAX; + options_count = ICE_AQC_PORT_OPT_MAX; + active_valid = 0; + + status = ice_aq_get_port_options(&pf->hw, opt, &options_count, + i, true, &active_idx, + &active_valid, &pending_idx, + &pending_valid); + if (status) { + dev_dbg(dev, "Couldn't read port option for port %d, err %d\n", + i, status); + goto err; + } + } + + dev_dbg(dev, "Available port split options and max port speeds (Gbps):\n"); + dev_dbg(dev, "Status Split Quad 0 Quad 1\n"); + dev_dbg(dev, " count L0 L1 L2 L3 L4 L5 L6 L7\n"); + + for (i = 0; i < options_count; i++) { + cnt = 0; + + if (i == ice_active_port_option) + str = "Active"; + else if ((i == pending_idx) && pending_valid) + str = "Pending"; + else + str = ""; + + cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt, + "%-8s", str); + + cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt, + "%-6u", options[i].pmd); + + for (j = 0; j < ICE_MAX_PORT_PER_PCI_DEV; ++j) { + speed = options[i + j * ICE_AQC_PORT_OPT_MAX].max_lane_speed; + str = ice_devlink_port_opt_speed_str(speed); + cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt, + "%3s ", str); + } + + dev_dbg(dev, "%s\n", desc); + } + +err: + kfree(options); +} + +/** + * ice_devlink_aq_set_port_option - Send set port option admin queue command + * @pf: the PF to print split port options + * @option_idx: selected port option + * @extack: extended netdev ack structure + * + * Sends set port option admin queue command with selected port option and + * calls NVM write activate. 
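In ice_devlink_port_options_print() above the firmware replies are read into one flat buffer laid out as ICE_MAX_PORT_PER_PCI_DEV consecutive blocks of ICE_AQC_PORT_OPT_MAX elements, which is why the speed columns index options[i + j * ICE_AQC_PORT_OPT_MAX]. A standalone sketch of that flattened [port][option] addressing; the sizes below are stand-ins, not the driver's real constants:

#include <stdio.h>

#define OPT_MAX 16	/* stand-in for ICE_AQC_PORT_OPT_MAX */
#define PORTS 8		/* stand-in for ICE_MAX_PORT_PER_PCI_DEV */

struct opt_elem {
	unsigned char pmd;
	unsigned char max_lane_speed;
};

int main(void)
{
	struct opt_elem options[PORTS * OPT_MAX] = { 0 };
	unsigned int port = 5, opt = 2;
	/* option "opt" as reported for port "port" */
	struct opt_elem *elem = &options[port * OPT_MAX + opt];

	printf("port %u option %u: pmd=%u speed_code=%u\n",
	       port, opt, elem->pmd, elem->max_lane_speed);
	return 0;
}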
+ */ +static int +ice_devlink_aq_set_port_option(struct ice_pf *pf, u8 option_idx, + struct netlink_ext_ack *extack) +{ + struct device *dev = ice_pf_to_dev(pf); + int status; + + status = ice_aq_set_port_option(&pf->hw, 0, true, option_idx); + if (status) { + dev_dbg(dev, "ice_aq_set_port_option, err %d aq_err %d\n", + status, pf->hw.adminq.sq_last_status); + NL_SET_ERR_MSG_MOD(extack, "Port split request failed"); + return -EIO; + } + + status = ice_acquire_nvm(&pf->hw, ICE_RES_WRITE); + if (status) { + dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n", + status, pf->hw.adminq.sq_last_status); + NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); + return -EIO; + } + + status = ice_nvm_write_activate(&pf->hw, ICE_AQC_NVM_ACTIV_REQ_EMPR, NULL); + if (status) { + dev_dbg(dev, "ice_nvm_write_activate failed, err %d aq_err %d\n", + status, pf->hw.adminq.sq_last_status); + NL_SET_ERR_MSG_MOD(extack, "Port split request failed to save data"); + ice_release_nvm(&pf->hw); + return -EIO; + } + + ice_release_nvm(&pf->hw); + + NL_SET_ERR_MSG_MOD(extack, "Reboot required to finish port split"); + return 0; +} + +/** + * ice_devlink_port_split - .port_split devlink handler + * @devlink: devlink instance structure + * @port: devlink port structure + * @count: number of ports to split to + * @extack: extended netdev ack structure + * + * Callback for the devlink .port_split operation. + * + * Unfortunately, the devlink expression of available options is limited + * to just a number, so search for an FW port option which supports + * the specified number. As there could be multiple FW port options with + * the same port split count, allow switching between them. When the same + * port split count request is issued again, switch to the next FW port + * option with the same port split count. + * + * Return: zero on success or an error code on failure. + */ +static int +ice_devlink_port_split(struct devlink *devlink, struct devlink_port *port, + unsigned int count, struct netlink_ext_ack *extack) +{ + struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX]; + u8 i, j, active_idx, pending_idx, new_option; + struct ice_pf *pf = devlink_priv(devlink); + u8 option_count = ICE_AQC_PORT_OPT_MAX; + struct device *dev = ice_pf_to_dev(pf); + bool active_valid, pending_valid; + int status; + + status = ice_aq_get_port_options(&pf->hw, options, &option_count, + 0, true, &active_idx, &active_valid, + &pending_idx, &pending_valid); + if (status) { + dev_dbg(dev, "Couldn't read port split options, err = %d\n", + status); + NL_SET_ERR_MSG_MOD(extack, "Failed to get available port split options"); + return -EIO; + } + + new_option = ICE_AQC_PORT_OPT_MAX; + active_idx = pending_valid ? pending_idx : active_idx; + for (i = 1; i <= option_count; i++) { + /* In order to allow switching between FW port options with + * the same port split count, search for a new option starting + * from the active/pending option (with array wrap around). 
+ */ + j = (active_idx + i) % option_count; + + if (count == options[j].pmd) { + new_option = j; + break; + } + } + + if (new_option == active_idx) { + dev_dbg(dev, "request to split: count: %u is already set and there are no other options\n", + count); + NL_SET_ERR_MSG_MOD(extack, "Requested split count is already set"); + ice_devlink_port_options_print(pf); + return -EINVAL; + } + + if (new_option == ICE_AQC_PORT_OPT_MAX) { + dev_dbg(dev, "request to split: count: %u not found\n", count); + NL_SET_ERR_MSG_MOD(extack, "Port split requested unsupported port config"); + ice_devlink_port_options_print(pf); + return -EINVAL; + } + + status = ice_devlink_aq_set_port_option(pf, new_option, extack); + if (status) + return status; + + ice_devlink_port_options_print(pf); + + return 0; +} + +/** + * ice_devlink_port_unsplit - .port_unsplit devlink handler + * @devlink: devlink instance structure + * @port: devlink port structure + * @extack: extended netdev ack structure + * + * Callback for the devlink .port_unsplit operation. + * Calls ice_devlink_port_split with split count set to 1. + * There could be no FW option available with split count 1. + * + * Return: zero on success or an error code on failure. + */ +static int +ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port, + struct netlink_ext_ack *extack) +{ + return ice_devlink_port_split(devlink, port, 1, extack); +} + +/** + * ice_devlink_set_port_split_options - Set port split options + * @pf: the PF to set port split options + * @attrs: devlink attributes + * + * Sets devlink port split options based on available FW port options + */ +static void +ice_devlink_set_port_split_options(struct ice_pf *pf, + struct devlink_port_attrs *attrs) +{ + struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX]; + u8 i, active_idx, pending_idx, option_count = ICE_AQC_PORT_OPT_MAX; + bool active_valid, pending_valid; + int status; + + status = ice_aq_get_port_options(&pf->hw, options, &option_count, + 0, true, &active_idx, &active_valid, + &pending_idx, &pending_valid); + if (status) { + dev_dbg(ice_pf_to_dev(pf), "Couldn't read port split options, err = %d\n", + status); + return; + } + + /* find the biggest available port split count */ + for (i = 0; i < option_count; i++) + attrs->lanes = max_t(int, attrs->lanes, options[i].pmd); + + attrs->splittable = attrs->lanes ? 1 : 0; + ice_active_port_option = active_idx; +} + +static const struct devlink_port_ops ice_devlink_port_ops = { + .port_split = ice_devlink_port_split, + .port_unsplit = ice_devlink_port_unsplit, +}; + +/** + * ice_devlink_set_switch_id - Set unique switch id based on pci dsn + * @pf: the PF to create a devlink port for + * @ppid: struct with switch id information + */ +static void +ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid) +{ + struct pci_dev *pdev = pf->pdev; + u64 id; + + id = pci_get_dsn(pdev); + + ppid->id_len = sizeof(id); + put_unaligned_be64(id, &ppid->id); +} + +/** + * ice_devlink_create_pf_port - Create a devlink port for this PF + * @pf: the PF to create a devlink port for + * + * Create and register a devlink_port for this PF. + * This function has to be called under devl_lock. + * + * Return: zero on success or an error code on failure. 
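A worked example of the wrap-around search in ice_devlink_port_split() above, with made-up data: three FW options whose pmd counts are {4, 2, 4} and option 0 currently active. A request for a split count of 4 starts probing after the active index and lands on option 2; repeating the same request once option 2 is pending hops back to option 0, which is the "switch between FW options with the same count" behaviour the comment describes. Standalone sketch:

#include <stdio.h>

int main(void)
{
	unsigned char pmd[] = { 4, 2, 4 };
	unsigned int option_count = 3, active_idx = 0, count = 4;
	unsigned int i, j, new_option = option_count;

	for (i = 1; i <= option_count; i++) {
		j = (active_idx + i) % option_count;
		if (pmd[j] == count) {
			new_option = j;
			break;
		}
	}

	printf("selected FW option %u\n", new_option);	/* prints 2 */
	return 0;
}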
+ */ +int ice_devlink_create_pf_port(struct ice_pf *pf) +{ + struct devlink_port_attrs attrs = {}; + struct devlink_port *devlink_port; + struct devlink *devlink; + struct ice_vsi *vsi; + struct device *dev; + int err; + + devlink = priv_to_devlink(pf); + + dev = ice_pf_to_dev(pf); + + devlink_port = &pf->devlink_port; + + vsi = ice_get_main_vsi(pf); + if (!vsi) + return -EIO; + + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; + attrs.phys.port_number = pf->hw.pf_id; + + /* As FW supports only port split options for whole device, + * set port split options only for first PF. + */ + if (pf->hw.pf_id == 0) + ice_devlink_set_port_split_options(pf, &attrs); + + ice_devlink_set_switch_id(pf, &attrs.switch_id); + + devlink_port_attrs_set(devlink_port, &attrs); + + err = devl_port_register_with_ops(devlink, devlink_port, vsi->idx, + &ice_devlink_port_ops); + if (err) { + dev_err(dev, "Failed to create devlink port for PF %d, error %d\n", + pf->hw.pf_id, err); + return err; + } + + return 0; +} + +/** + * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF + * @pf: the PF to cleanup + * + * Unregisters the devlink_port structure associated with this PF. + * This function has to be called under devl_lock. + */ +void ice_devlink_destroy_pf_port(struct ice_pf *pf) +{ + devl_port_unregister(&pf->devlink_port); +} + +/** + * ice_devlink_port_get_vf_fn_mac - .port_fn_hw_addr_get devlink handler + * @port: devlink port structure + * @hw_addr: MAC address of the port + * @hw_addr_len: length of MAC address + * @extack: extended netdev ack structure + * + * Callback for the devlink .port_fn_hw_addr_get operation + * Return: zero on success or an error code on failure. + */ +static int ice_devlink_port_get_vf_fn_mac(struct devlink_port *port, + u8 *hw_addr, int *hw_addr_len, + struct netlink_ext_ack *extack) +{ + struct ice_vf *vf = container_of(port, struct ice_vf, devlink_port); + + ether_addr_copy(hw_addr, vf->dev_lan_addr); + *hw_addr_len = ETH_ALEN; + + return 0; +} + +/** + * ice_devlink_port_set_vf_fn_mac - .port_fn_hw_addr_set devlink handler + * @port: devlink port structure + * @hw_addr: MAC address of the port + * @hw_addr_len: length of MAC address + * @extack: extended netdev ack structure + * + * Callback for the devlink .port_fn_hw_addr_set operation + * Return: zero on success or an error code on failure. + */ +static int ice_devlink_port_set_vf_fn_mac(struct devlink_port *port, + const u8 *hw_addr, + int hw_addr_len, + struct netlink_ext_ack *extack) + +{ + struct devlink_port_attrs *attrs = &port->attrs; + struct devlink_port_pci_vf_attrs *pci_vf; + struct devlink *devlink = port->devlink; + struct ice_pf *pf; + u16 vf_id; + + pf = devlink_priv(devlink); + pci_vf = &attrs->pci_vf; + vf_id = pci_vf->vf; + + return __ice_set_vf_mac(pf, vf_id, hw_addr); +} + +static const struct devlink_port_ops ice_devlink_vf_port_ops = { + .port_fn_hw_addr_get = ice_devlink_port_get_vf_fn_mac, + .port_fn_hw_addr_set = ice_devlink_port_set_vf_fn_mac, +}; + +/** + * ice_devlink_create_vf_port - Create a devlink port for this VF + * @vf: the VF to create a port for + * + * Create and register a devlink_port for this VF. + * + * Return: zero on success or an error code on failure. 
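ice_devlink_create_pf_port() above is documented as having to run under devl_lock. A minimal sketch of a conforming caller, assuming the devlink instance lock is not already held at that point; the real call site lives elsewhere in the driver and example_register_pf_port() is purely illustrative:

static int example_register_pf_port(struct ice_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	int err;

	devl_lock(devlink);
	err = ice_devlink_create_pf_port(pf);
	devl_unlock(devlink);

	return err;
}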
+ */ +int ice_devlink_create_vf_port(struct ice_vf *vf) +{ + struct devlink_port_attrs attrs = {}; + struct devlink_port *devlink_port; + struct devlink *devlink; + struct ice_vsi *vsi; + struct device *dev; + struct ice_pf *pf; + int err; + + pf = vf->pf; + dev = ice_pf_to_dev(pf); + devlink_port = &vf->devlink_port; + + vsi = ice_get_vf_vsi(vf); + if (!vsi) + return -EINVAL; + + attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF; + attrs.pci_vf.pf = pf->hw.pf_id; + attrs.pci_vf.vf = vf->vf_id; + + ice_devlink_set_switch_id(pf, &attrs.switch_id); + + devlink_port_attrs_set(devlink_port, &attrs); + devlink = priv_to_devlink(pf); + + err = devl_port_register_with_ops(devlink, devlink_port, vsi->idx, + &ice_devlink_vf_port_ops); + if (err) { + dev_err(dev, "Failed to create devlink port for VF %d, error %d\n", + vf->vf_id, err); + return err; + } + + return 0; +} + +/** + * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF + * @vf: the VF to cleanup + * + * Unregisters the devlink_port structure associated with this VF. + */ +void ice_devlink_destroy_vf_port(struct ice_vf *vf) +{ + devl_rate_leaf_destroy(&vf->devlink_port); + devl_port_unregister(&vf->devlink_port); +} + +/** + * ice_devlink_create_sf_dev_port - Register virtual port for a subfunction + * @sf_dev: the subfunction device to create a devlink port for + * + * Register virtual flavour devlink port for the subfunction auxiliary device + * created after activating a dynamically added devlink port. + * + * Return: zero on success or an error code on failure. + */ +int ice_devlink_create_sf_dev_port(struct ice_sf_dev *sf_dev) +{ + struct devlink_port_attrs attrs = {}; + struct ice_dynamic_port *dyn_port; + struct devlink_port *devlink_port; + struct devlink *devlink; + struct ice_vsi *vsi; + + dyn_port = sf_dev->dyn_port; + vsi = dyn_port->vsi; + + devlink_port = &sf_dev->priv->devlink_port; + + attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL; + + devlink_port_attrs_set(devlink_port, &attrs); + devlink = priv_to_devlink(sf_dev->priv); + + return devl_port_register(devlink, devlink_port, vsi->idx); +} + +/** + * ice_devlink_destroy_sf_dev_port - Destroy virtual port for a subfunction + * @sf_dev: the subfunction device to create a devlink port for + * + * Unregisters the virtual port associated with this subfunction. + */ +void ice_devlink_destroy_sf_dev_port(struct ice_sf_dev *sf_dev) +{ + devl_port_unregister(&sf_dev->priv->devlink_port); +} + +/** + * ice_activate_dynamic_port - Activate a dynamic port + * @dyn_port: dynamic port instance to activate + * @extack: extack for reporting error messages + * + * Activate the dynamic port based on its flavour. + * + * Return: zero on success or an error code on failure. + */ +static int +ice_activate_dynamic_port(struct ice_dynamic_port *dyn_port, + struct netlink_ext_ack *extack) +{ + int err; + + if (dyn_port->active) + return 0; + + err = ice_sf_eth_activate(dyn_port, extack); + if (err) + return err; + + dyn_port->active = true; + + return 0; +} + +/** + * ice_deactivate_dynamic_port - Deactivate a dynamic port + * @dyn_port: dynamic port instance to deactivate + * + * Undo activation of a dynamic port. 
 */ +static void ice_deactivate_dynamic_port(struct ice_dynamic_port *dyn_port) +{ + if (!dyn_port->active) + return; + + ice_sf_eth_deactivate(dyn_port); + dyn_port->active = false; +} + +/** + * ice_dealloc_dynamic_port - Deallocate and remove a dynamic port + * @dyn_port: dynamic port instance to deallocate + * + * Free resources associated with a dynamically added devlink port. Will + * deactivate the port if it's currently active. + */ +static void ice_dealloc_dynamic_port(struct ice_dynamic_port *dyn_port) +{ + struct devlink_port *devlink_port = &dyn_port->devlink_port; + struct ice_pf *pf = dyn_port->pf; + + ice_deactivate_dynamic_port(dyn_port); + + xa_erase(&pf->sf_nums, devlink_port->attrs.pci_sf.sf); + ice_eswitch_detach_sf(pf, dyn_port); + ice_vsi_free(dyn_port->vsi); + xa_erase(&pf->dyn_ports, dyn_port->vsi->idx); + kfree(dyn_port); +} + +/** + * ice_dealloc_all_dynamic_ports - Deallocate all dynamic devlink ports + * @pf: pointer to the pf structure + */ +void ice_dealloc_all_dynamic_ports(struct ice_pf *pf) +{ + struct ice_dynamic_port *dyn_port; + unsigned long index; + + xa_for_each(&pf->dyn_ports, index, dyn_port) + ice_dealloc_dynamic_port(dyn_port); +} + +/** + * ice_devlink_port_new_check_attr - Check that new port attributes are valid + * @pf: pointer to the PF structure + * @new_attr: the attributes for the new port + * @extack: extack for reporting error messages + * + * Check that the attributes for the new port are valid before continuing to + * allocate the devlink port. + * + * Return: zero on success or an error code on failure. + */ +static int +ice_devlink_port_new_check_attr(struct ice_pf *pf, + const struct devlink_port_new_attrs *new_attr, + struct netlink_ext_ack *extack) +{ + if (new_attr->flavour != DEVLINK_PORT_FLAVOUR_PCI_SF) { + NL_SET_ERR_MSG_MOD(extack, "Flavour other than pcisf is not supported"); + return -EOPNOTSUPP; + } + + if (new_attr->controller_valid) { + NL_SET_ERR_MSG_MOD(extack, "Setting controller is not supported"); + return -EOPNOTSUPP; + } + + if (new_attr->port_index_valid) { + NL_SET_ERR_MSG_MOD(extack, "Driver does not support user defined port index assignment"); + return -EOPNOTSUPP; + } + + if (new_attr->pfnum != pf->hw.pf_id) { + NL_SET_ERR_MSG_MOD(extack, "Incorrect pfnum supplied"); + return -EINVAL; + } + + if (!pci_msix_can_alloc_dyn(pf->pdev)) { + NL_SET_ERR_MSG_MOD(extack, "Dynamic MSI-X interrupt allocation is not supported"); + return -EOPNOTSUPP; + } + + return 0; +} + +/** + * ice_devlink_port_del - devlink handler for port delete + * @devlink: pointer to devlink + * @port: devlink port to be deleted + * @extack: pointer to extack + * + * Deletes devlink port and deallocates all resources associated with + * created subfunction. + * + * Return: zero on success or an error code on failure. + */ +static int +ice_devlink_port_del(struct devlink *devlink, struct devlink_port *port, + struct netlink_ext_ack *extack) +{ + struct ice_dynamic_port *dyn_port; + + dyn_port = ice_devlink_port_to_dyn(port); + ice_dealloc_dynamic_port(dyn_port); + + return 0; +} + +/** + * ice_devlink_port_fn_hw_addr_set - devlink handler for mac address set + * @port: pointer to devlink port + * @hw_addr: hw address to set + * @hw_addr_len: hw address length + * @extack: extack for reporting error messages + * + * Sets mac address for the port, verifies arguments and copies address + * to the subfunction structure. + * + * Return: zero on success or an error code on failure. 
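The checks in ice_devlink_port_new_check_attr() above boil down to: the new port must be a PCI SF, the user may not pick a controller or a port index, pfnum has to match this PF, and the device must support dynamic MSI-X allocation. For illustration only (values invented, not taken from the patch), attributes along these lines would pass the validation; this is roughly what a request such as "devlink port add ... flavour pcisf pfnum 0 sfnum 8" results in:

static const struct devlink_port_new_attrs example_sf_attrs = {
	.flavour	= DEVLINK_PORT_FLAVOUR_PCI_SF,
	.pfnum		= 0,	/* must match pf->hw.pf_id */
	.sfnum		= 8,
	.sfnum_valid	= 1,
	/* controller_valid and port_index_valid deliberately left clear */
};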
 */ +static int +ice_devlink_port_fn_hw_addr_set(struct devlink_port *port, const u8 *hw_addr, + int hw_addr_len, + struct netlink_ext_ack *extack) +{ + struct ice_dynamic_port *dyn_port; + + dyn_port = ice_devlink_port_to_dyn(port); + + if (dyn_port->attached) { + NL_SET_ERR_MSG_MOD(extack, + "Ethernet address can be changed only in detached state"); + return -EBUSY; + } + + if (hw_addr_len != ETH_ALEN || !is_valid_ether_addr(hw_addr)) { + NL_SET_ERR_MSG_MOD(extack, "Invalid ethernet address"); + return -EADDRNOTAVAIL; + } + + ether_addr_copy(dyn_port->hw_addr, hw_addr); + + return 0; +} + +/** + * ice_devlink_port_fn_hw_addr_get - devlink handler for mac address get + * @port: pointer to devlink port + * @hw_addr: hw address to get + * @hw_addr_len: hw address length + * @extack: extack for reporting error messages + * + * Returns mac address for the port. + * + * Return: zero on success or an error code on failure. + */ +static int +ice_devlink_port_fn_hw_addr_get(struct devlink_port *port, u8 *hw_addr, + int *hw_addr_len, + struct netlink_ext_ack *extack) +{ + struct ice_dynamic_port *dyn_port; + + dyn_port = ice_devlink_port_to_dyn(port); + + ether_addr_copy(hw_addr, dyn_port->hw_addr); + *hw_addr_len = ETH_ALEN; + + return 0; +} + +/** + * ice_devlink_port_fn_state_set - devlink handler for port state set + * @port: pointer to devlink port + * @state: state to set + * @extack: extack for reporting error messages + * + * Activates or deactivates the port. + * + * Return: zero on success or an error code on failure. + */ +static int +ice_devlink_port_fn_state_set(struct devlink_port *port, + enum devlink_port_fn_state state, + struct netlink_ext_ack *extack) +{ + struct ice_dynamic_port *dyn_port; + + dyn_port = ice_devlink_port_to_dyn(port); + + switch (state) { + case DEVLINK_PORT_FN_STATE_ACTIVE: + return ice_activate_dynamic_port(dyn_port, extack); + + case DEVLINK_PORT_FN_STATE_INACTIVE: + ice_deactivate_dynamic_port(dyn_port); + break; + } + + return 0; +} + +/** + * ice_devlink_port_fn_state_get - devlink handler for port state get + * @port: pointer to devlink port + * @state: admin configured state of the port + * @opstate: current port operational state + * @extack: extack for reporting error messages + * + * Gets port state. + * + * Return: zero on success or an error code on failure. + */ +static int +ice_devlink_port_fn_state_get(struct devlink_port *port, + enum devlink_port_fn_state *state, + enum devlink_port_fn_opstate *opstate, + struct netlink_ext_ack *extack) +{ + struct ice_dynamic_port *dyn_port; + + dyn_port = ice_devlink_port_to_dyn(port); + + if (dyn_port->active) + *state = DEVLINK_PORT_FN_STATE_ACTIVE; + else + *state = DEVLINK_PORT_FN_STATE_INACTIVE; + + if (dyn_port->attached) + *opstate = DEVLINK_PORT_FN_OPSTATE_ATTACHED; + else + *opstate = DEVLINK_PORT_FN_OPSTATE_DETACHED; + + return 0; +} + +static const struct devlink_port_ops ice_devlink_port_sf_ops = { + .port_del = ice_devlink_port_del, + .port_fn_hw_addr_get = ice_devlink_port_fn_hw_addr_get, + .port_fn_hw_addr_set = ice_devlink_port_fn_hw_addr_set, + .port_fn_state_get = ice_devlink_port_fn_state_get, + .port_fn_state_set = ice_devlink_port_fn_state_set, +}; + +/** + * ice_reserve_sf_num - Reserve a subfunction number for this port + * @pf: pointer to the pf structure + * @new_attr: devlink port attributes requested + * @extack: extack for reporting error messages + * @sfnum: on success, the sf number reserved + * + * Reserve a subfunction number for this port. 
Only called for + * DEVLINK_PORT_FLAVOUR_PCI_SF ports. + * + * Return: zero on success or an error code on failure. + */ +static int +ice_reserve_sf_num(struct ice_pf *pf, + const struct devlink_port_new_attrs *new_attr, + struct netlink_ext_ack *extack, u32 *sfnum) +{ + int err; + + /* If user didn't request an explicit number, pick one */ + if (!new_attr->sfnum_valid) + return xa_alloc(&pf->sf_nums, sfnum, NULL, xa_limit_32b, + GFP_KERNEL); + + /* Otherwise, check and use the number provided */ + err = xa_insert(&pf->sf_nums, new_attr->sfnum, NULL, GFP_KERNEL); + if (err) { + if (err == -EBUSY) + NL_SET_ERR_MSG_MOD(extack, "Subfunction with given sfnum already exists"); + return err; + } + + *sfnum = new_attr->sfnum; + + return 0; +} + +/** + * ice_devlink_create_sf_port - Register PCI subfunction devlink port + * @dyn_port: the dynamic port instance structure for this subfunction + * + * Register PCI subfunction flavour devlink port for a dynamically added + * subfunction port. + * + * Return: zero on success or an error code on failure. + */ +int ice_devlink_create_sf_port(struct ice_dynamic_port *dyn_port) +{ + struct devlink_port_attrs attrs = {}; + struct devlink_port *devlink_port; + struct devlink *devlink; + struct ice_vsi *vsi; + struct ice_pf *pf; + + vsi = dyn_port->vsi; + pf = dyn_port->pf; + + devlink_port = &dyn_port->devlink_port; + + attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_SF; + attrs.pci_sf.pf = pf->hw.pf_id; + attrs.pci_sf.sf = dyn_port->sfnum; + + devlink_port_attrs_set(devlink_port, &attrs); + devlink = priv_to_devlink(pf); + + return devl_port_register_with_ops(devlink, devlink_port, vsi->idx, + &ice_devlink_port_sf_ops); +} + +/** + * ice_devlink_destroy_sf_port - Destroy the devlink_port for this SF + * @dyn_port: the dynamic port instance structure for this subfunction + * + * Unregisters the devlink_port structure associated with this SF. + */ +void ice_devlink_destroy_sf_port(struct ice_dynamic_port *dyn_port) +{ + devl_rate_leaf_destroy(&dyn_port->devlink_port); + devl_port_unregister(&dyn_port->devlink_port); +} + +/** + * ice_alloc_dynamic_port - Allocate new dynamic port + * @pf: pointer to the pf structure + * @new_attr: devlink port attributes requested + * @extack: extack for reporting error messages + * @devlink_port: index of newly created devlink port + * + * Allocate a new dynamic port instance and prepare it for configuration + * with devlink. + * + * Return: zero on success or an error code on failure. 
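ice_reserve_sf_num() above stores NULL entries in pf->sf_nums because only the index matters: the xarray acts purely as a reservation map for subfunction numbers. xa_alloc() picks a free index when the user did not request one, while xa_insert() claims the requested index and fails with -EBUSY if it is already occupied. A simplified restatement of that pattern, sketch only:

static int example_reserve(struct xarray *nums, bool want_specific,
			   u32 requested, u32 *out)
{
	int err;

	if (!want_specific)
		return xa_alloc(nums, out, NULL, xa_limit_32b, GFP_KERNEL);

	err = xa_insert(nums, requested, NULL, GFP_KERNEL);
	if (err)
		return err;	/* -EBUSY: the number is already taken */

	*out = requested;
	return 0;
}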
 */ +static int +ice_alloc_dynamic_port(struct ice_pf *pf, + const struct devlink_port_new_attrs *new_attr, + struct netlink_ext_ack *extack, + struct devlink_port **devlink_port) +{ + struct ice_dynamic_port *dyn_port; + struct ice_vsi *vsi; + u32 sfnum; + int err; + + err = ice_reserve_sf_num(pf, new_attr, extack, &sfnum); + if (err) + return err; + + dyn_port = kzalloc(sizeof(*dyn_port), GFP_KERNEL); + if (!dyn_port) { + err = -ENOMEM; + goto unroll_reserve_sf_num; + } + + vsi = ice_vsi_alloc(pf); + if (!vsi) { + NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VSI"); + err = -ENOMEM; + goto unroll_dyn_port_alloc; + } + + dyn_port->vsi = vsi; + dyn_port->pf = pf; + dyn_port->sfnum = sfnum; + eth_random_addr(dyn_port->hw_addr); + + err = xa_insert(&pf->dyn_ports, vsi->idx, dyn_port, GFP_KERNEL); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Port index reservation failed"); + goto unroll_vsi_alloc; + } + + err = ice_eswitch_attach_sf(pf, dyn_port); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to attach SF to eswitch"); + goto unroll_xa_insert; + } + + *devlink_port = &dyn_port->devlink_port; + + return 0; + +unroll_xa_insert: + xa_erase(&pf->dyn_ports, vsi->idx); +unroll_vsi_alloc: + ice_vsi_free(vsi); +unroll_dyn_port_alloc: + kfree(dyn_port); +unroll_reserve_sf_num: + xa_erase(&pf->sf_nums, sfnum); + + return err; +} + +/** + * ice_devlink_port_new - devlink handler for the new port + * @devlink: pointer to devlink + * @new_attr: pointer to the port new attributes + * @extack: extack for reporting error messages + * @devlink_port: pointer to a new port + * + * Creates new devlink port, checks new port attributes and rejects + * any unsupported parameters, allocates new subfunction for that port. + * + * Return: zero on success or an error code on failure. + */ +int +ice_devlink_port_new(struct devlink *devlink, + const struct devlink_port_new_attrs *new_attr, + struct netlink_ext_ack *extack, + struct devlink_port **devlink_port) +{ + struct ice_pf *pf = devlink_priv(devlink); + int err; + + err = ice_devlink_port_new_check_attr(pf, new_attr, extack); + if (err) + return err; + + if (!ice_is_eswitch_mode_switchdev(pf)) { + NL_SET_ERR_MSG_MOD(extack, + "SF ports are only supported in eswitch switchdev mode"); + return -EOPNOTSUPP; + } + + return ice_alloc_dynamic_port(pf, new_attr, extack, devlink_port); +} diff --git a/drivers/net/ethernet/intel/ice/devlink/port.h b/drivers/net/ethernet/intel/ice/devlink/port.h new file mode 100644 index 000000000000..d60efc340945 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/devlink/port.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2024, Intel Corporation. */ + +#ifndef _DEVLINK_PORT_H_ +#define _DEVLINK_PORT_H_ + +#include "../ice.h" +#include "../ice_sf_eth.h" + +/** + * struct ice_dynamic_port - Track dynamically added devlink port instance + * @hw_addr: the HW address for this port + * @active: true if the port has been activated + * @attached: true if the port is attached + * @devlink_port: the associated devlink port structure + * @pf: pointer to the PF private structure + * @vsi: the VSI associated with this port + * @repr_id: the representor ID + * @sfnum: the subfunction ID + * @sf_dev: pointer to the subfunction device + * + * An instance of a dynamically added devlink port. 
Each port flavour + */ +struct ice_dynamic_port { + u8 hw_addr[ETH_ALEN]; + u8 active: 1; + u8 attached: 1; + struct devlink_port devlink_port; + struct ice_pf *pf; + struct ice_vsi *vsi; + unsigned long repr_id; + u32 sfnum; + /* Flavour-specific implementation data */ + union { + struct ice_sf_dev *sf_dev; + }; +}; + +void ice_dealloc_all_dynamic_ports(struct ice_pf *pf); + +int ice_devlink_create_pf_port(struct ice_pf *pf); +void ice_devlink_destroy_pf_port(struct ice_pf *pf); +int ice_devlink_create_vf_port(struct ice_vf *vf); +void ice_devlink_destroy_vf_port(struct ice_vf *vf); +int ice_devlink_create_sf_port(struct ice_dynamic_port *dyn_port); +void ice_devlink_destroy_sf_port(struct ice_dynamic_port *dyn_port); +int ice_devlink_create_sf_dev_port(struct ice_sf_dev *sf_dev); +void ice_devlink_destroy_sf_dev_port(struct ice_sf_dev *sf_dev); + +#define ice_devlink_port_to_dyn(port) \ + container_of(port, struct ice_dynamic_port, devlink_port) + +int +ice_devlink_port_new(struct devlink *devlink, + const struct devlink_port_new_attrs *new_attr, + struct netlink_ext_ack *extack, + struct devlink_port **devlink_port); +#endif /* _DEVLINK_PORT_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 365c03d1c462..ddd0ad68185b 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -77,6 +77,8 @@ #include "ice_gnss.h" #include "ice_irq.h" #include "ice_dpll.h" +#include "ice_adapter.h" +#include "devlink/health.h" #define ICE_BAR0 0 #define ICE_REQ_DESC_MULTIPLE 32 @@ -95,9 +97,6 @@ #define ICE_MIN_LAN_OICR_MSIX 1 #define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX) #define ICE_FDIR_MSIX 2 -#define ICE_RDMA_NUM_AEQ_MSIX 4 -#define ICE_MIN_RDMA_MSIX 2 -#define ICE_ESWITCH_MSIX 1 #define ICE_NO_VSI 0xffff #define ICE_VSI_MAP_CONTIG 0 #define ICE_VSI_MAP_SCATTER 1 @@ -180,11 +179,9 @@ #define ice_for_each_chnl_tc(i) \ for ((i) = ICE_CHNL_START_TC; (i) < ICE_CHNL_MAX_TC; (i)++) -#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_UCAST_RX) +#define ICE_UCAST_PROMISC_BITS ICE_PROMISC_UCAST_RX -#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_TX | \ - ICE_PROMISC_UCAST_RX | \ - ICE_PROMISC_VLAN_TX | \ +#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_RX | \ ICE_PROMISC_VLAN_RX) #define ICE_MCAST_PROMISC_BITS (ICE_PROMISC_MCAST_TX | ICE_PROMISC_MCAST_RX) @@ -196,16 +193,16 @@ #define ice_pf_to_dev(pf) (&((pf)->pdev->dev)) -#define ice_pf_src_tmr_owned(pf) ((pf)->hw.func_caps.ts_func_info.src_tmr_owned) - enum ice_feature { ICE_F_DSCP, ICE_F_PHY_RCLK, ICE_F_SMA_CTRL, ICE_F_CGU, ICE_F_GNSS, + ICE_F_GCS, ICE_F_ROCE_LAG, ICE_F_SRIOV_LAG, + ICE_F_MBX_LIMIT, ICE_F_MAX }; @@ -317,6 +314,7 @@ enum ice_vsi_state { ICE_VSI_UMAC_FLTR_CHANGED, ICE_VSI_MMAC_FLTR_CHANGED, ICE_VSI_PROMISC_CHANGED, + ICE_VSI_REBUILD_PENDING, ICE_VSI_STATE_NBITS /* must be last */ }; @@ -330,7 +328,6 @@ struct ice_vsi { struct net_device *netdev; struct ice_sw *vsw; /* switch this VSI is on */ struct ice_pf *back; /* back pointer to PF */ - struct ice_port_info *port_info; /* back pointer to port_info */ struct ice_rx_ring **rx_rings; /* Rx ring array */ struct ice_tx_ring **tx_rings; /* Tx ring array */ struct ice_q_vector **q_vectors; /* q_vector array */ @@ -348,12 +345,9 @@ struct ice_vsi { /* tell if only dynamic irq allocation is allowed */ bool irq_dyn_alloc; - enum ice_vsi_type type; u16 vsi_num; /* HW (absolute) index of this VSI */ u16 idx; /* software index in pf->vsi[] */ - struct ice_vf *vf; 
/* VF associated with this VSI */ - u16 num_gfltr; u16 num_bfltr; @@ -373,9 +367,6 @@ struct ice_vsi { spinlock_t arfs_lock; /* protects aRFS hash table and filter state */ atomic_t *arfs_last_fltr_id; - u16 max_frame; - u16 rx_buf_len; - struct ice_aqc_vsi_props info; /* VSI properties */ struct ice_vsi_vlan_info vlan_info; /* vlan config to be restored */ @@ -408,13 +399,12 @@ struct ice_vsi { u16 req_rxq; /* User requested Rx queues */ u16 num_rx_desc; u16 num_tx_desc; - u16 qset_handle[ICE_MAX_TRAFFIC_CLASS]; struct ice_tc_cfg tc_cfg; struct bpf_prog *xdp_prog; struct ice_tx_ring **xdp_rings; /* XDP ring array */ - unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */ u16 num_xdp_txq; /* Used XDP queues */ u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ + struct mutex xdp_state_lock; struct net_device **target_netdevs; @@ -445,12 +435,23 @@ struct ice_vsi { u8 old_numtc; u16 old_ena_tc; - struct ice_channel *ch; - /* setup back reference, to which aggregator node this VSI * corresponds to */ struct ice_agg_node *agg_node; + + struct_group_tagged(ice_vsi_cfg_params, params, + struct ice_port_info *port_info; /* back pointer to port_info */ + struct ice_channel *ch; /* VSI's channel structure, may be NULL */ + union { + /* VF associated with this VSI, may be NULL */ + struct ice_vf *vf; + /* SF associated with this VSI, may be NULL */ + struct ice_dynamic_port *sf; + }; + u32 flags; /* VSI flags used for rebuild and configuration */ + enum ice_vsi_type type; /* the type of the VSI */ + ); } ____cacheline_internodealigned_in_smp; /* struct that defines an interrupt vector */ @@ -458,7 +459,7 @@ struct ice_q_vector { struct ice_vsi *vsi; u16 v_idx; /* index in the vsi->q_vector array. */ - u16 reg_idx; + u16 reg_idx; /* PF relative register index */ u8 num_ring_rx; /* total number of Rx rings in vector */ u8 num_ring_tx; /* total number of Tx rings in vector */ u8 wb_on_itr:1; /* if true, WB on ITR is enabled */ @@ -472,14 +473,12 @@ struct ice_q_vector { struct ice_ring_container rx; struct ice_ring_container tx; - cpumask_t affinity_mask; - struct irq_affinity_notify affinity_notify; - struct ice_channel *ch; char name[ICE_INT_NAME_STR_LEN]; u16 total_events; /* net_dim(): number of interrupts processed */ + u16 vf_reg_idx; /* VF relative register index */ struct msi_map irq; } ____cacheline_internodealigned_in_smp; @@ -513,6 +512,7 @@ enum ice_pf_flags { ICE_FLAG_MTU_CHANGED, ICE_FLAG_GNSS, /* GNSS successfully initialized */ ICE_FLAG_DPLL, /* SyncE/PTP dplls initialized */ + ICE_FLAG_LLDP_AQ_FLTR, ICE_PF_FLAGS_NBITS /* must be last */ }; @@ -522,17 +522,10 @@ enum ice_misc_thread_tasks { }; struct ice_eswitch { - struct ice_vsi *control_vsi; struct ice_vsi *uplink_vsi; struct ice_esw_br_offloads *br_offloads; struct xarray reprs; bool is_running; - /* struct to allow cp queues management optimization */ - struct { - int to_reach; - int value; - bool is_reaching; - } qs; }; struct ice_agg_node { @@ -542,8 +535,17 @@ struct ice_agg_node { u8 valid; }; +struct ice_pf_msix { + u32 cur; + u32 min; + u32 max; + u32 total; + u32 rest; +}; + struct ice_pf { struct pci_dev *pdev; + struct ice_adapter *adapter; struct devlink_region *nvm_region; struct devlink_region *sram_region; @@ -553,15 +555,8 @@ struct ice_pf { struct devlink_port devlink_port; /* OS reserved IRQ details */ - struct msix_entry *msix_entries; struct ice_irq_tracker irq_tracker; - /* First MSIX vector used by SR-IOV VFs. 
Calculated by subtracting the - * number of MSIX vectors needed for all SR-IOV VFs from the number of - * MSIX vectors allowed on this PF. - */ - u16 sriov_base_vector; - unsigned long *sriov_irq_bm; /* bitmap to track irq usage */ - u16 sriov_irq_size; /* size of the irq_bm bitmap */ + struct ice_virt_irq_tracker virt_irq_tracker; u16 ctrl_vsi_idx; /* control VSI index in pf->vsi array */ @@ -594,7 +589,6 @@ struct ice_pf { struct gnss_serial *gnss_serial; struct gnss_device *gnss_dev; u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */ - u16 rdma_base_vector; /* spinlock to protect the AdminQ wait list */ spinlock_t aq_wait_lock; @@ -611,7 +605,7 @@ struct ice_pf { struct msi_map ll_ts_irq; /* LL_TS interrupt MSIX vector */ u16 max_pf_txqs; /* Total Tx queues PF wide */ u16 max_pf_rxqs; /* Total Rx queues PF wide */ - u16 num_lan_msix; /* Total MSIX vectors for base driver */ + struct ice_pf_msix msix; u16 num_lan_tx; /* num LAN Tx queues setup */ u16 num_lan_rx; /* num LAN Rx queues setup */ u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */ @@ -627,14 +621,12 @@ struct ice_pf { struct ice_hw_port_stats stats_prev; struct ice_hw hw; u8 stat_prev_loaded:1; /* has previous stats been loaded */ - u8 rdma_mode; u16 dcbx_cap; u32 tx_timeout_count; unsigned long tx_timeout_last_recovery; u32 tx_timeout_recovery_level; char int_name[ICE_INT_NAME_STR_LEN]; char int_name_ll_ts[ICE_INT_NAME_STR_LEN]; - struct auxiliary_device *adev; int aux_idx; u32 sw_int_count; /* count of tc_flower filters specific to channel (aka where filter @@ -653,6 +645,9 @@ struct ice_pf { struct ice_eswitch eswitch; struct ice_esw_br_port *br_port; + struct xarray dyn_ports; + struct xarray sf_nums; + #define ICE_INVALID_AGG_NODE_ID 0 #define ICE_PF_AGG_NODE_ID_START 1 #define ICE_MAX_PF_AGG_NODES 32 @@ -662,6 +657,10 @@ struct ice_pf { struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES]; struct ice_dplls dplls; struct device *hwmon_dev; + struct ice_health health_reporters; + struct iidc_rdma_core_dev_info *cdev_info; + + u8 num_quanta_prof_used; }; extern struct workqueue_struct *ice_lag_wq; @@ -749,21 +748,36 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring) } /** - * ice_xsk_pool - get XSK buffer pool bound to a ring + * ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID + * @vsi: pointer to VSI + * @qid: index of a queue to look at XSK buff pool presence + * + * Return: A pointer to xsk_buff_pool structure if there is a buffer pool + * attached and configured as zero-copy, NULL otherwise. + */ +static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi, + u16 qid) +{ + struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid); + + if (!ice_is_xdp_ena_vsi(vsi)) + return NULL; + + return (pool && pool->dev) ? pool : NULL; +} + +/** + * ice_rx_xsk_pool - assign XSK buff pool to Rx ring * @ring: Rx ring to use * - * Returns a pointer to xsk_buff_pool structure if there is a buffer pool - * present, NULL otherwise. + * Sets XSK buff pool pointer on Rx ring. 
*/ -static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring) +static inline void ice_rx_xsk_pool(struct ice_rx_ring *ring) { struct ice_vsi *vsi = ring->vsi; u16 qid = ring->q_index; - if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) - return NULL; - - return xsk_get_pool_from_qid(vsi->netdev, qid); + WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid)); } /** @@ -788,12 +802,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid) if (!ring) return; - if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) { - ring->xsk_pool = NULL; - return; - } - - ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid); + WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid)); } /** @@ -909,6 +918,7 @@ int ice_vsi_open(struct ice_vsi *vsi); void ice_set_ethtool_ops(struct net_device *netdev); void ice_set_ethtool_repr_ops(struct net_device *netdev); void ice_set_ethtool_safe_mode_ops(struct net_device *netdev); +void ice_set_ethtool_sf_ops(struct net_device *netdev); u16 ice_get_avail_txq_count(struct ice_pf *pf); u16 ice_get_avail_rxq_count(struct ice_pf *pf); int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked); @@ -922,9 +932,17 @@ int ice_down(struct ice_vsi *vsi); int ice_down_up(struct ice_vsi *vsi); int ice_vsi_cfg_lan(struct ice_vsi *vsi); struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi); + +enum ice_xdp_cfg { + ICE_XDP_CFG_FULL, /* Fully apply new config in .ndo_bpf() */ + ICE_XDP_CFG_PART, /* Save/use part of config in VSI rebuild */ +}; + int ice_vsi_determine_xdp_res(struct ice_vsi *vsi); -int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog); -int ice_destroy_xdp_rings(struct ice_vsi *vsi); +int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog, + enum ice_xdp_cfg cfg_type); +int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type); +void ice_map_xdp_rings(struct ice_vsi *vsi); int ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags); @@ -986,6 +1004,14 @@ void ice_unload(struct ice_pf *pf); void ice_adv_lnk_speed_maps_init(void); int ice_init_dev(struct ice_pf *pf); void ice_deinit_dev(struct ice_pf *pf); +int ice_change_mtu(struct net_device *netdev, int new_mtu); +void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue); +int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp); +void ice_set_netdev_features(struct net_device *netdev); +int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid); +int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid); +void ice_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats); /** * ice_set_rdma_cap - enable RDMA support @@ -1014,4 +1040,62 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf) } extern const struct xdp_metadata_ops ice_xdp_md_ops; + +/** + * ice_is_dual - Check if given config is multi-NAC + * @hw: pointer to HW structure + * + * Return: true if the device is running in mutli-NAC (Network + * Acceleration Complex) configuration variant, false otherwise + * (always false for non-E825 devices). + */ +static inline bool ice_is_dual(struct ice_hw *hw) +{ + return hw->mac_type == ICE_MAC_GENERIC_3K_E825 && + (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M); +} + +/** + * ice_is_primary - Check if given device belongs to the primary complex + * @hw: pointer to HW structure + * + * Check if given PF/HW is running on primary complex in multi-NAC + * configuration. 
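Both ice_rx_xsk_pool() and ice_tx_xsk_pool() above now publish the pool pointer with WRITE_ONCE(). Presumably this pairs with lockless READ_ONCE() readers of ring->xsk_pool in the datapath; those readers live in other files and are not part of this hunk, so the snippet below is only an assumed sketch of the reader side:

static bool example_ring_uses_zc(const struct ice_rx_ring *ring)
{
	/* Assumed reader-side counterpart to the WRITE_ONCE() above. */
	return READ_ONCE(ring->xsk_pool) != NULL;
}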
+ * + * Return: true if the device is dual, false otherwise (always true + * for non-E825 devices). + */ +static inline bool ice_is_primary(struct ice_hw *hw) +{ + return hw->mac_type != ICE_MAC_GENERIC_3K_E825 || + !ice_is_dual(hw) || + (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M); +} + +/** + * ice_pf_src_tmr_owned - Check if a primary timer is owned by PF + * @pf: pointer to PF structure + * + * Return: true if PF owns primary timer, false otherwise. + */ +static inline bool ice_pf_src_tmr_owned(struct ice_pf *pf) +{ + return pf->hw.func_caps.ts_func_info.src_tmr_owned && + ice_is_primary(&pf->hw); +} + +/** + * ice_get_primary_hw - Get pointer to primary ice_hw structure + * @pf: pointer to PF structure + * + * Return: A pointer to ice_hw structure with access to timesync + * register space. + */ +static inline struct ice_hw *ice_get_primary_hw(struct ice_pf *pf) +{ + if (!pf->adapter->ctrl_pf) + return &pf->hw; + else + return &pf->adapter->ctrl_pf->hw; +} #endif /* _ICE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c new file mode 100644 index 000000000000..66e070095d1b --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_adapter.c @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-2.0-only +// SPDX-FileCopyrightText: Copyright Red Hat + +#include <linux/cleanup.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/xarray.h> +#include "ice_adapter.h" +#include "ice.h" + +static DEFINE_XARRAY(ice_adapters); +static DEFINE_MUTEX(ice_adapters_mutex); + +static unsigned long ice_adapter_index(u64 dsn) +{ +#if BITS_PER_LONG == 64 + return dsn; +#else + return (u32)dsn ^ (u32)(dsn >> 32); +#endif +} + +static struct ice_adapter *ice_adapter_new(u64 dsn) +{ + struct ice_adapter *adapter; + + adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); + if (!adapter) + return NULL; + + adapter->device_serial_number = dsn; + spin_lock_init(&adapter->ptp_gltsyn_time_lock); + refcount_set(&adapter->refcount, 1); + + mutex_init(&adapter->ports.lock); + INIT_LIST_HEAD(&adapter->ports.ports); + + return adapter; +} + +static void ice_adapter_free(struct ice_adapter *adapter) +{ + WARN_ON(!list_empty(&adapter->ports.ports)); + mutex_destroy(&adapter->ports.lock); + + kfree(adapter); +} + +/** + * ice_adapter_get - Get a shared ice_adapter structure. + * @pdev: Pointer to the pci_dev whose driver is getting the ice_adapter. + * + * Gets a pointer to a shared ice_adapter structure. Physical functions (PFs) + * of the same multi-function PCI device share one ice_adapter structure. + * The ice_adapter is reference-counted. The PF driver must use ice_adapter_put + * to release its reference. + * + * Context: Process, may sleep. + * Return: Pointer to ice_adapter on success. + * ERR_PTR() on error. -ENOMEM is the only possible error. 
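A sketch of the get/put pairing described in the comment above, assuming it is wired into the PF probe and remove paths; the actual ice_probe()/ice_remove() changes are in other hunks of this series and the example_* functions are illustrative only:

static int example_probe(struct pci_dev *pdev, struct ice_pf *pf)
{
	struct ice_adapter *adapter = ice_adapter_get(pdev);

	if (IS_ERR(adapter))
		return PTR_ERR(adapter);

	pf->adapter = adapter;
	return 0;
}

static void example_remove(struct pci_dev *pdev, struct ice_pf *pf)
{
	pf->adapter = NULL;
	ice_adapter_put(pdev);
}

Separately, on 32-bit kernels ice_adapter_index() folds the 64-bit DSN by XOR-ing its halves, so two different DSNs can collide on the same xarray index; the cached device_serial_number together with the WARN_ON_ONCE() in ice_adapter_get() below is what would flag such a collision. A standalone illustration:

#include <stdint.h>
#include <stdio.h>

static uint32_t fold_dsn(uint64_t dsn)
{
	return (uint32_t)dsn ^ (uint32_t)(dsn >> 32);
}

int main(void)
{
	uint64_t a = 0x0000000100000002ULL;
	uint64_t b = 0x0000000200000001ULL;

	/* Both fold to 3 even though the DSNs differ. */
	printf("%u %u\n", fold_dsn(a), fold_dsn(b));
	return 0;
}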
+ */ +struct ice_adapter *ice_adapter_get(struct pci_dev *pdev) +{ + u64 dsn = pci_get_dsn(pdev); + struct ice_adapter *adapter; + unsigned long index; + int err; + + index = ice_adapter_index(dsn); + scoped_guard(mutex, &ice_adapters_mutex) { + err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL); + if (err == -EBUSY) { + adapter = xa_load(&ice_adapters, index); + refcount_inc(&adapter->refcount); + WARN_ON_ONCE(adapter->device_serial_number != dsn); + return adapter; + } + if (err) + return ERR_PTR(err); + + adapter = ice_adapter_new(dsn); + if (!adapter) + return ERR_PTR(-ENOMEM); + xa_store(&ice_adapters, index, adapter, GFP_KERNEL); + } + return adapter; +} + +/** + * ice_adapter_put - Release a reference to the shared ice_adapter structure. + * @pdev: Pointer to the pci_dev whose driver is releasing the ice_adapter. + * + * Releases the reference to ice_adapter previously obtained with + * ice_adapter_get. + * + * Context: Process, may sleep. + */ +void ice_adapter_put(struct pci_dev *pdev) +{ + u64 dsn = pci_get_dsn(pdev); + struct ice_adapter *adapter; + unsigned long index; + + index = ice_adapter_index(dsn); + scoped_guard(mutex, &ice_adapters_mutex) { + adapter = xa_load(&ice_adapters, index); + if (WARN_ON(!adapter)) + return; + if (!refcount_dec_and_test(&adapter->refcount)) + return; + + WARN_ON(xa_erase(&ice_adapters, index) != adapter); + } + ice_adapter_free(adapter); +} diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h new file mode 100644 index 000000000000..ac15c0d2bc1a --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_adapter.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* SPDX-FileCopyrightText: Copyright Red Hat */ + +#ifndef _ICE_ADAPTER_H_ +#define _ICE_ADAPTER_H_ + +#include <linux/types.h> +#include <linux/spinlock_types.h> +#include <linux/refcount_types.h> + +struct pci_dev; +struct ice_pf; + +/** + * struct ice_port_list - data used to store the list of adapter ports + * + * This structure contains data used to maintain a list of adapter ports + * + * @ports: list of ports + * @lock: protect access to the ports list + */ +struct ice_port_list { + struct list_head ports; + /* To synchronize the ports list operations */ + struct mutex lock; +}; + +/** + * struct ice_adapter - PCI adapter resources shared across PFs + * @ptp_gltsyn_time_lock: Spinlock protecting access to the GLTSYN_TIME + * register of the PTP clock. + * @refcount: Reference count. struct ice_pf objects hold the references. 
+ * @ctrl_pf: Control PF of the adapter + * @ports: Ports list + * @device_serial_number: DSN cached for collision detection on 32bit systems + */ +struct ice_adapter { + refcount_t refcount; + /* For access to the GLTSYN_TIME register */ + spinlock_t ptp_gltsyn_time_lock; + + struct ice_pf *ctrl_pf; + struct ice_port_list ports; + u64 device_serial_number; +}; + +struct ice_adapter *ice_adapter_get(struct pci_dev *pdev); +void ice_adapter_put(struct pci_dev *pdev); + +#endif /* _ICE_ADAPTER_H */ diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 1f3e7a6903e5..bdee499f991a 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -12,6 +12,13 @@ #define ICE_AQC_TOPO_MAX_LEVEL_NUM 0x9 #define ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728 +#define ICE_RXQ_CTX_SIZE_DWORDS 8 +#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32)) +#define ICE_TXQ_CTX_SZ 22 + +typedef struct __packed { u8 buf[ICE_RXQ_CTX_SZ]; } ice_rxq_ctx_buf_t; +typedef struct __packed { u8 buf[ICE_TXQ_CTX_SZ]; } ice_txq_ctx_buf_t; + struct ice_aqc_generic { __le32 param0; __le32 param1; @@ -121,6 +128,8 @@ struct ice_aqc_list_caps_elem { #define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076 #define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077 #define ICE_AQC_CAPS_NVM_MGMT 0x0080 +#define ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE 0x0085 +#define ICE_AQC_CAPS_NAC_TOPOLOGY 0x0087 #define ICE_AQC_CAPS_FW_LAG_SUPPORT 0x0092 #define ICE_AQC_BIT_ROCEV2_LAG 0x01 #define ICE_AQC_BIT_SRIOV_LAG 0x02 @@ -230,6 +239,13 @@ struct ice_aqc_get_sw_cfg_resp_elem { #define ICE_AQC_GET_SW_CONF_RESP_IS_VF BIT(15) }; +/* Loopback port parameter mode values. */ +enum ice_local_fwd_mode { + ICE_LOCAL_FWD_MODE_ENABLED = 0, + ICE_LOCAL_FWD_MODE_DISABLED = 1, + ICE_LOCAL_FWD_MODE_PRIORITIZED = 2, +}; + /* Set Port parameters, (direct, 0x0203) */ struct ice_aqc_set_port_params { __le16 cmd_flags; @@ -238,7 +254,9 @@ struct ice_aqc_set_port_params { __le16 swid; #define ICE_AQC_PORT_SWID_VALID BIT(15) #define ICE_AQC_PORT_SWID_M 0xFF - u8 reserved[10]; + u8 local_fwd_mode; +#define ICE_AQC_SET_P_PARAMS_LOCAL_FWD_MODE_VALID BIT(2) + u8 reserved[9]; }; /* These resource type defines are used for all switch resource @@ -264,6 +282,8 @@ struct ice_aqc_set_port_params { #define ICE_AQC_RES_TYPE_FLAG_SHARED BIT(7) #define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM BIT(12) #define ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX BIT(13) +#define ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED BIT(14) +#define ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_CTL BIT(15) #define ICE_AQC_RES_TYPE_FLAG_DEDICATED 0x00 @@ -808,6 +828,23 @@ struct ice_aqc_get_topo { __le32 addr_low; }; +/* Get/Set Tx Topology (indirect 0x0418/0x0417) */ +struct ice_aqc_get_set_tx_topo { + u8 set_flags; +#define ICE_AQC_TX_TOPO_FLAGS_CORRER BIT(0) +#define ICE_AQC_TX_TOPO_FLAGS_SRC_RAM BIT(1) +#define ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW BIT(4) +#define ICE_AQC_TX_TOPO_FLAGS_ISSUED BIT(5) + + u8 get_flags; +#define ICE_AQC_TX_TOPO_GET_RAM 2 + + __le16 reserved1; + __le32 reserved2; + __le32 addr_high; + __le32 addr_low; +}; + /* Update TSE (indirect 0x0403) * Get TSE (indirect 0x0404) * Add TSE (indirect 0x0401) @@ -1440,6 +1477,71 @@ struct ice_aqc_get_sensor_reading_resp { } data; }; +/* DNL call command (indirect 0x0682) + * Struct is used for both command and response + */ +struct ice_aqc_dnl_call_command { + u8 ctx; /* Used in command, reserved in response */ + u8 reserved; + __le16 activity_id; +#define 
ICE_AQC_ACT_ID_DNL 0x1129 + __le32 reserved1; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_dnl_equa_param { + __le16 data_in; +#define ICE_AQC_RX_EQU_SHIFT 8 +#define ICE_AQC_RX_EQU_PRE2 (0x10 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_PRE1 (0x11 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_POST1 (0x12 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_BFLF (0x13 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_BFHF (0x14 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_CTLE_GAINHF (0x20 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_CTLE_GAINLF (0x21 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_CTLE_GAINDC (0x22 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_CTLE_BW (0x23 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_DFE_GAIN (0x30 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_DFE_GAIN2 (0x31 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_DFE_2 (0x32 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_DFE_3 (0x33 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_DFE_4 (0x34 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_DFE_5 (0x35 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_DFE_6 (0x36 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_DFE_7 (0x37 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_DFE_8 (0x38 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_DFE_9 (0x39 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_DFE_10 (0x3A << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_DFE_11 (0x3B << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_DFE_12 (0x3C << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_TX_EQU_PRE1 0x0 +#define ICE_AQC_TX_EQU_PRE3 0x3 +#define ICE_AQC_TX_EQU_ATTEN 0x4 +#define ICE_AQC_TX_EQU_POST1 0x8 +#define ICE_AQC_TX_EQU_PRE2 0xC + __le16 op_code_serdes_sel; +#define ICE_AQC_OP_CODE_SHIFT 4 +#define ICE_AQC_OP_CODE_RX_EQU (0x9 << ICE_AQC_OP_CODE_SHIFT) +#define ICE_AQC_OP_CODE_TX_EQU (0x10 << ICE_AQC_OP_CODE_SHIFT) + __le32 reserved[3]; +}; + +struct ice_aqc_dnl_equa_respon { + /* Equalization value can be negative */ + int val; + __le32 reserved[3]; +}; + +/* DNL call command/response buffer (indirect 0x0682) */ +struct ice_aqc_dnl_call { + union { + struct ice_aqc_dnl_equa_param txrx_equa_reqs; + __le32 stores[4]; + struct ice_aqc_dnl_equa_respon txrx_equa_resp; + } sto; +}; + struct ice_aqc_link_topo_params { u8 lport_num; u8 lport_num_valid; @@ -1569,6 +1671,7 @@ struct ice_aqc_get_port_options_elem { #define ICE_AQC_PORT_OPT_MAX_LANE_25G 5 #define ICE_AQC_PORT_OPT_MAX_LANE_50G 6 #define ICE_AQC_PORT_OPT_MAX_LANE_100G 7 +#define ICE_AQC_PORT_OPT_MAX_LANE_200G 8 u8 global_scid[2]; u8 phy_scid[2]; @@ -1663,6 +1766,24 @@ struct ice_aqc_nvm { }; #define ICE_AQC_NVM_START_POINT 0 +#define ICE_AQC_NVM_SECTOR_UNIT 4096 +#define ICE_AQC_NVM_SDP_AC_PTR_OFFSET 0xD8 +#define ICE_AQC_NVM_SDP_AC_PTR_M GENMASK(14, 0) +#define ICE_AQC_NVM_SDP_AC_PTR_INVAL 0x7FFF +#define ICE_AQC_NVM_SDP_AC_PTR_TYPE_M BIT(15) +#define ICE_AQC_NVM_SDP_AC_SDP_NUM_M GENMASK(2, 0) +#define ICE_AQC_NVM_SDP_AC_DIR_M BIT(3) +#define ICE_AQC_NVM_SDP_AC_PIN_M GENMASK(15, 6) +#define ICE_AQC_NVM_SDP_AC_MAX_SIZE 7 + +#define ICE_AQC_NVM_TX_TOPO_MOD_ID 0x14B + +struct ice_aqc_nvm_tx_topo_user_sel { + __le16 length; + u8 data; +#define ICE_AQC_NVM_TX_TOPO_USER_SEL BIT(4) + u8 reserved; +}; /* NVM Checksum Command (direct, 0x0706) */ struct ice_aqc_nvm_checksum { @@ -1693,6 +1814,7 @@ struct ice_aqc_nvm_pass_comp_tbl { #define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED 0x0 #define ICE_AQ_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE 0x1 #define ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED 0x2 +#define 
ICE_AQ_NVM_PASS_COMP_PARTIAL_CHECK 0x3 u8 component_response_code; /* Response only */ #define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED_CODE 0x0 #define ICE_AQ_NVM_PASS_COMP_STAMP_IDENTICAL_CODE 0x1 @@ -1970,10 +2092,10 @@ struct ice_aqc_add_txqs_perq { __le16 txq_id; u8 rsvd[2]; __le32 q_teid; - u8 txq_ctx[22]; + ice_txq_ctx_buf_t txq_ctx; u8 rsvd2[2]; struct ice_aqc_txsched_elem info; -}; +} __packed; /* The format of the command buffer for Add Tx LAN Queues (0x0C30) * is an array of the following structs. Please note that the length of @@ -2150,6 +2272,8 @@ struct ice_aqc_get_pkg_info_resp { struct ice_aqc_get_pkg_info pkg_info[]; }; +#define ICE_AQC_GET_CGU_MAX_PHASE_ADJ GENMASK(30, 0) + /* Get CGU abilities command response data structure (indirect 0x0C61) */ struct ice_aqc_get_cgu_abilities { u8 num_inputs; @@ -2394,6 +2518,87 @@ enum ice_aqc_fw_logging_mod { ICE_AQC_FW_LOG_ID_MAX, }; +enum ice_aqc_health_status_mask { + ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK = BIT(0), + ICE_AQC_HEALTH_STATUS_SET_ALL_PF_MASK = BIT(1), + ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK = BIT(2), +}; + +/* Set Health Status (direct 0xFF20) */ +struct ice_aqc_set_health_status_cfg { + u8 event_source; + u8 reserved[15]; +}; + +enum ice_aqc_health_status { + ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT = 0x101, + ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE = 0x102, + ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL = 0x103, + ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM = 0x104, + ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT = 0x105, + ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT = 0x106, + ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED = 0x107, + ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT = 0x108, + ICE_AQC_HEALTH_STATUS_ERR_MOD_DIAGNOSTIC_FEATURE = 0x109, + ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG = 0x10B, + ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS = 0x10C, + ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE = 0x10D, + ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED = 0x10F, + ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT = 0x110, + ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED = 0x111, + ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO = 0x112, + ICE_AQC_HEALTH_STATUS_ERR_NETLIST = 0x113, + ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT = 0x114, + ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS = 0x115, + ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME = 0x116, + ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT = 0x117, + ICE_AQC_HEALTH_STATUS_ERR_PHY_NVM_PROG = 0x120, + ICE_AQC_HEALTH_STATUS_ERR_PHY_FW_LOAD = 0x121, + ICE_AQC_HEALTH_STATUS_INFO_RECOVERY = 0x500, + ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS = 0x501, + ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH = 0x502, + ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH = 0x503, + ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH = 0x504, + ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT = 0x505, + ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT = 0x506, + ICE_AQC_HEALTH_STATUS_ERR_NVM_SEC_VIOLATION = 0x507, + ICE_AQC_HEALTH_STATUS_ERR_OROM_SEC_VIOLATION = 0x508, + ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB = 0x509, + ICE_AQC_HEALTH_STATUS_ERR_MNG_TIMEOUT = 0x50A, + ICE_AQC_HEALTH_STATUS_ERR_BMC_RESET = 0x50B, + ICE_AQC_HEALTH_STATUS_ERR_LAST_MNG_FAIL = 0x50C, + ICE_AQC_HEALTH_STATUS_ERR_RESOURCE_ALLOC_FAIL = 0x50D, + ICE_AQC_HEALTH_STATUS_ERR_FW_LOOP = 0x1000, + ICE_AQC_HEALTH_STATUS_ERR_FW_PFR_FAIL = 0x1001, + ICE_AQC_HEALTH_STATUS_ERR_LAST_FAIL_AQ = 0x1002, +}; + +/* Get Health Status (indirect 0xFF22) */ +struct ice_aqc_get_health_status { + __le16 health_status_count; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +enum ice_aqc_health_status_scope { + ICE_AQC_HEALTH_STATUS_PF = 0x1, + 
ICE_AQC_HEALTH_STATUS_PORT = 0x2, + ICE_AQC_HEALTH_STATUS_GLOBAL = 0x3, +}; + +#define ICE_AQC_HEALTH_STATUS_UNDEFINED_DATA 0xDEADBEEF + +/* Get Health Status event buffer entry (0xFF22), + * repeated per reported health status. + */ +struct ice_aqc_health_status_elem { + __le16 health_status_code; + __le16 event_source; + __le32 internal_data1; + __le32 internal_data2; +}; + /* Set FW Logging configuration (indirect 0xFF30) * Register for FW Logging (indirect 0xFF31) * Query FW Logging (indirect 0xFF32) @@ -2534,20 +2739,28 @@ struct ice_aq_desc { struct ice_aqc_get_link_status get_link_status; struct ice_aqc_event_lan_overflow lan_overflow; struct ice_aqc_get_link_topo get_link_topo; + struct ice_aqc_set_health_status_cfg set_health_status_cfg; + struct ice_aqc_get_health_status get_health_status; + struct ice_aqc_dnl_call_command dnl_call; struct ice_aqc_i2c read_write_i2c; struct ice_aqc_read_i2c_resp read_i2c_resp; + struct ice_aqc_get_set_tx_topo get_set_tx_topo; } params; }; /* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */ #define ICE_AQ_LG_BUF 512 +#define ICE_AQ_FLAG_DD_S 0 +#define ICE_AQ_FLAG_CMP_S 1 #define ICE_AQ_FLAG_ERR_S 2 #define ICE_AQ_FLAG_LB_S 9 #define ICE_AQ_FLAG_RD_S 10 #define ICE_AQ_FLAG_BUF_S 12 #define ICE_AQ_FLAG_SI_S 13 +#define ICE_AQ_FLAG_DD BIT(ICE_AQ_FLAG_DD_S) /* 0x1 */ +#define ICE_AQ_FLAG_CMP BIT(ICE_AQ_FLAG_CMP_S) /* 0x2 */ #define ICE_AQ_FLAG_ERR BIT(ICE_AQ_FLAG_ERR_S) /* 0x4 */ #define ICE_AQ_FLAG_LB BIT(ICE_AQ_FLAG_LB_S) /* 0x200 */ #define ICE_AQ_FLAG_RD BIT(ICE_AQ_FLAG_RD_S) /* 0x400 */ @@ -2642,6 +2855,10 @@ enum ice_adminq_opc { ice_aqc_opc_query_sched_res = 0x0412, ice_aqc_opc_remove_rl_profiles = 0x0415, + /* tx topology commands */ + ice_aqc_opc_set_tx_topo = 0x0417, + ice_aqc_opc_get_tx_topo = 0x0418, + /* PHY commands */ ice_aqc_opc_get_phy_caps = 0x0600, ice_aqc_opc_set_phy_cfg = 0x0601, @@ -2653,6 +2870,7 @@ enum ice_adminq_opc { ice_aqc_opc_set_phy_rec_clk_out = 0x0630, ice_aqc_opc_get_phy_rec_clk_out = 0x0631, ice_aqc_opc_get_sensor_reading = 0x0632, + ice_aqc_opc_dnl_call = 0x0682, ice_aqc_opc_get_link_topo = 0x06E0, ice_aqc_opc_read_i2c = 0x06E2, ice_aqc_opc_write_i2c = 0x06E3, @@ -2725,6 +2943,10 @@ enum ice_adminq_opc { /* Standalone Commands/Events */ ice_aqc_opc_event_lan_overflow = 0x1001, + /* System Diagnostic commands */ + ice_aqc_opc_set_health_status_cfg = 0xFF20, + ice_aqc_opc_get_health_status = 0xFF22, + /* FW Logging Commands */ ice_aqc_opc_fw_logs_config = 0xFF30, ice_aqc_opc_fw_logs_register = 0xFF31, diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c index 7cee365cc7d1..2bc5c7f59844 100644 --- a/drivers/net/ethernet/intel/ice/ice_arfs.c +++ b/drivers/net/ethernet/intel/ice/ice_arfs.c @@ -511,7 +511,7 @@ void ice_init_arfs(struct ice_vsi *vsi) struct hlist_head *arfs_fltr_list; unsigned int i; - if (!vsi || vsi->type != ICE_VSI_PF) + if (!vsi || vsi->type != ICE_VSI_PF || ice_is_arfs_active(vsi)) return; arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list), @@ -571,25 +571,6 @@ void ice_clear_arfs(struct ice_vsi *vsi) } /** - * ice_free_cpu_rx_rmap - free setup CPU reverse map - * @vsi: the VSI to be forwarded to - */ -void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) -{ - struct net_device *netdev; - - if (!vsi || vsi->type != ICE_VSI_PF) - return; - - netdev = vsi->netdev; - if (!netdev || !netdev->rx_cpu_rmap) - return; - - free_irq_cpu_rmap(netdev->rx_cpu_rmap); - netdev->rx_cpu_rmap = NULL; -} - -/** * ice_set_cpu_rx_rmap - 
setup CPU reverse map for each queue * @vsi: the VSI to be forwarded to */ @@ -597,7 +578,6 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi) { struct net_device *netdev; struct ice_pf *pf; - int i; if (!vsi || vsi->type != ICE_VSI_PF) return 0; @@ -610,18 +590,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi) netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n", vsi->type, netdev->name, vsi->num_q_vectors); - netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors); - if (unlikely(!netdev->rx_cpu_rmap)) - return -EINVAL; - - ice_for_each_q_vector(vsi, i) - if (irq_cpu_rmap_add(netdev->rx_cpu_rmap, - vsi->q_vectors[i]->irq.virq)) { - ice_free_cpu_rx_rmap(vsi); - return -EINVAL; - } - - return 0; + return netif_enable_cpu_rmap(netdev, vsi->num_q_vectors); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.h b/drivers/net/ethernet/intel/ice/ice_arfs.h index 9669ad9bf7b5..9706293128c3 100644 --- a/drivers/net/ethernet/intel/ice/ice_arfs.h +++ b/drivers/net/ethernet/intel/ice/ice_arfs.h @@ -45,7 +45,6 @@ int ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb, u16 rxq_idx, u32 flow_id); void ice_clear_arfs(struct ice_vsi *vsi); -void ice_free_cpu_rx_rmap(struct ice_vsi *vsi); void ice_init_arfs(struct ice_vsi *vsi); void ice_sync_arfs_fltrs(struct ice_pf *pf); int ice_set_cpu_rx_rmap(struct ice_vsi *vsi); @@ -56,7 +55,6 @@ ice_is_arfs_using_perfect_flow(struct ice_hw *hw, enum ice_fltr_ptype flow_type); #else static inline void ice_clear_arfs(struct ice_vsi *vsi) { } -static inline void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) { } static inline void ice_init_arfs(struct ice_vsi *vsi) { } static inline void ice_sync_arfs_fltrs(struct ice_pf *pf) { } static inline void ice_remove_arfs(struct ice_pf *pf) { } diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index a545a7917e4f..6db4ad8fc70b 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -121,7 +121,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx) q_vector->irq.index = -ENOENT; if (vsi->type == ICE_VSI_VF) { - q_vector->reg_idx = ice_calc_vf_reg_idx(vsi->vf, q_vector); + ice_calc_vf_reg_idx(vsi->vf, q_vector); goto out; } else if (vsi->type == ICE_VSI_CTRL && vsi->vf) { struct ice_vsi *ctrl_vsi = ice_get_vf_ctrl_vsi(pf, vsi); @@ -145,17 +145,15 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx) skip_alloc: q_vector->reg_idx = q_vector->irq.index; - - /* only set affinity_mask if the CPU is online */ - if (cpu_online(v_idx)) - cpumask_set_cpu(v_idx, &q_vector->affinity_mask); + q_vector->vf_reg_idx = q_vector->irq.index; /* This will not be called in the driver load path because the netdev * will not be created yet. All other cases with register the NAPI * handler here (i.e. resume, reset/rebuild, etc.) 
*/ if (vsi->netdev) - netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll); + netif_napi_add_config(vsi->netdev, &q_vector->napi, + ice_napi_poll, v_idx); out: /* tie q_vector and VSI together */ @@ -189,16 +187,11 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) } q_vector = vsi->q_vectors[v_idx]; - ice_for_each_tx_ring(tx_ring, q_vector->tx) { - ice_queue_set_napi(vsi, tx_ring->q_index, NETDEV_QUEUE_TYPE_TX, - NULL); + ice_for_each_tx_ring(tx_ring, vsi->q_vectors[v_idx]->tx) tx_ring->q_vector = NULL; - } - ice_for_each_rx_ring(rx_ring, q_vector->rx) { - ice_queue_set_napi(vsi, rx_ring->q_index, NETDEV_QUEUE_TYPE_RX, - NULL); + + ice_for_each_rx_ring(rx_ring, vsi->q_vectors[v_idx]->rx) rx_ring->q_vector = NULL; - } /* only VSI with an associated netdev is set up with NAPI */ if (vsi->netdev) @@ -264,30 +257,6 @@ static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 } /** - * ice_eswitch_calc_txq_handle - * @ring: pointer to ring which unique index is needed - * - * To correctly work with many netdevs ring->q_index of Tx rings on switchdev - * VSI can repeat. Hardware ring setup requires unique q_index. Calculate it - * here by finding index in vsi->tx_rings of this ring. - * - * Return ICE_INVAL_Q_INDEX when index wasn't found. Should never happen, - * because VSI is get from ring->vsi, so it has to be present in this VSI. - */ -static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring) -{ - const struct ice_vsi *vsi = ring->vsi; - int i; - - ice_for_each_txq(vsi, i) { - if (vsi->tx_rings[i] == ring) - return i; - } - - return ICE_INVAL_Q_INDEX; -} - -/** * ice_cfg_xps_tx_ring - Configure XPS for a Tx ring * @ring: The Tx ring to configure * @@ -303,7 +272,8 @@ static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring) if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state)) return; - netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask, + netif_set_xps_queue(ring->netdev, + &ring->q_vector->napi.config->affinity_mask, ring->q_index); } @@ -353,7 +323,7 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id; tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF; break; - case ICE_VSI_SWITCHDEV_CTRL: + case ICE_VSI_SF: tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ; break; default: @@ -375,6 +345,8 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf break; } + tlan_ctx->quanta_prof_idx = ring->quanta_prof_id; + tlan_ctx->tso_ena = ICE_TX_LEGACY; tlan_ctx->tso_qnum = pf_q; @@ -473,12 +445,23 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) /* Max packet size for this queue - must not be set to a larger value * than 5 x DBUF */ - rlan_ctx.rxmax = min_t(u32, vsi->max_frame, + rlan_ctx.rxmax = min_t(u32, ring->max_frame, ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len); /* Rx queue threshold in units of 64 */ rlan_ctx.lrxqthresh = 1; + /* Enable descriptor prefetch */ + rlan_ctx.prefena = 1; + + /* PF acts as uplink for switchdev; set flex descriptor with src_vsi + * metadata and flags to allow redirecting to PR netdev + */ + if (ice_is_eswitch_mode_switchdev(vsi->back)) { + ring->flags |= ICE_RX_FLAGS_MULTIDEV; + rxdid = ICE_RXDID_FLEX_NIC_2; + } + /* Enable Flexible Descriptors in the queue context which * allows this driver to select a specific receive descriptor format * increasing context priority to pick up profile ID; default is 0x01; @@ -487,9 +470,6 @@ static int ice_setup_rx_ctx(struct 
ice_rx_ring *ring) */ if (vsi->type != ICE_VSI_VF) ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true); - else - ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3, - false); /* Absolute queue number out of 2K needs to be passed */ err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q); @@ -531,6 +511,25 @@ static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring) } /** + * ice_get_frame_sz - calculate xdp_buff::frame_sz + * @rx_ring: the ring being configured + * + * Return frame size based on underlying PAGE_SIZE + */ +static unsigned int ice_get_frame_sz(struct ice_rx_ring *rx_ring) +{ + unsigned int frame_sz; + +#if (PAGE_SIZE >= 8192) + frame_sz = rx_ring->rx_buf_len; +#else + frame_sz = ice_rx_pg_size(rx_ring) / 2; +#endif + + return frame_sz; +} + +/** * ice_vsi_cfg_rxq - Configure an Rx queue * @ring: the ring being configured * @@ -542,9 +541,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) u32 num_bufs = ICE_RX_DESC_UNUSED(ring); int err; - ring->rx_buf_len = ring->vsi->rx_buf_len; - - if (ring->vsi->type == ICE_VSI_PF) { + if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) { if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) { err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, ring->q_index, @@ -554,7 +551,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) return err; } - ring->xsk_pool = ice_xsk_pool(ring); + ice_rx_xsk_pool(ring); if (ring->xsk_pool) { xdp_rxq_info_unreg(&ring->xdp_rxq); @@ -594,7 +591,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) } } - xdp_init_buff(&ring->xdp, ice_rx_pg_size(ring) / 2, &ring->xdp_rxq); + xdp_init_buff(&ring->xdp, ice_get_frame_sz(ring), &ring->xdp_rxq); ring->xdp.data = NULL; ring->xdp_ext.pkt_ctx = &ring->pkt_ctx; err = ice_setup_rx_ctx(ring); @@ -615,7 +612,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring) return 0; } - ok = ice_alloc_rx_bufs_zc(ring, num_bufs); + ok = ice_alloc_rx_bufs_zc(ring, ring->xsk_pool, num_bufs); if (!ok) { u16 pf_q = ring->vsi->rxq_map[ring->q_index]; @@ -642,21 +639,25 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx) /** * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length * @vsi: VSI + * @ring: Rx ring to configure + * + * Determine the maximum frame size and Rx buffer length to use for a PF VSI. + * Set these in the associated Rx ring structure. 
*/ -static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi) +static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi, struct ice_rx_ring *ring) { if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) { - vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX; - vsi->rx_buf_len = ICE_RXBUF_1664; + ring->max_frame = ICE_MAX_FRAME_LEGACY_RX; + ring->rx_buf_len = ICE_RXBUF_1664; #if (PAGE_SIZE < 8192) } else if (!ICE_2K_TOO_SMALL_WITH_PADDING && (vsi->netdev->mtu <= ETH_DATA_LEN)) { - vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN; - vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN; + ring->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN; + ring->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN; #endif } else { - vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; - vsi->rx_buf_len = ICE_RXBUF_3072; + ring->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX; + ring->rx_buf_len = ICE_RXBUF_3072; } } @@ -671,15 +672,15 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi) { u16 i; - if (vsi->type == ICE_VSI_VF) - goto setup_rings; - - ice_vsi_cfg_frame_size(vsi); -setup_rings: /* set up individual rings */ ice_for_each_rxq(vsi, i) { - int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]); + struct ice_rx_ring *ring = vsi->rx_rings[i]; + int err; + + if (vsi->type != ICE_VSI_VF) + ice_vsi_cfg_frame_size(vsi, ring); + err = ice_vsi_cfg_rxq(ring); if (err) return err; } @@ -794,13 +795,11 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi) return 0; err_out: - while (v_idx--) - ice_free_q_vector(vsi, v_idx); - dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n", - vsi->num_q_vectors, vsi->vsi_num, err); - vsi->num_q_vectors = 0; - return err; + dev_info(dev, "Failed to allocate %d q_vectors for VSI %d, new value %d", + vsi->num_q_vectors, vsi->vsi_num, v_idx); + vsi->num_q_vectors = v_idx; + return v_idx ? 0 : err; } /** @@ -860,6 +859,9 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi) } rx_rings_rem -= rx_rings_per_v; } + + if (ice_is_xdp_ena_vsi(vsi)) + ice_map_xdp_rings(vsi); } /** @@ -903,8 +905,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring, ice_setup_tx_ctx(ring, &tlan_ctx, pf_q); /* copy context contents into the qg_buf */ qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); - ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, - ice_tlan_ctx_info); + ice_pack_txq_ctx(&tlan_ctx, &qg_buf->txqs[0].txq_ctx); /* init queue specific tail reg. It is referred as * transmit comm scheduler queue doorbell. 
@@ -919,14 +920,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring, /* Add unique software queue handle of the Tx queue per * TC into the VSI Tx ring */ - if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) { - ring->q_handle = ice_eswitch_calc_txq_handle(ring); - - if (ring->q_handle == ICE_INVAL_Q_INDEX) - return -ENODEV; - } else { - ring->q_handle = ice_calc_txq_handle(vsi, ring, tc); - } + ring->q_handle = ice_calc_txq_handle(vsi, ring, tc); if (ch) status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0, diff --git a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h index 57abd52386d0..10d9d74f3545 100644 --- a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h +++ b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h @@ -23,7 +23,18 @@ union nac_cgu_dword9 { u32 clk_synce0_amp : 2; u32 one_pps_out_amp : 2; u32 misc24 : 12; - } field; + }; + u32 val; +}; + +#define NAC_CGU_DWORD16_E825C 0x40 +union nac_cgu_dword16_e825c { + struct { + u32 synce_remndr : 6; + u32 synce_phlmt_en : 1; + u32 misc13 : 17; + u32 tspll_ck_refclkfreq : 8; + }; u32 val; }; @@ -39,7 +50,7 @@ union nac_cgu_dword19 { u32 japll_ndivratio : 4; u32 japll_iref_ndivratio : 3; u32 misc27 : 1; - } field; + }; u32 val; }; @@ -63,7 +74,23 @@ union nac_cgu_dword22 { u32 fdpllclk_sel_div2 : 1; u32 time1588clk_sel_div2 : 1; u32 misc3 : 1; - } field; + }; + u32 val; +}; + +#define NAC_CGU_DWORD23_E825C 0x5C +union nac_cgu_dword23_e825c { + struct { + u32 cgupll_fbdiv_intgr : 10; + u32 ux56pll_fbdiv_intgr : 10; + u32 misc20 : 4; + u32 ts_pll_enable : 1; + u32 time_sync_tspll_align_sel : 1; + u32 ext_synce_sel : 1; + u32 ref1588_ck_div : 4; + u32 time_ref_sel : 1; + + }; u32 val; }; @@ -77,7 +104,7 @@ union nac_cgu_dword24 { u32 ext_synce_sel : 1; u32 ref1588_ck_div : 4; u32 time_ref_sel : 1; - } field; + }; u32 val; }; @@ -92,7 +119,7 @@ union tspll_cntr_bist_settings { u32 i_plllock_cnt_6_0 : 7; u32 i_plllock_cnt_10_7 : 4; u32 reserved200 : 4; - } field; + }; u32 val; }; @@ -109,7 +136,45 @@ union tspll_ro_bwm_lf { u32 afcdone_cri : 1; u32 feedfwrdgain_cal_cri_7_0 : 8; u32 m2fbdivmod_cri_7_0 : 8; - } field; + }; + u32 val; +}; + +#define TSPLL_RO_LOCK_E825C 0x3f0 +union tspll_ro_lock_e825c { + struct { + u32 bw_freqov_high_cri_7_0 : 8; + u32 bw_freqov_high_cri_9_8 : 2; + u32 reserved455 : 1; + u32 plllock_gain_tran_cri : 1; + u32 plllock_true_lock_cri : 1; + u32 pllunlock_flag_cri : 1; + u32 afcerr_cri : 1; + u32 afcdone_cri : 1; + u32 feedfwrdgain_cal_cri_7_0 : 8; + u32 reserved462 : 8; + }; + u32 val; +}; + +#define TSPLL_BW_TDC_E825C 0x31c +union tspll_bw_tdc_e825c { + struct { + u32 i_tdc_offset_lock_1_0 : 2; + u32 i_bbthresh1_2_0 : 3; + u32 i_bbthresh2_2_0 : 3; + u32 i_tdcsel_1_0 : 2; + u32 i_tdcovccorr_en_h : 1; + u32 i_divretimeren : 1; + u32 i_bw_ampmeas_window : 1; + u32 i_bw_lowerbound_2_0 : 3; + u32 i_bw_upperbound_2_0 : 3; + u32 i_bw_mode_1_0 : 2; + u32 i_ft_mode_sel_2_0 : 3; + u32 i_bwphase_4_0 : 5; + u32 i_plllock_sel_1_0 : 2; + u32 i_afc_divratio : 1; + }; u32 val; }; diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index d9f6cc71d900..4fedf0181c4e 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -6,6 +6,7 @@ #include "ice_adminq_cmd.h" #include "ice_flow.h" #include "ice_ptp_hw.h" +#include <linux/packing.h> #define ICE_PF_RESET_WAIT_COUNT 300 #define ICE_MAX_NETLIST_SIZE 10 @@ -160,10 +161,16 @@ static int ice_set_mac_type(struct ice_hw *hw) case 
ICE_DEV_ID_E825C_SGMII: hw->mac_type = ICE_MAC_GENERIC_3K_E825; break; - case ICE_DEV_ID_E830_BACKPLANE: - case ICE_DEV_ID_E830_QSFP56: - case ICE_DEV_ID_E830_SFP: - case ICE_DEV_ID_E830_SFP_DD: + case ICE_DEV_ID_E830CC_BACKPLANE: + case ICE_DEV_ID_E830CC_QSFP56: + case ICE_DEV_ID_E830CC_SFP: + case ICE_DEV_ID_E830CC_SFP_DD: + case ICE_DEV_ID_E830C_BACKPLANE: + case ICE_DEV_ID_E830_XXV_BACKPLANE: + case ICE_DEV_ID_E830C_QSFP: + case ICE_DEV_ID_E830_XXV_QSFP: + case ICE_DEV_ID_E830C_SFP: + case ICE_DEV_ID_E830_XXV_SFP: hw->mac_type = ICE_MAC_E830; break; default: @@ -179,7 +186,7 @@ static int ice_set_mac_type(struct ice_hw *hw) * ice_is_generic_mac - check if device's mac_type is generic * @hw: pointer to the hardware structure * - * Return: true if mac_type is generic (with SBQ support), false if not + * Return: true if mac_type is ICE_MAC_GENERIC*, false otherwise. */ bool ice_is_generic_mac(struct ice_hw *hw) { @@ -188,93 +195,39 @@ bool ice_is_generic_mac(struct ice_hw *hw) } /** - * ice_is_e810 - * @hw: pointer to the hardware structure - * - * returns true if the device is E810 based, false if not. - */ -bool ice_is_e810(struct ice_hw *hw) -{ - return hw->mac_type == ICE_MAC_E810; -} - -/** - * ice_is_e810t - * @hw: pointer to the hardware structure + * ice_is_pf_c827 - check if pf contains c827 phy + * @hw: pointer to the hw struct * - * returns true if the device is E810T based, false if not. + * Return: true if the device has c827 phy. */ -bool ice_is_e810t(struct ice_hw *hw) +static bool ice_is_pf_c827(struct ice_hw *hw) { - switch (hw->device_id) { - case ICE_DEV_ID_E810C_SFP: - switch (hw->subsystem_device_id) { - case ICE_SUBDEV_ID_E810T: - case ICE_SUBDEV_ID_E810T2: - case ICE_SUBDEV_ID_E810T3: - case ICE_SUBDEV_ID_E810T4: - case ICE_SUBDEV_ID_E810T6: - case ICE_SUBDEV_ID_E810T7: - return true; - } - break; - case ICE_DEV_ID_E810C_QSFP: - switch (hw->subsystem_device_id) { - case ICE_SUBDEV_ID_E810T2: - case ICE_SUBDEV_ID_E810T3: - case ICE_SUBDEV_ID_E810T5: - return true; - } - break; - default: - break; - } + struct ice_aqc_get_link_topo cmd = {}; + u8 node_part_number; + u16 node_handle; + int status; - return false; -} + if (hw->mac_type != ICE_MAC_E810) + return false; -/** - * ice_is_e823 - * @hw: pointer to the hardware structure - * - * returns true if the device is E823-L or E823-C based, false if not. - */ -bool ice_is_e823(struct ice_hw *hw) -{ - switch (hw->device_id) { - case ICE_DEV_ID_E823L_BACKPLANE: - case ICE_DEV_ID_E823L_SFP: - case ICE_DEV_ID_E823L_10G_BASE_T: - case ICE_DEV_ID_E823L_1GBE: - case ICE_DEV_ID_E823L_QSFP: - case ICE_DEV_ID_E823C_BACKPLANE: - case ICE_DEV_ID_E823C_QSFP: - case ICE_DEV_ID_E823C_SFP: - case ICE_DEV_ID_E823C_10G_BASE_T: - case ICE_DEV_ID_E823C_SGMII: + if (hw->device_id != ICE_DEV_ID_E810C_QSFP) return true; - default: + + cmd.addr.topo_params.node_type_ctx = + FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) | + FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT); + cmd.addr.topo_params.index = 0; + + status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number, + &node_handle); + + if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827) return false; - } -} -/** - * ice_is_e825c - Check if a device is E825C family device - * @hw: pointer to the hardware structure - * - * Return: true if the device is E825-C based, false if not. 
- */ -bool ice_is_e825c(struct ice_hw *hw) -{ - switch (hw->device_id) { - case ICE_DEV_ID_E825C_BACKPLANE: - case ICE_DEV_ID_E825C_QSFP: - case ICE_DEV_ID_E825C_SFP: - case ICE_DEV_ID_E825C_SGMII: + if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE) return true; - default: - return false; - } + + return false; } /** @@ -512,7 +465,8 @@ ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd, /** * ice_find_netlist_node * @hw: pointer to the hw struct - * @node_type_ctx: type of netlist node to look for + * @node_type: type of netlist node to look for + * @ctx: context of the search * @node_part_number: node part number to look for * @node_handle: output parameter if node found - optional * @@ -522,10 +476,12 @@ ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd, * valid if the function returns zero, and should be ignored on any non-zero * return value. * - * Returns: 0 if the node is found, -ENOENT if no handle was found, and - * a negative error code on failure to access the AQ. + * Return: + * * 0 if the node is found, + * * -ENOENT if no handle was found, + * * negative error code on failure to access the AQ. */ -static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, +static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type, u8 ctx, u8 node_part_number, u16 *node_handle) { u8 idx; @@ -536,8 +492,8 @@ static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, int status; cmd.addr.topo_params.node_type_ctx = - FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, - node_type_ctx); + FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, node_type) | + FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ctx); cmd.addr.topo_params.index = idx; status = ice_aq_get_netlist_node(hw, &cmd, @@ -904,6 +860,9 @@ static int ice_init_fltr_mgmt_struct(struct ice_hw *hw) INIT_LIST_HEAD(&sw->vsi_list_map_head); sw->prof_res_bm_init = 0; + /* Initialize recipe count with default recipes read from NVM */ + sw->recp_cnt = ICE_SW_LKUP_LAST; + status = ice_init_def_sw_recp(hw); if (status) { devm_kfree(ice_hw_to_dev(hw), hw->switch_info); @@ -931,14 +890,7 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) } recps = sw->recp_list; for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { - struct ice_recp_grp_entry *rg_entry, *tmprg_entry; - recps[i].root_rid = i; - list_for_each_entry_safe(rg_entry, tmprg_entry, - &recps[i].rg_list, l_entry) { - list_del(&rg_entry->l_entry); - devm_kfree(ice_hw_to_dev(hw), rg_entry); - } if (recps[i].adv_rule) { struct ice_adv_fltr_mgmt_list_entry *tmp_entry; @@ -963,7 +915,6 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) devm_kfree(ice_hw_to_dev(hw), lst_itr); } } - devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf); } ice_rm_all_sw_replay_rule_info(hw); devm_kfree(ice_hw_to_dev(hw), sw->recp_list); @@ -997,6 +948,33 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw) } /** + * ice_wait_for_fw - wait for full FW readiness + * @hw: pointer to the hardware structure + * @timeout: milliseconds that can elapse before timing out + * + * Return: 0 on success, -ETIMEDOUT on timeout. 
+ */ +static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout) +{ + int fw_loading; + u32 elapsed = 0; + + while (elapsed <= timeout) { + fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M; + + /* firmware was not yet loaded, we have to wait more */ + if (fw_loading) { + elapsed += 100; + msleep(100); + continue; + } + return 0; + } + + return -ETIMEDOUT; +} + +/** * ice_init_hw - main hardware initialization routine * @hw: pointer to the hardware structure */ @@ -1056,6 +1034,7 @@ int ice_init_hw(struct ice_hw *hw) goto err_unroll_cqinit; } + hw->port_info->local_fwd_mode = ICE_LOCAL_FWD_MODE_ENABLED; /* set the back pointer to HW */ hw->port_info->hw = hw; @@ -1142,8 +1121,23 @@ int ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_fltr_mgmt_struct; mutex_init(&hw->tnl_lock); - return 0; + ice_init_chk_recipe_reuse_support(hw); + + /* Some cards require longer initialization times + * due to necessity of loading FW from an external source. + * This can take even half a minute. + */ + if (ice_is_pf_c827(hw)) { + status = ice_wait_for_fw(hw, 30000); + if (status) { + dev_err(ice_hw_to_dev(hw), "ice_wait_for_fw timed out"); + goto err_unroll_fltr_mgmt_struct; + } + } + + hw->lane_num = ice_get_phy_lane_number(hw); + return 0; err_unroll_fltr_mgmt_struct: ice_cleanup_fltr_mgmt_struct(hw); err_unroll_sched: @@ -1329,39 +1323,31 @@ int ice_reset(struct ice_hw *hw, enum ice_reset_req req) } /** - * ice_copy_rxq_ctx_to_hw + * ice_copy_rxq_ctx_to_hw - Copy packed Rx queue context to HW registers * @hw: pointer to the hardware structure - * @ice_rxq_ctx: pointer to the rxq context + * @rxq_ctx: pointer to the packed Rx queue context * @rxq_index: the index of the Rx queue - * - * Copies rxq context from dense structure to HW register space */ -static int -ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index) +static void ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, + const ice_rxq_ctx_buf_t *rxq_ctx, + u32 rxq_index) { - u8 i; - - if (!ice_rxq_ctx) - return -EINVAL; - - if (rxq_index > QRX_CTRL_MAX_INDEX) - return -EINVAL; - /* Copy each dword separately to HW */ - for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) { - wr32(hw, QRX_CONTEXT(i, rxq_index), - *((u32 *)(ice_rxq_ctx + (i * sizeof(u32))))); + for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) { + u32 ctx = ((const u32 *)rxq_ctx)[i]; - ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, - *((u32 *)(ice_rxq_ctx + (i * sizeof(u32))))); - } + wr32(hw, QRX_CONTEXT(i, rxq_index), ctx); - return 0; + ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, ctx); + } } +#define ICE_CTX_STORE(struct_name, struct_field, width, lsb) \ + PACKED_FIELD((lsb) + (width) - 1, (lsb), struct struct_name, struct_field) + /* LAN Rx Queue Context */ -static const struct ice_ctx_ele ice_rlan_ctx_info[] = { - /* Field Width LSB */ +static const struct packed_field_u8 ice_rlan_ctx_fields[] = { + /* Field Width LSB */ ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0), ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13), ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32), @@ -1382,35 +1368,50 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = { ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196), ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198), ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201), - { 0 } }; /** - * ice_write_rxq_ctx + * ice_pack_rxq_ctx - Pack Rx queue context into a HW buffer + * @ctx: the Rx queue context to pack + * @buf: the HW buffer to pack into + * + * Pack the Rx queue context from the CPU-friendly unpacked buffer into its + * bit-packed 
HW layout. + */ +static void ice_pack_rxq_ctx(const struct ice_rlan_ctx *ctx, + ice_rxq_ctx_buf_t *buf) +{ + pack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields, + QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST); +} + +/** + * ice_write_rxq_ctx - Write Rx Queue context to hardware * @hw: pointer to the hardware structure - * @rlan_ctx: pointer to the rxq context + * @rlan_ctx: pointer to the unpacked Rx queue context * @rxq_index: the index of the Rx queue * - * Converts rxq context from sparse to dense structure and then writes - * it to HW register space and enables the hardware to prefetch descriptors - * instead of only fetching them on demand + * Pack the sparse Rx Queue context into dense hardware format and write it + * into the HW register space. + * + * Return: 0 on success, or -EINVAL if the Rx queue index is invalid. */ int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, u32 rxq_index) { - u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 }; + ice_rxq_ctx_buf_t buf = {}; - if (!rlan_ctx) + if (rxq_index > QRX_CTRL_MAX_INDEX) return -EINVAL; - rlan_ctx->prefena = 1; + ice_pack_rxq_ctx(rlan_ctx, &buf); + ice_copy_rxq_ctx_to_hw(hw, &buf, rxq_index); - ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info); - return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index); + return 0; } /* LAN Tx Queue Context */ -const struct ice_ctx_ele ice_tlan_ctx_info[] = { +static const struct packed_field_u8 ice_tlan_ctx_fields[] = { /* Field Width LSB */ ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0), ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57), @@ -1439,10 +1440,22 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = { ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165), ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166), ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168), - ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171), - { 0 } }; +/** + * ice_pack_txq_ctx - Pack Tx queue context into a HW buffer + * @ctx: the Tx queue context to pack + * @buf: the HW buffer to pack into + * + * Pack the Tx queue context from the CPU-friendly unpacked buffer into its + * bit-packed HW layout. 
+ */ +void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf) +{ + pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields, + QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST); +} + /* Sideband Queue command wrappers */ /** @@ -1465,8 +1478,9 @@ ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc, * ice_sbq_rw_reg - Fill Sideband Queue command * @hw: pointer to the HW struct * @in: message info to be filled in descriptor + * @flags: control queue descriptor flags */ -int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in) +int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flags) { struct ice_sbq_cmd_desc desc = {0}; struct ice_sbq_msg_req msg = {0}; @@ -1490,7 +1504,7 @@ int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in) */ msg_len -= sizeof(msg.data); - desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD); + desc.flags = cpu_to_le16(flags); desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req); desc.param0.cmd_len = cpu_to_le16(msg_len); status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL); @@ -1615,6 +1629,8 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, case ice_aqc_opc_set_port_params: case ice_aqc_opc_get_vlan_mode_parameters: case ice_aqc_opc_set_vlan_mode_parameters: + case ice_aqc_opc_set_tx_topo: + case ice_aqc_opc_get_tx_topo: case ice_aqc_opc_add_recipe: case ice_aqc_opc_recipe_to_profile: case ice_aqc_opc_get_recipe: @@ -2143,7 +2159,8 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, caps->nvm_unified_update); break; case ICE_AQC_CAPS_RDMA: - caps->rdma = (number == 1); + if (IS_ENABLED(CONFIG_INFINIBAND_IRDMA)) + caps->rdma = (number == 1); ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma); break; case ICE_AQC_CAPS_MAX_MTU: @@ -2171,6 +2188,9 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps, ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n", prefix, caps->sriov_lag); break; + case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE: + caps->tx_sched_topo_comp_mode_en = (number == 1); + break; default: /* Not one of the recognized common capabilities */ found = false; @@ -2277,8 +2297,13 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0); info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0); - info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number); - info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); + if (hw->mac_type != ICE_MAC_GENERIC_3K_E825) { + info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number); + info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); + } else { + info->clk_freq = ICE_TIME_REF_FREQ_156_250; + info->clk_src = ICE_CLK_SRC_TCXO; + } if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) { info->time_ref = (enum ice_time_ref_freq)info->clk_freq; @@ -2398,6 +2423,25 @@ ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, } /** + * ice_func_id_to_logical_id - map from function id to logical pf id + * @active_function_bitmap: active function bitmap + * @pf_id: function number of device + * + * Return: logical PF ID. 
+ */ +static int ice_func_id_to_logical_id(u32 active_function_bitmap, u8 pf_id) +{ + u8 logical_id = 0; + u8 i; + + for (i = 0; i < pf_id; i++) + if (active_function_bitmap & BIT(i)) + logical_id++; + + return logical_id; +} + +/** * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps * @hw: pointer to the HW struct * @dev_p: pointer to device capabilities structure @@ -2414,6 +2458,8 @@ ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, dev_p->num_funcs = hweight32(number); ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n", dev_p->num_funcs); + + hw->logical_pf_id = ice_func_id_to_logical_id(number, hw->pf_id); } /** @@ -2484,6 +2530,7 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0); info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0); + info->ll_phy_tmr_update = ((number & ICE_TS_LL_PHY_TMR_UPDATE_M) != 0); info->ena_ports = logical_id; info->tmr_own_map = phys_id; @@ -2506,6 +2553,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, info->ts_ll_read); ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n", info->ts_ll_int_read); + ice_debug(hw, ICE_DBG_INIT, "dev caps: ll_phy_tmr_update = %u\n", + info->ll_phy_tmr_update); ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n", info->ena_ports); ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n", @@ -2552,6 +2601,34 @@ ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, } /** + * ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap + * @hw: pointer to the HW struct + * @dev_p: pointer to device capabilities structure + * @cap: capability element to parse + * + * Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities. + */ +static void ice_parse_nac_topo_dev_caps(struct ice_hw *hw, + struct ice_hw_dev_caps *dev_p, + struct ice_aqc_list_caps_elem *cap) +{ + dev_p->nac_topo.mode = le32_to_cpu(cap->number); + dev_p->nac_topo.id = le32_to_cpu(cap->phys_id) & ICE_NAC_TOPO_ID_M; + + dev_info(ice_hw_to_dev(hw), + "PF is configured in %s mode with IP instance ID %d\n", + (dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) ? 
+ "primary" : "secondary", dev_p->nac_topo.id); + + ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n", + !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M)); + ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n", + !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M)); + ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n", + dev_p->nac_topo.id); +} + +/** * ice_parse_dev_caps - Parse device capabilities * @hw: pointer to the HW struct * @dev_p: pointer to device capabilities structure @@ -2602,6 +2679,9 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, case ICE_AQC_CAPS_SENSOR_READING: ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]); break; + case ICE_AQC_CAPS_NAC_TOPOLOGY: + ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]); + break; default: /* Don't list common capabilities as unknown */ if (!found) @@ -2615,40 +2695,6 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, } /** - * ice_is_pf_c827 - check if pf contains c827 phy - * @hw: pointer to the hw struct - */ -bool ice_is_pf_c827(struct ice_hw *hw) -{ - struct ice_aqc_get_link_topo cmd = {}; - u8 node_part_number; - u16 node_handle; - int status; - - if (hw->mac_type != ICE_MAC_E810) - return false; - - if (hw->device_id != ICE_DEV_ID_E810C_QSFP) - return true; - - cmd.addr.topo_params.node_type_ctx = - FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) | - FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT); - cmd.addr.topo_params.index = 0; - - status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number, - &node_handle); - - if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827) - return false; - - if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE) - return true; - - return false; -} - -/** * ice_is_phy_rclk_in_netlist * @hw: pointer to the hw struct * @@ -2656,9 +2702,11 @@ bool ice_is_pf_c827(struct ice_hw *hw) */ bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw) { - if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, + if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY, + ICE_AQC_LINK_TOPO_NODE_CTX_PORT, ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) && - ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, + ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY, + ICE_AQC_LINK_TOPO_NODE_CTX_PORT, ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL)) return false; @@ -2674,6 +2722,7 @@ bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw) bool ice_is_clock_mux_in_netlist(struct ice_hw *hw) { if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX, + ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL, ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX, NULL)) return false; @@ -2694,12 +2743,14 @@ bool ice_is_clock_mux_in_netlist(struct ice_hw *hw) bool ice_is_cgu_in_netlist(struct ice_hw *hw) { if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, + ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL, ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032, NULL)) { hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032; return true; } else if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL, + ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL, ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384, NULL)) { hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384; @@ -2718,6 +2769,7 @@ bool ice_is_cgu_in_netlist(struct ice_hw *hw) bool ice_is_gps_in_netlist(struct ice_hw *hw) { if (ice_find_netlist_node(hw, 
ICE_AQC_LINK_TOPO_NODE_TYPE_GPS, + ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL, ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL)) return false; @@ -2997,6 +3049,9 @@ ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan, cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA; cmd->cmd_flags = cpu_to_le16(cmd_flags); + cmd->local_fwd_mode = pi->local_fwd_mode | + ICE_AQC_SET_P_PARAMS_LOCAL_FWD_MODE_VALID; + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } @@ -3030,11 +3085,13 @@ bool ice_is_100m_speed_supported(struct ice_hw *hw) * Note: In the structure of [phy_type_low, phy_type_high], there should * be one bit set, as this function will convert one PHY type to its * speed. - * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned - * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned + * + * Return: + * * PHY speed for recognized PHY type + * * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned + * * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned */ -static u16 -ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) +u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) { u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; @@ -3135,6 +3192,16 @@ ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) case ICE_PHY_TYPE_HIGH_100G_AUI2: speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB; break; + case ICE_PHY_TYPE_HIGH_200G_CR4_PAM4: + case ICE_PHY_TYPE_HIGH_200G_SR4: + case ICE_PHY_TYPE_HIGH_200G_FR4: + case ICE_PHY_TYPE_HIGH_200G_LR4: + case ICE_PHY_TYPE_HIGH_200G_DR4: + case ICE_PHY_TYPE_HIGH_200G_KR4_PAM4: + case ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC: + case ICE_PHY_TYPE_HIGH_200G_AUI4: + speed_phy_type_high = ICE_AQ_LINK_SPEED_200GB; + break; default: speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; break; @@ -3286,6 +3353,100 @@ int ice_update_link_info(struct ice_port_info *pi) } /** + * ice_aq_get_phy_equalization - function to read serdes equaliser + * value from firmware using admin queue command. + * @hw: pointer to the HW struct + * @data_in: represents the serdes equalization parameter requested + * @op_code: represents the serdes number and flag to represent tx or rx + * @serdes_num: represents the serdes number + * @output: pointer to the caller-supplied buffer to return serdes equaliser + * + * Return: non-zero status on error and 0 on success. + */ +int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code, + u8 serdes_num, int *output) +{ + struct ice_aqc_dnl_call_command *cmd; + struct ice_aqc_dnl_call buf = {}; + struct ice_aq_desc desc; + int err; + + buf.sto.txrx_equa_reqs.data_in = cpu_to_le16(data_in); + buf.sto.txrx_equa_reqs.op_code_serdes_sel = + cpu_to_le16(op_code | (serdes_num & 0xF)); + cmd = &desc.params.dnl_call; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dnl_call); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF | + ICE_AQ_FLAG_RD | + ICE_AQ_FLAG_SI); + desc.datalen = cpu_to_le16(sizeof(struct ice_aqc_dnl_call)); + cmd->activity_id = cpu_to_le16(ICE_AQC_ACT_ID_DNL); + + err = ice_aq_send_cmd(hw, &desc, &buf, sizeof(struct ice_aqc_dnl_call), + NULL); + *output = err ? 
0 : buf.sto.txrx_equa_resp.val; + + return err; +} + +#define FEC_REG_PORT(port) { \ + FEC_CORR_LOW_REG_PORT##port, \ + FEC_CORR_HIGH_REG_PORT##port, \ + FEC_UNCORR_LOW_REG_PORT##port, \ + FEC_UNCORR_HIGH_REG_PORT##port, \ +} + +static const u32 fec_reg[][ICE_FEC_MAX] = { + FEC_REG_PORT(0), + FEC_REG_PORT(1), + FEC_REG_PORT(2), + FEC_REG_PORT(3) +}; + +/** + * ice_aq_get_fec_stats - reads fec stats from phy + * @hw: pointer to the HW struct + * @pcs_quad: represents pcsquad of user input serdes + * @pcs_port: represents the pcs port number part of above pcs quad + * @fec_type: represents FEC stats type + * @output: pointer to the caller-supplied buffer to return requested fec stats + * + * Return: non-zero status on error and 0 on success. + */ +int ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port, + enum ice_fec_stats_types fec_type, u32 *output) +{ + u16 flag = (ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF | ICE_AQ_FLAG_SI); + struct ice_sbq_msg_input msg = {}; + u32 receiver_id, reg_offset; + int err; + + if (pcs_port > 3) + return -EINVAL; + + reg_offset = fec_reg[pcs_port][fec_type]; + + if (pcs_quad == 0) + receiver_id = FEC_RECEIVER_ID_PCS0; + else if (pcs_quad == 1) + receiver_id = FEC_RECEIVER_ID_PCS1; + else + return -EINVAL; + + msg.msg_addr_low = lower_16_bits(reg_offset); + msg.msg_addr_high = receiver_id; + msg.opcode = ice_sbq_msg_rd; + msg.dest_dev = ice_sbq_dev_phy_0; + + err = ice_sbq_rw_reg(hw, &msg, flag); + if (err) + return err; + + *output = msg.data; + return 0; +} + +/** * ice_cache_phy_user_req * @pi: port information structure * @cache_data: PHY logging data @@ -3887,6 +4048,59 @@ ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid, } /** + * ice_get_phy_lane_number - Get PHY lane number for current adapter + * @hw: pointer to the hw struct + * + * Return: PHY lane number on success, negative error code otherwise. 
+ */ +int ice_get_phy_lane_number(struct ice_hw *hw) +{ + struct ice_aqc_get_port_options_elem *options; + unsigned int lport = 0; + unsigned int lane; + int err; + + options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL); + if (!options) + return -ENOMEM; + + for (lane = 0; lane < ICE_MAX_PORT_PER_PCI_DEV; lane++) { + u8 options_count = ICE_AQC_PORT_OPT_MAX; + u8 speed, active_idx, pending_idx; + bool active_valid, pending_valid; + + err = ice_aq_get_port_options(hw, options, &options_count, lane, + true, &active_idx, &active_valid, + &pending_idx, &pending_valid); + if (err) + goto err; + + if (!active_valid) + continue; + + speed = options[active_idx].max_lane_speed; + /* If we don't get speed for this lane, it's unoccupied */ + if (speed > ICE_AQC_PORT_OPT_MAX_LANE_200G) + continue; + + if (hw->pf_id == lport) { + if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 && + ice_is_dual(hw) && !ice_is_primary(hw)) + lane += ICE_PORTS_PER_QUAD; + kfree(options); + return lane; + } + lport++; + } + + /* PHY lane not found */ + err = -ENXIO; +err: + kfree(options); + return err; +} + +/** * ice_aq_sff_eeprom * @hw: pointer to the HW struct * @lport: bits [7:0] = logical port, bit [8] = logical port valid @@ -4349,205 +4563,6 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, /* End of FW Admin Queue command wrappers */ /** - * ice_pack_ctx_byte - write a byte to a packed context structure - * @src_ctx: unpacked source context structure - * @dest_ctx: packed destination context data - * @ce_info: context element description - */ -static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) -{ - u8 src_byte, dest_byte, mask; - u8 *from, *dest; - u16 shift_width; - - /* copy from the next struct field */ - from = src_ctx + ce_info->offset; - - /* prepare the bits and mask */ - shift_width = ce_info->lsb % 8; - mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); - - src_byte = *from; - src_byte <<= shift_width; - src_byte &= mask; - - /* get the current bits from the target bit string */ - dest = dest_ctx + (ce_info->lsb / 8); - - memcpy(&dest_byte, dest, sizeof(dest_byte)); - - dest_byte &= ~mask; /* get the bits not changing */ - dest_byte |= src_byte; /* add in the new bits */ - - /* put it all back */ - memcpy(dest, &dest_byte, sizeof(dest_byte)); -} - -/** - * ice_pack_ctx_word - write a word to a packed context structure - * @src_ctx: unpacked source context structure - * @dest_ctx: packed destination context data - * @ce_info: context element description - */ -static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) -{ - u16 src_word, mask; - __le16 dest_word; - u8 *from, *dest; - u16 shift_width; - - /* copy from the next struct field */ - from = src_ctx + ce_info->offset; - - /* prepare the bits and mask */ - shift_width = ce_info->lsb % 8; - mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); - - /* don't swizzle the bits until after the mask because the mask bits - * will be in a different bit position on big endian machines - */ - src_word = *(u16 *)from; - src_word <<= shift_width; - src_word &= mask; - - /* get the current bits from the target bit string */ - dest = dest_ctx + (ce_info->lsb / 8); - - memcpy(&dest_word, dest, sizeof(dest_word)); - - dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */ - dest_word |= cpu_to_le16(src_word); /* add in the new bits */ - - /* put it all back */ - memcpy(dest, &dest_word, sizeof(dest_word)); -} - -/** - * 
ice_pack_ctx_dword - write a dword to a packed context structure - * @src_ctx: unpacked source context structure - * @dest_ctx: packed destination context data - * @ce_info: context element description - */ -static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) -{ - u32 src_dword, mask; - __le32 dest_dword; - u8 *from, *dest; - u16 shift_width; - - /* copy from the next struct field */ - from = src_ctx + ce_info->offset; - - /* prepare the bits and mask */ - shift_width = ce_info->lsb % 8; - mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); - - /* don't swizzle the bits until after the mask because the mask bits - * will be in a different bit position on big endian machines - */ - src_dword = *(u32 *)from; - src_dword <<= shift_width; - src_dword &= mask; - - /* get the current bits from the target bit string */ - dest = dest_ctx + (ce_info->lsb / 8); - - memcpy(&dest_dword, dest, sizeof(dest_dword)); - - dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */ - dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */ - - /* put it all back */ - memcpy(dest, &dest_dword, sizeof(dest_dword)); -} - -/** - * ice_pack_ctx_qword - write a qword to a packed context structure - * @src_ctx: unpacked source context structure - * @dest_ctx: packed destination context data - * @ce_info: context element description - */ -static void ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) -{ - u64 src_qword, mask; - __le64 dest_qword; - u8 *from, *dest; - u16 shift_width; - - /* copy from the next struct field */ - from = src_ctx + ce_info->offset; - - /* prepare the bits and mask */ - shift_width = ce_info->lsb % 8; - mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width); - - /* don't swizzle the bits until after the mask because the mask bits - * will be in a different bit position on big endian machines - */ - src_qword = *(u64 *)from; - src_qword <<= shift_width; - src_qword &= mask; - - /* get the current bits from the target bit string */ - dest = dest_ctx + (ce_info->lsb / 8); - - memcpy(&dest_qword, dest, sizeof(dest_qword)); - - dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */ - dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */ - - /* put it all back */ - memcpy(dest, &dest_qword, sizeof(dest_qword)); -} - -/** - * ice_set_ctx - set context bits in packed structure - * @hw: pointer to the hardware structure - * @src_ctx: pointer to a generic non-packed context structure - * @dest_ctx: pointer to memory for the packed structure - * @ce_info: List of Rx context elements - */ -int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) -{ - int f; - - for (f = 0; ce_info[f].width; f++) { - /* We have to deal with each element of the FW response - * using the correct size so that we are correct regardless - * of the endianness of the machine. - */ - if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) { - ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... 
skipping write\n", - f, ce_info[f].width, ce_info[f].size_of); - continue; - } - switch (ce_info[f].size_of) { - case sizeof(u8): - ice_pack_ctx_byte(src_ctx, dest_ctx, &ce_info[f]); - break; - case sizeof(u16): - ice_pack_ctx_word(src_ctx, dest_ctx, &ce_info[f]); - break; - case sizeof(u32): - ice_pack_ctx_dword(src_ctx, dest_ctx, &ce_info[f]); - break; - case sizeof(u64): - ice_pack_ctx_qword(src_ctx, dest_ctx, &ce_info[f]); - break; - default: - return -EINVAL; - } - } - - return 0; -} - -/** * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC * @hw: pointer to the HW struct * @vsi_handle: software VSI handle @@ -5641,6 +5656,96 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, } /** + * ice_get_pca9575_handle - find and return the PCA9575 controller + * @hw: pointer to the hw struct + * @pca9575_handle: GPIO controller's handle + * + * Find and return the GPIO controller's handle in the netlist. + * When found - the value will be cached in the hw structure and following calls + * will return cached value. + * + * Return: 0 on success, -ENXIO when there's no PCA9575 present. + */ +int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle) +{ + struct ice_aqc_get_link_topo *cmd; + struct ice_aq_desc desc; + int err; + u8 idx; + + /* If handle was read previously return cached value */ + if (hw->io_expander_handle) { + *pca9575_handle = hw->io_expander_handle; + return 0; + } + +#define SW_PCA9575_SFP_TOPO_IDX 2 +#define SW_PCA9575_QSFP_TOPO_IDX 1 + + /* Check if the SW IO expander controlling SMA exists in the netlist. */ + if (hw->device_id == ICE_DEV_ID_E810C_SFP) + idx = SW_PCA9575_SFP_TOPO_IDX; + else if (hw->device_id == ICE_DEV_ID_E810C_QSFP) + idx = SW_PCA9575_QSFP_TOPO_IDX; + else + return -ENXIO; + + /* If handle was not detected read it from the netlist */ + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); + cmd = &desc.params.get_link_topo; + cmd->addr.topo_params.node_type_ctx = + ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL; + cmd->addr.topo_params.index = idx; + + err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); + if (err) + return -ENXIO; + + /* Verify if we found the right IO expander type */ + if (desc.params.get_link_topo.node_part_num != + ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575) + return -ENXIO; + + /* If present save the handle and return it */ + hw->io_expander_handle = + le16_to_cpu(desc.params.get_link_topo.addr.handle); + *pca9575_handle = hw->io_expander_handle; + + return 0; +} + +/** + * ice_read_pca9575_reg - read the register from the PCA9575 controller + * @hw: pointer to the hw struct + * @offset: GPIO controller register offset + * @data: pointer to data to be read from the GPIO controller + * + * Return: 0 on success, negative error code otherwise. 
+ */ +int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data) +{ + struct ice_aqc_link_topo_addr link_topo; + __le16 addr; + u16 handle; + int err; + + memset(&link_topo, 0, sizeof(link_topo)); + + err = ice_get_pca9575_handle(hw, &handle); + if (err) + return err; + + link_topo.handle = cpu_to_le16(handle); + link_topo.topo_params.node_type_ctx = + FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, + ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED); + + addr = cpu_to_le16((u16)offset); + + return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL); +} + +/** * ice_aq_set_gpio * @hw: pointer to the hw struct * @gpio_ctrl_handle: GPIO controller node handle @@ -5823,6 +5928,44 @@ bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps) } /** + * ice_is_fw_health_report_supported - checks if firmware supports health events + * @hw: pointer to the hardware structure + * + * Return: true if firmware supports health status reports, + * false otherwise + */ +bool ice_is_fw_health_report_supported(struct ice_hw *hw) +{ + return ice_is_fw_api_min_ver(hw, ICE_FW_API_HEALTH_REPORT_MAJ, + ICE_FW_API_HEALTH_REPORT_MIN, + ICE_FW_API_HEALTH_REPORT_PATCH); +} + +/** + * ice_aq_set_health_status_cfg - Configure FW health events + * @hw: pointer to the HW struct + * @event_source: type of diagnostic events to enable + * + * Configure the health status event types that the firmware will send to this + * PF. The supported event types are: PF-specific, all PFs, and global. + * + * Return: 0 on success, negative error code otherwise. + */ +int ice_aq_set_health_status_cfg(struct ice_hw *hw, u8 event_source) +{ + struct ice_aqc_set_health_status_cfg *cmd; + struct ice_aq_desc desc; + + cmd = &desc.params.set_health_status_cfg; + + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_health_status_cfg); + + cmd->event_source = event_source; + + return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); +} + +/** * ice_aq_set_lldp_mib - Set the LLDP MIB * @hw: pointer to the HW struct * @mib_type: Local, Remote or both Local and Remote MIBs @@ -5872,15 +6015,21 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw) /** * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter * @hw: pointer to HW struct - * @vsi_num: absolute HW index for VSI + * @vsi: VSI to add the filter to * @add: boolean for if adding or removing a filter + * + * Return: 0 on success, -EOPNOTSUPP if the operation cannot be performed + * with this HW or VSI, otherwise an error corresponding to + * the AQ transaction result. 
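+ *
+ * Illustrative call from a hypothetical caller enabling the filter on the
+ * PF VSI (not taken verbatim from this change):
+ *
+ *	err = ice_lldp_fltr_add_remove(&pf->hw, vsi, true);
+ *	if (err && err != -EOPNOTSUPP)
+ *		dev_err(ice_pf_to_dev(pf), "Failed to add LLDP filter, err %d\n",
+ *			err);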
*/ -int -ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add) +int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add) { struct ice_aqc_lldp_filter_ctrl *cmd; struct ice_aq_desc desc; + if (vsi->type != ICE_VSI_PF || !ice_fw_supports_lldp_fltr_ctrl(hw)) + return -EOPNOTSUPP; + cmd = &desc.params.lldp_filter_ctrl; ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl); @@ -5890,7 +6039,7 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add) else cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE; - cmd->vsi_num = cpu_to_le16(vsi_num); + cmd->vsi_num = cpu_to_le16(vsi->vsi_num); return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); } diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index ffb22c7ce28b..64c530b39191 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -10,6 +10,7 @@ #include "ice_type.h" #include "ice_nvm.h" #include "ice_flex_pipe.h" +#include "ice_parser.h" #include <linux/avf/virtchnl.h> #include "ice_switch.h" #include "ice_fdir.h" @@ -17,13 +18,34 @@ #define ICE_SQ_SEND_DELAY_TIME_MS 10 #define ICE_SQ_SEND_MAX_EXECUTE 3 +#define FEC_REG_SHIFT 2 +#define FEC_RECV_ID_SHIFT 4 +#define FEC_CORR_LOW_REG_PORT0 (0x02 << FEC_REG_SHIFT) +#define FEC_CORR_HIGH_REG_PORT0 (0x03 << FEC_REG_SHIFT) +#define FEC_UNCORR_LOW_REG_PORT0 (0x04 << FEC_REG_SHIFT) +#define FEC_UNCORR_HIGH_REG_PORT0 (0x05 << FEC_REG_SHIFT) +#define FEC_CORR_LOW_REG_PORT1 (0x42 << FEC_REG_SHIFT) +#define FEC_CORR_HIGH_REG_PORT1 (0x43 << FEC_REG_SHIFT) +#define FEC_UNCORR_LOW_REG_PORT1 (0x44 << FEC_REG_SHIFT) +#define FEC_UNCORR_HIGH_REG_PORT1 (0x45 << FEC_REG_SHIFT) +#define FEC_CORR_LOW_REG_PORT2 (0x4A << FEC_REG_SHIFT) +#define FEC_CORR_HIGH_REG_PORT2 (0x4B << FEC_REG_SHIFT) +#define FEC_UNCORR_LOW_REG_PORT2 (0x4C << FEC_REG_SHIFT) +#define FEC_UNCORR_HIGH_REG_PORT2 (0x4D << FEC_REG_SHIFT) +#define FEC_CORR_LOW_REG_PORT3 (0x52 << FEC_REG_SHIFT) +#define FEC_CORR_HIGH_REG_PORT3 (0x53 << FEC_REG_SHIFT) +#define FEC_UNCORR_LOW_REG_PORT3 (0x54 << FEC_REG_SHIFT) +#define FEC_UNCORR_HIGH_REG_PORT3 (0x55 << FEC_REG_SHIFT) +#define FEC_RECEIVER_ID_PCS0 (0x33 << FEC_RECV_ID_SHIFT) +#define FEC_RECEIVER_ID_PCS1 (0x34 << FEC_RECV_ID_SHIFT) + int ice_init_hw(struct ice_hw *hw); void ice_deinit_hw(struct ice_hw *hw); int ice_check_reset(struct ice_hw *hw); int ice_reset(struct ice_hw *hw, enum ice_reset_req req); int ice_create_all_ctrlq(struct ice_hw *hw); int ice_init_all_ctrlq(struct ice_hw *hw); -void ice_shutdown_all_ctrlq(struct ice_hw *hw); +void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading); void ice_destroy_all_ctrlq(struct ice_hw *hw); int ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, @@ -70,9 +92,8 @@ ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle, bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq); int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading); void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode); -extern const struct ice_ctx_ele ice_tlan_ctx_info[]; -int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info); + +void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf); extern struct mutex ice_global_cfg_lock_sw; @@ -91,7 +112,6 @@ int ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, struct ice_aqc_get_phy_caps_data *caps, struct ice_sq_cd *cd); -bool 
ice_is_pf_c827(struct ice_hw *hw); bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw); bool ice_is_clock_mux_in_netlist(struct ice_hw *hw); bool ice_is_cgu_in_netlist(struct ice_hw *hw); @@ -111,7 +131,6 @@ int ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags, struct ice_sq_cd *cd); bool ice_is_generic_mac(struct ice_hw *hw); -bool ice_is_e810(struct ice_hw *hw); int ice_clear_pf_cfg(struct ice_hw *hw); int ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi, @@ -121,6 +140,13 @@ int ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, struct ice_port_info *pi); bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps); +bool ice_is_fw_health_report_supported(struct ice_hw *hw); +int ice_aq_set_health_status_cfg(struct ice_hw *hw, u8 event_source); +int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code, + u8 serdes_num, int *output); +int +ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port, + enum ice_fec_stats_types fec_type, u32 *output); enum ice_fc_mode ice_caps_to_fc_mode(u8 caps); enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options); @@ -166,6 +192,7 @@ ice_aq_get_port_options(struct ice_hw *hw, int ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid, u8 new_option); +int ice_get_phy_lane_number(struct ice_hw *hw); int ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr, u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length, @@ -201,7 +228,7 @@ int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); void ice_replay_post(struct ice_hw *hw); struct ice_q_ctx * ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle); -int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in); +int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flag); int ice_aq_get_cgu_abilities(struct ice_hw *hw, struct ice_aqc_get_cgu_abilities *abilities); @@ -248,9 +275,6 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); -bool ice_is_e810t(struct ice_hw *hw); -bool ice_is_e823(struct ice_hw *hw); -bool ice_is_e825c(struct ice_hw *hw); int ice_sched_query_elem(struct ice_hw *hw, u32 node_teid, struct ice_aqc_txsched_elem_data *buf); @@ -261,12 +285,12 @@ int ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool *value, struct ice_sq_cd *cd); bool ice_is_100m_speed_supported(struct ice_hw *hw); +u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high); int ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, struct ice_sq_cd *cd); bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw); -int -ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add); +int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add); int ice_lldp_execute_pending_mib(struct ice_hw *hw); int ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, @@ -276,5 +300,7 @@ int ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr, u16 bus_addr, __le16 addr, u8 params, const u8 *data, struct ice_sq_cd *cd); +int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle); +int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data); bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw); #endif /* _ICE_COMMON_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c 
b/drivers/net/ethernet/intel/ice/ice_controlq.c index ffe660f34992..e3959ad442a2 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -99,17 +99,6 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) return -ENOMEM; cq->sq.desc_buf.size = size; - cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries, - sizeof(struct ice_sq_cd), GFP_KERNEL); - if (!cq->sq.cmd_buf) { - dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size, - cq->sq.desc_buf.va, cq->sq.desc_buf.pa); - cq->sq.desc_buf.va = NULL; - cq->sq.desc_buf.pa = 0; - cq->sq.desc_buf.size = 0; - return -ENOMEM; - } - return 0; } @@ -188,7 +177,7 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) if (cq->rq_buf_size > ICE_AQ_LG_BUF) desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB); desc->opcode = 0; - /* This is in accordance with Admin queue design, there is no + /* This is in accordance with control queue design, there is no * register for buffer size configuration */ desc->datalen = cpu_to_le16(bi->size); @@ -338,8 +327,6 @@ do { \ (qi)->ring.r.ring##_bi[i].size = 0;\ } \ } \ - /* free the buffer info list */ \ - devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \ /* free DMA head */ \ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \ } while (0) @@ -405,11 +392,11 @@ init_ctrlq_exit: } /** - * ice_init_rq - initialize ARQ + * ice_init_rq - initialize receive side of a control queue * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue * - * The main initialization routine for the Admin Receive (Event) Queue. + * The main initialization routine for Receive side of a control queue. * Prior to calling this function, the driver *MUST* set the following fields * in the cq->structure: * - cq->num_rq_entries @@ -465,7 +452,7 @@ init_ctrlq_exit: } /** - * ice_shutdown_sq - shutdown the Control ATQ + * ice_shutdown_sq - shutdown the transmit side of a control queue * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue * @@ -482,7 +469,7 @@ static int ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) goto shutdown_sq_out; } - /* Stop firmware AdminQ processing */ + /* Stop processing of the control queue */ wr32(hw, cq->sq.head, 0); wr32(hw, cq->sq.tail, 0); wr32(hw, cq->sq.len, 0); @@ -501,7 +488,7 @@ shutdown_sq_out: } /** - * ice_aq_ver_check - Check the reported AQ API version. + * ice_aq_ver_check - Check the reported AQ API version * @hw: pointer to the hardware structure * * Checks if the driver should load on a given AQ API version. @@ -510,22 +497,31 @@ shutdown_sq_out: */ static bool ice_aq_ver_check(struct ice_hw *hw) { - if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) { + u8 exp_fw_api_ver_major = EXP_FW_API_VER_MAJOR_BY_MAC(hw); + u8 exp_fw_api_ver_minor = EXP_FW_API_VER_MINOR_BY_MAC(hw); + + if (hw->api_maj_ver > exp_fw_api_ver_major) { /* Major API version is newer than expected, don't load */ dev_warn(ice_hw_to_dev(hw), "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n"); return false; - } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) { - if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2)) + } else if (hw->api_maj_ver == exp_fw_api_ver_major) { + if (hw->api_min_ver > (exp_fw_api_ver_minor + 2)) dev_info(ice_hw_to_dev(hw), - "The driver for the device detected a newer version of the NVM image than expected. 
Please install the most recent version of the network driver.\n"); - else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR) + "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). Please install the most recent version of the network driver.\n", + hw->api_maj_ver, hw->api_min_ver, + exp_fw_api_ver_major, exp_fw_api_ver_minor); + else if ((hw->api_min_ver + 2) < exp_fw_api_ver_minor) dev_info(ice_hw_to_dev(hw), - "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); + "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n", + hw->api_maj_ver, hw->api_min_ver, + exp_fw_api_ver_major, exp_fw_api_ver_minor); } else { /* Major API version is older than expected, log a warning */ dev_info(ice_hw_to_dev(hw), - "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); + "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n", + hw->api_maj_ver, hw->api_min_ver, + exp_fw_api_ver_major, exp_fw_api_ver_minor); } return true; } @@ -684,10 +680,12 @@ struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw) * ice_shutdown_ctrlq - shutdown routine for any control queue * @hw: pointer to the hardware structure * @q_type: specific Control queue type + * @unloading: is the driver unloading itself * * NOTE: this function does not destroy the control queue locks. */ -static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) +static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type, + bool unloading) { struct ice_ctl_q_info *cq; @@ -695,7 +693,7 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) case ICE_CTL_Q_ADMIN: cq = &hw->adminq; if (ice_check_sq_alive(hw, cq)) - ice_aq_q_shutdown(hw, true); + ice_aq_q_shutdown(hw, unloading); break; case ICE_CTL_Q_SB: cq = &hw->sbq; @@ -714,20 +712,21 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) /** * ice_shutdown_all_ctrlq - shutdown routine for all control queues * @hw: pointer to the hardware structure + * @unloading: is the driver unloading itself * * NOTE: this function does not destroy the control queue locks. The driver * may call this at runtime to shutdown and later restart control queues, such * as in response to a reset event. 
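 *
 * As an illustration of the @unloading flag: a runtime reset path would pass
 * false (the driver intends to come back), while a teardown path passes true,
 * as ice_destroy_all_ctrlq() below now does:
 *
 *	ice_shutdown_all_ctrlq(hw, false);
 *	...
 *	ice_shutdown_all_ctrlq(hw, true);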
*/ -void ice_shutdown_all_ctrlq(struct ice_hw *hw) +void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading) { /* Shutdown FW admin queue */ - ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); + ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading); /* Shutdown PHY Sideband */ if (ice_is_sbq_supported(hw)) - ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB); + ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB, unloading); /* Shutdown PF-VF Mailbox */ - ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX); + ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading); } /** @@ -759,7 +758,7 @@ int ice_init_all_ctrlq(struct ice_hw *hw) break; ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n"); - ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); + ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, true); msleep(ICE_CTL_Q_ADMIN_INIT_MSEC); } while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT); @@ -840,7 +839,7 @@ static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq) void ice_destroy_all_ctrlq(struct ice_hw *hw) { /* shut down all the control queues first */ - ice_shutdown_all_ctrlq(hw); + ice_shutdown_all_ctrlq(hw, true); ice_destroy_ctrlq_locks(&hw->adminq); if (ice_is_sbq_supported(hw)) @@ -849,7 +848,7 @@ void ice_destroy_all_ctrlq(struct ice_hw *hw) } /** - * ice_clean_sq - cleans Admin send queue (ATQ) + * ice_clean_sq - cleans send side of a control queue * @hw: pointer to the hardware structure * @cq: pointer to the specific Control queue * @@ -859,21 +858,17 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) { struct ice_ctl_q_ring *sq = &cq->sq; u16 ntc = sq->next_to_clean; - struct ice_sq_cd *details; struct ice_aq_desc *desc; desc = ICE_CTL_Q_DESC(*sq, ntc); - details = ICE_CTL_Q_DETAILS(*sq, ntc); while (rd32(hw, cq->sq.head) != ntc) { ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head)); memset(desc, 0, sizeof(*desc)); - memset(details, 0, sizeof(*details)); ntc++; if (ntc == sq->count) ntc = 0; desc = ICE_CTL_Q_DESC(*sq, ntc); - details = ICE_CTL_Q_DETAILS(*sq, ntc); } sq->next_to_clean = ntc; @@ -882,18 +877,43 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) } /** + * ice_ctl_q_str - Convert control queue type to string + * @qtype: the control queue type + * + * Return: A string name for the given control queue type. + */ +static const char *ice_ctl_q_str(enum ice_ctl_q qtype) +{ + switch (qtype) { + case ICE_CTL_Q_UNKNOWN: + return "Unknown CQ"; + case ICE_CTL_Q_ADMIN: + return "AQ"; + case ICE_CTL_Q_MAILBOX: + return "MBXQ"; + case ICE_CTL_Q_SB: + return "SBQ"; + default: + return "Unrecognized CQ"; + } +} + +/** * ice_debug_cq * @hw: pointer to the hardware structure + * @cq: pointer to the specific Control queue * @desc: pointer to control queue descriptor * @buf: pointer to command buffer * @buf_len: max length of buf + * @response: true if this is the writeback response * * Dumps debug log about control command with descriptor contents. 
*/ -static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len) +static void ice_debug_cq(struct ice_hw *hw, struct ice_ctl_q_info *cq, + void *desc, void *buf, u16 buf_len, bool response) { struct ice_aq_desc *cq_desc = desc; - u16 len; + u16 datalen, flags; if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) && !((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask)) @@ -902,48 +922,63 @@ static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len) if (!desc) return; - len = le16_to_cpu(cq_desc->datalen); + datalen = le16_to_cpu(cq_desc->datalen); + flags = le16_to_cpu(cq_desc->flags); - ice_debug(hw, ICE_DBG_AQ_DESC, "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n", - le16_to_cpu(cq_desc->opcode), - le16_to_cpu(cq_desc->flags), - le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval)); - ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n", + ice_debug(hw, ICE_DBG_AQ_DESC, "%s %s: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n\tcookie (h,l) 0x%08X 0x%08X\n\tparam (0,1) 0x%08X 0x%08X\n\taddr (h,l) 0x%08X 0x%08X\n", + ice_ctl_q_str(cq->qtype), response ? "Response" : "Command", + le16_to_cpu(cq_desc->opcode), flags, datalen, + le16_to_cpu(cq_desc->retval), le32_to_cpu(cq_desc->cookie_high), - le32_to_cpu(cq_desc->cookie_low)); - ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n", + le32_to_cpu(cq_desc->cookie_low), le32_to_cpu(cq_desc->params.generic.param0), - le32_to_cpu(cq_desc->params.generic.param1)); - ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n", + le32_to_cpu(cq_desc->params.generic.param1), le32_to_cpu(cq_desc->params.generic.addr_high), le32_to_cpu(cq_desc->params.generic.addr_low)); - if (buf && cq_desc->datalen != 0) { - ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n"); - if (buf_len < len) - len = buf_len; - - ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, buf, len); + /* Dump buffer iff 1) one exists and 2) is either a response indicated + * by the DD and/or CMP flag set or a command with the RD flag set. + */ + if (buf && cq_desc->datalen && + (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP | ICE_AQ_FLAG_RD))) { + char prefix[] = KBUILD_MODNAME " 0x12341234 0x12341234 "; + + sprintf(prefix, KBUILD_MODNAME " 0x%08X 0x%08X ", + le32_to_cpu(cq_desc->params.generic.addr_high), + le32_to_cpu(cq_desc->params.generic.addr_low)); + ice_debug_array_w_prefix(hw, ICE_DBG_AQ_DESC_BUF, prefix, + buf, + min_t(u16, buf_len, datalen)); } } /** - * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ) + * ice_sq_done - poll until the last send on a control queue has completed * @hw: pointer to the HW struct * @cq: pointer to the specific Control queue * - * Returns true if the firmware has processed all descriptors on the - * admin send queue. Returns false if there are still requests pending. + * Use read_poll_timeout to poll the control queue head, checking until it + * matches next_to_use. According to the control queue designers, this has + * better timing reliability than the DD bit. + * + * Return: true if all the descriptors on the send side of a control queue + * are finished processing, false otherwise. */ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq) { - /* AQ designers suggest use of head for better - * timing reliability than DD bit + u32 head; + + /* Wait a short time before the initial check, to allow hardware time + * for completion. 
*/ - return rd32(hw, cq->sq.head) == cq->sq.next_to_use; + udelay(5); + + return !rd32_poll_timeout(hw, cq->sq.head, + head, head == cq->sq.next_to_use, + 20, ICE_CTL_Q_SQ_CMD_TIMEOUT); } /** - * ice_sq_send_cmd - send command to Control Queue (ATQ) + * ice_sq_send_cmd - send command to a control queue * @hw: pointer to the HW struct * @cq: pointer to the specific Control queue * @desc: prefilled descriptor describing the command @@ -951,8 +986,9 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq) * @buf_size: size of buffer for indirect commands (or 0 for direct commands) * @cd: pointer to command details structure * - * This is the main send command routine for the ATQ. It runs the queue, - * cleans the queue, etc. + * Main command for the transmit side of a control queue. It puts the command + * on the queue, bumps the tail, waits for processing of the command, captures + * command status and results, etc. */ int ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, @@ -962,8 +998,6 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_dma_mem *dma_buf = NULL; struct ice_aq_desc *desc_on_ring; bool cmd_completed = false; - struct ice_sq_cd *details; - unsigned long timeout; int status = 0; u16 retval = 0; u32 val = 0; @@ -1007,12 +1041,6 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, goto sq_send_command_error; } - details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use); - if (cd) - *details = *cd; - else - memset(details, 0, sizeof(*details)); - /* Call clean and check queue available function to reclaim the * descriptors that were processed by FW/MBX; the function returns the * number of desc available. The clean function called here could be @@ -1049,7 +1077,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, /* Debug desc and buffer */ ice_debug(hw, ICE_DBG_AQ_DESC, "ATQ: Control Send queue desc and buffer:\n"); - ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size); + ice_debug_cq(hw, cq, (void *)desc_on_ring, buf, buf_size, false); (cq->sq.next_to_use)++; if (cq->sq.next_to_use == cq->sq.count) @@ -1057,20 +1085,9 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, wr32(hw, cq->sq.tail, cq->sq.next_to_use); ice_flush(hw); - /* Wait a short time before initial ice_sq_done() check, to allow - * hardware time for completion. + /* Wait for the command to complete. If it finishes within the + * timeout, copy the descriptor back to temp. 
*/ - udelay(5); - - timeout = jiffies + ICE_CTL_Q_SQ_CMD_TIMEOUT; - do { - if (ice_sq_done(hw, cq)) - break; - - usleep_range(100, 150); - } while (time_before(jiffies, timeout)); - - /* if ready, copy the desc back to temp */ if (ice_sq_done(hw, cq)) { memcpy(desc, desc_on_ring, sizeof(*desc)); if (buf) { @@ -1102,12 +1119,11 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n"); - ice_debug_cq(hw, (void *)desc, buf, buf_size); + ice_debug_cq(hw, cq, (void *)desc, buf, buf_size, true); /* save writeback AQ if requested */ - if (details->wb_desc) - memcpy(details->wb_desc, desc_on_ring, - sizeof(*details->wb_desc)); + if (cd && cd->wb_desc) + memcpy(cd->wb_desc, desc_on_ring, sizeof(*cd->wb_desc)); /* update the error if time out occurred */ if (!cmd_completed) { @@ -1148,9 +1164,9 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode) * @e: event info from the receive descriptor, includes any buffers * @pending: number of events that could be left to process * - * This function cleans one Admin Receive Queue element and returns - * the contents through e. It can also return how many events are - * left to process through 'pending'. + * Clean one element from the receive side of a control queue. On return 'e' + * contains contents of the message, and 'pending' contains the number of + * events left to process. */ int ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, @@ -1206,7 +1222,7 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n"); - ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size); + ice_debug_cq(hw, cq, (void *)desc, e->msg_buf, cq->rq_buf_size, true); /* Restore the original datalen and buffer address in the desc, * FW updates datalen to indicate the event message size diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h index 8f2fd1613a95..ca97b7365a1b 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.h +++ b/drivers/net/ethernet/intel/ice/ice_controlq.h @@ -21,9 +21,18 @@ /* Defines that help manage the driver vs FW API checks. * Take a look at ice_aq_ver_check in ice_controlq.c for actual usage. */ -#define EXP_FW_API_VER_BRANCH 0x00 -#define EXP_FW_API_VER_MAJOR 0x01 -#define EXP_FW_API_VER_MINOR 0x05 +#define EXP_FW_API_VER_MAJOR_E810 0x01 +#define EXP_FW_API_VER_MINOR_E810 0x05 + +#define EXP_FW_API_VER_MAJOR_E830 0x01 +#define EXP_FW_API_VER_MINOR_E830 0x07 + +#define EXP_FW_API_VER_MAJOR_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \ + EXP_FW_API_VER_MAJOR_E830 : \ + EXP_FW_API_VER_MAJOR_E810) +#define EXP_FW_API_VER_MINOR_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \ + EXP_FW_API_VER_MINOR_E830 : \ + EXP_FW_API_VER_MINOR_E810) /* Different control queue types: These are mainly for SW consumption. 
*/ enum ice_ctl_q { @@ -34,14 +43,13 @@ enum ice_ctl_q { }; /* Control Queue timeout settings - max delay 1s */ -#define ICE_CTL_Q_SQ_CMD_TIMEOUT HZ /* Wait max 1s */ +#define ICE_CTL_Q_SQ_CMD_TIMEOUT USEC_PER_SEC #define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */ #define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */ struct ice_ctl_q_ring { void *dma_head; /* Virtual address to DMA head */ struct ice_dma_mem desc_buf; /* descriptor ring memory */ - void *cmd_buf; /* command buffer memory */ union { struct ice_dma_mem *sq_bi; @@ -71,8 +79,6 @@ struct ice_sq_cd { struct ice_aq_desc *wb_desc; }; -#define ICE_CTL_Q_DETAILS(R, i) (&(((struct ice_sq_cd *)((R).cmd_buf))[i])) - /* rq event information */ struct ice_rq_event_info { struct ice_aq_desc desc; diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c index 74418c445cc4..64737fc62306 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb.c @@ -1288,7 +1288,7 @@ ice_add_dscp_up_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg) tlv->ouisubtype = htonl(ouisubtype); /* bytes 0 - 63 - IPv4 DSCP2UP LUT */ - for (i = 0; i < ICE_DSCP_NUM_VAL; i++) { + for (i = 0; i < DSCP_MAX; i++) { /* IPv4 mapping */ buf[i] = dcbcfg->dscp_map[i]; /* IPv6 mapping */ diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c index 6e20ee610022..533eb8930aa8 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c @@ -3,7 +3,7 @@ #include "ice_dcb_lib.h" #include "ice_dcb_nl.h" -#include "ice_devlink.h" +#include "devlink/devlink.h" /** * ice_dcb_get_ena_tc - return bitmap of enabled TCs @@ -187,6 +187,7 @@ void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi) vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg); break; case ICE_VSI_CHNL: + case ICE_VSI_SF: vsi->tc_cfg.ena_tc = BIT(ice_get_first_droptc(vsi)); vsi->tc_cfg.numtc = 1; break; @@ -291,7 +292,6 @@ static void ice_dcb_ena_dis_vsi(struct ice_pf *pf, bool ena, bool locked) switch (vsi->type) { case ICE_VSI_CHNL: - case ICE_VSI_SWITCHDEV_CTRL: case ICE_VSI_PF: if (ena) ice_ena_vsi(vsi, locked); @@ -352,8 +352,8 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) struct ice_aqc_port_ets_elem buf = { 0 }; struct ice_dcbx_cfg *old_cfg, *curr_cfg; struct device *dev = ice_pf_to_dev(pf); + struct iidc_rdma_event *event; int ret = ICE_DCB_NO_HW_CHG; - struct iidc_event *event; struct ice_vsi *pf_vsi; curr_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; @@ -405,7 +405,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked) goto free_cfg; } - set_bit(IIDC_EVENT_BEFORE_TC_CHANGE, event->type); + set_bit(IIDC_RDMA_EVENT_BEFORE_TC_CHANGE, event->type); ice_send_event_to_aux(pf, event); kfree(event); @@ -740,7 +740,9 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf) void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked) { struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; - struct iidc_event *event; + struct iidc_rdma_priv_dev_info *privd; + struct iidc_rdma_core_dev_info *cdev; + struct iidc_rdma_event *event; u8 tc_map = 0; int v, ret; @@ -776,21 +778,24 @@ void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked) /* no need to proceed with remaining cfg if it is CHNL * or switchdev VSI */ - if (vsi->type == ICE_VSI_CHNL || - vsi->type == ICE_VSI_SWITCHDEV_CTRL) + if (vsi->type == ICE_VSI_CHNL) continue; 
ice_vsi_map_rings_to_vectors(vsi); if (vsi->type == ICE_VSI_PF) ice_dcbnl_set_all(vsi); } - if (!locked) { + + cdev = pf->cdev_info; + if (cdev && !locked) { + privd = cdev->iidc_priv; + ice_setup_dcb_qos_info(pf, &privd->qos_info); /* Notify the AUX drivers that TC change is finished */ event = kzalloc(sizeof(*event), GFP_KERNEL); if (!event) return; - set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type); + set_bit(IIDC_RDMA_EVENT_AFTER_TC_CHANGE, event->type); ice_send_event_to_aux(pf, event); kfree(event); } @@ -847,7 +852,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) goto dcb_init_err; } - ice_cfg_sw_lldp(pf_vsi, false, true); + ice_cfg_sw_rx_lldp(pf, true); pf->dcbx_cap = ice_dcb_get_mode(port_info, true); return 0; @@ -946,6 +951,37 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring, } /** + * ice_setup_dcb_qos_info - Setup DCB QoS information + * @pf: ptr to ice_pf + * @qos_info: QoS param instance + */ +void ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_rdma_qos_params *qos_info) +{ + struct ice_dcbx_cfg *dcbx_cfg; + unsigned int i; + u32 up2tc; + + if (!pf || !qos_info) + return; + + dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; + up2tc = rd32(&pf->hw, PRTDCB_TUP2TC); + + qos_info->num_tc = ice_dcb_get_num_tc(dcbx_cfg); + + for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++) + qos_info->up2tc[i] = (up2tc >> (i * 3)) & 0x7; + + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + qos_info->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i]; + + qos_info->pfc_mode = dcbx_cfg->pfc_mode; + if (qos_info->pfc_mode == IIDC_DSCP_PFC_MODE) + for (i = 0; i < DSCP_MAX; i++) + qos_info->dscp_map[i] = dcbx_cfg->dscp_map[i]; +} + +/** * ice_dcb_is_mib_change_pending - Check if MIB change is pending * @state: MIB change state */ diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h index 800879a88c5e..da9ba814b4e8 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h @@ -31,6 +31,9 @@ void ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first); void +ice_setup_dcb_qos_info(struct ice_pf *pf, + struct iidc_rdma_qos_params *qos_info); +void ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event); /** @@ -134,5 +137,11 @@ static inline void ice_update_dcb_stats(struct ice_pf *pf) { } static inline void ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event) { } static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc) { } +static inline void +ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_rdma_qos_params *qos_info) +{ + qos_info->num_tc = 1; + qos_info->tc_info[0].rel_bw = 100; +} #endif /* CONFIG_DCB */ #endif /* _ICE_DCB_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c index 6d50b90a7359..a10c1c8d8697 100644 --- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c +++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c @@ -754,7 +754,7 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app) if (!ice_is_feature_supported(pf, ICE_F_DSCP)) return -EOPNOTSUPP; - if (app->protocol >= ICE_DSCP_NUM_VAL) { + if (app->protocol >= DSCP_MAX) { netdev_err(netdev, "DSCP value 0x%04X out of range\n", app->protocol); return -EINVAL; @@ -931,7 +931,7 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app) /* if the last DSCP mapping just got deleted, need to switch * to L2 VLAN 
QoS mode */ - if (bitmap_empty(new_cfg->dscp_mapped, ICE_DSCP_NUM_VAL) && + if (bitmap_empty(new_cfg->dscp_mapped, DSCP_MAX) && new_cfg->pfc_mode == ICE_QOS_MODE_DSCP) { ret = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_VLAN_BASED_PFC, diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c index fc91c4d41186..59323c019544 100644 --- a/drivers/net/ethernet/intel/ice/ice_ddp.c +++ b/drivers/net/ethernet/intel/ice/ice_ddp.c @@ -4,6 +4,7 @@ #include "ice_common.h" #include "ice.h" #include "ice_ddp.h" +#include "ice_sched.h" /* For supporting double VLAN mode, it is necessary to enable or disable certain * boost tcam entries. The metadata labels names that match the following @@ -30,7 +31,7 @@ static const struct ice_tunnel_type_scan tnls[] = { * Verifies various attributes of the package file, including length, format * version, and the requirement of at least one segment. */ -static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) +static enum ice_ddp_state ice_verify_pkg(const struct ice_pkg_hdr *pkg, u32 len) { u32 seg_count; u32 i; @@ -56,13 +57,13 @@ static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len) /* all segments must fit within length */ for (i = 0; i < seg_count; i++) { u32 off = le32_to_cpu(pkg->seg_offset[i]); - struct ice_generic_seg_hdr *seg; + const struct ice_generic_seg_hdr *seg; /* segment header must fit */ if (len < off + sizeof(*seg)) return ICE_DDP_PKG_INVALID_FILE; - seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off); + seg = (void *)pkg + off; /* segment body must fit */ if (len < off + le32_to_cpu(seg->seg_size)) @@ -118,13 +119,13 @@ static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver) * * This helper function validates a buffer's header. */ -static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf) +static const struct ice_buf_hdr *ice_pkg_val_buf(const struct ice_buf *buf) { - struct ice_buf_hdr *hdr; + const struct ice_buf_hdr *hdr; u16 section_count; u16 data_end; - hdr = (struct ice_buf_hdr *)buf->buf; + hdr = (const struct ice_buf_hdr *)buf->buf; /* verify data */ section_count = le16_to_cpu(hdr->section_count); if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT) @@ -164,8 +165,8 @@ static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg) * unexpected value has been detected (for example an invalid section count or * an invalid buffer end value). */ -static struct ice_buf_hdr *ice_pkg_enum_buf(struct ice_seg *ice_seg, - struct ice_pkg_enum *state) +static const struct ice_buf_hdr *ice_pkg_enum_buf(struct ice_seg *ice_seg, + struct ice_pkg_enum *state) { if (ice_seg) { state->buf_table = ice_find_buf_table(ice_seg); @@ -288,11 +289,11 @@ void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, * indicates a base offset of 10, and the index for the entry is 2, then * section handler function should set the offset to 10 + 2 = 12. 
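 *
 * A sketch of the usual enumeration pattern (illustrative only; sect_type and
 * handler are placeholders for the caller's section type and handler):
 *
 *	struct ice_pkg_enum state = {};
 *	void *entry;
 *
 *	entry = ice_pkg_enum_entry(ice_seg, &state, sect_type, NULL, handler);
 *	while (entry) {
 *		... use entry ...
 *		entry = ice_pkg_enum_entry(NULL, &state, sect_type, NULL,
 *					   handler);
 *	}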
*/ -static void *ice_pkg_enum_entry(struct ice_seg *ice_seg, - struct ice_pkg_enum *state, u32 sect_type, - u32 *offset, - void *(*handler)(u32 sect_type, void *section, - u32 index, u32 *offset)) +void *ice_pkg_enum_entry(struct ice_seg *ice_seg, + struct ice_pkg_enum *state, u32 sect_type, + u32 *offset, + void *(*handler)(u32 sect_type, void *section, + u32 index, u32 *offset)) { void *entry; @@ -721,6 +722,12 @@ static bool ice_is_gtp_c_profile(u16 prof_idx) } } +static bool ice_is_pfcp_profile(u16 prof_idx) +{ + return prof_idx >= ICE_PROFID_IPV4_PFCP_NODE && + prof_idx <= ICE_PROFID_IPV6_PFCP_SESSION; +} + /** * ice_get_sw_prof_type - determine switch profile type * @hw: pointer to the HW structure @@ -738,6 +745,9 @@ static enum ice_prof_type ice_get_sw_prof_type(struct ice_hw *hw, if (ice_is_gtp_u_profile(prof_idx)) return ICE_PROF_TUN_GTPU; + if (ice_is_pfcp_profile(prof_idx)) + return ICE_PROF_TUN_PFCP; + for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) { /* UDP tunnel will have UDP_OF protocol ID and VNI offset */ if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF && @@ -1201,6 +1211,131 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, } /** + * ice_is_buffer_metadata - determine if package buffer is a metadata buffer + * @buf: pointer to buffer header + * Return: whether given @buf is a metadata one. + */ +static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf) +{ + return le32_to_cpu(buf->section_entry[0].type) & ICE_METADATA_BUF; +} + +/** + * struct ice_ddp_send_ctx - sending context of current DDP segment + * @hw: pointer to the hardware struct + * + * Keeps current sending state (header, error) for the purpose of proper "last" + * bit setting in ice_aq_download_pkg(). Use via calls to ice_ddp_send_hunk(). + */ +struct ice_ddp_send_ctx { + struct ice_hw *hw; +/* private: only for ice_ddp_send_hunk() */ + struct ice_buf_hdr *hdr; + int err; +}; + +static void ice_ddp_send_ctx_set_err(struct ice_ddp_send_ctx *ctx, int err) +{ + ctx->err = err; +} + +/** + * ice_ddp_send_hunk - send one hunk of data to FW + * @ctx: current segment sending context + * @hunk: next hunk to send, size is always ICE_PKG_BUF_SIZE + * + * Send the next hunk of data to FW, retrying if needed. + * + * Notice: must be called once more with a NULL @hunk to finish up; such call + * will set up the "last" bit of an AQ request. After such call @ctx.hdr is + * cleared, @hw is still valid. + * + * Return: %ICE_DDP_PKG_SUCCESS if there were no problems; a sticky @err + * otherwise. 
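+ *
+ * Sketch of the expected calling pattern (illustrative; it mirrors how
+ * ice_dwnld_cfg_bufs_no_lock() and ice_dwnld_cfg_bufs() use this helper):
+ *
+ *	struct ice_ddp_send_ctx ctx = { .hw = hw };
+ *
+ *	for (i = 0; i < count; i++)
+ *		ice_ddp_send_hunk(&ctx, (struct ice_buf_hdr *)&bufs[i]);
+ *	state = ice_ddp_send_hunk(&ctx, NULL);
+ *
+ * The final NULL call sets the "last" bit on the preceding hunk and returns
+ * the sticky status for the whole sequence.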
+ */ +static enum ice_ddp_state ice_ddp_send_hunk(struct ice_ddp_send_ctx *ctx, + struct ice_buf_hdr *hunk) +{ + struct ice_buf_hdr *prev_hunk = ctx->hdr; + struct ice_hw *hw = ctx->hw; + bool prev_was_last = !hunk; + enum ice_aq_err aq_err; + u32 offset, info; + int attempt, err; + + if (ctx->err) + return ctx->err; + + ctx->hdr = hunk; + if (!prev_hunk) + return ICE_DDP_PKG_SUCCESS; /* no problem so far */ + + for (attempt = 0; attempt < 5; attempt++) { + if (attempt) + msleep(20); + + err = ice_aq_download_pkg(hw, prev_hunk, ICE_PKG_BUF_SIZE, + prev_was_last, &offset, &info, NULL); + + aq_err = hw->adminq.sq_last_status; + if (aq_err != ICE_AQ_RC_ENOSEC && aq_err != ICE_AQ_RC_EBADSIG) + break; + } + + if (err) { + ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n", + err, offset, info); + ctx->err = ice_map_aq_err_to_ddp_state(aq_err); + } else if (attempt) { + dev_dbg(ice_hw_to_dev(hw), + "ice_aq_download_pkg number of retries: %d\n", attempt); + } + + return ctx->err; +} + +/** + * ice_dwnld_cfg_bufs_no_lock + * @ctx: context of the current buffers section to send + * @bufs: pointer to an array of buffers + * @start: buffer index of first buffer to download + * @count: the number of buffers to download + * + * Downloads package configuration buffers to the firmware. Metadata buffers + * are skipped, and the first metadata buffer found indicates that the rest + * of the buffers are all metadata buffers. + */ +static enum ice_ddp_state +ice_dwnld_cfg_bufs_no_lock(struct ice_ddp_send_ctx *ctx, struct ice_buf *bufs, + u32 start, u32 count) +{ + struct ice_buf_hdr *bh; + enum ice_ddp_state err; + + if (!bufs || !count) { + ice_ddp_send_ctx_set_err(ctx, ICE_DDP_PKG_ERR); + return ICE_DDP_PKG_ERR; + } + + bufs += start; + + for (int i = 0; i < count; i++, bufs++) { + bh = (struct ice_buf_hdr *)bufs; + /* Metadata buffers should not be sent to FW, + * their presence means "we are done here". 
+ */ + if (ice_is_buffer_metadata(bh)) + break; + + err = ice_ddp_send_hunk(ctx, bh); + if (err) + return err; + } + + return 0; +} + +/** * ice_get_pkg_seg_by_idx * @pkg_hdr: pointer to the package header to be searched * @idx: index of segment @@ -1260,117 +1395,20 @@ ice_is_signing_seg_type_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx, } /** - * ice_is_buffer_metadata - determine if package buffer is a metadata buffer - * @buf: pointer to buffer header - */ -static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf) -{ - if (le32_to_cpu(buf->section_entry[0].type) & ICE_METADATA_BUF) - return true; - - return false; -} - -/** - * ice_is_last_download_buffer - * @buf: pointer to current buffer header - * @idx: index of the buffer in the current sequence - * @count: the buffer count in the current sequence - * - * Note: this routine should only be called if the buffer is not the last buffer - */ -static bool -ice_is_last_download_buffer(struct ice_buf_hdr *buf, u32 idx, u32 count) -{ - struct ice_buf *next_buf; - - if ((idx + 1) == count) - return true; - - /* A set metadata flag in the next buffer will signal that the current - * buffer will be the last buffer downloaded - */ - next_buf = ((struct ice_buf *)buf) + 1; - - return ice_is_buffer_metadata((struct ice_buf_hdr *)next_buf); -} - -/** - * ice_dwnld_cfg_bufs_no_lock - * @hw: pointer to the hardware structure - * @bufs: pointer to an array of buffers - * @start: buffer index of first buffer to download - * @count: the number of buffers to download - * @indicate_last: if true, then set last buffer flag on last buffer download - * - * Downloads package configuration buffers to the firmware. Metadata buffers - * are skipped, and the first metadata buffer found indicates that the rest - * of the buffers are all metadata buffers. - */ -static enum ice_ddp_state -ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start, - u32 count, bool indicate_last) -{ - enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS; - struct ice_buf_hdr *bh; - enum ice_aq_err err; - u32 offset, info, i; - - if (!bufs || !count) - return ICE_DDP_PKG_ERR; - - /* If the first buffer's first section has its metadata bit set - * then there are no buffers to be downloaded, and the operation is - * considered a success. 
- */ - bh = (struct ice_buf_hdr *)(bufs + start); - if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) - return ICE_DDP_PKG_SUCCESS; - - for (i = 0; i < count; i++) { - bool last = false; - int status; - - bh = (struct ice_buf_hdr *)(bufs + start + i); - - if (indicate_last) - last = ice_is_last_download_buffer(bh, i, count); - - status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last, - &offset, &info, NULL); - - /* Save AQ status from download package */ - if (status) { - ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n", - status, offset, info); - err = hw->adminq.sq_last_status; - state = ice_map_aq_err_to_ddp_state(err); - break; - } - - if (last) - break; - } - - return state; -} - -/** * ice_download_pkg_sig_seg - download a signature segment - * @hw: pointer to the hardware structure + * @ctx: context of the current buffers section to send * @seg: pointer to signature segment */ static enum ice_ddp_state -ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg) +ice_download_pkg_sig_seg(struct ice_ddp_send_ctx *ctx, struct ice_sign_seg *seg) { - return ice_dwnld_cfg_bufs_no_lock(hw, seg->buf_tbl.buf_array, 0, - le32_to_cpu(seg->buf_tbl.buf_count), - false); + return ice_dwnld_cfg_bufs_no_lock(ctx, seg->buf_tbl.buf_array, 0, + le32_to_cpu(seg->buf_tbl.buf_count)); } /** * ice_download_pkg_config_seg - download a config segment - * @hw: pointer to the hardware structure + * @ctx: context of the current buffers section to send * @pkg_hdr: pointer to package header * @idx: segment index * @start: starting buffer @@ -1379,8 +1417,9 @@ ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg) * Note: idx must reference a ICE segment */ static enum ice_ddp_state -ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, - u32 idx, u32 start, u32 count) +ice_download_pkg_config_seg(struct ice_ddp_send_ctx *ctx, + struct ice_pkg_hdr *pkg_hdr, u32 idx, u32 start, + u32 count) { struct ice_buf_table *bufs; struct ice_seg *seg; @@ -1396,46 +1435,56 @@ ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, if (start >= buf_count || start + count > buf_count) return ICE_DDP_PKG_ERR; - return ice_dwnld_cfg_bufs_no_lock(hw, bufs->buf_array, start, count, - true); + return ice_dwnld_cfg_bufs_no_lock(ctx, bufs->buf_array, start, count); +} + +static bool ice_is_last_sign_seg(u32 flags) +{ + return !(flags & ICE_SIGN_SEG_FLAGS_VALID) || /* behavior prior to valid */ + (flags & ICE_SIGN_SEG_FLAGS_LAST); } /** * ice_dwnld_sign_and_cfg_segs - download a signing segment and config segment - * @hw: pointer to the hardware structure + * @ctx: context of the current buffers section to send * @pkg_hdr: pointer to package header * @idx: segment index (must be a signature segment) * * Note: idx must reference a signature segment */ static enum ice_ddp_state -ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr, - u32 idx) +ice_dwnld_sign_and_cfg_segs(struct ice_ddp_send_ctx *ctx, + struct ice_pkg_hdr *pkg_hdr, u32 idx) { + u32 conf_idx, start, count, flags; enum ice_ddp_state state; struct ice_sign_seg *seg; - u32 conf_idx; - u32 start; - u32 count; seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx); if (!seg) { state = ICE_DDP_PKG_ERR; - goto exit; + ice_ddp_send_ctx_set_err(ctx, state); + return state; } - conf_idx = le32_to_cpu(seg->signed_seg_idx); - start = le32_to_cpu(seg->signed_buf_start); count = le32_to_cpu(seg->signed_buf_count); + state = 
ice_download_pkg_sig_seg(ctx, seg); + if (state || !count) + return state; - state = ice_download_pkg_sig_seg(hw, seg); - if (state) - goto exit; + conf_idx = le32_to_cpu(seg->signed_seg_idx); + start = le32_to_cpu(seg->signed_buf_start); - state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start, + state = ice_download_pkg_config_seg(ctx, pkg_hdr, conf_idx, start, count); -exit: + /* finish up by sending last hunk with "last" flag set if requested by + * DDP content + */ + flags = le32_to_cpu(seg->flags); + if (ice_is_last_sign_seg(flags)) + state = ice_ddp_send_hunk(ctx, NULL); + return state; } @@ -1490,6 +1539,7 @@ ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) { enum ice_aq_err aq_err = hw->adminq.sq_last_status; enum ice_ddp_state state = ICE_DDP_PKG_ERR; + struct ice_ddp_send_ctx ctx = { .hw = hw }; int status; u32 i; @@ -1510,7 +1560,7 @@ ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) hw->pkg_sign_type)) continue; - state = ice_dwnld_sign_and_cfg_segs(hw, pkg_hdr, i); + state = ice_dwnld_sign_and_cfg_segs(&ctx, pkg_hdr, i); if (state) break; } @@ -1535,6 +1585,7 @@ ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr) static enum ice_ddp_state ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) { + struct ice_ddp_send_ctx ctx = { .hw = hw }; enum ice_ddp_state state; struct ice_buf_hdr *bh; int status; @@ -1547,7 +1598,7 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) * considered a success. */ bh = (struct ice_buf_hdr *)bufs; - if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF) + if (ice_is_buffer_metadata(bh)) return ICE_DDP_PKG_SUCCESS; status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE); @@ -1557,7 +1608,9 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count) return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status); } - state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true); + ice_dwnld_cfg_bufs_no_lock(&ctx, bufs, 0, count); + /* finish up by sending last hunk with "last" flag set */ + state = ice_ddp_send_hunk(&ctx, NULL); if (!state) state = ice_post_dwnld_pkg_actions(hw); @@ -1771,9 +1824,9 @@ int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count) * success it returns a pointer to the segment header, otherwise it will * return NULL. 
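 *
 * For example (as ice_cfg_tx_topo() later in this file does for the run-time
 * configuration segment):
 *
 *	seg = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr);
 *	if (!seg)
 *		return -EIO;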
*/ -static struct ice_generic_seg_hdr * +static const struct ice_generic_seg_hdr * ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, - struct ice_pkg_hdr *pkg_hdr) + const struct ice_pkg_hdr *pkg_hdr) { u32 i; @@ -1784,11 +1837,9 @@ ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type, /* Search all package segments for the requested segment type */ for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) { - struct ice_generic_seg_hdr *seg; + const struct ice_generic_seg_hdr *seg; - seg = (struct ice_generic_seg_hdr - *)((u8 *)pkg_hdr + - le32_to_cpu(pkg_hdr->seg_offset[i])); + seg = (void *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]); if (le32_to_cpu(seg->seg_type) == seg_type) return seg; @@ -2263,3 +2314,211 @@ enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, return state; } + +/** + * ice_get_set_tx_topo - get or set Tx topology + * @hw: pointer to the HW struct + * @buf: pointer to Tx topology buffer + * @buf_size: buffer size + * @cd: pointer to command details structure or NULL + * @flags: pointer to descriptor flags + * @set: 0-get, 1-set topology + * + * The function will get or set Tx topology + * + * Return: zero when set was successful, negative values otherwise. + */ +static int +ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size, + struct ice_sq_cd *cd, u8 *flags, bool set) +{ + struct ice_aqc_get_set_tx_topo *cmd; + struct ice_aq_desc desc; + int status; + + cmd = &desc.params.get_set_tx_topo; + if (set) { + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo); + cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED; + /* requested to update a new topology, not a default topology */ + if (buf) + cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM | + ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW; + + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + } else { + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo); + cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM; + + if (hw->mac_type == ICE_MAC_E810 || + hw->mac_type == ICE_MAC_GENERIC) + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD); + } + + status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd); + if (status) + return status; + /* read the return flag values (first byte) for get operation */ + if (!set && flags) + *flags = desc.params.get_set_tx_topo.set_flags; + + return 0; +} + +/** + * ice_cfg_tx_topo - Initialize new Tx topology if available + * @hw: pointer to the HW struct + * @buf: pointer to Tx topology buffer + * @len: buffer size + * + * The function will apply the new Tx topology from the package buffer + * if available. + * + * Return: zero when update was successful, negative values otherwise. + */ +int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len) +{ + u8 *new_topo = NULL, *topo __free(kfree) = NULL; + const struct ice_run_time_cfg_seg *seg; + const struct ice_buf_hdr *section; + const struct ice_pkg_hdr *pkg_hdr; + enum ice_ddp_state state; + u16 offset, size = 0; + u32 reg = 0; + int status; + u8 flags; + + if (!buf || !len) + return -EINVAL; + + /* Does FW support new Tx topology mode ? 
*/ + if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) { + ice_debug(hw, ICE_DBG_INIT, "FW doesn't support compatibility mode\n"); + return -EOPNOTSUPP; + } + + topo = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL); + if (!topo) + return -ENOMEM; + + /* Get the current Tx topology flags */ + status = ice_get_set_tx_topo(hw, topo, ICE_AQ_MAX_BUF_LEN, NULL, &flags, + false); + + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n"); + return status; + } + + /* Is default topology already applied ? */ + if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) && + hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) { + ice_debug(hw, ICE_DBG_INIT, "Default topology already applied\n"); + return -EEXIST; + } + + /* Is new topology already applied ? */ + if ((flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) && + hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) { + ice_debug(hw, ICE_DBG_INIT, "New topology already applied\n"); + return -EEXIST; + } + + /* Setting topology already issued? */ + if (flags & ICE_AQC_TX_TOPO_FLAGS_ISSUED) { + ice_debug(hw, ICE_DBG_INIT, "Update Tx topology was done by another PF\n"); + /* Add a small delay before exiting */ + msleep(2000); + return -EEXIST; + } + + /* Change the topology from new to default (5 to 9) */ + if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) && + hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) { + ice_debug(hw, ICE_DBG_INIT, "Change topology from 5 to 9 layers\n"); + goto update_topo; + } + + pkg_hdr = (const struct ice_pkg_hdr *)buf; + state = ice_verify_pkg(pkg_hdr, len); + if (state) { + ice_debug(hw, ICE_DBG_INIT, "Failed to verify pkg (err: %d)\n", + state); + return -EIO; + } + + /* Find runtime configuration segment */ + seg = (const struct ice_run_time_cfg_seg *) + ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr); + if (!seg) { + ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is missing\n"); + return -EIO; + } + + if (le32_to_cpu(seg->buf_table.buf_count) < ICE_MIN_S_COUNT) { + ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment count(%d) is wrong\n", + seg->buf_table.buf_count); + return -EIO; + } + + section = ice_pkg_val_buf(seg->buf_table.buf_array); + if (!section || le32_to_cpu(section->section_entry[0].type) != + ICE_SID_TX_5_LAYER_TOPO) { + ice_debug(hw, ICE_DBG_INIT, "5 layer topology section type is wrong\n"); + return -EIO; + } + + size = le16_to_cpu(section->section_entry[0].size); + offset = le16_to_cpu(section->section_entry[0].offset); + if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) { + ice_debug(hw, ICE_DBG_INIT, "5 layer topology section size is wrong\n"); + return -EIO; + } + + /* Make sure the section fits in the buffer */ + if (offset + size > ICE_PKG_BUF_SIZE) { + ice_debug(hw, ICE_DBG_INIT, "5 layer topology buffer > 4K\n"); + return -EIO; + } + + /* Get the new topology buffer, reuse current topo copy mem */ + static_assert(ICE_PKG_BUF_SIZE == ICE_AQ_MAX_BUF_LEN); + new_topo = topo; + memcpy(new_topo, (u8 *)section + offset, size); + +update_topo: + /* Acquire global lock to make sure that set topology issued + * by one PF. + */ + status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE, + ICE_GLOBAL_CFG_LOCK_TIMEOUT); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n"); + return status; + } + + /* Check if reset was triggered already. */ + reg = rd32(hw, GLGEN_RSTAT); + if (reg & GLGEN_RSTAT_DEVSTATE_M) { + /* Reset is in progress, re-init the HW again */ + ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. 
Layer topology might be applied already\n"); + ice_check_reset(hw); + return 0; + } + + /* Set new topology */ + status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true); + if (status) { + ice_debug(hw, ICE_DBG_INIT, "Failed setting Tx topology\n"); + return status; + } + + /* New topology is updated, delay 1 second before issuing the CORER */ + msleep(1000); + ice_reset(hw, ICE_RESET_CORER); + /* CORER will clear the global lock, so no explicit call + * required for release. + */ + + return 0; +} diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h index ff66c2ffb1a2..8a2d57fc5dae 100644 --- a/drivers/net/ethernet/intel/ice/ice_ddp.h +++ b/drivers/net/ethernet/intel/ice/ice_ddp.h @@ -181,7 +181,10 @@ struct ice_sign_seg { __le32 signed_seg_idx; __le32 signed_buf_start; __le32 signed_buf_count; -#define ICE_SIGN_SEG_RESERVED_COUNT 44 +#define ICE_SIGN_SEG_FLAGS_VALID 0x80000000 +#define ICE_SIGN_SEG_FLAGS_LAST 0x00000001 + __le32 flags; +#define ICE_SIGN_SEG_RESERVED_COUNT 40 u8 reserved[ICE_SIGN_SEG_RESERVED_COUNT]; struct ice_buf_table buf_tbl; }; @@ -261,10 +264,17 @@ struct ice_meta_sect { #define ICE_SID_CDID_KEY_BUILDER_RSS 47 #define ICE_SID_CDID_REDIR_RSS 48 +#define ICE_SID_RXPARSER_CAM 50 +#define ICE_SID_RXPARSER_NOMATCH_CAM 51 +#define ICE_SID_RXPARSER_IMEM 52 #define ICE_SID_RXPARSER_MARKER_PTYPE 55 #define ICE_SID_RXPARSER_BOOST_TCAM 56 +#define ICE_SID_RXPARSER_PROTO_GRP 57 #define ICE_SID_RXPARSER_METADATA_INIT 58 #define ICE_SID_TXPARSER_BOOST_TCAM 66 +#define ICE_SID_RXPARSER_MARKER_GRP 72 +#define ICE_SID_RXPARSER_PG_SPILL 76 +#define ICE_SID_RXPARSER_NOMATCH_SPILL 78 #define ICE_SID_XLT0_PE 80 #define ICE_SID_XLT_KEY_BUILDER_PE 81 @@ -276,6 +286,7 @@ struct ice_meta_sect { #define ICE_SID_CDID_KEY_BUILDER_PE 87 #define ICE_SID_CDID_REDIR_PE 88 +#define ICE_SID_RXPARSER_FLAG_REDIR 97 /* Label Metadata section IDs */ #define ICE_SID_LBL_FIRST 0x80000010 #define ICE_SID_LBL_RXPARSER_TMEM 0x80000018 @@ -430,7 +441,7 @@ struct ice_pkg_enum { u32 buf_idx; u32 type; - struct ice_buf_hdr *buf; + const struct ice_buf_hdr *buf; u32 sect_idx; void *sect; u32 sect_type; @@ -451,7 +462,14 @@ int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count); int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count); u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld); +void * +ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state, + u32 sect_type, u32 *offset, + void *(*handler)(u32 sect_type, void *section, + u32 index, u32 *offset)); void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state, u32 sect_type); +int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len); + #endif diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h index 9dfae9bce758..34fd604132f5 100644 --- a/drivers/net/ethernet/intel/ice/ice_devids.h +++ b/drivers/net/ethernet/intel/ice/ice_devids.h @@ -16,14 +16,26 @@ #define ICE_DEV_ID_E823L_1GBE 0x124F /* Intel(R) Ethernet Connection E823-L for QSFP */ #define ICE_DEV_ID_E823L_QSFP 0x151D +/* Intel(R) Ethernet Controller E830-CC for backplane */ +#define ICE_DEV_ID_E830CC_BACKPLANE 0x12D1 +/* Intel(R) Ethernet Controller E830-CC for QSFP */ +#define ICE_DEV_ID_E830CC_QSFP56 0x12D2 +/* Intel(R) Ethernet Controller E830-CC for SFP */ +#define ICE_DEV_ID_E830CC_SFP 0x12D3 +/* Intel(R) Ethernet Controller E830-CC for SFP-DD */ +#define ICE_DEV_ID_E830CC_SFP_DD 0x12D4 /* Intel(R) Ethernet 
Controller E830-C for backplane */ -#define ICE_DEV_ID_E830_BACKPLANE 0x12D1 +#define ICE_DEV_ID_E830C_BACKPLANE 0x12D5 /* Intel(R) Ethernet Controller E830-C for QSFP */ -#define ICE_DEV_ID_E830_QSFP56 0x12D2 +#define ICE_DEV_ID_E830C_QSFP 0x12D8 /* Intel(R) Ethernet Controller E830-C for SFP */ -#define ICE_DEV_ID_E830_SFP 0x12D3 -/* Intel(R) Ethernet Controller E830-C for SFP-DD */ -#define ICE_DEV_ID_E830_SFP_DD 0x12D4 +#define ICE_DEV_ID_E830C_SFP 0x12DA +/* Intel(R) Ethernet Controller E830-XXV for backplane */ +#define ICE_DEV_ID_E830_XXV_BACKPLANE 0x12DC +/* Intel(R) Ethernet Controller E830-XXV for QSFP */ +#define ICE_DEV_ID_E830_XXV_QSFP 0x12DD +/* Intel(R) Ethernet Controller E830-XXV for SFP */ +#define ICE_DEV_ID_E830_XXV_SFP 0x12DE /* Intel(R) Ethernet Controller E810-C for backplane */ #define ICE_DEV_ID_E810C_BACKPLANE 0x1591 /* Intel(R) Ethernet Controller E810-C for QSFP */ diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c index e92be6f130a3..bce3ad6ca2a6 100644 --- a/drivers/net/ethernet/intel/ice/ice_dpll.c +++ b/drivers/net/ethernet/intel/ice/ice_dpll.c @@ -9,6 +9,8 @@ #define ICE_CGU_STATE_ACQ_ERR_THRESHOLD 50 #define ICE_DPLL_PIN_IDX_INVALID 0xff #define ICE_DPLL_RCLK_NUM_PER_PF 1 +#define ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT 25 +#define ICE_DPLL_PIN_GEN_RCLK_FREQ 1953125 /** * enum ice_dpll_pin_type - enumerate ice pin types: @@ -30,6 +32,10 @@ static const char * const pin_type_name[] = { [ICE_DPLL_PIN_TYPE_RCLK_INPUT] = "rclk-input", }; +static const struct dpll_pin_frequency ice_esync_range[] = { + DPLL_PIN_FREQUENCY_RANGE(0, DPLL_PIN_FREQUENCY_1_HZ), +}; + /** * ice_dpll_is_reset - check if reset is in progress * @pf: private board structure @@ -89,7 +95,7 @@ ice_dpll_pin_freq_set(struct ice_pf *pf, struct ice_dpll_pin *pin, } if (ret) { NL_SET_ERR_MSG_FMT(extack, - "err:%d %s failed to set pin freq:%u on pin:%u\n", + "err:%d %s failed to set pin freq:%u on pin:%u", ret, ice_aq_str(pf->hw.adminq.sq_last_status), freq, pin->idx); @@ -316,7 +322,7 @@ ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin, } if (ret) NL_SET_ERR_MSG_FMT(extack, - "err:%d %s failed to enable %s pin:%u\n", + "err:%d %s failed to enable %s pin:%u", ret, ice_aq_str(hw->adminq.sq_last_status), pin_type_name[pin_type], pin->idx); @@ -361,7 +367,7 @@ ice_dpll_pin_disable(struct ice_hw *hw, struct ice_dpll_pin *pin, } if (ret) NL_SET_ERR_MSG_FMT(extack, - "err:%d %s failed to disable %s pin:%u\n", + "err:%d %s failed to disable %s pin:%u", ret, ice_aq_str(hw->adminq.sq_last_status), pin_type_name[pin_type], pin->idx); @@ -394,8 +400,8 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin, switch (pin_type) { case ICE_DPLL_PIN_TYPE_INPUT: - ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, NULL, NULL, - NULL, &pin->flags[0], + ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, &pin->status, + NULL, NULL, &pin->flags[0], &pin->freq, &pin->phase_adjust); if (ret) goto err; @@ -430,7 +436,7 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin, goto err; parent &= ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL; - if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) { + if (ICE_AQC_GET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) { pin->state[pf->dplls.eec.dpll_idx] = parent == pf->dplls.eec.dpll_idx ? 
DPLL_PIN_STATE_CONNECTED : @@ -473,7 +479,7 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin, err: if (extack) NL_SET_ERR_MSG_FMT(extack, - "err:%d %s failed to update %s pin:%u\n", + "err:%d %s failed to update %s pin:%u", ret, ice_aq_str(pf->hw.adminq.sq_last_status), pin_type_name[pin_type], pin->idx); @@ -512,7 +518,7 @@ ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll, (u8)prio); if (ret) NL_SET_ERR_MSG_FMT(extack, - "err:%d %s failed to set pin prio:%u on pin:%u\n", + "err:%d %s failed to set pin prio:%u on pin:%u", ret, ice_aq_str(pf->hw.adminq.sq_last_status), prio, pin->idx); @@ -651,6 +657,8 @@ ice_dpll_output_state_set(const struct dpll_pin *pin, void *pin_priv, struct ice_dpll_pin *p = pin_priv; struct ice_dpll *d = dpll_priv; + if (state == DPLL_PIN_STATE_SELECTABLE) + return -EINVAL; if (!enable && p->state[d->dpll_idx] == DPLL_PIN_STATE_DISCONNECTED) return 0; @@ -996,7 +1004,7 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv, mutex_unlock(&pf->dplls.lock); if (ret) NL_SET_ERR_MSG_FMT(extack, - "err:%d %s failed to set pin phase_adjust:%d for pin:%u on dpll:%u\n", + "err:%d %s failed to set pin phase_adjust:%d for pin:%u on dpll:%u", ret, ice_aq_str(pf->hw.adminq.sq_last_status), phase_adjust, p->idx, d->dpll_idx); @@ -1099,6 +1107,214 @@ ice_dpll_phase_offset_get(const struct dpll_pin *pin, void *pin_priv, } /** + * ice_dpll_output_esync_set - callback for setting embedded sync + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @freq: requested embedded sync frequency + * @extack: error reporting + * + * Dpll subsystem callback. Handler for setting embedded sync frequency value + * on output pin. + * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_output_esync_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u64 freq, struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + u8 flags = 0; + int ret; + + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); + if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_OUT_EN) + flags = ICE_AQC_SET_CGU_OUT_CFG_OUT_EN; + if (freq == DPLL_PIN_FREQUENCY_1_HZ) { + if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN) { + ret = 0; + } else { + flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN; + ret = ice_aq_set_output_pin_cfg(&pf->hw, p->idx, flags, + 0, 0, 0); + } + } else { + if (!(p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN)) { + ret = 0; + } else { + flags &= ~ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN; + ret = ice_aq_set_output_pin_cfg(&pf->hw, p->idx, flags, + 0, 0, 0); + } + } + mutex_unlock(&pf->dplls.lock); + + return ret; +} + +/** + * ice_dpll_output_esync_get - callback for getting embedded sync config + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @esync: on success holds embedded sync pin properties + * @extack: error reporting + * + * Dpll subsystem callback. Handler for getting embedded sync frequency value + * and capabilities on output pin. 
+ * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_output_esync_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + struct dpll_pin_esync *esync, + struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); + if (!(p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_ABILITY) || + p->freq != DPLL_PIN_FREQUENCY_10_MHZ) { + mutex_unlock(&pf->dplls.lock); + return -EOPNOTSUPP; + } + esync->range = ice_esync_range; + esync->range_num = ARRAY_SIZE(ice_esync_range); + if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN) { + esync->freq = DPLL_PIN_FREQUENCY_1_HZ; + esync->pulse = ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT; + } else { + esync->freq = 0; + esync->pulse = 0; + } + mutex_unlock(&pf->dplls.lock); + + return 0; +} + +/** + * ice_dpll_input_esync_set - callback for setting embedded sync + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @freq: requested embedded sync frequency + * @extack: error reporting + * + * Dpll subsystem callback. Handler for setting embedded sync frequency value + * on input pin. + * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_input_esync_set(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u64 freq, struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + u8 flags_en = 0; + int ret; + + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); + if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN) + flags_en = ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN; + if (freq == DPLL_PIN_FREQUENCY_1_HZ) { + if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN) { + ret = 0; + } else { + flags_en |= ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN; + ret = ice_aq_set_input_pin_cfg(&pf->hw, p->idx, 0, + flags_en, 0, 0); + } + } else { + if (!(p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN)) { + ret = 0; + } else { + flags_en &= ~ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN; + ret = ice_aq_set_input_pin_cfg(&pf->hw, p->idx, 0, + flags_en, 0, 0); + } + } + mutex_unlock(&pf->dplls.lock); + + return ret; +} + +/** + * ice_dpll_input_esync_get - callback for getting embedded sync config + * @pin: pointer to a pin + * @pin_priv: private data pointer passed on pin registration + * @dpll: registered dpll pointer + * @dpll_priv: private data pointer passed on dpll registration + * @esync: on success holds embedded sync pin properties + * @extack: error reporting + * + * Dpll subsystem callback. Handler for getting embedded sync frequency value + * and capabilities on input pin. 
+ * + * Context: Acquires pf->dplls.lock + * Return: + * * 0 - success + * * negative - error + */ +static int +ice_dpll_input_esync_get(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + struct dpll_pin_esync *esync, + struct netlink_ext_ack *extack) +{ + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + struct ice_pf *pf = d->pf; + + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); + if (!(p->status & ICE_AQC_GET_CGU_IN_CFG_STATUS_ESYNC_CAP) || + p->freq != DPLL_PIN_FREQUENCY_10_MHZ) { + mutex_unlock(&pf->dplls.lock); + return -EOPNOTSUPP; + } + esync->range = ice_esync_range; + esync->range_num = ARRAY_SIZE(ice_esync_range); + if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN) { + esync->freq = DPLL_PIN_FREQUENCY_1_HZ; + esync->pulse = ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT; + } else { + esync->freq = 0; + esync->pulse = 0; + } + mutex_unlock(&pf->dplls.lock); + + return 0; +} + +/** * ice_dpll_rclk_state_on_pin_set - set a state on rclk pin * @pin: pointer to a pin * @pin_priv: private data pointer passed on pin registration @@ -1146,7 +1362,7 @@ ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv, &p->freq); if (ret) NL_SET_ERR_MSG_FMT(extack, - "err:%d %s failed to set pin state:%u for pin:%u on parent:%u\n", + "err:%d %s failed to set pin state:%u for pin:%u on parent:%u", ret, ice_aq_str(pf->hw.adminq.sq_last_status), state, p->idx, parent->idx); @@ -1222,6 +1438,8 @@ static const struct dpll_pin_ops ice_dpll_input_ops = { .phase_adjust_get = ice_dpll_pin_phase_adjust_get, .phase_adjust_set = ice_dpll_input_phase_adjust_set, .phase_offset_get = ice_dpll_phase_offset_get, + .esync_set = ice_dpll_input_esync_set, + .esync_get = ice_dpll_input_esync_get, }; static const struct dpll_pin_ops ice_dpll_output_ops = { @@ -1232,6 +1450,8 @@ static const struct dpll_pin_ops ice_dpll_output_ops = { .direction_get = ice_dpll_output_direction, .phase_adjust_get = ice_dpll_pin_phase_adjust_get, .phase_adjust_set = ice_dpll_output_phase_adjust_set, + .esync_set = ice_dpll_output_esync_set, + .esync_get = ice_dpll_output_esync_get, }; static const struct dpll_device_ops ice_dpll_ops = { @@ -1626,6 +1846,8 @@ ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin, struct dpll_pin *parent; int ret, i; + if (WARN_ON((!vsi || !vsi->netdev))) + return -EINVAL; ret = ice_dpll_get_pins(pf, pin, start_idx, ICE_DPLL_RCLK_NUM_PER_PF, pf->dplls.clock_id); if (ret) @@ -1641,8 +1863,6 @@ ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin, if (ret) goto unregister_pins; } - if (WARN_ON((!vsi || !vsi->netdev))) - return -EINVAL; dpll_netdev_pin_set(vsi->netdev, pf->dplls.rclk.pin); return 0; @@ -1833,7 +2053,7 @@ static int ice_dpll_init_worker(struct ice_pf *pf) struct kthread_worker *kworker; kthread_init_delayed_work(&d->work, ice_dpll_periodic_work); - kworker = kthread_create_worker(0, "ice-dplls-%s", + kworker = kthread_run_worker(0, "ice-dplls-%s", dev_name(ice_pf_to_dev(pf))); if (IS_ERR(kworker)) return PTR_ERR(kworker); @@ -1845,6 +2065,85 @@ static int ice_dpll_init_worker(struct ice_pf *pf) } /** + * ice_dpll_phase_range_set - initialize phase adjust range helper + * @range: pointer to phase adjust range struct to be initialized + * @phase_adj: a value to be used as min(-)/max(+) boundary + */ +static void ice_dpll_phase_range_set(struct dpll_pin_phase_adjust_range *range, + u32 phase_adj) +{ + range->min = -phase_adj; + range->max = phase_adj; 
+} + +/** + * ice_dpll_init_info_pins_generic - initializes generic pins info + * @pf: board private structure + * @input: if input pins initialized + * + * Init information for generic pins, cache them in PF's pins structures. + * + * Return: + * * 0 - success + * * negative - init failure reason + */ +static int ice_dpll_init_info_pins_generic(struct ice_pf *pf, bool input) +{ + struct ice_dpll *de = &pf->dplls.eec, *dp = &pf->dplls.pps; + static const char labels[][sizeof("99")] = { + "0", "1", "2", "3", "4", "5", "6", "7", "8", + "9", "10", "11", "12", "13", "14", "15" }; + u32 cap = DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE; + enum ice_dpll_pin_type pin_type; + int i, pin_num, ret = -EINVAL; + struct ice_dpll_pin *pins; + u32 phase_adj_max; + + if (input) { + pin_num = pf->dplls.num_inputs; + pins = pf->dplls.inputs; + phase_adj_max = pf->dplls.input_phase_adj_max; + pin_type = ICE_DPLL_PIN_TYPE_INPUT; + cap |= DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE; + } else { + pin_num = pf->dplls.num_outputs; + pins = pf->dplls.outputs; + phase_adj_max = pf->dplls.output_phase_adj_max; + pin_type = ICE_DPLL_PIN_TYPE_OUTPUT; + } + if (pin_num > ARRAY_SIZE(labels)) + return ret; + + for (i = 0; i < pin_num; i++) { + pins[i].idx = i; + pins[i].prop.board_label = labels[i]; + ice_dpll_phase_range_set(&pins[i].prop.phase_range, + phase_adj_max); + pins[i].prop.capabilities = cap; + pins[i].pf = pf; + ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL); + if (ret) + break; + if (input && pins[i].freq == ICE_DPLL_PIN_GEN_RCLK_FREQ) + pins[i].prop.type = DPLL_PIN_TYPE_MUX; + else + pins[i].prop.type = DPLL_PIN_TYPE_EXT; + if (!input) + continue; + ret = ice_aq_get_cgu_ref_prio(&pf->hw, de->dpll_idx, i, + &de->input_prio[i]); + if (ret) + break; + ret = ice_aq_get_cgu_ref_prio(&pf->hw, dp->dpll_idx, i, + &dp->input_prio[i]); + if (ret) + break; + } + + return ret; +} + +/** * ice_dpll_init_info_direct_pins - initializes direct pins info * @pf: board private structure * @pin_type: type of pins being initialized @@ -1865,6 +2164,7 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf, struct ice_hw *hw = &pf->hw; struct ice_dpll_pin *pins; unsigned long caps; + u32 phase_adj_max; u8 freq_supp_num; bool input; @@ -1872,16 +2172,20 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf, case ICE_DPLL_PIN_TYPE_INPUT: pins = pf->dplls.inputs; num_pins = pf->dplls.num_inputs; + phase_adj_max = pf->dplls.input_phase_adj_max; input = true; break; case ICE_DPLL_PIN_TYPE_OUTPUT: pins = pf->dplls.outputs; num_pins = pf->dplls.num_outputs; + phase_adj_max = pf->dplls.output_phase_adj_max; input = false; break; default: return -EINVAL; } + if (num_pins != ice_cgu_get_num_pins(hw, input)) + return ice_dpll_init_info_pins_generic(pf, input); for (i = 0; i < num_pins; i++) { caps = 0; @@ -1899,19 +2203,13 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf, return ret; caps |= (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE | DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE); - pins[i].prop.phase_range.min = - pf->dplls.input_phase_adj_max; - pins[i].prop.phase_range.max = - -pf->dplls.input_phase_adj_max; } else { - pins[i].prop.phase_range.min = - pf->dplls.output_phase_adj_max; - pins[i].prop.phase_range.max = - -pf->dplls.output_phase_adj_max; ret = ice_cgu_get_output_pin_state_caps(hw, i, &caps); if (ret) return ret; } + ice_dpll_phase_range_set(&pins[i].prop.phase_range, + phase_adj_max); pins[i].prop.capabilities = caps; ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL); if (ret) @@ -2019,8 +2317,10 @@ static int 
ice_dpll_init_info(struct ice_pf *pf, bool cgu) dp->dpll_idx = abilities.pps_dpll_idx; d->num_inputs = abilities.num_inputs; d->num_outputs = abilities.num_outputs; - d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj); - d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj); + d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj) & + ICE_AQC_GET_CGU_MAX_PHASE_ADJ; + d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj) & + ICE_AQC_GET_CGU_MAX_PHASE_ADJ; alloc_size = sizeof(*d->inputs) * d->num_inputs; d->inputs = kzalloc(alloc_size, GFP_KERNEL); diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.h b/drivers/net/ethernet/intel/ice/ice_dpll.h index 93172e93995b..c320f1bf7d6d 100644 --- a/drivers/net/ethernet/intel/ice/ice_dpll.h +++ b/drivers/net/ethernet/intel/ice/ice_dpll.h @@ -31,6 +31,7 @@ struct ice_dpll_pin { struct dpll_pin_properties prop; u32 freq; s32 phase_adjust; + u8 status; }; /** ice_dpll - store info required for DPLL control diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index 9069725c71b4..6aae03771746 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -7,89 +7,10 @@ #include "ice_eswitch_br.h" #include "ice_fltr.h" #include "ice_repr.h" -#include "ice_devlink.h" +#include "devlink/devlink.h" #include "ice_tc_lib.h" /** - * ice_eswitch_del_sp_rules - delete adv rules added on PRs - * @pf: pointer to the PF struct - * - * Delete all advanced rules that were used to forward packets with the - * device's VSI index to the corresponding eswitch ctrl VSI queue. - */ -static void ice_eswitch_del_sp_rules(struct ice_pf *pf) -{ - struct ice_repr *repr; - unsigned long id; - - xa_for_each(&pf->eswitch.reprs, id, repr) { - if (repr->sp_rule.rid) - ice_rem_adv_rule_by_id(&pf->hw, &repr->sp_rule); - } -} - -/** - * ice_eswitch_add_sp_rule - add adv rule with device's VSI index - * @pf: pointer to PF struct - * @repr: pointer to the repr struct - * - * This function adds advanced rule that forwards packets with - * device's VSI index to the corresponding eswitch ctrl VSI queue. 
- */ -static int ice_eswitch_add_sp_rule(struct ice_pf *pf, struct ice_repr *repr) -{ - struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; - struct ice_adv_rule_info rule_info = { 0 }; - struct ice_adv_lkup_elem *list; - struct ice_hw *hw = &pf->hw; - const u16 lkups_cnt = 1; - int err; - - list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); - if (!list) - return -ENOMEM; - - ice_rule_add_src_vsi_metadata(list); - - rule_info.sw_act.flag = ICE_FLTR_TX; - rule_info.sw_act.vsi_handle = ctrl_vsi->idx; - rule_info.sw_act.fltr_act = ICE_FWD_TO_Q; - rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id + - ctrl_vsi->rxq_map[repr->q_id]; - rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE; - rule_info.flags_info.act_valid = true; - rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN; - rule_info.src_vsi = repr->src_vsi->idx; - - err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, - &repr->sp_rule); - if (err) - dev_err(ice_pf_to_dev(pf), "Unable to add slow-path rule for eswitch for PR %d", - repr->id); - - kfree(list); - return err; -} - -static int -ice_eswitch_add_sp_rules(struct ice_pf *pf) -{ - struct ice_repr *repr; - unsigned long id; - int err; - - xa_for_each(&pf->eswitch.reprs, id, repr) { - err = ice_eswitch_add_sp_rule(pf, repr); - if (err) { - ice_eswitch_del_sp_rules(pf); - return err; - } - } - - return 0; -} - -/** * ice_eswitch_setup_env - configure eswitch HW filters * @pf: pointer to PF struct * @@ -99,12 +20,16 @@ ice_eswitch_add_sp_rules(struct ice_pf *pf) static int ice_eswitch_setup_env(struct ice_pf *pf) { struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi; - struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; struct net_device *netdev = uplink_vsi->netdev; + bool if_running = netif_running(netdev); struct ice_vsi_vlan_ops *vlan_ops; - bool rule_added = false; + + if (if_running && !test_and_set_bit(ICE_VSI_DOWN, uplink_vsi->state)) + if (ice_down(uplink_vsi)) + return -ENODEV; ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx); + ice_vsi_cfg_sw_lldp(uplink_vsi, true, false); netif_addr_lock_bh(netdev); __dev_uc_unsync(netdev, NULL); @@ -112,98 +37,47 @@ static int ice_eswitch_setup_env(struct ice_pf *pf) netif_addr_unlock_bh(netdev); if (ice_vsi_add_vlan_zero(uplink_vsi)) + goto err_vlan_zero; + + if (ice_set_dflt_vsi(uplink_vsi)) goto err_def_rx; - if (!ice_is_dflt_vsi_in_use(uplink_vsi->port_info)) { - if (ice_set_dflt_vsi(uplink_vsi)) - goto err_def_rx; - rule_added = true; - } + if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true, + ICE_FLTR_TX)) + goto err_def_tx; vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi); if (vlan_ops->dis_rx_filtering(uplink_vsi)) - goto err_dis_rx; - - if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override)) - goto err_override_uplink; - - if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override)) - goto err_override_control; + goto err_vlan_filtering; if (ice_vsi_update_local_lb(uplink_vsi, true)) goto err_override_local_lb; + if (if_running && ice_up(uplink_vsi)) + goto err_up; + return 0; +err_up: + ice_vsi_update_local_lb(uplink_vsi, false); err_override_local_lb: - ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override); -err_override_control: - ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); -err_override_uplink: vlan_ops->ena_rx_filtering(uplink_vsi); -err_dis_rx: - if (rule_added) - ice_clear_dflt_vsi(uplink_vsi); +err_vlan_filtering: + ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false, + ICE_FLTR_TX); +err_def_tx: + 
ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false, + ICE_FLTR_RX); err_def_rx: + ice_vsi_del_vlan_zero(uplink_vsi); +err_vlan_zero: ice_fltr_add_mac_and_broadcast(uplink_vsi, uplink_vsi->port_info->mac.perm_addr, ICE_FWD_TO_VSI); - return -ENODEV; -} - -/** - * ice_eswitch_remap_rings_to_vectors - reconfigure rings of eswitch ctrl VSI - * @eswitch: pointer to eswitch struct - * - * In eswitch number of allocated Tx/Rx rings is equal. - * - * This function fills q_vectors structures associated with representor and - * move each ring pairs to port representor netdevs. Each port representor - * will have dedicated 1 Tx/Rx ring pair, so number of rings pair is equal to - * number of VFs. - */ -static void ice_eswitch_remap_rings_to_vectors(struct ice_eswitch *eswitch) -{ - struct ice_vsi *vsi = eswitch->control_vsi; - unsigned long repr_id = 0; - int q_id; - - ice_for_each_txq(vsi, q_id) { - struct ice_q_vector *q_vector; - struct ice_tx_ring *tx_ring; - struct ice_rx_ring *rx_ring; - struct ice_repr *repr; - - repr = xa_find(&eswitch->reprs, &repr_id, U32_MAX, - XA_PRESENT); - if (!repr) - break; - - repr_id += 1; - repr->q_id = q_id; - q_vector = repr->q_vector; - tx_ring = vsi->tx_rings[q_id]; - rx_ring = vsi->rx_rings[q_id]; - - q_vector->vsi = vsi; - q_vector->reg_idx = vsi->q_vectors[0]->reg_idx; - - q_vector->num_ring_tx = 1; - q_vector->tx.tx_ring = tx_ring; - tx_ring->q_vector = q_vector; - tx_ring->next = NULL; - tx_ring->netdev = repr->netdev; - /* In switchdev mode, from OS stack perspective, there is only - * one queue for given netdev, so it needs to be indexed as 0. - */ - tx_ring->q_index = 0; + if (if_running) + ice_up(uplink_vsi); - q_vector->num_ring_rx = 1; - q_vector->rx.rx_ring = rx_ring; - rx_ring->q_vector = q_vector; - rx_ring->next = NULL; - rx_ring->netdev = repr->netdev; - } + return -ENODEV; } /** @@ -225,8 +99,6 @@ ice_eswitch_release_repr(struct ice_pf *pf, struct ice_repr *repr) repr->dst = NULL; ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, ICE_FWD_TO_VSI); - - netif_napi_del(&repr->q_vector->napi); } /** @@ -236,43 +108,64 @@ ice_eswitch_release_repr(struct ice_pf *pf, struct ice_repr *repr) */ static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr) { - struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; + struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi; struct ice_vsi *vsi = repr->src_vsi; struct metadata_dst *dst; - ice_remove_vsi_fltr(&pf->hw, vsi->idx); repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL); if (!repr->dst) - goto err_add_mac_fltr; + return -ENOMEM; - if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) - goto err_dst_free; + netif_keep_dst(uplink_vsi->netdev); - if (ice_vsi_add_vlan_zero(vsi)) - goto err_update_security; + dst = repr->dst; + dst->u.port_info.port_id = vsi->vsi_num; + dst->u.port_info.lower_dev = uplink_vsi->netdev; + + return 0; +} - netif_napi_add(repr->netdev, &repr->q_vector->napi, - ice_napi_poll); +/** + * ice_eswitch_cfg_vsi - configure VSI to work in slow-path + * @vsi: VSI structure of representee + * @mac: representee MAC + * + * Return: 0 on success, non-zero on error. 
+ */ +int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac) +{ + int err; - netif_keep_dst(repr->netdev); + ice_remove_vsi_fltr(&vsi->back->hw, vsi->idx); - dst = repr->dst; - dst->u.port_info.port_id = vsi->vsi_num; - dst->u.port_info.lower_dev = repr->netdev; - ice_repr_set_traffic_vsi(repr, ctrl_vsi); + err = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof); + if (err) + goto err_update_security; + + err = ice_vsi_add_vlan_zero(vsi); + if (err) + goto err_vlan_zero; return 0; -err_update_security: +err_vlan_zero: ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); -err_dst_free: - metadata_dst_free(repr->dst); - repr->dst = NULL; -err_add_mac_fltr: - ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, ICE_FWD_TO_VSI); +err_update_security: + ice_fltr_add_mac_and_broadcast(vsi, mac, ICE_FWD_TO_VSI); - return -ENODEV; + return err; +} + +/** + * ice_eswitch_decfg_vsi - unroll changes done to VSI for switchdev + * @vsi: VSI structure of representee + * @mac: representee MAC + */ +void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac) +{ + ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); + ice_fltr_add_mac_and_broadcast(vsi, mac, ICE_FWD_TO_VSI); } /** @@ -280,16 +173,16 @@ err_add_mac_fltr: * @repr_id: representor ID * @vsi: VSI for which port representor is configured */ -void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) +void ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; struct ice_repr *repr; - int ret; + int err; if (!ice_is_switchdev_running(pf)) return; - repr = xa_load(&pf->eswitch.reprs, repr_id); + repr = xa_load(&pf->eswitch.reprs, *repr_id); if (!repr) return; @@ -299,12 +192,19 @@ void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) if (repr->br_port) repr->br_port->vsi = vsi; - ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof); - if (ret) { - ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, - ICE_FWD_TO_VSI); + err = ice_eswitch_cfg_vsi(vsi, repr->parent_mac); + if (err) dev_err(ice_pf_to_dev(pf), "Failed to update VSI of port representor %d", repr->id); + + /* The VSI number is different, reload the PR with new id */ + if (repr->id != vsi->vsi_num) { + xa_erase(&pf->eswitch.reprs, repr->id); + repr->id = vsi->vsi_num; + if (xa_insert(&pf->eswitch.reprs, repr->id, repr, GFP_KERNEL)) + dev_err(ice_pf_to_dev(pf), "Failed to reload port representor %d", + repr->id); + *repr_id = repr->id; } } @@ -318,27 +218,19 @@ void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) netdev_tx_t ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) { - struct ice_netdev_priv *np; - struct ice_repr *repr; - struct ice_vsi *vsi; - - np = netdev_priv(netdev); - vsi = np->vsi; - - if (!vsi || !ice_is_switchdev_running(vsi->back)) - return NETDEV_TX_BUSY; - - if (ice_is_reset_in_progress(vsi->back->state) || - test_bit(ICE_VF_DIS, vsi->back->state)) - return NETDEV_TX_BUSY; + struct ice_repr *repr = ice_netdev_to_repr(netdev); + unsigned int len = skb->len; + int ret; - repr = ice_netdev_to_repr(netdev); skb_dst_drop(skb); dst_hold((struct dst_entry *)repr->dst); skb_dst_set(skb, (struct dst_entry *)repr->dst); - skb->queue_mapping = repr->q_id; + skb->dev = repr->dst->u.port_info.lower_dev; + + ret = dev_queue_xmit(skb); + ice_repr_inc_tx_stats(repr, len, ret); - return ice_start_xmit(skb, netdev); + return ret; } /** @@ -354,6 +246,10 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb, u64 cd_cmd, dst_vsi; 
if (!dst) { + struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb); + + if (unlikely(eth->h_proto == htons(ETH_P_LLDP))) + return; cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S; off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX); } else { @@ -374,62 +270,20 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb, static void ice_eswitch_release_env(struct ice_pf *pf) { struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi; - struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; struct ice_vsi_vlan_ops *vlan_ops; vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi); ice_vsi_update_local_lb(uplink_vsi, false); - ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override); - ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override); vlan_ops->ena_rx_filtering(uplink_vsi); - ice_clear_dflt_vsi(uplink_vsi); + ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false, + ICE_FLTR_TX); + ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false, + ICE_FLTR_RX); ice_fltr_add_mac_and_broadcast(uplink_vsi, uplink_vsi->port_info->mac.perm_addr, ICE_FWD_TO_VSI); -} - -/** - * ice_eswitch_vsi_setup - configure eswitch control VSI - * @pf: pointer to PF structure - * @pi: pointer to port_info structure - */ -static struct ice_vsi * -ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) -{ - struct ice_vsi_cfg_params params = {}; - - params.type = ICE_VSI_SWITCHDEV_CTRL; - params.pi = pi; - params.flags = ICE_VSI_FLAG_INIT; - - return ice_vsi_setup(pf, ¶ms); -} - -/** - * ice_eswitch_napi_enable - enable NAPI for all port representors - * @reprs: xarray of reprs - */ -static void ice_eswitch_napi_enable(struct xarray *reprs) -{ - struct ice_repr *repr; - unsigned long id; - - xa_for_each(reprs, id, repr) - napi_enable(&repr->q_vector->napi); -} - -/** - * ice_eswitch_napi_disable - disable NAPI for all port representors - * @reprs: xarray of reprs - */ -static void ice_eswitch_napi_disable(struct xarray *reprs) -{ - struct ice_repr *repr; - unsigned long id; - - xa_for_each(reprs, id, repr) - napi_disable(&repr->q_vector->napi); + ice_vsi_cfg_sw_lldp(uplink_vsi, true, true); } /** @@ -438,7 +292,7 @@ static void ice_eswitch_napi_disable(struct xarray *reprs) */ static int ice_eswitch_enable_switchdev(struct ice_pf *pf) { - struct ice_vsi *ctrl_vsi, *uplink_vsi; + struct ice_vsi *uplink_vsi; uplink_vsi = ice_get_main_vsi(pf); if (!uplink_vsi) @@ -450,17 +304,10 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf) return -EINVAL; } - pf->eswitch.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info); - if (!pf->eswitch.control_vsi) - return -ENODEV; - - ctrl_vsi = pf->eswitch.control_vsi; - /* cp VSI is createad with 1 queue as default */ - pf->eswitch.qs.value = 1; pf->eswitch.uplink_vsi = uplink_vsi; if (ice_eswitch_setup_env(pf)) - goto err_vsi; + return -ENODEV; if (ice_eswitch_br_offloads_init(pf)) goto err_br_offloads; @@ -471,8 +318,6 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf) err_br_offloads: ice_eswitch_release_env(pf); -err_vsi: - ice_vsi_release(ctrl_vsi); return -ENODEV; } @@ -482,14 +327,10 @@ err_vsi: */ static void ice_eswitch_disable_switchdev(struct ice_pf *pf) { - struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi; - ice_eswitch_br_offloads_deinit(pf); ice_eswitch_release_env(pf); - ice_vsi_release(ctrl_vsi); pf->eswitch.is_running = false; - pf->eswitch.qs.is_reaching = false; } /** @@ -530,7 +371,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode, dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch 
mode to switchdev", pf->hw.pf_id); - xa_init_flags(&pf->eswitch.reprs, XA_FLAGS_ALLOC); + xa_init(&pf->eswitch.reprs); NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev"); break; } @@ -602,56 +443,17 @@ void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) static void ice_eswitch_stop_reprs(struct ice_pf *pf) { - ice_eswitch_del_sp_rules(pf); ice_eswitch_stop_all_tx_queues(pf); - ice_eswitch_napi_disable(&pf->eswitch.reprs); } static void ice_eswitch_start_reprs(struct ice_pf *pf) { - ice_eswitch_napi_enable(&pf->eswitch.reprs); ice_eswitch_start_all_tx_queues(pf); - ice_eswitch_add_sp_rules(pf); -} - -static void -ice_eswitch_cp_change_queues(struct ice_eswitch *eswitch, int change) -{ - struct ice_vsi *cp = eswitch->control_vsi; - int queues = 0; - - if (eswitch->qs.is_reaching) { - if (eswitch->qs.to_reach >= eswitch->qs.value + change) { - queues = eswitch->qs.to_reach; - eswitch->qs.is_reaching = false; - } else { - queues = 0; - } - } else if ((change > 0 && cp->alloc_txq <= eswitch->qs.value) || - change < 0) { - queues = cp->alloc_txq + change; - } - - if (queues) { - cp->req_txq = queues; - cp->req_rxq = queues; - ice_vsi_close(cp); - ice_vsi_rebuild(cp, ICE_VSI_FLAG_NO_INIT); - ice_vsi_open(cp); - } else if (!change) { - /* change == 0 means that VSI wasn't open, open it here */ - ice_vsi_open(cp); - } - - eswitch->qs.value += change; - ice_eswitch_remap_rings_to_vectors(eswitch); } -int -ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) +static int +ice_eswitch_attach(struct ice_pf *pf, struct ice_repr *repr, unsigned long *id) { - struct ice_repr *repr; - int change = 1; int err; if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY) @@ -661,31 +463,24 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) err = ice_eswitch_enable_switchdev(pf); if (err) return err; - /* Control plane VSI is created with 1 queue as default */ - pf->eswitch.qs.to_reach -= 1; - change = 0; } ice_eswitch_stop_reprs(pf); - repr = ice_repr_add_vf(vf); - if (IS_ERR(repr)) { - err = PTR_ERR(repr); + err = repr->ops.add(repr); + if (err) goto err_create_repr; - } err = ice_eswitch_setup_repr(pf, repr); if (err) goto err_setup_repr; - err = xa_alloc(&pf->eswitch.reprs, &repr->id, repr, - XA_LIMIT(1, INT_MAX), GFP_KERNEL); + err = xa_insert(&pf->eswitch.reprs, repr->id, repr, GFP_KERNEL); if (err) goto err_xa_alloc; - vf->repr_id = repr->id; + *id = repr->id; - ice_eswitch_cp_change_queues(&pf->eswitch, change); ice_eswitch_start_reprs(pf); return 0; @@ -693,7 +488,7 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) err_xa_alloc: ice_eswitch_release_repr(pf, repr); err_setup_repr: - ice_repr_rem_vf(repr); + repr->ops.rem(repr); err_create_repr: if (xa_empty(&pf->eswitch.reprs)) ice_eswitch_disable_switchdev(pf); @@ -702,73 +497,135 @@ err_create_repr: return err; } -void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) +/** + * ice_eswitch_attach_vf - attach VF to a eswitch + * @pf: pointer to PF structure + * @vf: pointer to VF structure to be attached + * + * During attaching port representor for VF is created. + * + * Return: zero on success or an error code on failure. 
+ */ +int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf) { - struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id); + struct ice_repr *repr = ice_repr_create_vf(vf); struct devlink *devlink = priv_to_devlink(pf); + int err; - if (!repr) - return; + if (IS_ERR(repr)) + return PTR_ERR(repr); + + devl_lock(devlink); + err = ice_eswitch_attach(pf, repr, &vf->repr_id); + if (err) + ice_repr_destroy(repr); + devl_unlock(devlink); + + return err; +} +/** + * ice_eswitch_attach_sf - attach SF to a eswitch + * @pf: pointer to PF structure + * @sf: pointer to SF structure to be attached + * + * During attaching port representor for SF is created. + * + * Return: zero on success or an error code on failure. + */ +int ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf) +{ + struct ice_repr *repr = ice_repr_create_sf(sf); + int err; + + if (IS_ERR(repr)) + return PTR_ERR(repr); + + err = ice_eswitch_attach(pf, repr, &sf->repr_id); + if (err) + ice_repr_destroy(repr); + + return err; +} + +static void ice_eswitch_detach(struct ice_pf *pf, struct ice_repr *repr) +{ ice_eswitch_stop_reprs(pf); + repr->ops.rem(repr); + xa_erase(&pf->eswitch.reprs, repr->id); if (xa_empty(&pf->eswitch.reprs)) ice_eswitch_disable_switchdev(pf); - else - ice_eswitch_cp_change_queues(&pf->eswitch, -1); ice_eswitch_release_repr(pf, repr); - ice_repr_rem_vf(repr); + ice_repr_destroy(repr); if (xa_empty(&pf->eswitch.reprs)) { + struct devlink *devlink = priv_to_devlink(pf); + /* since all port representors are destroyed, there is * no point in keeping the nodes */ ice_devlink_rate_clear_tx_topology(ice_get_main_vsi(pf)); - devl_lock(devlink); devl_rate_nodes_destroy(devlink); - devl_unlock(devlink); } else { ice_eswitch_start_reprs(pf); } } /** - * ice_eswitch_rebuild - rebuild eswitch + * ice_eswitch_detach_vf - detach VF from a eswitch * @pf: pointer to PF structure + * @vf: pointer to VF structure to be detached */ -int ice_eswitch_rebuild(struct ice_pf *pf) +void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf) { - struct ice_repr *repr; - unsigned long id; - int err; + struct ice_repr *repr = xa_load(&pf->eswitch.reprs, vf->repr_id); + struct devlink *devlink = priv_to_devlink(pf); - if (!ice_is_switchdev_running(pf)) - return 0; + if (!repr) + return; - err = ice_vsi_rebuild(pf->eswitch.control_vsi, ICE_VSI_FLAG_INIT); - if (err) - return err; + devl_lock(devlink); + ice_eswitch_detach(pf, repr); + devl_unlock(devlink); +} - xa_for_each(&pf->eswitch.reprs, id, repr) - ice_eswitch_detach(pf, repr->vf); +/** + * ice_eswitch_detach_sf - detach SF from a eswitch + * @pf: pointer to PF structure + * @sf: pointer to SF structure to be detached + */ +void ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf) +{ + struct ice_repr *repr = xa_load(&pf->eswitch.reprs, sf->repr_id); - return 0; + if (!repr) + return; + + ice_eswitch_detach(pf, repr); } /** - * ice_eswitch_reserve_cp_queues - reserve control plane VSI queues - * @pf: pointer to PF structure - * @change: how many more (or less) queues is needed + * ice_eswitch_get_target - get netdev based on src_vsi from descriptor + * @rx_ring: ring used to receive the packet + * @rx_desc: descriptor used to get src_vsi value * - * Remember to call ice_eswitch_attach/detach() the "change" times. + * Get src_vsi value from descriptor and load correct representor. If it isn't + * found return rx_ring->netdev. 
*/ -void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change) +struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc) { - if (pf->eswitch.qs.value + change < 0) - return; + struct ice_eswitch *eswitch = &rx_ring->vsi->back->eswitch; + struct ice_32b_rx_flex_desc_nic_2 *desc; + struct ice_repr *repr; + + desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc; + repr = xa_load(&eswitch->reprs, le16_to_cpu(desc->src_vsi)); + if (!repr) + return rx_ring->netdev; - pf->eswitch.qs.to_reach = pf->eswitch.qs.value + change; - pf->eswitch.qs.is_reaching = true; + return repr->netdev; } diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h index 1a288a03a79a..5c7dcf21b222 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.h +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h @@ -5,12 +5,13 @@ #define _ICE_ESWITCH_H_ #include <net/devlink.h> +#include "devlink/port.h" #ifdef CONFIG_ICE_SWITCHDEV -void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf); -int -ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf); -int ice_eswitch_rebuild(struct ice_pf *pf); +void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf); +void ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf); +int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf); +int ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf); int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode); int @@ -18,7 +19,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode, struct netlink_ext_ack *extack); bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf); -void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi); +void ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi); void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf); @@ -26,12 +27,26 @@ void ice_eswitch_set_target_vsi(struct sk_buff *skb, struct ice_tx_offload_params *off); netdev_tx_t ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev); -void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change); +struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc); + +int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac); +void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac); #else /* CONFIG_ICE_SWITCHDEV */ -static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { } +static inline void +ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf) { } + +static inline void +ice_eswitch_detach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf) { } static inline int -ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) +ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf) +{ + return -EOPNOTSUPP; +} + +static inline int +ice_eswitch_attach_sf(struct ice_pf *pf, struct ice_dynamic_port *sf) { return -EOPNOTSUPP; } @@ -43,17 +58,7 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb, struct ice_tx_offload_params *off) { } static inline void -ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) { } - -static inline int ice_eswitch_configure(struct ice_pf *pf) -{ - return 0; -} - -static inline int ice_eswitch_rebuild(struct ice_pf *pf) -{ - return -EOPNOTSUPP; -} +ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi) { } static inline int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode) { @@ -78,7 +83,18 @@ 
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_BUSY; } -static inline void -ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change) { } +static inline struct net_device * +ice_eswitch_get_target(struct ice_rx_ring *rx_ring, + union ice_32b_rx_flex_desc *rx_desc) +{ + return rx_ring->netdev; +} + +static inline int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac) +{ + return -EOPNOTSUPP; +} + +static inline void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac) { } #endif /* CONFIG_ICE_SWITCHDEV */ #endif /* _ICE_ESWITCH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c index ac5beecd028b..cccb7ddf61c9 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c @@ -582,10 +582,13 @@ ice_eswitch_br_switchdev_event(struct notifier_block *nb, return NOTIFY_DONE; } -static void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge) +void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge) { struct ice_esw_br_fdb_entry *entry, *tmp; + if (!bridge) + return; + list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry); } @@ -896,7 +899,8 @@ ice_eswitch_br_port_deinit(struct ice_esw_br *bridge, if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back) { vsi->back->br_port = NULL; } else { - struct ice_repr *repr = ice_repr_get_by_vsi(vsi); + struct ice_repr *repr = + ice_repr_get(vsi->back, br_port->repr_id); if (repr) repr->br_port = NULL; @@ -937,6 +941,7 @@ ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge, br_port->vsi = repr->src_vsi; br_port->vsi_idx = br_port->vsi->idx; br_port->type = ICE_ESWITCH_BR_VF_REPR_PORT; + br_port->repr_id = repr->id; repr->br_port = br_port; err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL); diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.h b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h index 85a8fadb2928..66a2c804338f 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.h +++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h @@ -46,6 +46,7 @@ struct ice_esw_br_port { enum ice_esw_br_port_type type; u16 vsi_idx; u16 pvid; + u32 repr_id; struct xarray vlans; }; @@ -116,5 +117,6 @@ void ice_eswitch_br_offloads_deinit(struct ice_pf *pf); int ice_eswitch_br_offloads_init(struct ice_pf *pf); +void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge); #endif /* _ICE_ESWITCH_BR_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 78b833b3e1d7..bbf9e6fd315b 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -463,7 +463,331 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) static int ice_get_regs_len(struct net_device __always_unused *netdev) { - return sizeof(ice_regs_dump_list); + return (sizeof(ice_regs_dump_list) + + sizeof(struct ice_regdump_to_ethtool)); +} + +/** + * ice_ethtool_get_maxspeed - Get the max speed for given lport + * @hw: pointer to the HW struct + * @lport: logical port for which max speed is requested + * @max_speed: return max speed for input lport + * + * Return: 0 on success, negative on failure. 
+ */ +static int ice_ethtool_get_maxspeed(struct ice_hw *hw, u8 lport, u8 *max_speed) +{ + struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX] = {}; + bool active_valid = false, pending_valid = true; + u8 option_count = ICE_AQC_PORT_OPT_MAX; + u8 active_idx = 0, pending_idx = 0; + int status; + + status = ice_aq_get_port_options(hw, options, &option_count, lport, + true, &active_idx, &active_valid, + &pending_idx, &pending_valid); + if (status) + return -EIO; + if (!active_valid) + return -EINVAL; + + *max_speed = options[active_idx].max_lane_speed & ICE_AQC_PORT_OPT_MAX_LANE_M; + return 0; +} + +/** + * ice_is_serdes_muxed - returns whether serdes is muxed in hardware + * @hw: pointer to the HW struct + * + * Return: true when serdes is muxed, false when serdes is not muxed. + */ +static bool ice_is_serdes_muxed(struct ice_hw *hw) +{ + u32 reg_value = rd32(hw, GLGEN_SWITCH_MODE_CONFIG); + + return FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, reg_value); +} + +static int ice_map_port_topology_for_sfp(struct ice_port_topology *port_topology, + u8 lport, bool is_muxed) +{ + switch (lport) { + case 0: + port_topology->pcs_quad_select = 0; + port_topology->pcs_port = 0; + port_topology->primary_serdes_lane = 0; + break; + case 1: + port_topology->pcs_quad_select = 1; + port_topology->pcs_port = 0; + if (is_muxed) + port_topology->primary_serdes_lane = 2; + else + port_topology->primary_serdes_lane = 4; + break; + case 2: + port_topology->pcs_quad_select = 0; + port_topology->pcs_port = 1; + port_topology->primary_serdes_lane = 1; + break; + case 3: + port_topology->pcs_quad_select = 1; + port_topology->pcs_port = 1; + if (is_muxed) + port_topology->primary_serdes_lane = 3; + else + port_topology->primary_serdes_lane = 5; + break; + case 4: + port_topology->pcs_quad_select = 0; + port_topology->pcs_port = 2; + port_topology->primary_serdes_lane = 2; + break; + case 5: + port_topology->pcs_quad_select = 1; + port_topology->pcs_port = 2; + port_topology->primary_serdes_lane = 6; + break; + case 6: + port_topology->pcs_quad_select = 0; + port_topology->pcs_port = 3; + port_topology->primary_serdes_lane = 3; + break; + case 7: + port_topology->pcs_quad_select = 1; + port_topology->pcs_port = 3; + port_topology->primary_serdes_lane = 7; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ice_map_port_topology_for_qsfp(struct ice_port_topology *port_topology, + u8 lport, bool is_muxed) +{ + switch (lport) { + case 0: + port_topology->pcs_quad_select = 0; + port_topology->pcs_port = 0; + port_topology->primary_serdes_lane = 0; + break; + case 1: + port_topology->pcs_quad_select = 1; + port_topology->pcs_port = 0; + if (is_muxed) + port_topology->primary_serdes_lane = 2; + else + port_topology->primary_serdes_lane = 4; + break; + case 2: + port_topology->pcs_quad_select = 0; + port_topology->pcs_port = 1; + port_topology->primary_serdes_lane = 1; + break; + case 3: + port_topology->pcs_quad_select = 1; + port_topology->pcs_port = 1; + if (is_muxed) + port_topology->primary_serdes_lane = 3; + else + port_topology->primary_serdes_lane = 5; + break; + case 4: + port_topology->pcs_quad_select = 0; + port_topology->pcs_port = 2; + port_topology->primary_serdes_lane = 2; + break; + case 5: + port_topology->pcs_quad_select = 1; + port_topology->pcs_port = 2; + port_topology->primary_serdes_lane = 6; + break; + case 6: + port_topology->pcs_quad_select = 0; + port_topology->pcs_port = 3; + port_topology->primary_serdes_lane = 3; + break; + case 7: + 
port_topology->pcs_quad_select = 1; + port_topology->pcs_port = 3; + port_topology->primary_serdes_lane = 7; + break; + default: + return -EINVAL; + } + + return 0; +} + +/** + * ice_get_port_topology - returns physical topology like pcsquad, pcsport, + * serdes number + * @hw: pointer to the HW struct + * @lport: logical port for which physical info requested + * @port_topology: buffer to hold port topology + * + * Return: 0 on success, negative on failure. + */ +static int ice_get_port_topology(struct ice_hw *hw, u8 lport, + struct ice_port_topology *port_topology) +{ + struct ice_aqc_get_link_topo cmd = {}; + u16 node_handle = 0; + u8 cage_type = 0; + bool is_muxed; + int err; + u8 ctx; + + ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE << ICE_AQC_LINK_TOPO_NODE_TYPE_S; + ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S; + cmd.addr.topo_params.node_type_ctx = ctx; + + err = ice_aq_get_netlist_node(hw, &cmd, &cage_type, &node_handle); + if (err) + return -EINVAL; + + is_muxed = ice_is_serdes_muxed(hw); + + if (cage_type == 0x11 || /* SFP+ */ + cage_type == 0x12) { /* SFP28 */ + port_topology->serdes_lane_count = 1; + err = ice_map_port_topology_for_sfp(port_topology, lport, is_muxed); + if (err) + return err; + } else if (cage_type == 0x13 || /* QSFP */ + cage_type == 0x14) { /* QSFP28 */ + u8 max_speed = 0; + + err = ice_ethtool_get_maxspeed(hw, lport, &max_speed); + if (err) + return err; + + if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_100G) + port_topology->serdes_lane_count = 4; + else if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_50G) + port_topology->serdes_lane_count = 2; + else + port_topology->serdes_lane_count = 1; + + err = ice_map_port_topology_for_qsfp(port_topology, lport, is_muxed); + if (err) + return err; + } else { + return -EINVAL; + } + + return 0; +} + +/** + * ice_get_tx_rx_equa - read serdes tx rx equaliser param + * @hw: pointer to the HW struct + * @serdes_num: represents the serdes number + * @ptr: structure to read all serdes parameter for given serdes + * + * Return: all serdes equalization parameter supported per serdes number + */ +static int ice_get_tx_rx_equa(struct ice_hw *hw, u8 serdes_num, + struct ice_serdes_equalization_to_ethtool *ptr) +{ + static const int tx = ICE_AQC_OP_CODE_TX_EQU; + static const int rx = ICE_AQC_OP_CODE_RX_EQU; + struct { + int data_in; + int opcode; + int *out; + } aq_params[] = { + { ICE_AQC_TX_EQU_PRE1, tx, &ptr->tx_equ_pre1 }, + { ICE_AQC_TX_EQU_PRE3, tx, &ptr->tx_equ_pre3 }, + { ICE_AQC_TX_EQU_ATTEN, tx, &ptr->tx_equ_atten }, + { ICE_AQC_TX_EQU_POST1, tx, &ptr->tx_equ_post1 }, + { ICE_AQC_TX_EQU_PRE2, tx, &ptr->tx_equ_pre2 }, + { ICE_AQC_RX_EQU_PRE2, rx, &ptr->rx_equ_pre2 }, + { ICE_AQC_RX_EQU_PRE1, rx, &ptr->rx_equ_pre1 }, + { ICE_AQC_RX_EQU_POST1, rx, &ptr->rx_equ_post1 }, + { ICE_AQC_RX_EQU_BFLF, rx, &ptr->rx_equ_bflf }, + { ICE_AQC_RX_EQU_BFHF, rx, &ptr->rx_equ_bfhf }, + { ICE_AQC_RX_EQU_CTLE_GAINHF, rx, &ptr->rx_equ_ctle_gainhf }, + { ICE_AQC_RX_EQU_CTLE_GAINLF, rx, &ptr->rx_equ_ctle_gainlf }, + { ICE_AQC_RX_EQU_CTLE_GAINDC, rx, &ptr->rx_equ_ctle_gaindc }, + { ICE_AQC_RX_EQU_CTLE_BW, rx, &ptr->rx_equ_ctle_bw }, + { ICE_AQC_RX_EQU_DFE_GAIN, rx, &ptr->rx_equ_dfe_gain }, + { ICE_AQC_RX_EQU_DFE_GAIN2, rx, &ptr->rx_equ_dfe_gain_2 }, + { ICE_AQC_RX_EQU_DFE_2, rx, &ptr->rx_equ_dfe_2 }, + { ICE_AQC_RX_EQU_DFE_3, rx, &ptr->rx_equ_dfe_3 }, + { ICE_AQC_RX_EQU_DFE_4, rx, &ptr->rx_equ_dfe_4 }, + { ICE_AQC_RX_EQU_DFE_5, rx, &ptr->rx_equ_dfe_5 }, + { ICE_AQC_RX_EQU_DFE_6, rx, &ptr->rx_equ_dfe_6 }, + { ICE_AQC_RX_EQU_DFE_7, 
rx, &ptr->rx_equ_dfe_7 }, + { ICE_AQC_RX_EQU_DFE_8, rx, &ptr->rx_equ_dfe_8 }, + { ICE_AQC_RX_EQU_DFE_9, rx, &ptr->rx_equ_dfe_9 }, + { ICE_AQC_RX_EQU_DFE_10, rx, &ptr->rx_equ_dfe_10 }, + { ICE_AQC_RX_EQU_DFE_11, rx, &ptr->rx_equ_dfe_11 }, + { ICE_AQC_RX_EQU_DFE_12, rx, &ptr->rx_equ_dfe_12 }, + }; + int err; + + for (int i = 0; i < ARRAY_SIZE(aq_params); i++) { + err = ice_aq_get_phy_equalization(hw, aq_params[i].data_in, + aq_params[i].opcode, + serdes_num, aq_params[i].out); + if (err) + break; + } + + return err; +} + +/** + * ice_get_extended_regs - returns FEC correctable, uncorrectable stats per + * pcsquad, pcsport + * @netdev: pointer to net device structure + * @p: output buffer to fill requested register dump + * + * Return: 0 on success, negative on failure. + */ +static int ice_get_extended_regs(struct net_device *netdev, void *p) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_regdump_to_ethtool *ice_prv_regs_buf; + struct ice_port_topology port_topology = {}; + struct ice_port_info *pi; + struct ice_pf *pf; + struct ice_hw *hw; + unsigned int i; + int err; + + pf = np->vsi->back; + hw = &pf->hw; + pi = np->vsi->port_info; + + /* Serdes parameters are not supported if not the PF VSI */ + if (np->vsi->type != ICE_VSI_PF || !pi) + return -EINVAL; + + err = ice_get_port_topology(hw, pi->lport, &port_topology); + if (err) + return -EINVAL; + if (port_topology.serdes_lane_count > 4) + return -EINVAL; + + ice_prv_regs_buf = p; + + /* Get serdes equalization parameter for available serdes */ + for (i = 0; i < port_topology.serdes_lane_count; i++) { + u8 serdes_num = 0; + + serdes_num = port_topology.primary_serdes_lane + i; + err = ice_get_tx_rx_equa(hw, serdes_num, + &ice_prv_regs_buf->equalization[i]); + if (err) + return -EINVAL; + } + + return 0; } static void @@ -475,10 +799,12 @@ ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) u32 *regs_buf = (u32 *)p; unsigned int i; - regs->version = 1; + regs->version = 2; for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i) regs_buf[i] = rd32(hw, ice_regs_dump_list[i]); + + ice_get_extended_regs(netdev, (void *)®s_buf[i]); } static u32 ice_get_msglevel(struct net_device *netdev) @@ -1492,7 +1818,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) /* Remove rule to direct LLDP packets to default VSI. * The FW LLDP engine will now be consuming them. 
*/ - ice_cfg_sw_lldp(vsi, false, false); + ice_cfg_sw_rx_lldp(vsi->back, false); /* AQ command to start FW LLDP agent will return an * error if the agent is already started @@ -3434,7 +3760,7 @@ ice_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh, } static int -ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) +ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info) { struct ice_pf *pf = ice_netdev_to_pf(dev); @@ -3443,8 +3769,6 @@ ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) return ethtool_op_get_ts_info(dev, info); info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | - SOF_TIMESTAMPING_RX_SOFTWARE | - SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; @@ -3464,8 +3788,7 @@ ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) */ static int ice_get_max_txq(struct ice_pf *pf) { - return min3(pf->num_lan_msix, (u16)num_online_cpus(), - (u16)pf->hw.func_caps.common_cap.num_txq); + return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_txq); } /** @@ -3474,8 +3797,7 @@ static int ice_get_max_txq(struct ice_pf *pf) */ static int ice_get_max_rxq(struct ice_pf *pf) { - return min3(pf->num_lan_msix, (u16)num_online_cpus(), - (u16)pf->hw.func_caps.common_cap.num_rxq); + return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_rxq); } /** @@ -3493,8 +3815,7 @@ static u32 ice_get_combined_cnt(struct ice_vsi *vsi) ice_for_each_q_vector(vsi, q_idx) { struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; - if (q_vector->rx.rx_ring && q_vector->tx.tx_ring) - combined++; + combined += min(q_vector->num_ring_tx, q_vector->num_ring_rx); } return combined; @@ -3593,7 +3914,6 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch) struct ice_pf *pf = vsi->back; int new_rx = 0, new_tx = 0; bool locked = false; - u32 curr_combined; int ret = 0; /* do not support changing channels in Safe Mode */ @@ -3615,22 +3935,8 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch) return -EOPNOTSUPP; } - curr_combined = ice_get_combined_cnt(vsi); - - /* these checks are for cases where user didn't specify a particular - * value on cmd line but we get non-zero value anyway via - * get_channels(); look at ethtool.c in ethtool repository (the user - * space part), particularly, do_schannels() routine - */ - if (ch->rx_count == vsi->num_rxq - curr_combined) - ch->rx_count = 0; - if (ch->tx_count == vsi->num_txq - curr_combined) - ch->tx_count = 0; - if (ch->combined_count == curr_combined) - ch->combined_count = 0; - - if (!(ch->combined_count || (ch->rx_count && ch->tx_count))) { - netdev_err(dev, "Please specify at least 1 Rx and 1 Tx channel\n"); + if (ch->rx_count && ch->tx_count) { + netdev_err(dev, "Dedicated RX or TX channels cannot be used simultaneously\n"); return -EINVAL; } @@ -3658,11 +3964,11 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch) return -EINVAL; } - if (pf->adev) { + if (pf->cdev_info && pf->cdev_info->adev) { mutex_lock(&pf->adev_mutex); - device_lock(&pf->adev->dev); + device_lock(&pf->cdev_info->adev->dev); locked = true; - if (pf->adev->dev.driver) { + if (pf->cdev_info->adev->dev.driver) { netdev_err(dev, "Cannot change channels when RDMA is active\n"); ret = -EBUSY; goto adev_unlock; @@ -3681,7 +3987,7 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch) adev_unlock: if (locked) { - 
device_unlock(&pf->adev->dev); + device_unlock(&pf->cdev_info->adev->dev); mutex_unlock(&pf->adev_mutex); } return ret; @@ -4080,7 +4386,7 @@ ice_repr_get_drvinfo(struct net_device *netdev, { struct ice_repr *repr = ice_netdev_to_repr(netdev); - if (ice_check_vf_ready_for_cfg(repr->vf)) + if (repr->ops.ready(repr)) return; __ice_get_drvinfo(netdev, drvinfo, repr->src_vsi); @@ -4092,8 +4398,7 @@ ice_repr_get_strings(struct net_device *netdev, u32 stringset, u8 *data) struct ice_repr *repr = ice_netdev_to_repr(netdev); /* for port representors only ETH_SS_STATS is supported */ - if (ice_check_vf_ready_for_cfg(repr->vf) || - stringset != ETH_SS_STATS) + if (repr->ops.ready(repr) || stringset != ETH_SS_STATS) return; __ice_get_strings(netdev, stringset, data, repr->src_vsi); @@ -4106,7 +4411,7 @@ ice_repr_get_ethtool_stats(struct net_device *netdev, { struct ice_repr *repr = ice_netdev_to_repr(netdev); - if (ice_check_vf_ready_for_cfg(repr->vf)) + if (repr->ops.ready(repr)) return; __ice_get_ethtool_stats(netdev, stats, data, repr->src_vsi); @@ -4297,14 +4602,179 @@ ice_get_module_eeprom(struct net_device *netdev, return 0; } +/** + * ice_get_port_fec_stats - returns FEC correctable, uncorrectable stats per + * pcsquad, pcsport + * @hw: pointer to the HW struct + * @pcs_quad: pcsquad for input port + * @pcs_port: pcsport for input port + * @fec_stats: buffer to hold FEC statistics for given port + * + * Return: 0 on success, negative on failure. + */ +static int ice_get_port_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port, + struct ethtool_fec_stats *fec_stats) +{ + u32 fec_uncorr_low_val = 0, fec_uncorr_high_val = 0; + u32 fec_corr_low_val = 0, fec_corr_high_val = 0; + int err; + + if (pcs_quad > 1 || pcs_port > 3) + return -EINVAL; + + err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_LOW, + &fec_corr_low_val); + if (err) + return err; + + err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_HIGH, + &fec_corr_high_val); + if (err) + return err; + + err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, + ICE_FEC_UNCORR_LOW, + &fec_uncorr_low_val); + if (err) + return err; + + err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, + ICE_FEC_UNCORR_HIGH, + &fec_uncorr_high_val); + if (err) + return err; + + fec_stats->corrected_blocks.total = (fec_corr_high_val << 16) + + fec_corr_low_val; + fec_stats->uncorrectable_blocks.total = (fec_uncorr_high_val << 16) + + fec_uncorr_low_val; + return 0; +} + +/** + * ice_get_fec_stats - returns FEC correctable, uncorrectable stats per netdev + * @netdev: network interface device structure + * @fec_stats: buffer to hold FEC statistics for given port + * + */ +static void ice_get_fec_stats(struct net_device *netdev, + struct ethtool_fec_stats *fec_stats) +{ + struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_port_topology port_topology; + struct ice_port_info *pi; + struct ice_pf *pf; + struct ice_hw *hw; + int err; + + pf = np->vsi->back; + hw = &pf->hw; + pi = np->vsi->port_info; + + /* Serdes parameters are not supported if not the PF VSI */ + if (np->vsi->type != ICE_VSI_PF || !pi) + return; + + err = ice_get_port_topology(hw, pi->lport, &port_topology); + if (err) { + netdev_info(netdev, "Extended register dump failed Lport %d\n", + pi->lport); + return; + } + + /* Get FEC correctable, uncorrectable counter */ + err = ice_get_port_fec_stats(hw, port_topology.pcs_quad_select, + port_topology.pcs_port, fec_stats); + if (err) + netdev_info(netdev, "FEC stats get failed Lport %d Err %d\n", + pi->lport, err); +} + 
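Side note on the ice_get_port_fec_stats() hunk above: each 32-bit FEC counter is assembled from two 16-bit halves fetched by separate ice_aq_get_fec_stats() calls. A minimal standalone sketch of that composition, in plain C with made-up register values (the helper name is illustrative, not a driver symbol):

#include <stdint.h>
#include <stdio.h>

/* Combine the high and low 16-bit halves of a FEC counter, mirroring
 * "corrected_blocks.total = (fec_corr_high_val << 16) + fec_corr_low_val"
 * from the hunk above.
 */
static uint32_t fec_combine_halves(uint32_t high, uint32_t low)
{
	return (high << 16) + low;
}

int main(void)
{
	uint32_t corr_high = 0x0001, corr_low = 0x86a0;	/* made-up reads */

	printf("corrected blocks: %u\n", fec_combine_halves(corr_high, corr_low));
	return 0;
}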
+#define ICE_ETHTOOL_PFR (ETH_RESET_IRQ | ETH_RESET_DMA | \ + ETH_RESET_FILTER | ETH_RESET_OFFLOAD) + +#define ICE_ETHTOOL_CORER ((ICE_ETHTOOL_PFR | ETH_RESET_RAM) << \ + ETH_RESET_SHARED_SHIFT) + +#define ICE_ETHTOOL_GLOBR (ICE_ETHTOOL_CORER | \ + (ETH_RESET_MAC << ETH_RESET_SHARED_SHIFT) | \ + (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT)) + +#define ICE_ETHTOOL_VFR ICE_ETHTOOL_PFR + +/** + * ice_ethtool_reset - triggers a given type of reset + * @dev: network interface device structure + * @flags: set of reset flags + * + * Return: 0 on success, -EOPNOTSUPP when using unsupported set of flags. + */ +static int ice_ethtool_reset(struct net_device *dev, u32 *flags) +{ + struct ice_netdev_priv *np = netdev_priv(dev); + struct ice_pf *pf = np->vsi->back; + enum ice_reset_req reset; + + switch (*flags) { + case ICE_ETHTOOL_CORER: + reset = ICE_RESET_CORER; + break; + case ICE_ETHTOOL_GLOBR: + reset = ICE_RESET_GLOBR; + break; + case ICE_ETHTOOL_PFR: + reset = ICE_RESET_PFR; + break; + default: + netdev_info(dev, "Unsupported set of ethtool flags"); + return -EOPNOTSUPP; + } + + ice_schedule_reset(pf, reset); + + *flags = 0; + + return 0; +} + +/** + * ice_repr_ethtool_reset - triggers a VF reset + * @dev: network interface device structure + * @flags: set of reset flags + * + * Return: 0 on success, + * -EOPNOTSUPP when using unsupported set of flags + * -EBUSY when VF is not ready for reset. + */ +static int ice_repr_ethtool_reset(struct net_device *dev, u32 *flags) +{ + struct ice_repr *repr = ice_netdev_to_repr(dev); + struct ice_vf *vf; + + if (repr->type != ICE_REPR_TYPE_VF || + *flags != ICE_ETHTOOL_VFR) + return -EOPNOTSUPP; + + vf = repr->vf; + + if (ice_check_vf_ready_for_cfg(vf)) + return -EBUSY; + + *flags = 0; + + return ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK); +} + static const struct ethtool_ops ice_ethtool_ops = { .cap_rss_ctx_supported = true, .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE | ETHTOOL_COALESCE_RX_USECS_HIGH, - .cap_rss_sym_xor_supported = true, + .supported_input_xfrm = RXH_XFRM_SYM_XOR, + .rxfh_per_ctx_key = true, .get_link_ksettings = ice_get_link_ksettings, .set_link_ksettings = ice_set_link_ksettings, + .get_fec_stats = ice_get_fec_stats, .get_drvinfo = ice_get_drvinfo, .get_regs_len = ice_get_regs_len, .get_regs = ice_get_regs, @@ -4331,6 +4801,7 @@ static const struct ethtool_ops ice_ethtool_ops = { .nway_reset = ice_nway_reset, .get_pauseparam = ice_get_pauseparam, .set_pauseparam = ice_set_pauseparam, + .reset = ice_ethtool_reset, .get_rxfh_key_size = ice_get_rxfh_key_size, .get_rxfh_indir_size = ice_get_rxfh_indir_size, .get_rxfh = ice_get_rxfh, @@ -4383,6 +4854,7 @@ static const struct ethtool_ops ice_ethtool_repr_ops = { .get_strings = ice_repr_get_strings, .get_ethtool_stats = ice_repr_get_ethtool_stats, .get_sset_count = ice_repr_get_sset_count, + .reset = ice_repr_ethtool_reset, }; /** diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.h b/drivers/net/ethernet/intel/ice/ice_ethtool.h index b88e3da06f13..23b2cfbc9684 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.h +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.h @@ -9,6 +9,51 @@ struct ice_phy_type_to_ethtool { u8 link_mode; }; +struct ice_serdes_equalization_to_ethtool { + int rx_equ_pre2; + int rx_equ_pre1; + int rx_equ_post1; + int rx_equ_bflf; + int rx_equ_bfhf; + int rx_equ_ctle_gainhf; + int rx_equ_ctle_gainlf; + int rx_equ_ctle_gaindc; + int rx_equ_ctle_bw; + int rx_equ_dfe_gain; + int rx_equ_dfe_gain_2; + int rx_equ_dfe_2; + 
int rx_equ_dfe_3; + int rx_equ_dfe_4; + int rx_equ_dfe_5; + int rx_equ_dfe_6; + int rx_equ_dfe_7; + int rx_equ_dfe_8; + int rx_equ_dfe_9; + int rx_equ_dfe_10; + int rx_equ_dfe_11; + int rx_equ_dfe_12; + int tx_equ_pre1; + int tx_equ_pre3; + int tx_equ_atten; + int tx_equ_post1; + int tx_equ_pre2; +}; + +struct ice_regdump_to_ethtool { + /* A multilane port can have max 4 serdes */ + struct ice_serdes_equalization_to_ethtool equalization[4]; +}; + +/* Port topology from lport i.e. + * serdes mapping, pcsquad, macport, cage etc... + */ +struct ice_port_topology { + u16 pcs_port; + u16 primary_serdes_lane; + u16 serdes_lane_count; + u16 pcs_quad_select; +}; + /* Macro to make PHY type to Ethtool link mode table entry. * The index is the PHY type. */ diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c index 9a1a04f5f146..aceec184e89b 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c @@ -41,6 +41,8 @@ static struct in6_addr zero_ipv6_addr_mask = { static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow) { switch (flow) { + case ICE_FLTR_PTYPE_NONF_ETH: + return ETHER_FLOW; case ICE_FLTR_PTYPE_NONF_IPV4_TCP: return TCP_V4_FLOW; case ICE_FLTR_PTYPE_NONF_IPV4_UDP: @@ -72,6 +74,8 @@ static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow) static enum ice_fltr_ptype ice_ethtool_flow_to_fltr(int eth) { switch (eth) { + case ETHER_FLOW: + return ICE_FLTR_PTYPE_NONF_ETH; case TCP_V4_FLOW: return ICE_FLTR_PTYPE_NONF_IPV4_TCP; case UDP_V4_FLOW: @@ -137,6 +141,10 @@ int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd) memset(&fsp->m_ext, 0, sizeof(fsp->m_ext)); switch (fsp->flow_type) { + case ETHER_FLOW: + fsp->h_u.ether_spec = rule->eth; + fsp->m_u.ether_spec = rule->eth_mask; + break; case IPV4_USER_FLOW: fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; fsp->h_u.usr_ip4_spec.proto = 0; @@ -526,7 +534,7 @@ ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp, * * Returns the number of available flow director filters to this VSI */ -static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi) +int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi) { u16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx); u16 num_guar; @@ -1194,6 +1202,122 @@ ice_set_fdir_ip6_usr_seg(struct ice_flow_seg_info *seg, } /** + * ice_fdir_vlan_valid - validate VLAN data for Flow Director rule + * @dev: network interface device structure + * @fsp: pointer to ethtool Rx flow specification + * + * Return: true if vlan data is valid, false otherwise + */ +static bool ice_fdir_vlan_valid(struct device *dev, + struct ethtool_rx_flow_spec *fsp) +{ + if (fsp->m_ext.vlan_etype && !eth_type_vlan(fsp->h_ext.vlan_etype)) + return false; + + if (fsp->m_ext.vlan_tci && ntohs(fsp->h_ext.vlan_tci) >= VLAN_N_VID) + return false; + + /* proto and vlan must have vlan-etype defined */ + if (fsp->m_u.ether_spec.h_proto && fsp->m_ext.vlan_tci && + !fsp->m_ext.vlan_etype) { + dev_warn(dev, "Filter with proto and vlan require also vlan-etype"); + return false; + } + + return true; +} + +/** + * ice_set_ether_flow_seg - set address and protocol segments for ether flow + * @dev: network interface device structure + * @seg: flow segment for programming + * @eth_spec: mask data from ethtool + * + * Return: 0 on success and errno in case of error. 
+ */ +static int ice_set_ether_flow_seg(struct device *dev, + struct ice_flow_seg_info *seg, + struct ethhdr *eth_spec) +{ + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH); + + /* empty rules are not valid */ + if (is_zero_ether_addr(eth_spec->h_source) && + is_zero_ether_addr(eth_spec->h_dest) && + !eth_spec->h_proto) + return -EINVAL; + + /* Ethertype */ + if (eth_spec->h_proto == htons(0xFFFF)) { + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_TYPE, + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + } else if (eth_spec->h_proto) { + dev_warn(dev, "Only 0x0000 or 0xffff proto mask is allowed for flow-type ether"); + return -EOPNOTSUPP; + } + + /* Source MAC address */ + if (is_broadcast_ether_addr(eth_spec->h_source)) + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_SA, + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + else if (!is_zero_ether_addr(eth_spec->h_source)) + goto err_mask; + + /* Destination MAC address */ + if (is_broadcast_ether_addr(eth_spec->h_dest)) + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_DA, + ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + else if (!is_zero_ether_addr(eth_spec->h_dest)) + goto err_mask; + + return 0; + +err_mask: + dev_warn(dev, "Only 00:00:00:00:00:00 or ff:ff:ff:ff:ff:ff MAC address mask is allowed for flow-type ether"); + return -EOPNOTSUPP; +} + +/** + * ice_set_fdir_vlan_seg - set vlan segments for ether flow + * @seg: flow segment for programming + * @ext_masks: masks for additional RX flow fields + * + * Return: 0 on success and errno in case of error. + */ +static int +ice_set_fdir_vlan_seg(struct ice_flow_seg_info *seg, + struct ethtool_flow_ext *ext_masks) +{ + ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_VLAN); + + if (ext_masks->vlan_etype) { + if (ext_masks->vlan_etype != htons(0xFFFF)) + return -EOPNOTSUPP; + + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_S_VLAN, + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + } + + if (ext_masks->vlan_tci) { + if (ext_masks->vlan_tci != htons(0xFFFF)) + return -EOPNOTSUPP; + + ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_C_VLAN, + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, + ICE_FLOW_FLD_OFF_INVAL, false); + } + + return 0; +} + +/** * ice_cfg_fdir_xtrct_seq - Configure extraction sequence for the given filter * @pf: PF structure * @fsp: pointer to ethtool Rx flow specification @@ -1209,7 +1333,7 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp, struct device *dev = ice_pf_to_dev(pf); enum ice_fltr_ptype fltr_idx; struct ice_hw *hw = &pf->hw; - bool perfect_filter; + bool perfect_filter = false; int ret; seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL); @@ -1262,6 +1386,16 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp, ret = ice_set_fdir_ip6_usr_seg(seg, &fsp->m_u.usr_ip6_spec, &perfect_filter); break; + case ETHER_FLOW: + ret = ice_set_ether_flow_seg(dev, seg, &fsp->m_u.ether_spec); + if (!ret && (fsp->m_ext.vlan_etype || fsp->m_ext.vlan_tci)) { + if (!ice_fdir_vlan_valid(dev, fsp)) { + ret = -EINVAL; + break; + } + ret = ice_set_fdir_vlan_seg(seg, &fsp->m_ext); + } + break; default: ret = -EINVAL; } @@ -1471,22 +1605,19 @@ void ice_fdir_replay_fltrs(struct ice_pf *pf) */ int ice_fdir_create_dflt_rules(struct ice_pf *pf) { + static const enum ice_fltr_ptype dflt_rules[] = { + ICE_FLTR_PTYPE_NONF_IPV4_TCP, ICE_FLTR_PTYPE_NONF_IPV4_UDP, + ICE_FLTR_PTYPE_NONF_IPV6_TCP, ICE_FLTR_PTYPE_NONF_IPV6_UDP, + }; 
int err; /* Create perfect TCP and UDP rules in hardware. */ - err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP); - if (err) - return err; + for (int i = 0; i < ARRAY_SIZE(dflt_rules); i++) { + err = ice_create_init_fdir_rule(pf, dflt_rules[i]); - err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP); - if (err) - return err; - - err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_TCP); - if (err) - return err; - - err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_UDP); + if (err) + break; + } return err; } @@ -1696,11 +1827,12 @@ static int ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp, struct ice_fdir_fltr *input) { - u16 dest_vsi, q_index = 0; + s16 q_index = ICE_FDIR_NO_QUEUE_IDX; u16 orig_q_index = 0; struct ice_pf *pf; struct ice_hw *hw; int flow_type; + u16 dest_vsi; u8 dest_ctl; if (!vsi || !fsp || !input) @@ -1823,6 +1955,10 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp, input->mask.v6.tc = fsp->m_u.usr_ip6_spec.tclass; input->mask.v6.proto = fsp->m_u.usr_ip6_spec.l4_proto; break; + case ETHER_FLOW: + input->eth = fsp->h_u.ether_spec; + input->eth_mask = fsp->m_u.ether_spec; + break; default: /* not doing un-parsed flow types */ return -EINVAL; diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c index 5840c3e04a5b..26b357c0ae15 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_fdir.c @@ -4,6 +4,8 @@ #include "ice_common.h" /* These are training packet headers used to program flow director filters. */ +static const u8 ice_fdir_eth_pkt[22]; + static const u8 ice_fdir_tcpv4_pkt[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00, @@ -417,6 +419,11 @@ static const u8 ice_fdir_ip6_tun_pkt[] = { /* Flow Director no-op training packet table */ static const struct ice_fdir_base_pkt ice_fdir_pkt[] = { { + ICE_FLTR_PTYPE_NONF_ETH, + sizeof(ice_fdir_eth_pkt), ice_fdir_eth_pkt, + sizeof(ice_fdir_eth_pkt), ice_fdir_eth_pkt, + }, + { ICE_FLTR_PTYPE_NONF_IPV4_TCP, sizeof(ice_fdir_tcpv4_pkt), ice_fdir_tcpv4_pkt, sizeof(ice_fdir_tcp4_tun_pkt), ice_fdir_tcp4_tun_pkt, @@ -914,6 +921,21 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, * perspective. The input from user is from Rx filter perspective. 
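The new ETHER_FLOW case in the switch that follows writes the MAC addresses and the EtherType/VLAN words into the 22-byte training packet at fixed byte offsets (12, 14 and 16, per the ice_fdir.h hunk further down). A quick standalone illustration of that layout, assuming big-endian on-wire ordering; the helper name is made up and only loosely modelled on how ice_pkt_insert_u16() is used here:

#include <stdint.h>
#include <stdio.h>

#define ETH_TYPE_F_OFFSET	12	/* outer EtherType / VLAN TPID */
#define ETH_VLAN_TCI_OFFSET	14	/* VLAN TCI when a tag is present */
#define ETH_TYPE_VLAN_OFFSET	16	/* EtherType following the VLAN tag */

/* Store a 16-bit value big-endian at a byte offset in the training packet. */
static void pkt_insert_be16(uint8_t *pkt, int offset, uint16_t val)
{
	pkt[offset] = val >> 8;
	pkt[offset + 1] = val & 0xff;
}

int main(void)
{
	uint8_t pkt[22] = { 0 };	/* same size as ice_fdir_eth_pkt */

	pkt_insert_be16(pkt, ETH_TYPE_F_OFFSET, 0x8100);	/* VLAN TPID */
	pkt_insert_be16(pkt, ETH_VLAN_TCI_OFFSET, 0x0064);	/* VLAN ID 100 */
	pkt_insert_be16(pkt, ETH_TYPE_VLAN_OFFSET, 0x0800);	/* inner IPv4 */

	for (int i = 0; i < (int)sizeof(pkt); i++)
		printf("%02x ", pkt[i]);
	printf("\n");
	return 0;
}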
*/ switch (flow) { + case ICE_FLTR_PTYPE_NONF_ETH: + ice_pkt_insert_mac_addr(loc, input->eth.h_dest); + ice_pkt_insert_mac_addr(loc + ETH_ALEN, input->eth.h_source); + if (input->ext_data.vlan_tag || input->ext_data.vlan_type) { + ice_pkt_insert_u16(loc, ICE_ETH_TYPE_F_OFFSET, + input->ext_data.vlan_type); + ice_pkt_insert_u16(loc, ICE_ETH_VLAN_TCI_OFFSET, + input->ext_data.vlan_tag); + ice_pkt_insert_u16(loc, ICE_ETH_TYPE_VLAN_OFFSET, + input->eth.h_proto); + } else { + ice_pkt_insert_u16(loc, ICE_ETH_TYPE_F_OFFSET, + input->eth.h_proto); + } + break; case ICE_FLTR_PTYPE_NONF_IPV4_TCP: ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET, input->ip.v4.src_ip); @@ -1189,52 +1211,58 @@ static int ice_cmp_ipv6_addr(__be32 *a, __be32 *b) * ice_fdir_comp_rules - compare 2 filters * @a: a Flow Director filter data structure * @b: a Flow Director filter data structure - * @v6: bool true if v6 filter * * Returns true if the filters match */ static bool -ice_fdir_comp_rules(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b, bool v6) +ice_fdir_comp_rules(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b) { enum ice_fltr_ptype flow_type = a->flow_type; /* The calling function already checks that the two filters have the * same flow_type. */ - if (!v6) { - if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP) { - if (a->ip.v4.dst_ip == b->ip.v4.dst_ip && - a->ip.v4.src_ip == b->ip.v4.src_ip && - a->ip.v4.dst_port == b->ip.v4.dst_port && - a->ip.v4.src_port == b->ip.v4.src_port) - return true; - } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) { - if (a->ip.v4.dst_ip == b->ip.v4.dst_ip && - a->ip.v4.src_ip == b->ip.v4.src_ip && - a->ip.v4.l4_header == b->ip.v4.l4_header && - a->ip.v4.proto == b->ip.v4.proto && - a->ip.v4.ip_ver == b->ip.v4.ip_ver && - a->ip.v4.tos == b->ip.v4.tos) - return true; - } - } else { - if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV6_SCTP) { - if (a->ip.v6.dst_port == b->ip.v6.dst_port && - a->ip.v6.src_port == b->ip.v6.src_port && - !ice_cmp_ipv6_addr(a->ip.v6.dst_ip, - b->ip.v6.dst_ip) && - !ice_cmp_ipv6_addr(a->ip.v6.src_ip, - b->ip.v6.src_ip)) - return true; - } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) { - if (a->ip.v6.dst_port == b->ip.v6.dst_port && - a->ip.v6.src_port == b->ip.v6.src_port) - return true; - } + switch (flow_type) { + case ICE_FLTR_PTYPE_NONF_ETH: + if (!memcmp(&a->eth, &b->eth, sizeof(a->eth))) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_TCP: + case ICE_FLTR_PTYPE_NONF_IPV4_UDP: + case ICE_FLTR_PTYPE_NONF_IPV4_SCTP: + if (a->ip.v4.dst_ip == b->ip.v4.dst_ip && + a->ip.v4.src_ip == b->ip.v4.src_ip && + a->ip.v4.dst_port == b->ip.v4.dst_port && + a->ip.v4.src_port == b->ip.v4.src_port) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV4_OTHER: + if (a->ip.v4.dst_ip == b->ip.v4.dst_ip && + a->ip.v4.src_ip == b->ip.v4.src_ip && + a->ip.v4.l4_header == b->ip.v4.l4_header && + a->ip.v4.proto == b->ip.v4.proto && + a->ip.v4.ip_ver == b->ip.v4.ip_ver && + a->ip.v4.tos == b->ip.v4.tos) + return true; + break; + case ICE_FLTR_PTYPE_NONF_IPV6_UDP: + case ICE_FLTR_PTYPE_NONF_IPV6_TCP: + case ICE_FLTR_PTYPE_NONF_IPV6_SCTP: + if (a->ip.v6.dst_port == b->ip.v6.dst_port && + a->ip.v6.src_port == b->ip.v6.src_port && + !ice_cmp_ipv6_addr(a->ip.v6.dst_ip, + b->ip.v6.dst_ip) && + !ice_cmp_ipv6_addr(a->ip.v6.src_ip, + b->ip.v6.src_ip)) + return true; + break; + case 
ICE_FLTR_PTYPE_NONF_IPV6_OTHER: + if (a->ip.v6.dst_port == b->ip.v6.dst_port && + a->ip.v6.src_port == b->ip.v6.src_port) + return true; + break; + default: + break; } return false; @@ -1253,19 +1281,10 @@ bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input) bool ret = false; list_for_each_entry(rule, &hw->fdir_list_head, fltr_node) { - enum ice_fltr_ptype flow_type; - if (rule->flow_type != input->flow_type) continue; - flow_type = input->flow_type; - if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP || - flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) - ret = ice_fdir_comp_rules(rule, input, false); - else - ret = ice_fdir_comp_rules(rule, input, true); + ret = ice_fdir_comp_rules(rule, input); if (ret) { if (rule->fltr_id == input->fltr_id && rule->q_index != input->q_index) diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h index 1b9b84490689..820023c0271f 100644 --- a/drivers/net/ethernet/intel/ice/ice_fdir.h +++ b/drivers/net/ethernet/intel/ice/ice_fdir.h @@ -8,6 +8,9 @@ #define ICE_FDIR_MAX_RAW_PKT_SIZE (512 + ICE_FDIR_TUN_PKT_OFF) /* macros for offsets into packets for flow director programming */ +#define ICE_ETH_TYPE_F_OFFSET 12 +#define ICE_ETH_VLAN_TCI_OFFSET 14 +#define ICE_ETH_TYPE_VLAN_OFFSET 16 #define ICE_IPV4_SRC_ADDR_OFFSET 26 #define ICE_IPV4_DST_ADDR_OFFSET 30 #define ICE_IPV4_TCP_SRC_PORT_OFFSET 34 @@ -50,6 +53,8 @@ */ #define ICE_FDIR_IPV4_PKT_FLAG_MF 0x20 +#define ICE_FDIR_NO_QUEUE_IDX -1 + enum ice_fltr_prgm_desc_dest { ICE_FLTR_PRGM_DESC_DEST_DROP_PKT, ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX, @@ -159,6 +164,8 @@ struct ice_fdir_fltr { struct list_head fltr_node; enum ice_fltr_ptype flow_type; + struct ethhdr eth, eth_mask; + union { struct ice_fdir_v4 v4; struct ice_fdir_v6 v6; @@ -181,7 +188,7 @@ struct ice_fdir_fltr { u16 flex_fltr; /* filter control */ - u16 q_index; + s16 q_index; u16 orig_q_index; u16 dest_vsi; u8 dest_ctl; @@ -202,6 +209,8 @@ struct ice_fdir_base_pkt { const u8 *tun_pkt; }; +struct ice_vsi; + int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id); int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id); int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr); @@ -213,6 +222,7 @@ int ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input, u8 *pkt, bool frag, bool tun); int ice_get_fdir_cnt_all(struct ice_hw *hw); +int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi); bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input); bool ice_fdir_has_frag(enum ice_fltr_ptype flow); struct ice_fdir_fltr * diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 20d5db88c99f..ed95072ca6e3 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -2981,6 +2981,50 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, } /** + * ice_disable_fd_swap - set register appropriately to disable FD SWAP + * @hw: pointer to the HW struct + * @prof_id: profile ID + */ +static void +ice_disable_fd_swap(struct ice_hw *hw, u8 prof_id) +{ + u16 swap_val, fvw_num; + unsigned int i; + + swap_val = ICE_SWAP_VALID; + fvw_num = hw->blk[ICE_BLK_FD].es.fvw / ICE_FDIR_REG_SET_SIZE; + + /* Since the SWAP Flag in the Programming Desc doesn't work, + * here add method to disable the SWAP Option via setting + * certain SWAP 
and INSET register sets. + */ + for (i = 0; i < fvw_num ; i++) { + u32 raw_swap, raw_in; + unsigned int j; + + raw_swap = 0; + raw_in = 0; + + for (j = 0; j < ICE_FDIR_REG_SET_SIZE; j++) { + raw_swap |= (swap_val++) << (j * BITS_PER_BYTE); + raw_in |= ICE_INSET_DFLT << (j * BITS_PER_BYTE); + } + + /* write the FDIR swap register set */ + wr32(hw, GLQF_FDSWAP(prof_id, i), raw_swap); + + ice_debug(hw, ICE_DBG_INIT, "swap wr(%d, %d): 0x%x = 0x%08x\n", + prof_id, i, GLQF_FDSWAP(prof_id, i), raw_swap); + + /* write the FDIR inset register set */ + wr32(hw, GLQF_FDINSET(prof_id, i), raw_in); + + ice_debug(hw, ICE_DBG_INIT, "inset wr(%d, %d): 0x%x = 0x%08x\n", + prof_id, i, GLQF_FDINSET(prof_id, i), raw_in); + } +} + +/* * ice_add_prof - add profile * @hw: pointer to the HW struct * @blk: hardware block @@ -2991,6 +3035,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, * @es: extraction sequence (length of array is determined by the block) * @masks: mask for extraction sequence * @symm: symmetric setting for RSS profiles + * @fd_swap: enable/disable FDIR paired src/dst fields swap option * * This function registers a profile, which matches a set of PTYPES with a * particular extraction sequence. While the hardware profile is allocated @@ -3000,7 +3045,7 @@ ice_add_prof_attrib(struct ice_prof_map *prof, u8 ptg, u16 ptype, int ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], const struct ice_ptype_attributes *attr, u16 attr_cnt, - struct ice_fv_word *es, u16 *masks, bool symm) + struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap) { u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE); DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT); @@ -3020,7 +3065,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], status = ice_alloc_prof_id(hw, blk, &prof_id); if (status) goto err_ice_add_prof; - if (blk == ICE_BLK_FD) { + if (blk == ICE_BLK_FD && fd_swap) { /* For Flow Director block, the extraction sequence may * need to be altered in the case where there are paired * fields that have no match. This is necessary because @@ -3031,6 +3076,8 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], status = ice_update_fd_swap(hw, prof_id, es); if (status) goto err_ice_add_prof; + } else if (blk == ICE_BLK_FD) { + ice_disable_fd_swap(hw, prof_id); } status = ice_update_prof_masking(hw, blk, prof_id, masks); if (status) @@ -4099,6 +4146,54 @@ err_ice_add_prof_id_flow: } /** + * ice_flow_assoc_fdir_prof - add an FDIR profile for main/ctrl VSI + * @hw: pointer to the HW struct + * @blk: HW block + * @dest_vsi: dest VSI + * @fdir_vsi: fdir programming VSI + * @hdl: profile handle + * + * Update the hardware tables to enable the FDIR profile indicated by @hdl for + * the VSI specified by @dest_vsi. On success, the flow will be enabled. + * + * Return: 0 on success or negative errno on failure. 
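ice_flow_assoc_fdir_prof(), documented here and defined just below, associates the profile with the destination VSI first and only then with the FDIR programming VSI, undoing the first association when the second step fails. A small standalone sketch of that apply-then-rollback flow (every name here is illustrative, not a driver symbol):

#include <stdio.h>

static int assoc(const char *vsi, int fail)
{
	if (fail)
		return -1;
	printf("associated profile with %s VSI\n", vsi);
	return 0;
}

static void disassoc(const char *vsi)
{
	printf("rolled back association with %s VSI\n", vsi);
}

/* Apply to the destination VSI, then the control VSI; undo step one
 * when step two fails, as the new helper does.
 */
static int assoc_fdir_prof(int fail_second)
{
	int err;

	err = assoc("main", 0);
	if (err)
		return err;

	err = assoc("ctrl", fail_second);
	if (err) {
		disassoc("main");
		return err;
	}
	return 0;
}

int main(void)
{
	assoc_fdir_prof(0);	/* both steps succeed */
	assoc_fdir_prof(1);	/* second step fails, first one is undone */
	return 0;
}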
+ */ +int +ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk, + u16 dest_vsi, u16 fdir_vsi, u64 hdl) +{ + u16 vsi_num; + int status; + + if (blk != ICE_BLK_FD) + return -EINVAL; + + vsi_num = ice_get_hw_vsi_num(hw, dest_vsi); + status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl); + if (status) { + ice_debug(hw, ICE_DBG_FLOW, "Adding HW profile failed for main VSI flow entry: %d\n", + status); + return status; + } + + vsi_num = ice_get_hw_vsi_num(hw, fdir_vsi); + status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl); + if (status) { + ice_debug(hw, ICE_DBG_FLOW, "Adding HW profile failed for ctrl VSI flow entry: %d\n", + status); + goto err; + } + + return 0; + +err: + vsi_num = ice_get_hw_vsi_num(hw, dest_vsi); + ice_rem_prof_id_flow(hw, blk, vsi_num, hdl); + + return status; +} + +/** * ice_rem_prof_from_list - remove a profile from list * @hw: pointer to the HW struct * @lst: list to remove the profile from diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h index b39d7cdc381f..28b0897adf32 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h @@ -6,6 +6,8 @@ #include "ice_type.h" +#define ICE_FDIR_REG_SET_SIZE 4 + int ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access); void ice_release_change_lock(struct ice_hw *hw); @@ -21,9 +23,6 @@ int ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups, unsigned long *bm, struct list_head *fv_list); int -ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count); -u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld); -int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size, struct ice_sq_cd *cd); bool @@ -42,13 +41,16 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype); int ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[], const struct ice_ptype_attributes *attr, u16 attr_cnt, - struct ice_fv_word *es, u16 *masks, bool symm); + struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap); struct ice_prof_map * ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id); int ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); int ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl); +int +ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk, + u16 dest_vsi, u16 fdir_vsi, u64 hdl); enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len); enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len); diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h index d427a79d001a..817beca591e0 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_type.h +++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h @@ -93,6 +93,7 @@ enum ice_tunnel_type { TNL_GRETAP, TNL_GTPC, TNL_GTPU, + TNL_PFCP, __TNL_TYPE_CNT, TNL_LAST = 0xFF, TNL_ALL = 0xFF, @@ -358,7 +359,8 @@ enum ice_prof_type { ICE_PROF_TUN_GRE = 0x4, ICE_PROF_TUN_GTPU = 0x8, ICE_PROF_TUN_GTPC = 0x10, - ICE_PROF_TUN_ALL = 0x1E, + ICE_PROF_TUN_PFCP = 0x20, + ICE_PROF_TUN_ALL = 0x3E, ICE_PROF_ALL = 0xFF, }; diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c index fc2b58f56279..d97b751052f2 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.c +++ b/drivers/net/ethernet/intel/ice/ice_flow.c @@ -409,6 +409,29 @@ static const u32 ice_ptypes_gtpc_tid[] = { }; /* Packet 
types for GTPU */ +static const struct ice_ptype_attributes ice_attr_gtpu_session[] = { + { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV4_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV4_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV4_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV6_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV6_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV6_GTPU_IPV4_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV6_GTPU_IPV4_TCP, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV6_GTPU_IPV4_ICMP, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV4_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV4_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV4_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV4_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV4_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV6_GTPU_IPV6_FRAG, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV6_GTPU_IPV6_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV6_GTPU_IPV6_UDP_PAY, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV6_GTPU_IPV6_TCP, ICE_PTYPE_ATTR_GTP_SESSION }, + { ICE_MAC_IPV6_GTPU_IPV6_ICMPV6, ICE_PTYPE_ATTR_GTP_SESSION }, +}; + static const struct ice_ptype_attributes ice_attr_gtpu_eh[] = { { ICE_MAC_IPV4_GTPU_IPV4_FRAG, ICE_PTYPE_ATTR_GTP_PDU_EH }, { ICE_MAC_IPV4_GTPU_IPV4_PAY, ICE_PTYPE_ATTR_GTP_PDU_EH }, @@ -1400,7 +1423,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk, /* Add a HW profile for this flow profile */ status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes, params->attr, params->attr_cnt, params->es, - params->mask, symm); + params->mask, symm, true); if (status) { ice_debug(hw, ICE_DBG_FLOW, "Error adding a HW flow profile\n"); goto out; @@ -1523,6 +1546,90 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk, return status; } +#define FLAG_GTP_EH_PDU_LINK BIT_ULL(13) +#define FLAG_GTP_EH_PDU BIT_ULL(14) + +#define HI_BYTE_IN_WORD GENMASK(15, 8) +#define LO_BYTE_IN_WORD GENMASK(7, 0) + +#define FLAG_GTPU_MSK \ + (FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK) +#define FLAG_GTPU_UP \ + (FLAG_GTP_EH_PDU | FLAG_GTP_EH_PDU_LINK) +#define FLAG_GTPU_DW FLAG_GTP_EH_PDU + +/** + * ice_flow_set_parser_prof - Set flow profile based on the parsed profile info + * @hw: pointer to the HW struct + * @dest_vsi: dest VSI + * @fdir_vsi: fdir programming VSI + * @prof: stores parsed profile info from raw flow + * @blk: classification blk + * + * Return: 0 on success or negative errno on failure. 
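The function defined right after this comment copies the parser's field-vector entries into the extraction sequence, optionally reversing the slot order for blocks with a reversed layout and byte-swapping each 16-bit mask via HI_BYTE_IN_WORD/LO_BYTE_IN_WORD. A standalone sketch of those two transformations (the width and mask values below are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Swap the two bytes of a 16-bit extraction-sequence mask, as the
 * HI_BYTE_IN_WORD/LO_BYTE_IN_WORD expression in the patch does.
 */
static uint16_t swap_mask_bytes(uint16_t msk)
{
	return (uint16_t)(((msk << 8) & 0xff00) | ((msk >> 8) & 0x00ff));
}

/* Pick the destination slot, honouring a reversed block layout the same
 * way the patch does (fv_words is the field-vector width).
 */
static int dest_index(int i, int fv_words, int reverse)
{
	return reverse ? fv_words - i - 1 : i;
}

int main(void)
{
	uint16_t masks[3] = { 0x00ff, 0xff00, 0x0f0f };
	int fv_words = 48;	/* illustrative width */

	for (int i = 0; i < 3; i++)
		printf("fv[%d] -> es[%d], mask 0x%04x -> 0x%04x\n",
		       i, dest_index(i, fv_words, 1),
		       masks[i], swap_mask_bytes(masks[i]));
	return 0;
}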
+ */ +int +ice_flow_set_parser_prof(struct ice_hw *hw, u16 dest_vsi, u16 fdir_vsi, + struct ice_parser_profile *prof, enum ice_block blk) +{ + u64 id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX); + struct ice_flow_prof_params *params __free(kfree); + u8 fv_words = hw->blk[blk].es.fvw; + int status; + int i, idx; + + params = kzalloc(sizeof(*params), GFP_KERNEL); + if (!params) + return -ENOMEM; + + for (i = 0; i < ICE_MAX_FV_WORDS; i++) { + params->es[i].prot_id = ICE_PROT_INVALID; + params->es[i].off = ICE_FV_OFFSET_INVAL; + } + + for (i = 0; i < prof->fv_num; i++) { + if (hw->blk[blk].es.reverse) + idx = fv_words - i - 1; + else + idx = i; + params->es[idx].prot_id = prof->fv[i].proto_id; + params->es[idx].off = prof->fv[i].offset; + params->mask[idx] = (((prof->fv[i].msk) << BITS_PER_BYTE) & + HI_BYTE_IN_WORD) | + (((prof->fv[i].msk) >> BITS_PER_BYTE) & + LO_BYTE_IN_WORD); + } + + switch (prof->flags) { + case FLAG_GTPU_DW: + params->attr = ice_attr_gtpu_down; + params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_down); + break; + case FLAG_GTPU_UP: + params->attr = ice_attr_gtpu_up; + params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_up); + break; + default: + if (prof->flags_msk & FLAG_GTPU_MSK) { + params->attr = ice_attr_gtpu_session; + params->attr_cnt = ARRAY_SIZE(ice_attr_gtpu_session); + } + break; + } + + status = ice_add_prof(hw, blk, id, (u8 *)prof->ptypes, + params->attr, params->attr_cnt, + params->es, params->mask, false, false); + if (status) + return status; + + status = ice_flow_assoc_fdir_prof(hw, blk, dest_vsi, fdir_vsi, id); + if (status) + ice_rem_prof(hw, blk, id); + + return status; +} + /** * ice_flow_add_prof - Add a flow profile for packet segments and matched fields * @hw: pointer to the HW struct diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h index 2fd2e0cb483d..6cb7bb879c98 100644 --- a/drivers/net/ethernet/intel/ice/ice_flow.h +++ b/drivers/net/ethernet/intel/ice/ice_flow.h @@ -5,6 +5,7 @@ #define _ICE_FLOW_H_ #include "ice_flex_type.h" +#include "ice_parser.h" #define ICE_FLOW_ENTRY_HANDLE_INVAL 0 #define ICE_FLOW_FLD_OFF_INVAL 0xffff @@ -326,6 +327,7 @@ enum ice_rss_cfg_hdr_type { ICE_RSS_ANY_HEADERS }; +struct ice_vsi; struct ice_rss_hash_cfg { u32 addl_hdrs; /* protocol header fields */ u64 hash_flds; /* hash bit field (ICE_FLOW_HASH_*) to configure */ @@ -445,6 +447,9 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, bool symm, struct ice_flow_prof **prof); int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id); int +ice_flow_set_parser_prof(struct ice_hw *hw, u16 dest_vsi, u16 fdir_vsi, + struct ice_parser_profile *prof, enum ice_block blk); +int ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, u64 entry_id, u16 vsi, enum ice_flow_priority prio, void *data, u64 *entry_h); diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c index 319a2d6fe26c..70c201f569ce 100644 --- a/drivers/net/ethernet/intel/ice/ice_fw_update.c +++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c @@ -1,11 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2018-2019, Intel Corporation. 
*/ -#include <asm/unaligned.h> +#include <linux/unaligned.h> #include <linux/uuid.h> #include <linux/crc32.h> #include <linux/pldmfw.h> #include "ice.h" +#include "ice_lib.h" #include "ice_fw_update.h" struct ice_fwu_priv { @@ -125,6 +126,10 @@ ice_check_component_response(struct ice_pf *pf, u16 id, u8 response, u8 code, case ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED: dev_info(dev, "firmware has rejected updating %s\n", component); break; + case ICE_AQ_NVM_PASS_COMP_PARTIAL_CHECK: + if (ice_is_recovery_mode(&pf->hw)) + return 0; + break; } switch (code) { @@ -286,10 +291,9 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon * * Returns: zero on success, or a negative error code on failure. */ -static int -ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, - u16 block_size, u8 *block, bool last_cmd, - u8 *reset_level, struct netlink_ext_ack *extack) +int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, + u16 block_size, u8 *block, bool last_cmd, + u8 *reset_level, struct netlink_ext_ack *extack) { u16 completion_module, completion_retval; struct device *dev = ice_pf_to_dev(pf); @@ -1005,13 +1009,20 @@ int ice_devlink_flash_update(struct devlink *devlink, return -EOPNOTSUPP; } - if (!hw->dev_caps.common_cap.nvm_unified_update) { + if (!hw->dev_caps.common_cap.nvm_unified_update && !ice_is_recovery_mode(hw)) { NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update"); return -EOPNOTSUPP; } memset(&priv, 0, sizeof(priv)); + if (params->component && strcmp(params->component, "fw.mgmt") == 0) { + priv.context.mode = PLDMFW_UPDATE_MODE_SINGLE_COMPONENT; + priv.context.component_identifier = NVM_COMP_ID_NVM; + } else if (params->component) { + return -EOPNOTSUPP; + } + /* the E822 device needs a slightly different ops */ if (hw->mac_type == ICE_MAC_GENERIC) priv.context.ops = &ice_fwu_ops_e822; diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.h b/drivers/net/ethernet/intel/ice/ice_fw_update.h index 750574885716..04b200462757 100644 --- a/drivers/net/ethernet/intel/ice/ice_fw_update.h +++ b/drivers/net/ethernet/intel/ice/ice_fw_update.h @@ -9,5 +9,8 @@ int ice_devlink_flash_update(struct devlink *devlink, struct netlink_ext_ack *extack); int ice_get_pending_updates(struct ice_pf *pf, u8 *pending, struct netlink_ext_ack *extack); +int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset, + u16 block_size, u8 *block, bool last_cmd, + u8 *reset_level, struct netlink_ext_ack *extack); #endif diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c index c8ea1af51ad3..6b26290452d4 100644 --- a/drivers/net/ethernet/intel/ice/ice_gnss.c +++ b/drivers/net/ethernet/intel/ice/ice_gnss.c @@ -182,7 +182,7 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf) pf->gnss_serial = gnss; kthread_init_delayed_work(&gnss->read_work, ice_gnss_read); - kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev)); + kworker = kthread_run_worker(0, "ice-gnss-%s", dev_name(dev)); if (IS_ERR(kworker)) { kfree(gnss); return NULL; @@ -381,32 +381,23 @@ void ice_gnss_exit(struct ice_pf *pf) } /** - * ice_gnss_is_gps_present - Check if GPS HW is present + * ice_gnss_is_module_present - Check if GNSS HW is present * @hw: pointer to HW struct + * + * Return: true when GNSS is present, false otherwise. 
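The reworked presence check that follows reads the PCA9575 P0 input register and treats the GNSS present pin as active-low: the module only counts as present when the read succeeds and the bit is clear, hence the "err || !!(data & ICE_P0_GNSS_PRSNT_N)" test. A plain-C sketch of that predicate; the bit value used here is illustrative, since the real ICE_P0_GNSS_PRSNT_N definition is not part of this hunk:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GNSS_PRSNT_N	0x20	/* illustrative stand-in for the active-low pin */

/* Present only when the register read worked and the active-low bit is 0. */
static bool gnss_module_present(int err, uint8_t p0_in)
{
	if (err || (p0_in & GNSS_PRSNT_N))
		return false;
	return true;
}

int main(void)
{
	printf("bit clear -> %d\n", gnss_module_present(0, 0x00));		/* present */
	printf("bit set   -> %d\n", gnss_module_present(0, GNSS_PRSNT_N));	/* absent */
	printf("read err  -> %d\n", gnss_module_present(-5, 0x00));		/* absent */
	return 0;
}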
*/ -bool ice_gnss_is_gps_present(struct ice_hw *hw) +bool ice_gnss_is_module_present(struct ice_hw *hw) { - if (!hw->func_caps.ts_func_info.src_tmr_owned) - return false; + int err; + u8 data; - if (!ice_is_gps_in_netlist(hw)) + if (!hw->func_caps.ts_func_info.src_tmr_owned || + !ice_is_gps_in_netlist(hw)) return false; -#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) - if (ice_is_e810t(hw)) { - int err; - u8 data; - - err = ice_read_pca9575_reg_e810t(hw, ICE_PCA9575_P0_IN, &data); - if (err || !!(data & ICE_E810T_P0_GNSS_PRSNT_N)) - return false; - } else { - return false; - } -#else - if (!ice_is_e810t(hw)) + err = ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data); + if (err || !!(data & ICE_P0_GNSS_PRSNT_N)) return false; -#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ return true; } diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h index 75e567ad7059..15daf603ed7b 100644 --- a/drivers/net/ethernet/intel/ice/ice_gnss.h +++ b/drivers/net/ethernet/intel/ice/ice_gnss.h @@ -37,11 +37,11 @@ struct gnss_serial { #if IS_ENABLED(CONFIG_GNSS) void ice_gnss_init(struct ice_pf *pf); void ice_gnss_exit(struct ice_pf *pf); -bool ice_gnss_is_gps_present(struct ice_hw *hw); +bool ice_gnss_is_module_present(struct ice_hw *hw); #else static inline void ice_gnss_init(struct ice_pf *pf) { } static inline void ice_gnss_exit(struct ice_pf *pf) { } -static inline bool ice_gnss_is_gps_present(struct ice_hw *hw) +static inline bool ice_gnss_is_module_present(struct ice_hw *hw) { return false; } diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index cfac1d432c15..aa4bfbcf85d2 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -6,6 +6,14 @@ #ifndef _ICE_HW_AUTOGEN_H_ #define _ICE_HW_AUTOGEN_H_ +#define GLCOMM_QUANTA_PROF(_i) (0x002D2D68 + ((_i) * 4)) +#define GLCOMM_QUANTA_PROF_MAX_INDEX 15 +#define GLCOMM_QUANTA_PROF_QUANTA_SIZE_S 0 +#define GLCOMM_QUANTA_PROF_QUANTA_SIZE_M ICE_M(0x3FFF, 0) +#define GLCOMM_QUANTA_PROF_MAX_CMD_S 16 +#define GLCOMM_QUANTA_PROF_MAX_CMD_M ICE_M(0xFF, 16) +#define GLCOMM_QUANTA_PROF_MAX_DESC_S 24 +#define GLCOMM_QUANTA_PROF_MAX_DESC_M ICE_M(0x3F, 24) #define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4)) #define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4)) #define QTX_COMM_HEAD_HEAD_S 0 @@ -157,6 +165,8 @@ #define GLGEN_RTRIG_CORER_M BIT(0) #define GLGEN_RTRIG_GLOBR_M BIT(1) #define GLGEN_STAT 0x000B612C +#define GLGEN_SWITCH_MODE_CONFIG 0x000B81E0 +#define GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M BIT(2) #define GLGEN_VFLRSTAT(_i) (0x00093A04 + ((_i) * 4)) #define PFGEN_CTRL 0x00091000 #define PFGEN_CTRL_PFSWR_M BIT(0) @@ -177,6 +187,8 @@ #define GLINT_CTL_ITR_GRAN_50_M ICE_M(0xF, 24) #define GLINT_CTL_ITR_GRAN_25_S 28 #define GLINT_CTL_ITR_GRAN_25_M ICE_M(0xF, 28) +#define GLGEN_MAC_LINK_TOPO 0x000B81DC +#define GLGEN_MAC_LINK_TOPO_LINK_TOPO_M GENMASK(1, 0) #define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4)) #define GLINT_DYN_CTL_INTENA_M BIT(0) #define GLINT_DYN_CTL_CLEARPBA_M BIT(1) @@ -529,11 +541,26 @@ #define PFPM_WUS_MAG_M BIT(1) #define PFPM_WUS_MNG_M BIT(3) #define PFPM_WUS_FW_RST_WK_M BIT(31) +#define E830_PRTMAC_TS_TX_MEM_VALID_H 0x001E2020 +#define E830_PRTMAC_TS_TX_MEM_VALID_L 0x001E2000 #define E830_PRTMAC_CL01_PS_QNT 0x001E32A0 #define E830_PRTMAC_CL01_PS_QNT_CL0_M GENMASK(15, 0) #define E830_PRTMAC_CL01_QNT_THR 0x001E3320 #define E830_PRTMAC_CL01_QNT_THR_CL0_M GENMASK(15, 0) +#define 
E830_PRTTSYN_TXTIME_H(_i) (0x001E5800 + ((_i) * 32)) +#define E830_PRTTSYN_TXTIME_L(_i) (0x001E5000 + ((_i) * 32)) +#define E830_GLPTM_ART_CTL 0x00088B50 +#define E830_GLPTM_ART_CTL_ACTIVE_M BIT(0) +#define E830_GLPTM_ART_TIME_H 0x00088B54 +#define E830_GLPTM_ART_TIME_L 0x00088B58 +#define E830_GLTSYN_PTMTIME_H(_i) (0x00088B48 + ((_i) * 4)) +#define E830_GLTSYN_PTMTIME_L(_i) (0x00088B40 + ((_i) * 4)) +#define E830_PFPTM_SEM 0x00088B00 +#define E830_PFPTM_SEM_BUSY_M BIT(0) #define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4)) #define VFINT_DYN_CTLN_CLEARPBA_M BIT(1) +#define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH 0x00234000 +#define E830_MBX_VF_DEC_TRIG(_VF) (0x00233800 + (_VF) * 4) +#define E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(_VF) (0x00233000 + (_VF) * 4) #endif /* _ICE_HW_AUTOGEN_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_hwmon.c b/drivers/net/ethernet/intel/ice/ice_hwmon.c index e4c2c1bff6c0..b7aa6812510a 100644 --- a/drivers/net/ethernet/intel/ice/ice_hwmon.c +++ b/drivers/net/ethernet/intel/ice/ice_hwmon.c @@ -96,7 +96,7 @@ static bool ice_is_internal_reading_supported(struct ice_pf *pf) unsigned long sensors = pf->hw.dev_caps.supported_sensors; - return _test_bit(ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT, &sensors); + return test_bit(ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT, &sensors); }; void ice_hwmon_init(struct ice_pf *pf) diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c index 145b27f2a4ce..6ab53e430f91 100644 --- a/drivers/net/ethernet/intel/ice/ice_idc.c +++ b/drivers/net/ethernet/intel/ice/ice_idc.c @@ -9,22 +9,25 @@ static DEFINE_XARRAY_ALLOC1(ice_aux_id); /** - * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct - * @pf: pointer to PF struct + * ice_get_auxiliary_drv - retrieve iidc_rdma_core_auxiliary_drv struct + * @cdev: pointer to iidc_rdma_core_dev_info struct * * This function has to be called with a device_lock on the - * pf->adev.dev to avoid race conditions. + * cdev->adev.dev to avoid race conditions. 
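The reworked ice_get_auxiliary_drv() below recovers the wrapping iidc_rdma_core_auxiliary_drv from the auxiliary device's bound driver with container_of(). A userspace re-statement of that pattern, with simplified stand-in structs (the field names are illustrative, only the embedding relationship matches the patch):

#include <stddef.h>
#include <stdio.h>

/* Userspace restatement of the kernel's container_of() for illustration. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct aux_driver {
	const char *name;
};

struct rdma_aux_drv {
	int flags;
	struct aux_driver adrv;	/* embedded, like adrv.driver in the patch */
};

int main(void)
{
	struct rdma_aux_drv drv = { .flags = 1, .adrv = { .name = "irdma" } };
	struct aux_driver *inner = &drv.adrv;

	/* Recover the wrapper the same way ice_get_auxiliary_drv() does. */
	struct rdma_aux_drv *outer = container_of(inner, struct rdma_aux_drv, adrv);

	printf("flags=%d name=%s\n", outer->flags, outer->adrv.name);
	return 0;
}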
+ * + * Return: pointer to the matched auxiliary driver struct */ -static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf) +static struct iidc_rdma_core_auxiliary_drv * +ice_get_auxiliary_drv(struct iidc_rdma_core_dev_info *cdev) { struct auxiliary_device *adev; - adev = pf->adev; + adev = cdev->adev; if (!adev || !adev->dev.driver) return NULL; - return container_of(adev->dev.driver, struct iidc_auxiliary_drv, - adrv.driver); + return container_of(adev->dev.driver, + struct iidc_rdma_core_auxiliary_drv, adrv.driver); } /** @@ -32,44 +35,54 @@ static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf) * @pf: pointer to PF struct * @event: event struct */ -void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event) +void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_rdma_event *event) { - struct iidc_auxiliary_drv *iadrv; + struct iidc_rdma_core_auxiliary_drv *iadrv; + struct iidc_rdma_core_dev_info *cdev; if (WARN_ON_ONCE(!in_task())) return; + cdev = pf->cdev_info; + if (!cdev) + return; + mutex_lock(&pf->adev_mutex); - if (!pf->adev) + if (!cdev->adev) goto finish; - device_lock(&pf->adev->dev); - iadrv = ice_get_auxiliary_drv(pf); + device_lock(&cdev->adev->dev); + iadrv = ice_get_auxiliary_drv(cdev); if (iadrv && iadrv->event_handler) - iadrv->event_handler(pf, event); - device_unlock(&pf->adev->dev); + iadrv->event_handler(cdev, event); + device_unlock(&cdev->adev->dev); finish: mutex_unlock(&pf->adev_mutex); } /** * ice_add_rdma_qset - Add Leaf Node for RDMA Qset - * @pf: PF struct + * @cdev: pointer to iidc_rdma_core_dev_info struct * @qset: Resource to be allocated + * + * Return: Zero on success or error code encountered */ -int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset) +int ice_add_rdma_qset(struct iidc_rdma_core_dev_info *cdev, + struct iidc_rdma_qset_params *qset) { u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS]; struct ice_vsi *vsi; struct device *dev; + struct ice_pf *pf; u32 qset_teid; u16 qs_handle; int status; int i; - if (WARN_ON(!pf || !qset)) + if (WARN_ON(!cdev || !qset)) return -EINVAL; + pf = pci_get_drvdata(cdev->pdev); dev = ice_pf_to_dev(pf); if (!ice_is_rdma_ena(pf)) @@ -100,7 +113,6 @@ int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset) dev_err(dev, "Failed VSI RDMA Qset enable\n"); return status; } - vsi->qset_handle[qset->tc] = qset->qs_handle; qset->teid = qset_teid; return 0; @@ -109,18 +121,23 @@ EXPORT_SYMBOL_GPL(ice_add_rdma_qset); /** * ice_del_rdma_qset - Delete leaf node for RDMA Qset - * @pf: PF struct + * @cdev: pointer to iidc_rdma_core_dev_info struct * @qset: Resource to be freed + * + * Return: Zero on success, error code on failure */ -int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset) +int ice_del_rdma_qset(struct iidc_rdma_core_dev_info *cdev, + struct iidc_rdma_qset_params *qset) { struct ice_vsi *vsi; + struct ice_pf *pf; u32 teid; u16 q_id; - if (WARN_ON(!pf || !qset)) + if (WARN_ON(!cdev || !qset)) return -EINVAL; + pf = pci_get_drvdata(cdev->pdev); vsi = ice_find_vsi(pf, qset->vport_id); if (!vsi) { dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n"); @@ -130,36 +147,36 @@ int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset) q_id = qset->qs_handle; teid = qset->teid; - vsi->qset_handle[qset->tc] = 0; - return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id); } EXPORT_SYMBOL_GPL(ice_del_rdma_qset); /** * ice_rdma_request_reset - accept request from RDMA to perform a reset - * @pf: struct 
for PF + * @cdev: pointer to iidc_rdma_core_dev_info struct * @reset_type: type of reset + * + * Return: Zero on success, error code on failure */ -int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type) +int ice_rdma_request_reset(struct iidc_rdma_core_dev_info *cdev, + enum iidc_rdma_reset_type reset_type) { enum ice_reset_req reset; + struct ice_pf *pf; - if (WARN_ON(!pf)) + if (WARN_ON(!cdev)) return -EINVAL; + pf = pci_get_drvdata(cdev->pdev); + switch (reset_type) { - case IIDC_PFR: + case IIDC_FUNC_RESET: reset = ICE_RESET_PFR; break; - case IIDC_CORER: + case IIDC_DEV_RESET: reset = ICE_RESET_CORER; break; - case IIDC_GLOBR: - reset = ICE_RESET_GLOBR; - break; default: - dev_err(ice_pf_to_dev(pf), "incorrect reset request\n"); return -EINVAL; } @@ -169,18 +186,23 @@ EXPORT_SYMBOL_GPL(ice_rdma_request_reset); /** * ice_rdma_update_vsi_filter - update main VSI filters for RDMA - * @pf: pointer to struct for PF + * @cdev: pointer to iidc_rdma_core_dev_info struct * @vsi_id: VSI HW idx to update filter on * @enable: bool whether to enable or disable filters + * + * Return: Zero on success, error code on failure */ -int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable) +int ice_rdma_update_vsi_filter(struct iidc_rdma_core_dev_info *cdev, + u16 vsi_id, bool enable) { struct ice_vsi *vsi; + struct ice_pf *pf; int status; - if (WARN_ON(!pf)) + if (WARN_ON(!cdev)) return -EINVAL; + pf = pci_get_drvdata(cdev->pdev); vsi = ice_find_vsi(pf, vsi_id); if (!vsi) return -EINVAL; @@ -201,88 +223,54 @@ int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable) EXPORT_SYMBOL_GPL(ice_rdma_update_vsi_filter); /** - * ice_get_qos_params - parse QoS params for RDMA consumption - * @pf: pointer to PF struct - * @qos: set of QoS values - */ -void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos) -{ - struct ice_dcbx_cfg *dcbx_cfg; - unsigned int i; - u32 up2tc; - - dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg; - up2tc = rd32(&pf->hw, PRTDCB_TUP2TC); - - qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg); - for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++) - qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7; - - for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) - qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i]; - - qos->pfc_mode = dcbx_cfg->pfc_mode; - if (qos->pfc_mode == IIDC_DSCP_PFC_MODE) - for (i = 0; i < IIDC_MAX_DSCP_MAPPING; i++) - qos->dscp_map[i] = dcbx_cfg->dscp_map[i]; -} -EXPORT_SYMBOL_GPL(ice_get_qos_params); - -/** - * ice_alloc_rdma_qvectors - Allocate vector resources for RDMA driver - * @pf: board private structure to initialize + * ice_alloc_rdma_qvector - alloc vector resources reserved for RDMA driver + * @cdev: pointer to iidc_rdma_core_dev_info struct + * @entry: MSI-X entry to be removed + * + * Return: Zero on success, error code on failure */ -static int ice_alloc_rdma_qvectors(struct ice_pf *pf) +int ice_alloc_rdma_qvector(struct iidc_rdma_core_dev_info *cdev, + struct msix_entry *entry) { - if (ice_is_rdma_ena(pf)) { - int i; - - pf->msix_entries = kcalloc(pf->num_rdma_msix, - sizeof(*pf->msix_entries), - GFP_KERNEL); - if (!pf->msix_entries) - return -ENOMEM; + struct msi_map map; + struct ice_pf *pf; - /* RDMA is the only user of pf->msix_entries array */ - pf->rdma_base_vector = 0; + if (WARN_ON(!cdev)) + return -EINVAL; - for (i = 0; i < pf->num_rdma_msix; i++) { - struct msix_entry *entry = &pf->msix_entries[i]; - struct msi_map map; + pf = pci_get_drvdata(cdev->pdev); + map = ice_alloc_irq(pf, true); + if 
(map.index < 0) + return -ENOMEM; - map = ice_alloc_irq(pf, false); - if (map.index < 0) - break; + entry->entry = map.index; + entry->vector = map.virq; - entry->entry = map.index; - entry->vector = map.virq; - } - } return 0; } +EXPORT_SYMBOL_GPL(ice_alloc_rdma_qvector); /** * ice_free_rdma_qvector - free vector resources reserved for RDMA driver - * @pf: board private structure to initialize + * @cdev: pointer to iidc_rdma_core_dev_info struct + * @entry: MSI-X entry to be removed */ -static void ice_free_rdma_qvector(struct ice_pf *pf) +void ice_free_rdma_qvector(struct iidc_rdma_core_dev_info *cdev, + struct msix_entry *entry) { - int i; + struct msi_map map; + struct ice_pf *pf; - if (!pf->msix_entries) + if (WARN_ON(!cdev || !entry)) return; - for (i = 0; i < pf->num_rdma_msix; i++) { - struct msi_map map; - - map.index = pf->msix_entries[i].entry; - map.virq = pf->msix_entries[i].vector; - ice_free_irq(pf, map); - } + pf = pci_get_drvdata(cdev->pdev); - kfree(pf->msix_entries); - pf->msix_entries = NULL; + map.index = entry->entry; + map.virq = entry->vector; + ice_free_irq(pf, map); } +EXPORT_SYMBOL_GPL(ice_free_rdma_qvector); /** * ice_adev_release - function to be mapped to AUX dev's release op @@ -290,19 +278,23 @@ static void ice_free_rdma_qvector(struct ice_pf *pf) */ static void ice_adev_release(struct device *dev) { - struct iidc_auxiliary_dev *iadev; + struct iidc_rdma_core_auxiliary_dev *iadev; - iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev); + iadev = container_of(dev, struct iidc_rdma_core_auxiliary_dev, + adev.dev); kfree(iadev); } /** * ice_plug_aux_dev - allocate and register AUX device * @pf: pointer to pf struct + * + * Return: Zero on success, error code on failure */ int ice_plug_aux_dev(struct ice_pf *pf) { - struct iidc_auxiliary_dev *iadev; + struct iidc_rdma_core_auxiliary_dev *iadev; + struct iidc_rdma_core_dev_info *cdev; struct auxiliary_device *adev; int ret; @@ -312,17 +304,22 @@ int ice_plug_aux_dev(struct ice_pf *pf) if (!ice_is_rdma_ena(pf)) return 0; + cdev = pf->cdev_info; + if (!cdev) + return -ENODEV; + iadev = kzalloc(sizeof(*iadev), GFP_KERNEL); if (!iadev) return -ENOMEM; adev = &iadev->adev; - iadev->pf = pf; + iadev->cdev_info = cdev; adev->id = pf->aux_idx; adev->dev.release = ice_adev_release; adev->dev.parent = &pf->pdev->dev; - adev->name = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? "roce" : "iwarp"; + adev->name = cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2 ? 
+ "roce" : "iwarp"; ret = auxiliary_device_init(adev); if (ret) { @@ -337,7 +334,7 @@ int ice_plug_aux_dev(struct ice_pf *pf) } mutex_lock(&pf->adev_mutex); - pf->adev = adev; + cdev->adev = adev; mutex_unlock(&pf->adev_mutex); return 0; @@ -351,8 +348,8 @@ void ice_unplug_aux_dev(struct ice_pf *pf) struct auxiliary_device *adev; mutex_lock(&pf->adev_mutex); - adev = pf->adev; - pf->adev = NULL; + adev = pf->cdev_info->adev; + pf->cdev_info->adev = NULL; mutex_unlock(&pf->adev_mutex); if (adev) { @@ -367,7 +364,9 @@ void ice_unplug_aux_dev(struct ice_pf *pf) */ int ice_init_rdma(struct ice_pf *pf) { + struct iidc_rdma_priv_dev_info *privd; struct device *dev = &pf->pdev->dev; + struct iidc_rdma_core_dev_info *cdev; int ret; if (!ice_is_rdma_ena(pf)) { @@ -375,30 +374,50 @@ int ice_init_rdma(struct ice_pf *pf) return 0; } + cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); + if (!cdev) + return -ENOMEM; + + pf->cdev_info = cdev; + + privd = kzalloc(sizeof(*privd), GFP_KERNEL); + if (!privd) { + ret = -ENOMEM; + goto err_privd_alloc; + } + + privd->pf_id = pf->hw.pf_id; ret = xa_alloc(&ice_aux_id, &pf->aux_idx, NULL, XA_LIMIT(1, INT_MAX), GFP_KERNEL); if (ret) { dev_err(dev, "Failed to allocate device ID for AUX driver\n"); - return -ENOMEM; + ret = -ENOMEM; + goto err_alloc_xa; } - /* Reserve vector resources */ - ret = ice_alloc_rdma_qvectors(pf); - if (ret < 0) { - dev_err(dev, "failed to reserve vectors for RDMA\n"); - goto err_reserve_rdma_qvector; - } - pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2; + cdev->iidc_priv = privd; + privd->netdev = pf->vsi[0]->netdev; + + privd->hw_addr = (u8 __iomem *)pf->hw.hw_addr; + cdev->pdev = pf->pdev; + privd->vport_id = pf->vsi[0]->vsi_num; + + pf->cdev_info->rdma_protocol |= IIDC_RDMA_PROTOCOL_ROCEV2; + ice_setup_dcb_qos_info(pf, &privd->qos_info); ret = ice_plug_aux_dev(pf); if (ret) goto err_plug_aux_dev; return 0; err_plug_aux_dev: - ice_free_rdma_qvector(pf); -err_reserve_rdma_qvector: - pf->adev = NULL; + pf->cdev_info->adev = NULL; xa_erase(&ice_aux_id, pf->aux_idx); +err_alloc_xa: + kfree(privd); +err_privd_alloc: + kfree(cdev); + pf->cdev_info = NULL; + return ret; } @@ -412,6 +431,8 @@ void ice_deinit_rdma(struct ice_pf *pf) return; ice_unplug_aux_dev(pf); - ice_free_rdma_qvector(pf); xa_erase(&ice_aux_id, pf->aux_idx); + kfree(pf->cdev_info->iidc_priv); + kfree(pf->cdev_info); + pf->cdev_info = NULL; } diff --git a/drivers/net/ethernet/intel/ice/ice_idc_int.h b/drivers/net/ethernet/intel/ice/ice_idc_int.h index 4b0c86757df9..17dbfcfb6a2a 100644 --- a/drivers/net/ethernet/intel/ice/ice_idc_int.h +++ b/drivers/net/ethernet/intel/ice/ice_idc_int.h @@ -4,10 +4,11 @@ #ifndef _ICE_IDC_INT_H_ #define _ICE_IDC_INT_H_ -#include <linux/net/intel/iidc.h> +#include <linux/net/intel/iidc_rdma.h> +#include <linux/net/intel/iidc_rdma_ice.h> struct ice_pf; -void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event); +void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_rdma_event *event); #endif /* !_ICE_IDC_INT_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_irq.c b/drivers/net/ethernet/intel/ice/ice_irq.c index ad82ff7d1995..30801fd375f0 100644 --- a/drivers/net/ethernet/intel/ice/ice_irq.c +++ b/drivers/net/ethernet/intel/ice/ice_irq.c @@ -20,6 +20,19 @@ ice_init_irq_tracker(struct ice_pf *pf, unsigned int max_vectors, xa_init_flags(&pf->irq_tracker.entries, XA_FLAGS_ALLOC); } +static int +ice_init_virt_irq_tracker(struct ice_pf *pf, u32 base, u32 num_entries) +{ + pf->virt_irq_tracker.bm = bitmap_zalloc(num_entries, GFP_KERNEL); + if 
(!pf->virt_irq_tracker.bm) + return -ENOMEM; + + pf->virt_irq_tracker.num_entries = num_entries; + pf->virt_irq_tracker.base = base; + + return 0; +} + /** * ice_deinit_irq_tracker - free xarray tracker * @pf: board private structure @@ -29,6 +42,11 @@ static void ice_deinit_irq_tracker(struct ice_pf *pf) xa_destroy(&pf->irq_tracker.entries); } +static void ice_deinit_virt_irq_tracker(struct ice_pf *pf) +{ + bitmap_free(pf->virt_irq_tracker.bm); +} + /** * ice_free_irq_res - free a block of resources * @pf: board private structure @@ -45,7 +63,7 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index) /** * ice_get_irq_res - get an interrupt resource * @pf: board private structure - * @dyn_only: force entry to be dynamically allocated + * @dyn_allowed: allow entry to be dynamically allocated * * Allocate new irq entry in the free slot of the tracker. Since xarray * is used, always allocate new entry at the lowest possible index. Set @@ -53,11 +71,12 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index) * * Returns allocated irq entry or NULL on failure. */ -static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only) +static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, + bool dyn_allowed) { - struct xa_limit limit = { .max = pf->irq_tracker.num_entries, + struct xa_limit limit = { .max = pf->irq_tracker.num_entries - 1, .min = 0 }; - unsigned int num_static = pf->irq_tracker.num_static; + unsigned int num_static = pf->irq_tracker.num_static - 1; struct ice_irq_entry *entry; unsigned int index; int ret; @@ -66,9 +85,9 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only) if (!entry) return NULL; - /* skip preallocated entries if the caller says so */ - if (dyn_only) - limit.min = num_static; + /* only already allocated if the caller says so */ + if (!dyn_allowed) + limit.max = num_static; ret = xa_alloc(&pf->irq_tracker.entries, &index, entry, limit, GFP_KERNEL); @@ -78,161 +97,18 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only) entry = NULL; } else { entry->index = index; - entry->dynamic = index >= num_static; + entry->dynamic = index > num_static; } return entry; } -/** - * ice_reduce_msix_usage - Reduce usage of MSI-X vectors - * @pf: board private structure - * @v_remain: number of remaining MSI-X vectors to be distributed - * - * Reduce the usage of MSI-X vectors when entire request cannot be fulfilled. - * pf->num_lan_msix and pf->num_rdma_msix values are set based on number of - * remaining vectors. 
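For readers skimming the ice_get_irq_res() hunk above: the rework keeps preallocated (static) vectors at the low indices and lets dynamically allocated ones live above them, clamping the xa_alloc() search range when dynamic allocation is not allowed. The short standalone C sketch below models only that index-limit logic; the struct, function and harness names are illustrative and not part of the driver.

#include <stdbool.h>
#include <stdio.h>

struct irq_limits {
	unsigned int min;	/* lowest index the allocator may return */
	unsigned int max;	/* highest index the allocator may return */
};

/* Model of the range handed to xa_alloc(): indices [0, num_static - 1] are
 * the preallocated vectors; anything above them must be allocated
 * dynamically (pci_msix_alloc_irq_at() in the driver). Assumes num_static
 * and num_entries are both non-zero.
 */
static struct irq_limits pick_limits(unsigned int num_entries,
				     unsigned int num_static, bool dyn_allowed)
{
	struct irq_limits lim = { .min = 0, .max = num_entries - 1 };

	if (!dyn_allowed)
		lim.max = num_static - 1;	/* stay in the static region */

	return lim;
}

static bool index_is_dynamic(unsigned int index, unsigned int num_static)
{
	return index >= num_static;
}

int main(void)
{
	struct irq_limits lim = pick_limits(64, 16, false);

	printf("static-only range: [%u, %u]\n", lim.min, lim.max);
	printf("index 20 dynamic? %d\n", index_is_dynamic(20, 16));
	return 0;
}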
- */ -static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain) -{ - int v_rdma; - - if (!ice_is_rdma_ena(pf)) { - pf->num_lan_msix = v_remain; - return; - } - - /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */ - v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1; - - if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) { - dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n"); - clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); - - pf->num_rdma_msix = 0; - pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX; - } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) || - (v_remain - v_rdma < v_rdma)) { - /* Support minimum RDMA and give remaining vectors to LAN MSIX - */ - pf->num_rdma_msix = ICE_MIN_RDMA_MSIX; - pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX; - } else { - /* Split remaining MSIX with RDMA after accounting for AEQ MSIX - */ - pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 + - ICE_RDMA_NUM_AEQ_MSIX; - pf->num_lan_msix = v_remain - pf->num_rdma_msix; - } -} - -/** - * ice_ena_msix_range - Request a range of MSIX vectors from the OS - * @pf: board private structure - * - * Compute the number of MSIX vectors wanted and request from the OS. Adjust - * device usage if there are not enough vectors. Return the number of vectors - * reserved or negative on failure. - */ -static int ice_ena_msix_range(struct ice_pf *pf) +#define ICE_RDMA_AEQ_MSIX 1 +static int ice_get_default_msix_amount(struct ice_pf *pf) { - int num_cpus, hw_num_msix, v_other, v_wanted, v_actual; - struct device *dev = ice_pf_to_dev(pf); - int err; - - hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors; - num_cpus = num_online_cpus(); - - /* LAN miscellaneous handler */ - v_other = ICE_MIN_LAN_OICR_MSIX; - - /* Flow Director */ - if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) - v_other += ICE_FDIR_MSIX; - - /* switchdev */ - v_other += ICE_ESWITCH_MSIX; - - v_wanted = v_other; - - /* LAN traffic */ - pf->num_lan_msix = num_cpus; - v_wanted += pf->num_lan_msix; - - /* RDMA auxiliary driver */ - if (ice_is_rdma_ena(pf)) { - pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX; - v_wanted += pf->num_rdma_msix; - } - - if (v_wanted > hw_num_msix) { - int v_remain; - - dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n", - v_wanted, hw_num_msix); - - if (hw_num_msix < ICE_MIN_MSIX) { - err = -ERANGE; - goto exit_err; - } - - v_remain = hw_num_msix - v_other; - if (v_remain < ICE_MIN_LAN_TXRX_MSIX) { - v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX; - v_remain = ICE_MIN_LAN_TXRX_MSIX; - } - - ice_reduce_msix_usage(pf, v_remain); - v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other; - - dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n", - pf->num_lan_msix); - if (ice_is_rdma_ena(pf)) - dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n", - pf->num_rdma_msix); - } - - /* actually reserve the vectors */ - v_actual = pci_alloc_irq_vectors(pf->pdev, ICE_MIN_MSIX, v_wanted, - PCI_IRQ_MSIX); - if (v_actual < 0) { - dev_err(dev, "unable to reserve MSI-X vectors\n"); - err = v_actual; - goto exit_err; - } - - if (v_actual < v_wanted) { - dev_warn(dev, "not enough OS MSI-X vectors. 
requested = %d, obtained = %d\n", - v_wanted, v_actual); - - if (v_actual < ICE_MIN_MSIX) { - /* error if we can't get minimum vectors */ - pci_free_irq_vectors(pf->pdev); - err = -ERANGE; - goto exit_err; - } else { - int v_remain = v_actual - v_other; - - if (v_remain < ICE_MIN_LAN_TXRX_MSIX) - v_remain = ICE_MIN_LAN_TXRX_MSIX; - - ice_reduce_msix_usage(pf, v_remain); - - dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n", - pf->num_lan_msix); - - if (ice_is_rdma_ena(pf)) - dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n", - pf->num_rdma_msix); - } - } - - return v_actual; - -exit_err: - pf->num_rdma_msix = 0; - pf->num_lan_msix = 0; - return err; + return ICE_MIN_LAN_OICR_MSIX + num_online_cpus() + + (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? ICE_FDIR_MSIX : 0) + + (ice_is_rdma_ena(pf) ? num_online_cpus() + ICE_RDMA_AEQ_MSIX : 0); } /** @@ -243,6 +119,7 @@ void ice_clear_interrupt_scheme(struct ice_pf *pf) { pci_free_irq_vectors(pf->pdev); ice_deinit_irq_tracker(pf); + ice_deinit_virt_irq_tracker(pf); } /** @@ -252,27 +129,38 @@ void ice_clear_interrupt_scheme(struct ice_pf *pf) int ice_init_interrupt_scheme(struct ice_pf *pf) { int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; - int vectors, max_vectors; + int vectors; - vectors = ice_ena_msix_range(pf); + /* load default PF MSI-X range */ + if (!pf->msix.min) + pf->msix.min = ICE_MIN_MSIX; - if (vectors < 0) - return -ENOMEM; + if (!pf->msix.max) + pf->msix.max = min(total_vectors, + ice_get_default_msix_amount(pf)); + + pf->msix.total = total_vectors; + pf->msix.rest = total_vectors - pf->msix.max; if (pci_msix_can_alloc_dyn(pf->pdev)) - max_vectors = total_vectors; + vectors = pf->msix.min; else - max_vectors = vectors; + vectors = pf->msix.max; + + vectors = pci_alloc_irq_vectors(pf->pdev, pf->msix.min, vectors, + PCI_IRQ_MSIX); + if (vectors < 0) + return vectors; - ice_init_irq_tracker(pf, max_vectors, vectors); + ice_init_irq_tracker(pf, pf->msix.max, vectors); - return 0; + return ice_init_virt_irq_tracker(pf, pf->msix.max, pf->msix.rest); } /** * ice_alloc_irq - Allocate new interrupt vector * @pf: board private structure - * @dyn_only: force dynamic allocation of the interrupt + * @dyn_allowed: allow dynamic allocation of the interrupt * * Allocate new interrupt vector for a given owner id. * return struct msi_map with interrupt details and track @@ -285,27 +173,22 @@ int ice_init_interrupt_scheme(struct ice_pf *pf) * interrupt will be allocated with pci_msix_alloc_irq_at. * * Some callers may only support dynamically allocated interrupts. - * This is indicated with dyn_only flag. + * This is indicated with dyn_allowed flag. * * On failure, return map with negative .index. The caller * is expected to check returned map index. 
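The replacement sizing logic above boils down to simple arithmetic: one miscellaneous/OICR vector, one LAN vector per online CPU, optionally flow-director vectors, and, when RDMA is enabled, one more vector per CPU plus an AEQ vector, with the result clamped to what the device exposes. A minimal userspace sketch of that calculation follows; the constant values and the clamping harness are assumptions for illustration, not the driver's definitions.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative values only; the real constants live in the driver headers. */
#define MIN_LAN_OICR_MSIX	1
#define FDIR_MSIX		2
#define RDMA_AEQ_MSIX		1

static int default_msix_amount(int num_cpus, bool fdir_ena, bool rdma_ena)
{
	int want = MIN_LAN_OICR_MSIX + num_cpus;	/* misc + one LAN vector per CPU */

	if (fdir_ena)
		want += FDIR_MSIX;			/* flow director */
	if (rdma_ena)
		want += num_cpus + RDMA_AEQ_MSIX;	/* RDMA per CPU + its AEQ */

	return want;
}

int main(void)
{
	int total_vectors = 128;			/* e.g. from device capabilities */
	int max = default_msix_amount(8, true, true);

	if (max > total_vectors)
		max = total_vectors;			/* clamp to the HW limit */

	printf("requested MSI-X range: min=%d max=%d rest=%d\n",
	       1, max, total_vectors - max);
	return 0;
}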
* */ -struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only) +struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_allowed) { - int sriov_base_vector = pf->sriov_base_vector; struct msi_map map = { .index = -ENOENT }; struct device *dev = ice_pf_to_dev(pf); struct ice_irq_entry *entry; - entry = ice_get_irq_res(pf, dyn_only); + entry = ice_get_irq_res(pf, dyn_allowed); if (!entry) return map; - /* fail if we're about to violate SRIOV vectors space */ - if (sriov_base_vector && entry->index >= sriov_base_vector) - goto exit_free_res; - if (pci_msix_can_alloc_dyn(pf->pdev) && entry->dynamic) { map = pci_msix_alloc_irq_at(pf->pdev, entry->index, NULL); if (map.index < 0) @@ -353,26 +236,40 @@ void ice_free_irq(struct ice_pf *pf, struct msi_map map) } /** - * ice_get_max_used_msix_vector - Get the max used interrupt vector - * @pf: board private structure + * ice_virt_get_irqs - get irqs for the SR-IOV use case + * @pf: pointer to PF structure + * @needed: number of irqs to get * - * Return index of maximum used interrupt vectors with respect to the - * beginning of the MSIX table. Take into account that some interrupts - * may have been dynamically allocated after MSIX was initially enabled. + * This returns the first MSI-X vector index in PF space that is used by this + * VF. This index is used when accessing PF relative registers such as + * GLINT_VECT2FUNC and GLINT_DYN_CTL. + * This will always be the OICR index in the AVF driver so any functionality + * using vf->first_vector_idx for queue configuration will use this index. */ -int ice_get_max_used_msix_vector(struct ice_pf *pf) +int ice_virt_get_irqs(struct ice_pf *pf, u32 needed) { - unsigned long start, index, max_idx; - void *entry; + int res = bitmap_find_next_zero_area(pf->virt_irq_tracker.bm, + pf->virt_irq_tracker.num_entries, + 0, needed, 0); - /* Treat all preallocated interrupts as used */ - start = pf->irq_tracker.num_static; - max_idx = start - 1; + if (res >= pf->virt_irq_tracker.num_entries) + return -ENOENT; - xa_for_each_start(&pf->irq_tracker.entries, index, entry, start) { - if (index > max_idx) - max_idx = index; - } + bitmap_set(pf->virt_irq_tracker.bm, res, needed); + + /* conversion from number in bitmap to global irq index */ + return res + pf->virt_irq_tracker.base; +} - return max_idx; +/** + * ice_virt_free_irqs - free irqs used by the VF + * @pf: pointer to PF structure + * @index: first index to be freed + * @irqs: number of irqs to free + */ +void ice_virt_free_irqs(struct ice_pf *pf, u32 index, u32 irqs) +{ + bitmap_clear(pf->virt_irq_tracker.bm, index - pf->virt_irq_tracker.base, + irqs); } diff --git a/drivers/net/ethernet/intel/ice/ice_irq.h b/drivers/net/ethernet/intel/ice/ice_irq.h index f35efc08575e..b2f9dbafd57e 100644 --- a/drivers/net/ethernet/intel/ice/ice_irq.h +++ b/drivers/net/ethernet/intel/ice/ice_irq.h @@ -15,11 +15,22 @@ struct ice_irq_tracker { u16 num_static; /* preallocated entries */ }; +struct ice_virt_irq_tracker { + unsigned long *bm; /* bitmap to track irq usage */ + u32 num_entries; + /* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the + * number of MSIX vectors needed for all SR-IOV VFs from the number of + * MSIX vectors allowed on this PF.
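The new SR-IOV vector tracker introduced above is essentially a contiguous-range allocator over a bitmap, plus a base offset that converts tracker-relative slots into PF-global vector indices. Below is a minimal userspace model of that behaviour using a plain bool array in place of the kernel bitmap helpers; TRACKER_ENTRIES, the base value and the harness are made-up illustration, not driver code.

#include <stdbool.h>
#include <stdio.h>

#define TRACKER_ENTRIES 32

static bool used[TRACKER_ENTRIES];	/* stand-in for the kernel bitmap */
static unsigned int tracker_base;	/* first PF vector reserved for VFs */

/* Reserve @needed contiguous entries; return the PF-global index of the
 * first one, or -1 if no large-enough free area exists.
 */
static int virt_get_irqs(unsigned int needed)
{
	for (unsigned int start = 0; start + needed <= TRACKER_ENTRIES; start++) {
		unsigned int i;

		/* count free slots from @start */
		for (i = 0; i < needed && !used[start + i]; i++)
			;
		if (i == needed) {
			for (i = 0; i < needed; i++)
				used[start + i] = true;
			return (int)(start + tracker_base);
		}
	}
	return -1;
}

static void virt_free_irqs(unsigned int index, unsigned int irqs)
{
	for (unsigned int i = 0; i < irqs; i++)
		used[index - tracker_base + i] = false;
}

int main(void)
{
	tracker_base = 96;			/* e.g. vectors above the PF's own range */
	int first = virt_get_irqs(4);

	printf("VF got vectors starting at %d\n", first);
	virt_free_irqs(first, 4);
	return 0;
}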
+ */ + u32 base; +}; + int ice_init_interrupt_scheme(struct ice_pf *pf); void ice_clear_interrupt_scheme(struct ice_pf *pf); struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only); void ice_free_irq(struct ice_pf *pf, struct msi_map map); -int ice_get_max_used_msix_vector(struct ice_pf *pf); +int ice_virt_get_irqs(struct ice_pf *pf, u32 needed); +void ice_virt_free_irqs(struct ice_pf *pf, u32 index, u32 irqs); #endif diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index f0e76f0a6d60..2410aee59fb2 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -202,11 +202,12 @@ static struct ice_lag *ice_lag_find_primary(struct ice_lag *lag) * @act: rule action * @recipe_id: recipe id for the new rule * @rule_idx: pointer to rule index + * @direction: ICE_FLTR_RX or ICE_FLTR_TX * @add: boolean on whether we are adding filters */ static int ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx, - bool add) + u8 direction, bool add) { struct ice_sw_rule_lkup_rx_tx *s_rule; u16 s_rule_sz, vsi_num; @@ -231,9 +232,16 @@ ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx, act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, vsi_num); - s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX); s_rule->recipe_id = cpu_to_le16(recipe_id); - s_rule->src = cpu_to_le16(hw->port_info->lport); + if (direction == ICE_FLTR_RX) { + s_rule->hdr.type = + cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX); + s_rule->src = cpu_to_le16(hw->port_info->lport); + } else { + s_rule->hdr.type = + cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX); + s_rule->src = cpu_to_le16(vsi_num); + } s_rule->act = cpu_to_le32(act); s_rule->hdr_len = cpu_to_le16(DUMMY_ETH_HDR_LEN); opc = ice_aqc_opc_add_sw_rules; @@ -266,9 +274,27 @@ ice_lag_cfg_dflt_fltr(struct ice_lag *lag, bool add) { u32 act = ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT | ICE_SINGLE_ACT_LAN_ENABLE; + int err; + + err = ice_lag_cfg_fltr(lag, act, lag->pf_recipe, &lag->pf_rx_rule_id, + ICE_FLTR_RX, add); + if (err) + goto err_rx; - return ice_lag_cfg_fltr(lag, act, lag->pf_recipe, - &lag->pf_rule_id, add); + act = ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT | + ICE_SINGLE_ACT_LB_ENABLE; + err = ice_lag_cfg_fltr(lag, act, lag->pf_recipe, &lag->pf_tx_rule_id, + ICE_FLTR_TX, add); + if (err) + goto err_tx; + + return 0; + +err_tx: + ice_lag_cfg_fltr(lag, act, lag->pf_recipe, &lag->pf_rx_rule_id, + ICE_FLTR_RX, !add); +err_rx: + return err; } /** @@ -284,7 +310,7 @@ ice_lag_cfg_drop_fltr(struct ice_lag *lag, bool add) ICE_SINGLE_ACT_DROP; return ice_lag_cfg_fltr(lag, act, lag->lport_recipe, - &lag->lport_rule_idx, add); + &lag->lport_rule_idx, ICE_FLTR_RX, add); } /** @@ -310,7 +336,7 @@ ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr) dev = ice_pf_to_dev(lag->pf); /* interface not active - remove old default VSI rule */ - if (bonding_info->slave.state && lag->pf_rule_id) { + if (bonding_info->slave.state && lag->pf_rx_rule_id) { if (ice_lag_cfg_dflt_fltr(lag, false)) dev_err(dev, "Error removing old default VSI filter\n"); if (ice_lag_cfg_drop_fltr(lag, true)) @@ -319,7 +345,7 @@ ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr) } /* interface becoming active - add new default VSI rule */ - if (!bonding_info->slave.state && !lag->pf_rule_id) { + if (!bonding_info->slave.state && !lag->pf_rx_rule_id) { if (ice_lag_cfg_dflt_fltr(lag, true)) dev_err(dev, "Error adding new default VSI filter\n"); if (lag->lport_rule_idx && 
ice_lag_cfg_drop_fltr(lag, false)) @@ -714,8 +740,7 @@ static void ice_lag_move_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport) pf = lag->pf; ice_for_each_vsi(pf, i) - if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF || - pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL)) + if (pf->vsi[i] && pf->vsi[i]->type == ICE_VSI_VF) ice_lag_move_single_vf_nodes(lag, oldport, newport, i); } @@ -953,8 +978,7 @@ ice_lag_reclaim_vf_nodes(struct ice_lag *lag, struct ice_hw *src_hw) pf = lag->pf; ice_for_each_vsi(pf, i) - if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF || - pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL)) + if (pf->vsi[i] && pf->vsi[i]->type == ICE_VSI_VF) ice_for_each_traffic_class(tc) ice_lag_reclaim_vf_tc(lag, src_hw, i, tc); } @@ -977,6 +1001,28 @@ static void ice_lag_link(struct ice_lag *lag) } /** + * ice_lag_config_eswitch - configure eswitch to work with LAG + * @lag: lag info struct + * @netdev: active network interface device struct + * + * Updates all port representors in eswitch to use @netdev for Tx. + * + * Configures the netdev to keep dst metadata (also used in representor Tx). + * This is required for an uplink without switchdev mode configured. + */ +static void ice_lag_config_eswitch(struct ice_lag *lag, + struct net_device *netdev) +{ + struct ice_repr *repr; + unsigned long id; + + xa_for_each(&lag->pf->eswitch.reprs, id, repr) + repr->dst->u.port_info.lower_dev = netdev; + + netif_keep_dst(netdev); +} + +/** * ice_lag_unlink - handle unlink event * @lag: LAG info struct */ @@ -997,6 +1043,9 @@ static void ice_lag_unlink(struct ice_lag *lag) ice_lag_move_vf_nodes(lag, act_port, pri_port); lag->primary = false; lag->active_port = ICE_LAG_INVALID_PORT; + + /* Config primary's eswitch back to normal operation. */ + ice_lag_config_eswitch(lag, lag->netdev); } else { struct ice_lag *primary_lag; @@ -1272,12 +1321,18 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr) */ if (!primary_lag) { lag->primary = true; + if (!ice_is_switchdev_running(lag->pf)) + return; + /* Configure primary's SWID to be shared */ ice_lag_primary_swid(lag, true); primary_lag = lag; } else { u16 swid; + if (!ice_is_switchdev_running(primary_lag->pf)) + return; + swid = primary_lag->pf->hw.port_info->sw_id; ice_lag_set_swid(swid, lag, true); ice_lag_add_prune_list(primary_lag, lag->pf); @@ -1395,6 +1450,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr) ice_lag_move_vf_nodes(lag, prim_port, event_port); lag->active_port = event_port; + ice_lag_config_eswitch(lag, event_netdev); return; } @@ -1404,6 +1460,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr) /* new active port */ ice_lag_move_vf_nodes(lag, lag->active_port, event_port); lag->active_port = event_port; + ice_lag_config_eswitch(lag, event_netdev); } else { /* port not set as currently active (e.g. 
new active port * has already claimed the nodes and filters @@ -1976,8 +2033,7 @@ ice_lag_move_vf_nodes_sync(struct ice_lag *lag, struct ice_hw *dest_hw) pf = lag->pf; ice_for_each_vsi(pf, i) - if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF || - pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL)) + if (pf->vsi[i] && pf->vsi[i]->type == ICE_VSI_VF) ice_for_each_traffic_class(tc) ice_lag_move_vf_nodes_tc_sync(lag, dest_hw, i, tc); @@ -2149,7 +2205,7 @@ void ice_lag_rebuild(struct ice_pf *pf) ice_lag_cfg_cp_fltr(lag, true); - if (lag->pf_rule_id) + if (lag->pf_rx_rule_id) if (ice_lag_cfg_dflt_fltr(lag, true)) dev_err(ice_pf_to_dev(pf), "Error adding default VSI rule in rebuild\n"); diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h index 183b38792ef2..bab2c83142a1 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.h +++ b/drivers/net/ethernet/intel/ice/ice_lag.h @@ -43,7 +43,8 @@ struct ice_lag { u8 primary:1; /* this is primary */ u16 pf_recipe; u16 lport_recipe; - u16 pf_rule_id; + u16 pf_rx_rule_id; + u16 pf_tx_rule_id; u16 cp_rule_idx; u16 lport_rule_idx; u8 role; diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index d384ddfcb83e..77ba26538b07 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -160,64 +160,6 @@ struct ice_fltr_desc { (0x1ULL << ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S) #define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES 0x1ULL -struct ice_rx_ptype_decoded { - u32 known:1; - u32 outer_ip:1; - u32 outer_ip_ver:2; - u32 outer_frag:1; - u32 tunnel_type:3; - u32 tunnel_end_prot:2; - u32 tunnel_end_frag:1; - u32 inner_prot:4; - u32 payload_layer:3; -}; - -enum ice_rx_ptype_outer_ip { - ICE_RX_PTYPE_OUTER_L2 = 0, - ICE_RX_PTYPE_OUTER_IP = 1, -}; - -enum ice_rx_ptype_outer_ip_ver { - ICE_RX_PTYPE_OUTER_NONE = 0, - ICE_RX_PTYPE_OUTER_IPV4 = 1, - ICE_RX_PTYPE_OUTER_IPV6 = 2, -}; - -enum ice_rx_ptype_outer_fragmented { - ICE_RX_PTYPE_NOT_FRAG = 0, - ICE_RX_PTYPE_FRAG = 1, -}; - -enum ice_rx_ptype_tunnel_type { - ICE_RX_PTYPE_TUNNEL_NONE = 0, - ICE_RX_PTYPE_TUNNEL_IP_IP = 1, - ICE_RX_PTYPE_TUNNEL_IP_GRENAT = 2, - ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, - ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, -}; - -enum ice_rx_ptype_tunnel_end_prot { - ICE_RX_PTYPE_TUNNEL_END_NONE = 0, - ICE_RX_PTYPE_TUNNEL_END_IPV4 = 1, - ICE_RX_PTYPE_TUNNEL_END_IPV6 = 2, -}; - -enum ice_rx_ptype_inner_prot { - ICE_RX_PTYPE_INNER_PROT_NONE = 0, - ICE_RX_PTYPE_INNER_PROT_UDP = 1, - ICE_RX_PTYPE_INNER_PROT_TCP = 2, - ICE_RX_PTYPE_INNER_PROT_SCTP = 3, - ICE_RX_PTYPE_INNER_PROT_ICMP = 4, - ICE_RX_PTYPE_INNER_PROT_TIMESYNC = 5, -}; - -enum ice_rx_ptype_payload_layer { - ICE_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, - ICE_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, - ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, - ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, -}; - /* Rx Flex Descriptor * This descriptor is used instead of the legacy version descriptor when * ice_rlan_ctx.adv_desc is set @@ -287,7 +229,7 @@ struct ice_32b_rx_flex_desc_nic { __le16 status_error1; u8 flexi_flags2; u8 ts_low; - __le16 l2tag2_1st; + __le16 raw_csum; __le16 l2tag2_2nd; /* Qword 3 */ @@ -429,29 +371,21 @@ enum ice_rx_flex_desc_status_error_1_bits { ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! 
*/ }; -#define ICE_RXQ_CTX_SIZE_DWORDS 8 -#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32)) #define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22 #define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS 5 #define GLTCLAN_CQ_CNTX(i, CQ) (GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800)) -/* RLAN Rx queue context data - * - * The sizes of the variables may be larger than needed due to crossing byte - * boundaries. If we do not have the width of the variable set to the correct - * size then we could end up shifting bits off the top of the variable when the - * variable is at the top of a byte and crosses over into the next byte. - */ +/* RLAN Rx queue context data */ struct ice_rlan_ctx { u16 head; - u16 cpuid; /* bigger than needed, see above for reason */ + u8 cpuid; #define ICE_RLAN_BASE_S 7 u64 base; u16 qlen; #define ICE_RLAN_CTX_DBUF_S 7 - u16 dbuf; /* bigger than needed, see above for reason */ + u8 dbuf; #define ICE_RLAN_CTX_HBUF_S 6 - u16 hbuf; /* bigger than needed, see above for reason */ + u8 hbuf; u8 dtype; u8 dsize; u8 crcstrip; @@ -459,29 +393,15 @@ struct ice_rlan_ctx { u8 hsplit_0; u8 hsplit_1; u8 showiv; - u32 rxmax; /* bigger than needed, see above for reason */ + u16 rxmax; u8 tphrdesc_ena; u8 tphwdesc_ena; u8 tphdata_ena; u8 tphhead_ena; - u16 lrxqthresh; /* bigger than needed, see above for reason */ + u8 lrxqthresh; u8 prefena; /* NOTE: normally must be set to 1 at init */ }; -struct ice_ctx_ele { - u16 offset; - u16 size_of; - u16 width; - u16 lsb; -}; - -#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) { \ - .offset = offsetof(struct _struct, _ele), \ - .size_of = sizeof_field(struct _struct, _ele), \ - .width = _width, \ - .lsb = _lsb, \ -} - /* for hsplit_0 field of Rx RLAN context */ enum ice_rlan_ctx_rx_hsplit_0 { ICE_RLAN_RX_HSPLIT_0_NO_SPLIT = 0, @@ -558,10 +478,15 @@ enum ice_tx_desc_len_fields { struct ice_tx_ctx_desc { __le32 tunneling_params; __le16 l2tag2; - __le16 rsvd; + __le16 gcs; __le64 qw1; }; +#define ICE_TX_GCS_DESC_START_M GENMASK(7, 0) +#define ICE_TX_GCS_DESC_OFFSET_M GENMASK(11, 8) +#define ICE_TX_GCS_DESC_TYPE_M GENMASK(14, 12) +#define ICE_TX_GCS_DESC_CSUM_PSH 1 + #define ICE_TXD_CTX_QW1_CMD_S 4 #define ICE_TXD_CTX_QW1_CMD_M (0x7FUL << ICE_TXD_CTX_QW1_CMD_S) @@ -609,18 +534,12 @@ enum ice_tx_ctx_desc_eipt_offload { #define ICE_LAN_TXQ_MAX_QGRPS 127 #define ICE_LAN_TXQ_MAX_QDIS 1023 -/* Tx queue context data - * - * The sizes of the variables may be larger than needed due to crossing byte - * boundaries. If we do not have the width of the variable set to the correct - * size then we could end up shifting bits off the top of the variable when the - * variable is at the top of a byte and crosses over into the next byte. - */ +/* Tx queue context data */ struct ice_tlan_ctx { #define ICE_TLAN_CTX_BASE_S 7 u64 base; /* base is defined in 128-byte units */ u8 port_num; - u16 cgd_num; /* bigger than needed, see above for reason */ + u8 cgd_num; u8 pf_num; u16 vmvf_num; u8 vmvf_type; @@ -631,7 +550,7 @@ struct ice_tlan_ctx { u8 tsyn_ena; u8 internal_usage_flag; u8 alt_vlan; - u16 cpuid; /* bigger than needed, see above for reason */ + u8 cpuid; u8 wb_mode; u8 tphrd_desc; u8 tphrd; @@ -640,7 +559,7 @@ struct ice_tlan_ctx { u16 qnum_in_func; u8 itr_notification_mode; u8 adjust_prof_id; - u32 qlen; /* bigger than needed, see above for reason */ + u16 qlen; u8 quanta_prof_idx; u8 tso_ena; u16 tso_qnum; @@ -648,269 +567,6 @@ struct ice_tlan_ctx { u8 drop_ena; u8 cache_prof_idx; u8 pkt_shaper_prof_idx; - u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! 
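The ICE_TX_GCS_DESC_* masks added above carve the new 16-bit gcs word of the Tx context descriptor into a start field (bits 7:0), an offset field (bits 11:8) and a type field (bits 14:12). The sketch below shows one plausible way such a word could be packed; GENMASK()/FIELD_PREP() are open-coded so it builds outside the kernel, the helper name is invented, and the example field values are illustrative only since the exact units are not spelled out in this hunk.

#include <stdint.h>
#include <stdio.h>

/* Open-coded stand-ins for GENMASK()/FIELD_PREP() so this builds in userspace. */
#define GCS_START_M	0x00ffu		/* bits 7:0   - checksum start  */
#define GCS_OFFSET_M	0x0f00u		/* bits 11:8  - checksum offset */
#define GCS_TYPE_M	0x7000u		/* bits 14:12 - checksum type   */
#define GCS_CSUM_PSH	1u

static uint16_t pack_gcs(uint16_t start, uint16_t offset, uint16_t type)
{
	return (uint16_t)(((start << 0) & GCS_START_M) |
			  ((offset << 8) & GCS_OFFSET_M) |
			  ((type << 12) & GCS_TYPE_M));
}

int main(void)
{
	/* illustrative field values only */
	uint16_t gcs = pack_gcs(34, 2, GCS_CSUM_PSH);

	printf("gcs word: 0x%04x\n", gcs);
	return 0;
}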
*/ }; -/* The ice_ptype_lkup table is used to convert from the 10-bit ptype in the - * hardware to a bit-field that can be used by SW to more easily determine the - * packet type. - * - * Macros are used to shorten the table lines and make this table human - * readable. - * - * We store the PTYPE in the top byte of the bit field - this is just so that - * we can check that the table doesn't have a row missing, as the index into - * the table should be the PTYPE. - * - * Typical work flow: - * - * IF NOT ice_ptype_lkup[ptype].known - * THEN - * Packet is unknown - * ELSE IF ice_ptype_lkup[ptype].outer_ip == ICE_RX_PTYPE_OUTER_IP - * Use the rest of the fields to look at the tunnels, inner protocols, etc - * ELSE - * Use the enum ice_rx_l2_ptype to decode the packet type - * ENDIF - */ -#define ICE_PTYPES \ - /* L2 Packet types */ \ - ICE_PTT_UNUSED_ENTRY(0), \ - ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), \ - ICE_PTT_UNUSED_ENTRY(2), \ - ICE_PTT_UNUSED_ENTRY(3), \ - ICE_PTT_UNUSED_ENTRY(4), \ - ICE_PTT_UNUSED_ENTRY(5), \ - ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ - ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ - ICE_PTT_UNUSED_ENTRY(8), \ - ICE_PTT_UNUSED_ENTRY(9), \ - ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ - ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ - ICE_PTT_UNUSED_ENTRY(12), \ - ICE_PTT_UNUSED_ENTRY(13), \ - ICE_PTT_UNUSED_ENTRY(14), \ - ICE_PTT_UNUSED_ENTRY(15), \ - ICE_PTT_UNUSED_ENTRY(16), \ - ICE_PTT_UNUSED_ENTRY(17), \ - ICE_PTT_UNUSED_ENTRY(18), \ - ICE_PTT_UNUSED_ENTRY(19), \ - ICE_PTT_UNUSED_ENTRY(20), \ - ICE_PTT_UNUSED_ENTRY(21), \ - \ - /* Non Tunneled IPv4 */ \ - ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), \ - ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), \ - ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(25), \ - ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), \ - ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), \ - ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> IPv4 */ \ - ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(32), \ - ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> IPv6 */ \ - ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(39), \ - ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> GRE/NAT */ \ - ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \ - \ - /* IPv4 --> GRE/NAT --> IPv4 */ \ - ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(47), \ - ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> GRE/NAT --> IPv6 */ \ - ICE_PTT(51, IP, IPV4, 
NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(54), \ - ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> GRE/NAT --> MAC */ \ - ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \ - \ - /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ \ - ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(62), \ - ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ \ - ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(69), \ - ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> GRE/NAT --> MAC/VLAN */ \ - ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \ - \ - /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ \ - ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(77), \ - ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ \ - ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(84), \ - ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), \ - \ - /* Non Tunneled IPv6 */ \ - ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), \ - ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), \ - ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(91), \ - ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), \ - ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), \ - ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> IPv4 */ \ - ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(98), \ - ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> 
IPv6 */ \ - ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(105), \ - ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> GRE/NAT */ \ - ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \ - \ - /* IPv6 --> GRE/NAT -> IPv4 */ \ - ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(113), \ - ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> GRE/NAT -> IPv6 */ \ - ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(120), \ - ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> GRE/NAT -> MAC */ \ - ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \ - \ - /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ \ - ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(128), \ - ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ \ - ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(135), \ - ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> GRE/NAT -> MAC/VLAN */ \ - ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \ - \ - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ \ - ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(143), \ - ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ \ - ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(150), \ - ICE_PTT(151, IP, 
IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), - -#define ICE_NUM_DEFINED_PTYPES 154 - -/* macro to make the table lines short, use explicit indexing with [PTYPE] */ -#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ - [PTYPE] = { \ - 1, \ - ICE_RX_PTYPE_OUTER_##OUTER_IP, \ - ICE_RX_PTYPE_OUTER_##OUTER_IP_VER, \ - ICE_RX_PTYPE_##OUTER_FRAG, \ - ICE_RX_PTYPE_TUNNEL_##T, \ - ICE_RX_PTYPE_TUNNEL_END_##TE, \ - ICE_RX_PTYPE_##TEF, \ - ICE_RX_PTYPE_INNER_PROT_##I, \ - ICE_RX_PTYPE_PAYLOAD_LAYER_##PL } - -#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } - -/* shorter macros makes the table fit but are terse */ -#define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG -#define ICE_RX_PTYPE_FRG ICE_RX_PTYPE_FRAG - -/* Lookup table mapping in the 10-bit HW PTYPE to the bit field for decoding */ -static const struct ice_rx_ptype_decoded ice_ptype_lkup[BIT(10)] = { - ICE_PTYPES - - /* unused entries */ - [ICE_NUM_DEFINED_PTYPES ... 1023] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } -}; - -static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype) -{ - return ice_ptype_lkup[ptype]; -} - - #endif /* _ICE_LAN_TX_RX_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 558422120312..03bb16191237 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -7,7 +7,7 @@ #include "ice_lib.h" #include "ice_fltr.h" #include "ice_dcb_lib.h" -#include "ice_devlink.h" +#include "ice_type.h" #include "ice_vsi_vlan_ops.h" /** @@ -21,14 +21,14 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type) return "ICE_VSI_PF"; case ICE_VSI_VF: return "ICE_VSI_VF"; + case ICE_VSI_SF: + return "ICE_VSI_SF"; case ICE_VSI_CTRL: return "ICE_VSI_CTRL"; case ICE_VSI_CHNL: return "ICE_VSI_CHNL"; case ICE_VSI_LB: return "ICE_VSI_LB"; - case ICE_VSI_SWITCHDEV_CTRL: - return "ICE_VSI_SWITCHDEV_CTRL"; default: return "unknown"; } @@ -117,14 +117,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) if (!vsi->q_vectors) goto err_vectors; - vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL); - if (!vsi->af_xdp_zc_qps) - goto err_zc_qps; - return 0; -err_zc_qps: - devm_kfree(dev, vsi->q_vectors); err_vectors: devm_kfree(dev, vsi->rxq_map); err_rxq_map: @@ -144,7 +138,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi) { switch (vsi->type) { case ICE_VSI_PF: - case ICE_VSI_SWITCHDEV_CTRL: + case ICE_VSI_SF: case ICE_VSI_CTRL: case ICE_VSI_LB: /* a user could change the values of num_[tr]x_desc using @@ -163,6 +157,16 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi) } } +static u16 ice_get_rxq_count(struct ice_pf *pf) +{ + return min(ice_get_avail_rxq_count(pf), num_online_cpus()); +} + +static u16 ice_get_txq_count(struct ice_pf *pf) +{ + return min(ice_get_avail_txq_count(pf), num_online_cpus()); +} + /** * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI * @vsi: the VSI being configured @@ -184,9 +188,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi) vsi->alloc_txq = vsi->req_txq; vsi->num_txq = vsi->req_txq; } else { - vsi->alloc_txq = min3(pf->num_lan_msix, - ice_get_avail_txq_count(pf), - (u16)num_online_cpus()); + vsi->alloc_txq = ice_get_txq_count(pf); } pf->num_lan_tx = vsi->alloc_txq; @@ -199,32 +201,19 @@ static void 
ice_vsi_set_num_qs(struct ice_vsi *vsi) vsi->alloc_rxq = vsi->req_rxq; vsi->num_rxq = vsi->req_rxq; } else { - vsi->alloc_rxq = min3(pf->num_lan_msix, - ice_get_avail_rxq_count(pf), - (u16)num_online_cpus()); + vsi->alloc_rxq = ice_get_rxq_count(pf); } } pf->num_lan_rx = vsi->alloc_rxq; - vsi->num_q_vectors = min_t(int, pf->num_lan_msix, - max_t(int, vsi->alloc_rxq, - vsi->alloc_txq)); + vsi->num_q_vectors = max(vsi->alloc_rxq, vsi->alloc_txq); break; - case ICE_VSI_SWITCHDEV_CTRL: - /* The number of queues for ctrl VSI is equal to number of PRs - * Each ring is associated to the corresponding VF_PR netdev. - * Tx and Rx rings are always equal - */ - if (vsi->req_txq && vsi->req_rxq) { - vsi->alloc_txq = vsi->req_txq; - vsi->alloc_rxq = vsi->req_rxq; - } else { - vsi->alloc_txq = 1; - vsi->alloc_rxq = 1; - } - + case ICE_VSI_SF: + vsi->alloc_txq = 1; + vsi->alloc_rxq = 1; vsi->num_q_vectors = 1; + vsi->irq_dyn_alloc = true; break; case ICE_VSI_VF: if (vf->num_req_qs) @@ -328,8 +317,6 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi) dev = ice_pf_to_dev(pf); - bitmap_free(vsi->af_xdp_zc_qps); - vsi->af_xdp_zc_qps = NULL; /* free the ring and vector containers */ devm_kfree(dev, vsi->q_vectors); vsi->q_vectors = NULL; @@ -450,7 +437,7 @@ err_out: * This deallocates the VSI's queue resources, removes it from the PF's * VSI array if necessary, and deallocates the VSI */ -static void ice_vsi_free(struct ice_vsi *vsi) +void ice_vsi_free(struct ice_vsi *vsi) { struct ice_pf *pf = NULL; struct device *dev; @@ -474,6 +461,7 @@ static void ice_vsi_free(struct ice_vsi *vsi) ice_vsi_free_stats(vsi); ice_vsi_free_arrays(vsi); + mutex_destroy(&vsi->xdp_state_lock); mutex_unlock(&pf->sw_mutex); devm_kfree(dev, vsi); } @@ -522,22 +510,6 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data) return IRQ_HANDLED; } -static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *data) -{ - struct ice_q_vector *q_vector = (struct ice_q_vector *)data; - struct ice_pf *pf = q_vector->vsi->back; - struct ice_repr *repr; - unsigned long id; - - if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring) - return IRQ_HANDLED; - - xa_for_each(&pf->eswitch.reprs, id, repr) - napi_schedule(&repr->q_vector->napi); - - return IRQ_HANDLED; -} - /** * ice_vsi_alloc_stat_arrays - Allocate statistics arrays * @vsi: VSI pointer @@ -599,12 +571,11 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch) return -ENOMEM; } + vsi->irq_dyn_alloc = pci_msix_can_alloc_dyn(vsi->back->pdev); + switch (vsi->type) { - case ICE_VSI_SWITCHDEV_CTRL: - /* Setup eswitch MSIX irq handler for VSI */ - vsi->irq_handler = ice_eswitch_msix_clean_rings; - break; case ICE_VSI_PF: + case ICE_VSI_SF: /* Setup default MSIX irq handler for VSI */ vsi->irq_handler = ice_msix_clean_rings; break; @@ -641,7 +612,7 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch) * * returns a pointer to a VSI on success, NULL on failure. 
*/ -static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf) +struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct ice_vsi *vsi = NULL; @@ -673,6 +644,8 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf) pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi, pf->next_vsi); + mutex_init(&vsi->xdp_state_lock); + unlock_pf: mutex_unlock(&pf->sw_mutex); return vsi; @@ -860,7 +833,13 @@ bool ice_is_safe_mode(struct ice_pf *pf) */ bool ice_is_rdma_ena(struct ice_pf *pf) { - return test_bit(ICE_FLAG_RDMA_ENA, pf->flags); + union devlink_param_value value; + int err; + + err = devl_param_driverinit_value_get(priv_to_devlink(pf), + DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA, + &value); + return err ? test_bit(ICE_FLAG_RDMA_ENA, pf->flags) : value.vbool; } /** @@ -933,7 +912,7 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi) max_rss_size); vsi->rss_lut_type = ICE_LUT_PF; break; - case ICE_VSI_SWITCHDEV_CTRL: + case ICE_VSI_SF: vsi->rss_table_size = ICE_LUT_VSI_SIZE; vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size); vsi->rss_lut_type = ICE_LUT_VSI; @@ -1185,6 +1164,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF; break; case ICE_VSI_VF: + case ICE_VSI_SF: /* VF VSI will gets a small RSS table which is a VSI LUT type */ lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI; break; @@ -1205,12 +1185,11 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi) static void ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) { - struct ice_pf *pf = vsi->back; u16 qcount, qmap; u8 offset = 0; int pow; - qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix); + qcount = vsi->num_rxq; pow = order_base_2(qcount); qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset); @@ -1263,7 +1242,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags) case ICE_VSI_PF: ctxt->flags = ICE_AQ_VSI_TYPE_PF; break; - case ICE_VSI_SWITCHDEV_CTRL: + case ICE_VSI_SF: case ICE_VSI_CHNL: ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2; break; @@ -1452,6 +1431,10 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->dev = dev; ring->count = vsi->num_rx_desc; ring->cached_phctime = pf->ptp.cached_phc_time; + + if (ice_is_feature_supported(pf, ICE_F_GCS)) + ring->flags |= ICE_RX_FLAGS_RING_GCS; + WRITE_ONCE(vsi->rx_rings[i], ring); } @@ -1732,6 +1715,12 @@ bool ice_pf_state_is_nominal(struct ice_pf *pf) return true; } +#define ICE_FW_MODE_REC_M BIT(1) +bool ice_is_recovery_mode(struct ice_hw *hw) +{ + return rd32(hw, GL_MNG_FWSM) & ICE_FW_MODE_REC_M; +} + /** * ice_update_eth_stats - Update VSI-specific ethernet statistics counters * @vsi: the VSI to be updated @@ -1790,9 +1779,8 @@ void ice_update_eth_stats(struct ice_vsi *vsi) * @prio: priority for the RXDID for this queue * @ena_ts: true to enable timestamp and false to disable timestamp */ -void -ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio, - bool ena_ts) +void ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio, + bool ena_ts) { int regval = rd32(hw, QRXFLXP_CNTXT(pf_q)); @@ -2077,12 +2065,15 @@ static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi) } /** - * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling + * ice_vsi_cfg_sw_lldp - Config switch rules for LLDP packet handling * @vsi: the VSI being configured * @tx: bool to determine Tx or Rx rule * @create: bool to determine create or remove Rule + * + * Adding an ethtype Tx rule to the uplink VSI 
results in it being applied + * to the whole port, so LLDP transmission for VFs will be blocked too. */ -void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) +void ice_vsi_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) { int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag, enum ice_sw_fwd_act_type act); @@ -2097,19 +2088,59 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create) status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX, ICE_DROP_PACKET); } else { - if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) { - status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num, - create); - } else { + if (!test_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags)) { status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX, ICE_FWD_TO_VSI); + if (!status || !create) + goto report; + + dev_info(dev, + "Failed to add generic LLDP Rx filter on VSI %i error: %d, falling back to specialized AQ control\n", + vsi->vsi_num, status); } + + status = ice_lldp_fltr_add_remove(&pf->hw, vsi, create); + if (!status) + set_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags); + } +report: if (status) - dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n", - create ? "adding" : "removing", tx ? "TX" : "RX", - vsi->vsi_num, status); + dev_warn(dev, "Failed to %s %s LLDP rule on VSI %i error: %d\n", + create ? "add" : "remove", tx ? "Tx" : "Rx", + vsi->vsi_num, status); +} + +/** + * ice_cfg_sw_rx_lldp - Enable/disable software handling of LLDP + * @pf: the PF being configured + * @enable: enable or disable + * + * Configure switch rules to enable/disable LLDP handling by software + * across PF. + */ +void ice_cfg_sw_rx_lldp(struct ice_pf *pf, bool enable) +{ + struct ice_vsi *vsi; + struct ice_vf *vf; + unsigned int bkt; + + vsi = ice_get_main_vsi(pf); + ice_vsi_cfg_sw_lldp(vsi, false, enable); + + if (!test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) + return; + + ice_for_each_vf(pf, bkt, vf) { + vsi = ice_get_vf_vsi(vf); + + if (WARN_ON(!vsi)) + continue; + + if (ice_vf_is_lldp_ena(vf)) + ice_vsi_cfg_sw_lldp(vsi, false, enable); + } } /** @@ -2145,7 +2176,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi) case ICE_VSI_CHNL: case ICE_VSI_LB: case ICE_VSI_PF: - case ICE_VSI_SWITCHDEV_CTRL: + case ICE_VSI_SF: max_agg_nodes = ICE_MAX_PF_AGG_NODES; agg_node_id_start = ICE_PF_AGG_NODE_ID_START; agg_node_iter = &pf->pf_agg_node[0]; @@ -2273,10 +2304,8 @@ static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi) /** * ice_vsi_cfg_def - configure default VSI based on the type * @vsi: pointer to VSI - * @params: the parameters to configure this VSI with */ -static int -ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) +static int ice_vsi_cfg_def(struct ice_vsi *vsi) { struct device *dev = ice_pf_to_dev(vsi->back); struct ice_pf *pf = vsi->back; @@ -2284,7 +2313,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) vsi->vsw = pf->first_sw; - ret = ice_vsi_alloc_def(vsi, params->ch); + ret = ice_vsi_alloc_def(vsi, vsi->ch); if (ret) return ret; @@ -2309,7 +2338,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) ice_vsi_set_tc_cfg(vsi); /* create the VSI */ - ret = ice_vsi_init(vsi, params->flags); + ret = ice_vsi_init(vsi, vsi->flags); if (ret) goto unroll_get_qs; @@ -2317,7 +2346,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) switch (vsi->type) { case ICE_VSI_CTRL: - case ICE_VSI_SWITCHDEV_CTRL: + case ICE_VSI_SF: case ICE_VSI_PF: ret = ice_vsi_alloc_q_vectors(vsi); if (ret) @@ -2331,22 +2360,20 @@ ice_vsi_cfg_def(struct ice_vsi 
*vsi, struct ice_vsi_cfg_params *params) if (ret) goto unroll_vector_base; - ice_vsi_map_rings_to_vectors(vsi); - - /* Associate q_vector rings to napi */ - ice_vsi_set_napi_queues(vsi); - - vsi->stat_offsets_loaded = false; - if (ice_is_xdp_ena_vsi(vsi)) { ret = ice_vsi_determine_xdp_res(vsi); if (ret) goto unroll_vector_base; - ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog); + ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog, + ICE_XDP_CFG_PART); if (ret) goto unroll_vector_base; } + ice_vsi_map_rings_to_vectors(vsi); + + vsi->stat_offsets_loaded = false; + /* ICE_VSI_CTRL does not need RSS so skip RSS processing */ if (vsi->type != ICE_VSI_CTRL) /* Do not exit if configuring RSS had an issue, at @@ -2430,23 +2457,16 @@ unroll_vsi_alloc: /** * ice_vsi_cfg - configure a previously allocated VSI * @vsi: pointer to VSI - * @params: parameters used to configure this VSI */ -int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) +int ice_vsi_cfg(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; int ret; - if (WARN_ON(params->type == ICE_VSI_VF && !params->vf)) + if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) return -EINVAL; - vsi->type = params->type; - vsi->port_info = params->pi; - - /* For VSIs which don't have a connected VF, this will be NULL */ - vsi->vf = params->vf; - - ret = ice_vsi_cfg_def(vsi, params); + ret = ice_vsi_cfg_def(vsi); if (ret) return ret; @@ -2476,24 +2496,17 @@ void ice_vsi_decfg(struct ice_vsi *vsi) struct ice_pf *pf = vsi->back; int err; - /* The Rx rule will only exist to remove if the LLDP FW - * engine is currently stopped - */ - if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF && - !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags)) - ice_cfg_sw_lldp(vsi, false, false); - ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx); if (err) dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n", vsi->vsi_num, err); - if (ice_is_xdp_ena_vsi(vsi)) + if (vsi->xdp_rings) /* return value check can be skipped here, it always returns * 0 if reset is in progress */ - ice_destroy_xdp_rings(vsi); + ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART); ice_vsi_clear_rings(vsi); ice_vsi_free_q_vectors(vsi); @@ -2532,7 +2545,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params) * a port_info structure for it. 
*/ if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) || - WARN_ON(!params->pi)) + WARN_ON(!params->port_info)) return NULL; vsi = ice_vsi_alloc(pf); @@ -2541,7 +2554,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params) return NULL; } - ret = ice_vsi_cfg(vsi, params); + vsi->params = *params; + ret = ice_vsi_cfg(vsi); if (ret) goto err_vsi_cfg; @@ -2557,7 +2571,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params) if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) { ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX, ICE_DROP_PACKET); - ice_cfg_sw_lldp(vsi, true, true); + ice_vsi_cfg_sw_lldp(vsi, true, true); } if (!vsi->agg_node) @@ -2590,7 +2604,7 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi) for (q = 0; q < q_vector->num_ring_tx; q++) { ice_write_itr(&q_vector->tx, 0); wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0); - if (ice_is_xdp_ena_vsi(vsi)) { + if (vsi->xdp_rings) { u32 xdp_txq = txq + vsi->num_xdp_txq; wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0); @@ -2625,7 +2639,6 @@ void ice_vsi_free_irq(struct ice_vsi *vsi) return; vsi->irqs_ready = false; - ice_free_cpu_rx_rmap(vsi); ice_for_each_q_vector(vsi, i) { int irq_num; @@ -2638,12 +2651,6 @@ void ice_vsi_free_irq(struct ice_vsi *vsi) vsi->q_vectors[i]->num_ring_rx)) continue; - /* clear the affinity notifier in the IRQ descriptor */ - if (!IS_ENABLED(CONFIG_RFS_ACCEL)) - irq_set_affinity_notifier(irq_num, NULL); - - /* clear the affinity_mask in the IRQ descriptor */ - irq_set_affinity_hint(irq_num, NULL); synchronize_irq(irq_num); devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]); } @@ -2690,6 +2697,7 @@ void ice_vsi_close(struct ice_vsi *vsi) if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) ice_down(vsi); + ice_vsi_clear_napi_queues(vsi); ice_vsi_free_irq(vsi); ice_vsi_free_tx_rings(vsi); ice_vsi_free_rx_rings(vsi); @@ -2709,7 +2717,8 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked) clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state); - if (vsi->netdev && vsi->type == ICE_VSI_PF) { + if (vsi->netdev && (vsi->type == ICE_VSI_PF || + vsi->type == ICE_VSI_SF)) { if (netif_running(vsi->netdev)) { if (!locked) rtnl_lock(); @@ -2733,144 +2742,108 @@ int ice_ena_vsi(struct ice_vsi *vsi, bool locked) */ void ice_dis_vsi(struct ice_vsi *vsi, bool locked) { - if (test_bit(ICE_VSI_DOWN, vsi->state)) - return; + bool already_down = test_bit(ICE_VSI_DOWN, vsi->state); set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); - if (vsi->type == ICE_VSI_PF && vsi->netdev) { + if (vsi->netdev && (vsi->type == ICE_VSI_PF || + vsi->type == ICE_VSI_SF)) { if (netif_running(vsi->netdev)) { if (!locked) rtnl_lock(); - - ice_vsi_close(vsi); + already_down = test_bit(ICE_VSI_DOWN, vsi->state); + if (!already_down) + ice_vsi_close(vsi); if (!locked) rtnl_unlock(); - } else { + } else if (!already_down) { ice_vsi_close(vsi); } - } else if (vsi->type == ICE_VSI_CTRL || - vsi->type == ICE_VSI_SWITCHDEV_CTRL) { + } else if (vsi->type == ICE_VSI_CTRL && !already_down) { ice_vsi_close(vsi); } } /** - * __ice_queue_set_napi - Set the napi instance for the queue - * @dev: device to which NAPI and queue belong - * @queue_index: Index of queue - * @type: queue type as RX or TX - * @napi: NAPI context - * @locked: is the rtnl_lock already held - * - * Set the napi instance for the queue. Caller indicates the lock status. 
- */ -static void -__ice_queue_set_napi(struct net_device *dev, unsigned int queue_index, - enum netdev_queue_type type, struct napi_struct *napi, - bool locked) -{ - if (!locked) - rtnl_lock(); - netif_queue_set_napi(dev, queue_index, type, napi); - if (!locked) - rtnl_unlock(); -} - -/** - * ice_queue_set_napi - Set the napi instance for the queue - * @vsi: VSI being configured - * @queue_index: Index of queue - * @type: queue type as RX or TX - * @napi: NAPI context + * ice_vsi_set_napi_queues - associate netdev queues with napi + * @vsi: VSI pointer * - * Set the napi instance for the queue. The rtnl lock state is derived from the - * execution path. + * Associate queue[s] with napi for all vectors. + * The caller must hold rtnl_lock. */ -void -ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index, - enum netdev_queue_type type, struct napi_struct *napi) +void ice_vsi_set_napi_queues(struct ice_vsi *vsi) { - struct ice_pf *pf = vsi->back; + struct net_device *netdev = vsi->netdev; + int q_idx, v_idx; - if (!vsi->netdev) + if (!netdev) return; - if (current_work() == &pf->serv_task || - test_bit(ICE_PREPARED_FOR_RESET, pf->state) || - test_bit(ICE_DOWN, pf->state) || - test_bit(ICE_SUSPENDED, pf->state)) - __ice_queue_set_napi(vsi->netdev, queue_index, type, napi, - false); - else - __ice_queue_set_napi(vsi->netdev, queue_index, type, napi, - true); + ice_for_each_rxq(vsi, q_idx) + netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, + &vsi->rx_rings[q_idx]->q_vector->napi); + + ice_for_each_txq(vsi, q_idx) + netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, + &vsi->tx_rings[q_idx]->q_vector->napi); + /* Also set the interrupt number for the NAPI */ + ice_for_each_q_vector(vsi, v_idx) { + struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; + + netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq); + } } /** - * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi - * @q_vector: q_vector pointer - * @locked: is the rtnl_lock already held + * ice_vsi_clear_napi_queues - dissociate netdev queues from napi + * @vsi: VSI pointer * - * Associate the q_vector napi with all the queue[s] on the vector. - * Caller indicates the lock status. + * Clear the association between all VSI queues queue[s] and napi. + * The caller must hold rtnl_lock. 
*/ -void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked) +void ice_vsi_clear_napi_queues(struct ice_vsi *vsi) { - struct ice_rx_ring *rx_ring; - struct ice_tx_ring *tx_ring; + struct net_device *netdev = vsi->netdev; + int q_idx, v_idx; - ice_for_each_rx_ring(rx_ring, q_vector->rx) - __ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index, - NETDEV_QUEUE_TYPE_RX, &q_vector->napi, - locked); + if (!netdev) + return; - ice_for_each_tx_ring(tx_ring, q_vector->tx) - __ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index, - NETDEV_QUEUE_TYPE_TX, &q_vector->napi, - locked); - /* Also set the interrupt number for the NAPI */ - netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq); -} + /* Clear the NAPI's interrupt number */ + ice_for_each_q_vector(vsi, v_idx) { + struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; -/** - * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi - * @q_vector: q_vector pointer - * - * Associate the q_vector napi with all the queue[s] on the vector - */ -void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector) -{ - struct ice_rx_ring *rx_ring; - struct ice_tx_ring *tx_ring; + netif_napi_set_irq(&q_vector->napi, -1); + } - ice_for_each_rx_ring(rx_ring, q_vector->rx) - ice_queue_set_napi(q_vector->vsi, rx_ring->q_index, - NETDEV_QUEUE_TYPE_RX, &q_vector->napi); + ice_for_each_txq(vsi, q_idx) + netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, NULL); - ice_for_each_tx_ring(tx_ring, q_vector->tx) - ice_queue_set_napi(q_vector->vsi, tx_ring->q_index, - NETDEV_QUEUE_TYPE_TX, &q_vector->napi); - /* Also set the interrupt number for the NAPI */ - netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq); + ice_for_each_rxq(vsi, q_idx) + netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, NULL); } /** - * ice_vsi_set_napi_queues - * @vsi: VSI pointer + * ice_napi_add - register NAPI handler for the VSI + * @vsi: VSI for which NAPI handler is to be registered * - * Associate queue[s] with napi for all vectors + * This function is only called in the driver's load path. Registering the NAPI + * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume, + * reset/rebuild, etc.) 
*/ -void ice_vsi_set_napi_queues(struct ice_vsi *vsi) +void ice_napi_add(struct ice_vsi *vsi) { - int i; + int v_idx; if (!vsi->netdev) return; - ice_for_each_q_vector(vsi, i) - ice_q_vector_set_napi_queues(vsi->q_vectors[i]); + ice_for_each_q_vector(vsi, v_idx) + netif_napi_add_config(vsi->netdev, + &vsi->q_vectors[v_idx]->napi, + ice_napi_poll, + v_idx); } /** @@ -2891,6 +2864,16 @@ int ice_vsi_release(struct ice_vsi *vsi) ice_rss_clean(vsi); ice_vsi_close(vsi); + + /* The Rx rule will only exist to remove if the LLDP FW + * engine is currently stopped + */ + if (!ice_is_safe_mode(pf) && + !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) && + (vsi->type == ICE_VSI_PF || (vsi->type == ICE_VSI_VF && + ice_vf_is_lldp_ena(vsi->vf)))) + ice_vsi_cfg_sw_lldp(vsi, false, false); + ice_vsi_decfg(vsi); /* retain SW VSI data structure since it is needed to unregister and @@ -3089,7 +3072,6 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi) */ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) { - struct ice_vsi_cfg_params params = {}; struct ice_coalesce_stored *coalesce; int prev_num_q_vectors; struct ice_pf *pf; @@ -3098,26 +3080,28 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) if (!vsi) return -EINVAL; - params = ice_vsi_to_params(vsi); - params.flags = vsi_flags; - + vsi->flags = vsi_flags; pf = vsi->back; if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf)) return -EINVAL; + mutex_lock(&vsi->xdp_state_lock); + ret = ice_vsi_realloc_stat_arrays(vsi); if (ret) - goto err_vsi_cfg; + goto unlock; ice_vsi_decfg(vsi); - ret = ice_vsi_cfg_def(vsi, ¶ms); + ret = ice_vsi_cfg_def(vsi); if (ret) - goto err_vsi_cfg; + goto unlock; coalesce = kcalloc(vsi->num_q_vectors, sizeof(struct ice_coalesce_stored), GFP_KERNEL); - if (!coalesce) - return -ENOMEM; + if (!coalesce) { + ret = -ENOMEM; + goto decfg; + } prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce); @@ -3125,22 +3109,23 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags) if (ret) { if (vsi_flags & ICE_VSI_FLAG_INIT) { ret = -EIO; - goto err_vsi_cfg_tc_lan; + goto free_coalesce; } - kfree(coalesce); - return ice_schedule_reset(pf, ICE_RESET_PFR); + ret = ice_schedule_reset(pf, ICE_RESET_PFR); + goto free_coalesce; } ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors); - kfree(coalesce); - - return 0; + clear_bit(ICE_VSI_REBUILD_PENDING, vsi->state); -err_vsi_cfg_tc_lan: - ice_vsi_decfg(vsi); +free_coalesce: kfree(coalesce); -err_vsi_cfg: +decfg: + if (ret) + ice_vsi_decfg(vsi); +unlock: + mutex_unlock(&vsi->xdp_state_lock); return ret; } @@ -3956,12 +3941,17 @@ void ice_init_feature_support(struct ice_pf *pf) ice_set_feature_support(pf, ICE_F_CGU); if (ice_is_clock_mux_in_netlist(&pf->hw)) ice_set_feature_support(pf, ICE_F_SMA_CTRL); - if (ice_gnss_is_gps_present(&pf->hw)) + if (ice_gnss_is_module_present(&pf->hw)) ice_set_feature_support(pf, ICE_F_GNSS); break; default: break; } + + if (pf->hw.mac_type == ICE_MAC_E830) { + ice_set_feature_support(pf, ICE_F_MBX_LIMIT); + ice_set_feature_support(pf, ICE_F_GCS); + } } /** @@ -4008,24 +3998,6 @@ void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx) } /** - * ice_vsi_ctx_set_allow_override - allow destination override on VSI - * @ctx: pointer to VSI ctx structure - */ -void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx) -{ - ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; -} - -/** - * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI - * @ctx: pointer to VSI ctx structure - */ -void 
ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx) -{ - ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD; -} - -/** * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit * @vsi: pointer to VSI structure * @set: set or unset the bit diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 9cd23afe5f15..654516c5fc3e 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -11,43 +11,6 @@ #define ICE_VSI_FLAG_INIT BIT(0) #define ICE_VSI_FLAG_NO_INIT 0 -/** - * struct ice_vsi_cfg_params - VSI configuration parameters - * @pi: pointer to the port_info instance for the VSI - * @ch: pointer to the channel structure for the VSI, may be NULL - * @vf: pointer to the VF associated with this VSI, may be NULL - * @type: the type of VSI to configure - * @flags: VSI flags used for rebuild and configuration - * - * Parameter structure used when configuring a new VSI. - */ -struct ice_vsi_cfg_params { - struct ice_port_info *pi; - struct ice_channel *ch; - struct ice_vf *vf; - enum ice_vsi_type type; - u32 flags; -}; - -/** - * ice_vsi_to_params - Get parameters for an existing VSI - * @vsi: the VSI to get parameters for - * - * Fill a parameter structure for reconfiguring a VSI with its current - * parameters, such as during a rebuild operation. - */ -static inline struct ice_vsi_cfg_params ice_vsi_to_params(struct ice_vsi *vsi) -{ - struct ice_vsi_cfg_params params = {}; - - params.pi = vsi->port_info; - params.ch = vsi->ch; - params.vf = vsi->vf; - params.type = vsi->type; - - return params; -} - const char *ice_vsi_type_str(enum ice_vsi_type vsi_type); bool ice_pf_state_is_nominal(struct ice_pf *pf); @@ -66,7 +29,8 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi); -void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create); +void ice_vsi_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create); +void ice_cfg_sw_rx_lldp(struct ice_pf *pf, bool enable); int ice_set_link(struct ice_vsi *vsi, bool ena); @@ -81,15 +45,10 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc); struct ice_vsi * ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params); -void -ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index, - enum netdev_queue_type type, struct napi_struct *napi); - -void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked); - -void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector); - void ice_vsi_set_napi_queues(struct ice_vsi *vsi); +void ice_napi_add(struct ice_vsi *vsi); + +void ice_vsi_clear_napi_queues(struct ice_vsi *vsi); int ice_vsi_release(struct ice_vsi *vsi); @@ -101,7 +60,9 @@ void ice_vsi_decfg(struct ice_vsi *vsi); void ice_dis_vsi(struct ice_vsi *vsi, bool locked); int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags); -int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params); +int ice_vsi_cfg(struct ice_vsi *vsi); +struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf); +void ice_vsi_free(struct ice_vsi *vsi); bool ice_is_reset_in_progress(unsigned long *state); int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout); @@ -128,10 +89,9 @@ void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl); void ice_write_itr(struct ice_ring_container *rc, u16 itr); void ice_set_q_vector_intrl(struct ice_q_vector *q_vector); -int ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set); - 
bool ice_is_safe_mode(struct ice_pf *pf); bool ice_is_rdma_ena(struct ice_pf *pf); +bool ice_is_recovery_mode(struct ice_hw *hw); bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi); bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi); int ice_set_dflt_vsi(struct ice_vsi *vsi); @@ -146,10 +106,6 @@ ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *)) void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx); void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx); - -void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx); - -void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx); int ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set); int ice_vsi_add_vlan_zero(struct ice_vsi *vsi); int ice_vsi_del_vlan_zero(struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 33a164fa325a..20d3baf955e3 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -13,7 +13,9 @@ #include "ice_fltr.h" #include "ice_dcb_lib.h" #include "ice_dcb_nl.h" -#include "ice_devlink.h" +#include "devlink/devlink.h" +#include "devlink/port.h" +#include "ice_sf_eth.h" #include "ice_hwmon.h" /* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the * ice tracepoint functions. This must be done exactly once across the @@ -34,8 +36,8 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation."; #define ICE_DDP_PKG_PATH "intel/ice/ddp/" #define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg" -MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_IMPORT_NS("LIBIE"); MODULE_LICENSE("GPL v2"); MODULE_FIRMWARE(ICE_DDP_PKG_FILE); @@ -85,7 +87,8 @@ ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch, bool netif_is_ice(const struct net_device *dev) { - return dev && (dev->netdev_ops == &ice_netdev_ops); + return dev && (dev->netdev_ops == &ice_netdev_ops || + dev->netdev_ops == &ice_netdev_safe_mode_ops); } /** @@ -519,25 +522,6 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked) } /** - * ice_clear_sw_switch_recipes - clear switch recipes - * @pf: board private structure - * - * Mark switch recipes as not created in sw structures. There are cases where - * rules (especially advanced rules) need to be restored, either re-read from - * hardware or added again. For example after the reset. 'recp_created' flag - * prevents from doing that and need to be cleared upfront. 
- */ -static void ice_clear_sw_switch_recipes(struct ice_pf *pf) -{ - struct ice_sw_recipe *recp; - u8 i; - - recp = pf->hw.switch_info->recp_list; - for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) - recp[i].recp_created = false; -} - -/** * ice_prepare_for_reset - prep for reset * @pf: board private structure * @reset_type: reset type requested @@ -558,6 +542,8 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) if (test_bit(ICE_PREPARED_FOR_RESET, pf->state)) return; + synchronize_irq(pf->oicr_irq.virq); + ice_unplug_aux_dev(pf); /* Notify VFs of impending reset */ @@ -571,8 +557,9 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) mutex_unlock(&pf->vfs.table_lock); if (ice_is_eswitch_mode_switchdev(pf)) { - if (reset_type != ICE_RESET_PFR) - ice_clear_sw_switch_recipes(pf); + rtnl_lock(); + ice_eswitch_br_fdb_flush(pf->eswitch.br_offloads->bridge); + rtnl_unlock(); } /* release ADQ specific HW and SW resources */ @@ -605,11 +592,15 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt)); } } + + if (vsi->netdev) + netif_device_detach(vsi->netdev); skip: /* clear SW filtering DB */ ice_clear_hw_tbls(hw); /* disable the VSIs and their queues that are not already DOWN */ + set_bit(ICE_VSI_REBUILD_PENDING, ice_get_main_vsi(pf)->state); ice_pf_dis_all_vsi(pf, false); if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) @@ -621,7 +612,7 @@ skip: if (hw->port_info) ice_sched_clear_port(hw->port_info); - ice_shutdown_all_ctrlq(hw); + ice_shutdown_all_ctrlq(hw, false); set_bit(ICE_PREPARED_FOR_RESET, pf->state); } @@ -803,6 +794,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup) } switch (vsi->port_info->phy.link_info.link_speed) { + case ICE_AQ_LINK_SPEED_200GB: + speed = "200 G"; + break; case ICE_AQ_LINK_SPEED_100GB: speed = "100 G"; break; @@ -1150,7 +1144,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up, if (link_up == old_link && link_speed == old_link_speed) return 0; - ice_ptp_link_change(pf, pf->hw.pf_id, link_up); + ice_ptp_link_change(pf, link_up); if (ice_is_dcb_active(pf)) { if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) @@ -1552,12 +1546,20 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) ice_vf_lan_overflow_event(pf, &event); break; case ice_mbx_opc_send_msg_to_pf: - data.num_msg_proc = i; - data.num_pending_arq = pending; - data.max_num_msgs_mbx = hw->mailboxq.num_rq_entries; - data.async_watermark_val = ICE_MBX_OVERFLOW_WATERMARK; + if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) { + ice_vc_process_vf_msg(pf, &event, NULL); + ice_mbx_vf_dec_trig_e830(hw, &event); + } else { + u16 val = hw->mailboxq.num_rq_entries; + + data.max_num_msgs_mbx = val; + val = ICE_MBX_OVERFLOW_WATERMARK; + data.async_watermark_val = val; + data.num_msg_proc = i; + data.num_pending_arq = pending; - ice_vc_process_vf_msg(pf, &event, &data); + ice_vc_process_vf_msg(pf, &event, &data); + } break; case ice_aqc_opc_fw_logs_event: ice_get_fwlog_data(pf, &event); @@ -1565,6 +1567,9 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type) case ice_aqc_opc_lldp_set_mib_change: ice_dcb_process_lldp_set_mib_change(pf, &event); break; + case ice_aqc_opc_get_health_status: + ice_process_health_status_event(pf, &event); + break; default: dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n", qtype, opcode); @@ -1712,7 +1717,7 @@ static int ice_service_task_stop(struct ice_pf *pf) ret = test_and_set_bit(ICE_SERVICE_DIS, 
pf->state); if (pf->serv_tmr.function) - del_timer_sync(&pf->serv_tmr); + timer_delete_sync(&pf->serv_tmr); if (pf->serv_task.func) cancel_work_sync(&pf->serv_task); @@ -1745,6 +1750,39 @@ static void ice_service_timer(struct timer_list *t) } /** + * ice_mdd_maybe_reset_vf - reset VF after MDD event + * @pf: pointer to the PF structure + * @vf: pointer to the VF structure + * @reset_vf_tx: whether Tx MDD has occurred + * @reset_vf_rx: whether Rx MDD has occurred + * + * Since the queue can get stuck on VF MDD events, the PF can be configured to + * automatically reset the VF by enabling the private ethtool flag + * mdd-auto-reset-vf. + */ +static void ice_mdd_maybe_reset_vf(struct ice_pf *pf, struct ice_vf *vf, + bool reset_vf_tx, bool reset_vf_rx) +{ + struct device *dev = ice_pf_to_dev(pf); + + if (!test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) + return; + + /* VF MDD event counters will be cleared by reset, so print the event + * prior to reset. + */ + if (reset_vf_tx) + ice_print_vf_tx_mdd_event(vf); + + if (reset_vf_rx) + ice_print_vf_rx_mdd_event(vf); + + dev_info(dev, "PF-to-VF reset on PF %d VF %d due to MDD event\n", + pf->hw.pf_id, vf->vf_id); + ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK); +} + +/** * ice_handle_mdd_event - handle malicious driver detect event * @pf: pointer to the PF structure * @@ -1781,6 +1819,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", event, queue, pf_num, vf_num); + ice_report_mdd_event(pf, ICE_MDD_SRC_TX_PQM, pf_num, vf_num, + event, queue); wr32(hw, GL_MDET_TX_PQM, 0xffffffff); } @@ -1794,6 +1834,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n", event, queue, pf_num, vf_num); + ice_report_mdd_event(pf, ICE_MDD_SRC_TX_TCLAN, pf_num, vf_num, + event, queue); wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX); } @@ -1807,6 +1849,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (netif_msg_rx_err(pf)) dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n", event, queue, pf_num, vf_num); + ice_report_mdd_event(pf, ICE_MDD_SRC_RX, pf_num, vf_num, event, + queue); wr32(hw, GL_MDET_RX, 0xffffffff); } @@ -1837,6 +1881,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf) */ mutex_lock(&pf->vfs.table_lock); ice_for_each_vf(pf, bkt, vf) { + bool reset_vf_tx = false, reset_vf_rx = false; + reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id)); if (reg & VP_MDET_TX_PQM_VALID_M) { wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF); @@ -1845,6 +1891,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n", vf->vf_id); + + reset_vf_tx = true; } reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id)); @@ -1855,6 +1903,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n", vf->vf_id); + + reset_vf_tx = true; } reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id)); @@ -1865,6 +1915,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf) if (netif_msg_tx_err(pf)) dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n", vf->vf_id); + + reset_vf_tx = true; } reg = rd32(hw, VP_MDET_RX(vf->vf_id)); @@ -1876,18 +1928,12 @@ static void ice_handle_mdd_event(struct ice_pf *pf) dev_info(dev, "Malicious Driver 
Detection event RX detected on VF %d\n", vf->vf_id); - /* Since the queue is disabled on VF Rx MDD events, the - * PF can be configured to reset the VF through ethtool - * private flag mdd-auto-reset-vf. - */ - if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) { - /* VF MDD event counters will be cleared by - * reset, so print the event prior to reset. - */ - ice_print_vf_rx_mdd_event(vf); - ice_reset_vf(vf, ICE_VF_RESET_LOCK); - } + reset_vf_rx = true; } + + if (reset_vf_tx || reset_vf_rx) + ice_mdd_maybe_reset_vf(pf, vf, reset_vf_tx, + reset_vf_rx); } mutex_unlock(&pf->vfs.table_lock); @@ -2318,6 +2364,18 @@ static void ice_check_media_subtask(struct ice_pf *pf) } } +static void ice_service_task_recovery_mode(struct work_struct *work) +{ + struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); + + set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state); + ice_clean_adminq_subtask(pf); + + ice_service_task_complete(pf); + + mod_timer(&pf->serv_tmr, jiffies + msecs_to_jiffies(100)); +} + /** * ice_service_task - manage and run subtasks * @work: pointer to work_struct contained by the PF struct @@ -2327,9 +2385,11 @@ static void ice_service_task(struct work_struct *work) struct ice_pf *pf = container_of(work, struct ice_pf, serv_task); unsigned long start_time = jiffies; - /* subtasks */ + if (pf->health_reporters.tx_hang_buf.tx_ring) { + ice_report_tx_hang(pf); + pf->health_reporters.tx_hang_buf.tx_ring = NULL; + } - /* process reset requests first */ ice_reset_subtask(pf); /* bail if a reset/recovery cycle is pending or rebuild failed */ @@ -2341,11 +2401,11 @@ static void ice_service_task(struct work_struct *work) } if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) { - struct iidc_event *event; + struct iidc_rdma_event *event; event = kzalloc(sizeof(*event), GFP_KERNEL); if (event) { - set_bit(IIDC_EVENT_CRIT_ERR, event->type); + set_bit(IIDC_RDMA_EVENT_CRIT_ERR, event->type); /* report the entire OICR value to AUX driver */ swap(event->reg, pf->oicr_err_reg); ice_send_event_to_aux(pf, event); @@ -2364,11 +2424,11 @@ static void ice_service_task(struct work_struct *work) ice_plug_aux_dev(pf); if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) { - struct iidc_event *event; + struct iidc_rdma_event *event; event = kzalloc(sizeof(*event), GFP_KERNEL); if (event) { - set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type); + set_bit(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE, event->type); ice_send_event_to_aux(pf, event); kfree(event); } @@ -2468,34 +2528,6 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset) } /** - * ice_irq_affinity_notify - Callback for affinity changes - * @notify: context as to what irq was changed - * @mask: the new affinity mask - * - * This is a callback function used by the irq_set_affinity_notifier function - * so that we may register to receive changes to the irq affinity masks. - */ -static void -ice_irq_affinity_notify(struct irq_affinity_notify *notify, - const cpumask_t *mask) -{ - struct ice_q_vector *q_vector = - container_of(notify, struct ice_q_vector, affinity_notify); - - cpumask_copy(&q_vector->affinity_mask, mask); -} - -/** - * ice_irq_affinity_release - Callback for affinity notifier release - * @ref: internal core kernel usage - * - * This is a callback function used by the irq_set_affinity_notifier function - * to inform the current notification subscriber that they will no longer - * receive notifications. 
- */ -static void ice_irq_affinity_release(struct kref __always_unused *ref) {} - -/** * ice_vsi_ena_irq - Enable IRQ for the given VSI * @vsi: the VSI being configured */ @@ -2558,19 +2590,6 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) err); goto free_q_irqs; } - - /* register for affinity change notifications */ - if (!IS_ENABLED(CONFIG_RFS_ACCEL)) { - struct irq_affinity_notify *affinity_notify; - - affinity_notify = &q_vector->affinity_notify; - affinity_notify->notify = ice_irq_affinity_notify; - affinity_notify->release = ice_irq_affinity_release; - irq_set_affinity_notifier(irq_num, affinity_notify); - } - - /* assign the mask for this irq */ - irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); } err = ice_set_cpu_rx_rmap(vsi); @@ -2586,9 +2605,6 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) free_q_irqs: while (vector--) { irq_num = vsi->q_vectors[vector]->irq.virq; - if (!IS_ENABLED(CONFIG_RFS_ACCEL)) - irq_set_affinity_notifier(irq_num, NULL); - irq_set_affinity_hint(irq_num, NULL); devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]); } return err; @@ -2670,17 +2686,72 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog) bpf_prog_put(old_prog); } +static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid) +{ + struct ice_q_vector *q_vector; + struct ice_tx_ring *ring; + + if (static_key_enabled(&ice_xdp_locking_key)) + return vsi->xdp_rings[qid % vsi->num_xdp_txq]; + + q_vector = vsi->rx_rings[qid]->q_vector; + ice_for_each_tx_ring(ring, q_vector->tx) + if (ice_ring_is_xdp(ring)) + return ring; + + return NULL; +} + +/** + * ice_map_xdp_rings - Map XDP rings to interrupt vectors + * @vsi: the VSI with XDP rings being configured + * + * Map XDP rings to interrupt vectors and perform the configuration steps + * dependent on the mapping. 
+ */ +void ice_map_xdp_rings(struct ice_vsi *vsi) +{ + int xdp_rings_rem = vsi->num_xdp_txq; + int v_idx, q_idx; + + /* follow the logic from ice_vsi_map_rings_to_vectors */ + ice_for_each_q_vector(vsi, v_idx) { + struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; + int xdp_rings_per_v, q_id, q_base; + + xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem, + vsi->num_q_vectors - v_idx); + q_base = vsi->num_xdp_txq - xdp_rings_rem; + + for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) { + struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id]; + + xdp_ring->q_vector = q_vector; + xdp_ring->next = q_vector->tx.tx_ring; + q_vector->tx.tx_ring = xdp_ring; + } + xdp_rings_rem -= xdp_rings_per_v; + } + + ice_for_each_rxq(vsi, q_idx) { + vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi, + q_idx); + ice_tx_xsk_pool(vsi, q_idx); + } +} + /** * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP * @vsi: VSI to bring up Tx rings used by XDP * @prog: bpf program that will be assigned to VSI + * @cfg_type: create from scratch or restore the existing configuration * * Return 0 on success and negative value on error */ -int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) +int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog, + enum ice_xdp_cfg cfg_type) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; - int xdp_rings_rem = vsi->num_xdp_txq; struct ice_pf *pf = vsi->back; struct ice_qs_cfg xdp_qs_cfg = { .qs_mutex = &pf->avail_q_mutex, @@ -2693,8 +2764,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) .mapping_mode = ICE_VSI_MAP_CONTIG }; struct device *dev; - int i, v_idx; - int status; + int status, i; dev = ice_pf_to_dev(pf); vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq, @@ -2713,49 +2783,15 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog) if (ice_xdp_alloc_setup_rings(vsi)) goto clear_xdp_rings; - /* follow the logic from ice_vsi_map_rings_to_vectors */ - ice_for_each_q_vector(vsi, v_idx) { - struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; - int xdp_rings_per_v, q_id, q_base; - - xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem, - vsi->num_q_vectors - v_idx); - q_base = vsi->num_xdp_txq - xdp_rings_rem; - - for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) { - struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id]; - - xdp_ring->q_vector = q_vector; - xdp_ring->next = q_vector->tx.tx_ring; - q_vector->tx.tx_ring = xdp_ring; - } - xdp_rings_rem -= xdp_rings_per_v; - } - - ice_for_each_rxq(vsi, i) { - if (static_key_enabled(&ice_xdp_locking_key)) { - vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq]; - } else { - struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector; - struct ice_tx_ring *ring; - - ice_for_each_tx_ring(ring, q_vector->tx) { - if (ice_ring_is_xdp(ring)) { - vsi->rx_rings[i]->xdp_ring = ring; - break; - } - } - } - ice_tx_xsk_pool(vsi, i); - } - /* omit the scheduler update if in reset path; XDP queues will be * taken into account at the end of ice_vsi_rebuild, where * ice_cfg_vsi_lan is being called */ - if (ice_is_reset_in_progress(pf->state)) + if (cfg_type == ICE_XDP_CFG_PART) return 0; + ice_map_xdp_rings(vsi); + /* tell the Tx scheduler that right now we have * additional queues */ @@ -2805,22 +2841,21 @@ err_map_xdp: /** * ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings * @vsi: VSI to remove XDP rings + * @cfg_type: disable XDP permanently or allow it to be restored later * * Detach XDP rings from 
irq vectors, clean up the PF bitmap and free * resources */ -int ice_destroy_xdp_rings(struct ice_vsi *vsi) +int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; struct ice_pf *pf = vsi->back; int i, v_idx; /* q_vectors are freed in reset path so there's no point in detaching - * rings; in case of rebuild being triggered not from reset bits - * in pf->state won't be set, so additionally check first q_vector - * against NULL + * rings */ - if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) + if (cfg_type == ICE_XDP_CFG_PART) goto free_qmap; ice_for_each_q_vector(vsi, v_idx) { @@ -2861,7 +2896,7 @@ free_qmap: if (static_key_enabled(&ice_xdp_locking_key)) static_branch_dec(&ice_xdp_locking_key); - if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0]) + if (cfg_type == ICE_XDP_CFG_PART) return 0; ice_vsi_assign_bpf_prog(vsi, NULL); @@ -2890,7 +2925,7 @@ static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi) ice_for_each_rxq(vsi, i) { struct ice_rx_ring *rx_ring = vsi->rx_rings[i]; - if (rx_ring->xsk_pool) + if (READ_ONCE(rx_ring->xsk_pool)) napi_schedule(&rx_ring->q_vector->napi); } } @@ -2910,6 +2945,9 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi) if (avail < cpus / 2) return -ENOMEM; + if (vsi->type == ICE_VSI_SF) + avail = vsi->alloc_txq; + vsi->num_xdp_txq = min_t(u16, avail, cpus); if (vsi->num_xdp_txq < cpus) @@ -2941,8 +2979,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, struct netlink_ext_ack *extack) { unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD; - bool if_running = netif_running(vsi->netdev); int ret = 0, xdp_ring_err = 0; + bool if_running; if (prog && !prog->aux->xdp_has_frags) { if (frame_size > ice_max_xdp_frame_size(vsi)) { @@ -2953,13 +2991,17 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, } /* hot swap progs and avoid toggling link */ - if (ice_is_xdp_ena_vsi(vsi) == !!prog) { + if (ice_is_xdp_ena_vsi(vsi) == !!prog || + test_bit(ICE_VSI_REBUILD_PENDING, vsi->state)) { ice_vsi_assign_bpf_prog(vsi, prog); return 0; } + if_running = netif_running(vsi->netdev) && + !test_and_set_bit(ICE_VSI_DOWN, vsi->state); + /* need to stop netdev while setting up the program for Rx rings */ - if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { + if (if_running) { ret = ice_down(vsi); if (ret) { NL_SET_ERR_MSG_MOD(extack, "Preparing device for XDP attach failed"); @@ -2972,7 +3014,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, if (xdp_ring_err) { NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP"); } else { - xdp_ring_err = ice_prepare_xdp_rings(vsi, prog); + xdp_ring_err = ice_prepare_xdp_rings(vsi, prog, + ICE_XDP_CFG_FULL); if (xdp_ring_err) NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed"); } @@ -2983,7 +3026,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog, NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed"); } else if (ice_is_xdp_ena_vsi(vsi) && !prog) { xdp_features_clear_redirect_target(vsi->netdev); - xdp_ring_err = ice_destroy_xdp_rings(vsi); + xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL); if (xdp_ring_err) NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed"); /* reallocate Rx queues that were used for zero-copy */ @@ -3020,25 +3063,32 @@ static int ice_xdp_safe_mode(struct net_device __always_unused *dev, * @dev: netdevice * @xdp: XDP command */ -static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) 
+int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) { struct ice_netdev_priv *np = netdev_priv(dev); struct ice_vsi *vsi = np->vsi; + int ret; - if (vsi->type != ICE_VSI_PF) { - NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF VSI"); + if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF) { + NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF or SF VSI"); return -EINVAL; } + mutex_lock(&vsi->xdp_state_lock); + switch (xdp->command) { case XDP_SETUP_PROG: - return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack); + ret = ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack); + break; case XDP_SETUP_XSK_POOL: - return ice_xsk_pool_setup(vsi, xdp->xsk.pool, - xdp->xsk.queue_id); + ret = ice_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id); + break; default: - return -EINVAL; + ret = -EINVAL; } + + mutex_unlock(&vsi->xdp_state_lock); + return ret; } /** @@ -3210,22 +3260,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) if (oicr & PFINT_OICR_TSYN_TX_M) { ena_mask &= ~PFINT_OICR_TSYN_TX_M; - if (ice_pf_state_is_nominal(pf) && - pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) { - struct ice_ptp_tx *tx = &pf->ptp.port.tx; - unsigned long flags; - u8 idx; - - spin_lock_irqsave(&tx->lock, flags); - idx = find_next_bit_wrap(tx->in_use, tx->len, - tx->last_ll_ts_idx_read + 1); - if (idx != tx->len) - ice_ptp_req_tx_single_tstamp(tx, idx); - spin_unlock_irqrestore(&tx->lock, flags); - } else if (ice_ptp_pf_handles_tx_interrupt(pf)) { - set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread); - ret = IRQ_WAKE_THREAD; - } + + ret = ice_ptp_ts_irq(pf); } if (oicr & PFINT_OICR_TSYN_EVNT_M) { @@ -3480,28 +3516,6 @@ skip_req_irq: } /** - * ice_napi_add - register NAPI handler for the VSI - * @vsi: VSI for which NAPI handler is to be registered - * - * This function is only called in the driver's load path. Registering the NAPI - * handler is done in ice_vsi_alloc_q_vector() for all other cases (i.e. resume, - * reset/rebuild, etc.) - */ -static void ice_napi_add(struct ice_vsi *vsi) -{ - int v_idx; - - if (!vsi->netdev) - return; - - ice_for_each_q_vector(vsi, v_idx) { - netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, - ice_napi_poll); - __ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false); - } -} - -/** * ice_set_ops - set netdev and ethtools ops for the given netdev * @vsi: the VSI associated with the new netdev */ @@ -3534,7 +3548,7 @@ static void ice_set_ops(struct ice_vsi *vsi) * ice_set_netdev_features - set features for the given netdev * @netdev: netdev instance */ -static void ice_set_netdev_features(struct net_device *netdev) +void ice_set_netdev_features(struct net_device *netdev) { struct ice_pf *pf = ice_netdev_to_pf(netdev); bool is_dvm_ena = ice_is_dvm_ena(&pf->hw); @@ -3617,6 +3631,15 @@ static void ice_set_netdev_features(struct net_device *netdev) */ netdev->hw_features |= NETIF_F_RXFCS; + /* Allow core to manage IRQs affinity */ + netif_set_affinity_auto(netdev); + + /* Mutual exclusivity for TSO and GCS is enforced by the set features + * ndo callback. 
+ */ + if (ice_is_feature_supported(pf, ICE_F_GCS)) + netdev->hw_features |= NETIF_F_HW_CSUM; + netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE); } @@ -3648,7 +3671,7 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) struct ice_vsi_cfg_params params = {}; params.type = ICE_VSI_PF; - params.pi = pi; + params.port_info = pi; params.flags = ICE_VSI_FLAG_INIT; return ice_vsi_setup(pf, ¶ms); @@ -3661,7 +3684,7 @@ ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, struct ice_vsi_cfg_params params = {}; params.type = ICE_VSI_CHNL; - params.pi = pi; + params.port_info = pi; params.ch = ch; params.flags = ICE_VSI_FLAG_INIT; @@ -3682,7 +3705,7 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) struct ice_vsi_cfg_params params = {}; params.type = ICE_VSI_CTRL; - params.pi = pi; + params.port_info = pi; params.flags = ICE_VSI_FLAG_INIT; return ice_vsi_setup(pf, ¶ms); @@ -3702,7 +3725,7 @@ ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) struct ice_vsi_cfg_params params = {}; params.type = ICE_VSI_LB; - params.pi = pi; + params.port_info = pi; params.flags = ICE_VSI_FLAG_INIT; return ice_vsi_setup(pf, ¶ms); @@ -3716,8 +3739,7 @@ ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi) * * net_device_ops implementation for adding VLAN IDs */ -static int -ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) +int ice_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi_vlan_ops *vlan_ops; @@ -3779,8 +3801,7 @@ finish: * * net_device_ops implementation for removing VLAN IDs */ -static int -ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) +int ice_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi_vlan_ops *vlan_ops; @@ -3949,6 +3970,9 @@ static void ice_deinit_pf(struct ice_pf *pf) if (pf->ptp.clock) ptp_clock_unregister(pf->ptp.clock); + + xa_destroy(&pf->dyn_ports); + xa_destroy(&pf->sf_nums); } /** @@ -3993,8 +4017,7 @@ static void ice_set_pf_caps(struct ice_pf *pf) } clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); - if (func_caps->common_cap.ieee_1588 && - !(pf->hw.mac_type == ICE_MAC_E830)) + if (func_caps->common_cap.ieee_1588) set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); pf->max_pf_txqs = func_caps->common_cap.num_txq; @@ -4040,7 +4063,14 @@ static int ice_init_pf(struct ice_pf *pf) mutex_init(&pf->vfs.table_lock); hash_init(pf->vfs.table); - ice_mbx_init_snapshot(&pf->hw); + if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) + wr32(&pf->hw, E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH, + ICE_MBX_OVERFLOW_WATERMARK); + else + ice_mbx_init_snapshot(&pf->hw); + + xa_init(&pf->dyn_ports); + xa_init(&pf->sf_nums); return 0; } @@ -4079,7 +4109,7 @@ bool ice_is_wol_supported(struct ice_hw *hw) int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked) { struct ice_pf *pf = vsi->back; - int err = 0, timeout = 50; + int i, err = 0, timeout = 50; if (!new_rx && !new_tx) return -EINVAL; @@ -4098,15 +4128,32 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked) /* set for the next time the netdev is started */ if (!netif_running(vsi->netdev)) { - ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); + err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); + if (err) + goto rebuild_err; dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n"); goto done; } ice_vsi_close(vsi); - 
ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); + err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); + if (err) + goto rebuild_err; + + ice_for_each_traffic_class(i) { + if (vsi->tc_cfg.ena_tc & BIT(i)) + netdev_set_tc_queue(vsi->netdev, + vsi->tc_cfg.tc_info[i].netdev_tc, + vsi->tc_cfg.tc_info[i].qcount_tx, + vsi->tc_cfg.tc_info[i].qoffset); + } ice_pf_dcb_recfg(pf, locked); ice_vsi_open(vsi); + goto done; + +rebuild_err: + dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. Unload and reload the driver.\n", + err); done: clear_bit(ICE_CFG_BUSY, pf->state); return err; @@ -4417,11 +4464,13 @@ static char *ice_get_opt_fw_name(struct ice_pf *pf) /** * ice_request_fw - Device initialization routine * @pf: pointer to the PF instance + * @firmware: double pointer to firmware struct + * + * Return: zero when successful, negative values otherwise. */ -static void ice_request_fw(struct ice_pf *pf) +static int ice_request_fw(struct ice_pf *pf, const struct firmware **firmware) { char *opt_fw_filename = ice_get_opt_fw_name(pf); - const struct firmware *firmware = NULL; struct device *dev = ice_pf_to_dev(pf); int err = 0; @@ -4430,29 +4479,120 @@ static void ice_request_fw(struct ice_pf *pf) * and warning messages for other errors. */ if (opt_fw_filename) { - err = firmware_request_nowarn(&firmware, opt_fw_filename, dev); - if (err) { - kfree(opt_fw_filename); - goto dflt_pkg_load; - } - - /* request for firmware was successful. Download to device */ - ice_load_pkg(firmware, pf); + err = firmware_request_nowarn(firmware, opt_fw_filename, dev); kfree(opt_fw_filename); - release_firmware(firmware); - return; + if (!err) + return err; } + err = request_firmware(firmware, ICE_DDP_PKG_FILE, dev); + if (err) + dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n"); -dflt_pkg_load: - err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev); + return err; +} + +/** + * ice_init_tx_topology - performs Tx topology initialization + * @hw: pointer to the hardware structure + * @firmware: pointer to firmware structure + * + * Return: zero when init was successful, negative values otherwise. + */ +static int +ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware) +{ + u8 num_tx_sched_layers = hw->num_tx_sched_layers; + struct ice_pf *pf = hw->back; + struct device *dev; + int err; + + dev = ice_pf_to_dev(pf); + err = ice_cfg_tx_topo(hw, firmware->data, firmware->size); + if (!err) { + if (hw->num_tx_sched_layers > num_tx_sched_layers) + dev_info(dev, "Tx scheduling layers switching feature disabled\n"); + else + dev_info(dev, "Tx scheduling layers switching feature enabled\n"); + /* if there was a change in topology ice_cfg_tx_topo triggered + * a CORER and we need to re-init hw + */ + ice_deinit_hw(hw); + err = ice_init_hw(hw); + + return err; + } else if (err == -EIO) { + dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n"); + } + + return 0; +} + +/** + * ice_init_supported_rxdids - Initialize supported Rx descriptor IDs + * @hw: pointer to the hardware structure + * @pf: pointer to pf structure + * + * The pf->supported_rxdids bitmap is used to indicate to VFs which descriptor + * formats the PF hardware supports. The exact list of supported RXDIDs + * depends on the loaded DDP package. The IDs can be determined by reading the + * GLFLXP_RXDID_FLAGS register after the DDP package is loaded. 
+ * + * Note that the legacy 32-byte RXDID 0 is always supported but is not listed + * in the DDP package. The 16-byte legacy descriptor is never supported by + * VFs. + */ +static void ice_init_supported_rxdids(struct ice_hw *hw, struct ice_pf *pf) +{ + pf->supported_rxdids = BIT(ICE_RXDID_LEGACY_1); + + for (int i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) { + u32 regval; + + regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0)); + if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) + & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) + pf->supported_rxdids |= BIT(i); + } +} + +/** + * ice_init_ddp_config - DDP related configuration + * @hw: pointer to the hardware structure + * @pf: pointer to pf structure + * + * This function loads DDP file from the disk, then initializes Tx + * topology. At the end DDP package is loaded on the card. + * + * Return: zero when init was successful, negative values otherwise. + */ +static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + const struct firmware *firmware = NULL; + int err; + + err = ice_request_fw(pf, &firmware); if (err) { - dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n"); - return; + dev_err(dev, "Fail during requesting FW: %d\n", err); + return err; + } + + err = ice_init_tx_topology(hw, firmware); + if (err) { + dev_err(dev, "Fail during initialization of Tx topology: %d\n", + err); + release_firmware(firmware); + return err; } - /* request for firmware was successful. Download to device */ + /* Download firmware to device */ ice_load_pkg(firmware, pf); release_firmware(firmware); + + /* Initialize the supported Rx descriptor IDs after loading DDP */ + ice_init_supported_rxdids(hw, pf); + + return 0; } /** @@ -4574,64 +4714,21 @@ static void ice_decfg_netdev(struct ice_vsi *vsi) vsi->netdev = NULL; } -/** - * ice_wait_for_fw - wait for full FW readiness - * @hw: pointer to the hardware structure - * @timeout: milliseconds that can elapse before timing out - */ -static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout) -{ - int fw_loading; - u32 elapsed = 0; - - while (elapsed <= timeout) { - fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M; - - /* firmware was not yet loaded, we have to wait more */ - if (fw_loading) { - elapsed += 100; - msleep(100); - continue; - } - return 0; - } - - return -ETIMEDOUT; -} - int ice_init_dev(struct ice_pf *pf) { struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; int err; - err = ice_init_hw(hw); - if (err) { - dev_err(dev, "ice_init_hw failed: %d\n", err); - return err; - } - - /* Some cards require longer initialization times - * due to necessity of loading FW from an external source. - * This can take even half a minute. - */ - if (ice_is_pf_c827(hw)) { - err = ice_wait_for_fw(hw, 30000); - if (err) { - dev_err(dev, "ice_wait_for_fw timed out"); - return err; - } - } - ice_init_feature_support(pf); - ice_request_fw(pf); + err = ice_init_ddp_config(hw, pf); - /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be + /* if ice_init_ddp_config fails, ICE_FLAG_ADV_FEATURES bit won't be * set in pf->state, which will cause ice_is_safe_mode to return * true */ - if (ice_is_safe_mode(pf)) { + if (err || ice_is_safe_mode(pf)) { /* we already got function/device capabilities but these don't * reflect what the driver needs to do in safe mode. 
Instead of * adding conditional logic everywhere to ignore these @@ -4643,7 +4740,7 @@ int ice_init_dev(struct ice_pf *pf) err = ice_init_pf(pf); if (err) { dev_err(dev, "ice_init_pf failed: %d\n", err); - goto err_init_pf; + return err; } pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; @@ -4667,7 +4764,7 @@ int ice_init_dev(struct ice_pf *pf) if (err) { dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err); err = -EIO; - goto err_init_interrupt_scheme; + goto unroll_pf_init; } /* In case of MSIX we are going to setup the misc vector right here @@ -4678,17 +4775,15 @@ int ice_init_dev(struct ice_pf *pf) err = ice_req_irq_msix_misc(pf); if (err) { dev_err(dev, "setup of misc vector failed: %d\n", err); - goto err_req_irq_msix_misc; + goto unroll_irq_scheme_init; } return 0; -err_req_irq_msix_misc: +unroll_irq_scheme_init: ice_clear_interrupt_scheme(pf); -err_init_interrupt_scheme: +unroll_pf_init: ice_deinit_pf(pf); -err_init_pf: - ice_deinit_hw(hw); return err; } @@ -4921,12 +5016,14 @@ static int ice_init_devlink(struct ice_pf *pf) ice_devlink_init_regions(pf); ice_devlink_register(pf); + ice_health_init(pf); return 0; } static void ice_deinit_devlink(struct ice_pf *pf) { + ice_health_deinit(pf); ice_devlink_unregister(pf); ice_devlink_destroy_regions(pf); ice_devlink_unregister_params(pf); @@ -4940,6 +5037,12 @@ static int ice_init(struct ice_pf *pf) if (err) return err; + if (pf->hw.mac_type == ICE_MAC_E830) { + err = pci_enable_ptm(pf->pdev, NULL); + if (err) + dev_dbg(ice_pf_to_dev(pf), "PCIe PTM not supported by PCIe bus/controller\n"); + } + err = ice_alloc_vsis(pf); if (err) goto err_alloc_vsis; @@ -5039,11 +5142,12 @@ int ice_load(struct ice_pf *pf) ice_napi_add(vsi); + ice_init_features(pf); + err = ice_init_rdma(pf); if (err) goto err_init_rdma; - ice_init_features(pf); ice_service_task_restart(pf); clear_bit(ICE_DOWN, pf->state); @@ -5051,6 +5155,7 @@ int ice_load(struct ice_pf *pf) return 0; err_init_rdma: + ice_deinit_features(pf); ice_tc_indir_block_unregister(vsi); err_tc_indir_block_register: ice_unregister_netdev(vsi); @@ -5074,14 +5179,44 @@ void ice_unload(struct ice_pf *pf) devl_assert_locked(priv_to_devlink(pf)); - ice_deinit_features(pf); ice_deinit_rdma(pf); + ice_deinit_features(pf); ice_tc_indir_block_unregister(vsi); ice_unregister_netdev(vsi); ice_devlink_destroy_pf_port(pf); ice_decfg_netdev(vsi); } +static int ice_probe_recovery_mode(struct ice_pf *pf) +{ + struct device *dev = ice_pf_to_dev(pf); + int err; + + dev_err(dev, "Firmware recovery mode detected. Limiting functionality. 
Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode\n"); + + INIT_HLIST_HEAD(&pf->aq_wait_list); + spin_lock_init(&pf->aq_wait_lock); + init_waitqueue_head(&pf->aq_wait_queue); + + timer_setup(&pf->serv_tmr, ice_service_timer, 0); + pf->serv_tmr_period = HZ; + INIT_WORK(&pf->serv_task, ice_service_task_recovery_mode); + clear_bit(ICE_SERVICE_SCHED, pf->state); + err = ice_create_all_ctrlq(&pf->hw); + if (err) + return err; + + scoped_guard(devl, priv_to_devlink(pf)) { + err = ice_init_devlink(pf); + if (err) + return err; + } + + ice_service_task_restart(pf); + + return 0; +} + /** * ice_probe - Device initialization routine * @pdev: PCI device information struct @@ -5093,6 +5228,7 @@ static int ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) { struct device *dev = &pdev->dev; + struct ice_adapter *adapter; struct ice_pf *pf; struct ice_hw *hw; int err; @@ -5144,7 +5280,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) } pci_set_master(pdev); - pf->pdev = pdev; pci_set_drvdata(pdev, pf); set_bit(ICE_DOWN, pf->state); @@ -5173,30 +5308,47 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) hw->debug_mask = debug; #endif + if (ice_is_recovery_mode(hw)) + return ice_probe_recovery_mode(pf); + + err = ice_init_hw(hw); + if (err) { + dev_err(dev, "ice_init_hw failed: %d\n", err); + return err; + } + + adapter = ice_adapter_get(pdev); + if (IS_ERR(adapter)) { + err = PTR_ERR(adapter); + goto unroll_hw_init; + } + pf->adapter = adapter; + err = ice_init(pf); if (err) - goto err_init; + goto unroll_adapter; devl_lock(priv_to_devlink(pf)); err = ice_load(pf); - devl_unlock(priv_to_devlink(pf)); if (err) - goto err_load; + goto unroll_init; err = ice_init_devlink(pf); if (err) - goto err_init_devlink; + goto unroll_load; + devl_unlock(priv_to_devlink(pf)); return 0; -err_init_devlink: - devl_lock(priv_to_devlink(pf)); +unroll_load: ice_unload(pf); +unroll_init: devl_unlock(priv_to_devlink(pf)); -err_load: ice_deinit(pf); -err_init: - pci_disable_device(pdev); +unroll_adapter: + ice_adapter_put(pdev); +unroll_hw_init: + ice_deinit_hw(hw); return err; } @@ -5276,6 +5428,14 @@ static void ice_remove(struct pci_dev *pdev) msleep(100); } + if (ice_is_recovery_mode(&pf->hw)) { + ice_service_task_stop(pf); + scoped_guard(devl, priv_to_devlink(pf)) { + ice_deinit_devlink(pf); + } + return; + } + if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { set_bit(ICE_VF_RESETS_DISABLED, pf->state); ice_free_vfs(pf); @@ -5290,9 +5450,10 @@ static void ice_remove(struct pci_dev *pdev) if (!ice_is_safe_mode(pf)) ice_remove_arfs(pf); + devl_lock(priv_to_devlink(pf)); + ice_dealloc_all_dynamic_ports(pf); ice_deinit_devlink(pf); - devl_lock(priv_to_devlink(pf)); ice_unload(pf); devl_unlock(priv_to_devlink(pf)); @@ -5302,7 +5463,7 @@ static void ice_remove(struct pci_dev *pdev) ice_setup_mc_magic_wake(pf); ice_set_wake(pf); - pci_disable_device(pdev); + ice_adapter_put(pdev); } /** @@ -5321,7 +5482,6 @@ static void ice_shutdown(struct pci_dev *pdev) } } -#ifdef CONFIG_PM /** * ice_prepare_for_shutdown - prep for PCI shutdown * @pf: board private structure @@ -5346,7 +5506,7 @@ static void ice_prepare_for_shutdown(struct ice_pf *pf) if (pf->vsi[v]) pf->vsi[v]->vsi_num = 0; - ice_shutdown_all_ctrlq(hw); + ice_shutdown_all_ctrlq(hw, true); } /** @@ -5383,7 +5543,9 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf) if (ret) goto err_reinit; 
ice_vsi_map_rings_to_vectors(pf->vsi[v]); + rtnl_lock(); ice_vsi_set_napi_queues(pf->vsi[v]); + rtnl_unlock(); } ret = ice_req_irq_msix_misc(pf); @@ -5397,8 +5559,12 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf) err_reinit: while (v--) - if (pf->vsi[v]) + if (pf->vsi[v]) { + rtnl_lock(); + ice_vsi_clear_napi_queues(pf->vsi[v]); + rtnl_unlock(); ice_vsi_free_q_vectors(pf->vsi[v]); + } return ret; } @@ -5410,7 +5576,7 @@ err_reinit: * Power Management callback to quiesce the device and prepare * for D3 transition. */ -static int __maybe_unused ice_suspend(struct device *dev) +static int ice_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct ice_pf *pf; @@ -5431,7 +5597,7 @@ static int __maybe_unused ice_suspend(struct device *dev) */ disabled = ice_service_task_stop(pf); - ice_unplug_aux_dev(pf); + ice_deinit_rdma(pf); /* Already suspended?, then there is nothing to do */ if (test_and_set_bit(ICE_SUSPENDED, pf->state)) { @@ -5463,6 +5629,9 @@ static int __maybe_unused ice_suspend(struct device *dev) ice_for_each_vsi(pf, v) { if (!pf->vsi[v]) continue; + rtnl_lock(); + ice_vsi_clear_napi_queues(pf->vsi[v]); + rtnl_unlock(); ice_vsi_free_q_vectors(pf->vsi[v]); } ice_clear_interrupt_scheme(pf); @@ -5477,7 +5646,7 @@ static int __maybe_unused ice_suspend(struct device *dev) * ice_resume - PM callback for waking up from D3 * @dev: generic device information structure */ -static int __maybe_unused ice_resume(struct device *dev) +static int ice_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); enum ice_reset_req reset_type; @@ -5511,6 +5680,11 @@ static int __maybe_unused ice_resume(struct device *dev) if (ret) dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret); + ret = ice_init_rdma(pf); + if (ret) + dev_err(dev, "Reinitialize RDMA during resume failed: %d\n", + ret); + clear_bit(ICE_DOWN, pf->state); /* Now perform PF reset and rebuild */ reset_type = ICE_RESET_PFR; @@ -5528,7 +5702,6 @@ static int __maybe_unused ice_resume(struct device *dev) return 0; } -#endif /* CONFIG_PM */ /** * ice_pci_err_detected - warning that PCI error has been detected @@ -5693,16 +5866,22 @@ static const struct pci_device_id ice_pci_tbl[] = { { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP), }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP), }, { PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII), }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_BACKPLANE) }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_QSFP56) }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP) }, - { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP_DD) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_BACKPLANE) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_QSFP56) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP_DD) }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_BACKPLANE), }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_BACKPLANE), }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_QSFP), }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_QSFP), }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_SFP), }, + { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_SFP), }, /* required last entry */ {} }; MODULE_DEVICE_TABLE(pci, ice_pci_tbl); -static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume); static const struct pci_error_handlers ice_pci_err_handler = { .error_detected = ice_pci_err_detected, @@ -5717,9 +5896,7 @@ static struct pci_driver ice_driver = { .id_table = ice_pci_tbl, .probe = ice_probe, .remove = ice_remove, -#ifdef 
CONFIG_PM - .driver.pm = &ice_pm_ops, -#endif /* CONFIG_PM */ + .driver.pm = pm_sleep_ptr(&ice_pm_ops), .shutdown = ice_shutdown, .sriov_configure = ice_sriov_configure, .sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix, @@ -5742,7 +5919,7 @@ static int __init ice_module_init(void) ice_adv_lnk_speed_maps_init(); - ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME); + ice_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, KBUILD_MODNAME); if (!ice_wq) { pr_err("Failed to create workqueue\n"); return status; @@ -5762,8 +5939,16 @@ static int __init ice_module_init(void) goto err_dest_lag_wq; } + status = ice_sf_driver_register(); + if (status) { + pr_err("Failed to register SF driver, err %d\n", status); + goto err_sf_driver; + } + return 0; +err_sf_driver: + pci_unregister_driver(&ice_driver); err_dest_lag_wq: destroy_workqueue(ice_lag_wq); ice_debugfs_exit(); @@ -5781,6 +5966,7 @@ module_init(ice_module_init); */ static void __exit ice_module_exit(void) { + ice_sf_driver_unregister(); pci_unregister_driver(&ice_driver); ice_debugfs_exit(); destroy_workqueue(ice_wq); @@ -5958,12 +6144,14 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate) * @addr: the MAC address entry being added * @vid: VLAN ID * @flags: instructions from stack about fdb operation + * @notified: whether notification was emitted * @extack: netlink extended ack */ static int ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], struct net_device *dev, const unsigned char *addr, u16 vid, - u16 flags, struct netlink_ext_ack __always_unused *extack) + u16 flags, bool *notified, + struct netlink_ext_ack __always_unused *extack) { int err; @@ -5997,12 +6185,14 @@ ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[], * @dev: the net device pointer * @addr: the MAC address entry being added * @vid: VLAN ID + * @notified: whether notification was emitted * @extack: netlink extended ack */ static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, - __always_unused u16 vid, struct netlink_ext_ack *extack) + __always_unused u16 vid, bool *notified, + struct netlink_ext_ack *extack) { int err; @@ -6206,10 +6396,12 @@ ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features) int err = 0; /* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking - * if either bit is set + * if either bit is set. In switchdev mode Rx filtering should never be + * enabled. */ - if (features & - (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) + if ((features & + (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) && + !ice_is_eswitch_mode_switchdev(vsi->back)) err = vlan_ops->ena_rx_filtering(vsi); else err = vlan_ops->dis_rx_filtering(vsi); @@ -6357,13 +6549,24 @@ ice_set_features(struct net_device *netdev, netdev_features_t features) if (changed & NETIF_F_HW_TC) { bool ena = !!(features & NETIF_F_HW_TC); - ena ? set_bit(ICE_FLAG_CLS_FLOWER, pf->flags) : - clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags); + assign_bit(ICE_FLAG_CLS_FLOWER, pf->flags, ena); } if (changed & NETIF_F_LOOPBACK) ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK)); + /* Due to E830 hardware limitations, TSO (NETIF_F_ALL_TSO) with GCS + * (NETIF_F_HW_CSUM) is not supported. 
+ */ + if (ice_is_feature_supported(pf, ICE_F_GCS) && + ((features & NETIF_F_HW_CSUM) && (features & NETIF_F_ALL_TSO))) { + if (netdev->features & NETIF_F_HW_CSUM) + dev_err(ice_pf_to_dev(pf), "To enable TSO, you must first disable HW checksum.\n"); + else + dev_err(ice_pf_to_dev(pf), "To enable HW checksum, you must first disable TSO.\n"); + return -EIO; + } + return ret; } @@ -6582,11 +6785,12 @@ static int ice_up_complete(struct ice_vsi *vsi) if (vsi->port_info && (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && - vsi->netdev && vsi->type == ICE_VSI_PF) { + ((vsi->netdev && (vsi->type == ICE_VSI_PF || + vsi->type == ICE_VSI_SF)))) { ice_print_link_msg(vsi, true); netif_tx_start_all_queues(vsi->netdev); netif_carrier_on(vsi->netdev); - ice_ptp_link_change(pf, pf->hw.pf_id, true); + ice_ptp_link_change(pf, true); } /* Perform an initial read of the statistics registers now to @@ -6940,7 +7144,6 @@ void ice_update_pf_stats(struct ice_pf *pf) * @netdev: network interface device structure * @stats: main device statistics structure */ -static void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct ice_netdev_priv *np = netdev_priv(netdev); @@ -7055,13 +7258,11 @@ int ice_down(struct ice_vsi *vsi) WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state)); - if (vsi->netdev && vsi->type == ICE_VSI_PF) { + if (vsi->netdev) { vlan_err = ice_vsi_del_vlan_zero(vsi); - ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false); + ice_ptp_link_change(vsi->back, false); netif_carrier_off(vsi->netdev); netif_tx_disable(vsi->netdev); - } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) { - ice_eswitch_stop_all_tx_queues(vsi->back); } ice_vsi_dis_irq(vsi); @@ -7070,7 +7271,7 @@ int ice_down(struct ice_vsi *vsi) if (tx_err) netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n", vsi->vsi_num, tx_err); - if (!tx_err && ice_is_xdp_ena_vsi(vsi)) { + if (!tx_err && vsi->xdp_rings) { tx_err = ice_vsi_stop_xdp_tx_rings(vsi); if (tx_err) netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n", @@ -7087,7 +7288,7 @@ int ice_down(struct ice_vsi *vsi) ice_for_each_txq(vsi, i) ice_clean_tx_ring(vsi->tx_rings[i]); - if (ice_is_xdp_ena_vsi(vsi)) + if (vsi->xdp_rings) ice_for_each_xdp_txq(vsi, i) ice_clean_tx_ring(vsi->xdp_rings[i]); @@ -7283,7 +7484,7 @@ int ice_vsi_open(struct ice_vsi *vsi) ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); - if (vsi->type == ICE_VSI_PF) { + if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_SF) { /* Notify the stack of the actual queue counts. 
*/ err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); if (err) @@ -7292,6 +7493,8 @@ int ice_vsi_open(struct ice_vsi *vsi) err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); if (err) goto err_set_qs; + + ice_vsi_set_napi_queues(vsi); } err = ice_up_complete(vsi); @@ -7429,6 +7632,7 @@ static void ice_update_pf_netdev_link(struct ice_pf *pf) */ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) { + struct ice_vsi *vsi = ice_get_main_vsi(pf); struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; bool dvm; @@ -7544,12 +7748,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) goto err_vsi_rebuild; } - err = ice_eswitch_rebuild(pf); - if (err) { - dev_err(dev, "Switchdev rebuild failed: %d\n", err); - goto err_vsi_rebuild; - } - if (reset_type == ICE_RESET_PFR) { err = ice_rebuild_channels(pf); if (err) { @@ -7577,6 +7775,9 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) ice_rebuild_arfs(pf); } + if (vsi && vsi->netdev) + netif_device_attach(vsi->netdev); + ice_update_pf_netdev_link(pf); /* tell the firmware we are up */ @@ -7592,6 +7793,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) /* if we get here, reset flow is successful */ clear_bit(ICE_RESET_FAILED, pf->state); + ice_health_clear(pf); + ice_plug_aux_dev(pf); if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) ice_lag_rebuild(pf); @@ -7604,7 +7807,7 @@ err_vsi_rebuild: err_sched_init_port: ice_sched_cleanup_all(hw); err_init_ctrlq: - ice_shutdown_all_ctrlq(hw); + ice_shutdown_all_ctrlq(hw, false); set_bit(ICE_RESET_FAILED, pf->state); clear_recovery: /* set this bit in PF state to control service task scheduling */ @@ -7619,7 +7822,7 @@ clear_recovery: * * Returns 0 on success, negative on failure */ -static int ice_change_mtu(struct net_device *netdev, int new_mtu) +int ice_change_mtu(struct net_device *netdev, int new_mtu) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_vsi *vsi = np->vsi; @@ -7666,7 +7869,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu) return -EBUSY; } - netdev->mtu = (unsigned int)new_mtu; + WRITE_ONCE(netdev->mtu, (unsigned int)new_mtu); err = ice_down_up(vsi); if (err) return err; @@ -7999,12 +8202,9 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, if (!br_spec) return -EINVAL; - nla_for_each_nested(attr, br_spec, rem) { - __u16 mode; + nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) { + __u16 mode = nla_get_u16(attr); - if (nla_type(attr) != IFLA_BRIDGE_MODE) - continue; - mode = nla_get_u16(attr); if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) return -EINVAL; /* Continue if bridge mode is not being flipped */ @@ -8046,7 +8246,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, * @netdev: network interface device structure * @txqueue: Tx queue */ -static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) +void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_tx_ring *tx_ring = NULL; @@ -8085,16 +8285,18 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) if (tx_ring) { struct ice_hw *hw = &pf->hw; - u32 head, val = 0; + u32 head, intr = 0; head = FIELD_GET(QTX_COMM_HEAD_HEAD_M, rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue]))); /* Read interrupt register */ - val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); + intr = rd32(hw, 
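The ice_bridge_setlink() hunk above switches to nla_for_each_nested_type(), which iterates only the nested attributes of the requested type and makes the manual nla_type() filter unnecessary. A minimal sketch of the same pattern; foo_get_bridge_mode() is an illustrative helper, not part of the driver:

#include <net/netlink.h>
#include <linux/if_bridge.h>

static int foo_get_bridge_mode(struct nlattr *br_spec, u16 *mode)
{
	struct nlattr *attr;
	int rem;

	/* visit IFLA_BRIDGE_MODE attributes only, skip everything else */
	nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
		*mode = nla_get_u16(attr);
		return 0;
	}

	return -EINVAL;
}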
GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", vsi->vsi_num, txqueue, tx_ring->next_to_clean, - head, tx_ring->next_to_use, val); + head, tx_ring->next_to_use, intr); + + ice_prep_tx_hang_report(pf, tx_ring, vsi->vsi_num, head, intr); } pf->tx_timeout_last_recovery = jiffies; @@ -8128,11 +8330,16 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue) * @np: net device to configure * @filter_dev: device on which filter is added * @cls_flower: offload data + * @ingress: if the rule is added to an ingress block + * + * Return: 0 if the flower was successfully added or deleted, + * negative error code otherwise. */ static int ice_setup_tc_cls_flower(struct ice_netdev_priv *np, struct net_device *filter_dev, - struct flow_cls_offload *cls_flower) + struct flow_cls_offload *cls_flower, + bool ingress) { struct ice_vsi *vsi = np->vsi; @@ -8141,7 +8348,7 @@ ice_setup_tc_cls_flower(struct ice_netdev_priv *np, switch (cls_flower->command) { case FLOW_CLS_REPLACE: - return ice_add_cls_flower(filter_dev, vsi, cls_flower); + return ice_add_cls_flower(filter_dev, vsi, cls_flower, ingress); case FLOW_CLS_DESTROY: return ice_del_cls_flower(vsi, cls_flower); default: @@ -8150,20 +8357,46 @@ ice_setup_tc_cls_flower(struct ice_netdev_priv *np, } /** - * ice_setup_tc_block_cb - callback handler registered for TC block + * ice_setup_tc_block_cb_ingress - callback handler for ingress TC block * @type: TC SETUP type * @type_data: TC flower offload data that contains user input * @cb_priv: netdev private data + * + * Return: 0 if the setup was successful, negative error code otherwise. */ static int -ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) +ice_setup_tc_block_cb_ingress(enum tc_setup_type type, void *type_data, + void *cb_priv) { struct ice_netdev_priv *np = cb_priv; switch (type) { case TC_SETUP_CLSFLOWER: return ice_setup_tc_cls_flower(np, np->vsi->netdev, - type_data); + type_data, true); + default: + return -EOPNOTSUPP; + } +} + +/** + * ice_setup_tc_block_cb_egress - callback handler for egress TC block + * @type: TC SETUP type + * @type_data: TC flower offload data that contains user input + * @cb_priv: netdev private data + * + * Return: 0 if the setup was successful, negative error code otherwise. 
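A compact sketch of the ingress/egress flower plumbing introduced here and completed in the hunks that follow: both block callbacks funnel into one classifier handler that carries a direction flag, and the TC_SETUP_BLOCK path picks the callback from the binder type before handing it to flow_block_cb_setup_simple(). All foo_* names are placeholders and the handler body is elided.

#include <net/flow_offload.h>
#include <net/pkt_cls.h>

static LIST_HEAD(foo_block_cb_list);

/* single classifier handler; @ingress records which block the rule hit */
static int foo_add_del_flower(void *cb_priv,
			      struct flow_cls_offload *cls_flower,
			      bool ingress)
{
	return -EOPNOTSUPP;	/* REPLACE/DESTROY handling would go here */
}

static int foo_block_cb_ingress(enum tc_setup_type type, void *type_data,
				void *cb_priv)
{
	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;
	return foo_add_del_flower(cb_priv, type_data, true);
}

static int foo_block_cb_egress(enum tc_setup_type type, void *type_data,
			       void *cb_priv)
{
	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;
	return foo_add_del_flower(cb_priv, type_data, false);
}

static int foo_setup_tc_block(struct flow_block_offload *f, void *cb_priv)
{
	flow_setup_cb_t *cb;

	switch (f->binder_type) {
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
		cb = foo_block_cb_ingress;
		break;
	case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
		cb = foo_block_cb_egress;
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* last argument: no longer ingress-only now that egress is handled */
	return flow_block_cb_setup_simple(f, &foo_block_cb_list, cb,
					  cb_priv, cb_priv, false);
}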
+ */ +static int +ice_setup_tc_block_cb_egress(enum tc_setup_type type, void *type_data, + void *cb_priv) +{ + struct ice_netdev_priv *np = cb_priv; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return ice_setup_tc_cls_flower(np, np->vsi->netdev, + type_data, false); default: return -EOPNOTSUPP; } @@ -9108,27 +9341,45 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data) { struct ice_netdev_priv *np = netdev_priv(netdev); + enum flow_block_binder_type binder_type; + struct iidc_rdma_core_dev_info *cdev; struct ice_pf *pf = np->vsi->back; + flow_setup_cb_t *flower_handler; bool locked = false; int err; switch (type) { case TC_SETUP_BLOCK: + binder_type = + ((struct flow_block_offload *)type_data)->binder_type; + + switch (binder_type) { + case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS: + flower_handler = ice_setup_tc_block_cb_ingress; + break; + case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS: + flower_handler = ice_setup_tc_block_cb_egress; + break; + default: + return -EOPNOTSUPP; + } + return flow_block_cb_setup_simple(type_data, &ice_block_cb_list, - ice_setup_tc_block_cb, - np, np, true); + flower_handler, + np, np, false); case TC_SETUP_QDISC_MQPRIO: if (ice_is_eswitch_mode_switchdev(pf)) { netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n"); return -EOPNOTSUPP; } - if (pf->adev) { + cdev = pf->cdev_info; + if (cdev && cdev->adev) { mutex_lock(&pf->adev_mutex); - device_lock(&pf->adev->dev); + device_lock(&cdev->adev->dev); locked = true; - if (pf->adev->dev.driver) { + if (cdev->adev->dev.driver) { netdev_err(netdev, "Cannot change qdisc when RDMA is active\n"); err = -EBUSY; goto adev_unlock; @@ -9142,7 +9393,7 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type, adev_unlock: if (locked) { - device_unlock(&pf->adev->dev); + device_unlock(&cdev->adev->dev); mutex_unlock(&pf->adev_mutex); } return err; @@ -9178,7 +9429,7 @@ ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data, case TC_SETUP_CLSFLOWER: return ice_setup_tc_cls_flower(np, priv->netdev, (struct flow_cls_offload *) - type_data); + type_data, false); default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c index d4e05d2cb30c..59e8879ac059 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.c +++ b/drivers/net/ethernet/intel/ice/ice_nvm.c @@ -18,10 +18,9 @@ * * Read the NVM using the admin queue commands (0x0701) */ -static int -ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length, - void *data, bool last_command, bool read_shadow_ram, - struct ice_sq_cd *cd) +int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, + u16 length, void *data, bool last_command, + bool read_shadow_ram, struct ice_sq_cd *cd) { struct ice_aq_desc desc; struct ice_aqc_nvm *cmd; @@ -375,11 +374,25 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1 * * Read the specified word from the copy of the Shadow RAM found in the * specified NVM module. + * + * Note that the Shadow RAM copy is always located after the CSS header, and + * is aligned to 64-byte (32-word) offsets. 
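The ice_read_nvm_sr_copy() change that follows derives the Shadow RAM copy offset from the per-bank CSS header length instead of a fixed constant: the header length in words is rounded up to the next 32-word boundary and used as the base for every Shadow RAM word read. A small worked sketch, with an assumed header length purely for illustration:

#include <linux/math.h>

static u32 foo_sr_copy_base(u32 css_hdr_len_words)
{
	/* an assumed 330-word CSS header yields a 352-word base offset */
	return roundup(css_hdr_len_words, 32);
}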
*/ static int ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data) { - return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data); + u32 sr_copy; + + switch (bank) { + case ICE_ACTIVE_FLASH_BANK: + sr_copy = roundup(hw->flash.banks.active_css_hdr_len, 32); + break; + case ICE_INACTIVE_FLASH_BANK: + sr_copy = roundup(hw->flash.banks.inactive_css_hdr_len, 32); + break; + } + + return ice_read_nvm_module(hw, bank, sr_copy + offset, data); } /** @@ -441,8 +454,7 @@ int ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, u16 module_type) { - u16 pfa_len, pfa_ptr; - u16 next_tlv; + u16 pfa_len, pfa_ptr, next_tlv, max_tlv; int status; status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr); @@ -455,11 +467,23 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n"); return status; } + + /* The Preserved Fields Area contains a sequence of Type-Length-Value + * structures which define its contents. The PFA length includes all + * of the TLVs, plus the initial length word itself, *and* one final + * word at the end after all of the TLVs. + */ + if (check_add_overflow(pfa_ptr, pfa_len - 1, &max_tlv)) { + dev_warn(ice_hw_to_dev(hw), "PFA starts at offset %u. PFA length of %u caused 16-bit arithmetic overflow.\n", + pfa_ptr, pfa_len); + return -EINVAL; + } + /* Starting with first TLV after PFA length, iterate through the list * of TLVs to find the requested one. */ next_tlv = pfa_ptr + 1; - while (next_tlv < pfa_ptr + pfa_len) { + while (next_tlv < max_tlv) { u16 tlv_sub_module_type; u16 tlv_len; @@ -483,10 +507,13 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len, } return -EINVAL; } - /* Check next TLV, i.e. current TLV pointer + length + 2 words - * (for current TLV's type and length) - */ - next_tlv = next_tlv + tlv_len + 2; + + if (check_add_overflow(next_tlv, 2, &next_tlv) || + check_add_overflow(next_tlv, tlv_len, &next_tlv)) { + dev_warn(ice_hw_to_dev(hw), "TLV of type %u and length 0x%04x caused 16-bit arithmetic overflow. The PFA starts at 0x%04x and has length of 0x%04x\n", + tlv_sub_module_type, tlv_len, pfa_ptr, pfa_len); + return -EINVAL; + } } /* Module does not exist */ return -ENOENT; @@ -1011,6 +1038,72 @@ static int ice_determine_active_flash_banks(struct ice_hw *hw) } /** + * ice_get_nvm_css_hdr_len - Read the CSS header length from the NVM CSS header + * @hw: pointer to the HW struct + * @bank: whether to read from the active or inactive flash bank + * @hdr_len: storage for header length in words + * + * Read the CSS header length from the NVM CSS header and add the Authentication + * header size, and then convert to words. + * + * Return: zero on success, or a negative error code on failure. 
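The PFA walk above now derives both the end-of-area bound and every TLV advance with check_add_overflow(), so a corrupted length word read from flash cannot wrap the 16-bit offset and turn the loop into an out-of-bounds scan. A minimal sketch of the advance step with illustrative names:

#include <linux/overflow.h>
#include <linux/errno.h>
#include <linux/types.h>

/* step past one TLV: two header words (type, length) plus the payload */
static int foo_next_tlv(u16 cur, u16 tlv_len, u16 max_tlv, u16 *next)
{
	u16 tmp;

	if (check_add_overflow(cur, 2, &tmp) ||
	    check_add_overflow(tmp, tlv_len, &tmp))
		return -EINVAL;		/* the 16-bit offset would wrap */

	if (tmp >= max_tlv)
		return -ENOENT;		/* walked off the end of the PFA */

	*next = tmp;
	return 0;
}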
+ */ +static int +ice_get_nvm_css_hdr_len(struct ice_hw *hw, enum ice_bank_select bank, + u32 *hdr_len) +{ + u16 hdr_len_l, hdr_len_h; + u32 hdr_len_dword; + int status; + + status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_L, + &hdr_len_l); + if (status) + return status; + + status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_H, + &hdr_len_h); + if (status) + return status; + + /* CSS header length is in DWORD, so convert to words and add + * authentication header size + */ + hdr_len_dword = hdr_len_h << 16 | hdr_len_l; + *hdr_len = (hdr_len_dword * 2) + ICE_NVM_AUTH_HEADER_LEN; + + return 0; +} + +/** + * ice_determine_css_hdr_len - Discover CSS header length for the device + * @hw: pointer to the HW struct + * + * Determine the size of the CSS header at the start of the NVM module. This + * is useful for locating the Shadow RAM copy in the NVM, as the Shadow RAM is + * always located just after the CSS header. + * + * Return: zero on success, or a negative error code on failure. + */ +static int ice_determine_css_hdr_len(struct ice_hw *hw) +{ + struct ice_bank_info *banks = &hw->flash.banks; + int status; + + status = ice_get_nvm_css_hdr_len(hw, ICE_ACTIVE_FLASH_BANK, + &banks->active_css_hdr_len); + if (status) + return status; + + status = ice_get_nvm_css_hdr_len(hw, ICE_INACTIVE_FLASH_BANK, + &banks->inactive_css_hdr_len); + if (status) + return status; + + return 0; +} + +/** * ice_init_nvm - initializes NVM setting * @hw: pointer to the HW struct * @@ -1056,6 +1149,12 @@ int ice_init_nvm(struct ice_hw *hw) return status; } + status = ice_determine_css_hdr_len(hw); + if (status) { + ice_debug(hw, ICE_DBG_NVM, "Failed to determine Shadow RAM copy offsets.\n"); + return status; + } + status = ice_get_nvm_ver_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->nvm); if (status) { ice_debug(hw, ICE_DBG_INIT, "Failed to read NVM info.\n"); diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h index 774c2317967d..63cdc6bdac58 100644 --- a/drivers/net/ethernet/intel/ice/ice_nvm.h +++ b/drivers/net/ethernet/intel/ice/ice_nvm.h @@ -14,6 +14,9 @@ struct ice_orom_civd_info { int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access); void ice_release_nvm(struct ice_hw *hw); +int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, + u16 length, void *data, bool last_command, + bool read_shadow_ram, struct ice_sq_cd *cd); int ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data, bool read_shadow_ram); diff --git a/drivers/net/ethernet/intel/ice/ice_osdep.h b/drivers/net/ethernet/intel/ice/ice_osdep.h index a2562f04267f..b9f383494b3f 100644 --- a/drivers/net/ethernet/intel/ice/ice_osdep.h +++ b/drivers/net/ethernet/intel/ice/ice_osdep.h @@ -12,6 +12,7 @@ #include <linux/ethtool.h> #include <linux/etherdevice.h> #include <linux/if_ether.h> +#include <linux/iopoll.h> #include <linux/pci_ids.h> #ifndef CONFIG_64BIT #include <linux/io-64-nonatomic-lo-hi.h> @@ -23,6 +24,9 @@ #define wr64(a, reg, value) writeq((value), ((a)->hw_addr + (reg))) #define rd64(a, reg) readq((a)->hw_addr + (reg)) +#define rd32_poll_timeout(a, addr, val, cond, delay_us, timeout_us) \ + read_poll_timeout(rd32, val, cond, delay_us, timeout_us, false, a, addr) + #define ice_flush(a) rd32((a), GLGEN_STAT) #define ICE_M(m, s) ((m ## U) << (s)) @@ -39,11 +43,10 @@ struct device *ice_hw_to_dev(struct ice_hw *hw); #define ice_debug(hw, type, fmt, args...) 
\ dev_dbg(ice_hw_to_dev(hw), fmt, ##args) -#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \ - print_hex_dump_debug(KBUILD_MODNAME " ", \ - DUMP_PREFIX_OFFSET, rowsize, \ - groupsize, buf, len, false) -#else +#define _ice_debug_array(hw, type, prefix, rowsize, groupsize, buf, len) \ + print_hex_dump_debug(prefix, DUMP_PREFIX_OFFSET, \ + rowsize, groupsize, buf, len, false) +#else /* CONFIG_DYNAMIC_DEBUG */ #define ice_debug(hw, type, fmt, args...) \ do { \ if ((type) & (hw)->debug_mask) \ @@ -51,16 +54,15 @@ do { \ } while (0) #ifdef DEBUG -#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \ +#define _ice_debug_array(hw, type, prefix, rowsize, groupsize, buf, len) \ do { \ if ((type) & (hw)->debug_mask) \ - print_hex_dump_debug(KBUILD_MODNAME, \ - DUMP_PREFIX_OFFSET, \ + print_hex_dump_debug(prefix, DUMP_PREFIX_OFFSET,\ rowsize, groupsize, buf, \ len, false); \ } while (0) -#else -#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \ +#else /* DEBUG */ +#define _ice_debug_array(hw, type, prefix, rowsize, groupsize, buf, len) \ do { \ struct ice_hw *hw_l = hw; \ if ((type) & (hw_l)->debug_mask) { \ @@ -78,4 +80,10 @@ do { \ #endif /* DEBUG */ #endif /* CONFIG_DYNAMIC_DEBUG */ +#define ice_debug_array(hw, type, rowsize, groupsize, buf, len) \ + _ice_debug_array(hw, type, KBUILD_MODNAME, rowsize, groupsize, buf, len) + +#define ice_debug_array_w_prefix(hw, type, prefix, buf, len) \ + _ice_debug_array(hw, type, prefix, 16, 1, buf, len) + #endif /* _ICE_OSDEP_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_parser.c b/drivers/net/ethernet/intel/ice/ice_parser.c new file mode 100644 index 000000000000..664beb64f557 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_parser.c @@ -0,0 +1,2430 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2024 Intel Corporation */ + +#include "ice_common.h" + +struct ice_pkg_sect_hdr { + __le16 count; + __le16 offset; +}; + +/** + * ice_parser_sect_item_get - parse an item from a section + * @sect_type: section type + * @section: section object + * @index: index of the item to get + * @offset: dummy as prototype of ice_pkg_enum_entry's last parameter + * + * Return: a pointer to the item or NULL. 
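The rd32_poll_timeout() wrapper added in the ice_osdep.h hunk above is read_poll_timeout() specialized for rd32(), so a caller can wait for a register condition with a bounded timeout instead of open-coding a loop. A hedged usage sketch; the register offset, the ready bit and the timings are illustrative, not taken from the hardware spec:

#define FOO_STATUS		0x000B6190	/* illustrative register */
#define FOO_STATUS_READY_M	BIT(0)		/* illustrative ready bit */

static int foo_wait_hw_ready(struct ice_hw *hw)
{
	u32 val;

	/* read every 100 us, give up after 10 ms; -ETIMEDOUT on failure */
	return rd32_poll_timeout(hw, FOO_STATUS, val,
				 val & FOO_STATUS_READY_M, 100, 10000);
}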
+ */ +static void *ice_parser_sect_item_get(u32 sect_type, void *section, + u32 index, u32 __maybe_unused *offset) +{ + size_t data_off = ICE_SEC_DATA_OFFSET; + struct ice_pkg_sect_hdr *hdr; + size_t size; + + if (!section) + return NULL; + + switch (sect_type) { + case ICE_SID_RXPARSER_IMEM: + size = ICE_SID_RXPARSER_IMEM_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_METADATA_INIT: + size = ICE_SID_RXPARSER_METADATA_INIT_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_CAM: + size = ICE_SID_RXPARSER_CAM_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_PG_SPILL: + size = ICE_SID_RXPARSER_PG_SPILL_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_NOMATCH_CAM: + size = ICE_SID_RXPARSER_NOMATCH_CAM_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_NOMATCH_SPILL: + size = ICE_SID_RXPARSER_NOMATCH_SPILL_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_BOOST_TCAM: + size = ICE_SID_RXPARSER_BOOST_TCAM_ENTRY_SIZE; + break; + case ICE_SID_LBL_RXPARSER_TMEM: + data_off = ICE_SEC_LBL_DATA_OFFSET; + size = ICE_SID_LBL_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_MARKER_PTYPE: + size = ICE_SID_RXPARSER_MARKER_TYPE_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_MARKER_GRP: + size = ICE_SID_RXPARSER_MARKER_GRP_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_PROTO_GRP: + size = ICE_SID_RXPARSER_PROTO_GRP_ENTRY_SIZE; + break; + case ICE_SID_RXPARSER_FLAG_REDIR: + size = ICE_SID_RXPARSER_FLAG_REDIR_ENTRY_SIZE; + break; + default: + return NULL; + } + + hdr = section; + if (index >= le16_to_cpu(hdr->count)) + return NULL; + + return section + data_off + index * size; +} + +/** + * ice_parser_create_table - create an item table from a section + * @hw: pointer to the hardware structure + * @sect_type: section type + * @item_size: item size in bytes + * @length: number of items in the table to create + * @parse_item: the function to parse the item + * @no_offset: ignore header offset, calculate index from 0 + * + * Return: a pointer to the allocated table or ERR_PTR. 
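ice_parser_create_table() below returns either a populated table or an ERR_PTR() encoded error, so the per-section getters built on top of it are meant to be checked with IS_ERR()/PTR_ERR() and released with kfree() once the parser is torn down. A sketch of that calling convention; struct foo_parser and its init/destroy helpers are illustrative:

#include <linux/err.h>
#include <linux/slab.h>

struct foo_parser {
	struct ice_imem_item *imem_table;
};

static int foo_parser_init(struct ice_hw *hw, struct foo_parser *p)
{
	p->imem_table = ice_imem_table_get(hw);
	if (IS_ERR(p->imem_table))
		return PTR_ERR(p->imem_table);

	return 0;
}

static void foo_parser_destroy(struct foo_parser *p)
{
	kfree(p->imem_table);	/* the tables come from kzalloc() */
}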
+ */ +static void * +ice_parser_create_table(struct ice_hw *hw, u32 sect_type, + u32 item_size, u32 length, + void (*parse_item)(struct ice_hw *hw, u16 idx, + void *item, void *data, + int size), bool no_offset) +{ + struct ice_pkg_enum state = {}; + struct ice_seg *seg = hw->seg; + void *table, *data, *item; + u16 idx = 0; + + if (!seg) + return ERR_PTR(-EINVAL); + + table = kzalloc(item_size * length, GFP_KERNEL); + if (!table) + return ERR_PTR(-ENOMEM); + + do { + data = ice_pkg_enum_entry(seg, &state, sect_type, NULL, + ice_parser_sect_item_get); + seg = NULL; + if (data) { + struct ice_pkg_sect_hdr *hdr = state.sect; + + if (!no_offset) + idx = le16_to_cpu(hdr->offset) + + state.entry_idx; + + item = (void *)((uintptr_t)table + idx * item_size); + parse_item(hw, idx, item, data, item_size); + + if (no_offset) + idx++; + } + } while (data); + + return table; +} + +/*** ICE_SID_RXPARSER_IMEM section ***/ +static void ice_imem_bst_bm_dump(struct ice_hw *hw, struct ice_bst_main *bm) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "boost main:\n"); + dev_info(dev, "\talu0 = %d\n", bm->alu0); + dev_info(dev, "\talu1 = %d\n", bm->alu1); + dev_info(dev, "\talu2 = %d\n", bm->alu2); + dev_info(dev, "\tpg = %d\n", bm->pg); +} + +static void ice_imem_bst_kb_dump(struct ice_hw *hw, + struct ice_bst_keybuilder *kb) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "boost key builder:\n"); + dev_info(dev, "\tpriority = %d\n", kb->prio); + dev_info(dev, "\ttsr_ctrl = %d\n", kb->tsr_ctrl); +} + +static void ice_imem_np_kb_dump(struct ice_hw *hw, + struct ice_np_keybuilder *kb) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "next proto key builder:\n"); + dev_info(dev, "\topc = %d\n", kb->opc); + dev_info(dev, "\tstart_or_reg0 = %d\n", kb->start_reg0); + dev_info(dev, "\tlen_or_reg1 = %d\n", kb->len_reg1); +} + +static void ice_imem_pg_kb_dump(struct ice_hw *hw, + struct ice_pg_keybuilder *kb) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "parse graph key builder:\n"); + dev_info(dev, "\tflag0_ena = %d\n", kb->flag0_ena); + dev_info(dev, "\tflag1_ena = %d\n", kb->flag1_ena); + dev_info(dev, "\tflag2_ena = %d\n", kb->flag2_ena); + dev_info(dev, "\tflag3_ena = %d\n", kb->flag3_ena); + dev_info(dev, "\tflag0_idx = %d\n", kb->flag0_idx); + dev_info(dev, "\tflag1_idx = %d\n", kb->flag1_idx); + dev_info(dev, "\tflag2_idx = %d\n", kb->flag2_idx); + dev_info(dev, "\tflag3_idx = %d\n", kb->flag3_idx); + dev_info(dev, "\talu_reg_idx = %d\n", kb->alu_reg_idx); +} + +static void ice_imem_alu_dump(struct ice_hw *hw, + struct ice_alu *alu, int index) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "alu%d:\n", index); + dev_info(dev, "\topc = %d\n", alu->opc); + dev_info(dev, "\tsrc_start = %d\n", alu->src_start); + dev_info(dev, "\tsrc_len = %d\n", alu->src_len); + dev_info(dev, "\tshift_xlate_sel = %d\n", alu->shift_xlate_sel); + dev_info(dev, "\tshift_xlate_key = %d\n", alu->shift_xlate_key); + dev_info(dev, "\tsrc_reg_id = %d\n", alu->src_reg_id); + dev_info(dev, "\tdst_reg_id = %d\n", alu->dst_reg_id); + dev_info(dev, "\tinc0 = %d\n", alu->inc0); + dev_info(dev, "\tinc1 = %d\n", alu->inc1); + dev_info(dev, "\tproto_offset_opc = %d\n", alu->proto_offset_opc); + dev_info(dev, "\tproto_offset = %d\n", alu->proto_offset); + dev_info(dev, "\tbranch_addr = %d\n", alu->branch_addr); + dev_info(dev, "\timm = %d\n", alu->imm); + dev_info(dev, "\tdst_start = %d\n", alu->dst_start); + dev_info(dev, "\tdst_len = %d\n", alu->dst_len); + dev_info(dev, 
"\tflags_extr_imm = %d\n", alu->flags_extr_imm); + dev_info(dev, "\tflags_start_imm= %d\n", alu->flags_start_imm); +} + +/** + * ice_imem_dump - dump an imem item info + * @hw: pointer to the hardware structure + * @item: imem item to dump + */ +static void ice_imem_dump(struct ice_hw *hw, struct ice_imem_item *item) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "index = %d\n", item->idx); + ice_imem_bst_bm_dump(hw, &item->b_m); + ice_imem_bst_kb_dump(hw, &item->b_kb); + dev_info(dev, "pg priority = %d\n", item->pg_prio); + ice_imem_np_kb_dump(hw, &item->np_kb); + ice_imem_pg_kb_dump(hw, &item->pg_kb); + ice_imem_alu_dump(hw, &item->alu0, 0); + ice_imem_alu_dump(hw, &item->alu1, 1); + ice_imem_alu_dump(hw, &item->alu2, 2); +} + +#define ICE_IM_BM_ALU0 BIT(0) +#define ICE_IM_BM_ALU1 BIT(1) +#define ICE_IM_BM_ALU2 BIT(2) +#define ICE_IM_BM_PG BIT(3) + +/** + * ice_imem_bm_init - parse 4 bits of Boost Main + * @bm: pointer to the Boost Main structure + * @data: Boost Main data to be parsed + */ +static void ice_imem_bm_init(struct ice_bst_main *bm, u8 data) +{ + bm->alu0 = FIELD_GET(ICE_IM_BM_ALU0, data); + bm->alu1 = FIELD_GET(ICE_IM_BM_ALU1, data); + bm->alu2 = FIELD_GET(ICE_IM_BM_ALU2, data); + bm->pg = FIELD_GET(ICE_IM_BM_PG, data); +} + +#define ICE_IM_BKB_PRIO GENMASK(7, 0) +#define ICE_IM_BKB_TSR_CTRL BIT(8) + +/** + * ice_imem_bkb_init - parse 10 bits of Boost Main Build + * @bkb: pointer to the Boost Main Build structure + * @data: Boost Main Build data to be parsed + */ +static void ice_imem_bkb_init(struct ice_bst_keybuilder *bkb, u16 data) +{ + bkb->prio = FIELD_GET(ICE_IM_BKB_PRIO, data); + bkb->tsr_ctrl = FIELD_GET(ICE_IM_BKB_TSR_CTRL, data); +} + +#define ICE_IM_NPKB_OPC GENMASK(1, 0) +#define ICE_IM_NPKB_S_R0 GENMASK(9, 2) +#define ICE_IM_NPKB_L_R1 GENMASK(17, 10) + +/** + * ice_imem_npkb_init - parse 18 bits of Next Protocol Key Build + * @kb: pointer to the Next Protocol Key Build structure + * @data: Next Protocol Key Build data to be parsed + */ +static void ice_imem_npkb_init(struct ice_np_keybuilder *kb, u32 data) +{ + kb->opc = FIELD_GET(ICE_IM_NPKB_OPC, data); + kb->start_reg0 = FIELD_GET(ICE_IM_NPKB_S_R0, data); + kb->len_reg1 = FIELD_GET(ICE_IM_NPKB_L_R1, data); +} + +#define ICE_IM_PGKB_F0_ENA BIT_ULL(0) +#define ICE_IM_PGKB_F0_IDX GENMASK_ULL(6, 1) +#define ICE_IM_PGKB_F1_ENA BIT_ULL(7) +#define ICE_IM_PGKB_F1_IDX GENMASK_ULL(13, 8) +#define ICE_IM_PGKB_F2_ENA BIT_ULL(14) +#define ICE_IM_PGKB_F2_IDX GENMASK_ULL(20, 15) +#define ICE_IM_PGKB_F3_ENA BIT_ULL(21) +#define ICE_IM_PGKB_F3_IDX GENMASK_ULL(27, 22) +#define ICE_IM_PGKB_AR_IDX GENMASK_ULL(34, 28) + +/** + * ice_imem_pgkb_init - parse 35 bits of Parse Graph Key Build + * @kb: pointer to the Parse Graph Key Build structure + * @data: Parse Graph Key Build data to be parsed + */ +static void ice_imem_pgkb_init(struct ice_pg_keybuilder *kb, u64 data) +{ + kb->flag0_ena = FIELD_GET(ICE_IM_PGKB_F0_ENA, data); + kb->flag0_idx = FIELD_GET(ICE_IM_PGKB_F0_IDX, data); + kb->flag1_ena = FIELD_GET(ICE_IM_PGKB_F1_ENA, data); + kb->flag1_idx = FIELD_GET(ICE_IM_PGKB_F1_IDX, data); + kb->flag2_ena = FIELD_GET(ICE_IM_PGKB_F2_ENA, data); + kb->flag2_idx = FIELD_GET(ICE_IM_PGKB_F2_IDX, data); + kb->flag3_ena = FIELD_GET(ICE_IM_PGKB_F3_ENA, data); + kb->flag3_idx = FIELD_GET(ICE_IM_PGKB_F3_IDX, data); + kb->alu_reg_idx = FIELD_GET(ICE_IM_PGKB_AR_IDX, data); +} + +#define ICE_IM_ALU_OPC GENMASK_ULL(5, 0) +#define ICE_IM_ALU_SS GENMASK_ULL(13, 6) +#define ICE_IM_ALU_SL GENMASK_ULL(18, 14) +#define ICE_IM_ALU_SXS 
BIT_ULL(19) +#define ICE_IM_ALU_SXK GENMASK_ULL(23, 20) +#define ICE_IM_ALU_SRID GENMASK_ULL(30, 24) +#define ICE_IM_ALU_DRID GENMASK_ULL(37, 31) +#define ICE_IM_ALU_INC0 BIT_ULL(38) +#define ICE_IM_ALU_INC1 BIT_ULL(39) +#define ICE_IM_ALU_POO GENMASK_ULL(41, 40) +#define ICE_IM_ALU_PO GENMASK_ULL(49, 42) +#define ICE_IM_ALU_BA_S 50 /* offset for the 2nd 64-bits field */ +#define ICE_IM_ALU_BA GENMASK_ULL(57 - ICE_IM_ALU_BA_S, \ + 50 - ICE_IM_ALU_BA_S) +#define ICE_IM_ALU_IMM GENMASK_ULL(73 - ICE_IM_ALU_BA_S, \ + 58 - ICE_IM_ALU_BA_S) +#define ICE_IM_ALU_DFE BIT_ULL(74 - ICE_IM_ALU_BA_S) +#define ICE_IM_ALU_DS GENMASK_ULL(80 - ICE_IM_ALU_BA_S, \ + 75 - ICE_IM_ALU_BA_S) +#define ICE_IM_ALU_DL GENMASK_ULL(86 - ICE_IM_ALU_BA_S, \ + 81 - ICE_IM_ALU_BA_S) +#define ICE_IM_ALU_FEI BIT_ULL(87 - ICE_IM_ALU_BA_S) +#define ICE_IM_ALU_FSI GENMASK_ULL(95 - ICE_IM_ALU_BA_S, \ + 88 - ICE_IM_ALU_BA_S) + +/** + * ice_imem_alu_init - parse 96 bits of ALU entry + * @alu: pointer to the ALU entry structure + * @data: ALU entry data to be parsed + * @off: offset of the ALU entry data + */ +static void ice_imem_alu_init(struct ice_alu *alu, u8 *data, u8 off) +{ + u64 d64; + u8 idd; + + d64 = *((u64 *)data) >> off; + + alu->opc = FIELD_GET(ICE_IM_ALU_OPC, d64); + alu->src_start = FIELD_GET(ICE_IM_ALU_SS, d64); + alu->src_len = FIELD_GET(ICE_IM_ALU_SL, d64); + alu->shift_xlate_sel = FIELD_GET(ICE_IM_ALU_SXS, d64); + alu->shift_xlate_key = FIELD_GET(ICE_IM_ALU_SXK, d64); + alu->src_reg_id = FIELD_GET(ICE_IM_ALU_SRID, d64); + alu->dst_reg_id = FIELD_GET(ICE_IM_ALU_DRID, d64); + alu->inc0 = FIELD_GET(ICE_IM_ALU_INC0, d64); + alu->inc1 = FIELD_GET(ICE_IM_ALU_INC1, d64); + alu->proto_offset_opc = FIELD_GET(ICE_IM_ALU_POO, d64); + alu->proto_offset = FIELD_GET(ICE_IM_ALU_PO, d64); + + idd = (ICE_IM_ALU_BA_S + off) / BITS_PER_BYTE; + off = (ICE_IM_ALU_BA_S + off) % BITS_PER_BYTE; + d64 = *((u64 *)(&data[idd])) >> off; + + alu->branch_addr = FIELD_GET(ICE_IM_ALU_BA, d64); + alu->imm = FIELD_GET(ICE_IM_ALU_IMM, d64); + alu->dedicate_flags_ena = FIELD_GET(ICE_IM_ALU_DFE, d64); + alu->dst_start = FIELD_GET(ICE_IM_ALU_DS, d64); + alu->dst_len = FIELD_GET(ICE_IM_ALU_DL, d64); + alu->flags_extr_imm = FIELD_GET(ICE_IM_ALU_FEI, d64); + alu->flags_start_imm = FIELD_GET(ICE_IM_ALU_FSI, d64); +} + +#define ICE_IMEM_BM_S 0 +#define ICE_IMEM_BKB_S 4 +#define ICE_IMEM_BKB_IDD (ICE_IMEM_BKB_S / BITS_PER_BYTE) +#define ICE_IMEM_BKB_OFF (ICE_IMEM_BKB_S % BITS_PER_BYTE) +#define ICE_IMEM_PGP GENMASK(15, 14) +#define ICE_IMEM_NPKB_S 16 +#define ICE_IMEM_NPKB_IDD (ICE_IMEM_NPKB_S / BITS_PER_BYTE) +#define ICE_IMEM_NPKB_OFF (ICE_IMEM_NPKB_S % BITS_PER_BYTE) +#define ICE_IMEM_PGKB_S 34 +#define ICE_IMEM_PGKB_IDD (ICE_IMEM_PGKB_S / BITS_PER_BYTE) +#define ICE_IMEM_PGKB_OFF (ICE_IMEM_PGKB_S % BITS_PER_BYTE) +#define ICE_IMEM_ALU0_S 69 +#define ICE_IMEM_ALU0_IDD (ICE_IMEM_ALU0_S / BITS_PER_BYTE) +#define ICE_IMEM_ALU0_OFF (ICE_IMEM_ALU0_S % BITS_PER_BYTE) +#define ICE_IMEM_ALU1_S 165 +#define ICE_IMEM_ALU1_IDD (ICE_IMEM_ALU1_S / BITS_PER_BYTE) +#define ICE_IMEM_ALU1_OFF (ICE_IMEM_ALU1_S % BITS_PER_BYTE) +#define ICE_IMEM_ALU2_S 357 +#define ICE_IMEM_ALU2_IDD (ICE_IMEM_ALU2_S / BITS_PER_BYTE) +#define ICE_IMEM_ALU2_OFF (ICE_IMEM_ALU2_S % BITS_PER_BYTE) + +/** + * ice_imem_parse_item - parse 384 bits of IMEM entry + * @hw: pointer to the hardware structure + * @idx: index of IMEM entry + * @item: item of IMEM entry + * @data: IMEM entry data to be parsed + * @size: size of IMEM entry + */ +static void ice_imem_parse_item(struct ice_hw *hw, u16 
idx, void *item, + void *data, int __maybe_unused size) +{ + struct ice_imem_item *ii = item; + u8 *buf = data; + + ii->idx = idx; + + ice_imem_bm_init(&ii->b_m, *(u8 *)buf); + ice_imem_bkb_init(&ii->b_kb, + *((u16 *)(&buf[ICE_IMEM_BKB_IDD])) >> + ICE_IMEM_BKB_OFF); + + ii->pg_prio = FIELD_GET(ICE_IMEM_PGP, *(u16 *)buf); + + ice_imem_npkb_init(&ii->np_kb, + *((u32 *)(&buf[ICE_IMEM_NPKB_IDD])) >> + ICE_IMEM_NPKB_OFF); + ice_imem_pgkb_init(&ii->pg_kb, + *((u64 *)(&buf[ICE_IMEM_PGKB_IDD])) >> + ICE_IMEM_PGKB_OFF); + + ice_imem_alu_init(&ii->alu0, + &buf[ICE_IMEM_ALU0_IDD], + ICE_IMEM_ALU0_OFF); + ice_imem_alu_init(&ii->alu1, + &buf[ICE_IMEM_ALU1_IDD], + ICE_IMEM_ALU1_OFF); + ice_imem_alu_init(&ii->alu2, + &buf[ICE_IMEM_ALU2_IDD], + ICE_IMEM_ALU2_OFF); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_imem_dump(hw, ii); +} + +/** + * ice_imem_table_get - create an imem table + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated IMEM table. + */ +static struct ice_imem_item *ice_imem_table_get(struct ice_hw *hw) +{ + return ice_parser_create_table(hw, ICE_SID_RXPARSER_IMEM, + sizeof(struct ice_imem_item), + ICE_IMEM_TABLE_SIZE, + ice_imem_parse_item, false); +} + +/*** ICE_SID_RXPARSER_METADATA_INIT section ***/ +/** + * ice_metainit_dump - dump an metainit item info + * @hw: pointer to the hardware structure + * @item: metainit item to dump + */ +static void ice_metainit_dump(struct ice_hw *hw, struct ice_metainit_item *item) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "index = %d\n", item->idx); + + dev_info(dev, "tsr = %d\n", item->tsr); + dev_info(dev, "ho = %d\n", item->ho); + dev_info(dev, "pc = %d\n", item->pc); + dev_info(dev, "pg_rn = %d\n", item->pg_rn); + dev_info(dev, "cd = %d\n", item->cd); + + dev_info(dev, "gpr_a_ctrl = %d\n", item->gpr_a_ctrl); + dev_info(dev, "gpr_a_data_mdid = %d\n", item->gpr_a_data_mdid); + dev_info(dev, "gpr_a_data_start = %d\n", item->gpr_a_data_start); + dev_info(dev, "gpr_a_data_len = %d\n", item->gpr_a_data_len); + dev_info(dev, "gpr_a_id = %d\n", item->gpr_a_id); + + dev_info(dev, "gpr_b_ctrl = %d\n", item->gpr_b_ctrl); + dev_info(dev, "gpr_b_data_mdid = %d\n", item->gpr_b_data_mdid); + dev_info(dev, "gpr_b_data_start = %d\n", item->gpr_b_data_start); + dev_info(dev, "gpr_b_data_len = %d\n", item->gpr_b_data_len); + dev_info(dev, "gpr_b_id = %d\n", item->gpr_b_id); + + dev_info(dev, "gpr_c_ctrl = %d\n", item->gpr_c_ctrl); + dev_info(dev, "gpr_c_data_mdid = %d\n", item->gpr_c_data_mdid); + dev_info(dev, "gpr_c_data_start = %d\n", item->gpr_c_data_start); + dev_info(dev, "gpr_c_data_len = %d\n", item->gpr_c_data_len); + dev_info(dev, "gpr_c_id = %d\n", item->gpr_c_id); + + dev_info(dev, "gpr_d_ctrl = %d\n", item->gpr_d_ctrl); + dev_info(dev, "gpr_d_data_mdid = %d\n", item->gpr_d_data_mdid); + dev_info(dev, "gpr_d_data_start = %d\n", item->gpr_d_data_start); + dev_info(dev, "gpr_d_data_len = %d\n", item->gpr_d_data_len); + dev_info(dev, "gpr_d_id = %d\n", item->gpr_d_id); + + dev_info(dev, "flags = 0x%llx\n", (unsigned long long)(item->flags)); +} + +#define ICE_MI_TSR GENMASK_ULL(7, 0) +#define ICE_MI_HO GENMASK_ULL(16, 8) +#define ICE_MI_PC GENMASK_ULL(24, 17) +#define ICE_MI_PGRN GENMASK_ULL(35, 25) +#define ICE_MI_CD GENMASK_ULL(38, 36) +#define ICE_MI_GAC BIT_ULL(39) +#define ICE_MI_GADM GENMASK_ULL(44, 40) +#define ICE_MI_GADS GENMASK_ULL(48, 45) +#define ICE_MI_GADL GENMASK_ULL(53, 49) +#define ICE_MI_GAI GENMASK_ULL(59, 56) +#define ICE_MI_GBC BIT_ULL(60) +#define ICE_MI_GBDM_S 61 /* offset for the 
2nd 64-bits field */ +#define ICE_MI_GBDM_IDD (ICE_MI_GBDM_S / BITS_PER_BYTE) +#define ICE_MI_GBDM_OFF (ICE_MI_GBDM_S % BITS_PER_BYTE) + +#define ICE_MI_GBDM_GENMASK_ULL(high, low) \ + GENMASK_ULL((high) - ICE_MI_GBDM_S, (low) - ICE_MI_GBDM_S) +#define ICE_MI_GBDM ICE_MI_GBDM_GENMASK_ULL(65, 61) +#define ICE_MI_GBDS ICE_MI_GBDM_GENMASK_ULL(69, 66) +#define ICE_MI_GBDL ICE_MI_GBDM_GENMASK_ULL(74, 70) +#define ICE_MI_GBI ICE_MI_GBDM_GENMASK_ULL(80, 77) +#define ICE_MI_GCC BIT_ULL(81 - ICE_MI_GBDM_S) +#define ICE_MI_GCDM ICE_MI_GBDM_GENMASK_ULL(86, 82) +#define ICE_MI_GCDS ICE_MI_GBDM_GENMASK_ULL(90, 87) +#define ICE_MI_GCDL ICE_MI_GBDM_GENMASK_ULL(95, 91) +#define ICE_MI_GCI ICE_MI_GBDM_GENMASK_ULL(101, 98) +#define ICE_MI_GDC BIT_ULL(102 - ICE_MI_GBDM_S) +#define ICE_MI_GDDM ICE_MI_GBDM_GENMASK_ULL(107, 103) +#define ICE_MI_GDDS ICE_MI_GBDM_GENMASK_ULL(111, 108) +#define ICE_MI_GDDL ICE_MI_GBDM_GENMASK_ULL(116, 112) +#define ICE_MI_GDI ICE_MI_GBDM_GENMASK_ULL(122, 119) +#define ICE_MI_FLAG_S 123 /* offset for the 3rd 64-bits field */ +#define ICE_MI_FLAG_IDD (ICE_MI_FLAG_S / BITS_PER_BYTE) +#define ICE_MI_FLAG_OFF (ICE_MI_FLAG_S % BITS_PER_BYTE) +#define ICE_MI_FLAG GENMASK_ULL(186 - ICE_MI_FLAG_S, \ + 123 - ICE_MI_FLAG_S) + +/** + * ice_metainit_parse_item - parse 192 bits of Metadata Init entry + * @hw: pointer to the hardware structure + * @idx: index of Metadata Init entry + * @item: item of Metadata Init entry + * @data: Metadata Init entry data to be parsed + * @size: size of Metadata Init entry + */ +static void ice_metainit_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int __maybe_unused size) +{ + struct ice_metainit_item *mi = item; + u8 *buf = data; + u64 d64; + + mi->idx = idx; + + d64 = *(u64 *)buf; + + mi->tsr = FIELD_GET(ICE_MI_TSR, d64); + mi->ho = FIELD_GET(ICE_MI_HO, d64); + mi->pc = FIELD_GET(ICE_MI_PC, d64); + mi->pg_rn = FIELD_GET(ICE_MI_PGRN, d64); + mi->cd = FIELD_GET(ICE_MI_CD, d64); + + mi->gpr_a_ctrl = FIELD_GET(ICE_MI_GAC, d64); + mi->gpr_a_data_mdid = FIELD_GET(ICE_MI_GADM, d64); + mi->gpr_a_data_start = FIELD_GET(ICE_MI_GADS, d64); + mi->gpr_a_data_len = FIELD_GET(ICE_MI_GADL, d64); + mi->gpr_a_id = FIELD_GET(ICE_MI_GAI, d64); + + mi->gpr_b_ctrl = FIELD_GET(ICE_MI_GBC, d64); + + d64 = *((u64 *)&buf[ICE_MI_GBDM_IDD]) >> ICE_MI_GBDM_OFF; + + mi->gpr_b_data_mdid = FIELD_GET(ICE_MI_GBDM, d64); + mi->gpr_b_data_start = FIELD_GET(ICE_MI_GBDS, d64); + mi->gpr_b_data_len = FIELD_GET(ICE_MI_GBDL, d64); + mi->gpr_b_id = FIELD_GET(ICE_MI_GBI, d64); + + mi->gpr_c_ctrl = FIELD_GET(ICE_MI_GCC, d64); + mi->gpr_c_data_mdid = FIELD_GET(ICE_MI_GCDM, d64); + mi->gpr_c_data_start = FIELD_GET(ICE_MI_GCDS, d64); + mi->gpr_c_data_len = FIELD_GET(ICE_MI_GCDL, d64); + mi->gpr_c_id = FIELD_GET(ICE_MI_GCI, d64); + + mi->gpr_d_ctrl = FIELD_GET(ICE_MI_GDC, d64); + mi->gpr_d_data_mdid = FIELD_GET(ICE_MI_GDDM, d64); + mi->gpr_d_data_start = FIELD_GET(ICE_MI_GDDS, d64); + mi->gpr_d_data_len = FIELD_GET(ICE_MI_GDDL, d64); + mi->gpr_d_id = FIELD_GET(ICE_MI_GDI, d64); + + d64 = *((u64 *)&buf[ICE_MI_FLAG_IDD]) >> ICE_MI_FLAG_OFF; + + mi->flags = FIELD_GET(ICE_MI_FLAG, d64); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_metainit_dump(hw, mi); +} + +/** + * ice_metainit_table_get - create a metainit table + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Metadata initialization table. 
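The metadata-init entries, like the IMEM ALU words above, are wider than 64 bits, so fields that start past bit 63 are read through a rebased window: the *_S constant gives the absolute start bit, the byte index and residual bit offset are derived from it, a fresh u64 is loaded from that byte and shifted, and the masks are defined relative to the window start. A standalone sketch of the same trick for an invented 12-bit field at bits 81:70 of an entry:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define FOO_FIELD_S	70				/* absolute start bit */
#define FOO_FIELD_IDD	(FOO_FIELD_S / BITS_PER_BYTE)	/* byte index: 8 */
#define FOO_FIELD_OFF	(FOO_FIELD_S % BITS_PER_BYTE)	/* bit offset: 6 */
#define FOO_FIELD	GENMASK_ULL(81 - FOO_FIELD_S, 70 - FOO_FIELD_S)

static u16 foo_get_field(const u8 *entry)
{
	/* reload 64 bits from the byte containing bit 70, as the parser does */
	u64 d64 = *((const u64 *)&entry[FOO_FIELD_IDD]) >> FOO_FIELD_OFF;

	/* bits 81:70 of the entry now sit at bits 11:0 of d64 */
	return FIELD_GET(FOO_FIELD, d64);
}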
+ */ +static struct ice_metainit_item *ice_metainit_table_get(struct ice_hw *hw) +{ + return ice_parser_create_table(hw, ICE_SID_RXPARSER_METADATA_INIT, + sizeof(struct ice_metainit_item), + ICE_METAINIT_TABLE_SIZE, + ice_metainit_parse_item, false); +} + +/** + * ice_bst_tcam_search - find a TCAM item with specific type + * @tcam_table: the TCAM table + * @lbl_table: the lbl table to search + * @type: the type we need to match against + * @start: start searching from this index + * + * Return: a pointer to the matching BOOST TCAM item or NULL. + */ +struct ice_bst_tcam_item * +ice_bst_tcam_search(struct ice_bst_tcam_item *tcam_table, + struct ice_lbl_item *lbl_table, + enum ice_lbl_type type, u16 *start) +{ + u16 i = *start; + + for (; i < ICE_BST_TCAM_TABLE_SIZE; i++) { + if (lbl_table[i].type == type) { + *start = i; + return &tcam_table[lbl_table[i].idx]; + } + } + + return NULL; +} + +/*** ICE_SID_RXPARSER_CAM, ICE_SID_RXPARSER_PG_SPILL, + * ICE_SID_RXPARSER_NOMATCH_CAM and ICE_SID_RXPARSER_NOMATCH_CAM + * sections ***/ +static void ice_pg_cam_key_dump(struct ice_hw *hw, struct ice_pg_cam_key *key) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "key:\n"); + dev_info(dev, "\tvalid = %d\n", key->valid); + dev_info(dev, "\tnode_id = %d\n", key->node_id); + dev_info(dev, "\tflag0 = %d\n", key->flag0); + dev_info(dev, "\tflag1 = %d\n", key->flag1); + dev_info(dev, "\tflag2 = %d\n", key->flag2); + dev_info(dev, "\tflag3 = %d\n", key->flag3); + dev_info(dev, "\tboost_idx = %d\n", key->boost_idx); + dev_info(dev, "\talu_reg = 0x%04x\n", key->alu_reg); + dev_info(dev, "\tnext_proto = 0x%08x\n", key->next_proto); +} + +static void ice_pg_nm_cam_key_dump(struct ice_hw *hw, + struct ice_pg_nm_cam_key *key) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "key:\n"); + dev_info(dev, "\tvalid = %d\n", key->valid); + dev_info(dev, "\tnode_id = %d\n", key->node_id); + dev_info(dev, "\tflag0 = %d\n", key->flag0); + dev_info(dev, "\tflag1 = %d\n", key->flag1); + dev_info(dev, "\tflag2 = %d\n", key->flag2); + dev_info(dev, "\tflag3 = %d\n", key->flag3); + dev_info(dev, "\tboost_idx = %d\n", key->boost_idx); + dev_info(dev, "\talu_reg = 0x%04x\n", key->alu_reg); +} + +static void ice_pg_cam_action_dump(struct ice_hw *hw, + struct ice_pg_cam_action *action) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "action:\n"); + dev_info(dev, "\tnext_node = %d\n", action->next_node); + dev_info(dev, "\tnext_pc = %d\n", action->next_pc); + dev_info(dev, "\tis_pg = %d\n", action->is_pg); + dev_info(dev, "\tproto_id = %d\n", action->proto_id); + dev_info(dev, "\tis_mg = %d\n", action->is_mg); + dev_info(dev, "\tmarker_id = %d\n", action->marker_id); + dev_info(dev, "\tis_last_round = %d\n", action->is_last_round); + dev_info(dev, "\tho_polarity = %d\n", action->ho_polarity); + dev_info(dev, "\tho_inc = %d\n", action->ho_inc); +} + +/** + * ice_pg_cam_dump - dump an parse graph cam info + * @hw: pointer to the hardware structure + * @item: parse graph cam to dump + */ +static void ice_pg_cam_dump(struct ice_hw *hw, struct ice_pg_cam_item *item) +{ + dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx); + ice_pg_cam_key_dump(hw, &item->key); + ice_pg_cam_action_dump(hw, &item->action); +} + +/** + * ice_pg_nm_cam_dump - dump an parse graph no match cam info + * @hw: pointer to the hardware structure + * @item: parse graph no match cam to dump + */ +static void ice_pg_nm_cam_dump(struct ice_hw *hw, + struct ice_pg_nm_cam_item *item) +{ + dev_info(ice_hw_to_dev(hw), "index = 
%d\n", item->idx); + ice_pg_nm_cam_key_dump(hw, &item->key); + ice_pg_cam_action_dump(hw, &item->action); +} + +#define ICE_PGCA_NN GENMASK_ULL(10, 0) +#define ICE_PGCA_NPC GENMASK_ULL(18, 11) +#define ICE_PGCA_IPG BIT_ULL(19) +#define ICE_PGCA_PID GENMASK_ULL(30, 23) +#define ICE_PGCA_IMG BIT_ULL(31) +#define ICE_PGCA_MID GENMASK_ULL(39, 32) +#define ICE_PGCA_ILR BIT_ULL(40) +#define ICE_PGCA_HOP BIT_ULL(41) +#define ICE_PGCA_HOI GENMASK_ULL(50, 42) + +/** + * ice_pg_cam_action_init - parse 55 bits of Parse Graph CAM Action + * @action: pointer to the Parse Graph CAM Action structure + * @data: Parse Graph CAM Action data to be parsed + */ +static void ice_pg_cam_action_init(struct ice_pg_cam_action *action, u64 data) +{ + action->next_node = FIELD_GET(ICE_PGCA_NN, data); + action->next_pc = FIELD_GET(ICE_PGCA_NPC, data); + action->is_pg = FIELD_GET(ICE_PGCA_IPG, data); + action->proto_id = FIELD_GET(ICE_PGCA_PID, data); + action->is_mg = FIELD_GET(ICE_PGCA_IMG, data); + action->marker_id = FIELD_GET(ICE_PGCA_MID, data); + action->is_last_round = FIELD_GET(ICE_PGCA_ILR, data); + action->ho_polarity = FIELD_GET(ICE_PGCA_HOP, data); + action->ho_inc = FIELD_GET(ICE_PGCA_HOI, data); +} + +#define ICE_PGNCK_VLD BIT_ULL(0) +#define ICE_PGNCK_NID GENMASK_ULL(11, 1) +#define ICE_PGNCK_F0 BIT_ULL(12) +#define ICE_PGNCK_F1 BIT_ULL(13) +#define ICE_PGNCK_F2 BIT_ULL(14) +#define ICE_PGNCK_F3 BIT_ULL(15) +#define ICE_PGNCK_BH BIT_ULL(16) +#define ICE_PGNCK_BI GENMASK_ULL(24, 17) +#define ICE_PGNCK_AR GENMASK_ULL(40, 25) + +/** + * ice_pg_nm_cam_key_init - parse 41 bits of Parse Graph NoMatch CAM Key + * @key: pointer to the Parse Graph NoMatch CAM Key structure + * @data: Parse Graph NoMatch CAM Key data to be parsed + */ +static void ice_pg_nm_cam_key_init(struct ice_pg_nm_cam_key *key, u64 data) +{ + key->valid = FIELD_GET(ICE_PGNCK_VLD, data); + key->node_id = FIELD_GET(ICE_PGNCK_NID, data); + key->flag0 = FIELD_GET(ICE_PGNCK_F0, data); + key->flag1 = FIELD_GET(ICE_PGNCK_F1, data); + key->flag2 = FIELD_GET(ICE_PGNCK_F2, data); + key->flag3 = FIELD_GET(ICE_PGNCK_F3, data); + + if (FIELD_GET(ICE_PGNCK_BH, data)) + key->boost_idx = FIELD_GET(ICE_PGNCK_BI, data); + else + key->boost_idx = 0; + + key->alu_reg = FIELD_GET(ICE_PGNCK_AR, data); +} + +#define ICE_PGCK_VLD BIT_ULL(0) +#define ICE_PGCK_NID GENMASK_ULL(11, 1) +#define ICE_PGCK_F0 BIT_ULL(12) +#define ICE_PGCK_F1 BIT_ULL(13) +#define ICE_PGCK_F2 BIT_ULL(14) +#define ICE_PGCK_F3 BIT_ULL(15) +#define ICE_PGCK_BH BIT_ULL(16) +#define ICE_PGCK_BI GENMASK_ULL(24, 17) +#define ICE_PGCK_AR GENMASK_ULL(40, 25) +#define ICE_PGCK_NPK_S 41 /* offset for the 2nd 64-bits field */ +#define ICE_PGCK_NPK_IDD (ICE_PGCK_NPK_S / BITS_PER_BYTE) +#define ICE_PGCK_NPK_OFF (ICE_PGCK_NPK_S % BITS_PER_BYTE) +#define ICE_PGCK_NPK GENMASK_ULL(72 - ICE_PGCK_NPK_S, \ + 41 - ICE_PGCK_NPK_S) + +/** + * ice_pg_cam_key_init - parse 73 bits of Parse Graph CAM Key + * @key: pointer to the Parse Graph CAM Key structure + * @data: Parse Graph CAM Key data to be parsed + */ +static void ice_pg_cam_key_init(struct ice_pg_cam_key *key, u8 *data) +{ + u64 d64 = *(u64 *)data; + + key->valid = FIELD_GET(ICE_PGCK_VLD, d64); + key->node_id = FIELD_GET(ICE_PGCK_NID, d64); + key->flag0 = FIELD_GET(ICE_PGCK_F0, d64); + key->flag1 = FIELD_GET(ICE_PGCK_F1, d64); + key->flag2 = FIELD_GET(ICE_PGCK_F2, d64); + key->flag3 = FIELD_GET(ICE_PGCK_F3, d64); + + if (FIELD_GET(ICE_PGCK_BH, d64)) + key->boost_idx = FIELD_GET(ICE_PGCK_BI, d64); + else + key->boost_idx = 0; + + key->alu_reg = 
FIELD_GET(ICE_PGCK_AR, d64); + + d64 = *((u64 *)&data[ICE_PGCK_NPK_IDD]) >> ICE_PGCK_NPK_OFF; + + key->next_proto = FIELD_GET(ICE_PGCK_NPK, d64); +} + +#define ICE_PG_CAM_ACT_S 73 +#define ICE_PG_CAM_ACT_IDD (ICE_PG_CAM_ACT_S / BITS_PER_BYTE) +#define ICE_PG_CAM_ACT_OFF (ICE_PG_CAM_ACT_S % BITS_PER_BYTE) + +/** + * ice_pg_cam_parse_item - parse 128 bits of Parse Graph CAM Entry + * @hw: pointer to the hardware structure + * @idx: index of Parse Graph CAM Entry + * @item: item of Parse Graph CAM Entry + * @data: Parse Graph CAM Entry data to be parsed + * @size: size of Parse Graph CAM Entry + */ +static void ice_pg_cam_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int __maybe_unused size) +{ + struct ice_pg_cam_item *ci = item; + u8 *buf = data; + u64 d64; + + ci->idx = idx; + + ice_pg_cam_key_init(&ci->key, buf); + + d64 = *((u64 *)&buf[ICE_PG_CAM_ACT_IDD]) >> ICE_PG_CAM_ACT_OFF; + ice_pg_cam_action_init(&ci->action, d64); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_pg_cam_dump(hw, ci); +} + +#define ICE_PG_SP_CAM_KEY_S 56 +#define ICE_PG_SP_CAM_KEY_IDD (ICE_PG_SP_CAM_KEY_S / BITS_PER_BYTE) + +/** + * ice_pg_sp_cam_parse_item - parse 136 bits of Parse Graph Spill CAM Entry + * @hw: pointer to the hardware structure + * @idx: index of Parse Graph Spill CAM Entry + * @item: item of Parse Graph Spill CAM Entry + * @data: Parse Graph Spill CAM Entry data to be parsed + * @size: size of Parse Graph Spill CAM Entry + */ +static void ice_pg_sp_cam_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int __maybe_unused size) +{ + struct ice_pg_cam_item *ci = item; + u8 *buf = data; + u64 d64; + + ci->idx = idx; + + d64 = *(u64 *)buf; + ice_pg_cam_action_init(&ci->action, d64); + + ice_pg_cam_key_init(&ci->key, &buf[ICE_PG_SP_CAM_KEY_IDD]); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_pg_cam_dump(hw, ci); +} + +#define ICE_PG_NM_CAM_ACT_S 41 +#define ICE_PG_NM_CAM_ACT_IDD (ICE_PG_NM_CAM_ACT_S / BITS_PER_BYTE) +#define ICE_PG_NM_CAM_ACT_OFF (ICE_PG_NM_CAM_ACT_S % BITS_PER_BYTE) + +/** + * ice_pg_nm_cam_parse_item - parse 96 bits of Parse Graph NoMatch CAM Entry + * @hw: pointer to the hardware structure + * @idx: index of Parse Graph NoMatch CAM Entry + * @item: item of Parse Graph NoMatch CAM Entry + * @data: Parse Graph NoMatch CAM Entry data to be parsed + * @size: size of Parse Graph NoMatch CAM Entry + */ +static void ice_pg_nm_cam_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int __maybe_unused size) +{ + struct ice_pg_nm_cam_item *ci = item; + u8 *buf = data; + u64 d64; + + ci->idx = idx; + + d64 = *(u64 *)buf; + ice_pg_nm_cam_key_init(&ci->key, d64); + + d64 = *((u64 *)&buf[ICE_PG_NM_CAM_ACT_IDD]) >> ICE_PG_NM_CAM_ACT_OFF; + ice_pg_cam_action_init(&ci->action, d64); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_pg_nm_cam_dump(hw, ci); +} + +#define ICE_PG_NM_SP_CAM_ACT_S 56 +#define ICE_PG_NM_SP_CAM_ACT_IDD (ICE_PG_NM_SP_CAM_ACT_S / BITS_PER_BYTE) +#define ICE_PG_NM_SP_CAM_ACT_OFF (ICE_PG_NM_SP_CAM_ACT_S % BITS_PER_BYTE) + +/** + * ice_pg_nm_sp_cam_parse_item - parse 104 bits of Parse Graph NoMatch Spill + * CAM Entry + * @hw: pointer to the hardware structure + * @idx: index of Parse Graph NoMatch Spill CAM Entry + * @item: item of Parse Graph NoMatch Spill CAM Entry + * @data: Parse Graph NoMatch Spill CAM Entry data to be parsed + * @size: size of Parse Graph NoMatch Spill CAM Entry + */ +static void ice_pg_nm_sp_cam_parse_item(struct ice_hw *hw, u16 idx, + void *item, void *data, + int __maybe_unused size) +{ + struct 
ice_pg_nm_cam_item *ci = item; + u8 *buf = data; + u64 d64; + + ci->idx = idx; + + d64 = *(u64 *)buf; + ice_pg_cam_action_init(&ci->action, d64); + + d64 = *((u64 *)&buf[ICE_PG_NM_SP_CAM_ACT_IDD]) >> + ICE_PG_NM_SP_CAM_ACT_OFF; + ice_pg_nm_cam_key_init(&ci->key, d64); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_pg_nm_cam_dump(hw, ci); +} + +/** + * ice_pg_cam_table_get - create a parse graph cam table + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Parse Graph CAM table. + */ +static struct ice_pg_cam_item *ice_pg_cam_table_get(struct ice_hw *hw) +{ + return ice_parser_create_table(hw, ICE_SID_RXPARSER_CAM, + sizeof(struct ice_pg_cam_item), + ICE_PG_CAM_TABLE_SIZE, + ice_pg_cam_parse_item, false); +} + +/** + * ice_pg_sp_cam_table_get - create a parse graph spill cam table + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Parse Graph Spill CAM table. + */ +static struct ice_pg_cam_item *ice_pg_sp_cam_table_get(struct ice_hw *hw) +{ + return ice_parser_create_table(hw, ICE_SID_RXPARSER_PG_SPILL, + sizeof(struct ice_pg_cam_item), + ICE_PG_SP_CAM_TABLE_SIZE, + ice_pg_sp_cam_parse_item, false); +} + +/** + * ice_pg_nm_cam_table_get - create a parse graph no match cam table + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Parse Graph No Match CAM table. + */ +static struct ice_pg_nm_cam_item *ice_pg_nm_cam_table_get(struct ice_hw *hw) +{ + return ice_parser_create_table(hw, ICE_SID_RXPARSER_NOMATCH_CAM, + sizeof(struct ice_pg_nm_cam_item), + ICE_PG_NM_CAM_TABLE_SIZE, + ice_pg_nm_cam_parse_item, false); +} + +/** + * ice_pg_nm_sp_cam_table_get - create a parse graph no match spill cam table + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Parse Graph No Match Spill CAM table. + */ +static struct ice_pg_nm_cam_item *ice_pg_nm_sp_cam_table_get(struct ice_hw *hw) +{ + return ice_parser_create_table(hw, ICE_SID_RXPARSER_NOMATCH_SPILL, + sizeof(struct ice_pg_nm_cam_item), + ICE_PG_NM_SP_CAM_TABLE_SIZE, + ice_pg_nm_sp_cam_parse_item, false); +} + +static bool __ice_pg_cam_match(struct ice_pg_cam_item *item, + struct ice_pg_cam_key *key) +{ + return (item->key.valid && + !memcmp(&item->key.val, &key->val, sizeof(key->val))); +} + +static bool __ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *item, + struct ice_pg_cam_key *key) +{ + return (item->key.valid && + !memcmp(&item->key.val, &key->val, sizeof(item->key.val))); +} + +/** + * ice_pg_cam_match - search parse graph cam table by key + * @table: parse graph cam table to search + * @size: cam table size + * @key: search key + * + * Return: a pointer to the matching PG CAM item or NULL. + */ +struct ice_pg_cam_item *ice_pg_cam_match(struct ice_pg_cam_item *table, + int size, struct ice_pg_cam_key *key) +{ + int i; + + for (i = 0; i < size; i++) { + struct ice_pg_cam_item *item = &table[i]; + + if (__ice_pg_cam_match(item, key)) + return item; + } + + return NULL; +} + +/** + * ice_pg_nm_cam_match - search parse graph no match cam table by key + * @table: parse graph no match cam table to search + * @size: cam table size + * @key: search key + * + * Return: a pointer to the matching PG No Match CAM item or NULL. 
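ice_pg_cam_match() and ice_pg_nm_cam_match() do a linear scan of the given table and return the first valid entry whose key matches, or NULL. A hedged usage sketch in which the regular CAM is consulted first and the spill CAM serves as a fallback; the lookup order is illustrative, and the table sizes are the ICE_PG_*_TABLE_SIZE constants the tables were built with:

static struct ice_pg_cam_item *
foo_pg_cam_lookup(struct ice_pg_cam_item *cam,
		  struct ice_pg_cam_item *spill_cam,
		  struct ice_pg_cam_key *key)
{
	struct ice_pg_cam_item *item;

	item = ice_pg_cam_match(cam, ICE_PG_CAM_TABLE_SIZE, key);
	if (item)
		return item;

	/* no hit in the main table, fall back to the spill CAM */
	return ice_pg_cam_match(spill_cam, ICE_PG_SP_CAM_TABLE_SIZE, key);
}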
+ */ +struct ice_pg_nm_cam_item * +ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size, + struct ice_pg_cam_key *key) +{ + int i; + + for (i = 0; i < size; i++) { + struct ice_pg_nm_cam_item *item = &table[i]; + + if (__ice_pg_nm_cam_match(item, key)) + return item; + } + + return NULL; +} + +/*** Ternary match ***/ +/* Perform a ternary match on a 1-byte pattern (@pat) given @key and @key_inv + * Rules (per bit): + * Key == 0 and Key_inv == 0 : Never match (Don't care) + * Key == 0 and Key_inv == 1 : Match on bit == 1 + * Key == 1 and Key_inv == 0 : Match on bit == 0 + * Key == 1 and Key_inv == 1 : Always match (Don't care) + * + * Return: true if all bits match, false otherwise. + */ +static bool ice_ternary_match_byte(u8 key, u8 key_inv, u8 pat) +{ + u8 bit_key, bit_key_inv, bit_pat; + int i; + + for (i = 0; i < BITS_PER_BYTE; i++) { + bit_key = key & BIT(i); + bit_key_inv = key_inv & BIT(i); + bit_pat = pat & BIT(i); + + if (bit_key != 0 && bit_key_inv != 0) + continue; + + if ((bit_key == 0 && bit_key_inv == 0) || bit_key == bit_pat) + return false; + } + + return true; +} + +static bool ice_ternary_match(const u8 *key, const u8 *key_inv, + const u8 *pat, int len) +{ + int i; + + for (i = 0; i < len; i++) + if (!ice_ternary_match_byte(key[i], key_inv[i], pat[i])) + return false; + + return true; +} + +/*** ICE_SID_RXPARSER_BOOST_TCAM and ICE_SID_LBL_RXPARSER_TMEM sections ***/ +static void ice_bst_np_kb_dump(struct ice_hw *hw, struct ice_np_keybuilder *kb) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "next proto key builder:\n"); + dev_info(dev, "\topc = %d\n", kb->opc); + dev_info(dev, "\tstart_reg0 = %d\n", kb->start_reg0); + dev_info(dev, "\tlen_reg1 = %d\n", kb->len_reg1); +} + +static void ice_bst_pg_kb_dump(struct ice_hw *hw, struct ice_pg_keybuilder *kb) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "parse graph key builder:\n"); + dev_info(dev, "\tflag0_ena = %d\n", kb->flag0_ena); + dev_info(dev, "\tflag1_ena = %d\n", kb->flag1_ena); + dev_info(dev, "\tflag2_ena = %d\n", kb->flag2_ena); + dev_info(dev, "\tflag3_ena = %d\n", kb->flag3_ena); + dev_info(dev, "\tflag0_idx = %d\n", kb->flag0_idx); + dev_info(dev, "\tflag1_idx = %d\n", kb->flag1_idx); + dev_info(dev, "\tflag2_idx = %d\n", kb->flag2_idx); + dev_info(dev, "\tflag3_idx = %d\n", kb->flag3_idx); + dev_info(dev, "\talu_reg_idx = %d\n", kb->alu_reg_idx); +} + +static void ice_bst_alu_dump(struct ice_hw *hw, struct ice_alu *alu, int idx) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "alu%d:\n", idx); + dev_info(dev, "\topc = %d\n", alu->opc); + dev_info(dev, "\tsrc_start = %d\n", alu->src_start); + dev_info(dev, "\tsrc_len = %d\n", alu->src_len); + dev_info(dev, "\tshift_xlate_sel = %d\n", alu->shift_xlate_sel); + dev_info(dev, "\tshift_xlate_key = %d\n", alu->shift_xlate_key); + dev_info(dev, "\tsrc_reg_id = %d\n", alu->src_reg_id); + dev_info(dev, "\tdst_reg_id = %d\n", alu->dst_reg_id); + dev_info(dev, "\tinc0 = %d\n", alu->inc0); + dev_info(dev, "\tinc1 = %d\n", alu->inc1); + dev_info(dev, "\tproto_offset_opc = %d\n", alu->proto_offset_opc); + dev_info(dev, "\tproto_offset = %d\n", alu->proto_offset); + dev_info(dev, "\tbranch_addr = %d\n", alu->branch_addr); + dev_info(dev, "\timm = %d\n", alu->imm); + dev_info(dev, "\tdst_start = %d\n", alu->dst_start); + dev_info(dev, "\tdst_len = %d\n", alu->dst_len); + dev_info(dev, "\tflags_extr_imm = %d\n", alu->flags_extr_imm); + dev_info(dev, "\tflags_start_imm= %d\n", alu->flags_start_imm); +} + +/** + * 
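A worked instance of the per-bit ternary rules listed above, where each (key, key_inv) pair selects match-on-0, match-on-1 or don't-care. With key = 0xf3 and key_inv = 0xfc, bits 7:4 are don't-care (1/1), bits 3:2 must be 1 (0/1) and bits 1:0 must be 0 (1/0); the pattern values below are illustrative:

static bool foo_ternary_demo(u8 pat)
{
	u8 key = 0xf3, key_inv = 0xfc;

	/* pat = 0x0c or 0xac -> true; pat = 0x0d -> false (bit 0 must be 0) */
	return ice_ternary_match_byte(key, key_inv, pat);
}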
ice_bst_tcam_dump - dump a boost tcam info + * @hw: pointer to the hardware structure + * @item: boost tcam to dump + */ +static void ice_bst_tcam_dump(struct ice_hw *hw, struct ice_bst_tcam_item *item) +{ + struct device *dev = ice_hw_to_dev(hw); + int i; + + dev_info(dev, "addr = %d\n", item->addr); + + dev_info(dev, "key : "); + for (i = 0; i < ICE_BST_TCAM_KEY_SIZE; i++) + dev_info(dev, "%02x ", item->key[i]); + + dev_info(dev, "\n"); + + dev_info(dev, "key_inv: "); + for (i = 0; i < ICE_BST_TCAM_KEY_SIZE; i++) + dev_info(dev, "%02x ", item->key_inv[i]); + + dev_info(dev, "\n"); + + dev_info(dev, "hit_idx_grp = %d\n", item->hit_idx_grp); + dev_info(dev, "pg_prio = %d\n", item->pg_prio); + + ice_bst_np_kb_dump(hw, &item->np_kb); + ice_bst_pg_kb_dump(hw, &item->pg_kb); + + ice_bst_alu_dump(hw, &item->alu0, ICE_ALU0_IDX); + ice_bst_alu_dump(hw, &item->alu1, ICE_ALU1_IDX); + ice_bst_alu_dump(hw, &item->alu2, ICE_ALU2_IDX); +} + +static void ice_lbl_dump(struct ice_hw *hw, struct ice_lbl_item *item) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "index = %u\n", item->idx); + dev_info(dev, "type = %u\n", item->type); + dev_info(dev, "label = %s\n", item->label); +} + +#define ICE_BST_ALU_OPC GENMASK_ULL(5, 0) +#define ICE_BST_ALU_SS GENMASK_ULL(13, 6) +#define ICE_BST_ALU_SL GENMASK_ULL(18, 14) +#define ICE_BST_ALU_SXS BIT_ULL(19) +#define ICE_BST_ALU_SXK GENMASK_ULL(23, 20) +#define ICE_BST_ALU_SRID GENMASK_ULL(30, 24) +#define ICE_BST_ALU_DRID GENMASK_ULL(37, 31) +#define ICE_BST_ALU_INC0 BIT_ULL(38) +#define ICE_BST_ALU_INC1 BIT_ULL(39) +#define ICE_BST_ALU_POO GENMASK_ULL(41, 40) +#define ICE_BST_ALU_PO GENMASK_ULL(49, 42) +#define ICE_BST_ALU_BA_S 50 /* offset for the 2nd 64-bits field */ +#define ICE_BST_ALU_BA GENMASK_ULL(57 - ICE_BST_ALU_BA_S, \ + 50 - ICE_BST_ALU_BA_S) +#define ICE_BST_ALU_IMM GENMASK_ULL(73 - ICE_BST_ALU_BA_S, \ + 58 - ICE_BST_ALU_BA_S) +#define ICE_BST_ALU_DFE BIT_ULL(74 - ICE_BST_ALU_BA_S) +#define ICE_BST_ALU_DS GENMASK_ULL(80 - ICE_BST_ALU_BA_S, \ + 75 - ICE_BST_ALU_BA_S) +#define ICE_BST_ALU_DL GENMASK_ULL(86 - ICE_BST_ALU_BA_S, \ + 81 - ICE_BST_ALU_BA_S) +#define ICE_BST_ALU_FEI BIT_ULL(87 - ICE_BST_ALU_BA_S) +#define ICE_BST_ALU_FSI GENMASK_ULL(95 - ICE_BST_ALU_BA_S, \ + 88 - ICE_BST_ALU_BA_S) + +/** + * ice_bst_alu_init - parse 96 bits of ALU entry + * @alu: pointer to the ALU entry structure + * @data: ALU entry data to be parsed + * @off: offset of the ALU entry data + */ +static void ice_bst_alu_init(struct ice_alu *alu, u8 *data, u8 off) +{ + u64 d64; + u8 idd; + + d64 = *((u64 *)data) >> off; + + alu->opc = FIELD_GET(ICE_BST_ALU_OPC, d64); + alu->src_start = FIELD_GET(ICE_BST_ALU_SS, d64); + alu->src_len = FIELD_GET(ICE_BST_ALU_SL, d64); + alu->shift_xlate_sel = FIELD_GET(ICE_BST_ALU_SXS, d64); + alu->shift_xlate_key = FIELD_GET(ICE_BST_ALU_SXK, d64); + alu->src_reg_id = FIELD_GET(ICE_BST_ALU_SRID, d64); + alu->dst_reg_id = FIELD_GET(ICE_BST_ALU_DRID, d64); + alu->inc0 = FIELD_GET(ICE_BST_ALU_INC0, d64); + alu->inc1 = FIELD_GET(ICE_BST_ALU_INC1, d64); + alu->proto_offset_opc = FIELD_GET(ICE_BST_ALU_POO, d64); + alu->proto_offset = FIELD_GET(ICE_BST_ALU_PO, d64); + + idd = (ICE_BST_ALU_BA_S + off) / BITS_PER_BYTE; + off = (ICE_BST_ALU_BA_S + off) % BITS_PER_BYTE; + d64 = *((u64 *)(&data[idd])) >> off; + + alu->branch_addr = FIELD_GET(ICE_BST_ALU_BA, d64); + alu->imm = FIELD_GET(ICE_BST_ALU_IMM, d64); + alu->dedicate_flags_ena = FIELD_GET(ICE_BST_ALU_DFE, d64); + alu->dst_start = FIELD_GET(ICE_BST_ALU_DS, d64); + alu->dst_len = 
FIELD_GET(ICE_BST_ALU_DL, d64); + alu->flags_extr_imm = FIELD_GET(ICE_BST_ALU_FEI, d64); + alu->flags_start_imm = FIELD_GET(ICE_BST_ALU_FSI, d64); +} + +#define ICE_BST_PGKB_F0_ENA BIT_ULL(0) +#define ICE_BST_PGKB_F0_IDX GENMASK_ULL(6, 1) +#define ICE_BST_PGKB_F1_ENA BIT_ULL(7) +#define ICE_BST_PGKB_F1_IDX GENMASK_ULL(13, 8) +#define ICE_BST_PGKB_F2_ENA BIT_ULL(14) +#define ICE_BST_PGKB_F2_IDX GENMASK_ULL(20, 15) +#define ICE_BST_PGKB_F3_ENA BIT_ULL(21) +#define ICE_BST_PGKB_F3_IDX GENMASK_ULL(27, 22) +#define ICE_BST_PGKB_AR_IDX GENMASK_ULL(34, 28) + +/** + * ice_bst_pgkb_init - parse 35 bits of Parse Graph Key Build + * @kb: pointer to the Parse Graph Key Build structure + * @data: Parse Graph Key Build data to be parsed + */ +static void ice_bst_pgkb_init(struct ice_pg_keybuilder *kb, u64 data) +{ + kb->flag0_ena = FIELD_GET(ICE_BST_PGKB_F0_ENA, data); + kb->flag0_idx = FIELD_GET(ICE_BST_PGKB_F0_IDX, data); + kb->flag1_ena = FIELD_GET(ICE_BST_PGKB_F1_ENA, data); + kb->flag1_idx = FIELD_GET(ICE_BST_PGKB_F1_IDX, data); + kb->flag2_ena = FIELD_GET(ICE_BST_PGKB_F2_ENA, data); + kb->flag2_idx = FIELD_GET(ICE_BST_PGKB_F2_IDX, data); + kb->flag3_ena = FIELD_GET(ICE_BST_PGKB_F3_ENA, data); + kb->flag3_idx = FIELD_GET(ICE_BST_PGKB_F3_IDX, data); + kb->alu_reg_idx = FIELD_GET(ICE_BST_PGKB_AR_IDX, data); +} + +#define ICE_BST_NPKB_OPC GENMASK(1, 0) +#define ICE_BST_NPKB_S_R0 GENMASK(9, 2) +#define ICE_BST_NPKB_L_R1 GENMASK(17, 10) + +/** + * ice_bst_npkb_init - parse 18 bits of Next Protocol Key Build + * @kb: pointer to the Next Protocol Key Build structure + * @data: Next Protocol Key Build data to be parsed + */ +static void ice_bst_npkb_init(struct ice_np_keybuilder *kb, u32 data) +{ + kb->opc = FIELD_GET(ICE_BST_NPKB_OPC, data); + kb->start_reg0 = FIELD_GET(ICE_BST_NPKB_S_R0, data); + kb->len_reg1 = FIELD_GET(ICE_BST_NPKB_L_R1, data); +} + +#define ICE_BT_KEY_S 32 +#define ICE_BT_KEY_IDD (ICE_BT_KEY_S / BITS_PER_BYTE) +#define ICE_BT_KIV_S 192 +#define ICE_BT_KIV_IDD (ICE_BT_KIV_S / BITS_PER_BYTE) +#define ICE_BT_HIG_S 352 +#define ICE_BT_HIG_IDD (ICE_BT_HIG_S / BITS_PER_BYTE) +#define ICE_BT_PGP_S 360 +#define ICE_BT_PGP_IDD (ICE_BT_PGP_S / BITS_PER_BYTE) +#define ICE_BT_PGP_M GENMASK(361 - ICE_BT_PGP_S, 360 - ICE_BT_PGP_S) +#define ICE_BT_NPKB_S 362 +#define ICE_BT_NPKB_IDD (ICE_BT_NPKB_S / BITS_PER_BYTE) +#define ICE_BT_NPKB_OFF (ICE_BT_NPKB_S % BITS_PER_BYTE) +#define ICE_BT_PGKB_S 380 +#define ICE_BT_PGKB_IDD (ICE_BT_PGKB_S / BITS_PER_BYTE) +#define ICE_BT_PGKB_OFF (ICE_BT_PGKB_S % BITS_PER_BYTE) +#define ICE_BT_ALU0_S 415 +#define ICE_BT_ALU0_IDD (ICE_BT_ALU0_S / BITS_PER_BYTE) +#define ICE_BT_ALU0_OFF (ICE_BT_ALU0_S % BITS_PER_BYTE) +#define ICE_BT_ALU1_S 511 +#define ICE_BT_ALU1_IDD (ICE_BT_ALU1_S / BITS_PER_BYTE) +#define ICE_BT_ALU1_OFF (ICE_BT_ALU1_S % BITS_PER_BYTE) +#define ICE_BT_ALU2_S 607 +#define ICE_BT_ALU2_IDD (ICE_BT_ALU2_S / BITS_PER_BYTE) +#define ICE_BT_ALU2_OFF (ICE_BT_ALU2_S % BITS_PER_BYTE) + +/** + * ice_bst_parse_item - parse 704 bits of Boost TCAM entry + * @hw: pointer to the hardware structure + * @idx: index of Boost TCAM entry + * @item: item of Boost TCAM entry + * @data: Boost TCAM entry data to be parsed + * @size: size of Boost TCAM entry + */ +static void ice_bst_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int __maybe_unused size) +{ + struct ice_bst_tcam_item *ti = item; + u8 *buf = (u8 *)data; + int i; + + ti->addr = *(u16 *)buf; + + for (i = 0; i < ICE_BST_TCAM_KEY_SIZE; i++) { + ti->key[i] = buf[ICE_BT_KEY_IDD + i]; + 
ti->key_inv[i] = buf[ICE_BT_KIV_IDD + i]; + } + ti->hit_idx_grp = buf[ICE_BT_HIG_IDD]; + ti->pg_prio = buf[ICE_BT_PGP_IDD] & ICE_BT_PGP_M; + + ice_bst_npkb_init(&ti->np_kb, + *((u32 *)(&buf[ICE_BT_NPKB_IDD])) >> + ICE_BT_NPKB_OFF); + ice_bst_pgkb_init(&ti->pg_kb, + *((u64 *)(&buf[ICE_BT_PGKB_IDD])) >> + ICE_BT_PGKB_OFF); + + ice_bst_alu_init(&ti->alu0, &buf[ICE_BT_ALU0_IDD], ICE_BT_ALU0_OFF); + ice_bst_alu_init(&ti->alu1, &buf[ICE_BT_ALU1_IDD], ICE_BT_ALU1_OFF); + ice_bst_alu_init(&ti->alu2, &buf[ICE_BT_ALU2_IDD], ICE_BT_ALU2_OFF); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_bst_tcam_dump(hw, ti); +} + +/** + * ice_bst_tcam_table_get - create a boost tcam table + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Boost TCAM table. + */ +static struct ice_bst_tcam_item *ice_bst_tcam_table_get(struct ice_hw *hw) +{ + return ice_parser_create_table(hw, ICE_SID_RXPARSER_BOOST_TCAM, + sizeof(struct ice_bst_tcam_item), + ICE_BST_TCAM_TABLE_SIZE, + ice_bst_parse_item, true); +} + +static void ice_parse_lbl_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int __maybe_unused size) +{ + struct ice_lbl_item *lbl_item = item; + struct ice_lbl_item *lbl_data = data; + + lbl_item->idx = lbl_data->idx; + memcpy(lbl_item->label, lbl_data->label, sizeof(lbl_item->label)); + + if (strstarts(lbl_item->label, ICE_LBL_BST_DVM)) + lbl_item->type = ICE_LBL_BST_TYPE_DVM; + else if (strstarts(lbl_item->label, ICE_LBL_BST_SVM)) + lbl_item->type = ICE_LBL_BST_TYPE_SVM; + else if (strstarts(lbl_item->label, ICE_LBL_TNL_VXLAN)) + lbl_item->type = ICE_LBL_BST_TYPE_VXLAN; + else if (strstarts(lbl_item->label, ICE_LBL_TNL_GENEVE)) + lbl_item->type = ICE_LBL_BST_TYPE_GENEVE; + else if (strstarts(lbl_item->label, ICE_LBL_TNL_UDP_ECPRI)) + lbl_item->type = ICE_LBL_BST_TYPE_UDP_ECPRI; + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_lbl_dump(hw, lbl_item); +} + +/** + * ice_bst_lbl_table_get - create a boost label table + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Boost label table. + */ +static struct ice_lbl_item *ice_bst_lbl_table_get(struct ice_hw *hw) +{ + return ice_parser_create_table(hw, ICE_SID_LBL_RXPARSER_TMEM, + sizeof(struct ice_lbl_item), + ICE_BST_TCAM_TABLE_SIZE, + ice_parse_lbl_item, true); +} + +/** + * ice_bst_tcam_match - match a pattern on the boost tcam table + * @tcam_table: boost tcam table to search + * @pat: pattern to match + * + * Return: a pointer to the matching Boost TCAM item or NULL. 
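+ *
+ * Entries whose hit index group is zero are skipped; the remaining entries
+ * are compared against @pat using the per-bit ternary rules described
+ * above ice_ternary_match_byte(). A minimal usage sketch (illustrative
+ * only; @psr is an assumed parser instance and @key an assumed boost key
+ * built from packet bytes):
+ *
+ *	struct ice_bst_tcam_item *item;
+ *
+ *	item = ice_bst_tcam_match(psr->bst_tcam_table, key);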
+ */ +struct ice_bst_tcam_item * +ice_bst_tcam_match(struct ice_bst_tcam_item *tcam_table, u8 *pat) +{ + int i; + + for (i = 0; i < ICE_BST_TCAM_TABLE_SIZE; i++) { + struct ice_bst_tcam_item *item = &tcam_table[i]; + + if (item->hit_idx_grp == 0) + continue; + if (ice_ternary_match(item->key, item->key_inv, pat, + ICE_BST_TCAM_KEY_SIZE)) + return item; + } + + return NULL; +} + +/*** ICE_SID_RXPARSER_MARKER_PTYPE section ***/ +/** + * ice_ptype_mk_tcam_dump - dump an ptype marker tcam info + * @hw: pointer to the hardware structure + * @item: ptype marker tcam to dump + */ +static void ice_ptype_mk_tcam_dump(struct ice_hw *hw, + struct ice_ptype_mk_tcam_item *item) +{ + struct device *dev = ice_hw_to_dev(hw); + int i; + + dev_info(dev, "address = %d\n", item->address); + dev_info(dev, "ptype = %d\n", item->ptype); + + dev_info(dev, "key :"); + for (i = 0; i < ICE_PTYPE_MK_TCAM_KEY_SIZE; i++) + dev_info(dev, "%02x ", item->key[i]); + + dev_info(dev, "\n"); + + dev_info(dev, "key_inv:"); + for (i = 0; i < ICE_PTYPE_MK_TCAM_KEY_SIZE; i++) + dev_info(dev, "%02x ", item->key_inv[i]); + + dev_info(dev, "\n"); +} + +static void ice_parse_ptype_mk_tcam_item(struct ice_hw *hw, u16 idx, + void *item, void *data, int size) +{ + memcpy(item, data, size); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_ptype_mk_tcam_dump(hw, + (struct ice_ptype_mk_tcam_item *)item); +} + +/** + * ice_ptype_mk_tcam_table_get - create a ptype marker tcam table + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Marker PType TCAM table. + */ +static +struct ice_ptype_mk_tcam_item *ice_ptype_mk_tcam_table_get(struct ice_hw *hw) +{ + return ice_parser_create_table(hw, ICE_SID_RXPARSER_MARKER_PTYPE, + sizeof(struct ice_ptype_mk_tcam_item), + ICE_PTYPE_MK_TCAM_TABLE_SIZE, + ice_parse_ptype_mk_tcam_item, true); +} + +/** + * ice_ptype_mk_tcam_match - match a pattern on a ptype marker tcam table + * @table: ptype marker tcam table to search + * @pat: pattern to match + * @len: length of the pattern + * + * Return: a pointer to the matching Marker PType item or NULL. + */ +struct ice_ptype_mk_tcam_item * +ice_ptype_mk_tcam_match(struct ice_ptype_mk_tcam_item *table, + u8 *pat, int len) +{ + int i; + + for (i = 0; i < ICE_PTYPE_MK_TCAM_TABLE_SIZE; i++) { + struct ice_ptype_mk_tcam_item *item = &table[i]; + + if (ice_ternary_match(item->key, item->key_inv, pat, len)) + return item; + } + + return NULL; +} + +/*** ICE_SID_RXPARSER_MARKER_GRP section ***/ +/** + * ice_mk_grp_dump - dump an marker group item info + * @hw: pointer to the hardware structure + * @item: marker group item to dump + */ +static void ice_mk_grp_dump(struct ice_hw *hw, struct ice_mk_grp_item *item) +{ + struct device *dev = ice_hw_to_dev(hw); + int i; + + dev_info(dev, "index = %d\n", item->idx); + + dev_info(dev, "markers: "); + for (i = 0; i < ICE_MK_COUNT_PER_GRP; i++) + dev_info(dev, "%d ", item->markers[i]); + + dev_info(dev, "\n"); +} + +static void ice_mk_grp_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int __maybe_unused size) +{ + struct ice_mk_grp_item *grp = item; + u8 *buf = data; + int i; + + grp->idx = idx; + + for (i = 0; i < ICE_MK_COUNT_PER_GRP; i++) + grp->markers[i] = buf[i]; + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_mk_grp_dump(hw, grp); +} + +/** + * ice_mk_grp_table_get - create a marker group table + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Marker Group ID table. 
+ */ +static struct ice_mk_grp_item *ice_mk_grp_table_get(struct ice_hw *hw) +{ + return ice_parser_create_table(hw, ICE_SID_RXPARSER_MARKER_GRP, + sizeof(struct ice_mk_grp_item), + ICE_MK_GRP_TABLE_SIZE, + ice_mk_grp_parse_item, false); +} + +/*** ICE_SID_RXPARSER_PROTO_GRP section ***/ +static void ice_proto_off_dump(struct ice_hw *hw, + struct ice_proto_off *po, int idx) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "proto %d\n", idx); + dev_info(dev, "\tpolarity = %d\n", po->polarity); + dev_info(dev, "\tproto_id = %d\n", po->proto_id); + dev_info(dev, "\toffset = %d\n", po->offset); +} + +/** + * ice_proto_grp_dump - dump a proto group item info + * @hw: pointer to the hardware structure + * @item: proto group item to dump + */ +static void ice_proto_grp_dump(struct ice_hw *hw, + struct ice_proto_grp_item *item) +{ + int i; + + dev_info(ice_hw_to_dev(hw), "index = %d\n", item->idx); + + for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++) + ice_proto_off_dump(hw, &item->po[i], i); +} + +#define ICE_PO_POL BIT(0) +#define ICE_PO_PID GENMASK(8, 1) +#define ICE_PO_OFF GENMASK(21, 12) + +/** + * ice_proto_off_parse - parse 22 bits of Protocol entry + * @po: pointer to the Protocol entry structure + * @data: Protocol entry data to be parsed + */ +static void ice_proto_off_parse(struct ice_proto_off *po, u32 data) +{ + po->polarity = FIELD_GET(ICE_PO_POL, data); + po->proto_id = FIELD_GET(ICE_PO_PID, data); + po->offset = FIELD_GET(ICE_PO_OFF, data); +} + +/** + * ice_proto_grp_parse_item - parse 192 bits of Protocol Group Table entry + * @hw: pointer to the hardware structure + * @idx: index of Protocol Group Table entry + * @item: item of Protocol Group Table entry + * @data: Protocol Group Table entry data to be parsed + * @size: size of Protocol Group Table entry + */ +static void ice_proto_grp_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int __maybe_unused size) +{ + struct ice_proto_grp_item *grp = item; + u8 *buf = (u8 *)data; + u8 idd, off; + u32 d32; + int i; + + grp->idx = idx; + + for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++) { + idd = (ICE_PROTO_GRP_ITEM_SIZE * i) / BITS_PER_BYTE; + off = (ICE_PROTO_GRP_ITEM_SIZE * i) % BITS_PER_BYTE; + d32 = *((u32 *)&buf[idd]) >> off; + ice_proto_off_parse(&grp->po[i], d32); + } + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_proto_grp_dump(hw, grp); +} + +/** + * ice_proto_grp_table_get - create a proto group table + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Protocol Group table. 
+ */ +static struct ice_proto_grp_item *ice_proto_grp_table_get(struct ice_hw *hw) +{ + return ice_parser_create_table(hw, ICE_SID_RXPARSER_PROTO_GRP, + sizeof(struct ice_proto_grp_item), + ICE_PROTO_GRP_TABLE_SIZE, + ice_proto_grp_parse_item, false); +} + +/*** ICE_SID_RXPARSER_FLAG_REDIR section ***/ +/** + * ice_flg_rd_dump - dump a flag redirect item info + * @hw: pointer to the hardware structure + * @item: flag redirect item to dump + */ +static void ice_flg_rd_dump(struct ice_hw *hw, struct ice_flg_rd_item *item) +{ + struct device *dev = ice_hw_to_dev(hw); + + dev_info(dev, "index = %d\n", item->idx); + dev_info(dev, "expose = %d\n", item->expose); + dev_info(dev, "intr_flg_id = %d\n", item->intr_flg_id); +} + +#define ICE_FRT_EXPO BIT(0) +#define ICE_FRT_IFID GENMASK(6, 1) + +/** + * ice_flg_rd_parse_item - parse 8 bits of Flag Redirect Table entry + * @hw: pointer to the hardware structure + * @idx: index of Flag Redirect Table entry + * @item: item of Flag Redirect Table entry + * @data: Flag Redirect Table entry data to be parsed + * @size: size of Flag Redirect Table entry + */ +static void ice_flg_rd_parse_item(struct ice_hw *hw, u16 idx, void *item, + void *data, int __maybe_unused size) +{ + struct ice_flg_rd_item *rdi = item; + u8 d8 = *(u8 *)data; + + rdi->idx = idx; + rdi->expose = FIELD_GET(ICE_FRT_EXPO, d8); + rdi->intr_flg_id = FIELD_GET(ICE_FRT_IFID, d8); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_flg_rd_dump(hw, rdi); +} + +/** + * ice_flg_rd_table_get - create a flag redirect table + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Flags Redirection table. + */ +static struct ice_flg_rd_item *ice_flg_rd_table_get(struct ice_hw *hw) +{ + return ice_parser_create_table(hw, ICE_SID_RXPARSER_FLAG_REDIR, + sizeof(struct ice_flg_rd_item), + ICE_FLG_RD_TABLE_SIZE, + ice_flg_rd_parse_item, false); +} + +/** + * ice_flg_redirect - redirect a parser flag to packet flag + * @table: flag redirect table + * @psr_flg: parser flag to redirect + * + * Return: flag or 0 if @psr_flag = 0. 
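+ *
+ * Worked example (hypothetical table contents): if table[3].expose is true
+ * and table[3].intr_flg_id is 7, then a set bit 7 in @psr_flg results in
+ * bit 3 being set in the returned packet flag word.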
+ */ +u64 ice_flg_redirect(struct ice_flg_rd_item *table, u64 psr_flg) +{ + u64 flg = 0; + int i; + + for (i = 0; i < ICE_FLG_RDT_SIZE; i++) { + struct ice_flg_rd_item *item = &table[i]; + + if (!item->expose) + continue; + + if (psr_flg & BIT(item->intr_flg_id)) + flg |= BIT(i); + } + + return flg; +} + +/*** ICE_SID_XLT_KEY_BUILDER_SW, ICE_SID_XLT_KEY_BUILDER_ACL, + * ICE_SID_XLT_KEY_BUILDER_FD and ICE_SID_XLT_KEY_BUILDER_RSS + * sections ***/ +static void ice_xlt_kb_entry_dump(struct ice_hw *hw, + struct ice_xlt_kb_entry *entry, int idx) +{ + struct device *dev = ice_hw_to_dev(hw); + int i; + + dev_info(dev, "key builder entry %d\n", idx); + dev_info(dev, "\txlt1_ad_sel = %d\n", entry->xlt1_ad_sel); + dev_info(dev, "\txlt2_ad_sel = %d\n", entry->xlt2_ad_sel); + + for (i = 0; i < ICE_XLT_KB_FLAG0_14_CNT; i++) + dev_info(dev, "\tflg%d_sel = %d\n", i, entry->flg0_14_sel[i]); + + dev_info(dev, "\txlt1_md_sel = %d\n", entry->xlt1_md_sel); + dev_info(dev, "\txlt2_md_sel = %d\n", entry->xlt2_md_sel); +} + +/** + * ice_xlt_kb_dump - dump a xlt key build info + * @hw: pointer to the hardware structure + * @kb: key build to dump + */ +static void ice_xlt_kb_dump(struct ice_hw *hw, struct ice_xlt_kb *kb) +{ + struct device *dev = ice_hw_to_dev(hw); + int i; + + dev_info(dev, "xlt1_pm = %d\n", kb->xlt1_pm); + dev_info(dev, "xlt2_pm = %d\n", kb->xlt2_pm); + dev_info(dev, "prof_id_pm = %d\n", kb->prof_id_pm); + dev_info(dev, "flag15 lo = 0x%08x\n", (u32)kb->flag15); + dev_info(dev, "flag15 hi = 0x%08x\n", + (u32)(kb->flag15 >> (sizeof(u32) * BITS_PER_BYTE))); + + for (i = 0; i < ICE_XLT_KB_TBL_CNT; i++) + ice_xlt_kb_entry_dump(hw, &kb->entries[i], i); +} + +#define ICE_XLT_KB_X1AS_S 32 /* offset for the 1st 64-bits field */ +#define ICE_XLT_KB_X1AS_IDD (ICE_XLT_KB_X1AS_S / BITS_PER_BYTE) +#define ICE_XLT_KB_X1AS_OFF (ICE_XLT_KB_X1AS_S % BITS_PER_BYTE) +#define ICE_XLT_KB_X1AS GENMASK_ULL(34 - ICE_XLT_KB_X1AS_S, \ + 32 - ICE_XLT_KB_X1AS_S) +#define ICE_XLT_KB_X2AS GENMASK_ULL(37 - ICE_XLT_KB_X1AS_S, \ + 35 - ICE_XLT_KB_X1AS_S) +#define ICE_XLT_KB_FL00 GENMASK_ULL(46 - ICE_XLT_KB_X1AS_S, \ + 38 - ICE_XLT_KB_X1AS_S) +#define ICE_XLT_KB_FL01 GENMASK_ULL(55 - ICE_XLT_KB_X1AS_S, \ + 47 - ICE_XLT_KB_X1AS_S) +#define ICE_XLT_KB_FL02 GENMASK_ULL(64 - ICE_XLT_KB_X1AS_S, \ + 56 - ICE_XLT_KB_X1AS_S) +#define ICE_XLT_KB_FL03 GENMASK_ULL(73 - ICE_XLT_KB_X1AS_S, \ + 65 - ICE_XLT_KB_X1AS_S) +#define ICE_XLT_KB_FL04 GENMASK_ULL(82 - ICE_XLT_KB_X1AS_S, \ + 74 - ICE_XLT_KB_X1AS_S) +#define ICE_XLT_KB_FL05 GENMASK_ULL(91 - ICE_XLT_KB_X1AS_S, \ + 83 - ICE_XLT_KB_X1AS_S) +#define ICE_XLT_KB_FL06_S 92 /* offset for the 2nd 64-bits field */ +#define ICE_XLT_KB_FL06_IDD (ICE_XLT_KB_FL06_S / BITS_PER_BYTE) +#define ICE_XLT_KB_FL06_OFF (ICE_XLT_KB_FL06_S % BITS_PER_BYTE) +#define ICE_XLT_KB_FL06 GENMASK_ULL(100 - ICE_XLT_KB_FL06_S, \ + 92 - ICE_XLT_KB_FL06_S) +#define ICE_XLT_KB_FL07 GENMASK_ULL(109 - ICE_XLT_KB_FL06_S, \ + 101 - ICE_XLT_KB_FL06_S) +#define ICE_XLT_KB_FL08 GENMASK_ULL(118 - ICE_XLT_KB_FL06_S, \ + 110 - ICE_XLT_KB_FL06_S) +#define ICE_XLT_KB_FL09 GENMASK_ULL(127 - ICE_XLT_KB_FL06_S, \ + 119 - ICE_XLT_KB_FL06_S) +#define ICE_XLT_KB_FL10 GENMASK_ULL(136 - ICE_XLT_KB_FL06_S, \ + 128 - ICE_XLT_KB_FL06_S) +#define ICE_XLT_KB_FL11 GENMASK_ULL(145 - ICE_XLT_KB_FL06_S, \ + 137 - ICE_XLT_KB_FL06_S) +#define ICE_XLT_KB_FL12_S 146 /* offset for the 3rd 64-bits field */ +#define ICE_XLT_KB_FL12_IDD (ICE_XLT_KB_FL12_S / BITS_PER_BYTE) +#define ICE_XLT_KB_FL12_OFF (ICE_XLT_KB_FL12_S % BITS_PER_BYTE) +#define ICE_XLT_KB_FL12 
GENMASK_ULL(154 - ICE_XLT_KB_FL12_S, \ + 146 - ICE_XLT_KB_FL12_S) +#define ICE_XLT_KB_FL13 GENMASK_ULL(163 - ICE_XLT_KB_FL12_S, \ + 155 - ICE_XLT_KB_FL12_S) +#define ICE_XLT_KB_FL14 GENMASK_ULL(181 - ICE_XLT_KB_FL12_S, \ + 164 - ICE_XLT_KB_FL12_S) +#define ICE_XLT_KB_X1MS GENMASK_ULL(186 - ICE_XLT_KB_FL12_S, \ + 182 - ICE_XLT_KB_FL12_S) +#define ICE_XLT_KB_X2MS GENMASK_ULL(191 - ICE_XLT_KB_FL12_S, \ + 187 - ICE_XLT_KB_FL12_S) + +/** + * ice_kb_entry_init - parse 192 bits of XLT Key Builder entry + * @entry: pointer to the XLT Key Builder entry structure + * @data: XLT Key Builder entry data to be parsed + */ +static void ice_kb_entry_init(struct ice_xlt_kb_entry *entry, u8 *data) +{ + u8 i = 0; + u64 d64; + + d64 = *((u64 *)&data[ICE_XLT_KB_X1AS_IDD]) >> ICE_XLT_KB_X1AS_OFF; + + entry->xlt1_ad_sel = FIELD_GET(ICE_XLT_KB_X1AS, d64); + entry->xlt2_ad_sel = FIELD_GET(ICE_XLT_KB_X2AS, d64); + + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL00, d64); + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL01, d64); + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL02, d64); + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL03, d64); + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL04, d64); + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL05, d64); + + d64 = *((u64 *)&data[ICE_XLT_KB_FL06_IDD]) >> ICE_XLT_KB_FL06_OFF; + + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL06, d64); + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL07, d64); + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL08, d64); + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL09, d64); + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL10, d64); + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL11, d64); + + d64 = *((u64 *)&data[ICE_XLT_KB_FL12_IDD]) >> ICE_XLT_KB_FL12_OFF; + + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL12, d64); + entry->flg0_14_sel[i++] = FIELD_GET(ICE_XLT_KB_FL13, d64); + entry->flg0_14_sel[i] = FIELD_GET(ICE_XLT_KB_FL14, d64); + + entry->xlt1_md_sel = FIELD_GET(ICE_XLT_KB_X1MS, d64); + entry->xlt2_md_sel = FIELD_GET(ICE_XLT_KB_X2MS, d64); +} + +#define ICE_XLT_KB_X1PM_OFF 0 +#define ICE_XLT_KB_X2PM_OFF 1 +#define ICE_XLT_KB_PIPM_OFF 2 +#define ICE_XLT_KB_FL15_OFF 4 +#define ICE_XLT_KB_TBL_OFF 12 + +/** + * ice_parse_kb_data - parse 204 bits of XLT Key Build Table + * @hw: pointer to the hardware structure + * @kb: pointer to the XLT Key Build Table structure + * @data: XLT Key Build Table data to be parsed + */ +static void ice_parse_kb_data(struct ice_hw *hw, struct ice_xlt_kb *kb, + void *data) +{ + u8 *buf = data; + int i; + + kb->xlt1_pm = buf[ICE_XLT_KB_X1PM_OFF]; + kb->xlt2_pm = buf[ICE_XLT_KB_X2PM_OFF]; + kb->prof_id_pm = buf[ICE_XLT_KB_PIPM_OFF]; + + kb->flag15 = *(u64 *)&buf[ICE_XLT_KB_FL15_OFF]; + for (i = 0; i < ICE_XLT_KB_TBL_CNT; i++) + ice_kb_entry_init(&kb->entries[i], + &buf[ICE_XLT_KB_TBL_OFF + + i * ICE_XLT_KB_TBL_ENTRY_SIZE]); + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_xlt_kb_dump(hw, kb); +} + +static struct ice_xlt_kb *ice_xlt_kb_get(struct ice_hw *hw, u32 sect_type) +{ + struct ice_pkg_enum state = {}; + struct ice_seg *seg = hw->seg; + struct ice_xlt_kb *kb; + void *data; + + if (!seg) + return ERR_PTR(-EINVAL); + + kb = kzalloc(sizeof(*kb), GFP_KERNEL); + if (!kb) + return ERR_PTR(-ENOMEM); + + data = ice_pkg_enum_section(seg, &state, sect_type); + if (!data) { + ice_debug(hw, ICE_DBG_PARSER, "failed to find section type %d.\n", + sect_type); + kfree(kb); + return ERR_PTR(-EINVAL); + } + + ice_parse_kb_data(hw, kb, data); + + return kb; +} + +/** + * 
ice_xlt_kb_get_sw - create switch xlt key build + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Key Builder table for Switch. + */ +static struct ice_xlt_kb *ice_xlt_kb_get_sw(struct ice_hw *hw) +{ + return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_SW); +} + +/** + * ice_xlt_kb_get_acl - create acl xlt key build + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Key Builder table for ACL. + */ +static struct ice_xlt_kb *ice_xlt_kb_get_acl(struct ice_hw *hw) +{ + return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_ACL); +} + +/** + * ice_xlt_kb_get_fd - create fdir xlt key build + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Key Builder table for Flow Director. + */ +static struct ice_xlt_kb *ice_xlt_kb_get_fd(struct ice_hw *hw) +{ + return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_FD); +} + +/** + * ice_xlt_kb_get_rss - create rss xlt key build + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated Key Builder table for RSS. + */ +static struct ice_xlt_kb *ice_xlt_kb_get_rss(struct ice_hw *hw) +{ + return ice_xlt_kb_get(hw, ICE_SID_XLT_KEY_BUILDER_RSS); +} + +#define ICE_XLT_KB_MASK GENMASK_ULL(5, 0) + +/** + * ice_xlt_kb_flag_get - aggregate 64 bits packet flag into 16 bits xlt flag + * @kb: xlt key build + * @pkt_flag: 64 bits packet flag + * + * Return: XLT flag or 0 if @pkt_flag = 0. + */ +u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag) +{ + struct ice_xlt_kb_entry *entry = &kb->entries[0]; + u16 flag = 0; + int i; + + /* check flag 15 */ + if (kb->flag15 & pkt_flag) + flag = BIT(ICE_XLT_KB_FLAG0_14_CNT); + + /* check flag 0 - 14 */ + for (i = 0; i < ICE_XLT_KB_FLAG0_14_CNT; i++) { + /* only check first entry */ + u16 idx = entry->flg0_14_sel[i] & ICE_XLT_KB_MASK; + + if (pkt_flag & BIT(idx)) + flag |= (u16)BIT(i); + } + + return flag; +} + +/*** Parser API ***/ +/** + * ice_parser_create - create a parser instance + * @hw: pointer to the hardware structure + * + * Return: a pointer to the allocated parser instance or ERR_PTR + * in case of error. 
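+ *
+ * A minimal usage sketch (illustrative only; error paths abbreviated):
+ *
+ *	struct ice_parser *psr;
+ *
+ *	psr = ice_parser_create(hw);
+ *	if (IS_ERR(psr))
+ *		return PTR_ERR(psr);
+ *	... use the parser ...
+ *	ice_parser_destroy(psr);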
+ */ +struct ice_parser *ice_parser_create(struct ice_hw *hw) +{ + struct ice_parser *p; + void *err; + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return ERR_PTR(-ENOMEM); + + p->hw = hw; + p->rt.psr = p; + + p->imem_table = ice_imem_table_get(hw); + if (IS_ERR(p->imem_table)) { + err = p->imem_table; + goto err; + } + + p->mi_table = ice_metainit_table_get(hw); + if (IS_ERR(p->mi_table)) { + err = p->mi_table; + goto err; + } + + p->pg_cam_table = ice_pg_cam_table_get(hw); + if (IS_ERR(p->pg_cam_table)) { + err = p->pg_cam_table; + goto err; + } + + p->pg_sp_cam_table = ice_pg_sp_cam_table_get(hw); + if (IS_ERR(p->pg_sp_cam_table)) { + err = p->pg_sp_cam_table; + goto err; + } + + p->pg_nm_cam_table = ice_pg_nm_cam_table_get(hw); + if (IS_ERR(p->pg_nm_cam_table)) { + err = p->pg_nm_cam_table; + goto err; + } + + p->pg_nm_sp_cam_table = ice_pg_nm_sp_cam_table_get(hw); + if (IS_ERR(p->pg_nm_sp_cam_table)) { + err = p->pg_nm_sp_cam_table; + goto err; + } + + p->bst_tcam_table = ice_bst_tcam_table_get(hw); + if (IS_ERR(p->bst_tcam_table)) { + err = p->bst_tcam_table; + goto err; + } + + p->bst_lbl_table = ice_bst_lbl_table_get(hw); + if (IS_ERR(p->bst_lbl_table)) { + err = p->bst_lbl_table; + goto err; + } + + p->ptype_mk_tcam_table = ice_ptype_mk_tcam_table_get(hw); + if (IS_ERR(p->ptype_mk_tcam_table)) { + err = p->ptype_mk_tcam_table; + goto err; + } + + p->mk_grp_table = ice_mk_grp_table_get(hw); + if (IS_ERR(p->mk_grp_table)) { + err = p->mk_grp_table; + goto err; + } + + p->proto_grp_table = ice_proto_grp_table_get(hw); + if (IS_ERR(p->proto_grp_table)) { + err = p->proto_grp_table; + goto err; + } + + p->flg_rd_table = ice_flg_rd_table_get(hw); + if (IS_ERR(p->flg_rd_table)) { + err = p->flg_rd_table; + goto err; + } + + p->xlt_kb_sw = ice_xlt_kb_get_sw(hw); + if (IS_ERR(p->xlt_kb_sw)) { + err = p->xlt_kb_sw; + goto err; + } + + p->xlt_kb_acl = ice_xlt_kb_get_acl(hw); + if (IS_ERR(p->xlt_kb_acl)) { + err = p->xlt_kb_acl; + goto err; + } + + p->xlt_kb_fd = ice_xlt_kb_get_fd(hw); + if (IS_ERR(p->xlt_kb_fd)) { + err = p->xlt_kb_fd; + goto err; + } + + p->xlt_kb_rss = ice_xlt_kb_get_rss(hw); + if (IS_ERR(p->xlt_kb_rss)) { + err = p->xlt_kb_rss; + goto err; + } + + return p; +err: + ice_parser_destroy(p); + return err; +} + +/** + * ice_parser_destroy - destroy a parser instance + * @psr: pointer to a parser instance + */ +void ice_parser_destroy(struct ice_parser *psr) +{ + kfree(psr->imem_table); + kfree(psr->mi_table); + kfree(psr->pg_cam_table); + kfree(psr->pg_sp_cam_table); + kfree(psr->pg_nm_cam_table); + kfree(psr->pg_nm_sp_cam_table); + kfree(psr->bst_tcam_table); + kfree(psr->bst_lbl_table); + kfree(psr->ptype_mk_tcam_table); + kfree(psr->mk_grp_table); + kfree(psr->proto_grp_table); + kfree(psr->flg_rd_table); + kfree(psr->xlt_kb_sw); + kfree(psr->xlt_kb_acl); + kfree(psr->xlt_kb_fd); + kfree(psr->xlt_kb_rss); + + kfree(psr); +} + +/** + * ice_parser_run - parse on a packet in binary and return the result + * @psr: pointer to a parser instance + * @pkt_buf: packet data + * @pkt_len: packet length + * @rslt: input/output parameter to save parser result. + * + * Return: 0 on success or errno. 
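+ *
+ * A minimal usage sketch (illustrative only; @pkt and @pkt_len stand in
+ * for caller-provided packet data, @hw for the owning hardware structure):
+ *
+ *	struct ice_parser_result rslt;
+ *	int err;
+ *
+ *	err = ice_parser_run(psr, pkt, pkt_len, &rslt);
+ *	if (!err)
+ *		ice_parser_result_dump(hw, &rslt);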
+ */ +int ice_parser_run(struct ice_parser *psr, const u8 *pkt_buf, + int pkt_len, struct ice_parser_result *rslt) +{ + ice_parser_rt_reset(&psr->rt); + ice_parser_rt_pktbuf_set(&psr->rt, pkt_buf, pkt_len); + + return ice_parser_rt_execute(&psr->rt, rslt); +} + +/** + * ice_parser_result_dump - dump a parser result info + * @hw: pointer to the hardware structure + * @rslt: parser result info to dump + */ +void ice_parser_result_dump(struct ice_hw *hw, struct ice_parser_result *rslt) +{ + struct device *dev = ice_hw_to_dev(hw); + int i; + + dev_info(dev, "ptype = %d\n", rslt->ptype); + for (i = 0; i < rslt->po_num; i++) + dev_info(dev, "proto = %d, offset = %d\n", + rslt->po[i].proto_id, rslt->po[i].offset); + + dev_info(dev, "flags_psr = 0x%016llx\n", rslt->flags_psr); + dev_info(dev, "flags_pkt = 0x%016llx\n", rslt->flags_pkt); + dev_info(dev, "flags_sw = 0x%04x\n", rslt->flags_sw); + dev_info(dev, "flags_fd = 0x%04x\n", rslt->flags_fd); + dev_info(dev, "flags_rss = 0x%04x\n", rslt->flags_rss); +} + +#define ICE_BT_VLD_KEY 0xFF +#define ICE_BT_INV_KEY 0xFE + +static void ice_bst_dvm_set(struct ice_parser *psr, enum ice_lbl_type type, + bool on) +{ + u16 i = 0; + + while (true) { + struct ice_bst_tcam_item *item; + u8 key; + + item = ice_bst_tcam_search(psr->bst_tcam_table, + psr->bst_lbl_table, + type, &i); + if (!item) + break; + + key = on ? ICE_BT_VLD_KEY : ICE_BT_INV_KEY; + item->key[ICE_BT_VM_OFF] = key; + item->key_inv[ICE_BT_VM_OFF] = key; + i++; + } +} + +/** + * ice_parser_dvm_set - configure double vlan mode for parser + * @psr: pointer to a parser instance + * @on: true to turn on; false to turn off + */ +void ice_parser_dvm_set(struct ice_parser *psr, bool on) +{ + ice_bst_dvm_set(psr, ICE_LBL_BST_TYPE_DVM, on); + ice_bst_dvm_set(psr, ICE_LBL_BST_TYPE_SVM, !on); +} + +static int ice_tunnel_port_set(struct ice_parser *psr, enum ice_lbl_type type, + u16 udp_port, bool on) +{ + u8 *buf = (u8 *)&udp_port; + u16 i = 0; + + while (true) { + struct ice_bst_tcam_item *item; + + item = ice_bst_tcam_search(psr->bst_tcam_table, + psr->bst_lbl_table, + type, &i); + if (!item) + break; + + /* found empty slot to add */ + if (on && item->key[ICE_BT_TUN_PORT_OFF_H] == ICE_BT_INV_KEY && + item->key_inv[ICE_BT_TUN_PORT_OFF_H] == ICE_BT_INV_KEY) { + item->key_inv[ICE_BT_TUN_PORT_OFF_L] = + buf[ICE_UDP_PORT_OFF_L]; + item->key_inv[ICE_BT_TUN_PORT_OFF_H] = + buf[ICE_UDP_PORT_OFF_H]; + + item->key[ICE_BT_TUN_PORT_OFF_L] = + ICE_BT_VLD_KEY - buf[ICE_UDP_PORT_OFF_L]; + item->key[ICE_BT_TUN_PORT_OFF_H] = + ICE_BT_VLD_KEY - buf[ICE_UDP_PORT_OFF_H]; + + return 0; + /* found a matched slot to delete */ + } else if (!on && + (item->key_inv[ICE_BT_TUN_PORT_OFF_L] == + buf[ICE_UDP_PORT_OFF_L] || + item->key_inv[ICE_BT_TUN_PORT_OFF_H] == + buf[ICE_UDP_PORT_OFF_H])) { + item->key_inv[ICE_BT_TUN_PORT_OFF_L] = ICE_BT_VLD_KEY; + item->key_inv[ICE_BT_TUN_PORT_OFF_H] = ICE_BT_INV_KEY; + + item->key[ICE_BT_TUN_PORT_OFF_L] = ICE_BT_VLD_KEY; + item->key[ICE_BT_TUN_PORT_OFF_H] = ICE_BT_INV_KEY; + + return 0; + } + i++; + } + + return -EINVAL; +} + +/** + * ice_parser_vxlan_tunnel_set - configure vxlan tunnel for parser + * @psr: pointer to a parser instance + * @udp_port: vxlan tunnel port in UDP header + * @on: true to turn on; false to turn off + * + * Return: 0 on success or errno on failure. 
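+ *
+ * For example (illustrative; 4789 is the IANA-assigned VXLAN UDP port):
+ *
+ *	err = ice_parser_vxlan_tunnel_set(psr, 4789, true);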
+ */ +int ice_parser_vxlan_tunnel_set(struct ice_parser *psr, + u16 udp_port, bool on) +{ + return ice_tunnel_port_set(psr, ICE_LBL_BST_TYPE_VXLAN, udp_port, on); +} + +/** + * ice_parser_geneve_tunnel_set - configure geneve tunnel for parser + * @psr: pointer to a parser instance + * @udp_port: geneve tunnel port in UDP header + * @on: true to turn on; false to turn off + * + * Return: 0 on success or errno on failure. + */ +int ice_parser_geneve_tunnel_set(struct ice_parser *psr, + u16 udp_port, bool on) +{ + return ice_tunnel_port_set(psr, ICE_LBL_BST_TYPE_GENEVE, udp_port, on); +} + +/** + * ice_parser_ecpri_tunnel_set - configure ecpri tunnel for parser + * @psr: pointer to a parser instance + * @udp_port: ecpri tunnel port in UDP header + * @on: true to turn on; false to turn off + * + * Return: 0 on success or errno on failure. + */ +int ice_parser_ecpri_tunnel_set(struct ice_parser *psr, + u16 udp_port, bool on) +{ + return ice_tunnel_port_set(psr, ICE_LBL_BST_TYPE_UDP_ECPRI, + udp_port, on); +} + +/** + * ice_nearest_proto_id - find nearest protocol ID + * @rslt: pointer to a parser result instance + * @offset: a min value for the protocol offset + * @proto_id: the protocol ID (output) + * @proto_off: the protocol offset (output) + * + * From the protocols in @rslt, find the nearest protocol that has offset + * larger than @offset. + * + * Return: if true, the protocol's ID and offset + */ +static bool ice_nearest_proto_id(struct ice_parser_result *rslt, u16 offset, + u8 *proto_id, u16 *proto_off) +{ + u16 dist = U16_MAX; + u8 proto = 0; + int i; + + for (i = 0; i < rslt->po_num; i++) { + if (offset < rslt->po[i].offset) + continue; + if (offset - rslt->po[i].offset < dist) { + proto = rslt->po[i].proto_id; + dist = offset - rslt->po[i].offset; + } + } + + if (dist % 2) + return false; + + *proto_id = proto; + *proto_off = dist; + + return true; +} + +/* default flag mask to cover GTP_EH_PDU, GTP_EH_PDU_LINK and TUN2 + * In future, the flag masks should learn from DDP + */ +#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_SW 0x4002 +#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_ACL 0x0000 +#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_FD 0x6080 +#define ICE_KEYBUILD_FLAG_MASK_DEFAULT_RSS 0x6010 + +/** + * ice_parser_profile_init - initialize a FXP profile based on parser result + * @rslt: a instance of a parser result + * @pkt_buf: packet data buffer + * @msk_buf: packet mask buffer + * @buf_len: packet length + * @blk: FXP pipeline stage + * @prof: input/output parameter to save the profile + * + * Return: 0 on success or errno on failure. 
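+ *
+ * A minimal usage sketch (illustrative only; @pkt, @msk and @len are
+ * placeholders for a caller-provided packet/mask pair, with @rslt already
+ * filled in by ice_parser_run()):
+ *
+ *	struct ice_parser_profile prof;
+ *
+ *	err = ice_parser_profile_init(&rslt, pkt, msk, len,
+ *				      ICE_BLK_FD, &prof);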
+ */ +int ice_parser_profile_init(struct ice_parser_result *rslt, + const u8 *pkt_buf, const u8 *msk_buf, + int buf_len, enum ice_block blk, + struct ice_parser_profile *prof) +{ + u8 proto_id = U8_MAX; + u16 proto_off = 0; + u16 off; + + memset(prof, 0, sizeof(*prof)); + set_bit(rslt->ptype, prof->ptypes); + if (blk == ICE_BLK_SW) { + prof->flags = rslt->flags_sw; + prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_SW; + } else if (blk == ICE_BLK_ACL) { + prof->flags = rslt->flags_acl; + prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_ACL; + } else if (blk == ICE_BLK_FD) { + prof->flags = rslt->flags_fd; + prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_FD; + } else if (blk == ICE_BLK_RSS) { + prof->flags = rslt->flags_rss; + prof->flags_msk = ICE_KEYBUILD_FLAG_MASK_DEFAULT_RSS; + } else { + return -EINVAL; + } + + for (off = 0; off < buf_len - 1; off++) { + if (msk_buf[off] == 0 && msk_buf[off + 1] == 0) + continue; + if (!ice_nearest_proto_id(rslt, off, &proto_id, &proto_off)) + continue; + if (prof->fv_num >= ICE_PARSER_FV_MAX) + return -EINVAL; + + prof->fv[prof->fv_num].proto_id = proto_id; + prof->fv[prof->fv_num].offset = proto_off; + prof->fv[prof->fv_num].spec = *(const u16 *)&pkt_buf[off]; + prof->fv[prof->fv_num].msk = *(const u16 *)&msk_buf[off]; + prof->fv_num++; + } + + return 0; +} + +/** + * ice_parser_profile_dump - dump an FXP profile info + * @hw: pointer to the hardware structure + * @prof: profile info to dump + */ +void ice_parser_profile_dump(struct ice_hw *hw, + struct ice_parser_profile *prof) +{ + struct device *dev = ice_hw_to_dev(hw); + u16 i; + + dev_info(dev, "ptypes:\n"); + for (i = 0; i < ICE_FLOW_PTYPE_MAX; i++) + if (test_bit(i, prof->ptypes)) + dev_info(dev, "\t%u\n", i); + + for (i = 0; i < prof->fv_num; i++) + dev_info(dev, "proto = %u, offset = %2u, spec = 0x%04x, mask = 0x%04x\n", + prof->fv[i].proto_id, prof->fv[i].offset, + prof->fv[i].spec, prof->fv[i].msk); + + dev_info(dev, "flags = 0x%04x\n", prof->flags); + dev_info(dev, "flags_msk = 0x%04x\n", prof->flags_msk); +} diff --git a/drivers/net/ethernet/intel/ice/ice_parser.h b/drivers/net/ethernet/intel/ice/ice_parser.h new file mode 100644 index 000000000000..4f56d53d56b9 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_parser.h @@ -0,0 +1,538 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2024 Intel Corporation */ + +#ifndef _ICE_PARSER_H_ +#define _ICE_PARSER_H_ + +#define ICE_SEC_DATA_OFFSET 4 +#define ICE_SID_RXPARSER_IMEM_ENTRY_SIZE 48 +#define ICE_SID_RXPARSER_METADATA_INIT_ENTRY_SIZE 24 +#define ICE_SID_RXPARSER_CAM_ENTRY_SIZE 16 +#define ICE_SID_RXPARSER_PG_SPILL_ENTRY_SIZE 17 +#define ICE_SID_RXPARSER_NOMATCH_CAM_ENTRY_SIZE 12 +#define ICE_SID_RXPARSER_NOMATCH_SPILL_ENTRY_SIZE 13 +#define ICE_SID_RXPARSER_BOOST_TCAM_ENTRY_SIZE 88 +#define ICE_SID_RXPARSER_MARKER_TYPE_ENTRY_SIZE 24 +#define ICE_SID_RXPARSER_MARKER_GRP_ENTRY_SIZE 8 +#define ICE_SID_RXPARSER_PROTO_GRP_ENTRY_SIZE 24 +#define ICE_SID_RXPARSER_FLAG_REDIR_ENTRY_SIZE 1 + +#define ICE_SEC_LBL_DATA_OFFSET 2 +#define ICE_SID_LBL_ENTRY_SIZE 66 + +/*** ICE_SID_RXPARSER_IMEM section ***/ +#define ICE_IMEM_TABLE_SIZE 192 + +/* TCAM boost Master; if bit is set, and TCAM hit, TCAM output overrides iMEM + * output. 
+ */ +struct ice_bst_main { + bool alu0; + bool alu1; + bool alu2; + bool pg; +}; + +struct ice_bst_keybuilder { + u8 prio; /* 0-3: PG precedence within ALUs (3 highest) */ + bool tsr_ctrl; /* TCAM Search Register control */ +}; + +/* Next protocol Key builder */ +struct ice_np_keybuilder { + u8 opc; + u8 start_reg0; + u8 len_reg1; +}; + +enum ice_np_keybuilder_opcode { + ICE_NPKB_OPC_EXTRACT = 0, + ICE_NPKB_OPC_BUILD = 1, + ICE_NPKB_OPC_BYPASS = 2, +}; + +/* Parse Graph Key builder */ +struct ice_pg_keybuilder { + bool flag0_ena; + bool flag1_ena; + bool flag2_ena; + bool flag3_ena; + u8 flag0_idx; + u8 flag1_idx; + u8 flag2_idx; + u8 flag3_idx; + u8 alu_reg_idx; +}; + +enum ice_alu_idx { + ICE_ALU0_IDX = 0, + ICE_ALU1_IDX = 1, + ICE_ALU2_IDX = 2, +}; + +enum ice_alu_opcode { + ICE_ALU_PARK = 0, + ICE_ALU_MOV_ADD = 1, + ICE_ALU_ADD = 2, + ICE_ALU_MOV_AND = 4, + ICE_ALU_AND = 5, + ICE_ALU_AND_IMM = 6, + ICE_ALU_MOV_OR = 7, + ICE_ALU_OR = 8, + ICE_ALU_MOV_XOR = 9, + ICE_ALU_XOR = 10, + ICE_ALU_NOP = 11, + ICE_ALU_BR = 12, + ICE_ALU_BREQ = 13, + ICE_ALU_BRNEQ = 14, + ICE_ALU_BRGT = 15, + ICE_ALU_BRLT = 16, + ICE_ALU_BRGEQ = 17, + ICE_ALU_BRLEG = 18, + ICE_ALU_SETEQ = 19, + ICE_ALU_ANDEQ = 20, + ICE_ALU_OREQ = 21, + ICE_ALU_SETNEQ = 22, + ICE_ALU_ANDNEQ = 23, + ICE_ALU_ORNEQ = 24, + ICE_ALU_SETGT = 25, + ICE_ALU_ANDGT = 26, + ICE_ALU_ORGT = 27, + ICE_ALU_SETLT = 28, + ICE_ALU_ANDLT = 29, + ICE_ALU_ORLT = 30, + ICE_ALU_MOV_SUB = 31, + ICE_ALU_SUB = 32, + ICE_ALU_INVALID = 64, +}; + +enum ice_proto_off_opcode { + ICE_PO_OFF_REMAIN = 0, + ICE_PO_OFF_HDR_ADD = 1, + ICE_PO_OFF_HDR_SUB = 2, +}; + +struct ice_alu { + enum ice_alu_opcode opc; + u8 src_start; + u8 src_len; + bool shift_xlate_sel; + u8 shift_xlate_key; + u8 src_reg_id; + u8 dst_reg_id; + bool inc0; + bool inc1; + u8 proto_offset_opc; + u8 proto_offset; + u8 branch_addr; + u16 imm; + bool dedicate_flags_ena; + u8 dst_start; + u8 dst_len; + bool flags_extr_imm; + u8 flags_start_imm; +}; + +/* Parser program code (iMEM) */ +struct ice_imem_item { + u16 idx; + struct ice_bst_main b_m; + struct ice_bst_keybuilder b_kb; + u8 pg_prio; + struct ice_np_keybuilder np_kb; + struct ice_pg_keybuilder pg_kb; + struct ice_alu alu0; + struct ice_alu alu1; + struct ice_alu alu2; +}; + +/*** ICE_SID_RXPARSER_METADATA_INIT section ***/ +#define ICE_METAINIT_TABLE_SIZE 16 + +/* Metadata Initialization item */ +struct ice_metainit_item { + u16 idx; + + u8 tsr; /* TCAM Search key Register */ + u16 ho; /* Header Offset register */ + u16 pc; /* Program Counter register */ + u16 pg_rn; /* Parse Graph Root Node */ + u8 cd; /* Control Domain ID */ + + /* General Purpose Registers */ + bool gpr_a_ctrl; + u8 gpr_a_data_mdid; + u8 gpr_a_data_start; + u8 gpr_a_data_len; + u8 gpr_a_id; + + bool gpr_b_ctrl; + u8 gpr_b_data_mdid; + u8 gpr_b_data_start; + u8 gpr_b_data_len; + u8 gpr_b_id; + + bool gpr_c_ctrl; + u8 gpr_c_data_mdid; + u8 gpr_c_data_start; + u8 gpr_c_data_len; + u8 gpr_c_id; + + bool gpr_d_ctrl; + u8 gpr_d_data_mdid; + u8 gpr_d_data_start; + u8 gpr_d_data_len; + u8 gpr_d_id; + + u64 flags; /* Initial value for all flags */ +}; + +/*** ICE_SID_RXPARSER_CAM, ICE_SID_RXPARSER_PG_SPILL, + * ICE_SID_RXPARSER_NOMATCH_CAM and ICE_SID_RXPARSER_NOMATCH_CAM + * sections ***/ +#define ICE_PG_CAM_TABLE_SIZE 2048 +#define ICE_PG_SP_CAM_TABLE_SIZE 128 +#define ICE_PG_NM_CAM_TABLE_SIZE 1024 +#define ICE_PG_NM_SP_CAM_TABLE_SIZE 64 + +struct ice_pg_cam_key { + bool valid; + struct_group_attr(val, __packed, + u16 node_id; /* Node ID of protocol in parse graph */ + bool 
flag0; + bool flag1; + bool flag2; + bool flag3; + u8 boost_idx; /* Boost TCAM match index */ + u16 alu_reg; + u32 next_proto; /* next Protocol value (must be last) */ + ); +}; + +struct ice_pg_nm_cam_key { + bool valid; + struct_group_attr(val, __packed, + u16 node_id; + bool flag0; + bool flag1; + bool flag2; + bool flag3; + u8 boost_idx; + u16 alu_reg; + ); +}; + +struct ice_pg_cam_action { + u16 next_node; /* Parser Node ID for the next round */ + u8 next_pc; /* next Program Counter */ + bool is_pg; /* is protocol group */ + u8 proto_id; /* protocol ID or proto group ID */ + bool is_mg; /* is marker group */ + u8 marker_id; /* marker ID or marker group ID */ + bool is_last_round; + bool ho_polarity; /* header offset polarity */ + u16 ho_inc; +}; + +/* Parse Graph item */ +struct ice_pg_cam_item { + u16 idx; + struct ice_pg_cam_key key; + struct ice_pg_cam_action action; +}; + +/* Parse Graph No Match item */ +struct ice_pg_nm_cam_item { + u16 idx; + struct ice_pg_nm_cam_key key; + struct ice_pg_cam_action action; +}; + +struct ice_pg_cam_item *ice_pg_cam_match(struct ice_pg_cam_item *table, + int size, struct ice_pg_cam_key *key); +struct ice_pg_nm_cam_item * +ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size, + struct ice_pg_cam_key *key); + +/*** ICE_SID_RXPARSER_BOOST_TCAM and ICE_SID_LBL_RXPARSER_TMEM sections ***/ +#define ICE_BST_TCAM_TABLE_SIZE 256 +#define ICE_BST_TCAM_KEY_SIZE 20 + +/* Boost TCAM item */ +struct ice_bst_tcam_item { + u16 addr; + u8 key[ICE_BST_TCAM_KEY_SIZE]; + u8 key_inv[ICE_BST_TCAM_KEY_SIZE]; + u8 hit_idx_grp; + u8 pg_prio; + struct ice_np_keybuilder np_kb; + struct ice_pg_keybuilder pg_kb; + struct ice_alu alu0; + struct ice_alu alu1; + struct ice_alu alu2; +}; + +#define ICE_LBL_LEN 64 +#define ICE_LBL_BST_DVM "BOOST_MAC_VLAN_DVM" +#define ICE_LBL_BST_SVM "BOOST_MAC_VLAN_SVM" +#define ICE_LBL_TNL_VXLAN "TNL_VXLAN" +#define ICE_LBL_TNL_GENEVE "TNL_GENEVE" +#define ICE_LBL_TNL_UDP_ECPRI "TNL_UDP_ECPRI" + +enum ice_lbl_type { + ICE_LBL_BST_TYPE_UNKNOWN, + ICE_LBL_BST_TYPE_DVM, + ICE_LBL_BST_TYPE_SVM, + ICE_LBL_BST_TYPE_VXLAN, + ICE_LBL_BST_TYPE_GENEVE, + ICE_LBL_BST_TYPE_UDP_ECPRI, +}; + +struct ice_lbl_item { + u16 idx; + char label[ICE_LBL_LEN]; + + /* must be at the end, not part of the DDP section */ + enum ice_lbl_type type; +}; + +struct ice_bst_tcam_item * +ice_bst_tcam_match(struct ice_bst_tcam_item *tcam_table, u8 *pat); +struct ice_bst_tcam_item * +ice_bst_tcam_search(struct ice_bst_tcam_item *tcam_table, + struct ice_lbl_item *lbl_table, + enum ice_lbl_type type, u16 *start); + +/*** ICE_SID_RXPARSER_MARKER_PTYPE section ***/ +#define ICE_PTYPE_MK_TCAM_TABLE_SIZE 1024 +#define ICE_PTYPE_MK_TCAM_KEY_SIZE 10 + +struct ice_ptype_mk_tcam_item { + u16 address; + u16 ptype; + u8 key[ICE_PTYPE_MK_TCAM_KEY_SIZE]; + u8 key_inv[ICE_PTYPE_MK_TCAM_KEY_SIZE]; +} __packed; + +struct ice_ptype_mk_tcam_item * +ice_ptype_mk_tcam_match(struct ice_ptype_mk_tcam_item *table, + u8 *pat, int len); +/*** ICE_SID_RXPARSER_MARKER_GRP section ***/ +#define ICE_MK_GRP_TABLE_SIZE 128 +#define ICE_MK_COUNT_PER_GRP 8 + +/* Marker Group item */ +struct ice_mk_grp_item { + int idx; + u8 markers[ICE_MK_COUNT_PER_GRP]; +}; + +/*** ICE_SID_RXPARSER_PROTO_GRP section ***/ +#define ICE_PROTO_COUNT_PER_GRP 8 +#define ICE_PROTO_GRP_TABLE_SIZE 192 +#define ICE_PROTO_GRP_ITEM_SIZE 22 +struct ice_proto_off { + bool polarity; /* true: positive, false: negative */ + u8 proto_id; + u16 offset; /* 10 bit protocol offset */ +}; + +/* Protocol Group item */ +struct 
ice_proto_grp_item { + u16 idx; + struct ice_proto_off po[ICE_PROTO_COUNT_PER_GRP]; +}; + +/*** ICE_SID_RXPARSER_FLAG_REDIR section ***/ +#define ICE_FLG_RD_TABLE_SIZE 64 +#define ICE_FLG_RDT_SIZE 64 + +/* Flags Redirection item */ +struct ice_flg_rd_item { + u16 idx; + bool expose; + u8 intr_flg_id; /* Internal Flag ID */ +}; + +u64 ice_flg_redirect(struct ice_flg_rd_item *table, u64 psr_flg); + +/*** ICE_SID_XLT_KEY_BUILDER_SW, ICE_SID_XLT_KEY_BUILDER_ACL, + * ICE_SID_XLT_KEY_BUILDER_FD and ICE_SID_XLT_KEY_BUILDER_RSS + * sections ***/ +#define ICE_XLT_KB_FLAG0_14_CNT 15 +#define ICE_XLT_KB_TBL_CNT 8 +#define ICE_XLT_KB_TBL_ENTRY_SIZE 24 + +struct ice_xlt_kb_entry { + u8 xlt1_ad_sel; + u8 xlt2_ad_sel; + u16 flg0_14_sel[ICE_XLT_KB_FLAG0_14_CNT]; + u8 xlt1_md_sel; + u8 xlt2_md_sel; +}; + +/* XLT Key Builder */ +struct ice_xlt_kb { + u8 xlt1_pm; /* XLT1 Partition Mode */ + u8 xlt2_pm; /* XLT2 Partition Mode */ + u8 prof_id_pm; /* Profile ID Partition Mode */ + u64 flag15; + + struct ice_xlt_kb_entry entries[ICE_XLT_KB_TBL_CNT]; +}; + +u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag); + +/*** Parser API ***/ +#define ICE_GPR_HV_IDX 64 +#define ICE_GPR_HV_SIZE 32 +#define ICE_GPR_ERR_IDX 84 +#define ICE_GPR_FLG_IDX 104 +#define ICE_GPR_FLG_SIZE 16 + +#define ICE_GPR_TSR_IDX 108 /* TSR: TCAM Search Register */ +#define ICE_GPR_NN_IDX 109 /* NN: Next Parsing Cycle Node ID */ +#define ICE_GPR_HO_IDX 110 /* HO: Next Parsing Cycle hdr Offset */ +#define ICE_GPR_NP_IDX 111 /* NP: Next Parsing Cycle */ + +#define ICE_PARSER_MAX_PKT_LEN 504 +#define ICE_PARSER_PKT_REV 32 +#define ICE_PARSER_GPR_NUM 128 +#define ICE_PARSER_FLG_NUM 64 +#define ICE_PARSER_ERR_NUM 16 +#define ICE_MARKER_ID_SIZE 9 +#define ICE_MARKER_MAX_SIZE \ + (ICE_MARKER_ID_SIZE * BITS_PER_BYTE - 1) +#define ICE_MARKER_ID_NUM 8 +#define ICE_PO_PAIR_SIZE 256 + +struct ice_gpr_pu { + /* array of flags to indicate if GRP needs to be updated */ + bool gpr_val_upd[ICE_PARSER_GPR_NUM]; + u16 gpr_val[ICE_PARSER_GPR_NUM]; + u64 flg_msk; + u64 flg_val; + u16 err_msk; + u16 err_val; +}; + +enum ice_pg_prio { + ICE_PG_P0 = 0, + ICE_PG_P1 = 1, + ICE_PG_P2 = 2, + ICE_PG_P3 = 3, +}; + +struct ice_parser_rt { + struct ice_parser *psr; + u16 gpr[ICE_PARSER_GPR_NUM]; + u8 pkt_buf[ICE_PARSER_MAX_PKT_LEN + ICE_PARSER_PKT_REV]; + u16 pkt_len; + u16 po; + u8 bst_key[ICE_BST_TCAM_KEY_SIZE]; + struct ice_pg_cam_key pg_key; + u8 pg_prio; + struct ice_alu *alu0; + struct ice_alu *alu1; + struct ice_alu *alu2; + struct ice_pg_cam_action *action; + struct ice_gpr_pu pu; + u8 markers[ICE_MARKER_ID_SIZE]; + bool protocols[ICE_PO_PAIR_SIZE]; + u16 offsets[ICE_PO_PAIR_SIZE]; +}; + +struct ice_parser_proto_off { + u8 proto_id; /* hardware protocol ID */ + u16 offset; /* offset from the start of the protocol header */ +}; + +#define ICE_PARSER_PROTO_OFF_PAIR_SIZE 16 +#define ICE_PARSER_FLAG_PSR_SIZE 8 +#define ICE_PARSER_FV_SIZE 48 +#define ICE_PARSER_FV_MAX 24 +#define ICE_BT_TUN_PORT_OFF_H 16 +#define ICE_BT_TUN_PORT_OFF_L 15 +#define ICE_BT_VM_OFF 0 +#define ICE_UDP_PORT_OFF_H 1 +#define ICE_UDP_PORT_OFF_L 0 + +struct ice_parser_result { + u16 ptype; /* 16 bits hardware PTYPE */ + /* array of protocol and header offset pairs */ + struct ice_parser_proto_off po[ICE_PARSER_PROTO_OFF_PAIR_SIZE]; + int po_num; /* # of protocol-offset pairs must <= 16 */ + u64 flags_psr; /* parser flags */ + u64 flags_pkt; /* packet flags */ + u16 flags_sw; /* key builder flags for SW */ + u16 flags_acl; /* key builder flags for ACL */ + u16 flags_fd; /* key builder flags 
for FD */ + u16 flags_rss; /* key builder flags for RSS */ +}; + +void ice_parser_rt_reset(struct ice_parser_rt *rt); +void ice_parser_rt_pktbuf_set(struct ice_parser_rt *rt, const u8 *pkt_buf, + int pkt_len); +int ice_parser_rt_execute(struct ice_parser_rt *rt, + struct ice_parser_result *rslt); + +struct ice_parser { + struct ice_hw *hw; /* pointer to the hardware structure */ + + struct ice_imem_item *imem_table; + struct ice_metainit_item *mi_table; + + struct ice_pg_cam_item *pg_cam_table; + struct ice_pg_cam_item *pg_sp_cam_table; + struct ice_pg_nm_cam_item *pg_nm_cam_table; + struct ice_pg_nm_cam_item *pg_nm_sp_cam_table; + + struct ice_bst_tcam_item *bst_tcam_table; + struct ice_lbl_item *bst_lbl_table; + struct ice_ptype_mk_tcam_item *ptype_mk_tcam_table; + struct ice_mk_grp_item *mk_grp_table; + struct ice_proto_grp_item *proto_grp_table; + struct ice_flg_rd_item *flg_rd_table; + + struct ice_xlt_kb *xlt_kb_sw; + struct ice_xlt_kb *xlt_kb_acl; + struct ice_xlt_kb *xlt_kb_fd; + struct ice_xlt_kb *xlt_kb_rss; + + struct ice_parser_rt rt; +}; + +struct ice_parser *ice_parser_create(struct ice_hw *hw); +void ice_parser_destroy(struct ice_parser *psr); +void ice_parser_dvm_set(struct ice_parser *psr, bool on); +int ice_parser_vxlan_tunnel_set(struct ice_parser *psr, u16 udp_port, bool on); +int ice_parser_geneve_tunnel_set(struct ice_parser *psr, u16 udp_port, bool on); +int ice_parser_ecpri_tunnel_set(struct ice_parser *psr, u16 udp_port, bool on); +int ice_parser_run(struct ice_parser *psr, const u8 *pkt_buf, + int pkt_len, struct ice_parser_result *rslt); +void ice_parser_result_dump(struct ice_hw *hw, struct ice_parser_result *rslt); + +struct ice_parser_fv { + u8 proto_id; /* hardware protocol ID */ + u16 offset; /* offset from the start of the protocol header */ + u16 spec; /* pattern to match */ + u16 msk; /* pattern mask */ +}; + +struct ice_parser_profile { + /* array of field vectors */ + struct ice_parser_fv fv[ICE_PARSER_FV_SIZE]; + int fv_num; /* # of field vectors must <= 48 */ + u16 flags; /* key builder flags */ + u16 flags_msk; /* key builder flag mask */ + + DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX); /* PTYPE bitmap */ +}; + +int ice_parser_profile_init(struct ice_parser_result *rslt, + const u8 *pkt_buf, const u8 *msk_buf, + int buf_len, enum ice_block blk, + struct ice_parser_profile *prof); +void ice_parser_profile_dump(struct ice_hw *hw, + struct ice_parser_profile *prof); +#endif /* _ICE_PARSER_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_parser_rt.c b/drivers/net/ethernet/intel/ice/ice_parser_rt.c new file mode 100644 index 000000000000..3995d662e050 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_parser_rt.c @@ -0,0 +1,859 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2024 Intel Corporation */ + +#include "ice_common.h" + +static void ice_rt_tsr_set(struct ice_parser_rt *rt, u16 tsr) +{ + rt->gpr[ICE_GPR_TSR_IDX] = tsr; +} + +static void ice_rt_ho_set(struct ice_parser_rt *rt, u16 ho) +{ + rt->gpr[ICE_GPR_HO_IDX] = ho; + memcpy(&rt->gpr[ICE_GPR_HV_IDX], &rt->pkt_buf[ho], ICE_GPR_HV_SIZE); +} + +static void ice_rt_np_set(struct ice_parser_rt *rt, u16 pc) +{ + rt->gpr[ICE_GPR_NP_IDX] = pc; +} + +static void ice_rt_nn_set(struct ice_parser_rt *rt, u16 node) +{ + rt->gpr[ICE_GPR_NN_IDX] = node; +} + +static void +ice_rt_flag_set(struct ice_parser_rt *rt, unsigned int idx, bool set) +{ + struct ice_hw *hw = rt->psr->hw; + unsigned int word, id; + + word = idx / ICE_GPR_FLG_SIZE; + id = idx % ICE_GPR_FLG_SIZE; + + if (set) { + 
rt->gpr[ICE_GPR_FLG_IDX + word] |= (u16)BIT(id); + ice_debug(hw, ICE_DBG_PARSER, "Set parser flag %u\n", idx); + } else { + rt->gpr[ICE_GPR_FLG_IDX + word] &= ~(u16)BIT(id); + ice_debug(hw, ICE_DBG_PARSER, "Clear parser flag %u\n", idx); + } +} + +static void ice_rt_gpr_set(struct ice_parser_rt *rt, int idx, u16 val) +{ + struct ice_hw *hw = rt->psr->hw; + + if (idx == ICE_GPR_HO_IDX) + ice_rt_ho_set(rt, val); + else + rt->gpr[idx] = val; + + ice_debug(hw, ICE_DBG_PARSER, "Set GPR %d value %d\n", idx, val); +} + +static void ice_rt_err_set(struct ice_parser_rt *rt, unsigned int idx, bool set) +{ + struct ice_hw *hw = rt->psr->hw; + + if (set) { + rt->gpr[ICE_GPR_ERR_IDX] |= (u16)BIT(idx); + ice_debug(hw, ICE_DBG_PARSER, "Set parser error %u\n", idx); + } else { + rt->gpr[ICE_GPR_ERR_IDX] &= ~(u16)BIT(idx); + ice_debug(hw, ICE_DBG_PARSER, "Reset parser error %u\n", idx); + } +} + +/** + * ice_parser_rt_reset - reset the parser runtime + * @rt: pointer to the parser runtime + */ +void ice_parser_rt_reset(struct ice_parser_rt *rt) +{ + struct ice_parser *psr = rt->psr; + struct ice_metainit_item *mi; + unsigned int i; + + mi = &psr->mi_table[0]; + + memset(rt, 0, sizeof(*rt)); + rt->psr = psr; + + ice_rt_tsr_set(rt, mi->tsr); + ice_rt_ho_set(rt, mi->ho); + ice_rt_np_set(rt, mi->pc); + ice_rt_nn_set(rt, mi->pg_rn); + + for (i = 0; i < ICE_PARSER_FLG_NUM; i++) { + if (mi->flags & BIT(i)) + ice_rt_flag_set(rt, i, true); + } +} + +/** + * ice_parser_rt_pktbuf_set - set a packet into parser runtime + * @rt: pointer to the parser runtime + * @pkt_buf: buffer with packet data + * @pkt_len: packet buffer length + */ +void ice_parser_rt_pktbuf_set(struct ice_parser_rt *rt, const u8 *pkt_buf, + int pkt_len) +{ + int len = min(ICE_PARSER_MAX_PKT_LEN, pkt_len); + u16 ho = rt->gpr[ICE_GPR_HO_IDX]; + + memcpy(rt->pkt_buf, pkt_buf, len); + rt->pkt_len = pkt_len; + + memcpy(&rt->gpr[ICE_GPR_HV_IDX], &rt->pkt_buf[ho], ICE_GPR_HV_SIZE); +} + +static void ice_bst_key_init(struct ice_parser_rt *rt, + struct ice_imem_item *imem) +{ + u8 tsr = (u8)rt->gpr[ICE_GPR_TSR_IDX]; + u16 ho = rt->gpr[ICE_GPR_HO_IDX]; + u8 *key = rt->bst_key; + int idd, i; + + idd = ICE_BST_TCAM_KEY_SIZE - 1; + if (imem->b_kb.tsr_ctrl) + key[idd] = tsr; + else + key[idd] = imem->b_kb.prio; + + idd = ICE_BST_TCAM_KEY_SIZE - 2; + for (i = idd; i >= 0; i--) { + int j; + + j = ho + idd - i; + if (j < ICE_PARSER_MAX_PKT_LEN) + key[i] = rt->pkt_buf[j]; + else + key[i] = 0; + } + + ice_debug_array_w_prefix(rt->psr->hw, ICE_DBG_PARSER, + KBUILD_MODNAME ": Generated Boost TCAM Key", + key, ICE_BST_TCAM_KEY_SIZE); +} + +static u16 ice_bit_rev_u16(u16 v, int len) +{ + return bitrev16(v) >> (BITS_PER_TYPE(v) - len); +} + +static u32 ice_bit_rev_u32(u32 v, int len) +{ + return bitrev32(v) >> (BITS_PER_TYPE(v) - len); +} + +static u32 ice_hv_bit_sel(struct ice_parser_rt *rt, int start, int len) +{ + int offset; + u32 buf[2]; + u64 val; + + offset = ICE_GPR_HV_IDX + (start / BITS_PER_TYPE(u16)); + + memcpy(buf, &rt->gpr[offset], sizeof(buf)); + + buf[0] = bitrev8x4(buf[0]); + buf[1] = bitrev8x4(buf[1]); + + val = *(u64 *)buf; + val >>= start % BITS_PER_TYPE(u16); + + return ice_bit_rev_u32(val, len); +} + +static u32 ice_pk_build(struct ice_parser_rt *rt, + struct ice_np_keybuilder *kb) +{ + if (kb->opc == ICE_NPKB_OPC_EXTRACT) + return ice_hv_bit_sel(rt, kb->start_reg0, kb->len_reg1); + else if (kb->opc == ICE_NPKB_OPC_BUILD) + return rt->gpr[kb->start_reg0] | + ((u32)rt->gpr[kb->len_reg1] << BITS_PER_TYPE(u16)); + else if (kb->opc == 
ICE_NPKB_OPC_BYPASS) + return 0; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unsupported OP Code %u\n", + kb->opc); + return U32_MAX; +} + +static bool ice_flag_get(struct ice_parser_rt *rt, unsigned int index) +{ + int word = index / ICE_GPR_FLG_SIZE; + int id = index % ICE_GPR_FLG_SIZE; + + return !!(rt->gpr[ICE_GPR_FLG_IDX + word] & (u16)BIT(id)); +} + +static int ice_imem_pgk_init(struct ice_parser_rt *rt, + struct ice_imem_item *imem) +{ + memset(&rt->pg_key, 0, sizeof(rt->pg_key)); + rt->pg_key.next_proto = ice_pk_build(rt, &imem->np_kb); + if (rt->pg_key.next_proto == U32_MAX) + return -EINVAL; + + if (imem->pg_kb.flag0_ena) + rt->pg_key.flag0 = ice_flag_get(rt, imem->pg_kb.flag0_idx); + if (imem->pg_kb.flag1_ena) + rt->pg_key.flag1 = ice_flag_get(rt, imem->pg_kb.flag1_idx); + if (imem->pg_kb.flag2_ena) + rt->pg_key.flag2 = ice_flag_get(rt, imem->pg_kb.flag2_idx); + if (imem->pg_kb.flag3_ena) + rt->pg_key.flag3 = ice_flag_get(rt, imem->pg_kb.flag3_idx); + + rt->pg_key.alu_reg = rt->gpr[imem->pg_kb.alu_reg_idx]; + rt->pg_key.node_id = rt->gpr[ICE_GPR_NN_IDX]; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generate Parse Graph Key: node_id(%d), flag0-3(%d,%d,%d,%d), boost_idx(%d), alu_reg(0x%04x), next_proto(0x%08x)\n", + rt->pg_key.node_id, + rt->pg_key.flag0, + rt->pg_key.flag1, + rt->pg_key.flag2, + rt->pg_key.flag3, + rt->pg_key.boost_idx, + rt->pg_key.alu_reg, + rt->pg_key.next_proto); + + return 0; +} + +static void ice_imem_alu0_set(struct ice_parser_rt *rt, + struct ice_imem_item *imem) +{ + rt->alu0 = &imem->alu0; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU0 from imem pc %d\n", + imem->idx); +} + +static void ice_imem_alu1_set(struct ice_parser_rt *rt, + struct ice_imem_item *imem) +{ + rt->alu1 = &imem->alu1; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU1 from imem pc %d\n", + imem->idx); +} + +static void ice_imem_alu2_set(struct ice_parser_rt *rt, + struct ice_imem_item *imem) +{ + rt->alu2 = &imem->alu2; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU2 from imem pc %d\n", + imem->idx); +} + +static void ice_imem_pgp_set(struct ice_parser_rt *rt, + struct ice_imem_item *imem) +{ + rt->pg_prio = imem->pg_prio; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load PG priority %d from imem pc %d\n", + rt->pg_prio, imem->idx); +} + +static int ice_bst_pgk_init(struct ice_parser_rt *rt, + struct ice_bst_tcam_item *bst) +{ + memset(&rt->pg_key, 0, sizeof(rt->pg_key)); + rt->pg_key.boost_idx = bst->hit_idx_grp; + rt->pg_key.next_proto = ice_pk_build(rt, &bst->np_kb); + if (rt->pg_key.next_proto == U32_MAX) + return -EINVAL; + + if (bst->pg_kb.flag0_ena) + rt->pg_key.flag0 = ice_flag_get(rt, bst->pg_kb.flag0_idx); + if (bst->pg_kb.flag1_ena) + rt->pg_key.flag1 = ice_flag_get(rt, bst->pg_kb.flag1_idx); + if (bst->pg_kb.flag2_ena) + rt->pg_key.flag2 = ice_flag_get(rt, bst->pg_kb.flag2_idx); + if (bst->pg_kb.flag3_ena) + rt->pg_key.flag3 = ice_flag_get(rt, bst->pg_kb.flag3_idx); + + rt->pg_key.alu_reg = rt->gpr[bst->pg_kb.alu_reg_idx]; + rt->pg_key.node_id = rt->gpr[ICE_GPR_NN_IDX]; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generate Parse Graph Key: node_id(%d), flag0-3(%d,%d,%d,%d), boost_idx(%d), alu_reg(0x%04x), next_proto(0x%08x)\n", + rt->pg_key.node_id, + rt->pg_key.flag0, + rt->pg_key.flag1, + rt->pg_key.flag2, + rt->pg_key.flag3, + rt->pg_key.boost_idx, + rt->pg_key.alu_reg, + rt->pg_key.next_proto); + + return 0; +} + +static void ice_bst_alu0_set(struct ice_parser_rt *rt, + struct ice_bst_tcam_item *bst) +{ + rt->alu0 = &bst->alu0; + ice_debug(rt->psr->hw, 
ICE_DBG_PARSER, "Load ALU0 from boost address %d\n", + bst->addr); +} + +static void ice_bst_alu1_set(struct ice_parser_rt *rt, + struct ice_bst_tcam_item *bst) +{ + rt->alu1 = &bst->alu1; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU1 from boost address %d\n", + bst->addr); +} + +static void ice_bst_alu2_set(struct ice_parser_rt *rt, + struct ice_bst_tcam_item *bst) +{ + rt->alu2 = &bst->alu2; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load ALU2 from boost address %d\n", + bst->addr); +} + +static void ice_bst_pgp_set(struct ice_parser_rt *rt, + struct ice_bst_tcam_item *bst) +{ + rt->pg_prio = bst->pg_prio; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load PG priority %d from boost address %d\n", + rt->pg_prio, bst->addr); +} + +static struct ice_pg_cam_item *ice_rt_pg_cam_match(struct ice_parser_rt *rt) +{ + struct ice_parser *psr = rt->psr; + struct ice_pg_cam_item *item; + + item = ice_pg_cam_match(psr->pg_cam_table, ICE_PG_CAM_TABLE_SIZE, + &rt->pg_key); + if (!item) + item = ice_pg_cam_match(psr->pg_sp_cam_table, + ICE_PG_SP_CAM_TABLE_SIZE, &rt->pg_key); + return item; +} + +static +struct ice_pg_nm_cam_item *ice_rt_pg_nm_cam_match(struct ice_parser_rt *rt) +{ + struct ice_parser *psr = rt->psr; + struct ice_pg_nm_cam_item *item; + + item = ice_pg_nm_cam_match(psr->pg_nm_cam_table, + ICE_PG_NM_CAM_TABLE_SIZE, &rt->pg_key); + + if (!item) + item = ice_pg_nm_cam_match(psr->pg_nm_sp_cam_table, + ICE_PG_NM_SP_CAM_TABLE_SIZE, + &rt->pg_key); + return item; +} + +static void ice_gpr_add(struct ice_parser_rt *rt, int idx, u16 val) +{ + rt->pu.gpr_val_upd[idx] = true; + rt->pu.gpr_val[idx] = val; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for register %d value %d\n", + idx, val); +} + +static void ice_pg_exe(struct ice_parser_rt *rt) +{ + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ParseGraph action ...\n"); + + ice_gpr_add(rt, ICE_GPR_NP_IDX, rt->action->next_pc); + ice_gpr_add(rt, ICE_GPR_NN_IDX, rt->action->next_node); + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ParseGraph action done.\n"); +} + +static void ice_flg_add(struct ice_parser_rt *rt, int idx, bool val) +{ + rt->pu.flg_msk |= BIT_ULL(idx); + if (val) + rt->pu.flg_val |= BIT_ULL(idx); + else + rt->pu.flg_val &= ~BIT_ULL(idx); + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for flag %d value %d\n", + idx, val); +} + +static void ice_flg_update(struct ice_parser_rt *rt, struct ice_alu *alu) +{ + u32 hv_bit_sel; + int i; + + if (!alu->dedicate_flags_ena) + return; + + if (alu->flags_extr_imm) { + for (i = 0; i < alu->dst_len; i++) + ice_flg_add(rt, alu->dst_start + i, + !!(alu->flags_start_imm & BIT(i))); + } else { + for (i = 0; i < alu->dst_len; i++) { + hv_bit_sel = ice_hv_bit_sel(rt, + alu->flags_start_imm + i, + 1); + ice_flg_add(rt, alu->dst_start + i, !!hv_bit_sel); + } + } +} + +static void ice_po_update(struct ice_parser_rt *rt, struct ice_alu *alu) +{ + if (alu->proto_offset_opc == ICE_PO_OFF_HDR_ADD) + rt->po = (u16)(rt->gpr[ICE_GPR_HO_IDX] + alu->proto_offset); + else if (alu->proto_offset_opc == ICE_PO_OFF_HDR_SUB) + rt->po = (u16)(rt->gpr[ICE_GPR_HO_IDX] - alu->proto_offset); + else if (alu->proto_offset_opc == ICE_PO_OFF_REMAIN) + rt->po = rt->gpr[ICE_GPR_HO_IDX]; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Update Protocol Offset = %d\n", + rt->po); +} + +static u16 ice_reg_bit_sel(struct ice_parser_rt *rt, int reg_idx, + int start, int len) +{ + int offset; + u32 val; + + offset = ICE_GPR_HV_IDX + (start / BITS_PER_TYPE(u16)); + + memcpy(&val, &rt->gpr[offset], 
sizeof(val)); + + val = bitrev8x4(val); + val >>= start % BITS_PER_TYPE(u16); + + return ice_bit_rev_u16(val, len); +} + +static void ice_err_add(struct ice_parser_rt *rt, int idx, bool val) +{ + rt->pu.err_msk |= (u16)BIT(idx); + if (val) + rt->pu.flg_val |= (u64)BIT_ULL(idx); + else + rt->pu.flg_val &= ~(u64)BIT_ULL(idx); + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Pending update for error %d value %d\n", + idx, val); +} + +static void ice_dst_reg_bit_set(struct ice_parser_rt *rt, struct ice_alu *alu, + bool val) +{ + u16 flg_idx; + + if (alu->dedicate_flags_ena) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "DedicatedFlagsEnable should not be enabled in opcode %d\n", + alu->opc); + return; + } + + if (alu->dst_reg_id == ICE_GPR_ERR_IDX) { + if (alu->dst_start >= ICE_PARSER_ERR_NUM) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Invalid error %d\n", + alu->dst_start); + return; + } + ice_err_add(rt, alu->dst_start, val); + } else if (alu->dst_reg_id >= ICE_GPR_FLG_IDX) { + flg_idx = (u16)(((alu->dst_reg_id - ICE_GPR_FLG_IDX) << 4) + + alu->dst_start); + + if (flg_idx >= ICE_PARSER_FLG_NUM) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Invalid flag %d\n", + flg_idx); + return; + } + ice_flg_add(rt, flg_idx, val); + } else { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unexpected Dest Register Bit set, RegisterID %d Start %d\n", + alu->dst_reg_id, alu->dst_start); + } +} + +static void ice_alu_exe(struct ice_parser_rt *rt, struct ice_alu *alu) +{ + u16 dst, src, shift, imm; + + if (alu->shift_xlate_sel) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "shift_xlate_sel != 0 is not expected\n"); + return; + } + + ice_po_update(rt, alu); + ice_flg_update(rt, alu); + + dst = rt->gpr[alu->dst_reg_id]; + src = ice_reg_bit_sel(rt, alu->src_reg_id, + alu->src_start, alu->src_len); + shift = alu->shift_xlate_key; + imm = alu->imm; + + switch (alu->opc) { + case ICE_ALU_PARK: + break; + case ICE_ALU_MOV_ADD: + dst = (src << shift) + imm; + ice_gpr_add(rt, alu->dst_reg_id, dst); + break; + case ICE_ALU_ADD: + dst += (src << shift) + imm; + ice_gpr_add(rt, alu->dst_reg_id, dst); + break; + case ICE_ALU_ORLT: + if (src < imm) + ice_dst_reg_bit_set(rt, alu, true); + ice_gpr_add(rt, ICE_GPR_NP_IDX, alu->branch_addr); + break; + case ICE_ALU_OREQ: + if (src == imm) + ice_dst_reg_bit_set(rt, alu, true); + ice_gpr_add(rt, ICE_GPR_NP_IDX, alu->branch_addr); + break; + case ICE_ALU_SETEQ: + ice_dst_reg_bit_set(rt, alu, src == imm); + ice_gpr_add(rt, ICE_GPR_NP_IDX, alu->branch_addr); + break; + case ICE_ALU_MOV_XOR: + dst = (src << shift) ^ imm; + ice_gpr_add(rt, alu->dst_reg_id, dst); + break; + default: + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Unsupported ALU instruction %d\n", + alu->opc); + break; + } +} + +static void ice_alu0_exe(struct ice_parser_rt *rt) +{ + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU0 ...\n"); + ice_alu_exe(rt, rt->alu0); + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU0 done.\n"); +} + +static void ice_alu1_exe(struct ice_parser_rt *rt) +{ + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU1 ...\n"); + ice_alu_exe(rt, rt->alu1); + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU1 done.\n"); +} + +static void ice_alu2_exe(struct ice_parser_rt *rt) +{ + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU2 ...\n"); + ice_alu_exe(rt, rt->alu2); + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Executing ALU2 done.\n"); +} + +static void ice_pu_exe(struct ice_parser_rt *rt) +{ + struct ice_gpr_pu *pu = &rt->pu; + unsigned int i; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Updating 
Registers ...\n"); + + for (i = 0; i < ICE_PARSER_GPR_NUM; i++) { + if (pu->gpr_val_upd[i]) + ice_rt_gpr_set(rt, i, pu->gpr_val[i]); + } + + for (i = 0; i < ICE_PARSER_FLG_NUM; i++) { + if (pu->flg_msk & BIT(i)) + ice_rt_flag_set(rt, i, pu->flg_val & BIT(i)); + } + + for (i = 0; i < ICE_PARSER_ERR_NUM; i++) { + if (pu->err_msk & BIT(i)) + ice_rt_err_set(rt, i, pu->err_val & BIT(i)); + } + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Updating Registers done.\n"); +} + +static void ice_alu_pg_exe(struct ice_parser_rt *rt) +{ + memset(&rt->pu, 0, sizeof(rt->pu)); + + switch (rt->pg_prio) { + case (ICE_PG_P0): + ice_pg_exe(rt); + ice_alu0_exe(rt); + ice_alu1_exe(rt); + ice_alu2_exe(rt); + break; + case (ICE_PG_P1): + ice_alu0_exe(rt); + ice_pg_exe(rt); + ice_alu1_exe(rt); + ice_alu2_exe(rt); + break; + case (ICE_PG_P2): + ice_alu0_exe(rt); + ice_alu1_exe(rt); + ice_pg_exe(rt); + ice_alu2_exe(rt); + break; + case (ICE_PG_P3): + ice_alu0_exe(rt); + ice_alu1_exe(rt); + ice_alu2_exe(rt); + ice_pg_exe(rt); + break; + } + + ice_pu_exe(rt); + + if (rt->action->ho_inc == 0) + return; + + if (rt->action->ho_polarity) + ice_rt_ho_set(rt, rt->gpr[ICE_GPR_HO_IDX] + rt->action->ho_inc); + else + ice_rt_ho_set(rt, rt->gpr[ICE_GPR_HO_IDX] - rt->action->ho_inc); +} + +static void ice_proto_off_update(struct ice_parser_rt *rt) +{ + struct ice_parser *psr = rt->psr; + + if (rt->action->is_pg) { + struct ice_proto_grp_item *proto_grp = + &psr->proto_grp_table[rt->action->proto_id]; + u16 po; + int i; + + for (i = 0; i < ICE_PROTO_COUNT_PER_GRP; i++) { + struct ice_proto_off *entry = &proto_grp->po[i]; + + if (entry->proto_id == U8_MAX) + break; + + if (!entry->polarity) + po = rt->po + entry->offset; + else + po = rt->po - entry->offset; + + rt->protocols[entry->proto_id] = true; + rt->offsets[entry->proto_id] = po; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Protocol %d at offset %d\n", + entry->proto_id, po); + } + } else { + rt->protocols[rt->action->proto_id] = true; + rt->offsets[rt->action->proto_id] = rt->po; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Protocol %d at offset %d\n", + rt->action->proto_id, rt->po); + } +} + +static void ice_marker_set(struct ice_parser_rt *rt, int idx) +{ + unsigned int byte = idx / BITS_PER_BYTE; + unsigned int bit = idx % BITS_PER_BYTE; + + rt->markers[byte] |= (u8)BIT(bit); +} + +static void ice_marker_update(struct ice_parser_rt *rt) +{ + struct ice_parser *psr = rt->psr; + + if (rt->action->is_mg) { + struct ice_mk_grp_item *mk_grp = + &psr->mk_grp_table[rt->action->marker_id]; + int i; + + for (i = 0; i < ICE_MARKER_ID_NUM; i++) { + u8 marker = mk_grp->markers[i]; + + if (marker == ICE_MARKER_MAX_SIZE) + break; + + ice_marker_set(rt, marker); + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Marker %d\n", + marker); + } + } else { + if (rt->action->marker_id != ICE_MARKER_MAX_SIZE) + ice_marker_set(rt, rt->action->marker_id); + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Set Marker %d\n", + rt->action->marker_id); + } +} + +static u16 ice_ptype_resolve(struct ice_parser_rt *rt) +{ + struct ice_ptype_mk_tcam_item *item; + struct ice_parser *psr = rt->psr; + + item = ice_ptype_mk_tcam_match(psr->ptype_mk_tcam_table, + rt->markers, ICE_MARKER_ID_SIZE); + if (item) + return item->ptype; + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Could not resolve PTYPE\n"); + return U16_MAX; +} + +static void ice_proto_off_resolve(struct ice_parser_rt *rt, + struct ice_parser_result *rslt) +{ + int i; + + for (i = 0; i < ICE_PO_PAIR_SIZE - 1; i++) { + if (rt->protocols[i]) { + 
rslt->po[rslt->po_num].proto_id = (u8)i; + rslt->po[rslt->po_num].offset = rt->offsets[i]; + rslt->po_num++; + } + } +} + +static void ice_result_resolve(struct ice_parser_rt *rt, + struct ice_parser_result *rslt) +{ + struct ice_parser *psr = rt->psr; + + memset(rslt, 0, sizeof(*rslt)); + + memcpy(&rslt->flags_psr, &rt->gpr[ICE_GPR_FLG_IDX], + ICE_PARSER_FLAG_PSR_SIZE); + rslt->flags_pkt = ice_flg_redirect(psr->flg_rd_table, rslt->flags_psr); + rslt->flags_sw = ice_xlt_kb_flag_get(psr->xlt_kb_sw, rslt->flags_pkt); + rslt->flags_fd = ice_xlt_kb_flag_get(psr->xlt_kb_fd, rslt->flags_pkt); + rslt->flags_rss = ice_xlt_kb_flag_get(psr->xlt_kb_rss, rslt->flags_pkt); + + ice_proto_off_resolve(rt, rslt); + rslt->ptype = ice_ptype_resolve(rt); +} + +/** + * ice_parser_rt_execute - parser execution routine + * @rt: pointer to the parser runtime + * @rslt: input/output parameter to save parser result + * + * Return: 0 on success or errno. + */ +int ice_parser_rt_execute(struct ice_parser_rt *rt, + struct ice_parser_result *rslt) +{ + struct ice_pg_nm_cam_item *pg_nm_cam; + struct ice_parser *psr = rt->psr; + struct ice_pg_cam_item *pg_cam; + int status = 0; + u16 node; + u16 pc; + + node = rt->gpr[ICE_GPR_NN_IDX]; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Start with Node: %u\n", node); + + while (true) { + struct ice_bst_tcam_item *bst; + struct ice_imem_item *imem; + + pc = rt->gpr[ICE_GPR_NP_IDX]; + imem = &psr->imem_table[pc]; + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Load imem at pc: %u\n", + pc); + + ice_bst_key_init(rt, imem); + bst = ice_bst_tcam_match(psr->bst_tcam_table, rt->bst_key); + if (!bst) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "No Boost TCAM Match\n"); + status = ice_imem_pgk_init(rt, imem); + if (status) + break; + ice_imem_alu0_set(rt, imem); + ice_imem_alu1_set(rt, imem); + ice_imem_alu2_set(rt, imem); + ice_imem_pgp_set(rt, imem); + } else { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Boost TCAM Match address: %u\n", + bst->addr); + if (imem->b_m.pg) { + status = ice_bst_pgk_init(rt, bst); + if (status) + break; + ice_bst_pgp_set(rt, bst); + } else { + status = ice_imem_pgk_init(rt, imem); + if (status) + break; + ice_imem_pgp_set(rt, imem); + } + + if (imem->b_m.alu0) + ice_bst_alu0_set(rt, bst); + else + ice_imem_alu0_set(rt, imem); + + if (imem->b_m.alu1) + ice_bst_alu1_set(rt, bst); + else + ice_imem_alu1_set(rt, imem); + + if (imem->b_m.alu2) + ice_bst_alu2_set(rt, bst); + else + ice_imem_alu2_set(rt, imem); + } + + rt->action = NULL; + pg_cam = ice_rt_pg_cam_match(rt); + if (!pg_cam) { + pg_nm_cam = ice_rt_pg_nm_cam_match(rt); + if (pg_nm_cam) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Match ParseGraph Nomatch CAM Address %u\n", + pg_nm_cam->idx); + rt->action = &pg_nm_cam->action; + } + } else { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Match ParseGraph CAM Address %u\n", + pg_cam->idx); + rt->action = &pg_cam->action; + } + + if (!rt->action) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Failed to match ParseGraph CAM, stop parsing.\n"); + status = -EINVAL; + break; + } + + ice_alu_pg_exe(rt); + ice_marker_update(rt); + ice_proto_off_update(rt); + + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Go to node %u\n", + rt->action->next_node); + + if (rt->action->is_last_round) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Last Round in ParseGraph Action, stop parsing.\n"); + break; + } + + if (rt->gpr[ICE_GPR_HO_IDX] >= rt->pkt_len) { + ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Header Offset (%u) is larger than packet len (%u), stop parsing\n", + rt->gpr[ICE_GPR_HO_IDX], rt->pkt_len); 
+ break; + } + } + + ice_result_resolve(rt, rslt); + + return status; +} diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h index f6f27361c3cf..7c09ea0f03ba 100644 --- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h +++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h @@ -7,18 +7,24 @@ /* Each recipe can match up to 5 different fields. Fields to match can be meta- * data, values extracted from packet headers, or results from other recipes. - * One of the 5 fields is reserved for matching the switch ID. So, up to 4 - * recipes can provide intermediate results to another one through chaining, - * e.g. recipes 0, 1, 2, and 3 can provide intermediate results to recipe 4. + * Therefore, up to 5 recipes can provide intermediate results to another one + * through chaining, e.g. recipes 0, 1, 2, 3 and 4 can provide intermediate + * results to recipe 5. Note that one of the fields in one of the recipes must + * always be reserved for matching the switch ID. */ -#define ICE_NUM_WORDS_RECIPE 4 +#define ICE_NUM_WORDS_RECIPE 5 -/* Max recipes that can be chained */ +/* Max recipes that can be chained, not including the last one, which combines + * intermediate results. + */ #define ICE_MAX_CHAIN_RECIPE 5 -/* 1 word reserved for switch ID from allowed 5 words. - * So a recipe can have max 4 words. And you can chain 5 such recipes - * together. So maximum words that can be programmed for look up is 5 * 4. +/* Total max recipes in chain recipe (including intermediate results) */ +#define ICE_MAX_CHAIN_RECIPE_RES (ICE_MAX_CHAIN_RECIPE + 1) + +/* A recipe can have max 5 words, and 5 recipes can be chained together (using + * the 6th one, which would contain only result indexes). So maximum words that + * can be programmed for lookup is 5 * 5 (not including intermediate results). 
*/ #define ICE_MAX_CHAIN_WORDS (ICE_NUM_WORDS_RECIPE * ICE_MAX_CHAIN_RECIPE) @@ -43,6 +49,7 @@ enum ice_protocol_type { ICE_NVGRE, ICE_GTP, ICE_GTP_NO_PAY, + ICE_PFCP, ICE_PPPOE, ICE_L2TPV3, ICE_VLAN_EX, @@ -61,6 +68,7 @@ enum ice_sw_tunnel_type { ICE_SW_TUN_NVGRE, ICE_SW_TUN_GTPU, ICE_SW_TUN_GTPC, + ICE_SW_TUN_PFCP, ICE_ALL_TUNNELS /* All tunnel types including NVGRE */ }; @@ -202,6 +210,15 @@ struct ice_udp_gtp_hdr { u8 rsvrd; }; +struct ice_pfcp_hdr { + u8 flags; + u8 msg_type; + __be16 length; + __be64 seid; + __be32 seq; + u8 spare; +} __packed __aligned(__alignof__(u16)); + struct ice_pppoe_hdr { u8 rsrvd_ver_type; u8 rsrvd_code; @@ -418,6 +435,7 @@ union ice_prot_hdr { struct ice_udp_tnl_hdr tnl_hdr; struct ice_nvgre_hdr nvgre_hdr; struct ice_udp_gtp_hdr gtp_hdr; + struct ice_pfcp_hdr pfcp_hdr; struct ice_pppoe_hdr pppoe_hdr; struct ice_l2tpv3_sess_hdr l2tpv3_sess_hdr; struct ice_hw_metadata metadata; @@ -437,32 +455,11 @@ struct ice_prot_ext_tbl_entry { /* Extractions to be looked up for a given recipe */ struct ice_prot_lkup_ext { - u16 prot_type; u8 n_val_words; /* create a buffer to hold max words per recipe */ - u16 field_off[ICE_MAX_CHAIN_WORDS]; u16 field_mask[ICE_MAX_CHAIN_WORDS]; struct ice_fv_word fv_words[ICE_MAX_CHAIN_WORDS]; - - /* Indicate field offsets that have field vector indices assigned */ - DECLARE_BITMAP(done, ICE_MAX_CHAIN_WORDS); -}; - -struct ice_pref_recipe_group { - u8 n_val_pairs; /* Number of valid pairs */ - struct ice_fv_word pairs[ICE_NUM_WORDS_RECIPE]; - u16 mask[ICE_NUM_WORDS_RECIPE]; }; -struct ice_recp_grp_entry { - struct list_head l_entry; - -#define ICE_INVAL_CHAIN_IND 0xFF - u16 rid; - u8 chain_idx; - u16 fv_idx[ICE_NUM_WORDS_RECIPE]; - u16 fv_mask[ICE_NUM_WORDS_RECIPE]; - struct ice_pref_recipe_group r_group; -}; #endif /* _ICE_PROTOCOL_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index c11eba07283c..b79a148ed0f2 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -4,255 +4,187 @@ #include "ice.h" #include "ice_lib.h" #include "ice_trace.h" +#include "ice_cgu_regs.h" + +static const char ice_pin_names[][64] = { + "SDP0", + "SDP1", + "SDP2", + "SDP3", + "TIME_SYNC", + "1PPS" +}; -#define E810_OUT_PROP_DELAY_NS 1 - -#define UNKNOWN_INCVAL_E82X 0x100000000ULL +static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = { + /* name, gpio, delay */ + { TIME_SYNC, { 4, -1 }, { 0, 0 }}, + { ONE_PPS, { -1, 5 }, { 0, 11 }}, +}; -static const struct ptp_pin_desc ice_pin_desc_e810t[] = { - /* name idx func chan */ - { "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } }, - { "SMA1", SMA1, PTP_PF_NONE, 1, { 0, } }, - { "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } }, - { "SMA2", SMA2, PTP_PF_NONE, 2, { 0, } }, - { "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } }, +static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = { + /* name, gpio, delay */ + { SDP0, { 0, 0 }, { 15, 14 }}, + { SDP1, { 1, 1 }, { 15, 14 }}, + { SDP2, { 2, 2 }, { 15, 14 }}, + { SDP3, { 3, 3 }, { 15, 14 }}, + { TIME_SYNC, { 4, -1 }, { 11, 0 }}, + { ONE_PPS, { -1, 5 }, { 0, 9 }}, }; -/** - * ice_get_sma_config_e810t - * @hw: pointer to the hw struct - * @ptp_pins: pointer to the ptp_pin_desc struture - * - * Read the configuration of the SMA control logic and put it into the - * ptp_pin_desc structure - */ -static int -ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins) -{ - u8 data, i; - int status; +static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = { + /* name, gpio, 
delay */ + { SDP0, { 0, 0 }, { 0, 1 }}, + { SDP1, { 1, 1 }, { 0, 1 }}, + { SDP2, { 2, 2 }, { 0, 1 }}, + { SDP3, { 3, 3 }, { 0, 1 }}, + { ONE_PPS, { -1, 5 }, { 0, 1 }}, +}; - /* Read initial pin state */ - status = ice_read_sma_ctrl_e810t(hw, &data); - if (status) - return status; +static const char ice_pin_names_nvm[][64] = { + "GNSS", + "SMA1", + "U.FL1", + "SMA2", + "U.FL2", +}; - /* initialize with defaults */ - for (i = 0; i < NUM_PTP_PINS_E810T; i++) { - strscpy(ptp_pins[i].name, ice_pin_desc_e810t[i].name, - sizeof(ptp_pins[i].name)); - ptp_pins[i].index = ice_pin_desc_e810t[i].index; - ptp_pins[i].func = ice_pin_desc_e810t[i].func; - ptp_pins[i].chan = ice_pin_desc_e810t[i].chan; - } +static const struct ice_ptp_pin_desc ice_pin_desc_e810_sma[] = { + /* name, gpio, delay */ + { GNSS, { 1, -1 }, { 0, 0 }}, + { SMA1, { 1, 0 }, { 0, 1 }}, + { UFL1, { -1, 0 }, { 0, 1 }}, + { SMA2, { 3, 2 }, { 0, 1 }}, + { UFL2, { 3, -1 }, { 0, 0 }}, +}; - /* Parse SMA1/UFL1 */ - switch (data & ICE_SMA1_MASK_E810T) { - case ICE_SMA1_MASK_E810T: - default: - ptp_pins[SMA1].func = PTP_PF_NONE; - ptp_pins[UFL1].func = PTP_PF_NONE; - break; - case ICE_SMA1_DIR_EN_E810T: - ptp_pins[SMA1].func = PTP_PF_PEROUT; - ptp_pins[UFL1].func = PTP_PF_NONE; - break; - case ICE_SMA1_TX_EN_E810T: - ptp_pins[SMA1].func = PTP_PF_EXTTS; - ptp_pins[UFL1].func = PTP_PF_NONE; - break; - case 0: - ptp_pins[SMA1].func = PTP_PF_EXTTS; - ptp_pins[UFL1].func = PTP_PF_PEROUT; - break; - } +static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf) +{ + return !pf->adapter ? NULL : pf->adapter->ctrl_pf; +} - /* Parse SMA2/UFL2 */ - switch (data & ICE_SMA2_MASK_E810T) { - case ICE_SMA2_MASK_E810T: - default: - ptp_pins[SMA2].func = PTP_PF_NONE; - ptp_pins[UFL2].func = PTP_PF_NONE; - break; - case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T): - ptp_pins[SMA2].func = PTP_PF_EXTTS; - ptp_pins[UFL2].func = PTP_PF_NONE; - break; - case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T): - ptp_pins[SMA2].func = PTP_PF_PEROUT; - ptp_pins[UFL2].func = PTP_PF_NONE; - break; - case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T): - ptp_pins[SMA2].func = PTP_PF_NONE; - ptp_pins[UFL2].func = PTP_PF_EXTTS; - break; - case ICE_SMA2_DIR_EN_E810T: - ptp_pins[SMA2].func = PTP_PF_PEROUT; - ptp_pins[UFL2].func = PTP_PF_EXTTS; - break; - } +static struct ice_ptp *ice_get_ctrl_ptp(struct ice_pf *pf) +{ + struct ice_pf *ctrl_pf = ice_get_ctrl_pf(pf); - return 0; + return !ctrl_pf ? 
NULL : &ctrl_pf->ptp; } /** - * ice_ptp_set_sma_config_e810t - * @hw: pointer to the hw struct - * @ptp_pins: pointer to the ptp_pin_desc struture + * ice_ptp_find_pin_idx - Find pin index in ptp_pin_desc + * @pf: Board private structure + * @func: Pin function + * @chan: GPIO channel * - * Set the configuration of the SMA control logic based on the configuration in - * num_pins parameter + * Return: positive pin number when pin is present, -1 otherwise */ -static int -ice_ptp_set_sma_config_e810t(struct ice_hw *hw, - const struct ptp_pin_desc *ptp_pins) +static int ice_ptp_find_pin_idx(struct ice_pf *pf, enum ptp_pin_function func, + unsigned int chan) { - int status; - u8 data; + const struct ptp_clock_info *info = &pf->ptp.info; + int i; - /* SMA1 and UFL1 cannot be set to TX at the same time */ - if (ptp_pins[SMA1].func == PTP_PF_PEROUT && - ptp_pins[UFL1].func == PTP_PF_PEROUT) - return -EINVAL; + for (i = 0; i < info->n_pins; i++) { + if (info->pin_config[i].func == func && + info->pin_config[i].chan == chan) + return i; + } - /* SMA2 and UFL2 cannot be set to RX at the same time */ - if (ptp_pins[SMA2].func == PTP_PF_EXTTS && - ptp_pins[UFL2].func == PTP_PF_EXTTS) - return -EINVAL; + return -1; +} - /* Read initial pin state value */ - status = ice_read_sma_ctrl_e810t(hw, &data); - if (status) - return status; - - /* Set the right sate based on the desired configuration */ - data &= ~ICE_SMA1_MASK_E810T; - if (ptp_pins[SMA1].func == PTP_PF_NONE && - ptp_pins[UFL1].func == PTP_PF_NONE) { - dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled"); - data |= ICE_SMA1_MASK_E810T; - } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS && - ptp_pins[UFL1].func == PTP_PF_NONE) { - dev_info(ice_hw_to_dev(hw), "SMA1 RX"); - data |= ICE_SMA1_TX_EN_E810T; - } else if (ptp_pins[SMA1].func == PTP_PF_NONE && - ptp_pins[UFL1].func == PTP_PF_PEROUT) { - /* U.FL 1 TX will always enable SMA 1 RX */ - dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX"); - } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS && - ptp_pins[UFL1].func == PTP_PF_PEROUT) { - dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX"); - } else if (ptp_pins[SMA1].func == PTP_PF_PEROUT && - ptp_pins[UFL1].func == PTP_PF_NONE) { - dev_info(ice_hw_to_dev(hw), "SMA1 TX"); - data |= ICE_SMA1_DIR_EN_E810T; - } - - data &= ~ICE_SMA2_MASK_E810T; - if (ptp_pins[SMA2].func == PTP_PF_NONE && - ptp_pins[UFL2].func == PTP_PF_NONE) { - dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled"); - data |= ICE_SMA2_MASK_E810T; - } else if (ptp_pins[SMA2].func == PTP_PF_EXTTS && - ptp_pins[UFL2].func == PTP_PF_NONE) { - dev_info(ice_hw_to_dev(hw), "SMA2 RX"); - data |= (ICE_SMA2_TX_EN_E810T | - ICE_SMA2_UFL2_RX_DIS_E810T); - } else if (ptp_pins[SMA2].func == PTP_PF_NONE && - ptp_pins[UFL2].func == PTP_PF_EXTTS) { - dev_info(ice_hw_to_dev(hw), "UFL2 RX"); - data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T); - } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT && - ptp_pins[UFL2].func == PTP_PF_NONE) { - dev_info(ice_hw_to_dev(hw), "SMA2 TX"); - data |= (ICE_SMA2_DIR_EN_E810T | - ICE_SMA2_UFL2_RX_DIS_E810T); - } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT && - ptp_pins[UFL2].func == PTP_PF_EXTTS) { - dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX"); - data |= ICE_SMA2_DIR_EN_E810T; - } - - return ice_write_sma_ctrl_e810t(hw, data); -} - -/** - * ice_ptp_set_sma_e810t - * @info: the driver's PTP info structure - * @pin: pin index in kernel structure - * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT) - * - * Set the configuration of a single SMA 
pin +/** + * ice_ptp_update_sma_data - update SMA pins data according to pins setup + * @pf: Board private structure + * @sma_pins: parsed SMA pins status + * @data: SMA data to update */ -static int -ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin, - enum ptp_pin_function func) +static void ice_ptp_update_sma_data(struct ice_pf *pf, unsigned int sma_pins[], + u8 *data) { - struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T]; - struct ice_pf *pf = ptp_info_to_pf(info); - struct ice_hw *hw = &pf->hw; - int err; - - if (pin < SMA1 || func > PTP_PF_PEROUT) - return -EOPNOTSUPP; + const char *state1, *state2; - err = ice_get_sma_config_e810t(hw, ptp_pins); - if (err) - return err; - - /* Disable the same function on the other pin sharing the channel */ - if (pin == SMA1 && ptp_pins[UFL1].func == func) - ptp_pins[UFL1].func = PTP_PF_NONE; - if (pin == UFL1 && ptp_pins[SMA1].func == func) - ptp_pins[SMA1].func = PTP_PF_NONE; - - if (pin == SMA2 && ptp_pins[UFL2].func == func) - ptp_pins[UFL2].func = PTP_PF_NONE; - if (pin == UFL2 && ptp_pins[SMA2].func == func) - ptp_pins[SMA2].func = PTP_PF_NONE; + /* Set the right state based on the desired configuration. + * When bit is set, functionality is disabled. + */ + *data &= ~ICE_ALL_SMA_MASK; + if (!sma_pins[UFL1 - 1]) { + if (sma_pins[SMA1 - 1] == PTP_PF_EXTTS) { + state1 = "SMA1 Rx, U.FL1 disabled"; + *data |= ICE_SMA1_TX_EN; + } else if (sma_pins[SMA1 - 1] == PTP_PF_PEROUT) { + state1 = "SMA1 Tx U.FL1 disabled"; + *data |= ICE_SMA1_DIR_EN; + } else { + state1 = "SMA1 disabled, U.FL1 disabled"; + *data |= ICE_SMA1_MASK; + } + } else { + /* U.FL1 Tx will always enable SMA1 Rx */ + state1 = "SMA1 Rx, U.FL1 Tx"; + } - /* Set up new pin function in the temp table */ - ptp_pins[pin].func = func; + if (!sma_pins[UFL2 - 1]) { + if (sma_pins[SMA2 - 1] == PTP_PF_EXTTS) { + state2 = "SMA2 Rx, U.FL2 disabled"; + *data |= ICE_SMA2_TX_EN | ICE_SMA2_UFL2_RX_DIS; + } else if (sma_pins[SMA2 - 1] == PTP_PF_PEROUT) { + state2 = "SMA2 Tx, U.FL2 disabled"; + *data |= ICE_SMA2_DIR_EN | ICE_SMA2_UFL2_RX_DIS; + } else { + state2 = "SMA2 disabled, U.FL2 disabled"; + *data |= ICE_SMA2_MASK; + } + } else { + if (!sma_pins[SMA2 - 1]) { + state2 = "SMA2 disabled, U.FL2 Rx"; + *data |= ICE_SMA2_DIR_EN | ICE_SMA2_TX_EN; + } else { + state2 = "SMA2 Tx, U.FL2 Rx"; + *data |= ICE_SMA2_DIR_EN; + } + } - return ice_ptp_set_sma_config_e810t(hw, ptp_pins); + dev_dbg(ice_pf_to_dev(pf), "%s, %s\n", state1, state2); } /** - * ice_verify_pin_e810t - * @info: the driver's PTP info structure - * @pin: Pin index - * @func: Assigned function - * @chan: Assigned channel + * ice_ptp_set_sma_cfg - set the configuration of the SMA control logic + * @pf: Board private structure * - * Verify if pin supports requested pin function. If the Check pins consistency. 
- * Reconfigure the SMA logic attached to the given pin to enable its - * desired functionality + * Return: 0 on success, negative error code otherwise */ -static int -ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin, - enum ptp_pin_function func, unsigned int chan) +static int ice_ptp_set_sma_cfg(struct ice_pf *pf) { - /* Don't allow channel reassignment */ - if (chan != ice_pin_desc_e810t[pin].chan) - return -EOPNOTSUPP; + const struct ice_ptp_pin_desc *ice_pins = pf->ptp.ice_pin_desc; + struct ptp_pin_desc *pins = pf->ptp.pin_desc; + unsigned int sma_pins[ICE_SMA_PINS_NUM] = {}; + int err; + u8 data; - /* Check if functions are properly assigned */ - switch (func) { - case PTP_PF_NONE: - break; - case PTP_PF_EXTTS: - if (pin == UFL1) - return -EOPNOTSUPP; - break; - case PTP_PF_PEROUT: - if (pin == UFL2 || pin == GNSS) - return -EOPNOTSUPP; - break; - case PTP_PF_PHYSYNC: - return -EOPNOTSUPP; - } + /* Read initial pin state value */ + err = ice_read_sma_ctrl(&pf->hw, &data); + if (err) + return err; - return ice_ptp_set_sma_e810t(info, pin, func); + /* Get SMA/U.FL pins states */ + for (int i = 0; i < pf->ptp.info.n_pins; i++) + if (pins[i].func) { + int name_idx = ice_pins[i].name_idx; + + switch (name_idx) { + case SMA1: + case UFL1: + case SMA2: + case UFL2: + sma_pins[name_idx - 1] = pins[i].func; + break; + default: + continue; + } + } + + ice_ptp_update_sma_data(pf, sma_pins, &data); + return ice_write_sma_ctrl(&pf->hw, data); } /** @@ -366,17 +298,30 @@ void ice_ptp_restore_timestamp_mode(struct ice_pf *pf) * @sts: Optional parameter for holding a pair of system timestamps from * the system clock. Will be ignored if NULL is given. */ -static u64 -ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts) +u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf, + struct ptp_system_timestamp *sts) { struct ice_hw *hw = &pf->hw; u32 hi, lo, lo2; u8 tmr_idx; + if (!ice_is_primary(hw)) + hw = ice_get_primary_hw(pf); + tmr_idx = ice_get_ptp_src_clock_index(hw); + guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock); /* Read the system timestamp pre PHC read */ ptp_read_system_prets(sts); + if (hw->mac_type == ICE_MAC_E830) { + u64 clk_time = rd64(hw, E830_GLTSYN_TIME_L(tmr_idx)); + + /* Read the system timestamp post PHC read */ + ptp_read_system_postts(sts); + + return clk_time; + } + lo = rd32(hw, GLTSYN_TIME_L(tmr_idx)); /* Read the system timestamp post PHC read */ @@ -531,7 +476,9 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx) */ void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx) { + struct ice_e810_params *params; struct ice_ptp_port *ptp_port; + unsigned long flags; struct sk_buff *skb; struct ice_pf *pf; @@ -540,6 +487,7 @@ void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx) ptp_port = container_of(tx, struct ice_ptp_port, tx); pf = ptp_port_to_pf(ptp_port); + params = &pf->hw.ptp.phy.e810; /* Drop packets which have waited for more than 2 seconds */ if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) { @@ -556,11 +504,17 @@ void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx) ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx); + spin_lock_irqsave(¶ms->atqbal_wq.lock, flags); + + params->atqbal_flags |= ATQBAL_FLAGS_INTR_IN_PROGRESS; + /* Write TS index to read to the PF register so the FW can read it */ - wr32(&pf->hw, PF_SB_ATQBAL, - TS_LL_READ_TS_INTR | FIELD_PREP(TS_LL_READ_TS_IDX, idx) | - TS_LL_READ_TS); + wr32(&pf->hw, REG_LL_PROXY_H, + REG_LL_PROXY_H_TS_INTR_ENA | 
FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) | + REG_LL_PROXY_H_EXEC); tx->last_ll_ts_idx_read = idx; + + spin_unlock_irqrestore(¶ms->atqbal_wq.lock, flags); } /** @@ -571,35 +525,52 @@ void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx) { struct skb_shared_hwtstamps shhwtstamps = {}; u8 idx = tx->last_ll_ts_idx_read; + struct ice_e810_params *params; struct ice_ptp_port *ptp_port; u64 raw_tstamp, tstamp; bool drop_ts = false; struct sk_buff *skb; + unsigned long flags; + struct device *dev; struct ice_pf *pf; - u32 val; + u32 reg_ll_high; if (!tx->init || tx->last_ll_ts_idx_read < 0) return; ptp_port = container_of(tx, struct ice_ptp_port, tx); pf = ptp_port_to_pf(ptp_port); + dev = ice_pf_to_dev(pf); + params = &pf->hw.ptp.phy.e810; ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx); - val = rd32(&pf->hw, PF_SB_ATQBAL); + spin_lock_irqsave(¶ms->atqbal_wq.lock, flags); + + if (!(params->atqbal_flags & ATQBAL_FLAGS_INTR_IN_PROGRESS)) + dev_dbg(dev, "%s: low latency interrupt request not in progress?\n", + __func__); + + /* Read the low 32 bit value */ + raw_tstamp = rd32(&pf->hw, REG_LL_PROXY_L); + /* Read the status together with high TS part */ + reg_ll_high = rd32(&pf->hw, REG_LL_PROXY_H); + + /* Wake up threads waiting on low latency interface */ + params->atqbal_flags &= ~ATQBAL_FLAGS_INTR_IN_PROGRESS; + + wake_up_locked(¶ms->atqbal_wq); + + spin_unlock_irqrestore(¶ms->atqbal_wq.lock, flags); /* When the bit is cleared, the TS is ready in the register */ - if (val & TS_LL_READ_TS) { + if (reg_ll_high & REG_LL_PROXY_H_EXEC) { dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready"); return; } /* High 8 bit value of the TS is on the bits 16:23 */ - raw_tstamp = FIELD_GET(TS_LL_READ_TS_HIGH, val); - raw_tstamp <<= 32; - - /* Read the low 32 bit value */ - raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH); + raw_tstamp |= ((u64)FIELD_GET(REG_LL_PROXY_H_TS_HIGH, reg_ll_high)) << 32; /* Devices using this interface always verify the timestamp differs * relative to the last cached timestamp value. @@ -801,8 +772,8 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf) struct ice_ptp_port *port; unsigned int i; - mutex_lock(&pf->ptp.ports_owner.lock); - list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member) { + mutex_lock(&pf->adapter->ports.lock); + list_for_each_entry(port, &pf->adapter->ports.ports, list_node) { struct ice_ptp_tx *tx = &port->tx; if (!tx || !tx->init) @@ -810,9 +781,9 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf) ice_ptp_process_tx_tstamp(tx); } - mutex_unlock(&pf->ptp.ports_owner.lock); + mutex_unlock(&pf->adapter->ports.lock); - for (i = 0; i < ICE_MAX_QUAD; i++) { + for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) { u64 tstamp_ready; int err; @@ -975,7 +946,7 @@ ice_ptp_flush_all_tx_tracker(struct ice_pf *pf) { struct ice_ptp_port *port; - list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member) + list_for_each_entry(port, &pf->adapter->ports.ports, list_node) ice_ptp_flush_tx_tracker(ptp_port_to_pf(port), &port->tx); } @@ -1022,11 +993,13 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) * the timestamp block is shared for all ports in the same quad. To avoid * ports using the same timestamp index, logically break the block of * registers into chunks based on the port number. 
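 * For example, assuming the usual four ports per quad, port 5 lands in quad 1
 * and its chunk starts at offset (5 % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X,
 * which is exactly what the block/offset assignments below compute.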
+ * + * Return: 0 on success, -ENOMEM when out of memory */ -static int -ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) +static int ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, + u8 port) { - tx->block = port / ICE_PORTS_PER_QUAD; + tx->block = ICE_GET_QUAD_NUM(port); tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X; tx->len = INDEX_PER_PORT_E82X; tx->has_ready_bitmap = 1; @@ -1035,24 +1008,27 @@ ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) } /** - * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps + * ice_ptp_init_tx - Initialize tracking for Tx timestamps * @pf: Board private structure * @tx: the Tx tracking structure to initialize + * @port: the port this structure tracks * - * Initialize the Tx timestamp tracker for this PF. For E810 devices, each - * port has its own block of timestamps, independent of the other ports. + * Initialize the Tx timestamp tracker for this PF. For all PHYs except E82X, + * each port has its own block of timestamps, independent of the other ports. + * + * Return: 0 on success, -ENOMEM when out of memory */ -static int -ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx) +static int ice_ptp_init_tx(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) { - tx->block = pf->hw.port_info->lport; + tx->block = port; tx->offset = 0; - tx->len = INDEX_PER_PORT_E810; + tx->len = INDEX_PER_PORT; + /* The E810 PHY does not provide a timestamp ready bitmap. Instead, * verify new timestamps against cached copy of the last read * timestamp. */ - tx->has_ready_bitmap = 0; + tx->has_ready_bitmap = pf->hw.mac_type != ICE_MAC_E810; return ice_ptp_alloc_tx_tracker(tx); } @@ -1166,26 +1142,6 @@ static void ice_ptp_reset_cached_phctime(struct ice_pf *pf) } /** - * ice_ptp_read_time - Read the time from the device - * @pf: Board private structure - * @ts: timespec structure to hold the current time value - * @sts: Optional parameter for holding a pair of system timestamps from - * the system clock. Will be ignored if NULL is given. - * - * This function reads the source clock registers and stores them in a timespec. - * However, since the registers are 64 bits of nanoseconds, we must convert the - * result to a timespec before we can return. 
- */ -static void -ice_ptp_read_time(struct ice_pf *pf, struct timespec64 *ts, - struct ptp_system_timestamp *sts) -{ - u64 time_ns = ice_ptp_read_src_clk_reg(pf, sts); - - *ts = ns_to_timespec64(time_ns); -} - -/** * ice_ptp_write_init - Set PHC time to provided value * @pf: Board private structure * @ts: timespec structure that holds the new time value @@ -1229,12 +1185,7 @@ static u64 ice_base_incval(struct ice_pf *pf) struct ice_hw *hw = &pf->hw; u64 incval; - if (ice_is_e810(hw)) - incval = ICE_PTP_NOMINAL_INCVAL_E810; - else if (ice_e82x_time_ref(hw) < NUM_ICE_TIME_REF_FREQ) - incval = ice_e82x_nominal_incval(ice_e82x_time_ref(hw)); - else - incval = UNKNOWN_INCVAL_E82X; + incval = ice_get_base_incval(hw); dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n", incval); @@ -1248,8 +1199,8 @@ static u64 ice_base_incval(struct ice_pf *pf) */ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port) { - int quad = port->port_num / ICE_PORTS_PER_QUAD; int offs = port->port_num % ICE_PORTS_PER_QUAD; + int quad = ICE_GET_QUAD_NUM(port->port_num); struct ice_pf *pf; struct ice_hw *hw; u32 val, phy_sts; @@ -1362,15 +1313,25 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port) struct ice_hw *hw = &pf->hw; int err; - if (ice_is_e810(hw)) - return 0; - mutex_lock(&ptp_port->ps_lock); - kthread_cancel_delayed_work_sync(&ptp_port->ov_work); + switch (hw->mac_type) { + case ICE_MAC_E810: + case ICE_MAC_E830: + err = 0; + break; + case ICE_MAC_GENERIC: + kthread_cancel_delayed_work_sync(&ptp_port->ov_work); - err = ice_stop_phy_timer_e82x(hw, port, true); - if (err) + err = ice_stop_phy_timer_e82x(hw, port, true); + break; + case ICE_MAC_GENERIC_3K_E825: + err = ice_stop_phy_timer_eth56g(hw, port, true); + break; + default: + err = -ENODEV; + } + if (err && err != -EBUSY) dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n", port, err); @@ -1396,35 +1357,48 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port) unsigned long flags; int err; - if (ice_is_e810(hw)) - return 0; - if (!ptp_port->link_up) return ice_ptp_port_phy_stop(ptp_port); mutex_lock(&ptp_port->ps_lock); - kthread_cancel_delayed_work_sync(&ptp_port->ov_work); + switch (hw->mac_type) { + case ICE_MAC_E810: + case ICE_MAC_E830: + err = 0; + break; + case ICE_MAC_GENERIC: + /* Start the PHY timer in Vernier mode */ + kthread_cancel_delayed_work_sync(&ptp_port->ov_work); - /* temporarily disable Tx timestamps while calibrating PHY offset */ - spin_lock_irqsave(&ptp_port->tx.lock, flags); - ptp_port->tx.calibrating = true; - spin_unlock_irqrestore(&ptp_port->tx.lock, flags); - ptp_port->tx_fifo_busy_cnt = 0; + /* temporarily disable Tx timestamps while calibrating + * PHY offset + */ + spin_lock_irqsave(&ptp_port->tx.lock, flags); + ptp_port->tx.calibrating = true; + spin_unlock_irqrestore(&ptp_port->tx.lock, flags); + ptp_port->tx_fifo_busy_cnt = 0; - /* Start the PHY timer in Vernier mode */ - err = ice_start_phy_timer_e82x(hw, port); - if (err) - goto out_unlock; + /* Start the PHY timer in Vernier mode */ + err = ice_start_phy_timer_e82x(hw, port); + if (err) + break; - /* Enable Tx timestamps right away */ - spin_lock_irqsave(&ptp_port->tx.lock, flags); - ptp_port->tx.calibrating = false; - spin_unlock_irqrestore(&ptp_port->tx.lock, flags); + /* Enable Tx timestamps right away */ + spin_lock_irqsave(&ptp_port->tx.lock, flags); + ptp_port->tx.calibrating = false; + spin_unlock_irqrestore(&ptp_port->tx.lock, flags); - kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 
0); + kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, + 0); + break; + case ICE_MAC_GENERIC_3K_E825: + err = ice_start_phy_timer_eth56g(hw, port); + break; + default: + err = -ENODEV; + } -out_unlock: if (err) dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n", port, err); @@ -1437,10 +1411,9 @@ out_unlock: /** * ice_ptp_link_change - Reconfigure PTP after link status change * @pf: Board private structure - * @port: Port for which the PHY start is set * @linkup: Link is up or down */ -void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) +void ice_ptp_link_change(struct ice_pf *pf, bool linkup) { struct ice_ptp_port *ptp_port; struct ice_hw *hw = &pf->hw; @@ -1448,21 +1421,22 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) if (pf->ptp.state != ICE_PTP_READY) return; - if (WARN_ON_ONCE(port >= ICE_NUM_EXTERNAL_PORTS)) - return; - ptp_port = &pf->ptp.port; - if (WARN_ON_ONCE(ptp_port->port_num != port)) - return; /* Update cached link status for this port immediately */ ptp_port->link_up = linkup; - switch (hw->phy_model) { - case ICE_PHY_E810: - /* Do not reconfigure E810 PHY */ + /* Skip HW writes if reset is in progress */ + if (pf->hw.reset_ongoing) + return; + + switch (hw->mac_type) { + case ICE_MAC_E810: + case ICE_MAC_E830: + /* Do not reconfigure E810 or E830 PHY */ return; - case ICE_PHY_E82X: + case ICE_MAC_GENERIC: + case ICE_MAC_GENERIC_3K_E825: ice_ptp_port_phy_restart(ptp_port); return; default: @@ -1476,42 +1450,61 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) * @ena: bool value to enable or disable interrupt * @threshold: Minimum number of packets at which intr is triggered * - * Utility function to enable or disable Tx timestamp interrupt and threshold + * Utility function to configure all the PHY interrupt settings, including + * whether the PHY interrupt is enabled, and what threshold to use. Also + * configures The E82X timestamp owner to react to interrupts from all PHYs. 
+ * + * Return: 0 on success, -EOPNOTSUPP when PHY model incorrect, other error codes + * when failed to configure PHY interrupt for E82X */ static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold) { + struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - int err = 0; - int quad; - u32 val; ice_ptp_reset_ts_memory(hw); - for (quad = 0; quad < ICE_MAX_QUAD; quad++) { - err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, - &val); - if (err) - break; - - if (ena) { - val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; - val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M; - val |= FIELD_PREP(Q_REG_TX_MEM_GBL_CFG_INTR_THR_M, - threshold); - } else { - val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; + switch (hw->mac_type) { + case ICE_MAC_E810: + case ICE_MAC_E830: + return 0; + case ICE_MAC_GENERIC: { + int quad; + + for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports); + quad++) { + int err; + + err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold); + if (err) { + dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n", + quad, err); + return err; + } } - err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, - val); - if (err) - break; + return 0; } + case ICE_MAC_GENERIC_3K_E825: { + int port; - if (err) - dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, err %d\n", - err); - return err; + for (port = 0; port < hw->ptp.num_lports; port++) { + int err; + + err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold); + if (err) { + dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n", + port, err); + return err; + } + } + + return 0; + } + case ICE_MAC_UNKNOWN: + default: + return -EOPNOTSUPP; + } } /** @@ -1531,10 +1524,10 @@ static void ice_ptp_restart_all_phy(struct ice_pf *pf) { struct list_head *entry; - list_for_each(entry, &pf->ptp.ports_owner.ports) { + list_for_each(entry, &pf->adapter->ports.ports) { struct ice_ptp_port *port = list_entry(entry, struct ice_ptp_port, - list_member); + list_node); if (port->link_up) ice_ptp_port_phy_restart(port); @@ -1578,6 +1571,10 @@ void ice_ptp_extts_event(struct ice_pf *pf) u8 chan, tmr_idx; u32 hi, lo; + /* Don't process timestamp events if PTP is not ready */ + if (pf->ptp.state != ICE_PTP_READY) + return; + tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; /* Event time is captured by one of the two matched registers * GLTSYN_EVNT_L: 32 LSB of sampled time event @@ -1585,45 +1582,62 @@ void ice_ptp_extts_event(struct ice_pf *pf) * Event is defined in GLTSYN_EVNT_0 register */ for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) { + int pin_desc_idx; + /* Check if channel is enabled */ - if (pf->ptp.ext_ts_irq & (1 << chan)) { - lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx)); - hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx)); - event.timestamp = (((u64)hi) << 32) | lo; - event.type = PTP_CLOCK_EXTTS; - event.index = chan; - - /* Fire event */ - ptp_clock_event(pf->ptp.clock, &event); - pf->ptp.ext_ts_irq &= ~(1 << chan); + if (!(pf->ptp.ext_ts_irq & (1 << chan))) + continue; + + lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx)); + hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx)); + event.timestamp = (u64)hi << 32 | lo; + + /* Add delay compensation */ + pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan); + if (pin_desc_idx >= 0) { + const struct ice_ptp_pin_desc *desc; + + desc = &pf->ptp.ice_pin_desc[pin_desc_idx]; + event.timestamp -= desc->delay[0]; } + + event.type = PTP_CLOCK_EXTTS; + event.index = chan; + pf->ptp.ext_ts_irq &= ~(1 << chan); + ptp_clock_event(pf->ptp.clock, 
&event); } } /** * ice_ptp_cfg_extts - Configure EXTTS pin and channel * @pf: Board private structure - * @ena: true to enable; false to disable - * @chan: GPIO channel (0-3) - * @gpio_pin: GPIO pin - * @extts_flags: request flags from the ptp_extts_request.flags + * @rq: External timestamp request + * @on: Enable/disable flag + * + * Configure an external timestamp event on the requested channel. + * + * Return: 0 on success, negative error code otherwise */ -static int -ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin, - unsigned int extts_flags) +static int ice_ptp_cfg_extts(struct ice_pf *pf, struct ptp_extts_request *rq, + int on) { - u32 func, aux_reg, gpio_reg, irq_reg; + u32 aux_reg, gpio_reg, irq_reg; struct ice_hw *hw = &pf->hw; + unsigned int chan, gpio_pin; + int pin_desc_idx; u8 tmr_idx; - if (chan > (unsigned int)pf->ptp.info.n_ext_ts) - return -EINVAL; - tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; + chan = rq->index; + + pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan); + if (pin_desc_idx < 0) + return -EIO; + gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[0]; irq_reg = rd32(hw, PFINT_OICR_ENA); - if (ena) { + if (on) { /* Enable the interrupt */ irq_reg |= PFINT_OICR_TSYN_EVNT_M; aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M; @@ -1632,24 +1646,32 @@ ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin, #define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE BIT(1) /* set event level to requested edge */ - if (extts_flags & PTP_FALLING_EDGE) + if (rq->flags & PTP_FALLING_EDGE) aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE; - if (extts_flags & PTP_RISING_EDGE) + if (rq->flags & PTP_RISING_EDGE) aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE; /* Write GPIO CTL reg. * 0x1 is input sampled by EVENT register(channel) * + num_in_channels * tmr_idx */ - func = 1 + chan + (tmr_idx * 3); - gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func); - pf->ptp.ext_ts_chan |= (1 << chan); + gpio_reg = FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, + 1 + chan + (tmr_idx * 3)); } else { + bool last_enabled = true; + /* clear the values we set to reset defaults */ aux_reg = 0; gpio_reg = 0; - pf->ptp.ext_ts_chan &= ~(1 << chan); - if (!pf->ptp.ext_ts_chan) + + for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts; i++) + if ((pf->ptp.extts_rqs[i].flags & + PTP_ENABLE_FEATURE) && + i != chan) { + last_enabled = false; + } + + if (last_enabled) irq_reg &= ~PFINT_OICR_TSYN_EVNT_M; } @@ -1661,253 +1683,347 @@ ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin, } /** - * ice_ptp_cfg_clkout - Configure clock to generate periodic wave + * ice_ptp_disable_all_extts - Disable all EXTTS channels * @pf: Board private structure - * @chan: GPIO channel (0-3) - * @config: desired periodic clk configuration. NULL will disable channel - * @store: If set to true the values will be stored - * - * Configure the internal clock generator modules to generate the clock wave of - * specified period. 
*/ -static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan, - struct ice_perout_channel *config, bool store) +static void ice_ptp_disable_all_extts(struct ice_pf *pf) { - u64 current_time, period, start_time, phase; - struct ice_hw *hw = &pf->hw; - u32 func, val, gpio_pin; - u8 tmr_idx; + for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts ; i++) + if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE) + ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i], + false); - tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; + synchronize_irq(pf->oicr_irq.virq); +} - /* 0. Reset mode & out_en in AUX_OUT */ - wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0); +/** + * ice_ptp_enable_all_extts - Enable all EXTTS channels + * @pf: Board private structure + * + * Called during reset to restore user configuration. + */ +static void ice_ptp_enable_all_extts(struct ice_pf *pf) +{ + for (unsigned int i = 0; i < pf->ptp.info.n_ext_ts ; i++) + if (pf->ptp.extts_rqs[i].flags & PTP_ENABLE_FEATURE) + ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[i], + true); +} - /* If we're disabling the output, clear out CLKO and TGT and keep - * output level low - */ - if (!config || !config->ena) { - wr32(hw, GLTSYN_CLKO(chan, tmr_idx), 0); - wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), 0); - wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), 0); +/** + * ice_ptp_write_perout - Write periodic wave parameters to HW + * @hw: pointer to the HW struct + * @chan: target channel + * @gpio_pin: target GPIO pin + * @start: target time to start periodic output + * @period: target period + * + * Return: 0 on success, negative error code otherwise + */ +static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan, + unsigned int gpio_pin, u64 start, u64 period) +{ - val = GLGEN_GPIO_CTL_PIN_DIR_M; - gpio_pin = pf->ptp.perout_channels[chan].gpio_pin; - wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val); + u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; + u32 val = 0; - /* Store the value if requested */ - if (store) - memset(&pf->ptp.perout_channels[chan], 0, - sizeof(struct ice_perout_channel)); + /* 0. Reset mode & out_en in AUX_OUT */ + wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0); - return 0; - } - period = config->period; - start_time = config->start_time; - div64_u64_rem(start_time, period, &phase); - gpio_pin = config->gpio_pin; + if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) { + int err; - /* 1. Write clkout with half of required period value */ - if (period & 0x1) { - dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n"); - goto err; + /* Enable/disable CGU 1PPS output for E825C */ + err = ice_cgu_cfg_pps_out(hw, !!period); + if (err) + return err; } + /* 1. Write perout with half of required period value. + * HW toggles output when source clock hits the TGT and then adds + * GLTSYN_CLKO value to the target, so it ends up with 50% duty cycle. + */ period >>= 1; - /* For proper operation, the GLTSYN_CLKO must be larger than clock tick + /* For proper operation, GLTSYN_CLKO must be larger than clock tick and + * period has to fit in 32 bit register. 
*/ #define MIN_PULSE 3 - if (period <= MIN_PULSE || period > U32_MAX) { - dev_err(ice_pf_to_dev(pf), "CLK Period must be > %d && < 2^33", - MIN_PULSE * 2); - goto err; + if (!!period && (period <= MIN_PULSE || period > U32_MAX)) { + dev_err(ice_hw_to_dev(hw), "CLK period ticks must be >= %d && <= 2^32", + MIN_PULSE); + return -EIO; } wr32(hw, GLTSYN_CLKO(chan, tmr_idx), lower_32_bits(period)); - /* Allow time for programming before start_time is hit */ - current_time = ice_ptp_read_src_clk_reg(pf, NULL); - - /* if start time is in the past start the timer at the nearest second - * maintaining phase - */ - if (start_time < current_time) - start_time = div64_u64(current_time + NSEC_PER_SEC - 1, - NSEC_PER_SEC) * NSEC_PER_SEC + phase; - - if (ice_is_e810(hw)) - start_time -= E810_OUT_PROP_DELAY_NS; - else - start_time -= ice_e82x_pps_delay(ice_e82x_time_ref(hw)); - /* 2. Write TARGET time */ - wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start_time)); - wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start_time)); + wr32(hw, GLTSYN_TGT_L(chan, tmr_idx), lower_32_bits(start)); + wr32(hw, GLTSYN_TGT_H(chan, tmr_idx), upper_32_bits(start)); /* 3. Write AUX_OUT register */ - val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M; + if (!!period) + val = GLTSYN_AUX_OUT_0_OUT_ENA_M | GLTSYN_AUX_OUT_0_OUTMOD_M; wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), val); /* 4. write GPIO CTL reg */ - func = 8 + chan + (tmr_idx * 4); - val = GLGEN_GPIO_CTL_PIN_DIR_M | - FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, func); + val = GLGEN_GPIO_CTL_PIN_DIR_M; + if (!!period) + val |= FIELD_PREP(GLGEN_GPIO_CTL_PIN_FUNC_M, + 8 + chan + (tmr_idx * 4)); + wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val); + ice_flush(hw); + + return 0; +} + +/** + * ice_ptp_cfg_perout - Configure clock to generate periodic wave + * @pf: Board private structure + * @rq: Periodic output request + * @on: Enable/disable flag + * + * Configure the internal clock generator modules to generate the clock wave of + * specified period. + * + * Return: 0 on success, negative error code otherwise + */ +static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq, + int on) +{ + unsigned int gpio_pin, prop_delay_ns; + u64 clk, period, start, phase; + struct ice_hw *hw = &pf->hw; + int pin_desc_idx; + + pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_PEROUT, rq->index); + if (pin_desc_idx < 0) + return -EIO; + + gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1]; + prop_delay_ns = pf->ptp.ice_pin_desc[pin_desc_idx].delay[1]; + period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec; + + /* If we're disabling the output or period is 0, clear out CLKO and TGT + * and keep output level low. 
+ */ + if (!on || !period) + return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0); + + if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 && + period != NSEC_PER_SEC && hw->mac_type == ICE_MAC_GENERIC) { + dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n"); + return -EOPNOTSUPP; + } - /* Store the value if requested */ - if (store) { - memcpy(&pf->ptp.perout_channels[chan], config, - sizeof(struct ice_perout_channel)); - pf->ptp.perout_channels[chan].start_time = phase; + if (period & 0x1) { + dev_err(ice_pf_to_dev(pf), "CLK Period must be an even value\n"); + return -EIO; } - return 0; -err: - dev_err(ice_pf_to_dev(pf), "PTP failed to cfg per_clk\n"); - return -EFAULT; + start = rq->start.sec * NSEC_PER_SEC + rq->start.nsec; + + /* If PTP_PEROUT_PHASE is set, rq has phase instead of start time */ + if (rq->flags & PTP_PEROUT_PHASE) + phase = start; + else + div64_u64_rem(start, period, &phase); + + /* If we have only phase or start time is in the past, start the timer + * at the next multiple of period, maintaining phase at least 0.5 second + * from now, so we have time to write it to HW. + */ + clk = ice_ptp_read_src_clk_reg(pf, NULL) + NSEC_PER_MSEC * 500; + if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns) + start = div64_u64(clk + period - 1, period) * period + phase; + + /* Compensate for propagation delay from the generator to the pin. */ + start -= prop_delay_ns; + + return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period); } /** - * ice_ptp_disable_all_clkout - Disable all currently configured outputs - * @pf: pointer to the PF structure + * ice_ptp_disable_all_perout - Disable all currently configured outputs + * @pf: Board private structure * * Disable all currently configured clock outputs. This is necessary before - * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_clkout to + * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_perout to * re-enable the clocks again. */ -static void ice_ptp_disable_all_clkout(struct ice_pf *pf) +static void ice_ptp_disable_all_perout(struct ice_pf *pf) { - uint i; - - for (i = 0; i < pf->ptp.info.n_per_out; i++) - if (pf->ptp.perout_channels[i].ena) - ice_ptp_cfg_clkout(pf, i, NULL, false); + for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++) + if (pf->ptp.perout_rqs[i].period.sec || + pf->ptp.perout_rqs[i].period.nsec) + ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i], + false); } /** - * ice_ptp_enable_all_clkout - Enable all configured periodic clock outputs - * @pf: pointer to the PF structure + * ice_ptp_enable_all_perout - Enable all configured periodic clock outputs + * @pf: Board private structure * * Enable all currently configured clock outputs. Use this after - * ice_ptp_disable_all_clkout to reconfigure the output signals according to + * ice_ptp_disable_all_perout to reconfigure the output signals according to * their configuration. 
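The start-time rounding done in ice_ptp_cfg_perout() can be illustrated with plain arithmetic: for the case where only a phase was given or the requested start lies in the past, take the current PHC time plus a 0.5 s programming margin, round up to the next multiple of the period, add the requested phase, and subtract the pin's propagation delay. A small self-contained sketch of that computation (helper name and sample numbers are made up):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC 1000000ULL

/* Hypothetical mirror of the alignment in ice_ptp_cfg_perout(): start at
 * the next period boundary at least 0.5 s in the future, keep the
 * requested phase, and pre-compensate the output propagation delay.
 */
static uint64_t perout_next_start(uint64_t now_ns, uint64_t period_ns,
				  uint64_t phase_ns, uint32_t prop_delay_ns)
{
	uint64_t clk = now_ns + 500 * NSEC_PER_MSEC;
	uint64_t start = (clk + period_ns - 1) / period_ns * period_ns + phase_ns;

	return start - prop_delay_ns;
}

int main(void)
{
	/* e.g. PHC at 123.4 s, 1 s period, 250 ms phase, 120 ns pin delay */
	uint64_t start = perout_next_start(123400000000ULL, 1000000000ULL,
					   250000000ULL, 120);

	printf("program TGT with %" PRIu64 " ns\n", start);
	return 0;
}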
*/ -static void ice_ptp_enable_all_clkout(struct ice_pf *pf) +static void ice_ptp_enable_all_perout(struct ice_pf *pf) { - uint i; - - for (i = 0; i < pf->ptp.info.n_per_out; i++) - if (pf->ptp.perout_channels[i].ena) - ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i], - false); + for (unsigned int i = 0; i < pf->ptp.info.n_per_out; i++) + if (pf->ptp.perout_rqs[i].period.sec || + pf->ptp.perout_rqs[i].period.nsec) + ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[i], + true); } /** - * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC - * @info: the driver's PTP info structure - * @rq: The requested feature to change - * @on: Enable/disable flag + * ice_ptp_disable_shared_pin - Disable enabled pin that shares GPIO + * @pf: Board private structure + * @pin: Pin index + * @func: Assigned function + * + * Return: 0 on success, negative error code otherwise */ -static int -ice_ptp_gpio_enable_e810(struct ptp_clock_info *info, - struct ptp_clock_request *rq, int on) +static int ice_ptp_disable_shared_pin(struct ice_pf *pf, unsigned int pin, + enum ptp_pin_function func) { - struct ice_pf *pf = ptp_info_to_pf(info); - struct ice_perout_channel clk_cfg = {0}; - bool sma_pres = false; - unsigned int chan; - u32 gpio_pin; - int err; + unsigned int gpio_pin; - if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) - sma_pres = true; + switch (func) { + case PTP_PF_PEROUT: + gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[1]; + break; + case PTP_PF_EXTTS: + gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[0]; + break; + default: + return -EOPNOTSUPP; + } - switch (rq->type) { - case PTP_CLK_REQ_PEROUT: - chan = rq->perout.index; - if (sma_pres) { - if (chan == ice_pin_desc_e810t[SMA1].chan) - clk_cfg.gpio_pin = GPIO_20; - else if (chan == ice_pin_desc_e810t[SMA2].chan) - clk_cfg.gpio_pin = GPIO_22; - else - return -1; - } else if (ice_is_e810t(&pf->hw)) { - if (chan == 0) - clk_cfg.gpio_pin = GPIO_20; - else - clk_cfg.gpio_pin = GPIO_22; - } else if (chan == PPS_CLK_GEN_CHAN) { - clk_cfg.gpio_pin = PPS_PIN_INDEX; - } else { - clk_cfg.gpio_pin = chan; - } + for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) { + struct ptp_pin_desc *pin_desc = &pf->ptp.pin_desc[i]; + unsigned int chan = pin_desc->chan; - clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) + - rq->perout.period.nsec); - clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) + - rq->perout.start.nsec); - clk_cfg.ena = !!on; + /* Skip pin idx from the request */ + if (i == pin) + continue; - err = ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true); - break; - case PTP_CLK_REQ_EXTTS: - chan = rq->extts.index; - if (sma_pres) { - if (chan < ice_pin_desc_e810t[SMA2].chan) - gpio_pin = GPIO_21; - else - gpio_pin = GPIO_23; - } else if (ice_is_e810t(&pf->hw)) { - if (chan == 0) - gpio_pin = GPIO_21; - else - gpio_pin = GPIO_23; - } else { - gpio_pin = chan; + if (pin_desc->func == PTP_PF_PEROUT && + pf->ptp.ice_pin_desc[i].gpio[1] == gpio_pin) { + pf->ptp.perout_rqs[chan].period.sec = 0; + pf->ptp.perout_rqs[chan].period.nsec = 0; + pin_desc->func = PTP_PF_NONE; + pin_desc->chan = 0; + dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared output GPIO pin %u\n", + i, gpio_pin); + return ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[chan], + false); + } else if (pf->ptp.pin_desc->func == PTP_PF_EXTTS && + pf->ptp.ice_pin_desc[i].gpio[0] == gpio_pin) { + pf->ptp.extts_rqs[chan].flags &= ~PTP_ENABLE_FEATURE; + pin_desc->func = PTP_PF_NONE; + pin_desc->chan = 0; + dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared input GPIO pin 
%u\n", + i, gpio_pin); + return ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[chan], + false); } + } + + return 0; +} - err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin, - rq->extts.flags); +/** + * ice_verify_pin - verify if pin supports requested pin function + * @info: the driver's PTP info structure + * @pin: Pin index + * @func: Assigned function + * @chan: Assigned channel + * + * Return: 0 on success, -EOPNOTSUPP when function is not supported. + */ +static int ice_verify_pin(struct ptp_clock_info *info, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + struct ice_pf *pf = ptp_info_to_pf(info); + const struct ice_ptp_pin_desc *pin_desc; + + pin_desc = &pf->ptp.ice_pin_desc[pin]; + + /* Is assigned function allowed? */ + switch (func) { + case PTP_PF_EXTTS: + if (pin_desc->gpio[0] < 0) + return -EOPNOTSUPP; break; + case PTP_PF_PEROUT: + if (pin_desc->gpio[1] < 0) + return -EOPNOTSUPP; + break; + case PTP_PF_NONE: + break; + case PTP_PF_PHYSYNC: default: return -EOPNOTSUPP; } - return err; + /* On adapters with SMA_CTRL disable other pins that share same GPIO */ + if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { + ice_ptp_disable_shared_pin(pf, pin, func); + pf->ptp.pin_desc[pin].func = func; + pf->ptp.pin_desc[pin].chan = chan; + return ice_ptp_set_sma_cfg(pf); + } + + return 0; } /** - * ice_ptp_gpio_enable_e823 - Enable/disable ancillary features of PHC - * @info: the driver's PTP info structure + * ice_ptp_gpio_enable - Enable/disable ancillary features of PHC + * @info: The driver's PTP info structure * @rq: The requested feature to change * @on: Enable/disable flag + * + * Return: 0 on success, negative error code otherwise */ -static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info, - struct ptp_clock_request *rq, int on) +static int ice_ptp_gpio_enable(struct ptp_clock_info *info, + struct ptp_clock_request *rq, int on) { struct ice_pf *pf = ptp_info_to_pf(info); - struct ice_perout_channel clk_cfg = {0}; int err; switch (rq->type) { - case PTP_CLK_REQ_PPS: - clk_cfg.gpio_pin = PPS_PIN_INDEX; - clk_cfg.period = NSEC_PER_SEC; - clk_cfg.ena = !!on; + case PTP_CLK_REQ_PEROUT: + { + struct ptp_perout_request *cached = + &pf->ptp.perout_rqs[rq->perout.index]; - err = ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true); - break; + err = ice_ptp_cfg_perout(pf, &rq->perout, on); + if (!err) { + *cached = rq->perout; + } else { + cached->period.sec = 0; + cached->period.nsec = 0; + } + return err; + } case PTP_CLK_REQ_EXTTS: - err = ice_ptp_cfg_extts(pf, !!on, rq->extts.index, - TIME_SYNC_PIN_INDEX, rq->extts.flags); - break; + { + struct ptp_extts_request *cached = + &pf->ptp.extts_rqs[rq->extts.index]; + + err = ice_ptp_cfg_extts(pf, &rq->extts, on); + if (!err) + *cached = rq->extts; + else + cached->flags &= ~PTP_ENABLE_FEATURE; + return err; + } default: return -EOPNOTSUPP; } - - return err; } /** @@ -1925,16 +2041,10 @@ ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts, struct ptp_system_timestamp *sts) { struct ice_pf *pf = ptp_info_to_pf(info); - struct ice_hw *hw = &pf->hw; - - if (!ice_ptp_lock(hw)) { - dev_err(ice_pf_to_dev(pf), "PTP failed to get time\n"); - return -EBUSY; - } - - ice_ptp_read_time(pf, ts, sts); - ice_ptp_unlock(hw); + u64 time_ns; + time_ns = ice_ptp_read_src_clk_reg(pf, sts); + *ts = ns_to_timespec64(time_ns); return 0; } @@ -1954,11 +2064,14 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) struct ice_hw *hw = &pf->hw; int err; - /* For Vernier mode, we need to recalibrate 
after new settime - * Start with disabling timestamp block + /* For Vernier mode on E82X, we need to recalibrate after new settime. + * Start with marking timestamps as invalid. */ - if (pf->ptp.port.link_up) - ice_ptp_port_phy_stop(&pf->ptp.port); + if (hw->mac_type == ICE_MAC_GENERIC) { + err = ice_ptp_clear_phy_offset_ready_e82x(hw); + if (err) + dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n"); + } if (!ice_ptp_lock(hw)) { err = -EBUSY; @@ -1966,7 +2079,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) } /* Disable periodic outputs */ - ice_ptp_disable_all_clkout(pf); + ice_ptp_disable_all_perout(pf); err = ice_ptp_write_init(pf, &ts64); ice_ptp_unlock(hw); @@ -1975,10 +2088,10 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) ice_ptp_reset_cached_phctime(pf); /* Reenable periodic outputs */ - ice_ptp_enable_all_clkout(pf); + ice_ptp_enable_all_perout(pf); /* Recalibrate and re-enable timestamp blocks for E822/E823 */ - if (hw->phy_model == ICE_PHY_E82X) + if (hw->mac_type == ICE_MAC_GENERIC) ice_ptp_restart_all_phy(pf); exit: if (err) { @@ -2037,12 +2150,12 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta) } /* Disable periodic outputs */ - ice_ptp_disable_all_clkout(pf); + ice_ptp_disable_all_perout(pf); err = ice_ptp_write_adj(pf, delta); /* Reenable periodic outputs */ - ice_ptp_enable_all_clkout(pf); + ice_ptp_enable_all_perout(pf); ice_ptp_unlock(hw); @@ -2056,92 +2169,157 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta) return 0; } +/** + * struct ice_crosststamp_cfg - Device cross timestamp configuration + * @lock_reg: The hardware semaphore lock to use + * @lock_busy: Bit in the semaphore lock indicating the lock is busy + * @ctl_reg: The hardware register to request cross timestamp + * @ctl_active: Bit in the control register to request cross timestamp + * @art_time_l: Lower 32-bits of ART system time + * @art_time_h: Upper 32-bits of ART system time + * @dev_time_l: Lower 32-bits of device time (per timer index) + * @dev_time_h: Upper 32-bits of device time (per timer index) + */ +struct ice_crosststamp_cfg { + /* HW semaphore lock register */ + u32 lock_reg; + u32 lock_busy; + + /* Capture control register */ + u32 ctl_reg; + u32 ctl_active; + + /* Time storage */ + u32 art_time_l; + u32 art_time_h; + u32 dev_time_l[2]; + u32 dev_time_h[2]; +}; + +static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e82x = { + .lock_reg = PFHH_SEM, + .lock_busy = PFHH_SEM_BUSY_M, + .ctl_reg = GLHH_ART_CTL, + .ctl_active = GLHH_ART_CTL_ACTIVE_M, + .art_time_l = GLHH_ART_TIME_L, + .art_time_h = GLHH_ART_TIME_H, + .dev_time_l[0] = GLTSYN_HHTIME_L(0), + .dev_time_h[0] = GLTSYN_HHTIME_H(0), + .dev_time_l[1] = GLTSYN_HHTIME_L(1), + .dev_time_h[1] = GLTSYN_HHTIME_H(1), +}; + #ifdef CONFIG_ICE_HWTS +static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e830 = { + .lock_reg = E830_PFPTM_SEM, + .lock_busy = E830_PFPTM_SEM_BUSY_M, + .ctl_reg = E830_GLPTM_ART_CTL, + .ctl_active = E830_GLPTM_ART_CTL_ACTIVE_M, + .art_time_l = E830_GLPTM_ART_TIME_L, + .art_time_h = E830_GLPTM_ART_TIME_H, + .dev_time_l[0] = E830_GLTSYN_PTMTIME_L(0), + .dev_time_h[0] = E830_GLTSYN_PTMTIME_H(0), + .dev_time_l[1] = E830_GLTSYN_PTMTIME_L(1), + .dev_time_h[1] = E830_GLTSYN_PTMTIME_H(1), +}; + +#endif /* CONFIG_ICE_HWTS */ +/** + * struct ice_crosststamp_ctx - Device cross timestamp context + * @snapshot: snapshot of system clocks for historic interpolation + * @pf: pointer to the 
PF private structure + * @cfg: pointer to hardware configuration for cross timestamp + */ +struct ice_crosststamp_ctx { + struct system_time_snapshot snapshot; + struct ice_pf *pf; + const struct ice_crosststamp_cfg *cfg; +}; + /** - * ice_ptp_get_syncdevicetime - Get the cross time stamp info + * ice_capture_crosststamp - Capture a device/system cross timestamp * @device: Current device time * @system: System counter value read synchronously with device time - * @ctx: Context provided by timekeeping code + * @__ctx: Context passed from ice_ptp_getcrosststamp * * Read device and system (ART) clock simultaneously and return the corrected * clock values in ns. + * + * Return: zero on success, or a negative error code on failure. */ -static int -ice_ptp_get_syncdevicetime(ktime_t *device, - struct system_counterval_t *system, - void *ctx) +static int ice_capture_crosststamp(ktime_t *device, + struct system_counterval_t *system, + void *__ctx) { - struct ice_pf *pf = (struct ice_pf *)ctx; - struct ice_hw *hw = &pf->hw; - u32 hh_lock, hh_art_ctl; - int i; + struct ice_crosststamp_ctx *ctx = __ctx; + const struct ice_crosststamp_cfg *cfg; + u32 lock, ctl, ts_lo, ts_hi, tmr_idx; + struct ice_pf *pf; + struct ice_hw *hw; + int err; + u64 ts; -#define MAX_HH_HW_LOCK_TRIES 5 -#define MAX_HH_CTL_LOCK_TRIES 100 + cfg = ctx->cfg; + pf = ctx->pf; + hw = &pf->hw; - for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) { - /* Get the HW lock */ - hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); - if (hh_lock & PFHH_SEM_BUSY_M) { - usleep_range(10000, 15000); - continue; - } - break; - } - if (hh_lock & PFHH_SEM_BUSY_M) { - dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n"); + tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; + if (tmr_idx > 1) + return -EINVAL; + + /* Poll until we obtain the cross-timestamp hardware semaphore */ + err = rd32_poll_timeout(hw, cfg->lock_reg, lock, + !(lock & cfg->lock_busy), + 10 * USEC_PER_MSEC, 50 * USEC_PER_MSEC); + if (err) { + dev_err(ice_pf_to_dev(pf), "PTP failed to get cross timestamp lock\n"); return -EBUSY; } + /* Snapshot system time for historic interpolation */ + ktime_get_snapshot(&ctx->snapshot); + /* Program cmd to master timer */ ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME); /* Start the ART and device clock sync sequence */ - hh_art_ctl = rd32(hw, GLHH_ART_CTL); - hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M; - wr32(hw, GLHH_ART_CTL, hh_art_ctl); - - for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) { - /* Wait for sync to complete */ - hh_art_ctl = rd32(hw, GLHH_ART_CTL); - if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) { - udelay(1); - continue; - } else { - u32 hh_ts_lo, hh_ts_hi, tmr_idx; - u64 hh_ts; - - tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc; - /* Read ART time */ - hh_ts_lo = rd32(hw, GLHH_ART_TIME_L); - hh_ts_hi = rd32(hw, GLHH_ART_TIME_H); - hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo; - *system = convert_art_ns_to_tsc(hh_ts); - /* Read Device source clock time */ - hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx)); - hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx)); - hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo; - *device = ns_to_ktime(hh_ts); - break; - } - } + ctl = rd32(hw, cfg->ctl_reg); + ctl |= cfg->ctl_active; + wr32(hw, cfg->ctl_reg, ctl); + /* Poll until hardware completes the capture */ + err = rd32_poll_timeout(hw, cfg->ctl_reg, ctl, !(ctl & cfg->ctl_active), + 5, 20 * USEC_PER_MSEC); + if (err) + goto err_timeout; + + /* Read ART system time */ + ts_lo = rd32(hw, cfg->art_time_l); + ts_hi = rd32(hw, cfg->art_time_h); + ts = 
((u64)ts_hi << 32) | ts_lo; + system->cycles = ts; + system->cs_id = CSID_X86_ART; + + /* Read Device source clock time */ + ts_lo = rd32(hw, cfg->dev_time_l[tmr_idx]); + ts_hi = rd32(hw, cfg->dev_time_h[tmr_idx]); + ts = ((u64)ts_hi << 32) | ts_lo; + *device = ns_to_ktime(ts); + +err_timeout: /* Clear the master timer */ ice_ptp_src_cmd(hw, ICE_PTP_NOP); /* Release HW lock */ - hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id)); - hh_lock = hh_lock & ~PFHH_SEM_BUSY_M; - wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock); + lock = rd32(hw, cfg->lock_reg); + lock &= ~cfg->lock_busy; + wr32(hw, cfg->lock_reg, lock); - if (i == MAX_HH_CTL_LOCK_TRIES) - return -ETIMEDOUT; - - return 0; + return err; } /** - * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp + * ice_ptp_getcrosststamp - Capture a device cross timestamp * @info: the driver's PTP info structure * @cts: The memory to fill the cross timestamp info * @@ -2149,22 +2327,36 @@ ice_ptp_get_syncdevicetime(ktime_t *device, * clock. Fill the cross timestamp information and report it back to the * caller. * - * This is only valid for E822 and E823 devices which have support for - * generating the cross timestamp via PCIe PTM. - * * In order to correctly correlate the ART timestamp back to the TSC time, the * CPU must have X86_FEATURE_TSC_KNOWN_FREQ. + * + * Return: zero on success, or a negative error code on failure. */ -static int -ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info, - struct system_device_crosststamp *cts) +static int ice_ptp_getcrosststamp(struct ptp_clock_info *info, + struct system_device_crosststamp *cts) { struct ice_pf *pf = ptp_info_to_pf(info); + struct ice_crosststamp_ctx ctx = { + .pf = pf, + }; + + switch (pf->hw.mac_type) { + case ICE_MAC_GENERIC: + case ICE_MAC_GENERIC_3K_E825: + ctx.cfg = &ice_crosststamp_cfg_e82x; + break; +#ifdef CONFIG_ICE_HWTS + case ICE_MAC_E830: + ctx.cfg = &ice_crosststamp_cfg_e830; + break; +#endif /* CONFIG_ICE_HWTS */ + default: + return -EOPNOTSUPP; + } - return get_device_system_crosststamp(ice_ptp_get_syncdevicetime, - pf, NULL, cts); + return get_device_system_crosststamp(ice_capture_crosststamp, &ctx, + &ctx.snapshot, cts); } -#endif /* CONFIG_ICE_HWTS */ /** * ice_ptp_get_ts_config - ioctl interface to read the timestamping config @@ -2299,20 +2491,41 @@ u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc, } /** - * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins + * ice_ptp_setup_pin_cfg - setup PTP pin_config structure + * @pf: Board private structure + */ +static void ice_ptp_setup_pin_cfg(struct ice_pf *pf) +{ + for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) { + const struct ice_ptp_pin_desc *desc = &pf->ptp.ice_pin_desc[i]; + struct ptp_pin_desc *pin = &pf->ptp.pin_desc[i]; + const char *name = NULL; + + if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) + name = ice_pin_names[desc->name_idx]; + else if (desc->name_idx != GPIO_NA) + name = ice_pin_names_nvm[desc->name_idx]; + if (name) + strscpy(pin->name, name, sizeof(pin->name)); + + pin->index = i; + } + + pf->ptp.info.pin_config = pf->ptp.pin_desc; +} + +/** + * ice_ptp_disable_pins - Disable PTP pins * @pf: pointer to the PF structure - * @info: PTP clock info structure * * Disable the OS access to the SMA pins. Called to clear out the OS - * indications of pin support when we fail to setup the E810-T SMA control - * register. + * indications of pin support when we fail to setup the SMA control register. 
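On the consumer side, the cross timestamp captured above is exposed through the PTP_SYS_OFFSET_PRECISE2 ioctl, which returns a matched device/system time pair. A rough user-space sketch (the /dev/ptp0 path is an assumption, error handling is minimal):

/* Hypothetical user-space sketch: fetch one device/system cross
 * timestamp from a PTP clock that advertises cross_timestamping.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_sys_offset_precise off;
	int fd;

	fd = open("/dev/ptp0", O_RDWR);
	if (fd < 0)
		return 1;

	memset(&off, 0, sizeof(off));
	if (ioctl(fd, PTP_SYS_OFFSET_PRECISE2, &off)) {
		perror("PTP_SYS_OFFSET_PRECISE2");
		close(fd);
		return 1;
	}

	printf("device   %lld.%09u\n", (long long)off.device.sec,
	       off.device.nsec);
	printf("realtime %lld.%09u\n", (long long)off.sys_realtime.sec,
	       off.sys_realtime.nsec);
	close(fd);
	return 0;
}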
*/ -static void -ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) +static void ice_ptp_disable_pins(struct ice_pf *pf) { - struct device *dev = ice_pf_to_dev(pf); + struct ptp_clock_info *info = &pf->ptp.info; - dev_warn(dev, "Failed to configure E810-T SMA pin control\n"); + dev_warn(ice_pf_to_dev(pf), "Failed to configure PTP pin control\n"); info->enable = NULL; info->verify = NULL; @@ -2322,126 +2535,176 @@ ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) } /** - * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins + * ice_ptp_parse_sdp_entries - update ice_ptp_pin_desc structure from NVM * @pf: pointer to the PF structure - * @info: PTP clock info structure + * @entries: SDP connection section from NVM + * @num_entries: number of valid entries in sdp_entries + * @pins: PTP pins array to update * - * Finish setting up the SMA pins by allocating pin_config, and setting it up - * according to the current status of the SMA. On failure, disable all of the - * extended SMA pin support. + * Return: 0 on success, negative error code otherwise. */ -static void -ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info) +static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries, + unsigned int num_entries, + struct ice_ptp_pin_desc *pins) { - struct device *dev = ice_pf_to_dev(pf); - int err; + unsigned int n_pins = 0; + unsigned int i; - /* Allocate memory for kernel pins interface */ - info->pin_config = devm_kcalloc(dev, info->n_pins, - sizeof(*info->pin_config), GFP_KERNEL); - if (!info->pin_config) { - ice_ptp_disable_sma_pins_e810t(pf, info); - return; - } + /* Setup ice_pin_desc array */ + for (i = 0; i < ICE_N_PINS_MAX; i++) { + pins[i].name_idx = -1; + pins[i].gpio[0] = -1; + pins[i].gpio[1] = -1; + } + + for (i = 0; i < num_entries; i++) { + u16 entry = le16_to_cpu(entries[i]); + DECLARE_BITMAP(bitmap, GPIO_NA); + unsigned int bitmap_idx; + bool dir; + u16 gpio; + + *bitmap = FIELD_GET(ICE_AQC_NVM_SDP_AC_PIN_M, entry); + dir = !!FIELD_GET(ICE_AQC_NVM_SDP_AC_DIR_M, entry); + gpio = FIELD_GET(ICE_AQC_NVM_SDP_AC_SDP_NUM_M, entry); + for_each_set_bit(bitmap_idx, bitmap, GPIO_NA + 1) { + unsigned int idx; + + /* Check if entry's pin bit is valid */ + if (bitmap_idx >= NUM_PTP_PINS_NVM && + bitmap_idx != GPIO_NA) + continue; - /* Read current SMA status */ - err = ice_get_sma_config_e810t(&pf->hw, info->pin_config); - if (err) - ice_ptp_disable_sma_pins_e810t(pf, info); -} + /* Check if pin already exists */ + for (idx = 0; idx < ICE_N_PINS_MAX; idx++) + if (pins[idx].name_idx == bitmap_idx) + break; + + if (idx == ICE_N_PINS_MAX) { + /* Pin not found, setup its entry and name */ + idx = n_pins++; + pins[idx].name_idx = bitmap_idx; + if (bitmap_idx == GPIO_NA) + strscpy(pf->ptp.pin_desc[idx].name, + ice_pin_names[gpio], + sizeof(pf->ptp.pin_desc[idx] + .name)); + } + + /* Setup in/out GPIO number */ + pins[idx].gpio[dir] = gpio; + } + } -/** - * ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs - * @pf: pointer to the PF instance - * @info: PTP clock capabilities - */ -static void -ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info) -{ - if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { - info->n_ext_ts = N_EXT_TS_E810; - info->n_per_out = N_PER_OUT_E810T; - info->n_pins = NUM_PTP_PINS_E810T; - info->verify = ice_verify_pin_e810t; - - /* Complete setup of the SMA pins */ - ice_ptp_setup_sma_pins_e810t(pf, info); - } else if (ice_is_e810t(&pf->hw)) { - info->n_ext_ts = 
N_EXT_TS_NO_SMA_E810T; - info->n_per_out = N_PER_OUT_NO_SMA_E810T; - } else { - info->n_per_out = N_PER_OUT_E810; - info->n_ext_ts = N_EXT_TS_E810; + for (i = 0; i < n_pins; i++) { + dev_dbg(ice_pf_to_dev(pf), + "NVM pin entry[%d] : name_idx %d gpio_out %d gpio_in %d\n", + i, pins[i].name_idx, pins[i].gpio[1], pins[i].gpio[0]); } -} -/** - * ice_ptp_setup_pins_e823 - Setup PTP pins in sysfs - * @pf: pointer to the PF instance - * @info: PTP clock capabilities - */ -static void -ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info) -{ - info->pps = 1; - info->n_per_out = 0; - info->n_ext_ts = 1; + pf->ptp.info.n_pins = n_pins; + return 0; } /** - * ice_ptp_set_funcs_e82x - Set specialized functions for E82x support + * ice_ptp_set_funcs_e82x - Set specialized functions for E82X support * @pf: Board private structure - * @info: PTP info to fill * - * Assign functions to the PTP capabiltiies structure for E82x devices. + * Assign functions to the PTP capabilities structure for E82X devices. * Functions which operate across all device families should be set directly - * in ice_ptp_set_caps. Only add functions here which are distinct for E82x + * in ice_ptp_set_caps. Only add functions here which are distinct for E82X * devices. */ -static void -ice_ptp_set_funcs_e82x(struct ice_pf *pf, struct ptp_clock_info *info) +static void ice_ptp_set_funcs_e82x(struct ice_pf *pf) { -#ifdef CONFIG_ICE_HWTS - if (boot_cpu_has(X86_FEATURE_ART) && - boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) - info->getcrosststamp = ice_ptp_getcrosststamp_e82x; -#endif /* CONFIG_ICE_HWTS */ + pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp; + + if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) { + pf->ptp.ice_pin_desc = ice_pin_desc_e825c; + pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e825c); + } else { + pf->ptp.ice_pin_desc = ice_pin_desc_e82x; + pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e82x); + } + ice_ptp_setup_pin_cfg(pf); } /** * ice_ptp_set_funcs_e810 - Set specialized functions for E810 support * @pf: Board private structure - * @info: PTP info to fill * * Assign functions to the PTP capabiltiies structure for E810 devices. * Functions which operate across all device families should be set directly - * in ice_ptp_set_caps. Only add functions here which are distinct for e810 + * in ice_ptp_set_caps. Only add functions here which are distinct for E810 * devices. 
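Each __le16 SDP section entry parsed above packs a direction bit, an SDP/GPIO number and a pin-applicability bitmap, which the driver pulls apart with FIELD_GET before walking the bitmap. The actual ICE_AQC_NVM_SDP_AC_* bit positions are defined elsewhere in the driver and are not visible in this hunk, so the layout in the sketch below is purely illustrative of the decoding style, not the real field placement:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative layout only -- the real ICE_AQC_NVM_SDP_AC_* masks live in
 * the driver headers and may differ from these assumed values.
 */
#define SDP_AC_DIR_BIT		(1u << 15)	/* 0 = input, 1 = output (assumed) */
#define SDP_AC_SDP_NUM_MASK	0x7c00u		/* GPIO/SDP number (assumed) */
#define SDP_AC_SDP_NUM_SHIFT	10
#define SDP_AC_PIN_MASK		0x03ffu		/* per-pin applicability bitmap (assumed) */

static void decode_sdp_entry(uint16_t entry)
{
	bool output = entry & SDP_AC_DIR_BIT;
	unsigned int gpio = (entry & SDP_AC_SDP_NUM_MASK) >> SDP_AC_SDP_NUM_SHIFT;
	unsigned int bitmap = entry & SDP_AC_PIN_MASK;

	for (unsigned int pin = 0; pin < 10; pin++)
		if (bitmap & (1u << pin))
			printf("pin %u: gpio %u as %s\n", pin, gpio,
			       output ? "output" : "input");
}

int main(void)
{
	decode_sdp_entry(0x8403);	/* made-up entry: output, gpio 1, pins 0 and 1 */
	return 0;
}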
*/ -static void -ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info) +static void ice_ptp_set_funcs_e810(struct ice_pf *pf) { - info->enable = ice_ptp_gpio_enable_e810; - ice_ptp_setup_pins_e810(pf, info); + __le16 entries[ICE_AQC_NVM_SDP_AC_MAX_SIZE]; + struct ice_ptp_pin_desc *desc = NULL; + struct ice_ptp *ptp = &pf->ptp; + unsigned int num_entries; + int err; + + err = ice_ptp_read_sdp_ac(&pf->hw, entries, &num_entries); + if (err) { + /* SDP section does not exist in NVM or is corrupted */ + if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) { + ptp->ice_pin_desc = ice_pin_desc_e810_sma; + ptp->info.n_pins = + ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810_sma); + } else { + pf->ptp.ice_pin_desc = ice_pin_desc_e810; + pf->ptp.info.n_pins = + ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810); + err = 0; + } + } else { + desc = devm_kcalloc(ice_pf_to_dev(pf), ICE_N_PINS_MAX, + sizeof(struct ice_ptp_pin_desc), + GFP_KERNEL); + if (!desc) + goto err; + + err = ice_ptp_parse_sdp_entries(pf, entries, num_entries, desc); + if (err) + goto err; + + ptp->ice_pin_desc = (const struct ice_ptp_pin_desc *)desc; + } + + ptp->info.pin_config = ptp->pin_desc; + ice_ptp_setup_pin_cfg(pf); + + if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) + err = ice_ptp_set_sma_cfg(pf); +err: + if (err) { + devm_kfree(ice_pf_to_dev(pf), desc); + ice_ptp_disable_pins(pf); + } } /** - * ice_ptp_set_funcs_e823 - Set specialized functions for E823 support + * ice_ptp_set_funcs_e830 - Set specialized functions for E830 support * @pf: Board private structure - * @info: PTP info to fill * - * Assign functions to the PTP capabiltiies structure for E823 devices. + * Assign functions to the PTP capabiltiies structure for E830 devices. * Functions which operate across all device families should be set directly - * in ice_ptp_set_caps. Only add functions here which are distinct for e823 + * in ice_ptp_set_caps. Only add functions here which are distinct for E830 * devices. 
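Whichever pin table ends up selected in ice_ptp_set_funcs_e810(), the result is published to user space through info->pin_config, where the pins can be listed and reassigned with the standard PTP pin ioctls. A minimal consumer sketch (the /dev/ptp0 path is an assumption):

/* Hypothetical user-space sketch: enumerate the pins a PTP clock
 * exposes and print their current function assignments.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_clock_caps caps;
	int fd, i;

	fd = open("/dev/ptp0", O_RDWR);
	if (fd < 0)
		return 1;

	memset(&caps, 0, sizeof(caps));
	if (ioctl(fd, PTP_CLOCK_GETCAPS, &caps)) {
		close(fd);
		return 1;
	}

	for (i = 0; i < caps.n_pins; i++) {
		struct ptp_pin_desc pin;

		memset(&pin, 0, sizeof(pin));
		pin.index = i;
		if (ioctl(fd, PTP_PIN_GETFUNC2, &pin))
			break;
		printf("pin %u \"%s\" func %u chan %u\n",
		       pin.index, pin.name, pin.func, pin.chan);
	}
	close(fd);
	return 0;
}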
*/ -static void -ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info) +static void ice_ptp_set_funcs_e830(struct ice_pf *pf) { - ice_ptp_set_funcs_e82x(pf, info); +#ifdef CONFIG_ICE_HWTS + if (pcie_ptm_enabled(pf->pdev) && boot_cpu_has(X86_FEATURE_ART)) + pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp; - info->enable = ice_ptp_gpio_enable_e823; - ice_ptp_setup_pins_e823(pf, info); +#endif /* CONFIG_ICE_HWTS */ + /* Rest of the config is the same as base E810 */ + pf->ptp.ice_pin_desc = ice_pin_desc_e810; + pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810); + ice_ptp_setup_pin_cfg(pf); } /** @@ -2461,13 +2724,30 @@ static void ice_ptp_set_caps(struct ice_pf *pf) info->adjfine = ice_ptp_adjfine; info->gettimex64 = ice_ptp_gettimex64; info->settime64 = ice_ptp_settime64; - - if (ice_is_e810(&pf->hw)) - ice_ptp_set_funcs_e810(pf, info); - else if (ice_is_e823(&pf->hw)) - ice_ptp_set_funcs_e823(pf, info); - else - ice_ptp_set_funcs_e82x(pf, info); + info->n_per_out = GLTSYN_TGT_H_IDX_MAX; + info->n_ext_ts = GLTSYN_EVNT_H_IDX_MAX; + info->enable = ice_ptp_gpio_enable; + info->verify = ice_verify_pin; + + info->supported_extts_flags = PTP_RISING_EDGE | + PTP_FALLING_EDGE | + PTP_STRICT_FLAGS; + info->supported_perout_flags = PTP_PEROUT_PHASE; + + switch (pf->hw.mac_type) { + case ICE_MAC_E810: + ice_ptp_set_funcs_e810(pf); + return; + case ICE_MAC_E830: + ice_ptp_set_funcs_e830(pf); + return; + case ICE_MAC_GENERIC: + case ICE_MAC_GENERIC_3K_E825: + ice_ptp_set_funcs_e82x(pf); + return; + default: + return; + } } /** @@ -2578,6 +2858,65 @@ enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf) } /** + * ice_ptp_ts_irq - Process the PTP Tx timestamps in IRQ context + * @pf: Board private structure + * + * Return: IRQ_WAKE_THREAD if Tx timestamp read has to be handled in the bottom + * half of the interrupt and IRQ_HANDLED otherwise. + */ +irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf) +{ + struct ice_hw *hw = &pf->hw; + + switch (hw->mac_type) { + case ICE_MAC_E810: + /* E810 capable of low latency timestamping with interrupt can + * request a single timestamp in the top half and wait for + * a second LL TS interrupt from the FW when it's ready. + */ + if (hw->dev_caps.ts_dev_info.ts_ll_int_read) { + struct ice_ptp_tx *tx = &pf->ptp.port.tx; + u8 idx; + + if (!ice_pf_state_is_nominal(pf)) + return IRQ_HANDLED; + + spin_lock(&tx->lock); + idx = find_next_bit_wrap(tx->in_use, tx->len, + tx->last_ll_ts_idx_read + 1); + if (idx != tx->len) + ice_ptp_req_tx_single_tstamp(tx, idx); + spin_unlock(&tx->lock); + + return IRQ_HANDLED; + } + fallthrough; /* non-LL_TS E810 */ + case ICE_MAC_GENERIC: + case ICE_MAC_GENERIC_3K_E825: + /* All other devices process timestamps in the bottom half due + * to sleeping or polling. + */ + if (!ice_ptp_pf_handles_tx_interrupt(pf)) + return IRQ_HANDLED; + + set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread); + return IRQ_WAKE_THREAD; + case ICE_MAC_E830: + /* E830 can read timestamps in the top half using rd32() */ + if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) { + /* Process outstanding Tx timestamps. If there + * is more work, re-arm the interrupt to trigger again. 
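Because supported_perout_flags now advertises PTP_PEROUT_PHASE, user space can ask for a periodic output whose edges are offset within each period instead of anchored to an absolute start time, which is exactly the rq->flags & PTP_PEROUT_PHASE branch handled earlier in ice_ptp_cfg_perout(). A rough sketch, assuming the pin has already been set to PTP_PF_PEROUT and that /dev/ptp0 is the ice clock:

/* Hypothetical user-space sketch: request a 1 Hz output with its edge
 * 250 ms into each second, using the phase form of the request.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_perout_request req;
	int fd;

	fd = open("/dev/ptp0", O_RDWR);
	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.index = 0;			/* periodic output channel 0 */
	req.period.sec = 1;		/* 1 s period */
	req.period.nsec = 0;
	req.flags = PTP_PEROUT_PHASE;	/* start field carries a phase */
	req.phase.sec = 0;
	req.phase.nsec = 250000000;	/* edges 250 ms into each second */

	if (ioctl(fd, PTP_PEROUT_REQUEST2, &req))
		perror("PTP_PEROUT_REQUEST2");

	close(fd);
	return 0;
}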
+ */ + wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); + ice_flush(hw); + } + return IRQ_HANDLED; + default: + return IRQ_HANDLED; + } +} + +/** * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timstamp interrupt * @pf: Board private structure * @@ -2597,13 +2936,13 @@ static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf) bool trigger_oicr = false; unsigned int i; - if (ice_is_e810(hw)) + if (!pf->ptp.port.tx.has_ready_bitmap) return; if (!ice_pf_src_tmr_owned(pf)) return; - for (i = 0; i < ICE_MAX_QUAD; i++) { + for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) { u64 tstamp_ready; int err; @@ -2644,6 +2983,32 @@ static void ice_ptp_periodic_work(struct kthread_work *work) } /** + * ice_ptp_prepare_rebuild_sec - Prepare second NAC for PTP reset or rebuild + * @pf: Board private structure + * @rebuild: rebuild if true, prepare if false + * @reset_type: the reset type being performed + */ +static void ice_ptp_prepare_rebuild_sec(struct ice_pf *pf, bool rebuild, + enum ice_reset_req reset_type) +{ + struct list_head *entry; + + list_for_each(entry, &pf->adapter->ports.ports) { + struct ice_ptp_port *port = list_entry(entry, + struct ice_ptp_port, + list_node); + struct ice_pf *peer_pf = ptp_port_to_pf(port); + + if (!ice_is_primary(&peer_pf->hw)) { + if (rebuild) + ice_ptp_rebuild(peer_pf, reset_type); + else + ice_ptp_prepare_for_reset(peer_pf, reset_type); + } + } +} + +/** * ice_ptp_prepare_for_reset - Prepare PTP for reset * @pf: Board private structure * @reset_type: the reset type being performed @@ -2651,6 +3016,7 @@ static void ice_ptp_periodic_work(struct kthread_work *work) void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) { struct ice_ptp *ptp = &pf->ptp; + struct ice_hw *hw = &pf->hw; u8 src_tmr; if (ptp->state != ICE_PTP_READY) @@ -2666,10 +3032,13 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type) if (reset_type == ICE_RESET_PFR) return; + if (ice_pf_src_tmr_owned(pf) && hw->mac_type == ICE_MAC_GENERIC_3K_E825) + ice_ptp_prepare_rebuild_sec(pf, false, reset_type); + ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); /* Disable periodic outputs */ - ice_ptp_disable_all_clkout(pf); + ice_ptp_disable_all_perout(pf); src_tmr = ice_get_ptp_src_clock_index(&pf->hw); @@ -2707,10 +3076,8 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf) /* Write the increment time value to PHY and LAN */ err = ice_ptp_write_incval(hw, ice_base_incval(pf)); - if (err) { - ice_ptp_unlock(hw); - return err; - } + if (err) + goto err_unlock; /* Write the initial Time value to PHY and LAN using the cached PHC * time before the reset and time difference between stopping and @@ -2723,10 +3090,8 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf) ts = ktime_to_timespec64(ktime_get_real()); } err = ice_ptp_write_init(pf, &ts); - if (err) { - ice_ptp_unlock(hw); - return err; - } + if (err) + goto err_unlock; /* Release the global hardware lock */ ice_ptp_unlock(hw); @@ -2736,16 +3101,22 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf) */ ice_ptp_flush_all_tx_tracker(pf); - if (!ice_is_e810(hw)) { - /* Enable quad interrupts */ - err = ice_ptp_cfg_phy_interrupt(pf, true, 1); - if (err) - return err; + /* Enable quad interrupts */ + err = ice_ptp_cfg_phy_interrupt(pf, true, 1); + if (err) + return err; - ice_ptp_restart_all_phy(pf); - } + ice_ptp_restart_all_phy(pf); + + /* Re-enable all periodic outputs and external timestamp events */ + ice_ptp_enable_all_perout(pf); + ice_ptp_enable_all_extts(pf); return 0; + 
+err_unlock: + ice_ptp_unlock(hw); + return err; } /** @@ -2785,187 +3156,43 @@ err: dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err); } -/** - * ice_ptp_aux_dev_to_aux_pf - Get auxiliary PF handle for the auxiliary device - * @aux_dev: auxiliary device to get the auxiliary PF for - */ -static struct ice_pf * -ice_ptp_aux_dev_to_aux_pf(struct auxiliary_device *aux_dev) -{ - struct ice_ptp_port *aux_port; - struct ice_ptp *aux_ptp; - - aux_port = container_of(aux_dev, struct ice_ptp_port, aux_dev); - aux_ptp = container_of(aux_port, struct ice_ptp, port); - - return container_of(aux_ptp, struct ice_pf, ptp); -} - -/** - * ice_ptp_aux_dev_to_owner_pf - Get PF handle for the auxiliary device - * @aux_dev: auxiliary device to get the PF for - */ -static struct ice_pf * -ice_ptp_aux_dev_to_owner_pf(struct auxiliary_device *aux_dev) -{ - struct ice_ptp_port_owner *ports_owner; - struct auxiliary_driver *aux_drv; - struct ice_ptp *owner_ptp; - - if (!aux_dev->dev.driver) - return NULL; - - aux_drv = to_auxiliary_drv(aux_dev->dev.driver); - ports_owner = container_of(aux_drv, struct ice_ptp_port_owner, - aux_driver); - owner_ptp = container_of(ports_owner, struct ice_ptp, ports_owner); - return container_of(owner_ptp, struct ice_pf, ptp); -} - -/** - * ice_ptp_auxbus_probe - Probe auxiliary devices - * @aux_dev: PF's auxiliary device - * @id: Auxiliary device ID - */ -static int ice_ptp_auxbus_probe(struct auxiliary_device *aux_dev, - const struct auxiliary_device_id *id) +static int ice_ptp_setup_adapter(struct ice_pf *pf) { - struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev); - struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev); + if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw)) + return -EPERM; - if (WARN_ON(!owner_pf)) - return -ENODEV; - - INIT_LIST_HEAD(&aux_pf->ptp.port.list_member); - mutex_lock(&owner_pf->ptp.ports_owner.lock); - list_add(&aux_pf->ptp.port.list_member, - &owner_pf->ptp.ports_owner.ports); - mutex_unlock(&owner_pf->ptp.ports_owner.lock); + pf->adapter->ctrl_pf = pf; return 0; } -/** - * ice_ptp_auxbus_remove - Remove auxiliary devices from the bus - * @aux_dev: PF's auxiliary device - */ -static void ice_ptp_auxbus_remove(struct auxiliary_device *aux_dev) +static int ice_ptp_setup_pf(struct ice_pf *pf) { - struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev); - struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev); + struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf); + struct ice_ptp *ptp = &pf->ptp; - mutex_lock(&owner_pf->ptp.ports_owner.lock); - list_del(&aux_pf->ptp.port.list_member); - mutex_unlock(&owner_pf->ptp.ports_owner.lock); -} + if (WARN_ON(!ctrl_ptp) || pf->hw.mac_type == ICE_MAC_UNKNOWN) + return -ENODEV; -/** - * ice_ptp_auxbus_shutdown - * @aux_dev: PF's auxiliary device - */ -static void ice_ptp_auxbus_shutdown(struct auxiliary_device *aux_dev) -{ - /* Doing nothing here, but handle to auxbus driver must be satisfied */ -} + INIT_LIST_HEAD(&ptp->port.list_node); + mutex_lock(&pf->adapter->ports.lock); -/** - * ice_ptp_auxbus_suspend - * @aux_dev: PF's auxiliary device - * @state: power management state indicator - */ -static int -ice_ptp_auxbus_suspend(struct auxiliary_device *aux_dev, pm_message_t state) -{ - /* Doing nothing here, but handle to auxbus driver must be satisfied */ - return 0; -} + list_add(&ptp->port.list_node, + &pf->adapter->ports.ports); + mutex_unlock(&pf->adapter->ports.lock); -/** - * ice_ptp_auxbus_resume - * @aux_dev: PF's auxiliary device - */ -static int 
ice_ptp_auxbus_resume(struct auxiliary_device *aux_dev) -{ - /* Doing nothing here, but handle to auxbus driver must be satisfied */ return 0; } -/** - * ice_ptp_auxbus_create_id_table - Create auxiliary device ID table - * @pf: Board private structure - * @name: auxiliary bus driver name - */ -static struct auxiliary_device_id * -ice_ptp_auxbus_create_id_table(struct ice_pf *pf, const char *name) +static void ice_ptp_cleanup_pf(struct ice_pf *pf) { - struct auxiliary_device_id *ids; - - /* Second id left empty to terminate the array */ - ids = devm_kcalloc(ice_pf_to_dev(pf), 2, - sizeof(struct auxiliary_device_id), GFP_KERNEL); - if (!ids) - return NULL; - - snprintf(ids[0].name, sizeof(ids[0].name), "ice.%s", name); - - return ids; -} - -/** - * ice_ptp_register_auxbus_driver - Register PTP auxiliary bus driver - * @pf: Board private structure - */ -static int ice_ptp_register_auxbus_driver(struct ice_pf *pf) -{ - struct auxiliary_driver *aux_driver; - struct ice_ptp *ptp; - struct device *dev; - char *name; - int err; - - ptp = &pf->ptp; - dev = ice_pf_to_dev(pf); - aux_driver = &ptp->ports_owner.aux_driver; - INIT_LIST_HEAD(&ptp->ports_owner.ports); - mutex_init(&ptp->ports_owner.lock); - name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u", - pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn), - ice_get_ptp_src_clock_index(&pf->hw)); - if (!name) - return -ENOMEM; - - aux_driver->name = name; - aux_driver->shutdown = ice_ptp_auxbus_shutdown; - aux_driver->suspend = ice_ptp_auxbus_suspend; - aux_driver->remove = ice_ptp_auxbus_remove; - aux_driver->resume = ice_ptp_auxbus_resume; - aux_driver->probe = ice_ptp_auxbus_probe; - aux_driver->id_table = ice_ptp_auxbus_create_id_table(pf, name); - if (!aux_driver->id_table) - return -ENOMEM; + struct ice_ptp *ptp = &pf->ptp; - err = auxiliary_driver_register(aux_driver); - if (err) { - devm_kfree(dev, aux_driver->id_table); - dev_err(dev, "Failed registering aux_driver, name <%s>\n", - name); + if (pf->hw.mac_type != ICE_MAC_UNKNOWN) { + mutex_lock(&pf->adapter->ports.lock); + list_del(&ptp->port.list_node); + mutex_unlock(&pf->adapter->ports.lock); } - - return err; -} - -/** - * ice_ptp_unregister_auxbus_driver - Unregister PTP auxiliary bus driver - * @pf: Board private structure - */ -static void ice_ptp_unregister_auxbus_driver(struct ice_pf *pf) -{ - struct auxiliary_driver *aux_driver = &pf->ptp.ports_owner.aux_driver; - - auxiliary_driver_unregister(aux_driver); - devm_kfree(ice_pf_to_dev(pf), aux_driver->id_table); - - mutex_destroy(&pf->ptp.ports_owner.lock); } /** @@ -2977,15 +3204,12 @@ static void ice_ptp_unregister_auxbus_driver(struct ice_pf *pf) */ int ice_ptp_clock_index(struct ice_pf *pf) { - struct auxiliary_device *aux_dev; - struct ice_pf *owner_pf; + struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf); struct ptp_clock *clock; - aux_dev = &pf->ptp.port.aux_dev; - owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev); - if (!owner_pf) + if (!ctrl_ptp) return -1; - clock = owner_pf->ptp.clock; + clock = ctrl_ptp->clock; return clock ? 
ptp_clock_index(clock) : -1; } @@ -3019,47 +3243,37 @@ static int ice_ptp_init_owner(struct ice_pf *pf) /* Write the increment time value to PHY and LAN */ err = ice_ptp_write_incval(hw, ice_base_incval(pf)); - if (err) { - ice_ptp_unlock(hw); - goto err_exit; - } + if (err) + goto err_unlock; ts = ktime_to_timespec64(ktime_get_real()); /* Write the initial Time value to PHY and LAN */ err = ice_ptp_write_init(pf, &ts); - if (err) { - ice_ptp_unlock(hw); - goto err_exit; - } + if (err) + goto err_unlock; /* Release the global hardware lock */ ice_ptp_unlock(hw); - if (!ice_is_e810(hw)) { - /* Enable quad interrupts */ - err = ice_ptp_cfg_phy_interrupt(pf, true, 1); - if (err) - goto err_exit; - } + /* Configure PHY interrupt settings */ + err = ice_ptp_cfg_phy_interrupt(pf, true, 1); + if (err) + goto err_exit; /* Ensure we have a clock device */ err = ice_ptp_create_clock(pf); if (err) goto err_clk; - err = ice_ptp_register_auxbus_driver(pf); - if (err) { - dev_err(ice_pf_to_dev(pf), "Failed to register PTP auxbus driver"); - goto err_aux; - } - return 0; -err_aux: - ptp_clock_unregister(pf->ptp.clock); err_clk: pf->ptp.clock = NULL; err_exit: return err; + +err_unlock: + ice_ptp_unlock(hw); + return err; } /** @@ -3077,7 +3291,7 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp) /* Allocate a kworker for handling work required for the ports * connected to the PTP hardware clock. */ - kworker = kthread_create_worker(0, "ice-ptp-%s", + kworker = kthread_run_worker(0, "ice-ptp-%s", dev_name(ice_pf_to_dev(pf))); if (IS_ERR(kworker)) return PTR_ERR(kworker); @@ -3094,6 +3308,8 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp) * ice_ptp_init_port - Initialize PTP port structure * @pf: Board private structure * @ptp_port: PTP port structure + * + * Return: 0 on success, -ENODEV on invalid MAC type, -ENOMEM on failed alloc. 
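The error handling in ice_ptp_init_owner() and ice_ptp_rebuild_owner() is reshaped here so that every failure after taking the hardware semaphore funnels through a single err_unlock label rather than unlocking inline at each call site. The shape of that pattern, reduced to a stand-alone sketch with made-up lock and helper names:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in primitives; in the driver these are ice_ptp_lock()/unlock()
 * and the increment/initial-time write helpers.
 */
static bool hw_lock(void)         { return true; }
static void hw_unlock(void)       { }
static int  write_increment(void) { return 0; }
static int  write_init_time(void) { return 0; }

static int init_owner(void)
{
	int err;

	if (!hw_lock())
		return -16;	/* -EBUSY */

	err = write_increment();
	if (err)
		goto err_unlock;

	err = write_init_time();
	if (err)
		goto err_unlock;

	hw_unlock();
	return 0;

err_unlock:
	hw_unlock();
	return err;
}

int main(void)
{
	printf("init_owner() -> %d\n", init_owner());
	return 0;
}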
*/ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) { @@ -3101,13 +3317,14 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) mutex_init(&ptp_port->ps_lock); - switch (hw->phy_model) { - case ICE_PHY_E810: - return ice_ptp_init_tx_e810(pf, &ptp_port->tx); - case ICE_PHY_E82X: + switch (hw->mac_type) { + case ICE_MAC_E810: + case ICE_MAC_E830: + case ICE_MAC_GENERIC_3K_E825: + return ice_ptp_init_tx(pf, &ptp_port->tx, ptp_port->port_num); + case ICE_MAC_GENERIC: kthread_init_delayed_work(&ptp_port->ov_work, ice_ptp_wait_for_offsets); - return ice_ptp_init_tx_e82x(pf, &ptp_port->tx, ptp_port->port_num); default: @@ -3116,76 +3333,6 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) } /** - * ice_ptp_release_auxbus_device - * @dev: device that utilizes the auxbus - */ -static void ice_ptp_release_auxbus_device(struct device *dev) -{ - /* Doing nothing here, but handle to auxbux device must be satisfied */ -} - -/** - * ice_ptp_create_auxbus_device - Create PTP auxiliary bus device - * @pf: Board private structure - */ -static int ice_ptp_create_auxbus_device(struct ice_pf *pf) -{ - struct auxiliary_device *aux_dev; - struct ice_ptp *ptp; - struct device *dev; - char *name; - int err; - u32 id; - - ptp = &pf->ptp; - id = ptp->port.port_num; - dev = ice_pf_to_dev(pf); - - aux_dev = &ptp->port.aux_dev; - - name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u", - pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn), - ice_get_ptp_src_clock_index(&pf->hw)); - if (!name) - return -ENOMEM; - - aux_dev->name = name; - aux_dev->id = id; - aux_dev->dev.release = ice_ptp_release_auxbus_device; - aux_dev->dev.parent = dev; - - err = auxiliary_device_init(aux_dev); - if (err) - goto aux_err; - - err = auxiliary_device_add(aux_dev); - if (err) { - auxiliary_device_uninit(aux_dev); - goto aux_err; - } - - return 0; -aux_err: - dev_err(dev, "Failed to create PTP auxiliary bus device <%s>\n", name); - devm_kfree(dev, name); - return err; -} - -/** - * ice_ptp_remove_auxbus_device - Remove PTP auxiliary bus device - * @pf: Board private structure - */ -static void ice_ptp_remove_auxbus_device(struct ice_pf *pf) -{ - struct auxiliary_device *aux_dev = &pf->ptp.port.aux_dev; - - auxiliary_device_delete(aux_dev); - auxiliary_device_uninit(aux_dev); - - memset(aux_dev, 0, sizeof(*aux_dev)); -} - -/** * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode * @pf: Board private structure * @@ -3196,8 +3343,8 @@ static void ice_ptp_remove_auxbus_device(struct ice_pf *pf) */ static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf) { - switch (pf->hw.phy_model) { - case ICE_PHY_E82X: + switch (pf->hw.mac_type) { + case ICE_MAC_GENERIC: /* E822 based PHY has the clock owner process the interrupt * for all ports. */ @@ -3232,23 +3379,35 @@ void ice_ptp_init(struct ice_pf *pf) ptp->state = ICE_PTP_INITIALIZING; - ice_ptp_init_phy_model(hw); + if (hw->lane_num < 0) { + err = hw->lane_num; + goto err_exit; + } + ptp->port.port_num = hw->lane_num; + + ice_ptp_init_hw(hw); ice_ptp_init_tx_interrupt_mode(pf); /* If this function owns the clock hardware, it must allocate and * configure the PTP clock device to represent it. 
*/ - if (ice_pf_src_tmr_owned(pf)) { + if (ice_pf_src_tmr_owned(pf) && ice_is_primary(hw)) { + err = ice_ptp_setup_adapter(pf); + if (err) + goto err_exit; err = ice_ptp_init_owner(pf); if (err) - goto err; + goto err_exit; } - ptp->port.port_num = hw->pf_id; + err = ice_ptp_setup_pf(pf); + if (err) + goto err_exit; + err = ice_ptp_init_port(pf, &ptp->port); if (err) - goto err; + goto err_exit; /* Start the PHY timestamping block */ ice_ptp_reset_phy_timestamping(pf); @@ -3256,20 +3415,16 @@ void ice_ptp_init(struct ice_pf *pf) /* Configure initial Tx interrupt settings */ ice_ptp_cfg_tx_interrupt(pf); - err = ice_ptp_create_auxbus_device(pf); - if (err) - goto err; - ptp->state = ICE_PTP_READY; err = ice_ptp_init_work(pf, ptp); if (err) - goto err; + goto err_exit; dev_info(ice_pf_to_dev(pf), "PTP init successful\n"); return; -err: +err_exit: /* If we registered a PTP clock, release it */ if (pf->ptp.clock) { ptp_clock_unregister(ptp->clock); @@ -3296,10 +3451,12 @@ void ice_ptp_release(struct ice_pf *pf) /* Disable timestamping for both Tx and Rx */ ice_ptp_disable_timestamp_mode(pf); - ice_ptp_remove_auxbus_device(pf); + ice_ptp_cleanup_pf(pf); ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx); + ice_ptp_disable_all_extts(pf); + kthread_cancel_delayed_work_sync(&pf->ptp.work); ice_ptp_port_phy_stop(&pf->ptp.port); @@ -3309,14 +3466,11 @@ void ice_ptp_release(struct ice_pf *pf) pf->ptp.kworker = NULL; } - if (ice_pf_src_tmr_owned(pf)) - ice_ptp_unregister_auxbus_driver(pf); - if (!pf->ptp.clock) return; /* Disable periodic outputs */ - ice_ptp_disable_all_clkout(pf); + ice_ptp_disable_all_perout(pf); ptp_clock_unregister(pf->ptp.clock); pf->ptp.clock = NULL; diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index 3af20025043a..3b769a0cad00 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -9,30 +9,6 @@ #include "ice_ptp_hw.h" -enum ice_ptp_pin_e810 { - GPIO_20 = 0, - GPIO_21, - GPIO_22, - GPIO_23, - NUM_PTP_PIN_E810 -}; - -enum ice_ptp_pin_e810t { - GNSS = 0, - SMA1, - UFL1, - SMA2, - UFL2, - NUM_PTP_PINS_E810T -}; - -struct ice_perout_channel { - bool ena; - u32 gpio_pin; - u64 period; - u64 start_time; -}; - /* The ice hardware captures Tx hardware timestamps in the PHY. The timestamp * is stored in a buffer of registers. Depending on the specific hardware, * this buffer might be shared across multiple PHY ports. @@ -152,7 +128,7 @@ struct ice_ptp_tx { /* Quad and port information for initializing timestamp blocks */ #define INDEX_PER_QUAD 64 #define INDEX_PER_PORT_E82X 16 -#define INDEX_PER_PORT_E810 64 +#define INDEX_PER_PORT 64 /** * struct ice_ptp_port - data used to initialize an external port for PTP @@ -161,9 +137,8 @@ struct ice_ptp_tx { * ready for PTP functionality. It is used to track the port initialization * and determine when the port's PHY offset is valid. 
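With the auxiliary bus gone, every PF now hangs its ice_ptp_port on a list owned by the shared ice_adapter, and the clock owner walks that list under adapter->ports.lock (as ice_ptp_prepare_rebuild_sec() does above), recovering each PF through the embedded list node. A reduced user-space sketch of that list_node/container_of relationship, with made-up types standing in for the kernel list helpers:

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel's list_head/list_entry idiom. */
struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ptp_port {			/* made-up reduced form of ice_ptp_port */
	int port_num;
	struct list_head list_node;
};

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

int main(void)
{
	struct list_head ports = { &ports, &ports };
	struct ptp_port a = { .port_num = 0 }, b = { .port_num = 1 };
	struct list_head *pos;

	list_add_tail(&a.list_node, &ports);
	list_add_tail(&b.list_node, &ports);

	/* Same walk as ice_ptp_prepare_rebuild_sec(): recover the port
	 * structure from its embedded list node.
	 */
	for (pos = ports.next; pos != &ports; pos = pos->next) {
		struct ptp_port *port = container_of(pos, struct ptp_port,
						     list_node);
		printf("port %d\n", port->port_num);
	}
	return 0;
}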
* - * @list_member: list member structure of auxiliary device + * @list_node: list member structure * @tx: Tx timestamp tracking for this port - * @aux_dev: auxiliary device associated with this port * @ov_work: delayed work task for tracking when PHY offset is valid * @ps_lock: mutex used to protect the overall PTP PHY start procedure * @link_up: indicates whether the link is up @@ -171,9 +146,8 @@ struct ice_ptp_tx { * @port_num: the port number this structure represents */ struct ice_ptp_port { - struct list_head list_member; + struct list_head list_node; struct ice_ptp_tx tx; - struct auxiliary_device aux_dev; struct kthread_delayed_work ov_work; struct mutex ps_lock; /* protects overall PTP PHY start procedure */ bool link_up; @@ -187,22 +161,6 @@ enum ice_ptp_tx_interrupt { ICE_PTP_TX_INTERRUPT_ALL, }; -/** - * struct ice_ptp_port_owner - data used to handle the PTP clock owner info - * - * This structure contains data necessary for the PTP clock owner to correctly - * handle the timestamping feature for all attached ports. - * - * @aux_driver: the structure carring the auxiliary driver information - * @ports: list of porst handled by this port owner - * @lock: protect access to ports list - */ -struct ice_ptp_port_owner { - struct auxiliary_driver aux_driver; - struct list_head ports; - struct mutex lock; -}; - #define GLTSYN_TGT_H_IDX_MAX 4 enum ice_ptp_state { @@ -213,19 +171,71 @@ enum ice_ptp_state { ICE_PTP_ERROR, }; +enum ice_ptp_pin { + SDP0 = 0, + SDP1, + SDP2, + SDP3, + TIME_SYNC, + ONE_PPS +}; + +enum ice_ptp_pin_nvm { + GNSS = 0, + SMA1, + UFL1, + SMA2, + UFL2, + NUM_PTP_PINS_NVM, + GPIO_NA = 9 +}; + +/* Per-channel register definitions */ +#define GLTSYN_AUX_OUT(_chan, _idx) (GLTSYN_AUX_OUT_0(_idx) + ((_chan) * 8)) +#define GLTSYN_AUX_IN(_chan, _idx) (GLTSYN_AUX_IN_0(_idx) + ((_chan) * 8)) +#define GLTSYN_CLKO(_chan, _idx) (GLTSYN_CLKO_0(_idx) + ((_chan) * 8)) +#define GLTSYN_TGT_L(_chan, _idx) (GLTSYN_TGT_L_0(_idx) + ((_chan) * 16)) +#define GLTSYN_TGT_H(_chan, _idx) (GLTSYN_TGT_H_0(_idx) + ((_chan) * 16)) +#define GLTSYN_EVNT_L(_chan, _idx) (GLTSYN_EVNT_L_0(_idx) + ((_chan) * 16)) +#define GLTSYN_EVNT_H(_chan, _idx) (GLTSYN_EVNT_H_0(_idx) + ((_chan) * 16)) +#define GLTSYN_EVNT_H_IDX_MAX 3 + +/* Pin definitions for PTP */ +#define ICE_N_PINS_MAX 6 +#define ICE_SMA_PINS_NUM 4 +#define ICE_PIN_DESC_ARR_LEN(_arr) (sizeof(_arr) / \ + sizeof(struct ice_ptp_pin_desc)) + +/** + * struct ice_ptp_pin_desc - hardware pin description data + * @name_idx: index of the name of pin in ice_pin_names + * @gpio: the associated GPIO input and output pins + * @delay: input and output signal delays in nanoseconds + * + * Structure describing a PTP-capable GPIO pin that extends ptp_pin_desc array + * for the device. Device families have separate sets of available pins with + * varying restrictions. 
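The relocated per-channel macros are simple stride arithmetic over a per-timer base register: the AUX/CLKO registers step by 8 bytes per channel and the TGT/EVNT pairs by 16. A quick stand-alone check of that arithmetic; the base addresses below are invented purely for the demonstration, since the real GLTSYN_*_0() values come from the hardware register headers:

#include <stdio.h>

/* Made-up bases, only to show the stride math used by the
 * GLTSYN_*(chan, idx) macros above.
 */
#define GLTSYN_CLKO_0(idx)	(0x1000 + ((idx) * 4))
#define GLTSYN_TGT_L_0(idx)	(0x2000 + ((idx) * 4))

#define GLTSYN_CLKO(chan, idx)	(GLTSYN_CLKO_0(idx) + ((chan) * 8))
#define GLTSYN_TGT_L(chan, idx)	(GLTSYN_TGT_L_0(idx) + ((chan) * 16))

int main(void)
{
	for (unsigned int chan = 0; chan < 4; chan++)
		printf("chan %u: CLKO 0x%08x TGT_L 0x%08x\n", chan,
		       (unsigned int)GLTSYN_CLKO(chan, 0),
		       (unsigned int)GLTSYN_TGT_L(chan, 0));
	return 0;
}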
+ */ +struct ice_ptp_pin_desc { + int name_idx; + int gpio[2]; + unsigned int delay[2]; +}; + /** * struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK * @state: current state of PTP state machine * @tx_interrupt_mode: the TX interrupt mode for the PTP clock * @port: data for the PHY port initialization procedure - * @ports_owner: data for the auxiliary driver owner * @work: delayed work function for periodic tasks * @cached_phc_time: a cached copy of the PHC time for timestamp extension * @cached_phc_jiffies: jiffies when cached_phc_time was last updated - * @ext_ts_chan: the external timestamp channel in use - * @ext_ts_irq: the external timestamp IRQ in use * @kworker: kwork thread for handling periodic work - * @perout_channels: periodic output data + * @ext_ts_irq: the external timestamp IRQ in use + * @pin_desc: structure defining pins + * @ice_pin_desc: internal structure describing pin relations + * @perout_rqs: cached periodic output requests + * @extts_rqs: cached external timestamp requests * @info: structure defining PTP hardware capabilities * @clock: pointer to registered PTP clock device * @tstamp_config: hardware timestamping configuration @@ -241,14 +251,15 @@ struct ice_ptp { enum ice_ptp_state state; enum ice_ptp_tx_interrupt tx_interrupt_mode; struct ice_ptp_port port; - struct ice_ptp_port_owner ports_owner; struct kthread_delayed_work work; u64 cached_phc_time; unsigned long cached_phc_jiffies; - u8 ext_ts_chan; - u8 ext_ts_irq; struct kthread_worker *kworker; - struct ice_perout_channel perout_channels[GLTSYN_TGT_H_IDX_MAX]; + u8 ext_ts_irq; + struct ptp_pin_desc pin_desc[ICE_N_PINS_MAX]; + const struct ice_ptp_pin_desc *ice_pin_desc; + struct ptp_perout_request perout_rqs[GLTSYN_TGT_H_IDX_MAX]; + struct ptp_extts_request extts_rqs[GLTSYN_EVNT_H_IDX_MAX]; struct ptp_clock_info info; struct ptp_clock *clock; struct hwtstamp_config tstamp_config; @@ -279,27 +290,6 @@ struct ice_ptp { #define FIFO_EMPTY BIT(2) #define FIFO_OK 0xFF #define ICE_PTP_FIFO_NUM_CHECKS 5 -/* Per-channel register definitions */ -#define GLTSYN_AUX_OUT(_chan, _idx) (GLTSYN_AUX_OUT_0(_idx) + ((_chan) * 8)) -#define GLTSYN_AUX_IN(_chan, _idx) (GLTSYN_AUX_IN_0(_idx) + ((_chan) * 8)) -#define GLTSYN_CLKO(_chan, _idx) (GLTSYN_CLKO_0(_idx) + ((_chan) * 8)) -#define GLTSYN_TGT_L(_chan, _idx) (GLTSYN_TGT_L_0(_idx) + ((_chan) * 16)) -#define GLTSYN_TGT_H(_chan, _idx) (GLTSYN_TGT_H_0(_idx) + ((_chan) * 16)) -#define GLTSYN_EVNT_L(_chan, _idx) (GLTSYN_EVNT_L_0(_idx) + ((_chan) * 16)) -#define GLTSYN_EVNT_H(_chan, _idx) (GLTSYN_EVNT_H_0(_idx) + ((_chan) * 16)) -#define GLTSYN_EVNT_H_IDX_MAX 3 - -/* Pin definitions for PTP PPS out */ -#define PPS_CLK_GEN_CHAN 3 -#define PPS_CLK_SRC_CHAN 2 -#define PPS_PIN_INDEX 5 -#define TIME_SYNC_PIN_INDEX 4 -#define N_EXT_TS_E810 3 -#define N_PER_OUT_E810 4 -#define N_PER_OUT_E810T 3 -#define N_PER_OUT_NO_SMA_E810T 2 -#define N_EXT_TS_NO_SMA_E810T 2 -#define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4)) #if IS_ENABLED(CONFIG_PTP_1588_CLOCK) int ice_ptp_clock_index(struct ice_pf *pf); @@ -313,6 +303,9 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb); void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx); void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx); enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf); +irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf); +u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf, + struct ptp_system_timestamp *sts); u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc, 
const struct ice_pkt_ctx *pkt_ctx); @@ -321,7 +314,7 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type); void ice_ptp_init(struct ice_pf *pf); void ice_ptp_release(struct ice_pf *pf); -void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup); +void ice_ptp_link_change(struct ice_pf *pf, bool linkup); #else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr) { @@ -351,6 +344,17 @@ static inline bool ice_ptp_process_ts(struct ice_pf *pf) return true; } +static inline irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf) +{ + return IRQ_HANDLED; +} + +static inline u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf, + struct ptp_system_timestamp *sts) +{ + return 0; +} + static inline u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc, const struct ice_pkt_ctx *pkt_ctx) @@ -369,7 +373,7 @@ static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf, } static inline void ice_ptp_init(struct ice_pf *pf) { } static inline void ice_ptp_release(struct ice_pf *pf) { } -static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) +static inline void ice_ptp_link_change(struct ice_pf *pf, bool linkup) { } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h index 2c4dab0c48ab..003cdfada3ca 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h @@ -9,6 +9,276 @@ */ /* Constants defined for the PTP 1588 clock hardware. */ +const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES] = { + [ETH56G_PHY_REG_PTP] = { + .base_addr = 0x092000, + .step = 0x98, + }, + [ETH56G_PHY_MEM_PTP] = { + .base_addr = 0x093000, + .step = 0x200, + }, + [ETH56G_PHY_REG_XPCS] = { + .base_addr = 0x000000, + .step = 0x21000, + }, + [ETH56G_PHY_REG_MAC] = { + .base_addr = 0x085000, + .step = 0x1000, + }, + [ETH56G_PHY_REG_GPCS] = { + .base_addr = 0x084000, + .step = 0x400, + }, +}; + +const +struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = { + [ICE_ETH56G_LNK_SPD_1G] = { + .tx_mode = { .def = 6, }, + .rx_mode = { .def = 6, }, + .blks_per_clk = 1, + .blktime = 0x4000, /* 32 */ + .tx_offset = { + .serdes = 0x6666, /* 51.2 */ + .no_fec = 0xd066, /* 104.2 */ + .sfd = 0x3000, /* 24 */ + .onestep = 0x30000 /* 384 */ + }, + .rx_offset = { + .serdes = 0xffffc59a, /* -29.2 */ + .no_fec = 0xffff0a80, /* -122.75 */ + .sfd = 0x2c00, /* 22 */ + .bs_ds = 0x19a /* 0.8 */ + /* Dynamic bitslip 0 equals to 10 */ + } + }, + [ICE_ETH56G_LNK_SPD_2_5G] = { + .tx_mode = { .def = 6, }, + .rx_mode = { .def = 6, }, + .blks_per_clk = 1, + .blktime = 0x199a, /* 12.8 */ + .tx_offset = { + .serdes = 0x28f6, /* 20.48 */ + .no_fec = 0x53b8, /* 41.86 */ + .sfd = 0x1333, /* 9.6 */ + .onestep = 0x13333 /* 153.6 */ + }, + .rx_offset = { + .serdes = 0xffffe8a4, /* -11.68 */ + .no_fec = 0xffff9a76, /* -50.77 */ + .sfd = 0xf33, /* 7.6 */ + .bs_ds = 0xa4 /* 0.32 */ + } + }, + [ICE_ETH56G_LNK_SPD_10G] = { + .tx_mode = { .def = 1, }, + .rx_mode = { .def = 1, }, + .blks_per_clk = 1, + .blktime = 0x666, /* 3.2 */ + .tx_offset = { + .serdes = 0x234c, /* 17.6484848 */ + .no_fec = 0x8e80, /* 71.25 */ + .fc = 0xb4a4, /* 90.32 */ + .sfd = 0x4a4, /* 2.32 */ + .onestep = 0x4ccd /* 38.4 */ + }, + .rx_offset = { + .serdes = 0xffffeb27, /* -10.42424 */ + .no_fec = 0xffffcccd, /* -25.6 */ + .fc = 0xfffc557b, /* -469.26 */ + .sfd = 0x4a4, /* 2.32 */ + .bs_ds = 0x32 /* 0.0969697 */ + } + }, + 
[ICE_ETH56G_LNK_SPD_25G] = { + .tx_mode = { + .def = 1, + .rs = 4 + }, + .tx_mk_dly = 4, + .tx_cw_dly = { + .def = 1, + .onestep = 6 + }, + .rx_mode = { + .def = 1, + .rs = 4 + }, + .rx_mk_dly = { + .def = 1, + .rs = 1 + }, + .rx_cw_dly = { + .def = 1, + .rs = 1 + }, + .blks_per_clk = 1, + .blktime = 0x28f, /* 1.28 */ + .mktime = 0x147b, /* 10.24, only if RS-FEC enabled */ + .tx_offset = { + .serdes = 0xe1e, /* 7.0593939 */ + .no_fec = 0x3857, /* 28.17 */ + .fc = 0x48c3, /* 36.38 */ + .rs = 0x8100, /* 64.5 */ + .sfd = 0x1dc, /* 0.93 */ + .onestep = 0x1eb8 /* 15.36 */ + }, + .rx_offset = { + .serdes = 0xfffff7a9, /* -4.1697 */ + .no_fec = 0xffffe71a, /* -12.45 */ + .fc = 0xfffe894d, /* -187.35 */ + .rs = 0xfffff8cd, /* -3.6 */ + .sfd = 0x1dc, /* 0.93 */ + .bs_ds = 0x14 /* 0.0387879, RS-FEC 0 */ + } + }, + [ICE_ETH56G_LNK_SPD_40G] = { + .tx_mode = { .def = 3 }, + .tx_mk_dly = 4, + .tx_cw_dly = { + .def = 1, + .onestep = 6 + }, + .rx_mode = { .def = 4 }, + .rx_mk_dly = { .def = 1 }, + .rx_cw_dly = { .def = 1 }, + .blktime = 0x333, /* 1.6 */ + .mktime = 0xccd, /* 6.4 */ + .tx_offset = { + .serdes = 0x234c, /* 17.6484848 */ + .no_fec = 0x5a8a, /* 45.27 */ + .fc = 0x81b8, /* 64.86 */ + .sfd = 0x4a4, /* 2.32 */ + .onestep = 0x1333 /* 9.6 */ + }, + .rx_offset = { + .serdes = 0xffffeb27, /* -10.42424 */ + .no_fec = 0xfffff594, /* -5.21 */ + .fc = 0xfffe3080, /* -231.75 */ + .sfd = 0x4a4, /* 2.32 */ + .bs_ds = 0xccd /* 6.4 */ + } + }, + [ICE_ETH56G_LNK_SPD_50G] = { + .tx_mode = { .def = 5 }, + .tx_mk_dly = 4, + .tx_cw_dly = { + .def = 1, + .onestep = 6 + }, + .rx_mode = { .def = 5 }, + .rx_mk_dly = { .def = 1 }, + .rx_cw_dly = { .def = 1 }, + .blktime = 0x28f, /* 1.28 */ + .mktime = 0xa3d, /* 5.12 */ + .tx_offset = { + .serdes = 0x13ba, /* 9.86353 */ + .rs = 0x5400, /* 42 */ + .sfd = 0xe6, /* 0.45 */ + .onestep = 0xf5c /* 7.68 */ + }, + .rx_offset = { + .serdes = 0xfffff7e8, /* -4.04706 */ + .rs = 0xfffff994, /* -3.21 */ + .sfd = 0xe6 /* 0.45 */ + } + }, + [ICE_ETH56G_LNK_SPD_50G2] = { + .tx_mode = { + .def = 3, + .rs = 2 + }, + .tx_mk_dly = 4, + .tx_cw_dly = { + .def = 1, + .onestep = 6 + }, + .rx_mode = { + .def = 4, + .rs = 1 + }, + .rx_mk_dly = { .def = 1 }, + .rx_cw_dly = { .def = 1 }, + .blktime = 0x28f, /* 1.28 */ + .mktime = 0xa3d, /* 5.12 */ + .tx_offset = { + .serdes = 0xe1e, /* 7.0593939 */ + .no_fec = 0x3d33, /* 30.6 */ + .rs = 0x5057, /* 40.17 */ + .sfd = 0x1dc, /* 0.93 */ + .onestep = 0xf5c /* 7.68 */ + }, + .rx_offset = { + .serdes = 0xfffff7a9, /* -4.1697 */ + .no_fec = 0xfffff8cd, /* -3.6 */ + .rs = 0xfffff21a, /* -6.95 */ + .sfd = 0x1dc, /* 0.93 */ + .bs_ds = 0xa3d /* 5.12, RS-FEC 0x633 (3.1) */ + } + }, + [ICE_ETH56G_LNK_SPD_100G] = { + .tx_mode = { + .def = 3, + .rs = 2 + }, + .tx_mk_dly = 10, + .tx_cw_dly = { + .def = 3, + .onestep = 6 + }, + .rx_mode = { + .def = 4, + .rs = 1 + }, + .rx_mk_dly = { .def = 5 }, + .rx_cw_dly = { .def = 5 }, + .blks_per_clk = 1, + .blktime = 0x148, /* 0.64 */ + .mktime = 0x199a, /* 12.8 */ + .tx_offset = { + .serdes = 0xe1e, /* 7.0593939 */ + .no_fec = 0x67ec, /* 51.96 */ + .rs = 0x44fb, /* 34.49 */ + .sfd = 0x1dc, /* 0.93 */ + .onestep = 0xf5c /* 7.68 */ + }, + .rx_offset = { + .serdes = 0xfffff7a9, /* -4.1697 */ + .no_fec = 0xfffff5a9, /* -5.17 */ + .rs = 0xfffff6e6, /* -4.55 */ + .sfd = 0x1dc, /* 0.93 */ + .bs_ds = 0x199a /* 12.8, RS-FEC 0x31b (1.552) */ + } + }, + [ICE_ETH56G_LNK_SPD_100G2] = { + .tx_mode = { .def = 5 }, + .tx_mk_dly = 10, + .tx_cw_dly = { + .def = 3, + .onestep = 6 + }, + .rx_mode = { .def = 5 }, + .rx_mk_dly = { .def = 5 }, + 
.rx_cw_dly = { .def = 5 }, + .blks_per_clk = 1, + .blktime = 0x148, /* 0.64 */ + .mktime = 0x199a, /* 12.8 */ + .tx_offset = { + .serdes = 0x13ba, /* 9.86353 */ + .rs = 0x460a, /* 35.02 */ + .sfd = 0xe6, /* 0.45 */ + .onestep = 0xf5c /* 7.68 */ + }, + .rx_offset = { + .serdes = 0xfffff7e8, /* -4.04706 */ + .rs = 0xfffff548, /* -5.36 */ + .sfd = 0xe6, /* 0.45 */ + .bs_ds = 0x303 /* 1.506 */ + } + } +}; + /* struct ice_time_ref_info_e82x * * E822 hardware can use different sources as the reference for the PTP @@ -19,15 +289,13 @@ * reference. See the struct ice_time_ref_info_e82x for information about the * meaning of each constant. */ -const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ] = { +const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = { /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */ { /* pll_freq */ 823437500, /* 823.4375 MHz PLL */ /* nominal_incval */ 0x136e44fabULL, - /* pps_delay */ - 11, }, /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */ @@ -36,8 +304,6 @@ const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ] = { 783360000, /* 783.36 MHz */ /* nominal_incval */ 0x146cc2177ULL, - /* pps_delay */ - 12, }, /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */ @@ -46,8 +312,6 @@ const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ] = { 796875000, /* 796.875 MHz */ /* nominal_incval */ 0x141414141ULL, - /* pps_delay */ - 12, }, /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */ @@ -56,8 +320,6 @@ const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ] = { 816000000, /* 816 MHz */ /* nominal_incval */ 0x139b9b9baULL, - /* pps_delay */ - 12, }, /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */ @@ -66,8 +328,6 @@ const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ] = { 830078125, /* 830.78125 MHz */ /* nominal_incval */ 0x134679aceULL, - /* pps_delay */ - 11, }, /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */ @@ -76,8 +336,6 @@ const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ] = { 783360000, /* 783.36 MHz */ /* nominal_incval */ 0x146cc2177ULL, - /* pps_delay */ - 12, }, }; @@ -155,6 +413,93 @@ const struct ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = { }, }; +const +struct ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ] = { + /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */ + { + /* tspll_ck_refclkfreq */ + 0x19, + /* tspll_ndivratio */ + 1, + /* tspll_fbdiv_intgr */ + 320, + /* tspll_fbdiv_frac */ + 0, + /* ref1588_ck_div */ + 0, + }, + + /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */ + { + /* tspll_ck_refclkfreq */ + 0x29, + /* tspll_ndivratio */ + 3, + /* tspll_fbdiv_intgr */ + 195, + /* tspll_fbdiv_frac */ + 1342177280UL, + /* ref1588_ck_div */ + 0, + }, + + /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */ + { + /* tspll_ck_refclkfreq */ + 0x3E, + /* tspll_ndivratio */ + 2, + /* tspll_fbdiv_intgr */ + 128, + /* tspll_fbdiv_frac */ + 0, + /* ref1588_ck_div */ + 0, + }, + + /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */ + { + /* tspll_ck_refclkfreq */ + 0x33, + /* tspll_ndivratio */ + 3, + /* tspll_fbdiv_intgr */ + 156, + /* tspll_fbdiv_frac */ + 1073741824UL, + /* ref1588_ck_div */ + 0, + }, + + /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */ + { + /* tspll_ck_refclkfreq */ + 0x1F, + /* tspll_ndivratio */ + 5, + /* tspll_fbdiv_intgr */ + 256, + /* tspll_fbdiv_frac */ + 0, + /* ref1588_ck_div */ + 0, + }, + + /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */ + { + /* tspll_ck_refclkfreq */ + 0x52, + /* tspll_ndivratio */ + 3, + /* tspll_fbdiv_intgr */ + 97, + 
/* tspll_fbdiv_frac */ + 2818572288UL, + /* ref1588_ck_div */ + 0, + }, +}; + /* struct ice_vernier_info_e82x * * E822 hardware calibrates the delay of the timestamp indication from the @@ -359,9 +704,9 @@ const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD] = { /* rx_desk_rsgb_par */ 644531250, /* 644.53125 MHz Reed Solomon gearbox */ /* tx_desk_rsgb_pcs */ - 644531250, /* 644.53125 MHz Reed Solomon gearbox */ + 390625000, /* 390.625 MHz Reed Solomon gearbox */ /* rx_desk_rsgb_pcs */ - 644531250, /* 644.53125 MHz Reed Solomon gearbox */ + 390625000, /* 390.625 MHz Reed Solomon gearbox */ /* tx_fixed_delay */ 1620, /* pmd_adj_divisor */ diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index 187ce9b54e1a..ccac84eb34c9 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -2,6 +2,7 @@ /* Copyright (C) 2021, Intel Corporation. */ #include <linux/delay.h> +#include <linux/iopoll.h> #include "ice_common.h" #include "ice_ptp_hw.h" #include "ice_ptp_consts.h" @@ -33,7 +34,6 @@ static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_inputs[] = { ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, { "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS, ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, - { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, 0, }, }; static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_inputs[] = { @@ -51,7 +51,6 @@ static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_inputs[] = { ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common }, { "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS, ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz }, - { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, }, }; static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_outputs[] = { @@ -227,40 +226,660 @@ static u64 ice_ptp_read_src_incval(struct ice_hw *hw) } /** - * ice_ptp_src_cmd - Prepare source timer for a timer command - * @hw: pointer to HW structure + * ice_read_cgu_reg_e82x - Read a CGU register + * @hw: pointer to the HW struct + * @addr: Register address to read + * @val: storage for register value read + * + * Read the contents of a register of the Clock Generation Unit. Only + * applicable to E822 devices. + * + * Return: 0 on success, other error codes when failed to read from CGU + */ +static int ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val) +{ + struct ice_sbq_msg_input cgu_msg = { + .opcode = ice_sbq_msg_rd, + .dest_dev = ice_sbq_dev_cgu, + .msg_addr_low = addr + }; + int err; + + err = ice_sbq_rw_reg(hw, &cgu_msg, ICE_AQ_FLAG_RD); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n", + addr, err); + return err; + } + + *val = cgu_msg.data; + + return 0; +} + +/** + * ice_write_cgu_reg_e82x - Write a CGU register + * @hw: pointer to the HW struct + * @addr: Register address to write + * @val: value to write into the register + * + * Write the specified value to a register of the Clock Generation Unit. Only + * applicable to E822 devices. 
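For the e82x_time_ref table shown earlier, each nominal_incval appears to be the PLL period expressed in 32.32 fixed-point nanoseconds, i.e. (10^9 << 32) / pll_freq. The stand-alone sketch below (an editorial illustration of that inferred relation, not part of the patch) reproduces the 25 MHz entry:

#include <stdio.h>
#include <stdint.h>

/* nominal_incval: PLL period in 32.32 fixed-point nanoseconds (rounded). */
static uint64_t nominal_incval(uint64_t pll_freq_hz)
{
	return ((1000000000ULL << 32) + pll_freq_hz / 2) / pll_freq_hz;
}

int main(void)
{
	/* 25 MHz TIME_REF entry: 823.4375 MHz PLL -> 0x136e44fab in the table */
	printf("0x%llx\n", (unsigned long long)nominal_incval(823437500ULL));
	return 0;
}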
+ * + * Return: 0 on success, other error codes when failed to write to CGU + */ +static int ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val) +{ + struct ice_sbq_msg_input cgu_msg = { + .opcode = ice_sbq_msg_wr, + .dest_dev = ice_sbq_dev_cgu, + .msg_addr_low = addr, + .data = val + }; + int err; + + err = ice_sbq_rw_reg(hw, &cgu_msg, ICE_AQ_FLAG_RD); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n", + addr, err); + return err; + } + + return err; +} + +/** + * ice_clk_freq_str - Convert time_ref_freq to string + * @clk_freq: Clock frequency + * + * Return: specified TIME_REF clock frequency converted to a string + */ +static const char *ice_clk_freq_str(enum ice_time_ref_freq clk_freq) +{ + switch (clk_freq) { + case ICE_TIME_REF_FREQ_25_000: + return "25 MHz"; + case ICE_TIME_REF_FREQ_122_880: + return "122.88 MHz"; + case ICE_TIME_REF_FREQ_125_000: + return "125 MHz"; + case ICE_TIME_REF_FREQ_153_600: + return "153.6 MHz"; + case ICE_TIME_REF_FREQ_156_250: + return "156.25 MHz"; + case ICE_TIME_REF_FREQ_245_760: + return "245.76 MHz"; + default: + return "Unknown"; + } +} + +/** + * ice_clk_src_str - Convert time_ref_src to string + * @clk_src: Clock source + * + * Return: specified clock source converted to its string name + */ +static const char *ice_clk_src_str(enum ice_clk_src clk_src) +{ + switch (clk_src) { + case ICE_CLK_SRC_TCXO: + return "TCXO"; + case ICE_CLK_SRC_TIME_REF: + return "TIME_REF"; + default: + return "Unknown"; + } +} + +/** + * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit + * @hw: pointer to the HW struct + * @clk_freq: Clock frequency to program + * @clk_src: Clock source to select (TIME_REF, or TCXO) + * + * Configure the Clock Generation Unit with the desired clock frequency and + * time reference, enabling the PLL which drives the PTP hardware clock. + * + * Return: + * * %0 - success + * * %-EINVAL - input parameters are incorrect + * * %-EBUSY - failed to lock TS PLL + * * %other - CGU read/write failure + */ +static int ice_cfg_cgu_pll_e82x(struct ice_hw *hw, + enum ice_time_ref_freq clk_freq, + enum ice_clk_src clk_src) +{ + union tspll_ro_bwm_lf bwm_lf; + union nac_cgu_dword19 dw19; + union nac_cgu_dword22 dw22; + union nac_cgu_dword24 dw24; + union nac_cgu_dword9 dw9; + int err; + + if (clk_freq >= NUM_ICE_TIME_REF_FREQ) { + dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n", + clk_freq); + return -EINVAL; + } + + if (clk_src >= NUM_ICE_CLK_SRC) { + dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n", + clk_src); + return -EINVAL; + } + + if (clk_src == ICE_CLK_SRC_TCXO && + clk_freq != ICE_TIME_REF_FREQ_25_000) { + dev_warn(ice_hw_to_dev(hw), + "TCXO only supports 25 MHz frequency\n"); + return -EINVAL; + } + + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val); + if (err) + return err; + + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val); + if (err) + return err; + + err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); + if (err) + return err; + + /* Log the current clock configuration */ + ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", + str_enabled_disabled(dw24.ts_pll_enable), + ice_clk_src_str(dw24.time_ref_sel), + ice_clk_freq_str(dw9.time_ref_freq_sel), + bwm_lf.plllock_true_lock_cri ? 
"locked" : "unlocked"); + + /* Disable the PLL before changing the clock source or frequency */ + if (dw24.ts_pll_enable) { + dw24.ts_pll_enable = 0; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); + if (err) + return err; + } + + /* Set the frequency */ + dw9.time_ref_freq_sel = clk_freq; + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val); + if (err) + return err; + + /* Configure the TS PLL feedback divisor */ + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val); + if (err) + return err; + + dw19.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div; + dw19.tspll_ndivratio = 1; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val); + if (err) + return err; + + /* Configure the TS PLL post divisor */ + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val); + if (err) + return err; + + dw22.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div; + dw22.time1588clk_sel_div2 = 0; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val); + if (err) + return err; + + /* Configure the TS PLL pre divisor and clock source */ + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val); + if (err) + return err; + + dw24.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div; + dw24.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div; + dw24.time_ref_sel = clk_src; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); + if (err) + return err; + + /* Finally, enable the PLL */ + dw24.ts_pll_enable = 1; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); + if (err) + return err; + + /* Wait to verify if the PLL locks */ + usleep_range(1000, 5000); + + err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); + if (err) + return err; + + if (!bwm_lf.plllock_true_lock_cri) { + dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n"); + return -EBUSY; + } + + /* Log the current clock configuration */ + ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", + str_enabled_disabled(dw24.ts_pll_enable), + ice_clk_src_str(dw24.time_ref_sel), + ice_clk_freq_str(dw9.time_ref_freq_sel), + bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked"); + + return 0; +} + +/** + * ice_cfg_cgu_pll_e825c - Configure the Clock Generation Unit for E825-C + * @hw: pointer to the HW struct + * @clk_freq: Clock frequency to program + * @clk_src: Clock source to select (TIME_REF, or TCXO) + * + * Configure the Clock Generation Unit with the desired clock frequency and + * time reference, enabling the PLL which drives the PTP hardware clock. 
+ * + * Return: + * * %0 - success + * * %-EINVAL - input parameters are incorrect + * * %-EBUSY - failed to lock TS PLL + * * %other - CGU read/write failure + */ +static int ice_cfg_cgu_pll_e825c(struct ice_hw *hw, + enum ice_time_ref_freq clk_freq, + enum ice_clk_src clk_src) +{ + union tspll_ro_lock_e825c ro_lock; + union nac_cgu_dword16_e825c dw16; + union nac_cgu_dword23_e825c dw23; + union nac_cgu_dword19 dw19; + union nac_cgu_dword22 dw22; + union nac_cgu_dword24 dw24; + union nac_cgu_dword9 dw9; + int err; + + if (clk_freq >= NUM_ICE_TIME_REF_FREQ) { + dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n", + clk_freq); + return -EINVAL; + } + + if (clk_src >= NUM_ICE_CLK_SRC) { + dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n", + clk_src); + return -EINVAL; + } + + if (clk_src == ICE_CLK_SRC_TCXO && + clk_freq != ICE_TIME_REF_FREQ_156_250) { + dev_warn(ice_hw_to_dev(hw), + "TCXO only supports 156.25 MHz frequency\n"); + return -EINVAL; + } + + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val); + if (err) + return err; + + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val); + if (err) + return err; + + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD16_E825C, &dw16.val); + if (err) + return err; + + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, &dw23.val); + if (err) + return err; + + err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_LOCK_E825C, &ro_lock.val); + if (err) + return err; + + /* Log the current clock configuration */ + ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", + str_enabled_disabled(dw24.ts_pll_enable), + ice_clk_src_str(dw23.time_ref_sel), + ice_clk_freq_str(dw9.time_ref_freq_sel), + ro_lock.plllock_true_lock_cri ? "locked" : "unlocked"); + + /* Disable the PLL before changing the clock source or frequency */ + if (dw23.ts_pll_enable) { + dw23.ts_pll_enable = 0; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, + dw23.val); + if (err) + return err; + } + + /* Set the frequency */ + dw9.time_ref_freq_sel = clk_freq; + + /* Enable the correct receiver */ + if (clk_src == ICE_CLK_SRC_TCXO) { + dw9.time_ref_en = 0; + dw9.clk_eref0_en = 1; + } else { + dw9.time_ref_en = 1; + dw9.clk_eref0_en = 0; + } + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val); + if (err) + return err; + + /* Choose the referenced frequency */ + dw16.tspll_ck_refclkfreq = + e825c_cgu_params[clk_freq].tspll_ck_refclkfreq; + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD16_E825C, dw16.val); + if (err) + return err; + + /* Configure the TS PLL feedback divisor */ + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val); + if (err) + return err; + + dw19.tspll_fbdiv_intgr = + e825c_cgu_params[clk_freq].tspll_fbdiv_intgr; + dw19.tspll_ndivratio = + e825c_cgu_params[clk_freq].tspll_ndivratio; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val); + if (err) + return err; + + /* Configure the TS PLL post divisor */ + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val); + if (err) + return err; + + /* These two are constant for E825C */ + dw22.time1588clk_div = 5; + dw22.time1588clk_sel_div2 = 0; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val); + if (err) + return err; + + /* Configure the TS PLL pre divisor and clock source */ + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, &dw23.val); + if (err) + return err; + + dw23.ref1588_ck_div = + e825c_cgu_params[clk_freq].ref1588_ck_div; + dw23.time_ref_sel = clk_src; + + err = ice_write_cgu_reg_e82x(hw, 
NAC_CGU_DWORD23_E825C, dw23.val); + if (err) + return err; + + dw24.tspll_fbdiv_frac = + e825c_cgu_params[clk_freq].tspll_fbdiv_frac; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); + if (err) + return err; + + /* Finally, enable the PLL */ + dw23.ts_pll_enable = 1; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, dw23.val); + if (err) + return err; + + /* Wait to verify if the PLL locks */ + usleep_range(1000, 5000); + + err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_LOCK_E825C, &ro_lock.val); + if (err) + return err; + + if (!ro_lock.plllock_true_lock_cri) { + dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n"); + return -EBUSY; + } + + /* Log the current clock configuration */ + ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", + str_enabled_disabled(dw24.ts_pll_enable), + ice_clk_src_str(dw23.time_ref_sel), + ice_clk_freq_str(dw9.time_ref_freq_sel), + ro_lock.plllock_true_lock_cri ? "locked" : "unlocked"); + + return 0; +} + +#define ICE_ONE_PPS_OUT_AMP_MAX 3 + +/** + * ice_cgu_cfg_pps_out - Configure 1PPS output from CGU + * @hw: pointer to the HW struct + * @enable: true to enable 1PPS output, false to disable it + * + * Return: 0 on success, other negative error code when CGU read/write failed + */ +int ice_cgu_cfg_pps_out(struct ice_hw *hw, bool enable) +{ + union nac_cgu_dword9 dw9; + int err; + + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val); + if (err) + return err; + + dw9.one_pps_out_en = enable; + dw9.one_pps_out_amp = enable * ICE_ONE_PPS_OUT_AMP_MAX; + return ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val); +} + +/** + * ice_cfg_cgu_pll_dis_sticky_bits_e82x - disable TS PLL sticky bits + * @hw: pointer to the HW struct + * + * Configure the Clock Generation Unit TS PLL sticky bits so they don't latch on + * losing TS PLL lock, but always show current state. + * + * Return: 0 on success, other error codes when failed to read/write CGU + */ +static int ice_cfg_cgu_pll_dis_sticky_bits_e82x(struct ice_hw *hw) +{ + union tspll_cntr_bist_settings cntr_bist; + int err; + + err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS, + &cntr_bist.val); + if (err) + return err; + + /* Disable sticky lock detection so lock err reported is accurate */ + cntr_bist.i_plllock_sel_0 = 0; + cntr_bist.i_plllock_sel_1 = 0; + + return ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS, + cntr_bist.val); +} + +/** + * ice_cfg_cgu_pll_dis_sticky_bits_e825c - disable TS PLL sticky bits for E825-C + * @hw: pointer to the HW struct + * + * Configure the Clock Generation Unit TS PLL sticky bits so they don't latch on + * losing TS PLL lock, but always show current state. + * + * Return: 0 on success, other error codes when failed to read/write CGU + */ +static int ice_cfg_cgu_pll_dis_sticky_bits_e825c(struct ice_hw *hw) +{ + union tspll_bw_tdc_e825c bw_tdc; + int err; + + err = ice_read_cgu_reg_e82x(hw, TSPLL_BW_TDC_E825C, &bw_tdc.val); + if (err) + return err; + + bw_tdc.i_plllock_sel_1_0 = 0; + + return ice_write_cgu_reg_e82x(hw, TSPLL_BW_TDC_E825C, bw_tdc.val); +} + +/** + * ice_init_cgu_e82x - Initialize CGU with settings from firmware + * @hw: pointer to the HW structure + * + * Initialize the Clock Generation Unit of the E822 device. 
+ * + * Return: 0 on success, other error codes when failed to read/write/cfg CGU + */ +static int ice_init_cgu_e82x(struct ice_hw *hw) +{ + struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info; + int err; + + /* Disable sticky lock detection so lock err reported is accurate */ + if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) + err = ice_cfg_cgu_pll_dis_sticky_bits_e825c(hw); + else + err = ice_cfg_cgu_pll_dis_sticky_bits_e82x(hw); + if (err) + return err; + + /* Configure the CGU PLL using the parameters from the function + * capabilities. + */ + if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) + err = ice_cfg_cgu_pll_e825c(hw, ts_info->time_ref, + (enum ice_clk_src)ts_info->clk_src); + else + err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref, + (enum ice_clk_src)ts_info->clk_src); + + return err; +} + +/** + * ice_ptp_tmr_cmd_to_src_reg - Convert to source timer command value + * @hw: pointer to HW struct * @cmd: Timer command * - * Prepare the source timer for an upcoming timer sync command. + * Return: the source timer command register value for the given PTP timer + * command. */ -void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) +static u32 ice_ptp_tmr_cmd_to_src_reg(struct ice_hw *hw, + enum ice_ptp_tmr_cmd cmd) { - u32 cmd_val; - u8 tmr_idx; + u32 cmd_val, tmr_idx; + + switch (cmd) { + case ICE_PTP_INIT_TIME: + cmd_val = GLTSYN_CMD_INIT_TIME; + break; + case ICE_PTP_INIT_INCVAL: + cmd_val = GLTSYN_CMD_INIT_INCVAL; + break; + case ICE_PTP_ADJ_TIME: + cmd_val = GLTSYN_CMD_ADJ_TIME; + break; + case ICE_PTP_ADJ_TIME_AT_TIME: + cmd_val = GLTSYN_CMD_ADJ_INIT_TIME; + break; + case ICE_PTP_NOP: + case ICE_PTP_READ_TIME: + cmd_val = GLTSYN_CMD_READ_TIME; + break; + default: + dev_warn(ice_hw_to_dev(hw), + "Ignoring unrecognized timer command %u\n", cmd); + cmd_val = 0; + } tmr_idx = ice_get_ptp_src_clock_index(hw); - cmd_val = tmr_idx << SEL_CPK_SRC; + + return tmr_idx << SEL_CPK_SRC | cmd_val; +} + +/** + * ice_ptp_tmr_cmd_to_port_reg- Convert to port timer command value + * @hw: pointer to HW struct + * @cmd: Timer command + * + * Note that some hardware families use a different command register value for + * the PHY ports, while other hardware families use the same register values + * as the source timer. + * + * Return: the PHY port timer command register value for the given PTP timer + * command. + */ +static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw, + enum ice_ptp_tmr_cmd cmd) +{ + u32 cmd_val, tmr_idx; + + /* Certain hardware families share the same register values for the + * port register and source timer register. 
+ */ + switch (hw->mac_type) { + case ICE_MAC_E810: + case ICE_MAC_E830: + return ice_ptp_tmr_cmd_to_src_reg(hw, cmd) & TS_CMD_MASK_E810; + default: + break; + } switch (cmd) { case ICE_PTP_INIT_TIME: - cmd_val |= GLTSYN_CMD_INIT_TIME; + cmd_val = PHY_CMD_INIT_TIME; break; case ICE_PTP_INIT_INCVAL: - cmd_val |= GLTSYN_CMD_INIT_INCVAL; + cmd_val = PHY_CMD_INIT_INCVAL; break; case ICE_PTP_ADJ_TIME: - cmd_val |= GLTSYN_CMD_ADJ_TIME; + cmd_val = PHY_CMD_ADJ_TIME; break; case ICE_PTP_ADJ_TIME_AT_TIME: - cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME; + cmd_val = PHY_CMD_ADJ_TIME_AT_TIME; break; case ICE_PTP_READ_TIME: - cmd_val |= GLTSYN_CMD_READ_TIME; + cmd_val = PHY_CMD_READ_TIME; break; case ICE_PTP_NOP: + cmd_val = 0; break; + default: + dev_warn(ice_hw_to_dev(hw), + "Ignoring unrecognized timer command %u\n", cmd); + cmd_val = 0; } + tmr_idx = ice_get_ptp_src_clock_index(hw); + + return tmr_idx << SEL_PHY_SRC | cmd_val; +} + +/** + * ice_ptp_src_cmd - Prepare source timer for a timer command + * @hw: pointer to HW structure + * @cmd: Timer command + * + * Prepare the source timer for an upcoming timer sync command. + */ +void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) +{ + struct ice_pf *pf = container_of(hw, struct ice_pf, hw); + u32 cmd_val = ice_ptp_tmr_cmd_to_src_reg(hw, cmd); + + if (!ice_is_primary(hw)) + hw = ice_get_primary_hw(pf); + wr32(hw, GLTSYN_CMD, cmd_val); } @@ -274,10 +893,1868 @@ void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) */ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw) { + struct ice_pf *pf = container_of(hw, struct ice_pf, hw); + + if (!ice_is_primary(hw)) + hw = ice_get_primary_hw(pf); + + guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock); wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD); ice_flush(hw); } +/** + * ice_ptp_cfg_sync_delay - Configure PHC to PHY synchronization delay + * @hw: pointer to HW struct + * @delay: delay between PHC and PHY SYNC command execution in nanoseconds + */ +static void ice_ptp_cfg_sync_delay(const struct ice_hw *hw, u32 delay) +{ + wr32(hw, GLTSYN_SYNC_DLAY, delay); + ice_flush(hw); +} + +/* 56G PHY device functions + * + * The following functions operate on devices with the ETH 56G PHY. + */ + +/** + * ice_ptp_get_dest_dev_e825 - get destination PHY for given port number + * @hw: pointer to the HW struct + * @port: destination port + * + * Return: destination sideband queue PHY device. + */ +static enum ice_sbq_dev_id ice_ptp_get_dest_dev_e825(struct ice_hw *hw, + u8 port) +{ + u8 curr_phy, tgt_phy; + + tgt_phy = port >= hw->ptp.ports_per_phy; + curr_phy = hw->lane_num >= hw->ptp.ports_per_phy; + /* In the driver, lanes 4..7 are in fact 0..3 on a second PHY. + * On a single complex E825C, PHY 0 is always destination device phy_0 + * and PHY 1 is phy_0_peer. + * On dual complex E825C, device phy_0 points to PHY on a current + * complex and phy_0_peer to PHY on a different complex. 
+ */ + if ((!ice_is_dual(hw) && tgt_phy == 1) || + (ice_is_dual(hw) && tgt_phy != curr_phy)) + return ice_sbq_dev_phy_0_peer; + else + return ice_sbq_dev_phy_0; +} + +/** + * ice_write_phy_eth56g - Write a PHY port register + * @hw: pointer to the HW struct + * @port: destination port + * @addr: PHY register address + * @val: Value to write + * + * Return: 0 on success, other error codes when failed to write to PHY + */ +static int ice_write_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 val) +{ + struct ice_sbq_msg_input msg = { + .dest_dev = ice_ptp_get_dest_dev_e825(hw, port), + .opcode = ice_sbq_msg_wr, + .msg_addr_low = lower_16_bits(addr), + .msg_addr_high = upper_16_bits(addr), + .data = val + }; + int err; + + err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); + if (err) + ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n", + err); + + return err; +} + +/** + * ice_read_phy_eth56g - Read a PHY port register + * @hw: pointer to the HW struct + * @port: destination port + * @addr: PHY register address + * @val: Value to write + * + * Return: 0 on success, other error codes when failed to read from PHY + */ +static int ice_read_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 *val) +{ + struct ice_sbq_msg_input msg = { + .dest_dev = ice_ptp_get_dest_dev_e825(hw, port), + .opcode = ice_sbq_msg_rd, + .msg_addr_low = lower_16_bits(addr), + .msg_addr_high = upper_16_bits(addr) + }; + int err; + + err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); + if (err) + ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n", + err); + else + *val = msg.data; + + return err; +} + +/** + * ice_phy_res_address_eth56g - Calculate a PHY port register address + * @hw: pointer to the HW struct + * @lane: Lane number to be written + * @res_type: resource type (register/memory) + * @offset: Offset from PHY port register base + * @addr: The result address + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + */ +static int ice_phy_res_address_eth56g(struct ice_hw *hw, u8 lane, + enum eth56g_res_type res_type, + u32 offset, + u32 *addr) +{ + if (res_type >= NUM_ETH56G_PHY_RES) + return -EINVAL; + + /* Lanes 4..7 are in fact 0..3 on a second PHY */ + lane %= hw->ptp.ports_per_phy; + *addr = eth56g_phy_res[res_type].base_addr + + lane * eth56g_phy_res[res_type].step + offset; + + return 0; +} + +/** + * ice_write_port_eth56g - Write a PHY port register + * @hw: pointer to the HW struct + * @offset: PHY register offset + * @port: Port number + * @val: Value to write + * @res_type: resource type (register/memory) + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to write to PHY + */ +static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset, + u32 val, enum eth56g_res_type res_type) +{ + u32 addr; + int err; + + if (port >= hw->ptp.num_lports) + return -EINVAL; + + err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr); + if (err) + return err; + + return ice_write_phy_eth56g(hw, port, addr, val); +} + +/** + * ice_read_port_eth56g - Read a PHY port register + * @hw: pointer to the HW struct + * @offset: PHY register offset + * @port: Port number + * @val: Value to write + * @res_type: resource type (register/memory) + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to read from PHY + */ +static int ice_read_port_eth56g(struct ice_hw *hw, u8 port, u32 offset, + u32 *val, enum eth56g_res_type res_type) +{ + 
u32 addr; + int err; + + if (port >= hw->ptp.num_lports) + return -EINVAL; + + err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr); + if (err) + return err; + + return ice_read_phy_eth56g(hw, port, addr, val); +} + +/** + * ice_write_ptp_reg_eth56g - Write a PHY port register + * @hw: pointer to the HW struct + * @port: Port number to be written + * @offset: Offset from PHY port register base + * @val: Value to write + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to write to PHY + */ +static int ice_write_ptp_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset, + u32 val) +{ + return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_PTP); +} + +/** + * ice_write_mac_reg_eth56g - Write a MAC PHY port register + * parameter + * @hw: pointer to the HW struct + * @port: Port number to be written + * @offset: Offset from PHY port register base + * @val: Value to write + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to write to PHY + */ +static int ice_write_mac_reg_eth56g(struct ice_hw *hw, u8 port, u32 offset, + u32 val) +{ + return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_MAC); +} + +/** + * ice_write_xpcs_reg_eth56g - Write a PHY port register + * @hw: pointer to the HW struct + * @port: Port number to be written + * @offset: Offset from PHY port register base + * @val: Value to write + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to write to PHY + */ +static int ice_write_xpcs_reg_eth56g(struct ice_hw *hw, u8 port, u32 offset, + u32 val) +{ + return ice_write_port_eth56g(hw, port, offset, val, + ETH56G_PHY_REG_XPCS); +} + +/** + * ice_read_ptp_reg_eth56g - Read a PHY port register + * @hw: pointer to the HW struct + * @port: Port number to be read + * @offset: Offset from PHY port register base + * @val: Pointer to the value to read (out param) + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to read from PHY + */ +static int ice_read_ptp_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset, + u32 *val) +{ + return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_PTP); +} + +/** + * ice_read_mac_reg_eth56g - Read a PHY port register + * @hw: pointer to the HW struct + * @port: Port number to be read + * @offset: Offset from PHY port register base + * @val: Pointer to the value to read (out param) + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to read from PHY + */ +static int ice_read_mac_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset, + u32 *val) +{ + return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_MAC); +} + +/** + * ice_read_gpcs_reg_eth56g - Read a PHY port register + * @hw: pointer to the HW struct + * @port: Port number to be read + * @offset: Offset from PHY port register base + * @val: Pointer to the value to read (out param) + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to read from PHY + */ +static int ice_read_gpcs_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset, + u32 *val) +{ + return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_GPCS); +} + +/** + * ice_read_port_mem_eth56g - Read a PHY port memory location + * @hw: pointer to the HW struct + * @port: Port number to be read + * @offset: Offset from PHY port register base + 
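All of these per-port read/write wrappers funnel into the same address computation in ice_phy_res_address_eth56g(): resource base plus lane times step plus offset, with lanes 4..7 folded back onto lanes 0..3 of the second PHY. A small stand-alone sketch (editorial illustration, assuming four ports per PHY as implied by the "Lanes 4..7" comment) using the ETH56G_PHY_REG_PTP entry from eth56g_phy_res[] (base 0x092000, step 0x98):

#include <stdio.h>
#include <stdint.h>

#define ETH56G_PTP_BASE	0x092000u
#define ETH56G_PTP_STEP	0x98u

/* addr = base + (port % ports_per_phy) * step + offset */
static uint32_t ptp_reg_addr(uint8_t port, uint8_t ports_per_phy, uint32_t offset)
{
	return ETH56G_PTP_BASE + (port % ports_per_phy) * ETH56G_PTP_STEP + offset;
}

int main(void)
{
	/* port 5 with four ports per PHY maps to lane 1 of the second PHY */
	printf("0x%06x\n", ptp_reg_addr(5, 4, 0));	/* 0x092098 */
	return 0;
}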
* @val: Pointer to the value to read (out param) + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to read from PHY + */ +static int ice_read_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset, + u32 *val) +{ + return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_MEM_PTP); +} + +/** + * ice_write_port_mem_eth56g - Write a PHY port memory location + * @hw: pointer to the HW struct + * @port: Port number to be read + * @offset: Offset from PHY port register base + * @val: Pointer to the value to read (out param) + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to write to PHY + */ +static int ice_write_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset, + u32 val) +{ + return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_MEM_PTP); +} + +/** + * ice_write_quad_ptp_reg_eth56g - Write a PHY quad register + * @hw: pointer to the HW struct + * @offset: PHY register offset + * @port: Port number + * @val: Value to write + * + * Return: + * * %0 - success + * * %EIO - invalid port number or resource type + * * %other - failed to write to PHY + */ +static int ice_write_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port, + u32 offset, u32 val) +{ + u32 addr; + + if (port >= hw->ptp.num_lports) + return -EIO; + + addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base_addr + offset; + + return ice_write_phy_eth56g(hw, port, addr, val); +} + +/** + * ice_read_quad_ptp_reg_eth56g - Read a PHY quad register + * @hw: pointer to the HW struct + * @offset: PHY register offset + * @port: Port number + * @val: Value to read + * + * Return: + * * %0 - success + * * %EIO - invalid port number or resource type + * * %other - failed to read from PHY + */ +static int ice_read_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port, + u32 offset, u32 *val) +{ + u32 addr; + + if (port >= hw->ptp.num_lports) + return -EIO; + + addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base_addr + offset; + + return ice_read_phy_eth56g(hw, port, addr, val); +} + +/** + * ice_is_64b_phy_reg_eth56g - Check if this is a 64bit PHY register + * @low_addr: the low address to check + * @high_addr: on return, contains the high address of the 64bit register + * + * Write the appropriate high register offset to use. + * + * Return: true if the provided low address is one of the known 64bit PHY values + * represented as two 32bit registers, false otherwise. + */ +static bool ice_is_64b_phy_reg_eth56g(u16 low_addr, u16 *high_addr) +{ + switch (low_addr) { + case PHY_REG_TX_TIMER_INC_PRE_L: + *high_addr = PHY_REG_TX_TIMER_INC_PRE_U; + return true; + case PHY_REG_RX_TIMER_INC_PRE_L: + *high_addr = PHY_REG_RX_TIMER_INC_PRE_U; + return true; + case PHY_REG_TX_CAPTURE_L: + *high_addr = PHY_REG_TX_CAPTURE_U; + return true; + case PHY_REG_RX_CAPTURE_L: + *high_addr = PHY_REG_RX_CAPTURE_U; + return true; + case PHY_REG_TOTAL_TX_OFFSET_L: + *high_addr = PHY_REG_TOTAL_TX_OFFSET_U; + return true; + case PHY_REG_TOTAL_RX_OFFSET_L: + *high_addr = PHY_REG_TOTAL_RX_OFFSET_U; + return true; + case PHY_REG_TX_MEMORY_STATUS_L: + *high_addr = PHY_REG_TX_MEMORY_STATUS_U; + return true; + default: + return false; + } +} + +/** + * ice_is_40b_phy_reg_eth56g - Check if this is a 40bit PHY register + * @low_addr: the low address to check + * @high_addr: on return, contains the high address of the 40bit value + * + * Write the appropriate high register offset to use. 
+ * + * Return: true if the provided low address is one of the known 40bit PHY + * values split into two registers with the lower 8 bits in the low register and + * the upper 32 bits in the high register, false otherwise. + */ +static bool ice_is_40b_phy_reg_eth56g(u16 low_addr, u16 *high_addr) +{ + switch (low_addr) { + case PHY_REG_TIMETUS_L: + *high_addr = PHY_REG_TIMETUS_U; + return true; + case PHY_PCS_REF_TUS_L: + *high_addr = PHY_PCS_REF_TUS_U; + return true; + case PHY_PCS_REF_INC_L: + *high_addr = PHY_PCS_REF_INC_U; + return true; + default: + return false; + } +} + +/** + * ice_read_64b_phy_reg_eth56g - Read a 64bit value from PHY registers + * @hw: pointer to the HW struct + * @port: PHY port to read from + * @low_addr: offset of the lower register to read from + * @val: on return, the contents of the 64bit value from the PHY registers + * @res_type: resource type + * + * Check if the caller has specified a known 40 bit register offset and read + * the two registers associated with a 40bit value and return it in the val + * pointer. + * + * Return: + * * %0 - success + * * %EINVAL - not a 64 bit register + * * %other - failed to read from PHY + */ +static int ice_read_64b_phy_reg_eth56g(struct ice_hw *hw, u8 port, u16 low_addr, + u64 *val, enum eth56g_res_type res_type) +{ + u16 high_addr; + u32 lo, hi; + int err; + + if (!ice_is_64b_phy_reg_eth56g(low_addr, &high_addr)) + return -EINVAL; + + err = ice_read_port_eth56g(hw, port, low_addr, &lo, res_type); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register %#08x\n, err %d", + low_addr, err); + return err; + } + + err = ice_read_port_eth56g(hw, port, high_addr, &hi, res_type); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register %#08x\n, err %d", + high_addr, err); + return err; + } + + *val = ((u64)hi << 32) | lo; + + return 0; +} + +/** + * ice_read_64b_ptp_reg_eth56g - Read a 64bit value from PHY registers + * @hw: pointer to the HW struct + * @port: PHY port to read from + * @low_addr: offset of the lower register to read from + * @val: on return, the contents of the 64bit value from the PHY registers + * + * Check if the caller has specified a known 40 bit register offset and read + * the two registers associated with a 40bit value and return it in the val + * pointer. + * + * Return: + * * %0 - success + * * %EINVAL - not a 64 bit register + * * %other - failed to read from PHY + */ +static int ice_read_64b_ptp_reg_eth56g(struct ice_hw *hw, u8 port, u16 low_addr, + u64 *val) +{ + return ice_read_64b_phy_reg_eth56g(hw, port, low_addr, val, + ETH56G_PHY_REG_PTP); +} + +/** + * ice_write_40b_phy_reg_eth56g - Write a 40b value to the PHY + * @hw: pointer to the HW struct + * @port: port to write to + * @low_addr: offset of the low register + * @val: 40b value to write + * @res_type: resource type + * + * Check if the caller has specified a known 40 bit register offset and write + * provided 40b value to the two associated registers by splitting it up into + * two chunks, the lower 8 bits and the upper 32 bits. 
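As the kernel-doc above states, a 40-bit PHY value is spread across two registers: the low register carries bits [7:0] and the high register bits [39:8]. A minimal user-space sketch of the split and reassembly (illustration only, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* Split a 40-bit value: low register = bits [7:0], high register = bits [39:8]. */
static void split_40b(uint64_t val, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(val & 0xff);
	*hi = (uint32_t)(val >> 8);
}

static uint64_t join_40b(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)hi << 8) | (lo & 0xff);
}

int main(void)
{
	uint64_t v = 0x12345678abULL;	/* arbitrary 40-bit value */
	uint32_t lo, hi;

	split_40b(v, &lo, &hi);
	printf("lo=0x%02x hi=0x%08x rejoined=0x%010llx\n",
	       lo, hi, (unsigned long long)join_40b(lo, hi));
	return 0;
}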
+ * + * Return: + * * %0 - success + * * %EINVAL - not a 40 bit register + * * %other - failed to write to PHY + */ +static int ice_write_40b_phy_reg_eth56g(struct ice_hw *hw, u8 port, + u16 low_addr, u64 val, + enum eth56g_res_type res_type) +{ + u16 high_addr; + u32 lo, hi; + int err; + + if (!ice_is_40b_phy_reg_eth56g(low_addr, &high_addr)) + return -EINVAL; + + lo = FIELD_GET(P_REG_40B_LOW_M, val); + hi = (u32)(val >> P_REG_40B_HIGH_S); + + err = ice_write_port_eth56g(hw, port, low_addr, lo, res_type); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d", + low_addr, err); + return err; + } + + err = ice_write_port_eth56g(hw, port, high_addr, hi, res_type); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d", + high_addr, err); + return err; + } + + return 0; +} + +/** + * ice_write_40b_ptp_reg_eth56g - Write a 40b value to the PHY + * @hw: pointer to the HW struct + * @port: port to write to + * @low_addr: offset of the low register + * @val: 40b value to write + * + * Check if the caller has specified a known 40 bit register offset and write + * provided 40b value to the two associated registers by splitting it up into + * two chunks, the lower 8 bits and the upper 32 bits. + * + * Return: + * * %0 - success + * * %EINVAL - not a 40 bit register + * * %other - failed to write to PHY + */ +static int ice_write_40b_ptp_reg_eth56g(struct ice_hw *hw, u8 port, + u16 low_addr, u64 val) +{ + return ice_write_40b_phy_reg_eth56g(hw, port, low_addr, val, + ETH56G_PHY_REG_PTP); +} + +/** + * ice_write_64b_phy_reg_eth56g - Write a 64bit value to PHY registers + * @hw: pointer to the HW struct + * @port: PHY port to read from + * @low_addr: offset of the lower register to read from + * @val: the contents of the 64bit value to write to PHY + * @res_type: resource type + * + * Check if the caller has specified a known 64 bit register offset and write + * the 64bit value to the two associated 32bit PHY registers. + * + * Return: + * * %0 - success + * * %EINVAL - not a 64 bit register + * * %other - failed to write to PHY + */ +static int ice_write_64b_phy_reg_eth56g(struct ice_hw *hw, u8 port, + u16 low_addr, u64 val, + enum eth56g_res_type res_type) +{ + u16 high_addr; + u32 lo, hi; + int err; + + if (!ice_is_64b_phy_reg_eth56g(low_addr, &high_addr)) + return -EINVAL; + + lo = lower_32_bits(val); + hi = upper_32_bits(val); + + err = ice_write_port_eth56g(hw, port, low_addr, lo, res_type); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d", + low_addr, err); + return err; + } + + err = ice_write_port_eth56g(hw, port, high_addr, hi, res_type); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d", + high_addr, err); + return err; + } + + return 0; +} + +/** + * ice_write_64b_ptp_reg_eth56g - Write a 64bit value to PHY registers + * @hw: pointer to the HW struct + * @port: PHY port to read from + * @low_addr: offset of the lower register to read from + * @val: the contents of the 64bit value to write to PHY + * + * Check if the caller has specified a known 64 bit register offset and write + * the 64bit value to the two associated 32bit PHY registers. 
+ * + * Return: + * * %0 - success + * * %EINVAL - not a 64 bit register + * * %other - failed to write to PHY + */ +static int ice_write_64b_ptp_reg_eth56g(struct ice_hw *hw, u8 port, + u16 low_addr, u64 val) +{ + return ice_write_64b_phy_reg_eth56g(hw, port, low_addr, val, + ETH56G_PHY_REG_PTP); +} + +/** + * ice_read_ptp_tstamp_eth56g - Read a PHY timestamp out of the port memory + * @hw: pointer to the HW struct + * @port: the port to read from + * @idx: the timestamp index to read + * @tstamp: on return, the 40bit timestamp value + * + * Read a 40bit timestamp value out of the two associated entries in the + * port memory block of the internal PHYs of the 56G devices. + * + * Return: + * * %0 - success + * * %other - failed to read from PHY + */ +static int ice_read_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx, + u64 *tstamp) +{ + u16 lo_addr, hi_addr; + u32 lo, hi; + int err; + + lo_addr = (u16)PHY_TSTAMP_L(idx); + hi_addr = (u16)PHY_TSTAMP_U(idx); + + err = ice_read_port_mem_eth56g(hw, port, lo_addr, &lo); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n", + err); + return err; + } + + err = ice_read_port_mem_eth56g(hw, port, hi_addr, &hi); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n", + err); + return err; + } + + /* For 56G based internal PHYs, the timestamp is reported with the + * lower 8 bits in the low register, and the upper 32 bits in the high + * register. + */ + *tstamp = FIELD_PREP(PHY_40B_HIGH_M, hi) | + FIELD_PREP(PHY_40B_LOW_M, lo); + return 0; +} + +/** + * ice_clear_ptp_tstamp_eth56g - Clear a timestamp from the quad block + * @hw: pointer to the HW struct + * @port: the quad to read from + * @idx: the timestamp index to reset + * + * Read and then forcibly clear the timestamp index to ensure the valid bit is + * cleared and the timestamp status bit is reset in the PHY port memory of + * internal PHYs of the 56G devices. + * + * To directly clear the contents of the timestamp block entirely, discarding + * all timestamp data at once, software should instead use + * ice_ptp_reset_ts_memory_quad_eth56g(). + * + * This function should only be called on an idx whose bit is set according to + * ice_get_phy_tx_tstamp_ready(). + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_clear_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx) +{ + u64 unused_tstamp; + u16 lo_addr; + int err; + + /* Read the timestamp register to ensure the timestamp status bit is + * cleared. 
+ */ + err = ice_read_ptp_tstamp_eth56g(hw, port, idx, &unused_tstamp); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read the PHY timestamp register for port %u, idx %u, err %d\n", + port, idx, err); + } + + lo_addr = (u16)PHY_TSTAMP_L(idx); + + err = ice_write_port_mem_eth56g(hw, port, lo_addr, 0); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register for port %u, idx %u, err %d\n", + port, idx, err); + return err; + } + + return 0; +} + +/** + * ice_ptp_reset_ts_memory_eth56g - Clear all timestamps from the port block + * @hw: pointer to the HW struct + */ +static void ice_ptp_reset_ts_memory_eth56g(struct ice_hw *hw) +{ + unsigned int port; + + for (port = 0; port < hw->ptp.num_lports; port++) { + ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_MEMORY_STATUS_L, + 0); + ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_MEMORY_STATUS_U, + 0); + } +} + +/** + * ice_ptp_prep_port_time_eth56g - Prepare one PHY port with initial time + * @hw: pointer to the HW struct + * @port: port number + * @time: time to initialize the PHY port clocks to + * + * Write a new initial time value into registers of a specific PHY port. + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_ptp_prep_port_time_eth56g(struct ice_hw *hw, u8 port, + u64 time) +{ + int err; + + /* Tx case */ + err = ice_write_64b_ptp_reg_eth56g(hw, port, PHY_REG_TX_TIMER_INC_PRE_L, + time); + if (err) + return err; + + /* Rx case */ + return ice_write_64b_ptp_reg_eth56g(hw, port, + PHY_REG_RX_TIMER_INC_PRE_L, time); +} + +/** + * ice_ptp_prep_phy_time_eth56g - Prepare PHY port with initial time + * @hw: pointer to the HW struct + * @time: Time to initialize the PHY port clocks to + * + * Program the PHY port registers with a new initial time value. The port + * clock will be initialized once the driver issues an ICE_PTP_INIT_TIME sync + * command. The time value is the upper 32 bits of the PHY timer, usually in + * units of nominal nanoseconds. + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_ptp_prep_phy_time_eth56g(struct ice_hw *hw, u32 time) +{ + u64 phy_time; + u8 port; + + /* The time represents the upper 32 bits of the PHY timer, so we need + * to shift to account for this when programming. + */ + phy_time = (u64)time << 32; + + for (port = 0; port < hw->ptp.num_lports; port++) { + int err; + + err = ice_ptp_prep_port_time_eth56g(hw, port, phy_time); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, err %d\n", + port, err); + return err; + } + } + + return 0; +} + +/** + * ice_ptp_prep_port_adj_eth56g - Prepare a single port for time adjust + * @hw: pointer to HW struct + * @port: Port number to be programmed + * @time: time in cycles to adjust the port clocks + * + * Program the port for an atomic adjustment by writing the Tx and Rx timer + * registers. The atomic adjustment won't be completed until the driver issues + * an ICE_PTP_ADJ_TIME command. + * + * Note that time is not in units of nanoseconds. It is in clock time + * including the lower sub-nanosecond portion of the port timer. + * + * Negative adjustments are supported using 2s complement arithmetic. 
+ * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_ptp_prep_port_adj_eth56g(struct ice_hw *hw, u8 port, s64 time) +{ + u32 l_time, u_time; + int err; + + l_time = lower_32_bits(time); + u_time = upper_32_bits(time); + + /* Tx case */ + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_TIMER_INC_PRE_L, + l_time); + if (err) + goto exit_err; + + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_TIMER_INC_PRE_U, + u_time); + if (err) + goto exit_err; + + /* Rx case */ + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_TIMER_INC_PRE_L, + l_time); + if (err) + goto exit_err; + + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_TIMER_INC_PRE_U, + u_time); + if (err) + goto exit_err; + + return 0; + +exit_err: + ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, err %d\n", + port, err); + return err; +} + +/** + * ice_ptp_prep_phy_adj_eth56g - Prep PHY ports for a time adjustment + * @hw: pointer to HW struct + * @adj: adjustment in nanoseconds + * + * Prepare the PHY ports for an atomic time adjustment by programming the PHY + * Tx and Rx port registers. The actual adjustment is completed by issuing an + * ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command. + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_ptp_prep_phy_adj_eth56g(struct ice_hw *hw, s32 adj) +{ + s64 cycles; + u8 port; + + /* The port clock supports adjustment of the sub-nanosecond portion of + * the clock (lowest 32 bits). We shift the provided adjustment in + * nanoseconds by 32 to calculate the appropriate adjustment to program + * into the PHY ports. + */ + cycles = (s64)adj << 32; + + for (port = 0; port < hw->ptp.num_lports; port++) { + int err; + + err = ice_ptp_prep_port_adj_eth56g(hw, port, cycles); + if (err) + return err; + } + + return 0; +} + +/** + * ice_ptp_prep_phy_incval_eth56g - Prepare PHY ports for time adjustment + * @hw: pointer to HW struct + * @incval: new increment value to prepare + * + * Prepare each of the PHY ports for a new increment value by programming the + * port's TIMETUS registers. The new increment value will be updated after + * issuing an ICE_PTP_INIT_INCVAL command. + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_ptp_prep_phy_incval_eth56g(struct ice_hw *hw, u64 incval) +{ + u8 port; + + for (port = 0; port < hw->ptp.num_lports; port++) { + int err; + + err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_REG_TIMETUS_L, + incval); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, err %d\n", + port, err); + return err; + } + } + + return 0; +} + +/** + * ice_ptp_read_port_capture_eth56g - Read a port's local time capture + * @hw: pointer to HW struct + * @port: Port number to read + * @tx_ts: on return, the Tx port time capture + * @rx_ts: on return, the Rx port time capture + * + * Read the port's Tx and Rx local time capture values. 
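The adjustment path above works in the PHY timer's own fixed-point format: the low 32 bits of the 64-bit port timer hold the sub-nanosecond fraction, so ice_ptp_prep_phy_adj_eth56g() shifts a nanosecond adjustment left by 32 bits, and negative adjustments simply rely on two's complement. A stand-alone illustration (not part of the patch) of the low/high words that end up in the TIMER_INC_PRE registers for a -5 ns adjustment:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t adj_ns = -5;
	/* Same scaling as the driver: nanoseconds land in the upper 32 bits,
	 * the lower 32 bits are the sub-nanosecond fraction.
	 */
	int64_t cycles = (int64_t)adj_ns * (1LL << 32);
	uint32_t lo = (uint32_t)cycles;
	uint32_t hi = (uint32_t)((uint64_t)cycles >> 32);

	printf("lo=0x%08x hi=0x%08x\n", lo, hi);	/* lo=0x00000000 hi=0xfffffffb */
	return 0;
}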
+ * + * Return: + * * %0 - success + * * %other - failed to read from PHY + */ +static int ice_ptp_read_port_capture_eth56g(struct ice_hw *hw, u8 port, + u64 *tx_ts, u64 *rx_ts) +{ + int err; + + /* Tx case */ + err = ice_read_64b_ptp_reg_eth56g(hw, port, PHY_REG_TX_CAPTURE_L, + tx_ts); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n", + err); + return err; + } + + ice_debug(hw, ICE_DBG_PTP, "tx_init = %#016llx\n", *tx_ts); + + /* Rx case */ + err = ice_read_64b_ptp_reg_eth56g(hw, port, PHY_REG_RX_CAPTURE_L, + rx_ts); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n", + err); + return err; + } + + ice_debug(hw, ICE_DBG_PTP, "rx_init = %#016llx\n", *rx_ts); + + return 0; +} + +/** + * ice_ptp_write_port_cmd_eth56g - Prepare a single PHY port for a timer command + * @hw: pointer to HW struct + * @port: Port to which cmd has to be sent + * @cmd: Command to be sent to the port + * + * Prepare the requested port for an upcoming timer sync command. + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_ptp_write_port_cmd_eth56g(struct ice_hw *hw, u8 port, + enum ice_ptp_tmr_cmd cmd) +{ + u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd); + int err; + + /* Tx case */ + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_TMR_CMD, val); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n", + err); + return err; + } + + /* Rx case */ + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_TMR_CMD, val); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n", + err); + return err; + } + + return 0; +} + +/** + * ice_phy_get_speed_eth56g - Get link speed based on PHY link type + * @li: pointer to link information struct + * + * Return: simplified ETH56G PHY speed + */ +static enum ice_eth56g_link_spd +ice_phy_get_speed_eth56g(struct ice_link_status *li) +{ + u16 speed = ice_get_link_speed_based_on_phy_type(li->phy_type_low, + li->phy_type_high); + + switch (speed) { + case ICE_AQ_LINK_SPEED_1000MB: + return ICE_ETH56G_LNK_SPD_1G; + case ICE_AQ_LINK_SPEED_2500MB: + return ICE_ETH56G_LNK_SPD_2_5G; + case ICE_AQ_LINK_SPEED_10GB: + return ICE_ETH56G_LNK_SPD_10G; + case ICE_AQ_LINK_SPEED_25GB: + return ICE_ETH56G_LNK_SPD_25G; + case ICE_AQ_LINK_SPEED_40GB: + return ICE_ETH56G_LNK_SPD_40G; + case ICE_AQ_LINK_SPEED_50GB: + switch (li->phy_type_low) { + case ICE_PHY_TYPE_LOW_50GBASE_SR: + case ICE_PHY_TYPE_LOW_50GBASE_FR: + case ICE_PHY_TYPE_LOW_50GBASE_LR: + case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: + case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: + case ICE_PHY_TYPE_LOW_50G_AUI1: + return ICE_ETH56G_LNK_SPD_50G; + default: + return ICE_ETH56G_LNK_SPD_50G2; + } + case ICE_AQ_LINK_SPEED_100GB: + if (li->phy_type_high || + li->phy_type_low == ICE_PHY_TYPE_LOW_100GBASE_SR2) + return ICE_ETH56G_LNK_SPD_100G2; + else + return ICE_ETH56G_LNK_SPD_100G; + default: + return ICE_ETH56G_LNK_SPD_1G; + } +} + +/** + * ice_phy_cfg_parpcs_eth56g - Configure TUs per PAR/PCS clock cycle + * @hw: pointer to the HW struct + * @port: port to configure + * + * Configure the number of TUs for the PAR and PCS clocks used as part of the + * timestamp calibration process. 
+ * + * Return: + * * %0 - success + * * %other - PHY read/write failed + */ +static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port) +{ + u32 val; + int err; + + err = ice_write_xpcs_reg_eth56g(hw, port, PHY_VENDOR_TXLANE_THRESH, + ICE_ETH56G_NOMINAL_THRESH4); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read VENDOR_TXLANE_THRESH, status: %d", + err); + return err; + } + + switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) { + case ICE_ETH56G_LNK_SPD_1G: + case ICE_ETH56G_LNK_SPD_2_5G: + err = ice_read_quad_ptp_reg_eth56g(hw, port, + PHY_GPCS_CONFIG_REG0, &val); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read PHY_GPCS_CONFIG_REG0, status: %d", + err); + return err; + } + + val &= ~PHY_GPCS_CONFIG_REG0_TX_THR_M; + val |= FIELD_PREP(PHY_GPCS_CONFIG_REG0_TX_THR_M, + ICE_ETH56G_NOMINAL_TX_THRESH); + + err = ice_write_quad_ptp_reg_eth56g(hw, port, + PHY_GPCS_CONFIG_REG0, val); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_GPCS_CONFIG_REG0, status: %d", + err); + return err; + } + break; + default: + break; + } + + err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_PCS_REF_TUS_L, + ICE_ETH56G_NOMINAL_PCS_REF_TUS); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_PCS_REF_TUS, status: %d", + err); + return err; + } + + err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_PCS_REF_INC_L, + ICE_ETH56G_NOMINAL_PCS_REF_INC); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_PCS_REF_INC, status: %d", + err); + return err; + } + + return 0; +} + +/** + * ice_phy_cfg_ptp_1step_eth56g - Configure 1-step PTP settings + * @hw: Pointer to the HW struct + * @port: Port to configure + * + * Return: + * * %0 - success + * * %other - PHY read/write failed + */ +int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port) +{ + u8 quad_lane = port % ICE_PORTS_PER_QUAD; + u32 addr, val, peer_delay; + bool enable, sfd_ena; + int err; + + enable = hw->ptp.phy.eth56g.onestep_ena; + peer_delay = hw->ptp.phy.eth56g.peer_delay; + sfd_ena = hw->ptp.phy.eth56g.sfd_ena; + + addr = PHY_PTP_1STEP_CONFIG; + err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &val); + if (err) + return err; + + if (enable) + val |= BIT(quad_lane); + else + val &= ~BIT(quad_lane); + + val &= ~(PHY_PTP_1STEP_T1S_UP64_M | PHY_PTP_1STEP_T1S_DELTA_M); + + err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val); + if (err) + return err; + + addr = PHY_PTP_1STEP_PEER_DELAY(quad_lane); + val = FIELD_PREP(PHY_PTP_1STEP_PD_DELAY_M, peer_delay); + if (peer_delay) + val |= PHY_PTP_1STEP_PD_ADD_PD_M; + val |= PHY_PTP_1STEP_PD_DLY_V_M; + err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val); + if (err) + return err; + + val &= ~PHY_PTP_1STEP_PD_DLY_V_M; + err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val); + if (err) + return err; + + addr = PHY_MAC_XIF_MODE; + err = ice_read_mac_reg_eth56g(hw, port, addr, &val); + if (err) + return err; + + val &= ~(PHY_MAC_XIF_1STEP_ENA_M | PHY_MAC_XIF_TS_BIN_MODE_M | + PHY_MAC_XIF_TS_SFD_ENA_M | PHY_MAC_XIF_GMII_TS_SEL_M); + + switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) { + case ICE_ETH56G_LNK_SPD_1G: + case ICE_ETH56G_LNK_SPD_2_5G: + val |= PHY_MAC_XIF_GMII_TS_SEL_M; + break; + default: + break; + } + + val |= FIELD_PREP(PHY_MAC_XIF_1STEP_ENA_M, enable) | + FIELD_PREP(PHY_MAC_XIF_TS_BIN_MODE_M, enable) | + FIELD_PREP(PHY_MAC_XIF_TS_SFD_ENA_M, sfd_ena); + + return ice_write_mac_reg_eth56g(hw, port, addr, val); +} + +/** + * mul_u32_u32_fx_q9 - Multiply two u32 fixed point Q9 values + * @a: multiplier value + * @b: 
multiplicand value + * + * Return: result of multiplication + */ +static u32 mul_u32_u32_fx_q9(u32 a, u32 b) +{ + return (u32)(((u64)a * b) >> ICE_ETH56G_MAC_CFG_FRAC_W); +} + +/** + * add_u32_u32_fx - Add two u32 fixed point values and discard overflow + * @a: first value + * @b: second value + * + * Return: result of addition + */ +static u32 add_u32_u32_fx(u32 a, u32 b) +{ + return lower_32_bits(((u64)a + b)); +} + +/** + * ice_ptp_calc_bitslip_eth56g - Calculate bitslip value + * @hw: pointer to the HW struct + * @port: port to configure + * @bs: bitslip multiplier + * @fc: FC-FEC enabled + * @rs: RS-FEC enabled + * @spd: link speed + * + * Return: calculated bitslip value + */ +static u32 ice_ptp_calc_bitslip_eth56g(struct ice_hw *hw, u8 port, u32 bs, + bool fc, bool rs, + enum ice_eth56g_link_spd spd) +{ + u32 bitslip; + int err; + + if (!bs || rs) + return 0; + + if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G) { + err = ice_read_gpcs_reg_eth56g(hw, port, PHY_GPCS_BITSLIP, + &bitslip); + } else { + u8 quad_lane = port % ICE_PORTS_PER_QUAD; + u32 addr; + + addr = PHY_REG_SD_BIT_SLIP(quad_lane); + err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &bitslip); + } + if (err) + return 0; + + if (spd == ICE_ETH56G_LNK_SPD_1G && !bitslip) { + /* Bitslip register value of 0 corresponds to 10 so substitute + * it for calculations + */ + bitslip = 10; + } else if (spd == ICE_ETH56G_LNK_SPD_10G || + spd == ICE_ETH56G_LNK_SPD_25G) { + if (fc) + bitslip = bitslip * 2 + 32; + else + bitslip = (u32)((s32)bitslip * -1 + 20); + } + + bitslip <<= ICE_ETH56G_MAC_CFG_FRAC_W; + return mul_u32_u32_fx_q9(bitslip, bs); +} + +/** + * ice_ptp_calc_deskew_eth56g - Calculate deskew value + * @hw: pointer to the HW struct + * @port: port to configure + * @ds: deskew multiplier + * @rs: RS-FEC enabled + * @spd: link speed + * + * Return: calculated deskew value + */ +static u32 ice_ptp_calc_deskew_eth56g(struct ice_hw *hw, u8 port, u32 ds, + bool rs, enum ice_eth56g_link_spd spd) +{ + u32 deskew_i, deskew_f; + int err; + + if (!ds) + return 0; + + read_poll_timeout(ice_read_ptp_reg_eth56g, err, + FIELD_GET(PHY_REG_DESKEW_0_VALID, deskew_i), 500, + 50 * USEC_PER_MSEC, false, hw, port, PHY_REG_DESKEW_0, + &deskew_i); + if (err) + return err; + + deskew_f = FIELD_GET(PHY_REG_DESKEW_0_RLEVEL_FRAC, deskew_i); + deskew_i = FIELD_GET(PHY_REG_DESKEW_0_RLEVEL, deskew_i); + + if (rs && spd == ICE_ETH56G_LNK_SPD_50G2) + ds = 0x633; /* 3.1 */ + else if (rs && spd == ICE_ETH56G_LNK_SPD_100G) + ds = 0x31b; /* 1.552 */ + + deskew_i = FIELD_PREP(ICE_ETH56G_MAC_CFG_RX_OFFSET_INT, deskew_i); + /* Shift 3 fractional bits to the end of the integer part */ + deskew_f <<= ICE_ETH56G_MAC_CFG_FRAC_W - PHY_REG_DESKEW_0_RLEVEL_FRAC_W; + return mul_u32_u32_fx_q9(deskew_i | deskew_f, ds); +} + +/** + * ice_phy_set_offsets_eth56g - Set Tx/Rx offset values + * @hw: pointer to the HW struct + * @port: port to configure + * @spd: link speed + * @cfg: structure to store output values + * @fc: FC-FEC enabled + * @rs: RS-FEC enabled + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_phy_set_offsets_eth56g(struct ice_hw *hw, u8 port, + enum ice_eth56g_link_spd spd, + const struct ice_eth56g_mac_reg_cfg *cfg, + bool fc, bool rs) +{ + u32 rx_offset, tx_offset, bs_ds; + bool onestep, sfd; + + onestep = hw->ptp.phy.eth56g.onestep_ena; + sfd = hw->ptp.phy.eth56g.sfd_ena; + bs_ds = cfg->rx_offset.bs_ds; + + if (fc) + rx_offset = cfg->rx_offset.fc; + else if (rs) + rx_offset = 
cfg->rx_offset.rs; + else + rx_offset = cfg->rx_offset.no_fec; + + rx_offset = add_u32_u32_fx(rx_offset, cfg->rx_offset.serdes); + if (sfd) + rx_offset = add_u32_u32_fx(rx_offset, cfg->rx_offset.sfd); + + if (spd < ICE_ETH56G_LNK_SPD_40G) + bs_ds = ice_ptp_calc_bitslip_eth56g(hw, port, bs_ds, fc, rs, + spd); + else + bs_ds = ice_ptp_calc_deskew_eth56g(hw, port, bs_ds, rs, spd); + rx_offset = add_u32_u32_fx(rx_offset, bs_ds); + rx_offset &= ICE_ETH56G_MAC_CFG_RX_OFFSET_INT | + ICE_ETH56G_MAC_CFG_RX_OFFSET_FRAC; + + if (fc) + tx_offset = cfg->tx_offset.fc; + else if (rs) + tx_offset = cfg->tx_offset.rs; + else + tx_offset = cfg->tx_offset.no_fec; + tx_offset += cfg->tx_offset.serdes + cfg->tx_offset.sfd * sfd + + cfg->tx_offset.onestep * onestep; + + ice_write_mac_reg_eth56g(hw, port, PHY_MAC_RX_OFFSET, rx_offset); + return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_TX_OFFSET, tx_offset); +} + +/** + * ice_phy_cfg_mac_eth56g - Configure MAC for PTP + * @hw: Pointer to the HW struct + * @port: Port to configure + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_phy_cfg_mac_eth56g(struct ice_hw *hw, u8 port) +{ + const struct ice_eth56g_mac_reg_cfg *cfg; + enum ice_eth56g_link_spd spd; + struct ice_link_status *li; + bool fc = false; + bool rs = false; + bool onestep; + u32 val; + int err; + + onestep = hw->ptp.phy.eth56g.onestep_ena; + li = &hw->port_info->phy.link_info; + spd = ice_phy_get_speed_eth56g(li); + if (!!(li->an_info & ICE_AQ_FEC_EN)) { + if (spd == ICE_ETH56G_LNK_SPD_10G) { + fc = true; + } else { + fc = !!(li->fec_info & ICE_AQ_LINK_25G_KR_FEC_EN); + rs = !!(li->fec_info & ~ICE_AQ_LINK_25G_KR_FEC_EN); + } + } + cfg = ð56g_mac_cfg[spd]; + + err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_RX_MODULO, 0); + if (err) + return err; + + err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_TX_MODULO, 0); + if (err) + return err; + + val = FIELD_PREP(PHY_MAC_TSU_CFG_TX_MODE_M, + cfg->tx_mode.def + rs * cfg->tx_mode.rs) | + FIELD_PREP(PHY_MAC_TSU_CFG_TX_MII_MK_DLY_M, cfg->tx_mk_dly) | + FIELD_PREP(PHY_MAC_TSU_CFG_TX_MII_CW_DLY_M, + cfg->tx_cw_dly.def + + onestep * cfg->tx_cw_dly.onestep) | + FIELD_PREP(PHY_MAC_TSU_CFG_RX_MODE_M, + cfg->rx_mode.def + rs * cfg->rx_mode.rs) | + FIELD_PREP(PHY_MAC_TSU_CFG_RX_MII_MK_DLY_M, + cfg->rx_mk_dly.def + rs * cfg->rx_mk_dly.rs) | + FIELD_PREP(PHY_MAC_TSU_CFG_RX_MII_CW_DLY_M, + cfg->rx_cw_dly.def + rs * cfg->rx_cw_dly.rs) | + FIELD_PREP(PHY_MAC_TSU_CFG_BLKS_PER_CLK_M, cfg->blks_per_clk); + err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_TSU_CONFIG, val); + if (err) + return err; + + err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_BLOCKTIME, + cfg->blktime); + if (err) + return err; + + err = ice_phy_set_offsets_eth56g(hw, port, spd, cfg, fc, rs); + if (err) + return err; + + if (spd == ICE_ETH56G_LNK_SPD_25G && !rs) + val = 0; + else + val = cfg->mktime; + + return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_MARKERTIME, val); +} + +/** + * ice_phy_cfg_intr_eth56g - Configure TX timestamp interrupt + * @hw: pointer to the HW struct + * @port: the timestamp port + * @ena: enable or disable interrupt + * @threshold: interrupt threshold + * + * Configure TX timestamp interrupt for the specified port + * + * Return: + * * %0 - success + * * %other - PHY read/write failed + */ +int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold) +{ + int err; + u32 val; + + err = ice_read_ptp_reg_eth56g(hw, port, PHY_REG_TS_INT_CONFIG, &val); + if (err) + return err; + + if (ena) { + val |= 
PHY_TS_INT_CONFIG_ENA_M; + val &= ~PHY_TS_INT_CONFIG_THRESHOLD_M; + val |= FIELD_PREP(PHY_TS_INT_CONFIG_THRESHOLD_M, threshold); + } else { + val &= ~PHY_TS_INT_CONFIG_ENA_M; + } + + return ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TS_INT_CONFIG, val); +} + +/** + * ice_read_phy_and_phc_time_eth56g - Simultaneously capture PHC and PHY time + * @hw: pointer to the HW struct + * @port: the PHY port to read + * @phy_time: on return, the 64bit PHY timer value + * @phc_time: on return, the lower 64bits of PHC time + * + * Issue a ICE_PTP_READ_TIME timer command to simultaneously capture the PHY + * and PHC timer values. + * + * Return: + * * %0 - success + * * %other - PHY read/write failed + */ +static int ice_read_phy_and_phc_time_eth56g(struct ice_hw *hw, u8 port, + u64 *phy_time, u64 *phc_time) +{ + struct ice_pf *pf = container_of(hw, struct ice_pf, hw); + u64 tx_time, rx_time; + u32 zo, lo; + u8 tmr_idx; + int err; + + tmr_idx = ice_get_ptp_src_clock_index(hw); + + /* Prepare the PHC timer for a ICE_PTP_READ_TIME capture command */ + ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME); + + /* Prepare the PHY timer for a ICE_PTP_READ_TIME capture command */ + err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_READ_TIME); + if (err) + return err; + + /* Issue the sync to start the ICE_PTP_READ_TIME capture */ + ice_ptp_exec_tmr_cmd(hw); + + /* Read the captured PHC time from the shadow time registers */ + if (ice_is_primary(hw)) { + zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx)); + lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx)); + } else { + zo = rd32(ice_get_primary_hw(pf), GLTSYN_SHTIME_0(tmr_idx)); + lo = rd32(ice_get_primary_hw(pf), GLTSYN_SHTIME_L(tmr_idx)); + } + *phc_time = (u64)lo << 32 | zo; + + /* Read the captured PHY time from the PHY shadow registers */ + err = ice_ptp_read_port_capture_eth56g(hw, port, &tx_time, &rx_time); + if (err) + return err; + + /* If the PHY Tx and Rx timers don't match, log a warning message. + * Note that this should not happen in normal circumstances since the + * driver always programs them together. + */ + if (tx_time != rx_time) + dev_warn(ice_hw_to_dev(hw), "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n", + port, tx_time, rx_time); + + *phy_time = tx_time; + + return 0; +} + +/** + * ice_sync_phy_timer_eth56g - Synchronize the PHY timer with PHC timer + * @hw: pointer to the HW struct + * @port: the PHY port to synchronize + * + * Perform an adjustment to ensure that the PHY and PHC timers are in sync. + * This is done by issuing a ICE_PTP_READ_TIME command which triggers a + * simultaneous read of the PHY timer and PHC timer. Then we use the + * difference to calculate an appropriate 2s complement addition to add + * to the PHY timer in order to ensure it reads the same value as the + * primary PHC timer. + * + * Return: + * * %0 - success + * * %-EBUSY- failed to acquire PTP semaphore + * * %other - PHY read/write failed + */ +static int ice_sync_phy_timer_eth56g(struct ice_hw *hw, u8 port) +{ + u64 phc_time, phy_time, difference; + int err; + + if (!ice_ptp_lock(hw)) { + ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n"); + return -EBUSY; + } + + err = ice_read_phy_and_phc_time_eth56g(hw, port, &phy_time, &phc_time); + if (err) + goto err_unlock; + + /* Calculate the amount required to add to the port time in order for + * it to match the PHC time. + * + * Note that the port adjustment is done using 2s complement + * arithmetic. 
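/*
 * Illustrative sketch, not part of this patch: why the unsigned
 * "difference = phc_time - phy_time" below is safe even when the PHY timer
 * is ahead of the PHC. The subtraction wraps modulo 2^64, so adding the
 * result back to the PHY time lands exactly on the PHC time. Standalone
 * user-space C.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t phc_time = 1000;                  /* PHC behind the PHY here */
	uint64_t phy_time = 1500;
	uint64_t difference = phc_time - phy_time; /* wraps to 2^64 - 500 */

	/* The port adds the 2's complement value and wraps back around */
	assert(phy_time + difference == phc_time);
	return 0;
}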
This is convenient since it means that we can simply + * calculate the difference between the PHC time and the port time, + * and it will be interpreted correctly. + */ + + ice_ptp_src_cmd(hw, ICE_PTP_NOP); + difference = phc_time - phy_time; + + err = ice_ptp_prep_port_adj_eth56g(hw, port, (s64)difference); + if (err) + goto err_unlock; + + err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_ADJ_TIME); + if (err) + goto err_unlock; + + /* Issue the sync to activate the time adjustment */ + ice_ptp_exec_tmr_cmd(hw); + + /* Re-capture the timer values to flush the command registers and + * verify that the time was properly adjusted. + */ + err = ice_read_phy_and_phc_time_eth56g(hw, port, &phy_time, &phc_time); + if (err) + goto err_unlock; + + dev_info(ice_hw_to_dev(hw), + "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n", + port, phy_time, phc_time); + +err_unlock: + ice_ptp_unlock(hw); + return err; +} + +/** + * ice_stop_phy_timer_eth56g - Stop the PHY clock timer + * @hw: pointer to the HW struct + * @port: the PHY port to stop + * @soft_reset: if true, hold the SOFT_RESET bit of PHY_REG_PS + * + * Stop the clock of a PHY port. This must be done as part of the flow to + * re-calibrate Tx and Rx timestamping offsets whenever the clock time is + * initialized or when link speed changes. + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset) +{ + int err; + + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_OFFSET_READY, 0); + if (err) + return err; + + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_OFFSET_READY, 0); + if (err) + return err; + + ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port); + + return 0; +} + +/** + * ice_start_phy_timer_eth56g - Start the PHY clock timer + * @hw: pointer to the HW struct + * @port: the PHY port to start + * + * Start the clock of a PHY port. This must be done as part of the flow to + * re-calibrate Tx and Rx timestamping offsets whenever the clock time is + * initialized or when link speed changes. 
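/*
 * Illustrative sketch, not part of this patch: one possible caller-side
 * pairing of the stop/start helpers described here, e.g. around a link-speed
 * change. The function name is hypothetical and the surrounding driver
 * context (locking, where this hooks into link handling) is assumed.
 */
static int example_restart_port_timer(struct ice_hw *hw, u8 port)
{
	int err;

	/* Invalidate Tx/Rx offsets and halt timestamping during retraining */
	err = ice_stop_phy_timer_eth56g(hw, port, false);
	if (err)
		return err;

	/* ... wait for the new link speed to be reported ... */

	/* Reprogram calibration constants and resync the port with the PHC */
	return ice_start_phy_timer_eth56g(hw, port);
}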
+ * + * Return: + * * %0 - success + * * %other - PHY read/write failed + */ +int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port) +{ + struct ice_pf *pf = container_of(hw, struct ice_pf, hw); + u32 lo, hi; + u64 incval; + u8 tmr_idx; + int err; + + tmr_idx = ice_get_ptp_src_clock_index(hw); + + err = ice_stop_phy_timer_eth56g(hw, port, false); + if (err) + return err; + + ice_ptp_src_cmd(hw, ICE_PTP_NOP); + + err = ice_phy_cfg_parpcs_eth56g(hw, port); + if (err) + return err; + + err = ice_phy_cfg_ptp_1step_eth56g(hw, port); + if (err) + return err; + + err = ice_phy_cfg_mac_eth56g(hw, port); + if (err) + return err; + + if (ice_is_primary(hw)) { + lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx)); + hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx)); + } else { + lo = rd32(ice_get_primary_hw(pf), GLTSYN_INCVAL_L(tmr_idx)); + hi = rd32(ice_get_primary_hw(pf), GLTSYN_INCVAL_H(tmr_idx)); + } + incval = (u64)hi << 32 | lo; + + err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_REG_TIMETUS_L, incval); + if (err) + return err; + + err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL); + if (err) + return err; + + ice_ptp_exec_tmr_cmd(hw); + + err = ice_sync_phy_timer_eth56g(hw, port); + if (err) + return err; + + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_OFFSET_READY, 1); + if (err) + return err; + + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_OFFSET_READY, 1); + if (err) + return err; + + ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port); + + return 0; +} + +/** + * ice_ptp_init_phc_e825 - Perform E825 specific PHC initialization + * @hw: pointer to HW struct + * + * Perform E825-specific PTP hardware clock initialization steps. + * + * Return: 0 on success, negative error code otherwise. + */ +static int ice_ptp_init_phc_e825(struct ice_hw *hw) +{ + /* Initialize the Clock Generation Unit */ + return ice_init_cgu_e82x(hw); +} + +/** + * ice_ptp_read_tx_hwtstamp_status_eth56g - Get TX timestamp status + * @hw: pointer to the HW struct + * @ts_status: the timestamp mask pointer + * + * Read the PHY Tx timestamp status mask indicating which ports have Tx + * timestamps available. + * + * Return: + * * %0 - success + * * %other - failed to read from PHY + */ +int ice_ptp_read_tx_hwtstamp_status_eth56g(struct ice_hw *hw, u32 *ts_status) +{ + const struct ice_eth56g_params *params = &hw->ptp.phy.eth56g; + u8 phy, mask; + u32 status; + + mask = (1 << hw->ptp.ports_per_phy) - 1; + *ts_status = 0; + + for (phy = 0; phy < params->num_phys; phy++) { + int err; + + err = ice_read_phy_eth56g(hw, phy, PHY_PTP_INT_STATUS, &status); + if (err) + return err; + + *ts_status |= (status & mask) << (phy * hw->ptp.ports_per_phy); + } + + ice_debug(hw, ICE_DBG_PTP, "PHY interrupt err: %x\n", *ts_status); + + return 0; +} + +/** + * ice_get_phy_tx_tstamp_ready_eth56g - Read the Tx memory status register + * @hw: pointer to the HW struct + * @port: the PHY port to read from + * @tstamp_ready: contents of the Tx memory status register + * + * Read the PHY_REG_TX_MEMORY_STATUS register indicating which timestamps in + * the PHY are ready. A set bit means the corresponding timestamp is valid and + * ready to be captured from the PHY timestamp block. 
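/*
 * Illustrative sketch, not part of this patch: how a consumer might walk the
 * ready bitmap returned below and visit each completed Tx timestamp slot.
 * The function name is hypothetical; the real driver drives this from its Tx
 * tracker, which is outside this hunk.
 */
static void example_scan_tx_tstamp_ready(struct ice_hw *hw, u8 port)
{
	u64 tstamp_ready;
	u8 idx;

	if (ice_get_phy_tx_tstamp_ready_eth56g(hw, port, &tstamp_ready))
		return;

	for (idx = 0; idx < 64; idx++) {
		if (!(tstamp_ready & BIT_ULL(idx)))
			continue;
		/* slot 'idx' holds a valid timestamp; read it here with the
		 * family-specific timestamp read helper
		 */
	}
}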
+ * + * Return: + * * %0 - success + * * %other - failed to read from PHY + */ +static int ice_get_phy_tx_tstamp_ready_eth56g(struct ice_hw *hw, u8 port, + u64 *tstamp_ready) +{ + int err; + + err = ice_read_64b_ptp_reg_eth56g(hw, port, PHY_REG_TX_MEMORY_STATUS_L, + tstamp_ready); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS for port %u, err %d\n", + port, err); + return err; + } + + return 0; +} + +/** + * ice_ptp_init_phy_e825 - initialize PHY parameters + * @hw: pointer to the HW struct + */ +static void ice_ptp_init_phy_e825(struct ice_hw *hw) +{ + struct ice_ptp_hw *ptp = &hw->ptp; + struct ice_eth56g_params *params; + + params = &ptp->phy.eth56g; + params->onestep_ena = false; + params->peer_delay = 0; + params->sfd_ena = false; + params->num_phys = 2; + ptp->ports_per_phy = 4; + ptp->num_lports = params->num_phys * ptp->ports_per_phy; +} + /* E822 family functions * * The following functions operate on the E822 family of devices. @@ -285,18 +2762,20 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw) /** * ice_fill_phy_msg_e82x - Fill message data for a PHY register access + * @hw: pointer to the HW struct * @msg: the PHY message buffer to fill in * @port: the port to access * @offset: the register offset */ -static void -ice_fill_phy_msg_e82x(struct ice_sbq_msg_input *msg, u8 port, u16 offset) +static void ice_fill_phy_msg_e82x(struct ice_hw *hw, + struct ice_sbq_msg_input *msg, u8 port, + u16 offset) { - int phy_port, phy, quadtype; + int phy_port, quadtype; - phy_port = port % ICE_PORTS_PER_PHY_E82X; - phy = port / ICE_PORTS_PER_PHY_E82X; - quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E82X; + phy_port = port % hw->ptp.ports_per_phy; + quadtype = ICE_GET_QUAD_NUM(port) % + ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy); if (quadtype == 0) { msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port); @@ -306,12 +2785,7 @@ ice_fill_phy_msg_e82x(struct ice_sbq_msg_input *msg, u8 port, u16 offset) msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port); } - if (phy == 0) - msg->dest_dev = rmn_0; - else if (phy == 1) - msg->dest_dev = rmn_1; - else - msg->dest_dev = rmn_2; + msg->dest_dev = ice_sbq_dev_phy_0; } /** @@ -427,10 +2901,10 @@ ice_read_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 *val) struct ice_sbq_msg_input msg = {0}; int err; - ice_fill_phy_msg_e82x(&msg, port, offset); + ice_fill_phy_msg_e82x(hw, &msg, port, offset); msg.opcode = ice_sbq_msg_rd; - err = ice_sbq_rw_reg(hw, &msg); + err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", err); @@ -504,11 +2978,11 @@ ice_write_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 val) struct ice_sbq_msg_input msg = {0}; int err; - ice_fill_phy_msg_e82x(&msg, port, offset); + ice_fill_phy_msg_e82x(hw, &msg, port, offset); msg.opcode = ice_sbq_msg_wr; msg.data = val; - err = ice_sbq_rw_reg(hw, &msg); + err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", err); @@ -543,8 +3017,7 @@ ice_write_40b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) low_addr); return -EINVAL; } - - low = (u32)(val & P_REG_40B_LOW_M); + low = FIELD_GET(P_REG_40B_LOW_M, val); high = (u32)(val >> P_REG_40B_HIGH_S); err = ice_write_phy_reg_e82x(hw, port, low_addr, low); @@ -614,24 +3087,30 @@ ice_write_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) /** * ice_fill_quad_msg_e82x - Fill message data for quad register 
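/*
 * Illustrative sketch, not part of this patch: the port-to-quad arithmetic
 * behind the ICE_GET_QUAD_NUM() uses in this file. The macro itself is
 * defined elsewhere in the series; here it is assumed to reduce to
 * port / ICE_PORTS_PER_QUAD with four ports per quad, which is an assumption
 * for illustration only. Standalone user-space C.
 */
#include <stdio.h>

#define EX_PORTS_PER_QUAD 4U

int main(void)
{
	unsigned int ports_per_phy = 8;  /* E82X value set later in this diff */
	unsigned int port;

	for (port = 0; port < 8; port++) {
		unsigned int phy_port = port % ports_per_phy;
		unsigned int quad = port / EX_PORTS_PER_QUAD;
		unsigned int quadtype = quad % (ports_per_phy / EX_PORTS_PER_QUAD);

		printf("port %u -> phy_port %u, quad %u, quadtype %u\n",
		       port, phy_port, quad, quadtype);
	}
	return 0;
}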
access + * @hw: pointer to the HW struct * @msg: the PHY message buffer to fill in * @quad: the quad to access * @offset: the register offset * * Fill a message buffer for accessing a register in a quad shared between * multiple PHYs. + * + * Return: + * * %0 - OK + * * %-EINVAL - invalid quad number */ -static int -ice_fill_quad_msg_e82x(struct ice_sbq_msg_input *msg, u8 quad, u16 offset) +static int ice_fill_quad_msg_e82x(struct ice_hw *hw, + struct ice_sbq_msg_input *msg, u8 quad, + u16 offset) { u32 addr; - if (quad >= ICE_MAX_QUAD) + if (quad >= ICE_GET_QUAD_NUM(hw->ptp.num_lports)) return -EINVAL; - msg->dest_dev = rmn_0; + msg->dest_dev = ice_sbq_dev_phy_0; - if ((quad % ICE_QUADS_PER_PHY_E82X) == 0) + if (!(quad % ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy))) addr = Q_0_BASE + offset; else addr = Q_1_BASE + offset; @@ -658,13 +3137,13 @@ ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val) struct ice_sbq_msg_input msg = {0}; int err; - err = ice_fill_quad_msg_e82x(&msg, quad, offset); + err = ice_fill_quad_msg_e82x(hw, &msg, quad, offset); if (err) return err; msg.opcode = ice_sbq_msg_rd; - err = ice_sbq_rw_reg(hw, &msg); + err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", err); @@ -692,14 +3171,14 @@ ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val) struct ice_sbq_msg_input msg = {0}; int err; - err = ice_fill_quad_msg_e82x(&msg, quad, offset); + err = ice_fill_quad_msg_e82x(hw, &msg, quad, offset); if (err) return err; msg.opcode = ice_sbq_msg_wr; msg.data = val; - err = ice_sbq_rw_reg(hw, &msg); + err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", err); @@ -748,7 +3227,8 @@ ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) * lower 8 bits in the low register, and the upper 32 bits in the high * register. */ - *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M); + *tstamp = FIELD_PREP(PHY_40B_HIGH_M, hi) | + FIELD_PREP(PHY_40B_LOW_M, lo); return 0; } @@ -813,294 +3293,11 @@ static void ice_ptp_reset_ts_memory_e82x(struct ice_hw *hw) { unsigned int quad; - for (quad = 0; quad < ICE_MAX_QUAD; quad++) + for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports); quad++) ice_ptp_reset_ts_memory_quad_e82x(hw, quad); } /** - * ice_read_cgu_reg_e82x - Read a CGU register - * @hw: pointer to the HW struct - * @addr: Register address to read - * @val: storage for register value read - * - * Read the contents of a register of the Clock Generation Unit. Only - * applicable to E822 devices. - */ -static int -ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val) -{ - struct ice_sbq_msg_input cgu_msg; - int err; - - cgu_msg.opcode = ice_sbq_msg_rd; - cgu_msg.dest_dev = cgu; - cgu_msg.msg_addr_low = addr; - cgu_msg.msg_addr_high = 0x0; - - err = ice_sbq_rw_reg(hw, &cgu_msg); - if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n", - addr, err); - return err; - } - - *val = cgu_msg.data; - - return err; -} - -/** - * ice_write_cgu_reg_e82x - Write a CGU register - * @hw: pointer to the HW struct - * @addr: Register address to write - * @val: value to write into the register - * - * Write the specified value to a register of the Clock Generation Unit. Only - * applicable to E822 devices. 
- */ -static int -ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val) -{ - struct ice_sbq_msg_input cgu_msg; - int err; - - cgu_msg.opcode = ice_sbq_msg_wr; - cgu_msg.dest_dev = cgu; - cgu_msg.msg_addr_low = addr; - cgu_msg.msg_addr_high = 0x0; - cgu_msg.data = val; - - err = ice_sbq_rw_reg(hw, &cgu_msg); - if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n", - addr, err); - return err; - } - - return err; -} - -/** - * ice_clk_freq_str - Convert time_ref_freq to string - * @clk_freq: Clock frequency - * - * Convert the specified TIME_REF clock frequency to a string. - */ -static const char *ice_clk_freq_str(u8 clk_freq) -{ - switch ((enum ice_time_ref_freq)clk_freq) { - case ICE_TIME_REF_FREQ_25_000: - return "25 MHz"; - case ICE_TIME_REF_FREQ_122_880: - return "122.88 MHz"; - case ICE_TIME_REF_FREQ_125_000: - return "125 MHz"; - case ICE_TIME_REF_FREQ_153_600: - return "153.6 MHz"; - case ICE_TIME_REF_FREQ_156_250: - return "156.25 MHz"; - case ICE_TIME_REF_FREQ_245_760: - return "245.76 MHz"; - default: - return "Unknown"; - } -} - -/** - * ice_clk_src_str - Convert time_ref_src to string - * @clk_src: Clock source - * - * Convert the specified clock source to its string name. - */ -static const char *ice_clk_src_str(u8 clk_src) -{ - switch ((enum ice_clk_src)clk_src) { - case ICE_CLK_SRC_TCX0: - return "TCX0"; - case ICE_CLK_SRC_TIME_REF: - return "TIME_REF"; - default: - return "Unknown"; - } -} - -/** - * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit - * @hw: pointer to the HW struct - * @clk_freq: Clock frequency to program - * @clk_src: Clock source to select (TIME_REF, or TCX0) - * - * Configure the Clock Generation Unit with the desired clock frequency and - * time reference, enabling the PLL which drives the PTP hardware clock. - */ -static int -ice_cfg_cgu_pll_e82x(struct ice_hw *hw, enum ice_time_ref_freq clk_freq, - enum ice_clk_src clk_src) -{ - union tspll_ro_bwm_lf bwm_lf; - union nac_cgu_dword19 dw19; - union nac_cgu_dword22 dw22; - union nac_cgu_dword24 dw24; - union nac_cgu_dword9 dw9; - int err; - - if (clk_freq >= NUM_ICE_TIME_REF_FREQ) { - dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n", - clk_freq); - return -EINVAL; - } - - if (clk_src >= NUM_ICE_CLK_SRC) { - dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n", - clk_src); - return -EINVAL; - } - - if (clk_src == ICE_CLK_SRC_TCX0 && - clk_freq != ICE_TIME_REF_FREQ_25_000) { - dev_warn(ice_hw_to_dev(hw), - "TCX0 only supports 25 MHz frequency\n"); - return -EINVAL; - } - - err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val); - if (err) - return err; - - err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val); - if (err) - return err; - - err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); - if (err) - return err; - - /* Log the current clock configuration */ - ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", - dw24.field.ts_pll_enable ? "enabled" : "disabled", - ice_clk_src_str(dw24.field.time_ref_sel), - ice_clk_freq_str(dw9.field.time_ref_freq_sel), - bwm_lf.field.plllock_true_lock_cri ? 
"locked" : "unlocked"); - - /* Disable the PLL before changing the clock source or frequency */ - if (dw24.field.ts_pll_enable) { - dw24.field.ts_pll_enable = 0; - - err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); - if (err) - return err; - } - - /* Set the frequency */ - dw9.field.time_ref_freq_sel = clk_freq; - err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val); - if (err) - return err; - - /* Configure the TS PLL feedback divisor */ - err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val); - if (err) - return err; - - dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div; - dw19.field.tspll_ndivratio = 1; - - err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val); - if (err) - return err; - - /* Configure the TS PLL post divisor */ - err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val); - if (err) - return err; - - dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div; - dw22.field.time1588clk_sel_div2 = 0; - - err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val); - if (err) - return err; - - /* Configure the TS PLL pre divisor and clock source */ - err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val); - if (err) - return err; - - dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div; - dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div; - dw24.field.time_ref_sel = clk_src; - - err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); - if (err) - return err; - - /* Finally, enable the PLL */ - dw24.field.ts_pll_enable = 1; - - err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); - if (err) - return err; - - /* Wait to verify if the PLL locks */ - usleep_range(1000, 5000); - - err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); - if (err) - return err; - - if (!bwm_lf.field.plllock_true_lock_cri) { - dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n"); - return -EBUSY; - } - - /* Log the current clock configuration */ - ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", - dw24.field.ts_pll_enable ? "enabled" : "disabled", - ice_clk_src_str(dw24.field.time_ref_sel), - ice_clk_freq_str(dw9.field.time_ref_freq_sel), - bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked"); - - return 0; -} - -/** - * ice_init_cgu_e82x - Initialize CGU with settings from firmware - * @hw: pointer to the HW structure - * - * Initialize the Clock Generation Unit of the E822 device. - */ -static int ice_init_cgu_e82x(struct ice_hw *hw) -{ - struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info; - union tspll_cntr_bist_settings cntr_bist; - int err; - - err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS, - &cntr_bist.val); - if (err) - return err; - - /* Disable sticky lock detection so lock err reported is accurate */ - cntr_bist.field.i_plllock_sel_0 = 0; - cntr_bist.field.i_plllock_sel_1 = 0; - - err = ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS, - cntr_bist.val); - if (err) - return err; - - /* Configure the CGU PLL using the parameters from the function - * capabilities. 
- */ - err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref, - (enum ice_clk_src)ts_info->clk_src); - if (err) - return err; - - return 0; -} - -/** * ice_ptp_set_vernier_wl - Set the window length for vernier calibration * @hw: pointer to the HW struct * @@ -1110,7 +3307,7 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw) { u8 port; - for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { + for (port = 0; port < hw->ptp.num_lports; port++) { int err; err = ice_write_phy_reg_e82x(hw, port, P_REG_WL, @@ -1134,15 +3331,14 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw) static int ice_ptp_init_phc_e82x(struct ice_hw *hw) { int err; - u32 regval; + u32 val; /* Enable reading switch and PHY registers over the sideband queue */ #define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1) #define PF_SB_REM_DEV_CTL_PHY0 BIT(2) - regval = rd32(hw, PF_SB_REM_DEV_CTL); - regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ | - PF_SB_REM_DEV_CTL_PHY0); - wr32(hw, PF_SB_REM_DEV_CTL, regval); + val = rd32(hw, PF_SB_REM_DEV_CTL); + val |= (PF_SB_REM_DEV_CTL_SWITCH_READ | PF_SB_REM_DEV_CTL_PHY0); + wr32(hw, PF_SB_REM_DEV_CTL, val); /* Initialize the Clock Generation Unit */ err = ice_init_cgu_e82x(hw); @@ -1175,7 +3371,7 @@ ice_ptp_prep_phy_time_e82x(struct ice_hw *hw, u32 time) */ phy_time = (u64)time << 32; - for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { + for (port = 0; port < hw->ptp.num_lports; port++) { /* Tx case */ err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_L, @@ -1278,7 +3474,7 @@ ice_ptp_prep_phy_adj_e82x(struct ice_hw *hw, s32 adj) else cycles = -(((s64)-adj) << 32); - for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { + for (port = 0; port < hw->ptp.num_lports; port++) { int err; err = ice_ptp_prep_port_adj_e82x(hw, port, cycles); @@ -1304,7 +3500,7 @@ ice_ptp_prep_phy_incval_e82x(struct ice_hw *hw, u64 incval) int err; u8 port; - for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { + for (port = 0; port < hw->ptp.num_lports; port++) { err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L, incval); if (err) @@ -1369,51 +3565,20 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts) * * Prepare the requested port for an upcoming timer sync command. * - * Do not use this function directly. If you want to configure exactly one - * port, use ice_ptp_one_port_cmd() instead. + * Note there is no equivalent of this operation on E810, as that device + * always handles all external PHYs internally. 
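/*
 * Illustrative sketch, not part of this patch: enabling the Tx timestamp
 * interrupt on every quad via ice_phy_cfg_intr_e82x() added above. The
 * threshold of 1 (fire as soon as one timestamp is ready) is an arbitrary
 * example value, not taken from this patch, and the function name is
 * hypothetical.
 */
static int example_enable_tx_tstamp_intr(struct ice_hw *hw)
{
	u8 quad;

	for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports); quad++) {
		int err;

		err = ice_phy_cfg_intr_e82x(hw, quad, true, 1);
		if (err)
			return err;
	}

	return 0;
}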
+ * + * Return: + * * %0 - success + * * %other - failed to write to PHY */ static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd) { - u32 cmd_val, val; - u8 tmr_idx; + u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd); int err; - tmr_idx = ice_get_ptp_src_clock_index(hw); - cmd_val = tmr_idx << SEL_PHY_SRC; - switch (cmd) { - case ICE_PTP_INIT_TIME: - cmd_val |= PHY_CMD_INIT_TIME; - break; - case ICE_PTP_INIT_INCVAL: - cmd_val |= PHY_CMD_INIT_INCVAL; - break; - case ICE_PTP_ADJ_TIME: - cmd_val |= PHY_CMD_ADJ_TIME; - break; - case ICE_PTP_READ_TIME: - cmd_val |= PHY_CMD_READ_TIME; - break; - case ICE_PTP_ADJ_TIME_AT_TIME: - cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME; - break; - case ICE_PTP_NOP: - break; - } - /* Tx case */ - /* Read, modify, write */ - err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, &val); - if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n", - err); - return err; - } - - /* Modify necessary bits only and perform write */ - val &= ~TS_CMD_MASK; - val |= cmd_val; - err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n", @@ -1422,19 +3587,8 @@ static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port, } /* Rx case */ - /* Read, modify, write */ - err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, &val); - if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n", - err); - return err; - } - - /* Modify necessary bits only and perform write */ - val &= ~TS_CMD_MASK; - val |= cmd_val; - - err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, + val | TS_CMD_RX_TYPE); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n", err); @@ -1444,63 +3598,6 @@ static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port, return 0; } -/** - * ice_ptp_one_port_cmd - Prepare one port for a timer command - * @hw: pointer to the HW struct - * @configured_port: the port to configure with configured_cmd - * @configured_cmd: timer command to prepare on the configured_port - * - * Prepare the configured_port for the configured_cmd, and prepare all other - * ports for ICE_PTP_NOP. This causes the configured_port to execute the - * desired command while all other ports perform no operation. - */ -static int -ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port, - enum ice_ptp_tmr_cmd configured_cmd) -{ - u8 port; - - for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { - enum ice_ptp_tmr_cmd cmd; - int err; - - if (port == configured_port) - cmd = configured_cmd; - else - cmd = ICE_PTP_NOP; - - err = ice_ptp_write_port_cmd_e82x(hw, port, cmd); - if (err) - return err; - } - - return 0; -} - -/** - * ice_ptp_port_cmd_e82x - Prepare all ports for a timer command - * @hw: pointer to the HW struct - * @cmd: timer command to prepare - * - * Prepare all ports connected to this device for an upcoming timer sync - * command. 
- */ -static int -ice_ptp_port_cmd_e82x(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) -{ - u8 port; - - for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { - int err; - - err = ice_ptp_write_port_cmd_e82x(hw, port, cmd); - if (err) - return err; - } - - return 0; -} - /* E822 Vernier calibration functions * * The following functions are used as part of the vernier calibration of @@ -1603,7 +3700,7 @@ static void ice_phy_cfg_lane_e82x(struct ice_hw *hw, u8 port) return; } - quad = port / ICE_PORTS_PER_QUAD; + quad = ICE_GET_QUAD_NUM(port); err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val); if (err) { @@ -2324,6 +4421,40 @@ int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port) } /** + * ice_ptp_clear_phy_offset_ready_e82x - Clear PHY TX_/RX_OFFSET_READY registers + * @hw: pointer to the HW struct + * + * Clear PHY TX_/RX_OFFSET_READY registers, effectively marking all transmitted + * and received timestamps as invalid. + * + * Return: 0 on success, other error codes when failed to write to PHY + */ +int ice_ptp_clear_phy_offset_ready_e82x(struct ice_hw *hw) +{ + u8 port; + + for (port = 0; port < hw->ptp.num_lports; port++) { + int err; + + err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 0); + if (err) { + dev_warn(ice_hw_to_dev(hw), + "Failed to clear PHY TX_OFFSET_READY register\n"); + return err; + } + + err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 0); + if (err) { + dev_warn(ice_hw_to_dev(hw), + "Failed to clear PHY RX_OFFSET_READY register\n"); + return err; + } + } + + return 0; +} + +/** * ice_read_phy_and_phc_time_e82x - Simultaneously capture PHC and PHY time * @hw: pointer to the HW struct * @port: the PHY port to read @@ -2633,6 +4764,47 @@ ice_get_phy_tx_tstamp_ready_e82x(struct ice_hw *hw, u8 quad, u64 *tstamp_ready) return 0; } +/** + * ice_phy_cfg_intr_e82x - Configure TX timestamp interrupt + * @hw: pointer to the HW struct + * @quad: the timestamp quad + * @ena: enable or disable interrupt + * @threshold: interrupt threshold + * + * Configure TX timestamp interrupt for the specified quad + * + * Return: 0 on success, other error codes when failed to read/write quad + */ + +int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold) +{ + int err; + u32 val; + + err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val); + if (err) + return err; + + val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; + if (ena) { + val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; + val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M; + val |= FIELD_PREP(Q_REG_TX_MEM_GBL_CFG_INTR_THR_M, threshold); + } + + return ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, val); +} + +/** + * ice_ptp_init_phy_e82x - initialize PHY parameters + * @ptp: pointer to the PTP HW struct + */ +static void ice_ptp_init_phy_e82x(struct ice_ptp_hw *ptp) +{ + ptp->num_lports = 8; + ptp->ports_per_phy = 8; +} + /* E810 functions * * The following functions operate on the E810 series devices which use @@ -2655,9 +4827,9 @@ static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val) msg.msg_addr_low = lower_16_bits(addr); msg.msg_addr_high = upper_16_bits(addr); msg.opcode = ice_sbq_msg_rd; - msg.dest_dev = rmn_0; + msg.dest_dev = ice_sbq_dev_phy_0; - err = ice_sbq_rw_reg(hw, &msg); + err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", err); @@ -2685,10 +4857,10 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val) msg.msg_addr_low = lower_16_bits(addr); msg.msg_addr_high = 
upper_16_bits(addr); msg.opcode = ice_sbq_msg_wr; - msg.dest_dev = rmn_0; + msg.dest_dev = ice_sbq_dev_phy_0; msg.data = val; - err = ice_sbq_rw_reg(hw, &msg); + err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", err); @@ -2712,33 +4884,46 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val) static int ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo) { + struct ice_e810_params *params = &hw->ptp.phy.e810; + unsigned long flags; u32 val; - u8 i; + int err; + + spin_lock_irqsave(¶ms->atqbal_wq.lock, flags); + + /* Wait for any pending in-progress low latency interrupt */ + err = wait_event_interruptible_locked_irq(params->atqbal_wq, + !(params->atqbal_flags & + ATQBAL_FLAGS_INTR_IN_PROGRESS)); + if (err) { + spin_unlock_irqrestore(¶ms->atqbal_wq.lock, flags); + return err; + } /* Write TS index to read to the PF register so the FW can read it */ - val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS; - wr32(hw, PF_SB_ATQBAL, val); + val = FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) | REG_LL_PROXY_H_EXEC; + wr32(hw, REG_LL_PROXY_H, val); /* Read the register repeatedly until the FW provides us the TS */ - for (i = TS_LL_READ_RETRIES; i > 0; i--) { - val = rd32(hw, PF_SB_ATQBAL); + err = read_poll_timeout_atomic(rd32, val, + !FIELD_GET(REG_LL_PROXY_H_EXEC, val), 10, + REG_LL_PROXY_H_TIMEOUT_US, false, hw, + REG_LL_PROXY_H); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n"); + spin_unlock_irqrestore(¶ms->atqbal_wq.lock, flags); + return err; + } - /* When the bit is cleared, the TS is ready in the register */ - if (!(FIELD_GET(TS_LL_READ_TS, val))) { - /* High 8 bit value of the TS is on the bits 16:23 */ - *hi = FIELD_GET(TS_LL_READ_TS_HIGH, val); + /* High 8 bit value of the TS is on the bits 16:23 */ + *hi = FIELD_GET(REG_LL_PROXY_H_TS_HIGH, val); - /* Read the low 32 bit value and set the TS valid bit */ - *lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID; - return 0; - } + /* Read the low 32 bit value and set the TS valid bit */ + *lo = rd32(hw, REG_LL_PROXY_L) | TS_VALID; - udelay(10); - } + spin_unlock_irqrestore(¶ms->atqbal_wq.lock, flags); - /* FW failed to provide the TS in time */ - ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n"); - return -EINVAL; + return 0; } /** @@ -2809,7 +4994,8 @@ ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp) /* For E810 devices, the timestamp is reported with the lower 32 bits * in the low register, and the upper 8 bits in the high register. */ - *tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M); + *tstamp = FIELD_PREP(PHY_EXT_40B_HIGH_M, hi) | + FIELD_PREP(PHY_EXT_40B_LOW_M, lo); return 0; } @@ -2860,17 +5046,20 @@ static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx) } /** - * ice_ptp_init_phy_e810 - Enable PTP function on the external PHY + * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization * @hw: pointer to HW struct * - * Enable the timesync PTP functionality for the external PHY connected to - * this function. + * Perform E810-specific PTP hardware clock initialization steps. 
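/*
 * Illustrative sketch, not part of this patch: how the 40-bit E810/E830
 * timestamp is assembled from the two halves read above. The high register
 * supplies the upper 8 bits and the low register the lower 32 bits, so the
 * composed value is (hi << 32) | lo; the FIELD_PREP() form in the patch
 * expresses the same layout through masks. Standalone user-space C.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t hi = 0xAB;          /* upper 8 bits of the 40-bit timestamp */
	uint32_t lo = 0x12345678;   /* lower 32 bits */
	uint64_t tstamp = ((uint64_t)hi << 32) | lo;

	printf("40-bit timestamp: 0x%010llx\n", (unsigned long long)tstamp);
	return 0;
}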
+ * + * Return: 0 on success, other error codes when failed to initialize TimeSync */ -int ice_ptp_init_phy_e810(struct ice_hw *hw) +static int ice_ptp_init_phc_e810(struct ice_hw *hw) { u8 tmr_idx; int err; + ice_ptp_cfg_sync_delay(hw, ICE_E810_E830_SYNC_DELAY); + tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx), GLTSYN_ENA_TSYN_ENA_M); @@ -2882,21 +5071,6 @@ int ice_ptp_init_phy_e810(struct ice_hw *hw) } /** - * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization - * @hw: pointer to HW struct - * - * Perform E810-specific PTP hardware clock initialization steps. - */ -static int ice_ptp_init_phc_e810(struct ice_hw *hw) -{ - /* Ensure synchronization delay is zero */ - wr32(hw, GLTSYN_SYNC_DLAY, 0); - - /* Initialize the PHY */ - return ice_ptp_init_phy_e810(hw); -} - -/** * ice_ptp_prep_phy_time_e810 - Prepare PHY port with initial time * @hw: Board private structure * @time: Time to initialize the PHY port clock to @@ -2932,6 +5106,55 @@ static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time) } /** + * ice_ptp_prep_phy_adj_ll_e810 - Prep PHY ports for a time adjustment + * @hw: pointer to HW struct + * @adj: adjustment value to program + * + * Use the low latency firmware interface to program PHY time adjustment to + * all PHY ports. + * + * Return: 0 on success, -EBUSY on timeout + */ +static int ice_ptp_prep_phy_adj_ll_e810(struct ice_hw *hw, s32 adj) +{ + const u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; + struct ice_e810_params *params = &hw->ptp.phy.e810; + u32 val; + int err; + + spin_lock_irq(¶ms->atqbal_wq.lock); + + /* Wait for any pending in-progress low latency interrupt */ + err = wait_event_interruptible_locked_irq(params->atqbal_wq, + !(params->atqbal_flags & + ATQBAL_FLAGS_INTR_IN_PROGRESS)); + if (err) { + spin_unlock_irq(¶ms->atqbal_wq.lock); + return err; + } + + wr32(hw, REG_LL_PROXY_L, adj); + val = FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_CMD_M, REG_LL_PROXY_H_PHY_TMR_CMD_ADJ) | + FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_IDX_M, tmr_idx) | REG_LL_PROXY_H_EXEC; + wr32(hw, REG_LL_PROXY_H, val); + + /* Read the register repeatedly until the FW indicates completion */ + err = read_poll_timeout_atomic(rd32, val, + !FIELD_GET(REG_LL_PROXY_H_EXEC, val), + 10, REG_LL_PROXY_H_TIMEOUT_US, false, hw, + REG_LL_PROXY_H); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY timer adjustment using low latency interface\n"); + spin_unlock_irq(¶ms->atqbal_wq.lock); + return err; + } + + spin_unlock_irq(¶ms->atqbal_wq.lock); + + return 0; +} + +/** * ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment * @hw: pointer to HW struct * @adj: adjustment value to program @@ -2949,6 +5172,9 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj) u8 tmr_idx; int err; + if (hw->dev_caps.ts_dev_info.ll_phy_tmr_update) + return ice_ptp_prep_phy_adj_ll_e810(hw, adj); + tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; /* Adjustments are represented as signed 2's complement values in @@ -2972,6 +5198,56 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj) } /** + * ice_ptp_prep_phy_incval_ll_e810 - Prep PHY ports increment value change + * @hw: pointer to HW struct + * @incval: The new 40bit increment value to prepare + * + * Use the low latency firmware interface to program PHY time increment value + * for all PHY ports. 
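/*
 * Illustrative sketch, not part of this patch: the "kick firmware, then poll
 * until EXEC clears" pattern used by the low latency proxy interface above,
 * shown in isolation. The register names and the 10 us poll interval mirror
 * the patch; the function name is hypothetical. FIELD_GET() comes from
 * <linux/bitfield.h>, read_poll_timeout_atomic() from <linux/iopoll.h>.
 */
static int example_ll_proxy_exec(struct ice_hw *hw, u32 cmd)
{
	u32 val;

	/* Hand the request to firmware; EXEC stays set while it is busy */
	wr32(hw, REG_LL_PROXY_H, cmd | REG_LL_PROXY_H_EXEC);

	/* Spin (atomic context) until firmware clears EXEC or we time out */
	return read_poll_timeout_atomic(rd32, val,
					!FIELD_GET(REG_LL_PROXY_H_EXEC, val),
					10, REG_LL_PROXY_H_TIMEOUT_US, false,
					hw, REG_LL_PROXY_H);
}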
+ * + * Return: 0 on success, -EBUSY on timeout + */ +static int ice_ptp_prep_phy_incval_ll_e810(struct ice_hw *hw, u64 incval) +{ + const u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; + struct ice_e810_params *params = &hw->ptp.phy.e810; + u32 val; + int err; + + spin_lock_irq(¶ms->atqbal_wq.lock); + + /* Wait for any pending in-progress low latency interrupt */ + err = wait_event_interruptible_locked_irq(params->atqbal_wq, + !(params->atqbal_flags & + ATQBAL_FLAGS_INTR_IN_PROGRESS)); + if (err) { + spin_unlock_irq(¶ms->atqbal_wq.lock); + return err; + } + + wr32(hw, REG_LL_PROXY_L, lower_32_bits(incval)); + val = FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_CMD_M, REG_LL_PROXY_H_PHY_TMR_CMD_FREQ) | + FIELD_PREP(REG_LL_PROXY_H_TS_HIGH, (u8)upper_32_bits(incval)) | + FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_IDX_M, tmr_idx) | REG_LL_PROXY_H_EXEC; + wr32(hw, REG_LL_PROXY_H, val); + + /* Read the register repeatedly until the FW indicates completion */ + err = read_poll_timeout_atomic(rd32, val, + !FIELD_GET(REG_LL_PROXY_H_EXEC, val), + 10, REG_LL_PROXY_H_TIMEOUT_US, false, hw, + REG_LL_PROXY_H); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY timer increment using low latency interface\n"); + spin_unlock_irq(¶ms->atqbal_wq.lock); + return err; + } + + spin_unlock_irq(¶ms->atqbal_wq.lock); + + return 0; +} + +/** * ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change * @hw: pointer to HW struct * @incval: The new 40bit increment value to prepare @@ -2986,6 +5262,9 @@ static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval) u8 tmr_idx; int err; + if (hw->dev_caps.ts_dev_info.ll_phy_tmr_update) + return ice_ptp_prep_phy_incval_ll_e810(hw, incval); + tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; low = lower_32_bits(incval); high = upper_32_bits(incval); @@ -3017,47 +5296,9 @@ static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval) */ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) { - u32 cmd_val, val; - int err; + u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd); - switch (cmd) { - case ICE_PTP_INIT_TIME: - cmd_val = GLTSYN_CMD_INIT_TIME; - break; - case ICE_PTP_INIT_INCVAL: - cmd_val = GLTSYN_CMD_INIT_INCVAL; - break; - case ICE_PTP_ADJ_TIME: - cmd_val = GLTSYN_CMD_ADJ_TIME; - break; - case ICE_PTP_READ_TIME: - cmd_val = GLTSYN_CMD_READ_TIME; - break; - case ICE_PTP_ADJ_TIME_AT_TIME: - cmd_val = GLTSYN_CMD_ADJ_INIT_TIME; - break; - case ICE_PTP_NOP: - return 0; - } - - /* Read, modify, write */ - err = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val); - if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, err %d\n", err); - return err; - } - - /* Modify necessary bits only and perform write */ - val &= ~TS_CMD_MASK_E810; - val |= cmd_val; - - err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val); - if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, err %d\n", err); - return err; - } - - return 0; + return ice_write_phy_reg_e810(hw, E810_ETH_GLTSYN_CMD, val); } /** @@ -3076,83 +5317,21 @@ ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready) return 0; } -/* E810T SMA functions +/* E810 SMA functions * - * The following functions operate specifically on E810T hardware and are used + * The following functions operate specifically on E810 hardware and are used * to access the extended GPIOs available. 
*/ /** - * ice_get_pca9575_handle - * @hw: pointer to the hw struct - * @pca9575_handle: GPIO controller's handle - * - * Find and return the GPIO controller's handle in the netlist. - * When found - the value will be cached in the hw structure and following calls - * will return cached value - */ -static int -ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle) -{ - struct ice_aqc_get_link_topo *cmd; - struct ice_aq_desc desc; - int status; - u8 idx; - - /* If handle was read previously return cached value */ - if (hw->io_expander_handle) { - *pca9575_handle = hw->io_expander_handle; - return 0; - } - - /* If handle was not detected read it from the netlist */ - cmd = &desc.params.get_link_topo; - ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo); - - /* Set node type to GPIO controller */ - cmd->addr.topo_params.node_type_ctx = - (ICE_AQC_LINK_TOPO_NODE_TYPE_M & - ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL); - -#define SW_PCA9575_SFP_TOPO_IDX 2 -#define SW_PCA9575_QSFP_TOPO_IDX 1 - - /* Check if the SW IO expander controlling SMA exists in the netlist. */ - if (hw->device_id == ICE_DEV_ID_E810C_SFP) - idx = SW_PCA9575_SFP_TOPO_IDX; - else if (hw->device_id == ICE_DEV_ID_E810C_QSFP) - idx = SW_PCA9575_QSFP_TOPO_IDX; - else - return -EOPNOTSUPP; - - cmd->addr.topo_params.index = idx; - - status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); - if (status) - return -EOPNOTSUPP; - - /* Verify if we found the right IO expander type */ - if (desc.params.get_link_topo.node_part_num != - ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575) - return -EOPNOTSUPP; - - /* If present save the handle and return it */ - hw->io_expander_handle = - le16_to_cpu(desc.params.get_link_topo.addr.handle); - *pca9575_handle = hw->io_expander_handle; - - return 0; -} - -/** - * ice_read_sma_ctrl_e810t + * ice_read_sma_ctrl * @hw: pointer to the hw struct * @data: pointer to data to be read from the GPIO controller * * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the * PCA9575 expander, so only bits 3-7 in data are valid. */ -int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data) +int ice_read_sma_ctrl(struct ice_hw *hw, u8 *data) { int status; u16 handle; @@ -3164,7 +5343,7 @@ int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data) *data = 0; - for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) { + for (i = ICE_SMA_MIN_BIT; i <= ICE_SMA_MAX_BIT; i++) { bool pin; status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET, @@ -3178,14 +5357,14 @@ int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data) } /** - * ice_write_sma_ctrl_e810t + * ice_write_sma_ctrl * @hw: pointer to the hw struct * @data: data to be written to the GPIO controller * * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1 * of the PCA9575 expander, so only bits 3-7 in data are valid. 
*/ -int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data) +int ice_write_sma_ctrl(struct ice_hw *hw, u8 data) { int status; u16 handle; @@ -3195,7 +5374,7 @@ int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data) if (status) return status; - for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) { + for (i = ICE_SMA_MIN_BIT; i <= ICE_SMA_MAX_BIT; i++) { bool pin; pin = !(data & (1 << i)); @@ -3209,41 +5388,203 @@ int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data) } /** - * ice_read_pca9575_reg_e810t - * @hw: pointer to the hw struct - * @offset: GPIO controller register offset - * @data: pointer to data to be read from the GPIO controller + * ice_ptp_read_sdp_ac - read SDP available connections section from NVM + * @hw: pointer to the HW struct + * @entries: returns the SDP available connections section from NVM + * @num_entries: returns the number of valid entries * - * Read the register from the GPIO controller + * Return: 0 on success, negative error code if NVM read failed or section does + * not exist or is corrupted */ -int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data) +int ice_ptp_read_sdp_ac(struct ice_hw *hw, __le16 *entries, uint *num_entries) { - struct ice_aqc_link_topo_addr link_topo; - __le16 addr; - u16 handle; + __le16 data; + u32 offset; int err; - memset(&link_topo, 0, sizeof(link_topo)); + err = ice_acquire_nvm(hw, ICE_RES_READ); + if (err) + goto exit; - err = ice_get_pca9575_handle(hw, &handle); + /* Read the offset of SDP_AC */ + offset = ICE_AQC_NVM_SDP_AC_PTR_OFFSET; + err = ice_aq_read_nvm(hw, 0, offset, sizeof(data), &data, false, true, + NULL); if (err) - return err; + goto exit; + + /* Check if section exist */ + offset = FIELD_GET(ICE_AQC_NVM_SDP_AC_PTR_M, le16_to_cpu(data)); + if (offset == ICE_AQC_NVM_SDP_AC_PTR_INVAL) { + err = -EINVAL; + goto exit; + } + + if (offset & ICE_AQC_NVM_SDP_AC_PTR_TYPE_M) { + offset &= ICE_AQC_NVM_SDP_AC_PTR_M; + offset *= ICE_AQC_NVM_SECTOR_UNIT; + } else { + offset *= sizeof(data); + } + + /* Skip reading section length and read the number of valid entries */ + offset += sizeof(data); + err = ice_aq_read_nvm(hw, 0, offset, sizeof(data), &data, false, true, + NULL); + if (err) + goto exit; + *num_entries = le16_to_cpu(data); + + /* Read SDP configuration section */ + offset += sizeof(data); + err = ice_aq_read_nvm(hw, 0, offset, *num_entries * sizeof(data), + entries, false, true, NULL); + +exit: + if (err) + dev_dbg(ice_hw_to_dev(hw), "Failed to configure SDP connection section\n"); + ice_release_nvm(hw); + return err; +} + +/** + * ice_ptp_init_phy_e810 - initialize PHY parameters + * @ptp: pointer to the PTP HW struct + */ +static void ice_ptp_init_phy_e810(struct ice_ptp_hw *ptp) +{ + ptp->num_lports = 8; + ptp->ports_per_phy = 4; + + init_waitqueue_head(&ptp->phy.e810.atqbal_wq); +} + +/* E830 functions + * + * The following functions operate on the E830 series devices. + * + */ + +/** + * ice_ptp_init_phc_e830 - Perform E830 specific PHC initialization + * @hw: pointer to HW struct + * + * Perform E830-specific PTP hardware clock initialization steps. + */ +static void ice_ptp_init_phc_e830(const struct ice_hw *hw) +{ + ice_ptp_cfg_sync_delay(hw, ICE_E810_E830_SYNC_DELAY); +} + +/** + * ice_ptp_write_direct_incval_e830 - Prep PHY port increment value change + * @hw: pointer to HW struct + * @incval: The new 40bit increment value to prepare + * + * Prepare the PHY port for a new increment value by programming the PHC + * GLTSYN_INCVAL_L and GLTSYN_INCVAL_H registers. 
The actual change is + * completed by FW automatically. + */ +static void ice_ptp_write_direct_incval_e830(const struct ice_hw *hw, + u64 incval) +{ + u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; + + wr32(hw, GLTSYN_INCVAL_L(tmr_idx), lower_32_bits(incval)); + wr32(hw, GLTSYN_INCVAL_H(tmr_idx), upper_32_bits(incval)); +} + +/** + * ice_ptp_write_direct_phc_time_e830 - Prepare PHY port with initial time + * @hw: Board private structure + * @time: Time to initialize the PHY port clock to + * + * Program the PHY port ETH_GLTSYN_SHTIME registers in preparation setting the + * initial clock time. The time will not actually be programmed until the + * driver issues an ICE_PTP_INIT_TIME command. + * + * The time value is the upper 32 bits of the PHY timer, usually in units of + * nominal nanoseconds. + */ +static void ice_ptp_write_direct_phc_time_e830(const struct ice_hw *hw, + u64 time) +{ + u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; - link_topo.handle = cpu_to_le16(handle); - link_topo.topo_params.node_type_ctx = - FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, - ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED); + wr32(hw, GLTSYN_TIME_0(tmr_idx), 0); + wr32(hw, GLTSYN_TIME_L(tmr_idx), lower_32_bits(time)); + wr32(hw, GLTSYN_TIME_H(tmr_idx), upper_32_bits(time)); +} - addr = cpu_to_le16((u16)offset); +/** + * ice_ptp_port_cmd_e830 - Prepare all external PHYs for a timer command + * @hw: pointer to HW struct + * @cmd: Command to be sent to the port + * + * Prepare the external PHYs connected to this device for a timer sync + * command. + * + * Return: 0 on success, negative error code when PHY write failed + */ +static int ice_ptp_port_cmd_e830(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) +{ + u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd); - return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL); + return ice_write_phy_reg_e810(hw, E830_ETH_GLTSYN_CMD, val); +} + +/** + * ice_read_phy_tstamp_e830 - Read a PHY timestamp out of the external PHY + * @hw: pointer to the HW struct + * @idx: the timestamp index to read + * @tstamp: on return, the 40bit timestamp value + * + * Read a 40bit timestamp value out of the timestamp block of the external PHY + * on the E830 device. + */ +static void ice_read_phy_tstamp_e830(const struct ice_hw *hw, u8 idx, + u64 *tstamp) +{ + u32 hi, lo; + + hi = rd32(hw, E830_PRTTSYN_TXTIME_H(idx)); + lo = rd32(hw, E830_PRTTSYN_TXTIME_L(idx)); + + /* For E830 devices, the timestamp is reported with the lower 32 bits + * in the low register, and the upper 8 bits in the high register. + */ + *tstamp = FIELD_PREP(PHY_EXT_40B_HIGH_M, hi) | + FIELD_PREP(PHY_EXT_40B_LOW_M, lo); +} + +/** + * ice_get_phy_tx_tstamp_ready_e830 - Read Tx memory status register + * @hw: pointer to the HW struct + * @port: the PHY port to read + * @tstamp_ready: contents of the Tx memory status register + */ +static void ice_get_phy_tx_tstamp_ready_e830(const struct ice_hw *hw, u8 port, + u64 *tstamp_ready) +{ + *tstamp_ready = rd32(hw, E830_PRTMAC_TS_TX_MEM_VALID_H); + *tstamp_ready <<= 32; + *tstamp_ready |= rd32(hw, E830_PRTMAC_TS_TX_MEM_VALID_L); +} + +/** + * ice_ptp_init_phy_e830 - initialize PHY parameters + * @ptp: pointer to the PTP HW struct + */ +static void ice_ptp_init_phy_e830(struct ice_ptp_hw *ptp) +{ + ptp->num_lports = 8; + ptp->ports_per_phy = 4; } /* Device agnostic functions * - * The following functions implement shared behavior common to both E822 and - * E810 devices, possibly calling a device specific implementation where - * necessary. 
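Before the device-agnostic dispatchers below, it is worth noting how ice_read_phy_tstamp_e830() above assembles its result: the high register supplies bits 39:32 and the low register bits 31:0 of a 40-bit timestamp. A self-contained sketch of that composition with plain shifts (the driver itself uses FIELD_PREP() with the PHY_EXT_40B_HIGH_M/PHY_EXT_40B_LOW_M masks defined in ice_ptp_hw.h):

#include <stdint.h>
#include <stdio.h>

/* Compose a 40-bit E830-style Tx timestamp from the two register reads:
 * bits 39:32 come from the high register, bits 31:0 from the low one.
 */
static uint64_t compose_40b_tstamp(uint32_t hi, uint32_t lo)
{
        return ((uint64_t)(hi & 0xff) << 32) | lo;
}

int main(void)
{
        uint32_t hi = 0x12, lo = 0x89abcdef;    /* made-up register values */

        printf("tstamp = 0x%010llx\n",
               (unsigned long long)compose_40b_tstamp(hi, lo));
        return 0;
}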
+ * The following functions implement shared behavior common to all devices, + * possibly calling a device specific implementation where necessary. */ /** @@ -3296,18 +5637,136 @@ void ice_ptp_unlock(struct ice_hw *hw) } /** - * ice_ptp_init_phy_model - Initialize hw->phy_model based on device type + * ice_ptp_init_hw - Initialize hw based on device type * @hw: pointer to the HW structure * - * Determine the PHY model for the device, and initialize hw->phy_model + * Determine the PHY model for the device, and initialize hw * for use by other functions. */ -void ice_ptp_init_phy_model(struct ice_hw *hw) +void ice_ptp_init_hw(struct ice_hw *hw) { - if (ice_is_e810(hw)) - hw->phy_model = ICE_PHY_E810; - else - hw->phy_model = ICE_PHY_E82X; + struct ice_ptp_hw *ptp = &hw->ptp; + + switch (hw->mac_type) { + case ICE_MAC_E810: + ice_ptp_init_phy_e810(ptp); + break; + case ICE_MAC_E830: + ice_ptp_init_phy_e830(ptp); + break; + case ICE_MAC_GENERIC: + ice_ptp_init_phy_e82x(ptp); + break; + case ICE_MAC_GENERIC_3K_E825: + ice_ptp_init_phy_e825(hw); + break; + default: + return; + } +} + +/** + * ice_ptp_write_port_cmd - Prepare a single PHY port for a timer command + * @hw: pointer to HW struct + * @port: Port to which cmd has to be sent + * @cmd: Command to be sent to the port + * + * Prepare one port for the upcoming timer sync command. Do not use this for + * programming only a single port, instead use ice_ptp_one_port_cmd() to + * ensure non-modified ports get properly initialized to ICE_PTP_NOP. + * + * Return: + * * %0 - success + * %-EBUSY - PHY type not supported + * * %other - failed to write port command + */ +static int ice_ptp_write_port_cmd(struct ice_hw *hw, u8 port, + enum ice_ptp_tmr_cmd cmd) +{ + switch (hw->mac_type) { + case ICE_MAC_GENERIC: + return ice_ptp_write_port_cmd_e82x(hw, port, cmd); + case ICE_MAC_GENERIC_3K_E825: + return ice_ptp_write_port_cmd_eth56g(hw, port, cmd); + default: + return -EOPNOTSUPP; + } +} + +/** + * ice_ptp_one_port_cmd - Program one PHY port for a timer command + * @hw: pointer to HW struct + * @configured_port: the port that should execute the command + * @configured_cmd: the command to be executed on the configured port + * + * Prepare one port for executing a timer command, while preparing all other + * ports to ICE_PTP_NOP. This allows executing a command on a single port + * while ensuring all other ports do not execute stale commands. + * + * Return: + * * %0 - success + * * %other - failed to write port command + */ +int ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port, + enum ice_ptp_tmr_cmd configured_cmd) +{ + u32 port; + + for (port = 0; port < hw->ptp.num_lports; port++) { + int err; + + /* Program the configured port with the configured command, + * program all other ports with ICE_PTP_NOP. + */ + if (port == configured_port) + err = ice_ptp_write_port_cmd(hw, port, configured_cmd); + else + err = ice_ptp_write_port_cmd(hw, port, ICE_PTP_NOP); + + if (err) + return err; + } + + return 0; +} + +/** + * ice_ptp_port_cmd - Prepare PHY ports for a timer sync command + * @hw: pointer to HW struct + * @cmd: the timer command to setup + * + * Prepare all PHY ports on this device for the requested timer command. For + * some families this can be done in one shot, but for other families each + * port must be configured individually. 
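The per-port path relies on the same invariant as ice_ptp_one_port_cmd() above: every port must be armed with something before the source timer strobe fires, and ports that should not react are armed with ICE_PTP_NOP. A minimal sketch of that pattern, with write_cmd() as a hypothetical stand-in for ice_ptp_write_port_cmd():

enum tmr_cmd { TMR_NOP, TMR_INIT_TIME, TMR_ADJ_TIME };

/* Hypothetical per-port writer standing in for ice_ptp_write_port_cmd(). */
typedef int (*write_cmd_t)(unsigned int port, enum tmr_cmd cmd);

static int one_port_cmd_sketch(write_cmd_t write_cmd, unsigned int num_ports,
                               unsigned int cfg_port, enum tmr_cmd cfg_cmd)
{
        unsigned int port;

        for (port = 0; port < num_ports; port++) {
                /* Only the configured port gets the real command; every other
                 * port is armed with a NOP so no stale command executes when
                 * the main timer sync strobe fires.
                 */
                int err = write_cmd(port, port == cfg_port ? cfg_cmd : TMR_NOP);

                if (err)
                        return err;
        }

        return 0;
}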
+ * + * Return: + * * %0 - success + * * %other - failed to write port command + */ +static int ice_ptp_port_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) +{ + u32 port; + + /* PHY models which can program all ports simultaneously */ + switch (hw->mac_type) { + case ICE_MAC_E810: + return ice_ptp_port_cmd_e810(hw, cmd); + case ICE_MAC_E830: + return ice_ptp_port_cmd_e830(hw, cmd); + default: + break; + } + + /* PHY models which require programming each port separately */ + for (port = 0; port < hw->ptp.num_lports; port++) { + int err; + + err = ice_ptp_write_port_cmd(hw, port, cmd); + if (err) + return err; + } + + return 0; } /** @@ -3328,17 +5787,7 @@ static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) ice_ptp_src_cmd(hw, cmd); /* Next, prepare the ports */ - switch (hw->phy_model) { - case ICE_PHY_E810: - err = ice_ptp_port_cmd_e810(hw, cmd); - break; - case ICE_PHY_E82X: - err = ice_ptp_port_cmd_e82x(hw, cmd); - break; - default: - err = -EOPNOTSUPP; - } - + err = ice_ptp_port_cmd(hw, cmd); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n", cmd, err); @@ -3374,19 +5823,29 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time) tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; /* Source timers */ + /* For E830 we don't need to use shadow registers, its automatic */ + if (hw->mac_type == ICE_MAC_E830) { + ice_ptp_write_direct_phc_time_e830(hw, time); + return 0; + } + wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time)); wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time)); wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0); /* PHY timers */ /* Fill Rx and Tx ports and send msg to PHY */ - switch (hw->phy_model) { - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF); break; - case ICE_PHY_E82X: + case ICE_MAC_GENERIC: err = ice_ptp_prep_phy_time_e82x(hw, time & 0xFFFFFFFF); break; + case ICE_MAC_GENERIC_3K_E825: + err = ice_ptp_prep_phy_time_eth56g(hw, + (u32)(time & 0xFFFFFFFF)); + break; default: err = -EOPNOTSUPP; } @@ -3418,17 +5877,26 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval) tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; + /* For E830 we don't need to use shadow registers, its automatic */ + if (hw->mac_type == ICE_MAC_E830) { + ice_ptp_write_direct_incval_e830(hw, incval); + return 0; + } + /* Shadow Adjust */ wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval)); wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval)); - switch (hw->phy_model) { - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: err = ice_ptp_prep_phy_incval_e810(hw, incval); break; - case ICE_PHY_E82X: + case ICE_MAC_GENERIC: err = ice_ptp_prep_phy_incval_e82x(hw, incval); break; + case ICE_MAC_GENERIC_3K_E825: + err = ice_ptp_prep_phy_incval_eth56g(hw, incval); + break; default: err = -EOPNOTSUPP; } @@ -3488,13 +5956,19 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj) wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0); wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj); - switch (hw->phy_model) { - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: err = ice_ptp_prep_phy_adj_e810(hw, adj); break; - case ICE_PHY_E82X: + case ICE_MAC_E830: + /* E830 sync PHYs automatically after setting GLTSYN_SHADJ */ + return 0; + case ICE_MAC_GENERIC: err = ice_ptp_prep_phy_adj_e82x(hw, adj); break; + case ICE_MAC_GENERIC_3K_E825: + err = ice_ptp_prep_phy_adj_eth56g(hw, adj); + break; default: err = -EOPNOTSUPP; } @@ -3518,11 +5992,16 @@ int 
ice_ptp_adj_clock(struct ice_hw *hw, s32 adj) */ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp) { - switch (hw->phy_model) { - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: return ice_read_phy_tstamp_e810(hw, block, idx, tstamp); - case ICE_PHY_E82X: + case ICE_MAC_E830: + ice_read_phy_tstamp_e830(hw, idx, tstamp); + return 0; + case ICE_MAC_GENERIC: return ice_read_phy_tstamp_e82x(hw, block, idx, tstamp); + case ICE_MAC_GENERIC_3K_E825: + return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp); default: return -EOPNOTSUPP; } @@ -3546,11 +6025,13 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp) */ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx) { - switch (hw->phy_model) { - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: return ice_clear_phy_tstamp_e810(hw, block, idx); - case ICE_PHY_E82X: + case ICE_MAC_GENERIC: return ice_clear_phy_tstamp_e82x(hw, block, idx); + case ICE_MAC_GENERIC_3K_E825: + return ice_clear_ptp_tstamp_eth56g(hw, block, idx); default: return -EOPNOTSUPP; } @@ -3607,11 +6088,14 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx) */ void ice_ptp_reset_ts_memory(struct ice_hw *hw) { - switch (hw->phy_model) { - case ICE_PHY_E82X: + switch (hw->mac_type) { + case ICE_MAC_GENERIC: ice_ptp_reset_ts_memory_e82x(hw); break; - case ICE_PHY_E810: + case ICE_MAC_GENERIC_3K_E825: + ice_ptp_reset_ts_memory_eth56g(hw); + break; + case ICE_MAC_E810: default: return; } @@ -3633,11 +6117,16 @@ int ice_ptp_init_phc(struct ice_hw *hw) /* Clear event err indications for auxiliary pins */ (void)rd32(hw, GLTSYN_STAT(src_idx)); - switch (hw->phy_model) { - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: return ice_ptp_init_phc_e810(hw); - case ICE_PHY_E82X: + case ICE_MAC_E830: + ice_ptp_init_phc_e830(hw); + return 0; + case ICE_MAC_GENERIC: return ice_ptp_init_phc_e82x(hw); + case ICE_MAC_GENERIC_3K_E825: + return ice_ptp_init_phc_e825(hw); default: return -EOPNOTSUPP; } @@ -3656,14 +6145,19 @@ int ice_ptp_init_phc(struct ice_hw *hw) */ int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready) { - switch (hw->phy_model) { - case ICE_PHY_E810: + switch (hw->mac_type) { + case ICE_MAC_E810: return ice_get_phy_tx_tstamp_ready_e810(hw, block, tstamp_ready); - case ICE_PHY_E82X: + case ICE_MAC_E830: + ice_get_phy_tx_tstamp_ready_e830(hw, block, tstamp_ready); + return 0; + case ICE_MAC_GENERIC: return ice_get_phy_tx_tstamp_ready_e82x(hw, block, tstamp_ready); - break; + case ICE_MAC_GENERIC_3K_E825: + return ice_get_phy_tx_tstamp_ready_eth56g(hw, block, + tstamp_ready); default: return -EOPNOTSUPP; } @@ -3760,6 +6254,25 @@ ice_cgu_get_pin_desc(struct ice_hw *hw, bool input, int *size) } /** + * ice_cgu_get_num_pins - get pin description array size + * @hw: pointer to the hw struct + * @input: if request is done against input or output pins + * + * Return: size of pin description array for given hw. 
+ */ +int ice_cgu_get_num_pins(struct ice_hw *hw, bool input) +{ + const struct ice_cgu_pin_desc *t; + int size; + + t = ice_cgu_get_pin_desc(hw, input, &size); + if (t) + return size; + + return 0; +} + +/** * ice_cgu_get_pin_type - get pin's type * @hw: pointer to the hw struct * @pin: pin index diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h index 1f3e03124430..83f20fa7ace7 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h @@ -41,11 +41,45 @@ enum ice_ptp_fec_mode { ICE_PTP_FEC_MODE_RS_FEC }; +enum eth56g_res_type { + ETH56G_PHY_REG_PTP, + ETH56G_PHY_MEM_PTP, + ETH56G_PHY_REG_XPCS, + ETH56G_PHY_REG_MAC, + ETH56G_PHY_REG_GPCS, + NUM_ETH56G_PHY_RES +}; + +enum ice_eth56g_link_spd { + ICE_ETH56G_LNK_SPD_1G, + ICE_ETH56G_LNK_SPD_2_5G, + ICE_ETH56G_LNK_SPD_10G, + ICE_ETH56G_LNK_SPD_25G, + ICE_ETH56G_LNK_SPD_40G, + ICE_ETH56G_LNK_SPD_50G, + ICE_ETH56G_LNK_SPD_50G2, + ICE_ETH56G_LNK_SPD_100G, + ICE_ETH56G_LNK_SPD_100G2, + NUM_ICE_ETH56G_LNK_SPD /* Must be last */ +}; + +/** + * struct ice_phy_reg_info_eth56g - ETH56G PHY register parameters + * @base_addr: base address for each PHY block + * @step: step between PHY lanes + * + * Characteristic information for the various PHY register parameters in the + * ETH56G devices + */ +struct ice_phy_reg_info_eth56g { + u32 base_addr; + u32 step; +}; + /** * struct ice_time_ref_info_e82x * @pll_freq: Frequency of PLL that drives timer ticks in Hz * @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L - * @pps_delay: propagation delay of the PPS output signal * * Characteristic information for the various TIME_REF sources possible in the * E822 devices @@ -53,7 +87,6 @@ enum ice_ptp_fec_mode { struct ice_time_ref_info_e82x { u64 pll_freq; u64 nominal_incval; - u8 pps_delay; }; /** @@ -94,8 +127,75 @@ struct ice_vernier_info_e82x { u32 rx_fixed_delay; }; +#define ICE_ETH56G_MAC_CFG_RX_OFFSET_INT GENMASK(19, 9) +#define ICE_ETH56G_MAC_CFG_RX_OFFSET_FRAC GENMASK(8, 0) +#define ICE_ETH56G_MAC_CFG_FRAC_W 9 +/** + * struct ice_eth56g_mac_reg_cfg - MAC config values for specific PTP registers + * @tx_mode: Tx timestamp compensation mode + * @tx_mk_dly: Tx timestamp marker start strobe delay + * @tx_cw_dly: Tx timestamp codeword start strobe delay + * @rx_mode: Rx timestamp compensation mode + * @rx_mk_dly: Rx timestamp marker start strobe delay + * @rx_cw_dly: Rx timestamp codeword start strobe delay + * @blks_per_clk: number of blocks transferred per clock cycle + * @blktime: block time, fixed point + * @mktime: marker time, fixed point + * @tx_offset: total Tx offset, fixed point + * @rx_offset: total Rx offset, contains value for bitslip/deskew, fixed point + * + * All fixed point registers except Rx offset are 23 bit unsigned ints with + * a 9 bit fractional. + * Rx offset is 11 bit unsigned int with a 9 bit fractional. 
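In other words these are plain binary fixed-point values with a 9-bit fraction (ICE_ETH56G_MAC_CFG_FRAC_W), i.e. 512 fractional steps per integer unit. A small self-contained sketch of the conversion, with illustrative helper names:

#include <stdint.h>
#include <stdio.h>

#define FRAC_W          9               /* ICE_ETH56G_MAC_CFG_FRAC_W */
#define FRAC_SCALE      (1u << FRAC_W)  /* 512 fractional steps per unit */

/* Encode a value with a fractional part into the 9-bit-fraction format. */
static uint32_t to_fixed(double val)
{
        return (uint32_t)(val * FRAC_SCALE + 0.5);
}

/* Decode the register format back into a plain floating point value. */
static double from_fixed(uint32_t reg)
{
        return (double)reg / FRAC_SCALE;
}

int main(void)
{
        uint32_t reg = to_fixed(5.25);  /* 5.25 -> 0xa80 */

        printf("0x%x -> %.3f\n", reg, from_fixed(reg));
        return 0;
}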
+ */ +struct ice_eth56g_mac_reg_cfg { + struct { + u8 def; + u8 rs; + } tx_mode; + u8 tx_mk_dly; + struct { + u8 def; + u8 onestep; + } tx_cw_dly; + struct { + u8 def; + u8 rs; + } rx_mode; + struct { + u8 def; + u8 rs; + } rx_mk_dly; + struct { + u8 def; + u8 rs; + } rx_cw_dly; + u8 blks_per_clk; + u16 blktime; + u16 mktime; + struct { + u32 serdes; + u32 no_fec; + u32 fc; + u32 rs; + u32 sfd; + u32 onestep; + } tx_offset; + struct { + u32 serdes; + u32 no_fec; + u32 fc; + u32 rs; + u32 sfd; + u32 bs_ds; + } rx_offset; +}; + +extern +const struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD]; + /** - * struct ice_cgu_pll_params_e82x + * struct ice_cgu_pll_params_e82x - E82X CGU parameters * @refclk_pre_div: Reference clock pre-divisor * @feedback_div: Feedback divisor * @frac_n_div: Fractional divisor @@ -185,11 +285,36 @@ struct ice_cgu_pin_desc { extern const struct ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ]; +/** + * struct ice_cgu_pll_params_e825c - E825C CGU parameters + * @tspll_ck_refclkfreq: tspll_ck_refclkfreq selection + * @tspll_ndivratio: ndiv ratio that goes directly to the pll + * @tspll_fbdiv_intgr: TS PLL integer feedback divide + * @tspll_fbdiv_frac: TS PLL fractional feedback divide + * @ref1588_ck_div: clock divider for tspll ref + * + * Clock Generation Unit parameters used to program the PLL based on the + * selected TIME_REF/TCXO frequency. + */ +struct ice_cgu_pll_params_e825c { + u32 tspll_ck_refclkfreq; + u32 tspll_ndivratio; + u32 tspll_fbdiv_intgr; + u32 tspll_fbdiv_frac; + u32 ref1588_ck_div; +}; + +extern const struct +ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ]; + #define E810C_QSFP_C827_0_HANDLE 2 #define E810C_QSFP_C827_1_HANDLE 3 +/* Table of constants related to possible ETH56G PHY resources */ +extern const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES]; + /* Table of constants related to possible TIME_REF sources */ -extern const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ]; +extern const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ]; /* Table of constants for Vernier calibration on E822 */ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD]; @@ -197,10 +322,13 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD]; /* Increment value to generate nanoseconds in the GLTSYN_TIME_L register for * the E810 devices. Based off of a PLL with an 812.5 MHz frequency. 
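That constant is just the 812.5 MHz tick period expressed as a 32.32 fixed-point nanosecond increment: 1e9 / 812500000 = 16/13, roughly 1.2308 ns per tick, and scaling it by 2^32 gives 0x13b13b13b. A short self-contained check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const double pll_hz = 812500000.0;              /* ICE_E810_PLL_FREQ */
        const double ns_per_tick = 1e9 / pll_hz;        /* 16/13, ~1.2308 ns */

        /* GLTSYN_INCVAL holds nanoseconds in 32.32 fixed point, so the
         * nominal increment is the tick period scaled by 2^32.
         */
        uint64_t incval = (uint64_t)(ns_per_tick * 4294967296.0);

        printf("incval = 0x%llx\n", (unsigned long long)incval);
        return 0;               /* prints incval = 0x13b13b13b */
}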
*/ -#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL +#define ICE_E810_PLL_FREQ 812500000 +#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL +#define ICE_E810_E830_SYNC_DELAY 0 /* Device agnostic functions */ u8 ice_get_ptp_src_clock_index(struct ice_hw *hw); +int ice_cgu_cfg_pps_out(struct ice_hw *hw, bool enable); bool ice_ptp_lock(struct ice_hw *hw); void ice_ptp_unlock(struct ice_hw *hw); void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd); @@ -208,11 +336,15 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time); int ice_ptp_write_incval(struct ice_hw *hw, u64 incval); int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval); int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj); +int ice_ptp_clear_phy_offset_ready_e82x(struct ice_hw *hw); int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp); int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx); void ice_ptp_reset_ts_memory(struct ice_hw *hw); int ice_ptp_init_phc(struct ice_hw *hw); +void ice_ptp_init_hw(struct ice_hw *hw); int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready); +int ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port, + enum ice_ptp_tmr_cmd configured_cmd); /* E822 family functions */ int ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val); @@ -225,7 +357,7 @@ void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad); * * Returns the current TIME_REF from the capabilities structure. */ -static inline enum ice_time_ref_freq ice_e82x_time_ref(struct ice_hw *hw) +static inline enum ice_time_ref_freq ice_e82x_time_ref(const struct ice_hw *hw) { return hw->func_caps.ts_func_info.time_ref; } @@ -246,17 +378,12 @@ ice_set_e82x_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref) static inline u64 ice_e82x_pll_freq(enum ice_time_ref_freq time_ref) { - return e822_time_ref[time_ref].pll_freq; + return e82x_time_ref[time_ref].pll_freq; } static inline u64 ice_e82x_nominal_incval(enum ice_time_ref_freq time_ref) { - return e822_time_ref[time_ref].nominal_incval; -} - -static inline u64 ice_e82x_pps_delay(enum ice_time_ref_freq time_ref) -{ - return e822_time_ref[time_ref].pps_delay; + return e82x_time_ref[time_ref].nominal_incval; } /* E822 Vernier calibration functions */ @@ -264,13 +391,13 @@ int ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset); int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port); int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port); int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port); +int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold); /* E810 family functions */ -int ice_ptp_init_phy_e810(struct ice_hw *hw); -int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data); -int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data); -int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data); -bool ice_is_pca9575_present(struct ice_hw *hw); +int ice_read_sma_ctrl(struct ice_hw *hw, u8 *data); +int ice_write_sma_ctrl(struct ice_hw *hw, u8 data); +int ice_ptp_read_sdp_ac(struct ice_hw *hw, __le16 *entries, uint *num_entries); +int ice_cgu_get_num_pins(struct ice_hw *hw, bool input); enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input); struct dpll_pin_frequency * ice_cgu_get_pin_freq_supp(struct ice_hw *hw, u8 pin, bool input, u8 *num); @@ -280,11 +407,43 @@ int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx, u8 *ref_state, u8 *eec_mode, s64 *phase_offset, enum dpll_lock_status *dpll_state); int 
ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num); - -void ice_ptp_init_phy_model(struct ice_hw *hw); int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, unsigned long *caps); +/* ETH56G family functions */ +int ice_ptp_read_tx_hwtstamp_status_eth56g(struct ice_hw *hw, u32 *ts_status); +int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset); +int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port); +int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold); +int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port); + +#define ICE_ETH56G_NOMINAL_INCVAL 0x140000000ULL +#define ICE_ETH56G_NOMINAL_PCS_REF_TUS 0x100000000ULL +#define ICE_ETH56G_NOMINAL_PCS_REF_INC 0x300000000ULL +#define ICE_ETH56G_NOMINAL_THRESH4 0x7777 +#define ICE_ETH56G_NOMINAL_TX_THRESH 0x6 + +/** + * ice_get_base_incval - Get base clock increment value + * @hw: pointer to the HW struct + * + * Return: base clock increment value for supported PHYs, 0 otherwise + */ +static inline u64 ice_get_base_incval(struct ice_hw *hw) +{ + switch (hw->mac_type) { + case ICE_MAC_E810: + case ICE_MAC_E830: + return ICE_PTP_NOMINAL_INCVAL_E810; + case ICE_MAC_GENERIC: + return ice_e82x_nominal_incval(ice_e82x_time_ref(hw)); + case ICE_MAC_GENERIC_3K_E825: + return ICE_ETH56G_NOMINAL_INCVAL; + default: + return 0; + } +} + #define PFTSYN_SEM_BYTES 4 #define ICE_PTP_CLOCK_INDEX_0 0x00 @@ -312,6 +471,7 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, #define TS_CMD_MASK_E810 0xFF #define TS_CMD_MASK 0xF #define SYNC_EXEC_CMD 0x3 +#define TS_CMD_RX_TYPE ICE_M(0x18, 0x4) /* Macros to derive port low and high addresses on both quads */ #define P_Q0_L(a, p) ((((a) + (0x2000 * (p)))) & 0xFFFF) @@ -344,11 +504,8 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, #define Q_REG_TX_MEM_GBL_CFG 0xC08 #define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_S 0 #define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M BIT(0) -#define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_S 1 #define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_M ICE_M(0xFF, 1) -#define Q_REG_TX_MEM_GBL_CFG_INTR_THR_S 9 #define Q_REG_TX_MEM_GBL_CFG_INTR_THR_M ICE_M(0x3F, 9) -#define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_S 15 #define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M BIT(15) /* Tx Timestamp data registers */ @@ -380,7 +537,7 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, #define P_REG_TIMETUS_L 0x410 #define P_REG_TIMETUS_U 0x414 -#define P_REG_40B_LOW_M 0xFF +#define P_REG_40B_LOW_M GENMASK(7, 0) #define P_REG_40B_HIGH_S 8 /* PHY window length registers */ @@ -487,30 +644,43 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, #define ETH_GLTSYN_SHADJ_H(_i) (0x0300037C + ((_i) * 32)) /* E810 timer command register */ -#define ETH_GLTSYN_CMD 0x03000344 +#define E810_ETH_GLTSYN_CMD 0x03000344 + +/* E830 timer command register */ +#define E830_ETH_GLTSYN_CMD 0x00088814 + +/* E810 PHC time register */ +#define E830_GLTSYN_TIME_L(_tmr_idx) (0x0008A000 + 0x1000 * (_tmr_idx)) /* Source timer incval macros */ #define INCVAL_HIGH_M 0xFF -/* Timestamp block macros */ +/* PHY 40b registers macros */ +#define PHY_EXT_40B_LOW_M GENMASK(31, 0) +#define PHY_EXT_40B_HIGH_M GENMASK_ULL(39, 32) +#define PHY_40B_LOW_M GENMASK(7, 0) +#define PHY_40B_HIGH_M GENMASK_ULL(39, 8) #define TS_VALID BIT(0) #define TS_LOW_M 0xFFFFFFFF #define TS_HIGH_M 0xFF #define TS_HIGH_S 32 -#define TS_PHY_LOW_M 0xFF -#define TS_PHY_HIGH_M 0xFFFFFFFF -#define TS_PHY_HIGH_S 8 - #define BYTES_PER_IDX_ADDR_L_U 8 #define 
BYTES_PER_IDX_ADDR_L 4 /* Tx timestamp low latency read definitions */ -#define TS_LL_READ_RETRIES 200 -#define TS_LL_READ_TS_HIGH GENMASK(23, 16) -#define TS_LL_READ_TS_IDX GENMASK(29, 24) -#define TS_LL_READ_TS_INTR BIT(30) -#define TS_LL_READ_TS BIT(31) +#define REG_LL_PROXY_H_TIMEOUT_US 2000 +#define REG_LL_PROXY_H_PHY_TMR_CMD_M GENMASK(7, 6) +#define REG_LL_PROXY_H_PHY_TMR_CMD_ADJ 0x1 +#define REG_LL_PROXY_H_PHY_TMR_CMD_FREQ 0x2 +#define REG_LL_PROXY_H_TS_HIGH GENMASK(23, 16) +#define REG_LL_PROXY_H_PHY_TMR_IDX_M BIT(24) +#define REG_LL_PROXY_H_TS_IDX GENMASK(29, 24) +#define REG_LL_PROXY_H_TS_INTR_ENA BIT(30) +#define REG_LL_PROXY_H_EXEC BIT(31) + +#define REG_LL_PROXY_L PF_SB_ATQBAH +#define REG_LL_PROXY_H PF_SB_ATQBAL /* Internal PHY timestamp address */ #define TS_L(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U)) @@ -524,29 +694,134 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, #define LOW_TX_MEMORY_BANK_START 0x03090000 #define HIGH_TX_MEMORY_BANK_START 0x03090004 -/* E810T SMA controller pin control */ -#define ICE_SMA1_DIR_EN_E810T BIT(4) -#define ICE_SMA1_TX_EN_E810T BIT(5) -#define ICE_SMA2_UFL2_RX_DIS_E810T BIT(3) -#define ICE_SMA2_DIR_EN_E810T BIT(6) -#define ICE_SMA2_TX_EN_E810T BIT(7) - -#define ICE_SMA1_MASK_E810T (ICE_SMA1_DIR_EN_E810T | \ - ICE_SMA1_TX_EN_E810T) -#define ICE_SMA2_MASK_E810T (ICE_SMA2_UFL2_RX_DIS_E810T | \ - ICE_SMA2_DIR_EN_E810T | \ - ICE_SMA2_TX_EN_E810T) -#define ICE_ALL_SMA_MASK_E810T (ICE_SMA1_MASK_E810T | \ - ICE_SMA2_MASK_E810T) - -#define ICE_SMA_MIN_BIT_E810T 3 -#define ICE_SMA_MAX_BIT_E810T 7 +/* SMA controller pin control */ +#define ICE_SMA1_DIR_EN BIT(4) +#define ICE_SMA1_TX_EN BIT(5) +#define ICE_SMA2_UFL2_RX_DIS BIT(3) +#define ICE_SMA2_DIR_EN BIT(6) +#define ICE_SMA2_TX_EN BIT(7) + +#define ICE_SMA1_MASK (ICE_SMA1_DIR_EN | ICE_SMA1_TX_EN) +#define ICE_SMA2_MASK (ICE_SMA2_UFL2_RX_DIS | ICE_SMA2_DIR_EN | \ + ICE_SMA2_TX_EN) +#define ICE_ALL_SMA_MASK (ICE_SMA1_MASK | ICE_SMA2_MASK) + +#define ICE_SMA_MIN_BIT 3 +#define ICE_SMA_MAX_BIT 7 #define ICE_PCA9575_P1_OFFSET 8 -/* E810T PCA9575 IO controller registers */ +/* PCA9575 IO controller registers */ #define ICE_PCA9575_P0_IN 0x0 -/* E810T PCA9575 IO controller pin control */ -#define ICE_E810T_P0_GNSS_PRSNT_N BIT(4) +/* PCA9575 IO controller pin control */ +#define ICE_P0_GNSS_PRSNT_N BIT(4) + +/* ETH56G PHY register addresses */ +/* Timestamp PHY incval registers */ +#define PHY_REG_TIMETUS_L 0x8 +#define PHY_REG_TIMETUS_U 0xC + +/* Timestamp PCS registers */ +#define PHY_PCS_REF_TUS_L 0x18 +#define PHY_PCS_REF_TUS_U 0x1C + +/* Timestamp PCS ref incval registers */ +#define PHY_PCS_REF_INC_L 0x20 +#define PHY_PCS_REF_INC_U 0x24 + +/* Timestamp init registers */ +#define PHY_REG_RX_TIMER_INC_PRE_L 0x64 +#define PHY_REG_RX_TIMER_INC_PRE_U 0x68 +#define PHY_REG_TX_TIMER_INC_PRE_L 0x44 +#define PHY_REG_TX_TIMER_INC_PRE_U 0x48 + +/* Timestamp match and adjust target registers */ +#define PHY_REG_RX_TIMER_CNT_ADJ_L 0x6C +#define PHY_REG_RX_TIMER_CNT_ADJ_U 0x70 +#define PHY_REG_TX_TIMER_CNT_ADJ_L 0x4C +#define PHY_REG_TX_TIMER_CNT_ADJ_U 0x50 + +/* Timestamp command registers */ +#define PHY_REG_TX_TMR_CMD 0x40 +#define PHY_REG_RX_TMR_CMD 0x60 + +/* Phy offset ready registers */ +#define PHY_REG_TX_OFFSET_READY 0x54 +#define PHY_REG_RX_OFFSET_READY 0x74 + +/* Phy total offset registers */ +#define PHY_REG_TOTAL_TX_OFFSET_L 0x38 +#define PHY_REG_TOTAL_TX_OFFSET_U 0x3C +#define PHY_REG_TOTAL_RX_OFFSET_L 0x58 +#define PHY_REG_TOTAL_RX_OFFSET_U 0x5C + +/* Timestamp capture 
registers */ +#define PHY_REG_TX_CAPTURE_L 0x78 +#define PHY_REG_TX_CAPTURE_U 0x7C +#define PHY_REG_RX_CAPTURE_L 0x8C +#define PHY_REG_RX_CAPTURE_U 0x90 + +/* Memory status registers */ +#define PHY_REG_TX_MEMORY_STATUS_L 0x80 +#define PHY_REG_TX_MEMORY_STATUS_U 0x84 + +/* Interrupt config register */ +#define PHY_REG_TS_INT_CONFIG 0x88 + +/* XIF mode config register */ +#define PHY_MAC_XIF_MODE 0x24 +#define PHY_MAC_XIF_1STEP_ENA_M ICE_M(0x1, 5) +#define PHY_MAC_XIF_TS_BIN_MODE_M ICE_M(0x1, 11) +#define PHY_MAC_XIF_TS_SFD_ENA_M ICE_M(0x1, 20) +#define PHY_MAC_XIF_GMII_TS_SEL_M ICE_M(0x1, 21) + +#define PHY_TS_INT_CONFIG_THRESHOLD_M ICE_M(0x3F, 0) +#define PHY_TS_INT_CONFIG_ENA_M BIT(6) + +/* Macros to derive offsets for TimeStampLow and TimeStampHigh */ +#define PHY_TSTAMP_L(x) (((x) * 8) + 0) +#define PHY_TSTAMP_U(x) (((x) * 8) + 4) + +#define PHY_REG_DESKEW_0 0x94 +#define PHY_REG_DESKEW_0_RLEVEL GENMASK(6, 0) +#define PHY_REG_DESKEW_0_RLEVEL_FRAC GENMASK(9, 7) +#define PHY_REG_DESKEW_0_RLEVEL_FRAC_W 3 +#define PHY_REG_DESKEW_0_VALID GENMASK(10, 10) + +#define PHY_REG_SD_BIT_SLIP(_port_offset) (0x29C + 4 * (_port_offset)) +#define PHY_REVISION_ETH56G 0x10200 +#define PHY_VENDOR_TXLANE_THRESH 0x2000C + +#define PHY_MAC_TSU_CONFIG 0x40 +#define PHY_MAC_TSU_CFG_RX_MODE_M ICE_M(0x7, 0) +#define PHY_MAC_TSU_CFG_RX_MII_CW_DLY_M ICE_M(0x7, 4) +#define PHY_MAC_TSU_CFG_RX_MII_MK_DLY_M ICE_M(0x7, 8) +#define PHY_MAC_TSU_CFG_TX_MODE_M ICE_M(0x7, 12) +#define PHY_MAC_TSU_CFG_TX_MII_CW_DLY_M ICE_M(0x1F, 16) +#define PHY_MAC_TSU_CFG_TX_MII_MK_DLY_M ICE_M(0x1F, 21) +#define PHY_MAC_TSU_CFG_BLKS_PER_CLK_M ICE_M(0x1, 28) +#define PHY_MAC_RX_MODULO 0x44 +#define PHY_MAC_RX_OFFSET 0x48 +#define PHY_MAC_RX_OFFSET_M ICE_M(0xFFFFFF, 0) +#define PHY_MAC_TX_MODULO 0x4C +#define PHY_MAC_BLOCKTIME 0x50 +#define PHY_MAC_MARKERTIME 0x54 +#define PHY_MAC_TX_OFFSET 0x58 +#define PHY_GPCS_BITSLIP 0x5C + +#define PHY_PTP_INT_STATUS 0x7FD140 + +/* ETH56G registers shared per quad */ +/* GPCS config register */ +#define PHY_GPCS_CONFIG_REG0 0x268 +#define PHY_GPCS_CONFIG_REG0_TX_THR_M GENMASK(27, 24) +/* 1-step PTP config */ +#define PHY_PTP_1STEP_CONFIG 0x270 +#define PHY_PTP_1STEP_T1S_UP64_M GENMASK(7, 4) +#define PHY_PTP_1STEP_T1S_DELTA_M GENMASK(11, 8) +#define PHY_PTP_1STEP_PEER_DELAY(_quad_lane) (0x274 + 4 * (_quad_lane)) +#define PHY_PTP_1STEP_PD_ADD_PD_M BIT(0) +#define PHY_PTP_1STEP_PD_DELAY_M GENMASK(30, 1) +#define PHY_PTP_1STEP_PD_DLY_V_M BIT(31) #endif /* _ICE_PTP_HW_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c index 5f30fb131f74..cb08746556a6 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.c +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -3,42 +3,51 @@ #include "ice.h" #include "ice_eswitch.h" -#include "ice_devlink.h" +#include "devlink/devlink.h" +#include "devlink/port.h" #include "ice_sriov.h" #include "ice_tc_lib.h" #include "ice_dcb_lib.h" /** - * ice_repr_get_sw_port_id - get port ID associated with representor - * @repr: pointer to port representor + * ice_repr_inc_tx_stats - increment Tx statistic by one packet + * @repr: repr to increment stats on + * @len: length of the packet + * @xmit_status: value returned by xmit function */ -static int ice_repr_get_sw_port_id(struct ice_repr *repr) +void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len, + int xmit_status) { - return repr->src_vsi->back->hw.port_info->lport; + struct ice_repr_pcpu_stats *stats; + + if (unlikely(xmit_status != NET_XMIT_SUCCESS && + xmit_status 
!= NET_XMIT_CN)) { + this_cpu_inc(repr->stats->tx_drops); + return; + } + + stats = this_cpu_ptr(repr->stats); + u64_stats_update_begin(&stats->syncp); + stats->tx_packets++; + stats->tx_bytes += len; + u64_stats_update_end(&stats->syncp); } /** - * ice_repr_get_phys_port_name - get phys port name - * @netdev: pointer to port representor netdev - * @buf: write here port name - * @len: max length of buf + * ice_repr_inc_rx_stats - increment Rx statistic by one packet + * @netdev: repr netdev to increment stats on + * @len: length of the packet */ -static int -ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len) +void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len) { - struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_repr *repr = np->repr; - int res; - - /* Devlink port is registered and devlink core is taking care of name formatting. */ - if (repr->vf->devlink_port.devlink) - return -EOPNOTSUPP; + struct ice_repr *repr = ice_netdev_to_repr(netdev); + struct ice_repr_pcpu_stats *stats; - res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr), - repr->id); - if (res <= 0) - return -EOPNOTSUPP; - return 0; + stats = this_cpu_ptr(repr->stats); + u64_stats_update_begin(&stats->syncp); + stats->rx_packets++; + stats->rx_bytes += len; + u64_stats_update_end(&stats->syncp); } /** @@ -50,12 +59,13 @@ static void ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct ice_netdev_priv *np = netdev_priv(netdev); + struct ice_repr *repr = np->repr; struct ice_eth_stats *eth_stats; struct ice_vsi *vsi; - if (ice_is_vf_disabled(np->repr->vf)) + if (repr->ops.ready(repr)) return; - vsi = np->repr->src_vsi; + vsi = repr->src_vsi; ice_update_vsi_stats(vsi); eth_stats = &vsi->eth_stats; @@ -76,7 +86,7 @@ ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) * ice_netdev_to_repr - Get port representor for given netdevice * @netdev: pointer to port representor netdev */ -struct ice_repr *ice_netdev_to_repr(struct net_device *netdev) +struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev) { struct ice_netdev_priv *np = netdev_priv(netdev); @@ -84,7 +94,7 @@ struct ice_repr *ice_netdev_to_repr(struct net_device *netdev) } /** - * ice_repr_open - Enable port representor's network interface + * ice_repr_vf_open - Enable port representor's network interface * @netdev: network interface device structure * * The open entry point is called when a port representor's network @@ -93,7 +103,7 @@ struct ice_repr *ice_netdev_to_repr(struct net_device *netdev) * * Returns 0 on success */ -static int ice_repr_open(struct net_device *netdev) +static int ice_repr_vf_open(struct net_device *netdev) { struct ice_repr *repr = ice_netdev_to_repr(netdev); struct ice_vf *vf; @@ -109,8 +119,16 @@ static int ice_repr_open(struct net_device *netdev) return 0; } +static int ice_repr_sf_open(struct net_device *netdev) +{ + netif_carrier_on(netdev); + netif_tx_start_all_queues(netdev); + + return 0; +} + /** - * ice_repr_stop - Disable port representor's network interface + * ice_repr_vf_stop - Disable port representor's network interface * @netdev: network interface device structure * * The stop entry point is called when a port representor's network @@ -119,7 +137,7 @@ static int ice_repr_open(struct net_device *netdev) * * Returns 0 on success */ -static int ice_repr_stop(struct net_device *netdev) +static int ice_repr_vf_stop(struct net_device *netdev) { struct ice_repr *repr = 
ice_netdev_to_repr(netdev); struct ice_vf *vf; @@ -135,42 +153,47 @@ static int ice_repr_stop(struct net_device *netdev) return 0; } +static int ice_repr_sf_stop(struct net_device *netdev) +{ + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + return 0; +} + /** * ice_repr_sp_stats64 - get slow path stats for port representor * @dev: network interface device structure * @stats: netlink stats structure - * - * RX/TX stats are being swapped here to be consistent with VF stats. In slow - * path, port representor receives data when the corresponding VF is sending it - * (and vice versa), TX and RX bytes/packets are effectively swapped on port - * representor. */ static int ice_repr_sp_stats64(const struct net_device *dev, struct rtnl_link_stats64 *stats) { - struct ice_netdev_priv *np = netdev_priv(dev); - int vf_id = np->repr->vf->vf_id; - struct ice_tx_ring *tx_ring; - struct ice_rx_ring *rx_ring; - u64 pkts, bytes; - - tx_ring = np->vsi->tx_rings[vf_id]; - ice_fetch_u64_stats_per_ring(&tx_ring->ring_stats->syncp, - tx_ring->ring_stats->stats, - &pkts, &bytes); - stats->rx_packets = pkts; - stats->rx_bytes = bytes; - - rx_ring = np->vsi->rx_rings[vf_id]; - ice_fetch_u64_stats_per_ring(&rx_ring->ring_stats->syncp, - rx_ring->ring_stats->stats, - &pkts, &bytes); - stats->tx_packets = pkts; - stats->tx_bytes = bytes; - stats->tx_dropped = rx_ring->ring_stats->rx_stats.alloc_page_failed + - rx_ring->ring_stats->rx_stats.alloc_buf_failed; - + struct ice_repr *repr = ice_netdev_to_repr(dev); + int i; + + for_each_possible_cpu(i) { + u64 tbytes, tpkts, tdrops, rbytes, rpkts; + struct ice_repr_pcpu_stats *repr_stats; + unsigned int start; + + repr_stats = per_cpu_ptr(repr->stats, i); + do { + start = u64_stats_fetch_begin(&repr_stats->syncp); + tbytes = repr_stats->tx_bytes; + tpkts = repr_stats->tx_packets; + tdrops = repr_stats->tx_drops; + rbytes = repr_stats->rx_bytes; + rpkts = repr_stats->rx_packets; + } while (u64_stats_fetch_retry(&repr_stats->syncp, start)); + + stats->tx_bytes += tbytes; + stats->tx_packets += tpkts; + stats->tx_dropped += tdrops; + stats->rx_bytes += rbytes; + stats->rx_packets += rpkts; + } return 0; } @@ -196,7 +219,8 @@ ice_repr_setup_tc_cls_flower(struct ice_repr *repr, { switch (flower->command) { case FLOW_CLS_REPLACE: - return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower); + return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower, + true); case FLOW_CLS_DESTROY: return ice_del_cls_flower(repr->src_vsi, flower); default: @@ -239,11 +263,20 @@ ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type, } } -static const struct net_device_ops ice_repr_netdev_ops = { - .ndo_get_phys_port_name = ice_repr_get_phys_port_name, +static const struct net_device_ops ice_repr_vf_netdev_ops = { .ndo_get_stats64 = ice_repr_get_stats64, - .ndo_open = ice_repr_open, - .ndo_stop = ice_repr_stop, + .ndo_open = ice_repr_vf_open, + .ndo_stop = ice_repr_vf_stop, + .ndo_start_xmit = ice_eswitch_port_start_xmit, + .ndo_setup_tc = ice_repr_setup_tc, + .ndo_has_offload_stats = ice_repr_ndo_has_offload_stats, + .ndo_get_offload_stats = ice_repr_ndo_get_offload_stats, +}; + +static const struct net_device_ops ice_repr_sf_netdev_ops = { + .ndo_get_stats64 = ice_repr_get_stats64, + .ndo_open = ice_repr_sf_open, + .ndo_stop = ice_repr_sf_stop, .ndo_start_xmit = ice_eswitch_port_start_xmit, .ndo_setup_tc = ice_repr_setup_tc, .ndo_has_offload_stats = ice_repr_ndo_has_offload_stats, @@ -256,18 +289,20 @@ static const struct net_device_ops 
ice_repr_netdev_ops = { */ bool ice_is_port_repr_netdev(const struct net_device *netdev) { - return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops); + return netdev && (netdev->netdev_ops == &ice_repr_vf_netdev_ops || + netdev->netdev_ops == &ice_repr_sf_netdev_ops); } /** * ice_repr_reg_netdev - register port representor netdev * @netdev: pointer to port representor netdev + * @ops: new ops for netdev */ static int -ice_repr_reg_netdev(struct net_device *netdev) +ice_repr_reg_netdev(struct net_device *netdev, const struct net_device_ops *ops) { eth_hw_addr_random(netdev); - netdev->netdev_ops = &ice_repr_netdev_ops; + netdev->netdev_ops = ops; ice_set_ethtool_repr_ops(netdev); netdev->hw_features |= NETIF_F_HW_TC; @@ -278,60 +313,58 @@ ice_repr_reg_netdev(struct net_device *netdev) return register_netdev(netdev); } -static void ice_repr_remove_node(struct devlink_port *devlink_port) +static int ice_repr_ready_vf(struct ice_repr *repr) { - devl_lock(devlink_port->devlink); - devl_rate_leaf_destroy(devlink_port); - devl_unlock(devlink_port->devlink); + return !ice_check_vf_ready_for_cfg(repr->vf); +} + +static int ice_repr_ready_sf(struct ice_repr *repr) +{ + return !repr->sf->active; } /** - * ice_repr_rem - remove representor from VF + * ice_repr_destroy - remove representor from VF * @repr: pointer to representor structure */ -static void ice_repr_rem(struct ice_repr *repr) +void ice_repr_destroy(struct ice_repr *repr) { - kfree(repr->q_vector); + free_percpu(repr->stats); free_netdev(repr->netdev); kfree(repr); } -/** - * ice_repr_rem_vf - remove representor from VF - * @repr: pointer to representor structure - */ -void ice_repr_rem_vf(struct ice_repr *repr) +static void ice_repr_rem_vf(struct ice_repr *repr) { - ice_repr_remove_node(&repr->vf->devlink_port); + ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac); + ice_pass_vf_tx_lldp(repr->src_vsi, true); unregister_netdev(repr->netdev); ice_devlink_destroy_vf_port(repr->vf); ice_virtchnl_set_dflt_ops(repr->vf); - ice_repr_rem(repr); } -static void ice_repr_set_tx_topology(struct ice_pf *pf) +static void ice_repr_rem_sf(struct ice_repr *repr) { - struct devlink *devlink; + unregister_netdev(repr->netdev); + ice_devlink_destroy_sf_port(repr->sf); +} +static void ice_repr_set_tx_topology(struct ice_pf *pf, struct devlink *devlink) +{ /* only export if ADQ and DCB disabled and eswitch enabled*/ if (ice_is_adq_active(pf) || ice_is_dcb_active(pf) || !ice_is_switchdev_running(pf)) return; - devlink = priv_to_devlink(pf); ice_devlink_rate_init_tx_topology(devlink, ice_get_main_vsi(pf)); } /** - * ice_repr_add - add representor for generic VSI - * @pf: pointer to PF structure + * ice_repr_create - add representor for generic VSI * @src_vsi: pointer to VSI structure of device to represent - * @parent_mac: device MAC address */ -static struct ice_repr * -ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac) +static struct ice_repr *ice_repr_create(struct ice_vsi *src_vsi) { - struct ice_q_vector *q_vector; struct ice_netdev_priv *np; struct ice_repr *repr; int err; @@ -346,78 +379,153 @@ ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac) goto err_alloc; } + repr->stats = netdev_alloc_pcpu_stats(struct ice_repr_pcpu_stats); + if (!repr->stats) { + err = -ENOMEM; + goto err_stats; + } + repr->src_vsi = src_vsi; + repr->id = src_vsi->vsi_num; np = netdev_priv(repr->netdev); np->repr = repr; - q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL); - if (!q_vector) { - err = -ENOMEM; - goto 
err_alloc_q_vector; - } - repr->q_vector = q_vector; - repr->q_id = repr->id; + repr->netdev->min_mtu = ETH_MIN_MTU; + repr->netdev->max_mtu = ICE_MAX_MTU; - ether_addr_copy(repr->parent_mac, parent_mac); + SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(src_vsi->back)); return repr; -err_alloc_q_vector: +err_stats: free_netdev(repr->netdev); err_alloc: kfree(repr); return ERR_PTR(err); } -struct ice_repr *ice_repr_add_vf(struct ice_vf *vf) +static int ice_repr_add_vf(struct ice_repr *repr) { - struct ice_repr *repr; - struct ice_vsi *vsi; + struct ice_vf *vf = repr->vf; + struct devlink *devlink; int err; - vsi = ice_get_vf_vsi(vf); - if (!vsi) - return ERR_PTR(-ENOENT); - err = ice_devlink_create_vf_port(vf); if (err) - return ERR_PTR(err); + return err; - repr = ice_repr_add(vf->pf, vsi, vf->hw_lan_addr); - if (IS_ERR(repr)) { - err = PTR_ERR(repr); - goto err_repr_add; - } + SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port); + err = ice_repr_reg_netdev(repr->netdev, &ice_repr_vf_netdev_ops); + if (err) + goto err_netdev; + + err = ice_drop_vf_tx_lldp(repr->src_vsi, true); + if (err) + goto err_drop_lldp; + + err = ice_eswitch_cfg_vsi(repr->src_vsi, repr->parent_mac); + if (err) + goto err_cfg_vsi; + + ice_virtchnl_set_repr_ops(vf); + + devlink = priv_to_devlink(vf->pf); + ice_repr_set_tx_topology(vf->pf, devlink); + + return 0; + +err_cfg_vsi: + ice_pass_vf_tx_lldp(repr->src_vsi, true); +err_drop_lldp: + unregister_netdev(repr->netdev); +err_netdev: + ice_devlink_destroy_vf_port(vf); + return err; +} +/** + * ice_repr_create_vf - add representor for VF VSI + * @vf: VF to create port representor on + * + * Set correct representor type for VF and functions pointer. + * + * Return: created port representor on success, error otherwise + */ +struct ice_repr *ice_repr_create_vf(struct ice_vf *vf) +{ + struct ice_vsi *vsi = ice_get_vf_vsi(vf); + struct ice_repr *repr; + + if (!vsi) + return ERR_PTR(-EINVAL); + + repr = ice_repr_create(vsi); + if (IS_ERR(repr)) + return repr; + + repr->type = ICE_REPR_TYPE_VF; repr->vf = vf; + repr->ops.add = ice_repr_add_vf; + repr->ops.rem = ice_repr_rem_vf; + repr->ops.ready = ice_repr_ready_vf; - repr->netdev->min_mtu = ETH_MIN_MTU; - repr->netdev->max_mtu = ICE_MAX_MTU; + ether_addr_copy(repr->parent_mac, vf->hw_lan_addr); - SET_NETDEV_DEV(repr->netdev, ice_pf_to_dev(vf->pf)); - SET_NETDEV_DEVLINK_PORT(repr->netdev, &vf->devlink_port); - err = ice_repr_reg_netdev(repr->netdev); + return repr; +} + +static int ice_repr_add_sf(struct ice_repr *repr) +{ + struct ice_dynamic_port *sf = repr->sf; + int err; + + err = ice_devlink_create_sf_port(sf); + if (err) + return err; + + SET_NETDEV_DEVLINK_PORT(repr->netdev, &sf->devlink_port); + err = ice_repr_reg_netdev(repr->netdev, &ice_repr_sf_netdev_ops); if (err) goto err_netdev; - ice_virtchnl_set_repr_ops(vf); - ice_repr_set_tx_topology(vf->pf); + ice_repr_set_tx_topology(sf->vsi->back, priv_to_devlink(sf->vsi->back)); - return repr; + return 0; err_netdev: - ice_repr_rem(repr); -err_repr_add: - ice_devlink_destroy_vf_port(vf); - return ERR_PTR(err); + ice_devlink_destroy_sf_port(sf); + return err; } -struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi) +/** + * ice_repr_create_sf - add representor for SF VSI + * @sf: SF to create port representor on + * + * Set correct representor type for SF and functions pointer. 
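Both creation helpers differ only in which callbacks they install, so common code can drive a representor through repr->ops without knowing whether a VF or a subfunction sits behind it. A minimal sketch of that callback-table idea (types and callbacks here are illustrative, not the driver's actual ones):

/* Illustrative callback table mirroring the ops member of struct ice_repr. */
struct repr;

struct repr_ops {
        int  (*add)(struct repr *r);    /* attach representor to its backend */
        void (*rem)(struct repr *r);    /* detach it again */
        int  (*ready)(struct repr *r);  /* query the backing VF/SF state */
};

struct repr {
        const struct repr_ops *ops;
        void *backend;                  /* VF- or SF-specific state */
};

/* Shared code never branches on the representor type, it just calls ops. */
static int repr_attach(struct repr *r)
{
        return r->ops->add(r);
}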
+ * + * Return: created port representor on success, error otherwise + */ +struct ice_repr *ice_repr_create_sf(struct ice_dynamic_port *sf) { - if (!vsi->vf) - return NULL; + struct ice_repr *repr = ice_repr_create(sf->vsi); + + if (IS_ERR(repr)) + return repr; + + repr->type = ICE_REPR_TYPE_SF; + repr->sf = sf; + repr->ops.add = ice_repr_add_sf; + repr->ops.rem = ice_repr_rem_sf; + repr->ops.ready = ice_repr_ready_sf; - return xa_load(&vsi->back->eswitch.reprs, vsi->vf->repr_id); + ether_addr_copy(repr->parent_mac, sf->hw_addr); + + return repr; +} + +struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id) +{ + return xa_load(&pf->eswitch.reprs, id); } /** @@ -439,15 +547,3 @@ void ice_repr_stop_tx_queues(struct ice_repr *repr) netif_carrier_off(repr->netdev); netif_tx_stop_all_queues(repr->netdev); } - -/** - * ice_repr_set_traffic_vsi - set traffic VSI for port representor - * @repr: repr on with VSI will be set - * @vsi: pointer to VSI that will be used by port representor to pass traffic - */ -void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi) -{ - struct ice_netdev_priv *np = netdev_priv(repr->netdev); - - np->vsi = vsi; -} diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h index f9aede315716..35bd93165e1e 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.h +++ b/drivers/net/ethernet/intel/ice/ice_repr.h @@ -6,32 +6,53 @@ #include <net/dst_metadata.h> +struct ice_repr_pcpu_stats { + struct u64_stats_sync syncp; + u64 rx_packets; + u64 rx_bytes; + u64 tx_packets; + u64 tx_bytes; + u64 tx_drops; +}; + +enum ice_repr_type { + ICE_REPR_TYPE_VF, + ICE_REPR_TYPE_SF, +}; + struct ice_repr { struct ice_vsi *src_vsi; - struct ice_vf *vf; - struct ice_q_vector *q_vector; struct net_device *netdev; struct metadata_dst *dst; struct ice_esw_br_port *br_port; - int q_id; + struct ice_repr_pcpu_stats __percpu *stats; u32 id; u8 parent_mac[ETH_ALEN]; -#ifdef CONFIG_ICE_SWITCHDEV - /* info about slow path rule */ - struct ice_rule_query_data sp_rule; -#endif + enum ice_repr_type type; + union { + struct ice_vf *vf; + struct ice_dynamic_port *sf; + }; + struct { + int (*add)(struct ice_repr *repr); + void (*rem)(struct ice_repr *repr); + int (*ready)(struct ice_repr *repr); + } ops; }; -struct ice_repr *ice_repr_add_vf(struct ice_vf *vf); -void ice_repr_rem_vf(struct ice_repr *repr); +struct ice_repr *ice_repr_create_vf(struct ice_vf *vf); +struct ice_repr *ice_repr_create_sf(struct ice_dynamic_port *sf); + +void ice_repr_destroy(struct ice_repr *repr); void ice_repr_start_tx_queues(struct ice_repr *repr); void ice_repr_stop_tx_queues(struct ice_repr *repr); -void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi); - -struct ice_repr *ice_netdev_to_repr(struct net_device *netdev); +struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev); bool ice_is_port_repr_netdev(const struct net_device *netdev); -struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi); +void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len, + int xmit_status); +void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len); +struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id); #endif diff --git a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h index ead75fe2bcda..183dd5457d6a 100644 --- a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h @@ -46,11 +46,10 @@ struct ice_sbq_evt_desc { u8 data[24]; }; -enum 
ice_sbq_msg_dev { - rmn_0 = 0x02, - rmn_1 = 0x03, - rmn_2 = 0x04, - cgu = 0x06 +enum ice_sbq_dev_id { + ice_sbq_dev_phy_0 = 0x02, + ice_sbq_dev_cgu = 0x06, + ice_sbq_dev_phy_0_peer = 0x0D, }; enum ice_sbq_msg_opcode { diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index a1525992d14b..6ca13c5dcb14 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -28,9 +28,8 @@ ice_sched_add_root_node(struct ice_port_info *pi, if (!root) return -ENOMEM; - /* coverity[suspicious_sizeof] */ root->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[0], - sizeof(*root), GFP_KERNEL); + sizeof(*root->children), GFP_KERNEL); if (!root->children) { devm_kfree(ice_hw_to_dev(hw), root); return -ENOMEM; @@ -186,10 +185,9 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer, if (!node) return -ENOMEM; if (hw->max_children[layer]) { - /* coverity[suspicious_sizeof] */ node->children = devm_kcalloc(ice_hw_to_dev(hw), hw->max_children[layer], - sizeof(*node), GFP_KERNEL); + sizeof(*node->children), GFP_KERNEL); if (!node->children) { devm_kfree(ice_hw_to_dev(hw), node); return -ENOMEM; @@ -1128,12 +1126,11 @@ u8 ice_sched_get_vsi_layer(struct ice_hw *hw) * 5 or less sw_entry_point_layer */ /* calculate the VSI layer based on number of layers. */ - if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) { - u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET; - - if (layer > hw->sw_entry_point_layer) - return layer; - } + if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) + return hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET; + else if (hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) + /* qgroup and VSI layers are same */ + return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET; return hw->sw_entry_point_layer; } @@ -1150,13 +1147,10 @@ u8 ice_sched_get_agg_layer(struct ice_hw *hw) * 7 or less sw_entry_point_layer */ /* calculate the aggregator layer based on number of layers. 
*/ - if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) { - u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET; - - if (layer > hw->sw_entry_point_layer) - return layer; - } - return hw->sw_entry_point_layer; + if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) + return hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET; + else + return hw->sw_entry_point_layer; } /** @@ -1510,10 +1504,11 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, { struct ice_sched_node *vsi_node, *qgrp_node; struct ice_vsi_ctx *vsi_ctx; + u8 qgrp_layer, vsi_layer; u16 max_children; - u8 qgrp_layer; qgrp_layer = ice_sched_get_qgrp_layer(pi->hw); + vsi_layer = ice_sched_get_vsi_layer(pi->hw); max_children = pi->hw->max_children[qgrp_layer]; vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); @@ -1524,6 +1519,12 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, if (!vsi_node) return NULL; + /* If the queue group and VSI layer are same then queues + * are all attached directly to VSI + */ + if (qgrp_layer == vsi_layer) + return vsi_node; + /* get the first queue group node from VSI sub-tree */ qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer); while (qgrp_node) { @@ -3199,7 +3200,7 @@ ice_sched_add_rl_profile(struct ice_port_info *pi, u8 profile_type; int status; - if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) + if (!pi || layer_num >= pi->hw->num_tx_sched_layers) return NULL; switch (rl_type) { case ICE_MIN_BW: @@ -3215,8 +3216,6 @@ ice_sched_add_rl_profile(struct ice_port_info *pi, return NULL; } - if (!pi) - return NULL; hw = pi->hw; list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], list_entry) @@ -3446,7 +3445,7 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type, struct ice_aqc_rl_profile_info *rl_prof_elem; int status = 0; - if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM) + if (layer_num >= pi->hw->num_tx_sched_layers) return -EINVAL; /* Check the existing list for RL profile */ list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num], diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h index 1aef05ea5a57..7b668083be07 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.h +++ b/drivers/net/ethernet/intel/ice/ice_sched.h @@ -6,6 +6,17 @@ #include "ice_common.h" +/** + * DOC: ice_sched.h + * + * This header file stores everything that is needed for broadly understood + * scheduler. It consists of defines related to layers, structures related to + * aggregator, functions declarations and others. + */ + +#define ICE_SCHED_5_LAYERS 5 +#define ICE_SCHED_9_LAYERS 9 + #define SCHED_NODE_NAME_MAX_LEN 32 #define ICE_QGRP_LAYER_OFFSET 2 diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.c b/drivers/net/ethernet/intel/ice/ice_sf_eth.c new file mode 100644 index 000000000000..1a2c94375ca7 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.c @@ -0,0 +1,329 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024, Intel Corporation. 
*/ +#include "ice.h" +#include "ice_lib.h" +#include "ice_txrx.h" +#include "ice_fltr.h" +#include "ice_sf_eth.h" +#include "devlink/devlink.h" +#include "devlink/port.h" + +static const struct net_device_ops ice_sf_netdev_ops = { + .ndo_open = ice_open, + .ndo_stop = ice_stop, + .ndo_start_xmit = ice_start_xmit, + .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, + .ndo_change_mtu = ice_change_mtu, + .ndo_get_stats64 = ice_get_stats64, + .ndo_tx_timeout = ice_tx_timeout, + .ndo_bpf = ice_xdp, + .ndo_xdp_xmit = ice_xdp_xmit, + .ndo_xsk_wakeup = ice_xsk_wakeup, +}; + +/** + * ice_sf_cfg_netdev - Allocate, configure and register a netdev + * @dyn_port: subfunction associated with configured netdev + * @devlink_port: subfunction devlink port to be linked with netdev + * + * Return: 0 on success, negative value on failure + */ +static int ice_sf_cfg_netdev(struct ice_dynamic_port *dyn_port, + struct devlink_port *devlink_port) +{ + struct ice_vsi *vsi = dyn_port->vsi; + struct ice_netdev_priv *np; + struct net_device *netdev; + int err; + + netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, + vsi->alloc_rxq); + if (!netdev) + return -ENOMEM; + + SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev); + set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); + vsi->netdev = netdev; + np = netdev_priv(netdev); + np->vsi = vsi; + + ice_set_netdev_features(netdev); + + netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_XSK_ZEROCOPY | + NETDEV_XDP_ACT_RX_SG; + netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD; + + eth_hw_addr_set(netdev, dyn_port->hw_addr); + ether_addr_copy(netdev->perm_addr, dyn_port->hw_addr); + netdev->netdev_ops = &ice_sf_netdev_ops; + SET_NETDEV_DEVLINK_PORT(netdev, devlink_port); + + err = register_netdev(netdev); + if (err) { + free_netdev(netdev); + vsi->netdev = NULL; + return -ENOMEM; + } + set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + + return 0; +} + +static void ice_sf_decfg_netdev(struct ice_vsi *vsi) +{ + unregister_netdev(vsi->netdev); + clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); + free_netdev(vsi->netdev); + vsi->netdev = NULL; + clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); +} + +/** + * ice_sf_dev_probe - subfunction driver probe function + * @adev: pointer to the auxiliary device + * @id: pointer to the auxiliary_device id + * + * Configure VSI and netdev resources for the subfunction device. + * + * Return: zero on success or an error code on failure. 
+ */
+static int ice_sf_dev_probe(struct auxiliary_device *adev,
+			    const struct auxiliary_device_id *id)
+{
+	struct ice_sf_dev *sf_dev = ice_adev_to_sf_dev(adev);
+	struct ice_dynamic_port *dyn_port = sf_dev->dyn_port;
+	struct ice_vsi *vsi = dyn_port->vsi;
+	struct ice_pf *pf = dyn_port->pf;
+	struct device *dev = &adev->dev;
+	struct ice_sf_priv *priv;
+	struct devlink *devlink;
+	int err;
+
+	vsi->type = ICE_VSI_SF;
+	vsi->port_info = pf->hw.port_info;
+	vsi->flags = ICE_VSI_FLAG_INIT;
+
+	priv = ice_allocate_sf(&adev->dev, pf);
+	if (IS_ERR(priv)) {
+		dev_err(dev, "Subfunction devlink alloc failed");
+		return PTR_ERR(priv);
+	}
+
+	priv->dev = sf_dev;
+	sf_dev->priv = priv;
+	devlink = priv_to_devlink(priv);
+
+	devl_lock(devlink);
+
+	err = ice_vsi_cfg(vsi);
+	if (err) {
+		dev_err(dev, "Subfunction vsi config failed");
+		goto err_free_devlink;
+	}
+	vsi->sf = dyn_port;
+
+	ice_eswitch_update_repr(&dyn_port->repr_id, vsi);
+
+	err = ice_devlink_create_sf_dev_port(sf_dev);
+	if (err) {
+		dev_err(dev, "Cannot add ice virtual devlink port for subfunction");
+		goto err_vsi_decfg;
+	}
+
+	err = ice_sf_cfg_netdev(dyn_port, &sf_dev->priv->devlink_port);
+	if (err) {
+		dev_err(dev, "Subfunction netdev config failed");
+		goto err_devlink_destroy;
+	}
+
+	err = devl_port_fn_devlink_set(&dyn_port->devlink_port, devlink);
+	if (err) {
+		dev_err(dev, "Can't link devlink instance to SF devlink port");
+		goto err_netdev_decfg;
+	}
+
+	ice_napi_add(vsi);
+
+	devl_register(devlink);
+	devl_unlock(devlink);
+
+	dyn_port->attached = true;
+
+	return 0;
+
+err_netdev_decfg:
+	ice_sf_decfg_netdev(vsi);
+err_devlink_destroy:
+	ice_devlink_destroy_sf_dev_port(sf_dev);
+err_vsi_decfg:
+	ice_vsi_decfg(vsi);
+err_free_devlink:
+	devl_unlock(devlink);
+	devlink_free(devlink);
+	return err;
+}
+
+/**
+ * ice_sf_dev_remove - subfunction driver remove function
+ * @adev: pointer to the auxiliary device
+ *
+ * Deinitialize VSI and netdev resources for the subfunction device.
+ */
+static void ice_sf_dev_remove(struct auxiliary_device *adev)
+{
+	struct ice_sf_dev *sf_dev = ice_adev_to_sf_dev(adev);
+	struct ice_dynamic_port *dyn_port = sf_dev->dyn_port;
+	struct ice_vsi *vsi = dyn_port->vsi;
+	struct devlink *devlink;
+
+	devlink = priv_to_devlink(sf_dev->priv);
+	devl_lock(devlink);
+
+	ice_vsi_close(vsi);
+
+	ice_sf_decfg_netdev(vsi);
+	ice_devlink_destroy_sf_dev_port(sf_dev);
+	devl_unregister(devlink);
+	devl_unlock(devlink);
+	devlink_free(devlink);
+	ice_vsi_decfg(vsi);
+
+	dyn_port->attached = false;
+}
+
+static const struct auxiliary_device_id ice_sf_dev_id_table[] = {
+	{ .name = "ice.sf", },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(auxiliary, ice_sf_dev_id_table);
+
+static struct auxiliary_driver ice_sf_driver = {
+	.name = "sf",
+	.probe = ice_sf_dev_probe,
+	.remove = ice_sf_dev_remove,
+	.id_table = ice_sf_dev_id_table
+};
+
+static DEFINE_XARRAY_ALLOC1(ice_sf_aux_id);
+
+/**
+ * ice_sf_driver_register - Register new auxiliary subfunction driver
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_sf_driver_register(void)
+{
+	return auxiliary_driver_register(&ice_sf_driver);
+}
+
+/**
+ * ice_sf_driver_unregister - Unregister the auxiliary subfunction driver
+ *
+ */
+void ice_sf_driver_unregister(void)
+{
+	auxiliary_driver_unregister(&ice_sf_driver);
+}
+
+/**
+ * ice_sf_dev_release - Release device associated with auxiliary device
+ * @device: pointer to the device
+ *
+ * Since most of the code for subfunction deactivation is handled in
+ * the remove handler, here just free tracking resources.
+ */
+static void ice_sf_dev_release(struct device *device)
+{
+	struct auxiliary_device *adev = to_auxiliary_dev(device);
+	struct ice_sf_dev *sf_dev = ice_adev_to_sf_dev(adev);
+
+	xa_erase(&ice_sf_aux_id, adev->id);
+	kfree(sf_dev);
+}
+
+/**
+ * ice_sf_eth_activate - Activate Ethernet subfunction port
+ * @dyn_port: the dynamic port instance for this subfunction
+ * @extack: extack for reporting error messages
+ *
+ * Activate the dynamic port as an Ethernet subfunction. Set up the
+ * associated netdev resources and initialize the auxiliary device.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int
+ice_sf_eth_activate(struct ice_dynamic_port *dyn_port,
+		    struct netlink_ext_ack *extack)
+{
+	struct ice_pf *pf = dyn_port->pf;
+	struct ice_sf_dev *sf_dev;
+	struct pci_dev *pdev;
+	int err;
+	u32 id;
+
+	err = xa_alloc(&ice_sf_aux_id, &id, NULL, xa_limit_32b,
+		       GFP_KERNEL);
+	if (err) {
+		NL_SET_ERR_MSG_MOD(extack, "Could not allocate SF ID");
+		return err;
+	}
+
+	sf_dev = kzalloc(sizeof(*sf_dev), GFP_KERNEL);
+	if (!sf_dev) {
+		err = -ENOMEM;
+		NL_SET_ERR_MSG_MOD(extack, "Could not allocate SF memory");
+		goto xa_erase;
+	}
+	pdev = pf->pdev;
+
+	sf_dev->dyn_port = dyn_port;
+	sf_dev->adev.id = id;
+	sf_dev->adev.name = "sf";
+	sf_dev->adev.dev.release = ice_sf_dev_release;
+	sf_dev->adev.dev.parent = &pdev->dev;
+
+	err = auxiliary_device_init(&sf_dev->adev);
+	if (err) {
+		NL_SET_ERR_MSG_MOD(extack, "Failed to initialize SF device");
+		goto sf_dev_free;
+	}
+
+	err = auxiliary_device_add(&sf_dev->adev);
+	if (err) {
+		NL_SET_ERR_MSG_MOD(extack, "Failed to add SF device");
+		goto aux_dev_uninit;
+	}
+
+	dyn_port->sf_dev = sf_dev;
+
+	return 0;
+
+aux_dev_uninit:
+	auxiliary_device_uninit(&sf_dev->adev);
+sf_dev_free:
+	kfree(sf_dev);
+xa_erase:
+	xa_erase(&ice_sf_aux_id, id);
+
+	return err;
+}
+
+/**
+ * ice_sf_eth_deactivate - Deactivate Ethernet subfunction port
+ * @dyn_port: the dynamic port instance for this subfunction
+ *
+ * Deactivate the Ethernet subfunction, removing its auxiliary device and the
+ * associated resources.
+ */
+void ice_sf_eth_deactivate(struct ice_dynamic_port *dyn_port)
+{
+	struct ice_sf_dev *sf_dev = dyn_port->sf_dev;
+
+	auxiliary_device_delete(&sf_dev->adev);
+	auxiliary_device_uninit(&sf_dev->adev);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.h b/drivers/net/ethernet/intel/ice/ice_sf_eth.h
new file mode 100644
index 000000000000..c558cad0a183
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024, Intel Corporation.
*/ + +#ifndef _ICE_SF_ETH_H_ +#define _ICE_SF_ETH_H_ + +#include <linux/auxiliary_bus.h> +#include "ice.h" + +struct ice_sf_dev { + struct auxiliary_device adev; + struct ice_dynamic_port *dyn_port; + struct ice_sf_priv *priv; +}; + +struct ice_sf_priv { + struct ice_sf_dev *dev; + struct devlink_port devlink_port; +}; + +static inline struct +ice_sf_dev *ice_adev_to_sf_dev(struct auxiliary_device *adev) +{ + return container_of(adev, struct ice_sf_dev, adev); +} + +int ice_sf_driver_register(void); +void ice_sf_driver_unregister(void); + +int ice_sf_eth_activate(struct ice_dynamic_port *dyn_port, + struct netlink_ext_ack *extack); +void ice_sf_eth_deactivate(struct ice_dynamic_port *dyn_port); +#endif /* _ICE_SF_ETH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c new file mode 100644 index 000000000000..3d7e96721cf9 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023, Intel Corporation. */ + +#include "ice_vsi_vlan_ops.h" +#include "ice_vsi_vlan_lib.h" +#include "ice_vlan_mode.h" +#include "ice.h" +#include "ice_sf_vsi_vlan_ops.h" + +void ice_sf_vsi_init_vlan_ops(struct ice_vsi *vsi) +{ + struct ice_vsi_vlan_ops *vlan_ops; + + if (ice_is_dvm_ena(&vsi->back->hw)) + vlan_ops = &vsi->outer_vlan_ops; + else + vlan_ops = &vsi->inner_vlan_ops; + + vlan_ops->add_vlan = ice_vsi_add_vlan; + vlan_ops->del_vlan = ice_vsi_del_vlan; +} diff --git a/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h new file mode 100644 index 000000000000..8c44eafceea0 --- /dev/null +++ b/drivers/net/ethernet/intel/ice/ice_sf_vsi_vlan_ops.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2023, Intel Corporation. */ + +#ifndef _ICE_SF_VSI_VLAN_OPS_H_ +#define _ICE_SF_VSI_VLAN_OPS_H_ + +#include "ice_vsi_vlan_ops.h" + +struct ice_vsi; + +void ice_sf_vsi_init_vlan_ops(struct ice_vsi *vsi); + +#endif /* _ICE_SF_VSI_VLAN_OPS_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index a958fcf3e6be..0e4dc1a5cff0 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -36,6 +36,7 @@ static void ice_free_vf_entries(struct ice_pf *pf) hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) { hash_del_rcu(&vf->entry); + ice_deinitialize_vf_entry(vf); ice_put_vf(vf); } } @@ -62,6 +63,7 @@ static void ice_free_vf_res(struct ice_vf *vf) if (vf->lan_vsi_idx != ICE_NO_VSI) { ice_vf_vsi_release(vf); vf->num_mac = 0; + vf->num_mac_lldp = 0; } last_vector_idx = vf->first_vector_idx + vf->num_msix - 1; @@ -123,27 +125,6 @@ static void ice_dis_vf_mappings(struct ice_vf *vf) } /** - * ice_sriov_free_msix_res - Reset/free any used MSIX resources - * @pf: pointer to the PF structure - * - * Since no MSIX entries are taken from the pf->irq_tracker then just clear - * the pf->sriov_base_vector. - * - * Returns 0 on success, and -EINVAL on error. 
- */ -static int ice_sriov_free_msix_res(struct ice_pf *pf) -{ - if (!pf) - return -EINVAL; - - bitmap_free(pf->sriov_irq_bm); - pf->sriov_irq_size = 0; - pf->sriov_base_vector = 0; - - return 0; -} - -/** * ice_free_vfs - Free all VFs * @pf: pointer to the PF structure */ @@ -170,15 +151,14 @@ void ice_free_vfs(struct ice_pf *pf) else dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n"); - ice_eswitch_reserve_cp_queues(pf, -ice_get_num_vfs(pf)); - mutex_lock(&vfs->table_lock); ice_for_each_vf(pf, bkt, vf) { mutex_lock(&vf->cfg_lock); - ice_eswitch_detach(pf, vf); + ice_eswitch_detach_vf(pf, vf); ice_dis_vf_qs(vf); + ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix); if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) { /* disable VF qp mappings and set VF disable state */ @@ -195,15 +175,9 @@ void ice_free_vfs(struct ice_pf *pf) wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx)); } - /* clear malicious info since the VF is getting released */ - list_del(&vf->mbx_info.list_entry); - mutex_unlock(&vf->cfg_lock); } - if (ice_sriov_free_msix_res(pf)) - dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n"); - vfs->num_qps_per = 0; ice_free_vf_entries(pf); @@ -227,7 +201,7 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf) struct ice_vsi *vsi; params.type = ICE_VSI_VF; - params.pi = ice_vf_get_port_info(vf); + params.port_info = ice_vf_get_port_info(vf); params.vf = vf; params.flags = ICE_VSI_FLAG_INIT; @@ -362,47 +336,14 @@ static void ice_ena_vf_mappings(struct ice_vf *vf) * @vf: VF to calculate the register index for * @q_vector: a q_vector associated to the VF */ -int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector) +void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector) { if (!vf || !q_vector) - return -EINVAL; + return; /* always add one to account for the OICR being the first MSIX */ - return vf->first_vector_idx + q_vector->v_idx + 1; -} - -/** - * ice_sriov_set_msix_res - Set any used MSIX resources - * @pf: pointer to PF structure - * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs - * - * This function allows SR-IOV resources to be taken from the end of the PF's - * allowed HW MSIX vectors so that the irq_tracker will not be affected. We - * just set the pf->sriov_base_vector and return success. - * - * If there are not enough resources available, return an error. This should - * always be caught by ice_set_per_vf_res(). - * - * Return 0 on success, and -EINVAL when there are not enough MSIX vectors - * in the PF's space available for SR-IOV. 
- */ -static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed) -{ - u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; - int vectors_used = ice_get_max_used_msix_vector(pf); - int sriov_base_vector; - - sriov_base_vector = total_vectors - num_msix_needed; - - /* make sure we only grab irq_tracker entries from the list end and - * that we have enough available MSIX vectors - */ - if (sriov_base_vector < vectors_used) - return -EINVAL; - - pf->sriov_base_vector = sriov_base_vector; - - return 0; + q_vector->vf_reg_idx = q_vector->v_idx + ICE_NONQ_VECS_VF; + q_vector->reg_idx = vf->first_vector_idx + q_vector->vf_reg_idx; } /** @@ -429,11 +370,9 @@ static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed) */ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs) { - int vectors_used = ice_get_max_used_msix_vector(pf); u16 num_msix_per_vf, num_txq, num_rxq, avail_qs; int msix_avail_per_vf, msix_avail_for_sriov; struct device *dev = ice_pf_to_dev(pf); - int err; lockdep_assert_held(&pf->vfs.table_lock); @@ -441,8 +380,7 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs) return -EINVAL; /* determine MSI-X resources per VF */ - msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors - - vectors_used; + msix_avail_for_sriov = pf->virt_irq_tracker.num_entries; msix_avail_per_vf = msix_avail_for_sriov / num_vfs; if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) { num_msix_per_vf = ICE_NUM_VF_MSIX_MED; @@ -481,13 +419,6 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs) return -ENOSPC; } - err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs); - if (err) { - dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n", - num_vfs, err); - return err; - } - /* only allow equal Tx/Rx queue count (i.e. queue pairs) */ pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq); pf->vfs.num_msix_per = num_msix_per_vf; @@ -498,52 +429,6 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs) } /** - * ice_sriov_get_irqs - get irqs for SR-IOV usacase - * @pf: pointer to PF structure - * @needed: number of irqs to get - * - * This returns the first MSI-X vector index in PF space that is used by this - * VF. This index is used when accessing PF relative registers such as - * GLINT_VECT2FUNC and GLINT_DYN_CTL. - * This will always be the OICR index in the AVF driver so any functionality - * using vf->first_vector_idx for queue configuration_id: id of VF which will - * use this irqs - * - * Only SRIOV specific vectors are tracked in sriov_irq_bm. SRIOV vectors are - * allocated from the end of global irq index. First bit in sriov_irq_bm means - * last irq index etc. It simplifies extension of SRIOV vectors. - * They will be always located from sriov_base_vector to the last irq - * index. While increasing/decreasing sriov_base_vector can be moved. 
- */ -static int ice_sriov_get_irqs(struct ice_pf *pf, u16 needed) -{ - int res = bitmap_find_next_zero_area(pf->sriov_irq_bm, - pf->sriov_irq_size, 0, needed, 0); - /* conversion from number in bitmap to global irq index */ - int index = pf->sriov_irq_size - res - needed; - - if (res >= pf->sriov_irq_size || index < pf->sriov_base_vector) - return -ENOENT; - - bitmap_set(pf->sriov_irq_bm, res, needed); - return index; -} - -/** - * ice_sriov_free_irqs - free irqs used by the VF - * @pf: pointer to PF structure - * @vf: pointer to VF structure - */ -static void ice_sriov_free_irqs(struct ice_pf *pf, struct ice_vf *vf) -{ - /* Move back from first vector index to first index in bitmap */ - int bm_i = pf->sriov_irq_size - vf->first_vector_idx - vf->num_msix; - - bitmap_clear(pf->sriov_irq_bm, bm_i, vf->num_msix); - vf->first_vector_idx = 0; -} - -/** * ice_init_vf_vsi_res - initialize/setup VF VSI resources * @vf: VF to initialize/setup the VSI for * @@ -556,7 +441,7 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf) struct ice_vsi *vsi; int err; - vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix); + vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix); if (vf->first_vector_idx < 0) return -ENOMEM; @@ -599,7 +484,7 @@ static int ice_start_vfs(struct ice_pf *pf) goto teardown; } - retval = ice_eswitch_attach(pf, vf); + retval = ice_eswitch_attach_vf(pf, vf); if (retval) { dev_err(ice_pf_to_dev(pf), "Failed to attach VF %d to eswitch, error %d", vf->vf_id, retval); @@ -833,11 +718,6 @@ static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs) pci_dev_get(vfdev); - /* set default number of MSI-X */ - vf->num_msix = pf->vfs.num_msix_per; - vf->num_vf_qs = pf->vfs.num_qps_per; - ice_vc_set_default_allowlist(vf); - hash_add_rcu(vfs->table, &vf->entry, vf_id); } @@ -861,16 +741,10 @@ err_free_entries: */ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) { - int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors; struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; int ret; - pf->sriov_irq_bm = bitmap_zalloc(total_vectors, GFP_KERNEL); - if (!pf->sriov_irq_bm) - return -ENOMEM; - pf->sriov_irq_size = total_vectors; - /* Disable global interrupt 0 so we don't try to handle the VFLR. 
*/ wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index), ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S); @@ -897,7 +771,6 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs) goto err_unroll_sriov; } - ice_eswitch_reserve_cp_queues(pf, num_vfs); ret = ice_start_vfs(pf); if (ret) { dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret); @@ -924,7 +797,6 @@ err_unroll_intr: /* rearm interrupts here */ ice_irq_dynamic_ena(hw, NULL, NULL); clear_bit(ICE_OICR_INTR_DIS, pf->state); - bitmap_free(pf->sriov_irq_bm); return ret; } @@ -998,16 +870,7 @@ u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev) { struct ice_pf *pf = pci_get_drvdata(pdev); - return pf->sriov_irq_size - ice_get_max_used_msix_vector(pf); -} - -static int ice_sriov_move_base_vector(struct ice_pf *pf, int move) -{ - if (pf->sriov_base_vector - move < ice_get_max_used_msix_vector(pf)) - return -ENOMEM; - - pf->sriov_base_vector -= move; - return 0; + return pf->virt_irq_tracker.num_entries; } static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id) @@ -1026,7 +889,8 @@ static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id) continue; ice_dis_vf_mappings(tmp_vf); - ice_sriov_free_irqs(pf, tmp_vf); + ice_virt_free_irqs(pf, tmp_vf->first_vector_idx, + tmp_vf->num_msix); vf_ids[to_remap] = tmp_vf->vf_id; to_remap += 1; @@ -1038,7 +902,7 @@ static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id) continue; tmp_vf->first_vector_idx = - ice_sriov_get_irqs(pf, tmp_vf->num_msix); + ice_virt_get_irqs(pf, tmp_vf->num_msix); /* there is no need to rebuild VSI as we are only changing the * vector indexes not amount of MSI-X or queues */ @@ -1103,30 +967,30 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count) return -ENOENT; vsi = ice_get_vf_vsi(vf); - if (!vsi) + if (!vsi) { + ice_put_vf(vf); return -ENOENT; + } prev_msix = vf->num_msix; prev_queues = vf->num_vf_qs; - if (ice_sriov_move_base_vector(pf, msix_vec_count - prev_msix)) { - ice_put_vf(vf); - return -ENOSPC; - } - ice_dis_vf_mappings(vf); - ice_sriov_free_irqs(pf, vf); + ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix); /* Remap all VFs beside the one is now configured */ ice_sriov_remap_vectors(pf, vf->vf_id); vf->num_msix = msix_vec_count; vf->num_vf_qs = queues; - vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix); + vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix); if (vf->first_vector_idx < 0) goto unroll; - if (ice_vf_reconfig_vsi(vf) || ice_vf_init_host_cfg(vf, vsi)) { + vsi->req_txq = queues; + vsi->req_rxq = queues; + + if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) { /* Try to rebuild with previous values */ needs_rebuild = true; goto unroll; @@ -1148,13 +1012,18 @@ unroll: vf->num_msix = prev_msix; vf->num_vf_qs = prev_queues; - vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix); - if (vf->first_vector_idx < 0) + + vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix); + if (vf->first_vector_idx < 0) { + ice_put_vf(vf); return -EINVAL; + } if (needs_rebuild) { - ice_vf_reconfig_vsi(vf); - ice_vf_init_host_cfg(vf, vsi); + vsi->req_txq = prev_queues; + vsi->req_rxq = prev_queues; + + ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); } ice_ena_vf_mappings(vf); @@ -1423,21 +1292,23 @@ out_put_vf: } /** - * ice_set_vf_mac - * @netdev: network interface device structure + * __ice_set_vf_mac - program VF MAC address + * @pf: PF to be configure * @vf_id: VF identifier * @mac: MAC address * * program VF MAC address + * Return: zero on success or an error code on 
failure */ -int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +int __ice_set_vf_mac(struct ice_pf *pf, u16 vf_id, const u8 *mac) { - struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct device *dev; struct ice_vf *vf; int ret; + dev = ice_pf_to_dev(pf); if (is_multicast_ether_addr(mac)) { - netdev_err(netdev, "%pM not a valid unicast address\n", mac); + dev_err(dev, "%pM not a valid unicast address\n", mac); return -EINVAL; } @@ -1466,13 +1337,13 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) if (is_zero_ether_addr(mac)) { /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */ vf->pf_set_mac = false; - netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n", - vf->vf_id); + dev_info(dev, "Removing MAC on VF %d. VF driver will be reinitialized\n", + vf->vf_id); } else { /* PF will add MAC rule for the VF */ vf->pf_set_mac = true; - netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n", - mac, vf_id); + dev_info(dev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n", + mac, vf_id); } ice_reset_vf(vf, ICE_VF_RESET_NOTIFY); @@ -1484,6 +1355,20 @@ out_put_vf: } /** + * ice_set_vf_mac - .ndo_set_vf_mac handler + * @netdev: network interface device structure + * @vf_id: VF identifier + * @mac: MAC address + * + * program VF MAC address + * Return: zero on success or an error code on failure + */ +int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +{ + return __ice_set_vf_mac(ice_netdev_to_pf(netdev), vf_id, mac); +} + +/** * ice_set_vf_trust * @netdev: network interface device structure * @vf_id: VF identifier @@ -1518,6 +1403,9 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) mutex_lock(&vf->cfg_lock); + while (!trusted && vf->num_mac_lldp) + ice_vf_update_mac_lldp_num(vf, ice_get_vf_vsi(vf), false); + vf->trusted = trusted; ice_reset_vf(vf, ICE_VF_RESET_NOTIFY); dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n", @@ -1869,6 +1757,24 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf) } /** + * ice_print_vf_tx_mdd_event - print VF Tx malicious driver detect event + * @vf: pointer to the VF structure + */ +void ice_print_vf_tx_mdd_event(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + struct device *dev; + + dev = ice_pf_to_dev(pf); + + dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n", + vf->mdd_tx_events.count, pf->hw.pf_id, vf->vf_id, + vf->dev_lan_addr, + test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags) + ? 
"on" : "off"); +} + +/** * ice_print_vfs_mdd_events - print VFs malicious driver detect event * @pf: pointer to the PF structure * @@ -1876,8 +1782,6 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf) */ void ice_print_vfs_mdd_events(struct ice_pf *pf) { - struct device *dev = ice_pf_to_dev(pf); - struct ice_hw *hw = &pf->hw; struct ice_vf *vf; unsigned int bkt; @@ -1904,10 +1808,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf) if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) { vf->mdd_tx_events.last_printed = vf->mdd_tx_events.count; - - dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n", - vf->mdd_tx_events.count, hw->pf_id, vf->vf_id, - vf->dev_lan_addr); + ice_print_vf_tx_mdd_event(vf); } } mutex_unlock(&pf->vfs.table_lock); diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h index 8488df38b586..96549ca5c52c 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.h +++ b/drivers/net/ethernet/intel/ice/ice_sriov.h @@ -28,6 +28,7 @@ #ifdef CONFIG_PCI_IOV void ice_process_vflr_event(struct ice_pf *pf); int ice_sriov_configure(struct pci_dev *pdev, int num_vfs); +int __ice_set_vf_mac(struct ice_pf *pf, u16 vf_id, const u8 *mac); int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac); int ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi); @@ -49,7 +50,7 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state); int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena); -int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector); +void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector); int ice_get_vf_stats(struct net_device *netdev, int vf_id, @@ -58,6 +59,7 @@ void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event); void ice_print_vfs_mdd_events(struct ice_pf *pf); void ice_print_vf_rx_mdd_event(struct ice_vf *vf); +void ice_print_vf_tx_mdd_event(struct ice_vf *vf); bool ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto); u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev); @@ -69,6 +71,7 @@ static inline void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { } static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { } static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { } +static inline void ice_print_vf_tx_mdd_event(struct ice_vf *vf) { } static inline void ice_restore_all_vfs_msi_state(struct ice_pf *pf) { } static inline int @@ -79,6 +82,13 @@ ice_sriov_configure(struct pci_dev __always_unused *pdev, } static inline int +__ice_set_vf_mac(struct ice_pf __always_unused *pf, + u16 __always_unused vf_id, const u8 __always_unused *mac) +{ + return -EOPNOTSUPP; +} + +static inline int ice_set_vf_mac(struct net_device __always_unused *netdev, int __always_unused vf_id, u8 __always_unused *mac) { @@ -130,11 +140,10 @@ ice_set_vf_bw(struct net_device __always_unused *netdev, return -EOPNOTSUPP; } -static inline int +static inline void ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf, struct ice_q_vector __always_unused *q_vector) { - return 0; } static inline int diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index b4ea935e8300..9d9a7edd3618 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -3,6 +3,7 @@ #include "ice_lib.h" #include "ice_switch.h" 
+#include "ice_trace.h" #define ICE_ETH_DA_OFFSET 0 #define ICE_ETH_ETHTYPE_OFFSET 12 @@ -42,6 +43,7 @@ enum { ICE_PKT_KMALLOC = BIT(9), ICE_PKT_PPPOE = BIT(10), ICE_PKT_L2TPV3 = BIT(11), + ICE_PKT_PFCP = BIT(12), }; struct ice_dummy_pkt_offsets { @@ -1110,6 +1112,77 @@ ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = { 0x00, 0x00, }; +ICE_DECLARE_PKT_OFFSETS(pfcp_session_ipv4) = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV4_OFOS, 14 }, + { ICE_UDP_ILOS, 34 }, + { ICE_PFCP, 42 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +ICE_DECLARE_PKT_TEMPLATE(pfcp_session_ipv4) = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x08, 0x00, /* ICE_ETYPE_OL 12 */ + + 0x45, 0x00, 0x00, 0x2c, /* ICE_IPV4_OFOS 14 */ + 0x00, 0x01, 0x00, 0x00, + 0x00, 0x11, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x22, 0x65, /* ICE_UDP_ILOS 34 */ + 0x00, 0x18, 0x00, 0x00, + + 0x21, 0x01, 0x00, 0x0c, /* ICE_PFCP 42 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + +ICE_DECLARE_PKT_OFFSETS(pfcp_session_ipv6) = { + { ICE_MAC_OFOS, 0 }, + { ICE_ETYPE_OL, 12 }, + { ICE_IPV6_OFOS, 14 }, + { ICE_UDP_ILOS, 54 }, + { ICE_PFCP, 62 }, + { ICE_PROTOCOL_LAST, 0 }, +}; + +ICE_DECLARE_PKT_TEMPLATE(pfcp_session_ipv6) = { + 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x86, 0xdd, /* ICE_ETYPE_OL 12 */ + + 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */ + 0x00, 0x10, 0x11, 0x00, /* Next header UDP */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, 0x22, 0x65, /* ICE_UDP_ILOS 54 */ + 0x00, 0x18, 0x00, 0x00, + + 0x21, 0x01, 0x00, 0x0c, /* ICE_PFCP 62 */ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, /* 2 bytes for 4 byte alignment */ +}; + ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_tcp) = { { ICE_MAC_OFOS, 0 }, { ICE_ETYPE_OL, 12 }, @@ -1343,6 +1416,8 @@ static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = { ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU), ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6), ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC), + ICE_PKT_PROFILE(pfcp_session_ipv6, ICE_PKT_PFCP | ICE_PKT_OUTER_IPV6), + ICE_PKT_PROFILE(pfcp_session_ipv4, ICE_PKT_PFCP), ICE_PKT_PROFILE(pppoe_ipv6_udp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 | ICE_PKT_INNER_UDP), ICE_PKT_PROFILE(pppoe_ipv6_tcp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6), @@ -1397,7 +1472,6 @@ int ice_init_def_sw_recp(struct ice_hw *hw) recps[i].root_rid = i; INIT_LIST_HEAD(&recps[i].filt_rules); INIT_LIST_HEAD(&recps[i].filt_replay_rules); - INIT_LIST_HEAD(&recps[i].rg_list); mutex_init(&recps[i].filt_rule_lock); } @@ -1825,7 +1899,8 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id, lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || lkup_type == ICE_SW_LKUP_PROMISC || lkup_type == ICE_SW_LKUP_PROMISC_VLAN || - lkup_type == ICE_SW_LKUP_DFLT) { + lkup_type == ICE_SW_LKUP_DFLT || + lkup_type == ICE_SW_LKUP_LAST) { sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP); } else if (lkup_type == ICE_SW_LKUP_VLAN) { if (opc == ice_aqc_opc_alloc_res) @@ -1887,6 +1962,15 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) status = 
-ENOENT; + if (!status) { + if (opc == ice_aqc_opc_add_sw_rules) + hw->switch_info->rule_cnt += num_rules; + else if (opc == ice_aqc_opc_remove_sw_rules) + hw->switch_info->rule_cnt -= num_rules; + } + + trace_ice_aq_sw_rules(hw->switch_info); + return status; } @@ -2075,6 +2159,18 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc, } /** + * ice_init_chk_recipe_reuse_support - check if recipe reuse is supported + * @hw: pointer to the hardware structure + */ +void ice_init_chk_recipe_reuse_support(struct ice_hw *hw) +{ + struct ice_nvm_info *nvm = &hw->flash.nvm; + + hw->recp_reuse = (nvm->major == 0x4 && nvm->minor >= 0x30) || + nvm->major > 0x4; +} + +/** * ice_alloc_recipe - add recipe resource * @hw: pointer to the hardware structure * @rid: recipe ID returned as response to AQ call @@ -2083,21 +2179,97 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid) { DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1); u16 buf_len = __struct_size(sw_buf); + u16 res_type; int status; sw_buf->num_elems = cpu_to_le16(1); - sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE << - ICE_AQC_RES_TYPE_S) | - ICE_AQC_RES_TYPE_FLAG_SHARED); + res_type = FIELD_PREP(ICE_AQC_RES_TYPE_M, ICE_AQC_RES_TYPE_RECIPE); + if (hw->recp_reuse) + res_type |= ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED; + else + res_type |= ICE_AQC_RES_TYPE_FLAG_SHARED; + sw_buf->res_type = cpu_to_le16(res_type); status = ice_aq_alloc_free_res(hw, sw_buf, buf_len, ice_aqc_opc_alloc_res); - if (!status) + if (!status) { *rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp); + hw->switch_info->recp_cnt++; + } + + return status; +} + +/** + * ice_free_recipe_res - free recipe resource + * @hw: pointer to the hardware structure + * @rid: recipe ID to free + * + * Return: 0 on success, and others on error + */ +static int ice_free_recipe_res(struct ice_hw *hw, u16 rid) +{ + int status; + + status = ice_free_hw_res(hw, ICE_AQC_RES_TYPE_RECIPE, 1, &rid); + if (!status) + hw->switch_info->recp_cnt--; return status; } /** + * ice_release_recipe_res - disassociate and free recipe resource + * @hw: pointer to the hardware structure + * @recp: the recipe struct resource to unassociate and free + * + * Return: 0 on success, and others on error + */ +static int ice_release_recipe_res(struct ice_hw *hw, + struct ice_sw_recipe *recp) +{ + DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); + struct ice_switch_info *sw = hw->switch_info; + u64 recp_assoc; + u32 rid, prof; + int status; + + for_each_set_bit(rid, recp->r_bitmap, ICE_MAX_NUM_RECIPES) { + for_each_set_bit(prof, recipe_to_profile[rid], + ICE_MAX_NUM_PROFILES) { + status = ice_aq_get_recipe_to_profile(hw, prof, + &recp_assoc, + NULL); + if (status) + return status; + + bitmap_from_arr64(r_bitmap, &recp_assoc, + ICE_MAX_NUM_RECIPES); + bitmap_andnot(r_bitmap, r_bitmap, recp->r_bitmap, + ICE_MAX_NUM_RECIPES); + bitmap_to_arr64(&recp_assoc, r_bitmap, + ICE_MAX_NUM_RECIPES); + ice_aq_map_recipe_to_profile(hw, prof, + recp_assoc, NULL); + + clear_bit(rid, profile_to_recipe[prof]); + clear_bit(prof, recipe_to_profile[rid]); + } + + status = ice_free_recipe_res(hw, rid); + if (status) + return status; + + sw->recp_list[rid].recp_created = false; + sw->recp_list[rid].adv_rule = false; + memset(&sw->recp_list[rid].lkup_exts, 0, + sizeof(sw->recp_list[rid].lkup_exts)); + clear_bit(rid, recp->r_bitmap); + } + + return 0; +} + +/** * ice_get_recp_to_prof_map - updates recipe to profile mapping * @hw: pointer to hardware structure * @@ -2127,25 +2299,12 @@ static void 
ice_get_recp_to_prof_map(struct ice_hw *hw) } /** - * ice_collect_result_idx - copy result index values - * @buf: buffer that contains the result index - * @recp: the recipe struct to copy data into - */ -static void -ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf, - struct ice_sw_recipe *recp) -{ - if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN) - set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN, - recp->res_idxs); -} - -/** * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries * @hw: pointer to hardware structure * @recps: struct that we need to populate * @rid: recipe ID that we are populating * @refresh_required: true if we should get recipe to profile mapping from FW + * @is_add: flag of adding recipe * * This function is used to populate all the necessary entries into our * bookkeeping so that we have a current list of all the recipes that are @@ -2153,7 +2312,7 @@ ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf, */ static int ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, - bool *refresh_required) + bool *refresh_required, bool is_add) { DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS); struct ice_aqc_recipe_data_elem *tmp; @@ -2197,18 +2356,10 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, for (sub_recps = 0; sub_recps < num_recps; sub_recps++) { struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps]; - struct ice_recp_grp_entry *rg_entry; u8 i, prof, idx, prot = 0; bool is_root; u16 off = 0; - rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry), - GFP_KERNEL); - if (!rg_entry) { - status = -ENOMEM; - goto err_unroll; - } - idx = root_bufs.recipe_indx; is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT; @@ -2221,11 +2372,8 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, prof = find_first_bit(recipe_to_profile[idx], ICE_MAX_NUM_PROFILES); for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) { - u8 lkup_indx = root_bufs.content.lkup_indx[i + 1]; - - rg_entry->fv_idx[i] = lkup_indx; - rg_entry->fv_mask[i] = - le16_to_cpu(root_bufs.content.mask[i + 1]); + u8 lkup_indx = root_bufs.content.lkup_indx[i]; + u16 lkup_mask = le16_to_cpu(root_bufs.content.mask[i]); /* If the recipe is a chained recipe then all its * child recipe's result will have a result index. @@ -2236,42 +2384,38 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a * valid offset value. 
*/ - if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) || - rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE || - rg_entry->fv_idx[i] == 0) + if (!lkup_indx || + (lkup_indx & ICE_AQ_RECIPE_LKUP_IGNORE) || + test_bit(lkup_indx, + hw->switch_info->prof_res_bm[prof])) continue; - ice_find_prot_off(hw, ICE_BLK_SW, prof, - rg_entry->fv_idx[i], &prot, &off); + ice_find_prot_off(hw, ICE_BLK_SW, prof, lkup_indx, + &prot, &off); lkup_exts->fv_words[fv_word_idx].prot_id = prot; lkup_exts->fv_words[fv_word_idx].off = off; - lkup_exts->field_mask[fv_word_idx] = - rg_entry->fv_mask[i]; + lkup_exts->field_mask[fv_word_idx] = lkup_mask; fv_word_idx++; } - /* populate rg_list with the data from the child entry of this - * recipe - */ - list_add(&rg_entry->l_entry, &recps[rid].rg_list); /* Propagate some data to the recipe database */ - recps[idx].is_root = !!is_root; recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority; - recps[idx].need_pass_l2 = root_bufs.content.act_ctrl & - ICE_AQ_RECIPE_ACT_NEED_PASS_L2; - recps[idx].allow_pass_l2 = root_bufs.content.act_ctrl & - ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2; + recps[idx].need_pass_l2 = !!(root_bufs.content.act_ctrl & + ICE_AQ_RECIPE_ACT_NEED_PASS_L2); + recps[idx].allow_pass_l2 = !!(root_bufs.content.act_ctrl & + ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2); bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS); if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) { - recps[idx].chain_idx = root_bufs.content.result_indx & - ~ICE_AQ_RECIPE_RESULT_EN; - set_bit(recps[idx].chain_idx, recps[idx].res_idxs); - } else { - recps[idx].chain_idx = ICE_INVAL_CHAIN_IND; + set_bit(root_bufs.content.result_indx & + ~ICE_AQ_RECIPE_RESULT_EN, recps[idx].res_idxs); } - if (!is_root) + if (!is_root) { + if (hw->recp_reuse && is_add) + recps[idx].recp_created = true; + continue; + } /* Only do the following for root recipes entries */ memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap, @@ -2283,19 +2427,11 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, /* Complete initialization of the root recipe entry */ lkup_exts->n_val_words = fv_word_idx; - recps[rid].big_recp = (num_recps > 1); - recps[rid].n_grp_count = (u8)num_recps; - recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp, - recps[rid].n_grp_count * sizeof(*recps[rid].root_buf), - GFP_KERNEL); - if (!recps[rid].root_buf) { - status = -ENOMEM; - goto err_unroll; - } /* Copy result indexes */ bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS); - recps[rid].recp_created = true; + if (is_add) + recps[rid].recp_created = true; err_unroll: kfree(tmp); @@ -2446,6 +2582,9 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi) fi->lan_en = true; } } + + if (fi->flag & ICE_FLTR_TX_ONLY) + fi->lan_en = false; } /** @@ -2759,7 +2898,8 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi, lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC || lkup_type == ICE_SW_LKUP_PROMISC || lkup_type == ICE_SW_LKUP_PROMISC_VLAN || - lkup_type == ICE_SW_LKUP_DFLT) + lkup_type == ICE_SW_LKUP_DFLT || + lkup_type == ICE_SW_LKUP_LAST) rule_type = remove ? 
ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR : ICE_AQC_SW_RULES_T_VSI_LIST_SET; else if (lkup_type == ICE_SW_LKUP_VLAN) @@ -3006,7 +3146,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, u16 vsi_handle_arr[2]; /* A rule already exists with the new VSI being added */ - if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id) + if (cur_fltr->vsi_handle == new_fltr->vsi_handle) return -EEXIST; vsi_handle_arr[0] = cur_fltr->vsi_handle; @@ -3054,7 +3194,7 @@ ice_add_update_vsi_list(struct ice_hw *hw, /* A rule already exists with the new VSI being added */ if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) - return 0; + return -EEXIST; /* Update the previously created VSI list set with * the new VSI ID passed in @@ -3124,7 +3264,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle, list_head = &sw->recp_list[recp_id].filt_rules; list_for_each_entry(list_itr, list_head, list_entry) { - if (list_itr->vsi_list_info) { + if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) { map_info = list_itr->vsi_list_info; if (test_bit(vsi_handle, map_info->vsi_map)) { *vsi_list_id = map_info->vsi_list_id; @@ -3821,6 +3961,7 @@ ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set, } else if (f_info.flag & ICE_FLTR_TX) { f_info.src_id = ICE_SRC_ID_VSI; f_info.src = hw_vsi_id; + f_info.flag |= ICE_FLTR_TX_ONLY; } f_list_entry.fltr_info = f_info; @@ -4528,6 +4669,7 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = { ICE_PROTOCOL_ENTRY(ICE_NVGRE, 0, 2, 4, 6), ICE_PROTOCOL_ENTRY(ICE_GTP, 8, 10, 12, 14, 16, 18, 20, 22), ICE_PROTOCOL_ENTRY(ICE_GTP_NO_PAY, 8, 10, 12, 14), + ICE_PROTOCOL_ENTRY(ICE_PFCP, 8, 10, 12, 14, 16, 18, 20, 22), ICE_PROTOCOL_ENTRY(ICE_PPPOE, 0, 2, 4, 6), ICE_PROTOCOL_ENTRY(ICE_L2TPV3, 0, 2, 4, 6, 8, 10), ICE_PROTOCOL_ENTRY(ICE_VLAN_EX, 2, 0), @@ -4561,6 +4703,7 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { { ICE_NVGRE, ICE_GRE_OF_HW }, { ICE_GTP, ICE_UDP_OF_HW }, { ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW }, + { ICE_PFCP, ICE_UDP_ILOS_HW }, { ICE_PPPOE, ICE_PPPOE_HW }, { ICE_L2TPV3, ICE_L2TPV3_HW }, { ICE_VLAN_EX, ICE_VLAN_OF_HW }, @@ -4573,12 +4716,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = { * @hw: pointer to the hardware structure * @lkup_exts: extension sequence to match * @rinfo: information regarding the rule e.g. priority and action info + * @is_add: flag of adding recipe * * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found. */ static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, - const struct ice_adv_rule_info *rinfo) + const struct ice_adv_rule_info *rinfo, bool is_add) { bool refresh_required = true; struct ice_sw_recipe *recp; @@ -4592,16 +4736,12 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, * entry update it in our SW bookkeeping and continue with the * matching. 
*/ - if (!recp[i].recp_created) + if (hw->recp_reuse) { if (ice_get_recp_frm_fw(hw, hw->switch_info->recp_list, i, - &refresh_required)) + &refresh_required, is_add)) continue; - - /* Skip inverse action recipes */ - if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl & - ICE_AQ_RECIPE_ACT_INV_ACT) - continue; + } /* if number of words we are looking for match */ if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) { @@ -4644,7 +4784,8 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, */ if (found && recp[i].tun_type == rinfo->tun_type && recp[i].need_pass_l2 == rinfo->need_pass_l2 && - recp[i].allow_pass_l2 == rinfo->allow_pass_l2) + recp[i].allow_pass_l2 == rinfo->allow_pass_l2 && + recp[i].priority == rinfo->priority) return i; /* Return the recipe ID */ } } @@ -4727,110 +4868,55 @@ ice_fill_valid_words(struct ice_adv_lkup_elem *rule, } /** - * ice_create_first_fit_recp_def - Create a recipe grouping - * @hw: pointer to the hardware structure - * @lkup_exts: an array of protocol header extractions - * @rg_list: pointer to a list that stores new recipe groups - * @recp_cnt: pointer to a variable that stores returned number of recipe groups - * - * Using first fit algorithm, take all the words that are still not done - * and start grouping them in 4-word groups. Each group makes up one - * recipe. - */ -static int -ice_create_first_fit_recp_def(struct ice_hw *hw, - struct ice_prot_lkup_ext *lkup_exts, - struct list_head *rg_list, - u8 *recp_cnt) -{ - struct ice_pref_recipe_group *grp = NULL; - u8 j; - - *recp_cnt = 0; - - /* Walk through every word in the rule to check if it is not done. If so - * then this word needs to be part of a new recipe. - */ - for (j = 0; j < lkup_exts->n_val_words; j++) - if (!test_bit(j, lkup_exts->done)) { - if (!grp || - grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) { - struct ice_recp_grp_entry *entry; - - entry = devm_kzalloc(ice_hw_to_dev(hw), - sizeof(*entry), - GFP_KERNEL); - if (!entry) - return -ENOMEM; - list_add(&entry->l_entry, rg_list); - grp = &entry->r_group; - (*recp_cnt)++; - } - - grp->pairs[grp->n_val_pairs].prot_id = - lkup_exts->fv_words[j].prot_id; - grp->pairs[grp->n_val_pairs].off = - lkup_exts->fv_words[j].off; - grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j]; - grp->n_val_pairs++; - } - - return 0; -} - -/** * ice_fill_fv_word_index - fill in the field vector indices for a recipe group * @hw: pointer to the hardware structure - * @fv_list: field vector with the extraction sequence information - * @rg_list: recipe groupings with protocol-offset pairs + * @rm: recipe management list entry * * Helper function to fill in the field vector indices for protocol-offset * pairs. These indexes are then ultimately programmed into a recipe. 
*/ static int -ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list, - struct list_head *rg_list) +ice_fill_fv_word_index(struct ice_hw *hw, struct ice_sw_recipe *rm) { struct ice_sw_fv_list_entry *fv; - struct ice_recp_grp_entry *rg; struct ice_fv_word *fv_ext; + u8 i; - if (list_empty(fv_list)) - return 0; + if (list_empty(&rm->fv_list)) + return -EINVAL; - fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry, + fv = list_first_entry(&rm->fv_list, struct ice_sw_fv_list_entry, list_entry); fv_ext = fv->fv_ptr->ew; - list_for_each_entry(rg, rg_list, l_entry) { - u8 i; - - for (i = 0; i < rg->r_group.n_val_pairs; i++) { - struct ice_fv_word *pr; - bool found = false; - u16 mask; - u8 j; - - pr = &rg->r_group.pairs[i]; - mask = rg->r_group.mask[i]; + /* Add switch id as the first word. */ + rm->fv_idx[0] = ICE_AQ_SW_ID_LKUP_IDX; + rm->fv_mask[0] = ICE_AQ_SW_ID_LKUP_MASK; + rm->n_ext_words++; - for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) - if (fv_ext[j].prot_id == pr->prot_id && - fv_ext[j].off == pr->off) { - found = true; + for (i = 1; i < rm->n_ext_words; i++) { + struct ice_fv_word *fv_word = &rm->ext_words[i - 1]; + u16 fv_mask = rm->word_masks[i - 1]; + bool found = false; + u8 j; - /* Store index of field vector */ - rg->fv_idx[i] = j; - rg->fv_mask[i] = mask; - break; - } + for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) { + if (fv_ext[j].prot_id == fv_word->prot_id && + fv_ext[j].off == fv_word->off) { + found = true; - /* Protocol/offset could not be found, caller gave an - * invalid pair - */ - if (!found) - return -EINVAL; + /* Store index of field vector */ + rm->fv_idx[i] = j; + rm->fv_mask[i] = fv_mask; + break; + } } + + /* Protocol/offset could not be found, caller gave an invalid + * pair. + */ + if (!found) + return -EINVAL; } return 0; @@ -4904,335 +4990,223 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles, } /** - * ice_add_sw_recipe - function to call AQ calls to create switch recipe - * @hw: pointer to hardware structure - * @rm: recipe management list entry - * @profiles: bitmap of profiles that will be associated. + * ice_calc_recp_cnt - calculate number of recipes based on word count + * @word_cnt: number of lookup words + * + * Word count should include switch ID word and regular lookup words. + * Returns: number of recipes required to fit @word_cnt, including extra recipes + * needed for recipe chaining (if needed). */ -static int -ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, - unsigned long *profiles) +static int ice_calc_recp_cnt(u8 word_cnt) { - DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS); - struct ice_aqc_recipe_content *content; - struct ice_aqc_recipe_data_elem *tmp; - struct ice_aqc_recipe_data_elem *buf; - struct ice_recp_grp_entry *entry; - u16 free_res_idx; - u16 recipe_count; - u8 chain_idx; - u8 recps = 0; - int status; + /* All words fit in a single recipe, no need for chaining. */ + if (word_cnt <= ICE_NUM_WORDS_RECIPE) + return 1; - /* When more than one recipe are required, another recipe is needed to - * chain them together. Matching a tunnel metadata ID takes up one of - * the match fields in the chaining recipe reducing the number of - * chained recipes by one. + /* Recipe chaining required. Result indexes are fitted right after + * regular lookup words. In some cases a new recipe must be added in + * order to fit result indexes. + * + * While the word count increases, every 5 words an extra recipe needs + * to be added. 
However, by adding a recipe, one word for its result + * index must also be added, therefore every 4 words recipe count + * increases by 1. This calculation does not apply to word count == 1, + * which is handled above. */ - /* check number of free result indices */ - bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS); - free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm); + return (word_cnt + 2) / (ICE_NUM_WORDS_RECIPE - 1); +} - ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n", - free_res_idx, rm->n_grp_count); +static void fill_recipe_template(struct ice_aqc_recipe_data_elem *recp, u16 rid, + const struct ice_sw_recipe *rm) +{ + int i; - if (rm->n_grp_count > 1) { - if (rm->n_grp_count > free_res_idx) - return -ENOSPC; + recp->recipe_indx = rid; + recp->content.act_ctrl |= ICE_AQ_RECIPE_ACT_PRUNE_INDX_M; - rm->n_grp_count++; + for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) { + recp->content.lkup_indx[i] = ICE_AQ_RECIPE_LKUP_IGNORE; + recp->content.mask[i] = cpu_to_le16(0); } - if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE) - return -ENOSPC; + set_bit(rid, (unsigned long *)recp->recipe_bitmap); + recp->content.act_ctrl_fwd_priority = rm->priority; - tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL); - if (!tmp) - return -ENOMEM; + if (rm->need_pass_l2) + recp->content.act_ctrl |= ICE_AQ_RECIPE_ACT_NEED_PASS_L2; - buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf), - GFP_KERNEL); - if (!buf) { - status = -ENOMEM; - goto err_mem; - } - - bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES); - recipe_count = ICE_MAX_NUM_RECIPES; - status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC, - NULL); - if (status || recipe_count == 0) - goto err_unroll; + if (rm->allow_pass_l2) + recp->content.act_ctrl |= ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2; +} - /* Allocate the recipe resources, and configure them according to the - * match fields from protocol headers and extracted field vectors. - */ - chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS); - list_for_each_entry(entry, &rm->rg_list, l_entry) { - u8 i; +static void bookkeep_recipe(struct ice_sw_recipe *recipe, + struct ice_aqc_recipe_data_elem *r, + const struct ice_sw_recipe *rm) +{ + memcpy(recipe->r_bitmap, r->recipe_bitmap, sizeof(recipe->r_bitmap)); - status = ice_alloc_recipe(hw, &entry->rid); - if (status) - goto err_unroll; + recipe->priority = r->content.act_ctrl_fwd_priority; + recipe->tun_type = rm->tun_type; + recipe->need_pass_l2 = rm->need_pass_l2; + recipe->allow_pass_l2 = rm->allow_pass_l2; + recipe->recp_created = true; +} - content = &buf[recps].content; +/* For memcpy in ice_add_sw_recipe. */ +static_assert(sizeof_field(struct ice_aqc_recipe_data_elem, recipe_bitmap) == + sizeof_field(struct ice_sw_recipe, r_bitmap)); - /* Clear the result index of the located recipe, as this will be - * updated, if needed, later in the recipe creation process. - */ - tmp[0].content.result_indx = 0; +/** + * ice_add_sw_recipe - function to call AQ calls to create switch recipe + * @hw: pointer to hardware structure + * @rm: recipe management list entry + * @profiles: bitmap of profiles that will be associated. 
+ */ +static int +ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, + unsigned long *profiles) +{ + struct ice_aqc_recipe_data_elem *buf __free(kfree) = NULL; + DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS); + struct ice_aqc_recipe_data_elem *root; + struct ice_sw_recipe *recipe; + u16 free_res_idx, rid; + int lookup = 0; + int recp_cnt; + int status; + int word; + int i; - buf[recps] = tmp[0]; - buf[recps].recipe_indx = (u8)entry->rid; - /* if the recipe is a non-root recipe RID should be programmed - * as 0 for the rules to be applied correctly. - */ - content->rid = 0; - memset(&content->lkup_indx, 0, - sizeof(content->lkup_indx)); - - /* All recipes use look-up index 0 to match switch ID. */ - content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; - content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); - /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask - * to be 0 - */ - for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { - content->lkup_indx[i] = 0x80; - content->mask[i] = 0; - } + recp_cnt = ice_calc_recp_cnt(rm->n_ext_words); - for (i = 0; i < entry->r_group.n_val_pairs; i++) { - content->lkup_indx[i + 1] = entry->fv_idx[i]; - content->mask[i + 1] = cpu_to_le16(entry->fv_mask[i]); - } + bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS); + bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES); - if (rm->n_grp_count > 1) { - /* Checks to see if there really is a valid result index - * that can be used. - */ - if (chain_idx >= ICE_MAX_FV_WORDS) { - ice_debug(hw, ICE_DBG_SW, "No chain index available\n"); - status = -ENOSPC; - goto err_unroll; - } + /* Check number of free result indices */ + free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm); - entry->chain_idx = chain_idx; - content->result_indx = - ICE_AQ_RECIPE_RESULT_EN | - FIELD_PREP(ICE_AQ_RECIPE_RESULT_DATA_M, - chain_idx); - clear_bit(chain_idx, result_idx_bm); - chain_idx = find_first_bit(result_idx_bm, - ICE_MAX_FV_WORDS); - } + ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n", + free_res_idx, recp_cnt); - /* fill recipe dependencies */ - bitmap_zero((unsigned long *)buf[recps].recipe_bitmap, - ICE_MAX_NUM_RECIPES); - set_bit(buf[recps].recipe_indx, - (unsigned long *)buf[recps].recipe_bitmap); - content->act_ctrl_fwd_priority = rm->priority; + /* Last recipe doesn't need result index */ + if (recp_cnt - 1 > free_res_idx) + return -ENOSPC; - if (rm->need_pass_l2) - content->act_ctrl |= ICE_AQ_RECIPE_ACT_NEED_PASS_L2; + if (recp_cnt > ICE_MAX_CHAIN_RECIPE_RES) + return -E2BIG; - if (rm->allow_pass_l2) - content->act_ctrl |= ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2; - recps++; - } + buf = kcalloc(recp_cnt, sizeof(*buf), GFP_KERNEL); + if (!buf) + return -ENOMEM; - if (rm->n_grp_count == 1) { - rm->root_rid = buf[0].recipe_indx; - set_bit(buf[0].recipe_indx, rm->r_bitmap); - buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT; - if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) { - memcpy(buf[0].recipe_bitmap, rm->r_bitmap, - sizeof(buf[0].recipe_bitmap)); - } else { - status = -EINVAL; - goto err_unroll; - } - /* Applicable only for ROOT_RECIPE, set the fwd_priority for - * the recipe which is getting created if specified - * by user. Usually any advanced switch filter, which results - * into new extraction sequence, ended up creating a new recipe - * of type ROOT and usually recipes are associated with profiles - * Switch rule referreing newly created recipe, needs to have - * either/or 'fwd' or 'join' priority, otherwise switch rule - * evaluation will not happen correctly. 
In other words, if - * switch rule to be evaluated on priority basis, then recipe - * needs to have priority, otherwise it will be evaluated last. - */ - buf[0].content.act_ctrl_fwd_priority = rm->priority; - } else { - struct ice_recp_grp_entry *last_chain_entry; - u16 rid, i; + /* Setup the non-root subrecipes. These do not contain lookups for other + * subrecipes results. Set associated recipe only to own recipe index. + * Each non-root subrecipe needs a free result index from FV. + * + * Note: only done if there is more than one recipe. + */ + for (i = 0; i < recp_cnt - 1; i++) { + struct ice_aqc_recipe_content *content; + u8 result_idx; - /* Allocate the last recipe that will chain the outcomes of the - * other recipes together - */ status = ice_alloc_recipe(hw, &rid); if (status) - goto err_unroll; + return status; - content = &buf[recps].content; + fill_recipe_template(&buf[i], rid, rm); - buf[recps].recipe_indx = (u8)rid; - content->rid = (u8)rid; - content->rid |= ICE_AQ_RECIPE_ID_IS_ROOT; - /* the new entry created should also be part of rg_list to - * make sure we have complete recipe + result_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS); + /* Check if there really is a valid result index that can be + * used. */ - last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw), - sizeof(*last_chain_entry), - GFP_KERNEL); - if (!last_chain_entry) { - status = -ENOMEM; - goto err_unroll; - } - last_chain_entry->rid = rid; - memset(&content->lkup_indx, 0, sizeof(content->lkup_indx)); - /* All recipes use look-up index 0 to match switch ID. */ - content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; - content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); - for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { - content->lkup_indx[i] = ICE_AQ_RECIPE_LKUP_IGNORE; - content->mask[i] = 0; + if (result_idx >= ICE_MAX_FV_WORDS) { + ice_debug(hw, ICE_DBG_SW, "No chain index available\n"); + return -ENOSPC; } + clear_bit(result_idx, result_idx_bm); + + content = &buf[i].content; + content->result_indx = ICE_AQ_RECIPE_RESULT_EN | + FIELD_PREP(ICE_AQ_RECIPE_RESULT_DATA_M, + result_idx); - i = 1; - /* update r_bitmap with the recp that is used for chaining */ + /* Set recipe association to be used for root recipe */ set_bit(rid, rm->r_bitmap); - /* this is the recipe that chains all the other recipes so it - * should not have a chaining ID to indicate the same - */ - last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND; - list_for_each_entry(entry, &rm->rg_list, l_entry) { - last_chain_entry->fv_idx[i] = entry->chain_idx; - content->lkup_indx[i] = entry->chain_idx; - content->mask[i++] = cpu_to_le16(0xFFFF); - set_bit(entry->rid, rm->r_bitmap); - } - list_add(&last_chain_entry->l_entry, &rm->rg_list); - if (sizeof(buf[recps].recipe_bitmap) >= - sizeof(rm->r_bitmap)) { - memcpy(buf[recps].recipe_bitmap, rm->r_bitmap, - sizeof(buf[recps].recipe_bitmap)); - } else { - status = -EINVAL; - goto err_unroll; + + word = 0; + while (lookup < rm->n_ext_words && + word < ICE_NUM_WORDS_RECIPE) { + content->lkup_indx[word] = rm->fv_idx[lookup]; + content->mask[word] = cpu_to_le16(rm->fv_mask[lookup]); + + lookup++; + word++; } - content->act_ctrl_fwd_priority = rm->priority; - recps++; - rm->root_rid = (u8)rid; + recipe = &hw->switch_info->recp_list[rid]; + set_bit(result_idx, recipe->res_idxs); + bookkeep_recipe(recipe, &buf[i], rm); } - status = ice_acquire_change_lock(hw, ICE_RES_WRITE); - if (status) - goto err_unroll; - status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL); - ice_release_change_lock(hw); + /* Setup 
the root recipe */ + status = ice_alloc_recipe(hw, &rid); if (status) - goto err_unroll; - - /* Every recipe that just got created add it to the recipe - * book keeping list - */ - list_for_each_entry(entry, &rm->rg_list, l_entry) { - struct ice_switch_info *sw = hw->switch_info; - bool is_root, idx_found = false; - struct ice_sw_recipe *recp; - u16 idx, buf_idx = 0; - - /* find buffer index for copying some data */ - for (idx = 0; idx < rm->n_grp_count; idx++) - if (buf[idx].recipe_indx == entry->rid) { - buf_idx = idx; - idx_found = true; - } + return status; - if (!idx_found) { - status = -EIO; - goto err_unroll; - } + recipe = &hw->switch_info->recp_list[rid]; + root = &buf[recp_cnt - 1]; + fill_recipe_template(root, rid, rm); - recp = &sw->recp_list[entry->rid]; - is_root = (rm->root_rid == entry->rid); - recp->is_root = is_root; + /* Set recipe association, use previously set bitmap and own rid */ + set_bit(rid, rm->r_bitmap); + memcpy(root->recipe_bitmap, rm->r_bitmap, sizeof(root->recipe_bitmap)); - recp->root_rid = entry->rid; - recp->big_recp = (is_root && rm->n_grp_count > 1); + /* For non-root recipes rid should be 0, for root it should be correct + * rid value ored with 0x80 (is root bit). + */ + root->content.rid = rid | ICE_AQ_RECIPE_ID_IS_ROOT; - memcpy(&recp->ext_words, entry->r_group.pairs, - entry->r_group.n_val_pairs * sizeof(struct ice_fv_word)); + /* Fill remaining lookups in root recipe */ + word = 0; + while (lookup < rm->n_ext_words && + word < ICE_NUM_WORDS_RECIPE /* should always be true */) { + root->content.lkup_indx[word] = rm->fv_idx[lookup]; + root->content.mask[word] = cpu_to_le16(rm->fv_mask[lookup]); - memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap, - sizeof(recp->r_bitmap)); + lookup++; + word++; + } - /* Copy non-result fv index values and masks to recipe. This - * call will also update the result recipe bitmask. + /* Fill result indexes as lookups */ + i = 0; + while (i < recp_cnt - 1 && + word < ICE_NUM_WORDS_RECIPE /* should always be true */) { + root->content.lkup_indx[word] = buf[i].content.result_indx & + ~ICE_AQ_RECIPE_RESULT_EN; + root->content.mask[word] = cpu_to_le16(0xffff); + /* For bookkeeping, it is needed to mark FV index as used for + * intermediate result. 
*/ - ice_collect_result_idx(&buf[buf_idx], recp); + set_bit(root->content.lkup_indx[word], recipe->res_idxs); - /* for non-root recipes, also copy to the root, this allows - * easier matching of a complete chained recipe - */ - if (!is_root) - ice_collect_result_idx(&buf[buf_idx], - &sw->recp_list[rm->root_rid]); - - recp->n_ext_words = entry->r_group.n_val_pairs; - recp->chain_idx = entry->chain_idx; - recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority; - recp->n_grp_count = rm->n_grp_count; - recp->tun_type = rm->tun_type; - recp->need_pass_l2 = rm->need_pass_l2; - recp->allow_pass_l2 = rm->allow_pass_l2; - recp->recp_created = true; + i++; + word++; } - rm->root_buf = buf; - kfree(tmp); - return status; -err_unroll: -err_mem: - kfree(tmp); - devm_kfree(ice_hw_to_dev(hw), buf); - return status; -} + rm->root_rid = rid; + bookkeep_recipe(&hw->switch_info->recp_list[rid], root, rm); -/** - * ice_create_recipe_group - creates recipe group - * @hw: pointer to hardware structure - * @rm: recipe management list entry - * @lkup_exts: lookup elements - */ -static int -ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm, - struct ice_prot_lkup_ext *lkup_exts) -{ - u8 recp_count = 0; - int status; - - rm->n_grp_count = 0; + /* Program the recipe */ + status = ice_acquire_change_lock(hw, ICE_RES_WRITE); + if (status) + return status; - /* Create recipes for words that are marked not done by packing them - * as best fit. - */ - status = ice_create_first_fit_recp_def(hw, lkup_exts, - &rm->rg_list, &recp_count); - if (!status) { - rm->n_grp_count += recp_count; - rm->n_ext_words = lkup_exts->n_val_words; - memcpy(&rm->ext_words, lkup_exts->fv_words, - sizeof(rm->ext_words)); - memcpy(rm->word_masks, lkup_exts->field_mask, - sizeof(rm->word_masks)); - } + status = ice_aq_add_recipe(hw, buf, recp_cnt, NULL); + ice_release_change_lock(hw); + if (status) + return status; - return status; + return 0; } /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule @@ -5268,6 +5242,9 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo, case ICE_SW_TUN_GTPC: prof_type = ICE_PROF_TUN_GTPC; break; + case ICE_SW_TUN_PFCP: + prof_type = ICE_PROF_TUN_PFCP; + break; case ICE_SW_TUN_AND_NON_TUN: default: prof_type = ICE_PROF_ALL; @@ -5278,6 +5255,49 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo, } /** + * ice_subscribe_recipe - subscribe to an existing recipe + * @hw: pointer to the hardware structure + * @rid: recipe ID to subscribe to + * + * Return: 0 on success, and others on error + */ +static int ice_subscribe_recipe(struct ice_hw *hw, u16 rid) +{ + DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1); + u16 buf_len = __struct_size(sw_buf); + u16 res_type; + int status; + + /* Prepare buffer to allocate resource */ + sw_buf->num_elems = cpu_to_le16(1); + res_type = FIELD_PREP(ICE_AQC_RES_TYPE_M, ICE_AQC_RES_TYPE_RECIPE) | + ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED | + ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_CTL; + sw_buf->res_type = cpu_to_le16(res_type); + + sw_buf->elem[0].e.sw_resp = cpu_to_le16(rid); + + status = ice_aq_alloc_free_res(hw, sw_buf, buf_len, + ice_aqc_opc_alloc_res); + + return status; +} + +/** + * ice_subscribable_recp_shared - share an existing subscribable recipe + * @hw: pointer to the hardware structure + * @rid: recipe ID to subscribe to + */ +static void ice_subscribable_recp_shared(struct ice_hw *hw, u16 rid) +{ + struct ice_sw_recipe *recps = hw->switch_info->recp_list; + u16 
sub_rid; + + for_each_set_bit(sub_rid, recps[rid].r_bitmap, ICE_MAX_NUM_RECIPES) + ice_subscribe_recipe(hw, sub_rid); +} + +/** * ice_add_adv_recipe - Add an advanced recipe that is not part of the default * @hw: pointer to hardware structure * @lkups: lookup elements or match criteria for the advanced recipe, one @@ -5293,12 +5313,11 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES); DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES); struct ice_prot_lkup_ext *lkup_exts; - struct ice_recp_grp_entry *r_entry; struct ice_sw_fv_list_entry *fvit; - struct ice_recp_grp_entry *r_tmp; struct ice_sw_fv_list_entry *tmp; struct ice_sw_recipe *rm; int status = 0; + u16 rid_tmp; u8 i; if (!lkups_cnt) @@ -5336,7 +5355,6 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, * headers being programmed. */ INIT_LIST_HEAD(&rm->fv_list); - INIT_LIST_HEAD(&rm->rg_list); /* Get bitmap of field vectors (profiles) that are compatible with the * rule request; only these will be searched in the subsequent call to @@ -5348,12 +5366,10 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, if (status) goto err_unroll; - /* Group match words into recipes using preferred recipe grouping - * criteria. - */ - status = ice_create_recipe_group(hw, rm, lkup_exts); - if (status) - goto err_unroll; + /* Copy FV words and masks from lkup_exts to recipe struct. */ + rm->n_ext_words = lkup_exts->n_val_words; + memcpy(rm->ext_words, lkup_exts->fv_words, sizeof(rm->ext_words)); + memcpy(rm->word_masks, lkup_exts->field_mask, sizeof(rm->word_masks)); /* set the recipe priority if specified */ rm->priority = (u8)rinfo->priority; @@ -5364,7 +5380,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, /* Find offsets from the field vector. Pick the first one for all the * recipes. 
*/ - status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list); + status = ice_fill_fv_word_index(hw, rm); if (status) goto err_unroll; @@ -5376,10 +5392,14 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, } /* Look for a recipe which matches our requested fv / mask list */ - *rid = ice_find_recp(hw, lkup_exts, rinfo); - if (*rid < ICE_MAX_NUM_RECIPES) + *rid = ice_find_recp(hw, lkup_exts, rinfo, true); + if (*rid < ICE_MAX_NUM_RECIPES) { /* Success if found a recipe that match the existing criteria */ + if (hw->recp_reuse) + ice_subscribable_recp_shared(hw, *rid); + goto err_unroll; + } rm->tun_type = rinfo->tun_type; /* Recipe we need does not exist, add a recipe */ @@ -5398,14 +5418,14 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id, &recp_assoc, NULL); if (status) - goto err_unroll; + goto err_free_recipe; bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES); bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap, ICE_MAX_NUM_RECIPES); status = ice_acquire_change_lock(hw, ICE_RES_WRITE); if (status) - goto err_unroll; + goto err_free_recipe; bitmap_to_arr64(&recp_assoc, r_bitmap, ICE_MAX_NUM_RECIPES); status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id, @@ -5413,7 +5433,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, ice_release_change_lock(hw); if (status) - goto err_unroll; + goto err_free_recipe; /* Update profile to recipe bitmap array */ bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap, @@ -5427,18 +5447,22 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, *rid = rm->root_rid; memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts, sizeof(*lkup_exts)); -err_unroll: - list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) { - list_del(&r_entry->l_entry); - devm_kfree(ice_hw_to_dev(hw), r_entry); + goto err_unroll; + +err_free_recipe: + if (hw->recp_reuse) { + for_each_set_bit(rid_tmp, rm->r_bitmap, ICE_MAX_NUM_RECIPES) { + if (!ice_free_recipe_res(hw, rid_tmp)) + clear_bit(rid_tmp, rm->r_bitmap); + } } +err_unroll: list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) { list_del(&fvit->list_entry); devm_kfree(ice_hw_to_dev(hw), fvit); } - devm_kfree(ice_hw_to_dev(hw), rm->root_buf); kfree(rm); err_free_lkup_exts: @@ -5552,6 +5576,9 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, case ICE_SW_TUN_VXLAN: match |= ICE_PKT_TUN_UDP; break; + case ICE_SW_TUN_PFCP: + match |= ICE_PKT_PFCP; + break; default: break; } @@ -5692,6 +5719,9 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt, case ICE_GTP: len = sizeof(struct ice_udp_gtp_hdr); break; + case ICE_PFCP: + len = sizeof(struct ice_pfcp_hdr); + break; case ICE_PPPOE: len = sizeof(struct ice_pppoe_hdr); break; @@ -5948,7 +5978,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw, /* A rule already exists with the new VSI being added */ if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) - return 0; + return -EEXIST; /* Update the previously created VSI list set with * the new VSI ID passed in @@ -6293,8 +6323,6 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id, if (!itr->vsi_list_info || !test_bit(vsi_handle, itr->vsi_list_info->vsi_map)) continue; - /* Clearing it so that the logic can add it back */ - clear_bit(vsi_handle, itr->vsi_list_info->vsi_map); f_entry.fltr_info.vsi_handle = vsi_handle; f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI; /* update the src in 
case it is VSI num */ @@ -6440,7 +6468,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, return -EIO; } - rid = ice_find_recp(hw, &lkup_exts, rinfo); + rid = ice_find_recp(hw, &lkup_exts, rinfo, false); /* If did not find a recipe that match the existing criteria */ if (rid == ICE_MAX_NUM_RECIPES) return -EINVAL; @@ -6484,14 +6512,21 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, ice_aqc_opc_remove_sw_rules, NULL); if (!status || status == -ENOENT) { struct ice_switch_info *sw = hw->switch_info; + struct ice_sw_recipe *r_list = sw->recp_list; mutex_lock(rule_lock); list_del(&list_elem->list_entry); devm_kfree(ice_hw_to_dev(hw), list_elem->lkups); devm_kfree(ice_hw_to_dev(hw), list_elem); mutex_unlock(rule_lock); - if (list_empty(&sw->recp_list[rid].filt_rules)) - sw->recp_list[rid].adv_rule = false; + if (list_empty(&r_list[rid].filt_rules)) { + r_list[rid].adv_rule = false; + + /* All rules for this recipe are now removed */ + if (hw->recp_reuse) + ice_release_recipe_res(hw, + &r_list[rid]); + } } kfree(s_rule); } diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index 89ffa1b51b5a..671d7a5f359f 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -8,8 +8,9 @@ #define ICE_SW_CFG_MAX_BUF_LEN 2048 #define ICE_DFLT_VSI_INVAL 0xff -#define ICE_FLTR_RX BIT(0) -#define ICE_FLTR_TX BIT(1) +#define ICE_FLTR_RX BIT(0) +#define ICE_FLTR_TX BIT(1) +#define ICE_FLTR_TX_ONLY BIT(2) #define ICE_VSI_INVAL_ID 0xffff #define ICE_INVAL_Q_HANDLE 0xFFFF @@ -21,6 +22,8 @@ #define ICE_PROFID_IPV6_GTPC_NO_TEID 45 #define ICE_PROFID_IPV6_GTPU_TEID 46 #define ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER 70 +#define ICE_PROFID_IPV4_PFCP_NODE 79 +#define ICE_PROFID_IPV6_PFCP_SESSION 82 #define ICE_SW_RULE_VSI_LIST_SIZE(s, n) struct_size((s), vsi, (n)) #define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l) struct_size((s), hdr_data, (l)) @@ -213,7 +216,6 @@ struct ice_sw_recipe { /* For a chained recipe the root recipe is what should be used for * programming rules */ - u8 is_root; u8 root_rid; u8 recp_created; @@ -224,19 +226,8 @@ struct ice_sw_recipe { */ struct ice_fv_word ext_words[ICE_MAX_CHAIN_WORDS]; u16 word_masks[ICE_MAX_CHAIN_WORDS]; - - /* if this recipe is a collection of other recipe */ - u8 big_recp; - - /* if this recipe is part of another bigger recipe then chain index - * corresponding to this recipe - */ - u8 chain_idx; - - /* if this recipe is a collection of other recipe then count of other - * recipes and recipe IDs of those recipes - */ - u8 n_grp_count; + u8 fv_idx[ICE_MAX_CHAIN_WORDS]; + u16 fv_mask[ICE_MAX_CHAIN_WORDS]; /* Bit map specifying the IDs associated with this group of recipe */ DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); @@ -269,10 +260,6 @@ struct ice_sw_recipe { u8 need_pass_l2:1; u8 allow_pass_l2:1; - struct list_head rg_list; - - /* AQ buffer associated with this recipe */ - struct ice_aqc_recipe_data_elem *root_buf; /* This struct saves the fv_words for a given lookup */ struct ice_prot_lkup_ext lkup_exts; }; @@ -429,5 +416,6 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc, int ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc, struct ice_sq_cd *cd); +void ice_init_chk_recipe_reuse_support(struct ice_hw *hw); #endif /* _ICE_SWITCH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index 688ccb0615ab..fb9ea7f8ef44 100644 --- 
a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -12,14 +12,11 @@ /** * ice_tc_count_lkups - determine lookup count for switch filter * @flags: TC-flower flags - * @headers: Pointer to TC flower filter header structure * @fltr: Pointer to outer TC filter structure * - * Determine lookup count based on TC flower input for switch filter. + * Return: lookup count based on TC flower input for a switch filter. */ -static int -ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers, - struct ice_tc_flower_fltr *fltr) +static int ice_tc_count_lkups(u32 flags, struct ice_tc_flower_fltr *fltr) { int lkups_cnt = 1; /* 0th lookup is metadata */ @@ -37,7 +34,10 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers, if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) lkups_cnt++; - if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS) + if (flags & ICE_TC_FLWR_FIELD_GTP_OPTS) + lkups_cnt++; + + if (flags & ICE_TC_FLWR_FIELD_PFCP_OPTS) lkups_cnt++; if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 | @@ -140,6 +140,8 @@ ice_proto_type_from_tunnel(enum ice_tunnel_type type) return ICE_GTP; case TNL_GTPC: return ICE_GTP_NO_PAY; + case TNL_PFCP: + return ICE_PFCP; default: return 0; } @@ -159,6 +161,8 @@ ice_sw_type_from_tunnel(enum ice_tunnel_type type) return ICE_SW_TUN_GTPU; case TNL_GTPC: return ICE_SW_TUN_GTPC; + case TNL_PFCP: + return ICE_SW_TUN_PFCP; default: return ICE_NON_TUN; } @@ -221,8 +225,7 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr, i++; } - if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS && - (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) { + if (flags & ICE_TC_FLWR_FIELD_GTP_OPTS) { list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type); if (fltr->gtp_pdu_info_masks.pdu_type) { @@ -239,6 +242,22 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr, i++; } + if (flags & ICE_TC_FLWR_FIELD_PFCP_OPTS) { + struct ice_pfcp_hdr *hdr_h, *hdr_m; + + hdr_h = &list[i].h_u.pfcp_hdr; + hdr_m = &list[i].m_u.pfcp_hdr; + list[i].type = ICE_PFCP; + + hdr_h->flags = fltr->pfcp_meta_keys.type; + hdr_m->flags = fltr->pfcp_meta_masks.type & 0x01; + + hdr_h->seid = fltr->pfcp_meta_keys.seid; + hdr_m->seid = fltr->pfcp_meta_masks.seid; + + i++; + } + if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 | ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) { list[i].type = ice_proto_type_from_ipv4(false); @@ -374,8 +393,11 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags, if (tc_fltr->tunnel_type != TNL_LAST) { i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i); - headers = &tc_fltr->inner_headers; - inner = true; + /* PFCP is considered non-tunneled - don't swap headers. 
*/ + if (tc_fltr->tunnel_type != TNL_PFCP) { + headers = &tc_fltr->inner_headers; + inner = true; + } } if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) { @@ -629,6 +651,8 @@ static int ice_tc_tun_get_type(struct net_device *tunnel_dev) */ if (netif_is_gtp(tunnel_dev)) return TNL_GTPU; + if (netif_is_pfcp(tunnel_dev)) + return TNL_PFCP; return TNL_LAST; } @@ -642,35 +666,41 @@ static bool ice_tc_is_dev_uplink(struct net_device *dev) return netif_is_ice(dev) || ice_is_tunnel_supported(dev); } -static int ice_tc_setup_redirect_action(struct net_device *filter_dev, - struct ice_tc_flower_fltr *fltr, - struct net_device *target_dev) +static int ice_tc_setup_action(struct net_device *filter_dev, + struct ice_tc_flower_fltr *fltr, + struct net_device *target_dev, + enum ice_sw_fwd_act_type action) { struct ice_repr *repr; - fltr->action.fltr_act = ICE_FWD_TO_VSI; + if (action != ICE_FWD_TO_VSI && action != ICE_MIRROR_PACKET) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action to setup provided"); + return -EINVAL; + } + + fltr->action.fltr_act = action; if (ice_is_port_repr_netdev(filter_dev) && - ice_is_port_repr_netdev(target_dev)) { + ice_is_port_repr_netdev(target_dev) && + fltr->direction == ICE_ESWITCH_FLTR_EGRESS) { repr = ice_netdev_to_repr(target_dev); fltr->dest_vsi = repr->src_vsi; - fltr->direction = ICE_ESWITCH_FLTR_EGRESS; } else if (ice_is_port_repr_netdev(filter_dev) && - ice_tc_is_dev_uplink(target_dev)) { + ice_tc_is_dev_uplink(target_dev) && + fltr->direction == ICE_ESWITCH_FLTR_EGRESS) { repr = ice_netdev_to_repr(filter_dev); fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi; - fltr->direction = ICE_ESWITCH_FLTR_EGRESS; } else if (ice_tc_is_dev_uplink(filter_dev) && - ice_is_port_repr_netdev(target_dev)) { + ice_is_port_repr_netdev(target_dev) && + fltr->direction == ICE_ESWITCH_FLTR_INGRESS) { repr = ice_netdev_to_repr(target_dev); fltr->dest_vsi = repr->src_vsi; - fltr->direction = ICE_ESWITCH_FLTR_INGRESS; } else { NL_SET_ERR_MSG_MOD(fltr->extack, - "Unsupported netdevice in switchdev mode"); + "The action is not supported for this netdevice"); return -EINVAL; } @@ -683,48 +713,11 @@ ice_tc_setup_drop_action(struct net_device *filter_dev, { fltr->action.fltr_act = ICE_DROP_PACKET; - if (ice_is_port_repr_netdev(filter_dev)) { - fltr->direction = ICE_ESWITCH_FLTR_EGRESS; - } else if (ice_tc_is_dev_uplink(filter_dev)) { - fltr->direction = ICE_ESWITCH_FLTR_INGRESS; - } else { + if (!ice_tc_is_dev_uplink(filter_dev) && + !(ice_is_port_repr_netdev(filter_dev) && + fltr->direction == ICE_ESWITCH_FLTR_INGRESS)) { NL_SET_ERR_MSG_MOD(fltr->extack, - "Unsupported netdevice in switchdev mode"); - return -EINVAL; - } - - return 0; -} - -static int ice_tc_setup_mirror_action(struct net_device *filter_dev, - struct ice_tc_flower_fltr *fltr, - struct net_device *target_dev) -{ - struct ice_repr *repr; - - fltr->action.fltr_act = ICE_MIRROR_PACKET; - - if (ice_is_port_repr_netdev(filter_dev) && - ice_is_port_repr_netdev(target_dev)) { - repr = ice_netdev_to_repr(target_dev); - - fltr->dest_vsi = repr->src_vsi; - fltr->direction = ICE_ESWITCH_FLTR_EGRESS; - } else if (ice_is_port_repr_netdev(filter_dev) && - ice_tc_is_dev_uplink(target_dev)) { - repr = ice_netdev_to_repr(filter_dev); - - fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi; - fltr->direction = ICE_ESWITCH_FLTR_EGRESS; - } else if (ice_tc_is_dev_uplink(filter_dev) && - ice_is_port_repr_netdev(target_dev)) { - repr = ice_netdev_to_repr(target_dev); - - fltr->dest_vsi = repr->src_vsi; - fltr->direction = 
ICE_ESWITCH_FLTR_INGRESS; - } else { - NL_SET_ERR_MSG_MOD(fltr->extack, - "Unsupported netdevice in switchdev mode"); + "The action is not supported for this netdevice"); return -EINVAL; } @@ -746,16 +739,19 @@ static int ice_eswitch_tc_parse_action(struct net_device *filter_dev, break; case FLOW_ACTION_REDIRECT: - err = ice_tc_setup_redirect_action(filter_dev, fltr, act->dev); + err = ice_tc_setup_action(filter_dev, fltr, + act->dev, ICE_FWD_TO_VSI); if (err) return err; break; case FLOW_ACTION_MIRRED: - err = ice_tc_setup_mirror_action(filter_dev, fltr, act->dev); + err = ice_tc_setup_action(filter_dev, fltr, + act->dev, ICE_MIRROR_PACKET); if (err) return err; + break; default: @@ -766,10 +762,157 @@ static int ice_eswitch_tc_parse_action(struct net_device *filter_dev, return 0; } +static bool ice_is_fltr_lldp(struct ice_tc_flower_fltr *fltr) +{ + return fltr->outer_headers.l2_key.n_proto == htons(ETH_P_LLDP); +} + +static bool ice_is_fltr_pf_tx_lldp(struct ice_tc_flower_fltr *fltr) +{ + struct ice_vsi *vsi = fltr->src_vsi, *uplink; + + if (!ice_is_switchdev_running(vsi->back)) + return false; + + uplink = vsi->back->eswitch.uplink_vsi; + return vsi == uplink && fltr->action.fltr_act == ICE_DROP_PACKET && + ice_is_fltr_lldp(fltr) && + fltr->direction == ICE_ESWITCH_FLTR_EGRESS && + fltr->flags == ICE_TC_FLWR_FIELD_ETH_TYPE_ID; +} + +static bool ice_is_fltr_vf_tx_lldp(struct ice_tc_flower_fltr *fltr) +{ + struct ice_vsi *vsi = fltr->src_vsi, *uplink; + + uplink = vsi->back->eswitch.uplink_vsi; + return fltr->src_vsi->type == ICE_VSI_VF && ice_is_fltr_lldp(fltr) && + fltr->direction == ICE_ESWITCH_FLTR_EGRESS && + fltr->dest_vsi == uplink; +} + +static struct ice_tc_flower_fltr * +ice_find_pf_tx_lldp_fltr(struct ice_pf *pf) +{ + struct ice_tc_flower_fltr *fltr; + + hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node) + if (ice_is_fltr_pf_tx_lldp(fltr)) + return fltr; + + return NULL; +} + +static bool ice_any_vf_lldp_tx_ena(struct ice_pf *pf) +{ + struct ice_vf *vf; + unsigned int bkt; + + ice_for_each_vf(pf, bkt, vf) + if (vf->lldp_tx_ena) + return true; + + return false; +} + +int ice_pass_vf_tx_lldp(struct ice_vsi *vsi, bool deinit) +{ + struct ice_rule_query_data remove_entry = { + .rid = vsi->vf->lldp_recipe_id, + .rule_id = vsi->vf->lldp_rule_id, + .vsi_handle = vsi->idx, + }; + struct ice_pf *pf = vsi->back; + int err; + + if (vsi->vf->lldp_tx_ena) + return 0; + + if (!deinit && !ice_find_pf_tx_lldp_fltr(vsi->back)) + return -EINVAL; + + if (!deinit && ice_any_vf_lldp_tx_ena(pf)) + return -EINVAL; + + err = ice_rem_adv_rule_by_id(&pf->hw, &remove_entry); + if (!err) + vsi->vf->lldp_tx_ena = true; + + return err; +} + +int ice_drop_vf_tx_lldp(struct ice_vsi *vsi, bool init) +{ + struct ice_rule_query_data rule_added; + struct ice_adv_rule_info rinfo = { + .priority = 7, + .src_vsi = vsi->idx, + .sw_act = { + .src = vsi->idx, + .flag = ICE_FLTR_TX, + .fltr_act = ICE_DROP_PACKET, + .vsi_handle = vsi->idx, + }, + .flags_info.act_valid = true, + }; + struct ice_adv_lkup_elem list[3]; + struct ice_pf *pf = vsi->back; + int err; + + if (!init && !vsi->vf->lldp_tx_ena) + return 0; + + memset(list, 0, sizeof(list)); + ice_rule_add_direction_metadata(&list[0]); + ice_rule_add_src_vsi_metadata(&list[1]); + list[2].type = ICE_ETYPE_OL; + list[2].h_u.ethertype.ethtype_id = htons(ETH_P_LLDP); + list[2].m_u.ethertype.ethtype_id = htons(0xFFFF); + + err = ice_add_adv_rule(&pf->hw, list, ARRAY_SIZE(list), &rinfo, + &rule_added); + if (err) { + dev_err(&pf->pdev->dev, + "Failed to 
add an LLDP rule to VSI 0x%X: %d\n", + vsi->idx, err); + } else { + vsi->vf->lldp_recipe_id = rule_added.rid; + vsi->vf->lldp_rule_id = rule_added.rule_id; + vsi->vf->lldp_tx_ena = false; + } + + return err; +} + +static void ice_handle_add_pf_lldp_drop_rule(struct ice_vsi *vsi) +{ + struct ice_tc_flower_fltr *fltr; + struct ice_pf *pf = vsi->back; + + hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node) { + if (!ice_is_fltr_vf_tx_lldp(fltr)) + continue; + ice_pass_vf_tx_lldp(fltr->src_vsi, true); + break; + } +} + +static void ice_handle_del_pf_lldp_drop_rule(struct ice_pf *pf) +{ + int i; + + /* Make the VF LLDP fwd to uplink rule dormant */ + ice_for_each_vsi(pf, i) { + struct ice_vsi *vf_vsi = pf->vsi[i]; + + if (vf_vsi && vf_vsi->type == ICE_VSI_VF) + ice_drop_vf_tx_lldp(vf_vsi, false); + } +} + static int ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) { - struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers; struct ice_adv_rule_info rule_info = { 0 }; struct ice_rule_query_data rule_added; struct ice_hw *hw = &vsi->back->hw; @@ -784,7 +927,10 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) return -EOPNOTSUPP; } - lkups_cnt = ice_tc_count_lkups(flags, headers, fltr); + if (ice_is_fltr_vf_tx_lldp(fltr)) + return ice_pass_vf_tx_lldp(vsi, false); + + lkups_cnt = ice_tc_count_lkups(flags, fltr); list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); if (!list) return -ENOMEM; @@ -813,11 +959,27 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) rule_info.sw_act.src = hw->pf_id; rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE; } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS && + !fltr->dest_vsi && vsi == vsi->back->eswitch.uplink_vsi) { + /* PF to Uplink */ + rule_info.sw_act.flag |= ICE_FLTR_TX; + rule_info.sw_act.src = vsi->idx; + } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS && fltr->dest_vsi == vsi->back->eswitch.uplink_vsi) { /* VF to Uplink */ rule_info.sw_act.flag |= ICE_FLTR_TX; rule_info.sw_act.src = vsi->idx; rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE; + /* This is a specific case. The destination VSI index is + * overwritten by the source VSI index. This type of filter + * should allow the packet to go to the LAN, not to the + * VSI passed here. It should set LAN_EN bit only. However, + * the VSI must be a valid one. Setting source VSI index + * here is safe. Even if the result from switch is set LAN_EN + * and LB_EN (which normally will pass the packet to this VSI) + * packet won't be seen on the VSI, because local loopback is + * turned off. 
+ */ + rule_info.sw_act.vsi_handle = vsi->idx; } else { /* VF to VF */ rule_info.sw_act.flag |= ICE_FLTR_TX; @@ -834,11 +996,17 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exist"); ret = -EINVAL; goto exit; + } else if (ret == -ENOSPC) { + NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter: insufficient space available."); + goto exit; } else if (ret) { NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error"); goto exit; } + if (ice_is_fltr_pf_tx_lldp(fltr)) + ice_handle_add_pf_lldp_drop_rule(vsi); + /* store the output params, which are needed later for removing * advanced switch filter */ @@ -973,7 +1141,6 @@ static int ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr) { - struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers; struct ice_adv_rule_info rule_info = {0}; struct ice_rule_query_data rule_added; struct ice_adv_lkup_elem *list; @@ -1009,7 +1176,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, return PTR_ERR(dest_vsi); } - lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr); + lkups_cnt = ice_tc_count_lkups(flags, tc_fltr); list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC); if (!list) return -ENOMEM; @@ -1044,8 +1211,13 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, tc_fltr->action.fwd.q.hw_queue, lkups_cnt); break; case ICE_DROP_PACKET: - rule_info.sw_act.flag |= ICE_FLTR_RX; - rule_info.sw_act.src = hw->pf_id; + if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) { + rule_info.sw_act.flag |= ICE_FLTR_TX; + rule_info.sw_act.src = vsi->idx; + } else { + rule_info.sw_act.flag |= ICE_FLTR_RX; + rule_info.sw_act.src = hw->pf_id; + } rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI; break; default: @@ -1059,6 +1231,10 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi, "Unable to add filter because it already exist"); ret = -EINVAL; goto exit; + } else if (ret == -ENOSPC) { + NL_SET_ERR_MSG_MOD(tc_fltr->extack, + "Unable to add filter: insufficient space available."); + goto exit; } else if (ret) { NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter due to error"); @@ -1352,6 +1528,7 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule, struct ice_tc_flower_fltr *fltr) { struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers; + struct netlink_ext_ack *extack = fltr->extack; struct flow_match_control enc_control; fltr->tunnel_type = ice_tc_tun_get_type(dev); @@ -1372,6 +1549,9 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule, flow_rule_match_enc_control(rule, &enc_control); + if (flow_rule_has_enc_control_flags(enc_control.mask->flags, extack)) + return -EOPNOTSUPP; + if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { struct flow_match_ipv4_addrs match; @@ -1409,7 +1589,8 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule, } } - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) { + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS) && + (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) { struct flow_match_enc_opts match; flow_rule_match_enc_opts(rule, &match); @@ -1420,7 +1601,21 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule, memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0], sizeof(struct gtp_pdu_session_info)); - fltr->flags |= ICE_TC_FLWR_FIELD_ENC_OPTS; + fltr->flags |= ICE_TC_FLWR_FIELD_GTP_OPTS; + } + + if (flow_rule_match_key(rule, 
FLOW_DISSECTOR_KEY_ENC_OPTS) && + fltr->tunnel_type == TNL_PFCP) { + struct flow_match_enc_opts match; + + flow_rule_match_enc_opts(rule, &match); + + memcpy(&fltr->pfcp_meta_keys, match.key->data, + sizeof(struct pfcp_metadata)); + memcpy(&fltr->pfcp_meta_masks, match.mask->data, + sizeof(struct pfcp_metadata)); + + fltr->flags |= ICE_TC_FLWR_FIELD_PFCP_OPTS; } return 0; @@ -1432,11 +1627,16 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule, * @filter_dev: Pointer to device on which filter is being added * @f: Pointer to struct flow_cls_offload * @fltr: Pointer to filter structure + * @ingress: if the rule is added to an ingress block + * + * Return: 0 if the flower was parsed successfully, -EINVAL if the flower + * cannot be parsed, -EOPNOTSUPP if such filter cannot be configured + * for the given VSI. */ static int ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, struct flow_cls_offload *f, - struct ice_tc_flower_fltr *fltr) + struct ice_tc_flower_fltr *fltr, bool ingress) { struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers; struct flow_rule *rule = flow_cls_offload_flow_rule(f); @@ -1481,10 +1681,14 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, return err; } - /* header pointers should point to the inner headers, outer - * header were already set by ice_parse_tunnel_attr - */ - headers = &fltr->inner_headers; + /* PFCP is considered non-tunneled - don't swap headers. */ + if (fltr->tunnel_type != TNL_PFCP) { + /* Header pointers should point to the inner headers, + * outer header were already set by + * ice_parse_tunnel_attr(). + */ + headers = &fltr->inner_headers; + } } else if (dissector->used_keys & (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | @@ -1516,6 +1720,20 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID; } + if (!ingress) { + bool switchdev = + ice_is_eswitch_mode_switchdev(vsi->back); + + if (switchdev != (n_proto_key == ETH_P_LLDP)) { + NL_SET_ERR_MSG_FMT_MOD(fltr->extack, + "%sLLDP filtering is not supported on egress in %s mode", + switchdev ? "Non-" : "", + switchdev ? "switchdev" : + "legacy"); + return -EOPNOTSUPP; + } + } + headers->l2_key.n_proto = cpu_to_be16(n_proto_key); headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask); headers->l3_key.ip_proto = match.key->ip_proto; @@ -1638,6 +1856,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, flow_rule_match_control(rule, &match); addr_type = match.key->addr_type; + + if (flow_rule_has_control_flags(match.mask->flags, + fltr->extack)) + return -EOPNOTSUPP; } if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { @@ -1687,6 +1909,14 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, return -EINVAL; } } + + /* Ingress filter on representor results in an egress filter in HW + * and vice versa + */ + ingress = ice_is_port_repr_netdev(filter_dev) ? !ingress : ingress; + fltr->direction = ingress ? 
ICE_ESWITCH_FLTR_INGRESS : + ICE_ESWITCH_FLTR_EGRESS; + return 0; } @@ -1900,6 +2130,12 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) struct ice_pf *pf = vsi->back; int err; + if (ice_is_fltr_pf_tx_lldp(fltr)) + ice_handle_del_pf_lldp_drop_rule(pf); + + if (ice_is_fltr_vf_tx_lldp(fltr)) + return ice_drop_vf_tx_lldp(vsi, false); + rule_rem.rid = fltr->rid; rule_rem.rule_id = fltr->rule_id; rule_rem.vsi_handle = fltr->dest_vsi_handle; @@ -1936,14 +2172,18 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) * @vsi: Pointer to VSI * @f: Pointer to flower offload structure * @__fltr: Pointer to struct ice_tc_flower_fltr + * @ingress: if the rule is added to an ingress block * * This function parses TC-flower input fields, parses action, * and adds a filter. + * + * Return: 0 if the filter was successfully added, + * negative error code otherwise. */ static int ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi, struct flow_cls_offload *f, - struct ice_tc_flower_fltr **__fltr) + struct ice_tc_flower_fltr **__fltr, bool ingress) { struct ice_tc_flower_fltr *fltr; int err; @@ -1960,7 +2200,7 @@ ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi, fltr->src_vsi = vsi; INIT_HLIST_NODE(&fltr->tc_flower_node); - err = ice_parse_cls_flower(netdev, vsi, f, fltr); + err = ice_parse_cls_flower(netdev, vsi, f, fltr, ingress); if (err < 0) goto err; @@ -2003,10 +2243,13 @@ ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie) * @netdev: Pointer to filter device * @vsi: Pointer to VSI * @cls_flower: Pointer to flower offload structure + * @ingress: if the rule is added to an ingress block + * + * Return: 0 if the flower was successfully added, + * negative error code otherwise. 
*/ -int -ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, - struct flow_cls_offload *cls_flower) +int ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, + struct flow_cls_offload *cls_flower, bool ingress) { struct netlink_ext_ack *extack = cls_flower->common.extack; struct net_device *vsi_netdev = vsi->netdev; @@ -2041,7 +2284,7 @@ ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, } /* prep and add TC-flower filter in HW */ - err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr); + err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr, ingress); if (err) return err; diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h index 65d387163a46..8a3ab2f22af9 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h @@ -4,6 +4,9 @@ #ifndef _ICE_TC_LIB_H_ #define _ICE_TC_LIB_H_ +#include <linux/bits.h> +#include <net/pfcp.h> + #define ICE_TC_FLWR_FIELD_DST_MAC BIT(0) #define ICE_TC_FLWR_FIELD_SRC_MAC BIT(1) #define ICE_TC_FLWR_FIELD_VLAN BIT(2) @@ -22,7 +25,7 @@ #define ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT BIT(15) #define ICE_TC_FLWR_FIELD_ENC_DST_MAC BIT(16) #define ICE_TC_FLWR_FIELD_ETH_TYPE_ID BIT(17) -#define ICE_TC_FLWR_FIELD_ENC_OPTS BIT(18) +#define ICE_TC_FLWR_FIELD_GTP_OPTS BIT(18) #define ICE_TC_FLWR_FIELD_CVLAN BIT(19) #define ICE_TC_FLWR_FIELD_PPPOE_SESSID BIT(20) #define ICE_TC_FLWR_FIELD_PPP_PROTO BIT(21) @@ -34,6 +37,7 @@ #define ICE_TC_FLWR_FIELD_VLAN_PRIO BIT(27) #define ICE_TC_FLWR_FIELD_CVLAN_PRIO BIT(28) #define ICE_TC_FLWR_FIELD_VLAN_TPID BIT(29) +#define ICE_TC_FLWR_FIELD_PFCP_OPTS BIT(30) #define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF @@ -161,6 +165,8 @@ struct ice_tc_flower_fltr { __be32 tenant_id; struct gtp_pdu_session_info gtp_pdu_info_keys; struct gtp_pdu_session_info gtp_pdu_info_masks; + struct pfcp_metadata pfcp_meta_keys; + struct pfcp_metadata pfcp_meta_masks; u32 flags; u8 tunnel_type; struct ice_tc_flower_action action; @@ -205,13 +211,14 @@ static inline int ice_chnl_dmac_fltr_cnt(struct ice_pf *pf) } struct ice_vsi *ice_locate_vsi_using_queue(struct ice_vsi *vsi, int queue); -int -ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, - struct flow_cls_offload *cls_flower); -int -ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower); +int ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi, + struct flow_cls_offload *cls_flower, bool ingress); +int ice_del_cls_flower(struct ice_vsi *vsi, + struct flow_cls_offload *cls_flower); void ice_replay_tc_fltrs(struct ice_pf *pf); bool ice_is_tunnel_supported(struct net_device *dev); +int ice_drop_vf_tx_lldp(struct ice_vsi *vsi, bool init); +int ice_pass_vf_tx_lldp(struct ice_vsi *vsi, bool deinit); static inline bool ice_is_forward_action(enum ice_sw_fwd_act_type fltr_act) { diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h index b2f5c9fe0149..07aab6e130cd 100644 --- a/drivers/net/ethernet/intel/ice/ice_trace.h +++ b/drivers/net/ethernet/intel/ice/ice_trace.h @@ -69,7 +69,7 @@ DECLARE_EVENT_CLASS(ice_rx_dim_template, TP_fast_assign(__entry->q_vector = q_vector; __entry->dim = dim; - __assign_str(devname, q_vector->rx.rx_ring->netdev->name);), + __assign_str(devname);), TP_printk("netdev: %s Rx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d", __get_str(devname), @@ -96,7 +96,7 @@ DECLARE_EVENT_CLASS(ice_tx_dim_template, 
TP_fast_assign(__entry->q_vector = q_vector; __entry->dim = dim; - __assign_str(devname, q_vector->tx.tx_ring->netdev->name);), + __assign_str(devname);), TP_printk("netdev: %s Tx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d", __get_str(devname), @@ -128,7 +128,7 @@ DECLARE_EVENT_CLASS(ice_tx_template, TP_fast_assign(__entry->ring = ring; __entry->desc = desc; __entry->buf = buf; - __assign_str(devname, ring->netdev->name);), + __assign_str(devname);), TP_printk("netdev: %s ring: %pK desc: %pK buf %pK", __get_str(devname), __entry->ring, __entry->desc, __entry->buf) @@ -156,7 +156,7 @@ DECLARE_EVENT_CLASS(ice_rx_template, TP_fast_assign(__entry->ring = ring; __entry->desc = desc; - __assign_str(devname, ring->netdev->name);), + __assign_str(devname);), TP_printk("netdev: %s ring: %pK desc: %pK", __get_str(devname), __entry->ring, __entry->desc) @@ -180,7 +180,7 @@ DECLARE_EVENT_CLASS(ice_rx_indicate_template, TP_fast_assign(__entry->ring = ring; __entry->desc = desc; __entry->skb = skb; - __assign_str(devname, ring->netdev->name);), + __assign_str(devname);), TP_printk("netdev: %s ring: %pK desc: %pK skb %pK", __get_str(devname), __entry->ring, __entry->desc, __entry->skb) @@ -203,7 +203,7 @@ DECLARE_EVENT_CLASS(ice_xmit_template, TP_fast_assign(__entry->ring = ring; __entry->skb = skb; - __assign_str(devname, ring->netdev->name);), + __assign_str(devname);), TP_printk("netdev: %s skb: %pK ring: %pK", __get_str(devname), __entry->skb, __entry->ring) @@ -330,6 +330,24 @@ DEFINE_EVENT(ice_esw_br_port_template, TP_ARGS(port) ); +DECLARE_EVENT_CLASS(ice_switch_stats_template, + TP_PROTO(struct ice_switch_info *sw_info), + TP_ARGS(sw_info), + TP_STRUCT__entry(__field(u16, rule_cnt) + __field(u8, recp_cnt)), + TP_fast_assign(__entry->rule_cnt = sw_info->rule_cnt; + __entry->recp_cnt = sw_info->recp_cnt;), + TP_printk("rules=%u recipes=%u", + __entry->rule_cnt, + __entry->recp_cnt) +); + +DEFINE_EVENT(ice_switch_stats_template, + ice_aq_sw_rules, + TP_PROTO(struct ice_switch_info *sw_info), + TP_ARGS(sw_info) +); + /* End tracepoints */ #endif /* _ICE_TRACE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 97d41d6ebf1f..0e5107fe62ad 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -456,7 +456,7 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring) if (rx_ring->vsi->type == ICE_VSI_PF) if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) xdp_rxq_info_unreg(&rx_ring->xdp_rxq); - rx_ring->xdp_prog = NULL; + WRITE_ONCE(rx_ring->xdp_prog, NULL); if (rx_ring->xsk_pool) { kfree(rx_ring->xdp_buf); rx_ring->xdp_buf = NULL; @@ -522,44 +522,19 @@ err: } /** - * ice_rx_frame_truesize - * @rx_ring: ptr to Rx ring - * @size: size - * - * calculate the truesize with taking into the account PAGE_SIZE of - * underlying arch - */ -static unsigned int -ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size) -{ - unsigned int truesize; - -#if (PAGE_SIZE < 8192) - truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ -#else - truesize = rx_ring->rx_offset ? 
- SKB_DATA_ALIGN(rx_ring->rx_offset + size) + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) : - SKB_DATA_ALIGN(size); -#endif - return truesize; -} - -/** * ice_run_xdp - Executes an XDP program on initialized xdp_buff * @rx_ring: Rx ring * @xdp: xdp_buff used as input to the XDP program * @xdp_prog: XDP program to run * @xdp_ring: ring to be used for XDP_TX action - * @rx_buf: Rx buffer to store the XDP action * @eop_desc: Last descriptor in packet to read metadata from * * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR} */ -static void +static u32 ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring, - struct ice_rx_buf *rx_buf, union ice_32b_rx_flex_desc *eop_desc) + union ice_32b_rx_flex_desc *eop_desc) { unsigned int ret = ICE_XDP_PASS; u32 act; @@ -598,7 +573,7 @@ out_failure: ret = ICE_XDP_CONSUMED; } exit: - ice_set_rx_bufs_act(xdp, rx_ring, ret); + return ret; } /** @@ -837,16 +812,15 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf) if (!dev_page_is_reusable(page)) return false; -#if (PAGE_SIZE < 8192) /* if we are only owner of page we can reuse it */ if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1)) return false; -#else +#if (PAGE_SIZE >= 8192) #define ICE_LAST_OFFSET \ - (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048) + (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_3072) if (rx_buf->page_offset > ICE_LAST_OFFSET) return false; -#endif /* PAGE_SIZE < 8192) */ +#endif /* PAGE_SIZE >= 8192) */ /* If we have drained the page fragment pool we need to update * the pagecnt_bias and page count so that we fully restock the @@ -885,10 +859,8 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, xdp_buff_set_frags_flag(xdp); } - if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) { - ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED); + if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) return -ENOMEM; - } __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page, rx_buf->page_offset, size); @@ -949,12 +921,6 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size, struct ice_rx_buf *rx_buf; rx_buf = &rx_ring->rx_buf[ntc]; - rx_buf->pgcnt = -#if (PAGE_SIZE < 8192) - page_count(rx_buf->page); -#else - 0; -#endif prefetchw(rx_buf->page); if (!size) @@ -971,6 +937,31 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size, } /** + * ice_get_pgcnts - grab page_count() for gathered fragments + * @rx_ring: Rx descriptor ring to store the page counts on + * + * This function is intended to be called right before running XDP + * program so that the page recycling mechanism will be able to take + * a correct decision regarding underlying pages; this is done in such + * way as XDP program can change the refcount of page + */ +static void ice_get_pgcnts(struct ice_rx_ring *rx_ring) +{ + u32 nr_frags = rx_ring->nr_frags + 1; + u32 idx = rx_ring->first_desc; + struct ice_rx_buf *rx_buf; + u32 cnt = rx_ring->count; + + for (int i = 0; i < nr_frags; i++) { + rx_buf = &rx_ring->rx_buf[idx]; + rx_buf->pgcnt = page_count(rx_buf->page); + + if (++idx == cnt) + idx = 0; + } +} + +/** * ice_build_skb - Build skb around an existing buffer * @rx_ring: Rx descriptor ring to transact packets on * @xdp: xdp_buff pointing to the data @@ -1051,8 +1042,7 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) } /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE, - GFP_ATOMIC | __GFP_NOWARN); + skb = 
napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE); if (unlikely(!skb)) return NULL; @@ -1082,12 +1072,12 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) rx_buf->page_offset + headlen, size, xdp->frame_sz); } else { - /* buffer is unused, change the act that should be taken later - * on; data was copied onto skb's linear part so there's no + /* buffer is unused, restore biased page count in Rx buffer; + * data was copied onto skb's linear part so there's no * need for adjusting page offset and we can reuse this buffer * as-is */ - rx_buf->act = ICE_SKB_CONSUMED; + rx_buf->pagecnt_bias++; } if (unlikely(xdp_buff_has_frags(xdp))) { @@ -1135,6 +1125,65 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf) } /** + * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all frame frags + * @rx_ring: Rx ring with all the auxiliary data + * @xdp: XDP buffer carrying linear + frags part + * @xdp_xmit: XDP_TX/XDP_REDIRECT verdict storage + * @ntc: a current next_to_clean value to be stored at rx_ring + * @verdict: return code from XDP program execution + * + * Walk through gathered fragments and satisfy internal page + * recycle mechanism; we take here an action related to verdict + * returned by XDP program; + */ +static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, + u32 *xdp_xmit, u32 ntc, u32 verdict) +{ + u32 nr_frags = rx_ring->nr_frags + 1; + u32 idx = rx_ring->first_desc; + u32 cnt = rx_ring->count; + u32 post_xdp_frags = 1; + struct ice_rx_buf *buf; + int i; + + if (unlikely(xdp_buff_has_frags(xdp))) + post_xdp_frags += xdp_get_shared_info_from_buff(xdp)->nr_frags; + + for (i = 0; i < post_xdp_frags; i++) { + buf = &rx_ring->rx_buf[idx]; + + if (verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) { + ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); + *xdp_xmit |= verdict; + } else if (verdict & ICE_XDP_CONSUMED) { + buf->pagecnt_bias++; + } else if (verdict == ICE_XDP_PASS) { + ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); + } + + ice_put_rx_buf(rx_ring, buf); + + if (++idx == cnt) + idx = 0; + } + /* handle buffers that represented frags released by XDP prog; + * for these we keep pagecnt_bias as-is; refcount from struct page + * has been decremented within XDP prog and we do not have to increase + * the biased refcnt + */ + for (; i < nr_frags; i++) { + buf = &rx_ring->rx_buf[idx]; + ice_put_rx_buf(rx_ring, buf); + if (++idx == cnt) + idx = 0; + } + + xdp->data = NULL; + rx_ring->first_desc = ntc; + rx_ring->nr_frags = 0; +} + +/** * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf * @rx_ring: Rx descriptor ring to transact packets on * @budget: Total limit on number of packets to process @@ -1151,20 +1200,13 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) unsigned int total_rx_bytes = 0, total_rx_pkts = 0; unsigned int offset = rx_ring->rx_offset; struct xdp_buff *xdp = &rx_ring->xdp; - u32 cached_ntc = rx_ring->first_desc; struct ice_tx_ring *xdp_ring = NULL; struct bpf_prog *xdp_prog = NULL; u32 ntc = rx_ring->next_to_clean; + u32 cached_ntu, xdp_verdict; u32 cnt = rx_ring->count; u32 xdp_xmit = 0; - u32 cached_ntu; bool failure; - u32 first; - - /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */ -#if (PAGE_SIZE < 8192) - xdp->frame_sz = ice_rx_frame_truesize(rx_ring, 0); -#endif xdp_prog = READ_ONCE(rx_ring->xdp_prog); if (xdp_prog) { @@ -1224,12 +1266,9 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) hard_start = page_address(rx_buf->page) + 
rx_buf->page_offset - offset; xdp_prepare_buff(xdp, hard_start, offset, size, !!offset); -#if (PAGE_SIZE > 4096) - /* At larger PAGE_SIZE, frame_sz depend on len size */ - xdp->frame_sz = ice_rx_frame_truesize(rx_ring, size); -#endif xdp_buff_clear_frags_flag(xdp); } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) { + ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED); break; } if (++ntc == cnt) @@ -1239,15 +1278,15 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) if (ice_is_non_eop(rx_ring, rx_desc)) continue; - ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf, rx_desc); - if (rx_buf->act == ICE_XDP_PASS) + ice_get_pgcnts(rx_ring); + xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc); + if (xdp_verdict == ICE_XDP_PASS) goto construct_skb; total_rx_bytes += xdp_get_buff_len(xdp); total_rx_pkts++; - xdp->data = NULL; - rx_ring->first_desc = ntc; - rx_ring->nr_frags = 0; + ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict); + continue; construct_skb: if (likely(ice_ring_uses_build_skb(rx_ring))) @@ -1257,18 +1296,12 @@ construct_skb: /* exit if we failed to retrieve a buffer */ if (!skb) { rx_ring->ring_stats->rx_stats.alloc_page_failed++; - rx_buf->act = ICE_XDP_CONSUMED; - if (unlikely(xdp_buff_has_frags(xdp))) - ice_set_rx_bufs_act(xdp, rx_ring, - ICE_XDP_CONSUMED); - xdp->data = NULL; - rx_ring->first_desc = ntc; - rx_ring->nr_frags = 0; - break; + xdp_verdict = ICE_XDP_CONSUMED; } - xdp->data = NULL; - rx_ring->first_desc = ntc; - rx_ring->nr_frags = 0; + ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict); + + if (!skb) + break; stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S); if (unlikely(ice_test_staterr(rx_desc->wb.status_error0, @@ -1297,23 +1330,6 @@ construct_skb: total_rx_pkts++; } - first = rx_ring->first_desc; - while (cached_ntc != first) { - struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc]; - - if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) { - ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); - xdp_xmit |= buf->act; - } else if (buf->act & ICE_XDP_CONSUMED) { - buf->pagecnt_bias++; - } else if (buf->act == ICE_XDP_PASS) { - ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz); - } - - ice_put_rx_buf(rx_ring, buf); - if (++cached_ntc >= cnt) - cached_ntc = 0; - } rx_ring->next_to_clean = ntc; /* return up to cleaned_count buffers to hardware */ failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring)); @@ -1392,14 +1408,14 @@ static void ice_net_dim(struct ice_q_vector *q_vector) struct dim_sample dim_sample; __ice_update_sample(q_vector, tx, &dim_sample, true); - net_dim(&tx->dim, dim_sample); + net_dim(&tx->dim, &dim_sample); } if (ITR_IS_DYNAMIC(rx)) { struct dim_sample dim_sample; __ice_update_sample(q_vector, rx, &dim_sample, false); - net_dim(&rx->dim, dim_sample); + net_dim(&rx->dim, &dim_sample); } } @@ -1522,10 +1538,11 @@ int ice_napi_poll(struct napi_struct *napi, int budget) * budget and be more aggressive about cleaning up the Tx descriptors. 
*/ ice_for_each_tx_ring(tx_ring, q_vector->tx) { + struct xsk_buff_pool *xsk_pool = READ_ONCE(tx_ring->xsk_pool); bool wd; - if (tx_ring->xsk_pool) - wd = ice_xmit_zc(tx_ring); + if (xsk_pool) + wd = ice_xmit_zc(tx_ring, xsk_pool); else if (ice_ring_is_xdp(tx_ring)) wd = true; else @@ -1551,6 +1568,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget) budget_per_ring = budget; ice_for_each_rx_ring(rx_ring, q_vector->rx) { + struct xsk_buff_pool *xsk_pool = READ_ONCE(rx_ring->xsk_pool); int cleaned; /* A dedicated path for zero-copy allows making a single @@ -1558,7 +1576,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget) * ice_clean_rx_irq function and makes the codebase cleaner. */ cleaned = rx_ring->xsk_pool ? - ice_clean_rx_irq_zc(rx_ring, budget_per_ring) : + ice_clean_rx_irq_zc(rx_ring, xsk_pool, budget_per_ring) : ice_clean_rx_irq(rx_ring, budget_per_ring); work_done += cleaned; /* if we clean as many as budgeted, we must not be done */ @@ -1791,6 +1809,7 @@ dma_error: static int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) { + const struct ice_tx_ring *tx_ring = off->tx_ring; u32 l4_len = 0, l3_len = 0, l2_len = 0; struct sk_buff *skb = first->skb; union { @@ -1940,6 +1959,30 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) l3_len = l4.hdr - ip.hdr; offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S; + if ((tx_ring->netdev->features & NETIF_F_HW_CSUM) && + !(first->tx_flags & ICE_TX_FLAGS_TSO) && + !skb_csum_is_sctp(skb)) { + /* Set GCS */ + u16 csum_start = (skb->csum_start - skb->mac_header) / 2; + u16 csum_offset = skb->csum_offset / 2; + u16 gcs_params; + + gcs_params = FIELD_PREP(ICE_TX_GCS_DESC_START_M, csum_start) | + FIELD_PREP(ICE_TX_GCS_DESC_OFFSET_M, csum_offset) | + FIELD_PREP(ICE_TX_GCS_DESC_TYPE_M, + ICE_TX_GCS_DESC_CSUM_PSH); + + /* Unlike legacy HW checksums, GCS requires a context + * descriptor. 
+ */ + off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX; + off->cd_gcs_params = gcs_params; + /* Fill out CSO info in data descriptors */ + off->td_offset |= offset; + off->td_cmd |= cmd; + return 1; + } + /* Enable L4 checksum offloads */ switch (l4_proto) { case IPPROTO_TCP: @@ -2397,17 +2440,20 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring) /* allow CONTROL frames egress from main VSI if FW LLDP disabled */ eth = (struct ethhdr *)skb_mac_header(skb); - if (unlikely((skb->priority == TC_PRIO_CONTROL || - eth->h_proto == htons(ETH_P_LLDP)) && - vsi->type == ICE_VSI_PF && - vsi->port_info->qos_cfg.is_sw_lldp)) + + if ((ice_is_switchdev_running(vsi->back) || + ice_lag_is_switchdev_running(vsi->back)) && + vsi->type != ICE_VSI_SF) + ice_eswitch_set_target_vsi(skb, &offload); + else if (unlikely((skb->priority == TC_PRIO_CONTROL || + eth->h_proto == htons(ETH_P_LLDP)) && + vsi->type == ICE_VSI_PF && + vsi->port_info->qos_cfg.is_sw_lldp)) offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S); ice_tstamp(tx_ring, skb, first, &offload); - if (ice_is_switchdev_running(vsi->back)) - ice_eswitch_set_target_vsi(skb, &offload); if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) { struct ice_tx_ctx_desc *cdesc; @@ -2421,7 +2467,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring) /* setup context descriptor */ cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params); cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2); - cdesc->rsvd = cpu_to_le16(0); + cdesc->gcs = cpu_to_le16(offload.cd_gcs_params); cdesc->qw1 = cpu_to_le64(offload.cd_qw1); } diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index af955b0e5dc5..a4b1e9514632 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -193,6 +193,7 @@ struct ice_tx_offload_params { u32 td_l2tag1; u32 cd_tunnel_params; u16 cd_l2tag2; + u16 cd_gcs_params; u8 header_len; }; @@ -201,7 +202,6 @@ struct ice_rx_buf { struct page *page; unsigned int page_offset; unsigned int pgcnt; - unsigned int act; unsigned int pagecnt_bias; }; @@ -359,12 +359,15 @@ struct ice_rx_ring { struct ice_rx_ring *next; /* pointer to next ring in q_vector */ struct xsk_buff_pool *xsk_pool; u32 nr_frags; - dma_addr_t dma; /* physical address of ring */ + u16 max_frame; u16 rx_buf_len; + dma_addr_t dma; /* physical address of ring */ u8 dcb_tc; /* Traffic class of ring */ u8 ptp_rx; #define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1) #define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2) +#define ICE_RX_FLAGS_MULTIDEV BIT(3) +#define ICE_RX_FLAGS_RING_GCS BIT(4) u8 flags; /* CL5 - 5th cacheline starts here */ struct xdp_rxq_info xdp_rxq; @@ -405,6 +408,7 @@ struct ice_tx_ring { #define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2) u8 flags; u8 dcb_tc; /* Traffic class of ring */ + u16 quanta_prof_id; } ____cacheline_internodealigned_in_smp; static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring) diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index f8f1d2bdc1be..45cfaabc41cb 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -2,6 +2,7 @@ /* Copyright (c) 2019, Intel Corporation. 
*/ #include <linux/filter.h> +#include <linux/net/intel/libie/rx.h> #include "ice_txrx_lib.h" #include "ice_eswitch.h" @@ -39,30 +40,6 @@ void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val) } /** - * ice_ptype_to_htype - get a hash type - * @ptype: the ptype value from the descriptor - * - * Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by - * skb_set_hash based on PTYPE as parsed by HW Rx pipeline and is part of - * Rx desc. - */ -static enum pkt_hash_types ice_ptype_to_htype(u16 ptype) -{ - struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype); - - if (!decoded.known) - return PKT_HASH_TYPE_NONE; - if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4) - return PKT_HASH_TYPE_L4; - if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3) - return PKT_HASH_TYPE_L3; - if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2) - return PKT_HASH_TYPE_L2; - - return PKT_HASH_TYPE_NONE; -} - -/** * ice_get_rx_hash - get RX hash value from descriptor * @rx_desc: specific descriptor * @@ -91,14 +68,33 @@ ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring, const union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb, u16 rx_ptype) { + struct libeth_rx_pt decoded; u32 hash; - if (!(rx_ring->netdev->features & NETIF_F_RXHASH)) + decoded = libie_rx_pt_parse(rx_ptype); + if (!libeth_rx_pt_has_hash(rx_ring->netdev, decoded)) return; hash = ice_get_rx_hash(rx_desc); if (likely(hash)) - skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype)); + libeth_rx_pt_set_hash(skb, hash, decoded); +} + +/** + * ice_rx_gcs - Set generic checksum in skb + * @skb: skb currently being received and modified + * @rx_desc: receive descriptor + */ +static void ice_rx_gcs(struct sk_buff *skb, + const union ice_32b_rx_flex_desc *rx_desc) +{ + const struct ice_32b_rx_flex_desc_nic *desc; + u16 csum; + + desc = (struct ice_32b_rx_flex_desc_nic *)rx_desc; + skb->ip_summed = CHECKSUM_COMPLETE; + csum = (__force u16)desc->raw_csum; + skb->csum = csum_unfold((__force __sum16)swab16(csum)); } /** @@ -114,34 +110,35 @@ static void ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb, union ice_32b_rx_flex_desc *rx_desc, u16 ptype) { - struct ice_rx_ptype_decoded decoded; + struct libeth_rx_pt decoded; u16 rx_status0, rx_status1; bool ipv4, ipv6; - rx_status0 = le16_to_cpu(rx_desc->wb.status_error0); - rx_status1 = le16_to_cpu(rx_desc->wb.status_error1); - - decoded = ice_decode_rx_desc_ptype(ptype); - /* Start with CHECKSUM_NONE and by default csum_level = 0 */ skb->ip_summed = CHECKSUM_NONE; - skb_checksum_none_assert(skb); - /* check if Rx checksum is enabled */ - if (!(ring->netdev->features & NETIF_F_RXCSUM)) + decoded = libie_rx_pt_parse(ptype); + if (!libeth_rx_pt_has_checksum(ring->netdev, decoded)) return; - /* check if HW has decoded the packet and checksum */ - if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))) + rx_status0 = le16_to_cpu(rx_desc->wb.status_error0); + rx_status1 = le16_to_cpu(rx_desc->wb.status_error1); + + if ((ring->flags & ICE_RX_FLAGS_RING_GCS) && + rx_desc->wb.rxdid == ICE_RXDID_FLEX_NIC && + (decoded.inner_prot == LIBETH_RX_PT_INNER_TCP || + decoded.inner_prot == LIBETH_RX_PT_INNER_UDP || + decoded.inner_prot == LIBETH_RX_PT_INNER_ICMP)) { + ice_rx_gcs(skb, rx_desc); return; + } - if (!(decoded.known && decoded.outer_ip)) + /* check if HW has decoded the packet and checksum */ + if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))) return; - ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == 
ICE_RX_PTYPE_OUTER_IPV4); - ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6); + ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4; + ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6; if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) { ring->vsi->back->hw_rx_eipe_error++; @@ -169,19 +166,10 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb, * we need to bump the checksum level by 1 to reflect the fact that * we are indicating we validated the inner checksum. */ - if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT) + if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT) skb->csum_level = 1; - /* Only report checksum unnecessary for TCP, UDP, or SCTP */ - switch (decoded.inner_prot) { - case ICE_RX_PTYPE_INNER_PROT_TCP: - case ICE_RX_PTYPE_INNER_PROT_UDP: - case ICE_RX_PTYPE_INNER_PROT_SCTP: - skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - default: - break; - } + skb->ip_summed = CHECKSUM_UNNECESSARY; return; checksum_fail: @@ -236,7 +224,16 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring, ice_rx_hash_to_skb(rx_ring, rx_desc, skb, ptype); /* modifies the skb - consumes the enet header */ - skb->protocol = eth_type_trans(skb, rx_ring->netdev); + if (unlikely(rx_ring->flags & ICE_RX_FLAGS_MULTIDEV)) { + struct net_device *netdev = ice_eswitch_get_target(rx_ring, + rx_desc); + + if (ice_is_port_repr_netdev(netdev)) + ice_repr_inc_rx_stats(netdev, skb->len); + skb->protocol = eth_type_trans(skb, netdev); + } else { + skb->protocol = eth_type_trans(skb, rx_ring->netdev); + } ice_rx_csum(rx_ring, skb, rx_desc, ptype); @@ -527,42 +524,6 @@ static int ice_xdp_rx_hw_ts(const struct xdp_md *ctx, u64 *ts_ns) return 0; } -/* Define a ptype index -> XDP hash type lookup table. - * It uses the same ptype definitions as ice_decode_rx_desc_ptype[], - * avoiding possible copy-paste errors. - */ -#undef ICE_PTT -#undef ICE_PTT_UNUSED_ENTRY - -#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ - [PTYPE] = XDP_RSS_L3_##OUTER_IP_VER | XDP_RSS_L4_##I | XDP_RSS_TYPE_##PL - -#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = 0 - -/* A few supplementary definitions for when XDP hash types do not coincide - * with what can be generated from ptype definitions - * by means of preprocessor concatenation. 
- */ -#define XDP_RSS_L3_NONE XDP_RSS_TYPE_NONE -#define XDP_RSS_L4_NONE XDP_RSS_TYPE_NONE -#define XDP_RSS_TYPE_PAY2 XDP_RSS_TYPE_L2 -#define XDP_RSS_TYPE_PAY3 XDP_RSS_TYPE_NONE -#define XDP_RSS_TYPE_PAY4 XDP_RSS_L4 - -static const enum xdp_rss_hash_type -ice_ptype_to_xdp_hash[ICE_NUM_DEFINED_PTYPES] = { - ICE_PTYPES -}; - -#undef XDP_RSS_L3_NONE -#undef XDP_RSS_L4_NONE -#undef XDP_RSS_TYPE_PAY2 -#undef XDP_RSS_TYPE_PAY3 -#undef XDP_RSS_TYPE_PAY4 - -#undef ICE_PTT -#undef ICE_PTT_UNUSED_ENTRY - /** * ice_xdp_rx_hash_type - Get XDP-specific hash type from the RX descriptor * @eop_desc: End of Packet descriptor @@ -570,12 +531,7 @@ ice_ptype_to_xdp_hash[ICE_NUM_DEFINED_PTYPES] = { static enum xdp_rss_hash_type ice_xdp_rx_hash_type(const union ice_32b_rx_flex_desc *eop_desc) { - u16 ptype = ice_get_ptype(eop_desc); - - if (unlikely(ptype >= ICE_NUM_DEFINED_PTYPES)) - return 0; - - return ice_ptype_to_xdp_hash[ptype]; + return libie_rx_pt_parse(ice_get_ptype(eop_desc)).hash_type; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h index afcead4baef4..6cf32b404127 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h @@ -6,49 +6,6 @@ #include "ice.h" /** - * ice_set_rx_bufs_act - propagate Rx buffer action to frags - * @xdp: XDP buffer representing frame (linear and frags part) - * @rx_ring: Rx ring struct - * act: action to store onto Rx buffers related to XDP buffer parts - * - * Set action that should be taken before putting Rx buffer from first frag - * to the last. - */ -static inline void -ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring, - const unsigned int act) -{ - u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags; - u32 nr_frags = rx_ring->nr_frags + 1; - u32 idx = rx_ring->first_desc; - u32 cnt = rx_ring->count; - struct ice_rx_buf *buf; - - for (int i = 0; i < nr_frags; i++) { - buf = &rx_ring->rx_buf[idx]; - buf->act = act; - - if (++idx == cnt) - idx = 0; - } - - /* adjust pagecnt_bias on frags freed by XDP prog */ - if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) { - u32 delta = rx_ring->nr_frags - sinfo_frags; - - while (delta) { - if (idx == 0) - idx = cnt - 1; - else - idx--; - buf = &rx_ring->rx_buf[idx]; - buf->pagecnt_bias--; - delta--; - } - } -} - -/** * ice_test_staterr - tests bits in Rx descriptor status and error fields * @status_err_n: Rx descriptor status_error0 or status_error1 bits * @stat_err_bits: value to mask @@ -154,7 +111,6 @@ static inline u32 ice_set_rs_bit(const struct ice_tx_ring *xdp_ring) } void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, u32 first_idx); -int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring); int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring, bool frame); void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val); diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index 9ff92dba5823..3d68f465952d 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -18,6 +18,8 @@ #include "ice_sbq_cmd.h" #include "ice_vlan_mode.h" #include "ice_fwlog.h" +#include <linux/wait.h> +#include <net/dscp.h> static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc) { @@ -61,6 +63,7 @@ static inline u32 ice_round_to_num(u32 N, u32 R) ICE_DBG_AQ_DESC | \ ICE_DBG_AQ_DESC_BUF | \ ICE_DBG_AQ_CMD) +#define 
ICE_DBG_PARSER BIT_ULL(28) #define ICE_DBG_USER BIT_ULL(31) @@ -71,6 +74,14 @@ enum ice_aq_res_ids { ICE_GLOBAL_CFG_LOCK_RES_ID }; +enum ice_fec_stats_types { + ICE_FEC_CORR_LOW, + ICE_FEC_CORR_HIGH, + ICE_FEC_UNCORR_LOW, + ICE_FEC_UNCORR_HIGH, + ICE_FEC_MAX +}; + /* FW update timeout definitions are in milliseconds */ #define ICE_NVM_TIMEOUT 180000 #define ICE_CHANGE_LOCK_TIMEOUT 1000 @@ -150,7 +161,7 @@ enum ice_vsi_type { ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */ ICE_VSI_CHNL = 4, ICE_VSI_LB = 6, - ICE_VSI_SWITCHDEV_CTRL = 7, + ICE_VSI_SF = 9, }; struct ice_link_status { @@ -204,6 +215,7 @@ struct ice_phy_info { enum ice_fltr_ptype { /* NONE - used for undef/error */ ICE_FLTR_PTYPE_NONF_NONE = 0, + ICE_FLTR_PTYPE_NONF_ETH, ICE_FLTR_PTYPE_NONF_IPV4_UDP, ICE_FLTR_PTYPE_NONF_IPV4_TCP, ICE_FLTR_PTYPE_NONF_IPV4_SCTP, @@ -296,6 +308,7 @@ struct ice_hw_common_caps { bool pcie_reset_avoidance; /* Post update reset restriction */ bool reset_restrict_support; + bool tx_sched_topo_comp_mode_en; }; /* IEEE 1588 TIME_SYNC specific info */ @@ -321,12 +334,14 @@ enum ice_time_ref_freq { ICE_TIME_REF_FREQ_156_250 = 4, ICE_TIME_REF_FREQ_245_760 = 5, - NUM_ICE_TIME_REF_FREQ + NUM_ICE_TIME_REF_FREQ, + + ICE_TIME_REF_FREQ_INVALID = -1, }; /* Clock source specification */ enum ice_clk_src { - ICE_CLK_SRC_TCX0 = 0, /* Temperature compensated oscillator */ + ICE_CLK_SRC_TCXO = 0, /* Temperature compensated oscillator */ ICE_CLK_SRC_TIME_REF = 1, /* Use TIME_REF reference clock */ NUM_ICE_CLK_SRC @@ -355,6 +370,7 @@ struct ice_ts_func_info { #define ICE_TS_TMR1_ENA_M BIT(26) #define ICE_TS_LL_TX_TS_READ_M BIT(28) #define ICE_TS_LL_TX_TS_INT_READ_M BIT(29) +#define ICE_TS_LL_PHY_TMR_UPDATE_M BIT(30) struct ice_ts_dev_info { /* Device specific info */ @@ -369,6 +385,16 @@ struct ice_ts_dev_info { u8 tmr1_ena; u8 ts_ll_read; u8 ts_ll_int_read; + u8 ll_phy_tmr_update; +}; + +#define ICE_NAC_TOPO_PRIMARY_M BIT(0) +#define ICE_NAC_TOPO_DUAL_M BIT(1) +#define ICE_NAC_TOPO_ID_M GENMASK(0xF, 0) + +struct ice_nac_topology { + u32 mode; + u8 id; }; /* Function specific capabilities */ @@ -392,6 +418,7 @@ struct ice_hw_dev_caps { u32 num_flow_director_fltr; /* Number of FD filters available */ struct ice_ts_dev_info ts_dev_info; u32 num_funcs; + struct ice_nac_topology nac_topo; /* bitmap of supported sensors * bit 0 - internal temperature sensor * bit 31:1 - Reserved @@ -481,6 +508,8 @@ struct ice_bank_info { u32 orom_size; /* Size of OROM bank */ u32 netlist_ptr; /* Pointer to 1st Netlist bank */ u32 netlist_size; /* Size of Netlist bank */ + u32 active_css_hdr_len; /* Active CSS header length */ + u32 inactive_css_hdr_len; /* Inactive CSS header length */ enum ice_flash_bank nvm_bank; /* Active NVM bank */ enum ice_flash_bank orom_bank; /* Active OROM bank */ enum ice_flash_bank netlist_bank; /* Active Netlist bank */ @@ -667,7 +696,6 @@ struct ice_dcb_app_priority_table { #define ICE_MAX_USER_PRIORITY 8 #define ICE_DCBX_MAX_APPS 64 -#define ICE_DSCP_NUM_VAL 64 #define ICE_LLDPDU_SIZE 1500 #define ICE_TLV_STATUS_OPER 0x1 #define ICE_TLV_STATUS_SYNC 0x2 @@ -690,9 +718,9 @@ struct ice_dcbx_cfg { u8 pfc_mode; struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS]; /* when DSCP mapping defined by user set its bit to 1 */ - DECLARE_BITMAP(dscp_mapped, ICE_DSCP_NUM_VAL); + DECLARE_BITMAP(dscp_mapped, DSCP_MAX); /* array holding DSCP -> UP/TC values for DSCP L3 QoS mode */ - u8 dscp_map[ICE_DSCP_NUM_VAL]; + u8 dscp_map[DSCP_MAX]; u8 dcbx_mode; #define ICE_DCBX_MODE_CEE 0x1 #define ICE_DCBX_MODE_IEEE 
0x2 @@ -715,6 +743,7 @@ struct ice_port_info { u16 sw_id; /* Initial switch ID belongs to port */ u16 pf_vf_num; u8 port_state; + u8 local_fwd_mode; #define ICE_SCHED_PORT_STATE_INIT 0x0 #define ICE_SCHED_PORT_STATE_READY 0x1 u8 lport; @@ -738,6 +767,8 @@ struct ice_switch_info { struct ice_sw_recipe *recp_list; u16 prof_res_bm_init; u16 max_used_prof_index; + u16 rule_cnt; + u8 recp_cnt; DECLARE_BITMAP(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS); }; @@ -817,11 +848,41 @@ struct ice_mbx_data { u16 async_watermark_val; }; -/* PHY model */ -enum ice_phy_model { - ICE_PHY_UNSUP = -1, - ICE_PHY_E810 = 1, - ICE_PHY_E82X, +#define ICE_PORTS_PER_QUAD 4 +#define ICE_GET_QUAD_NUM(port) ((port) / ICE_PORTS_PER_QUAD) + +#define ATQBAL_FLAGS_INTR_IN_PROGRESS BIT(0) + +struct ice_e810_params { + /* The wait queue lock also protects the low latency interface */ + wait_queue_head_t atqbal_wq; + unsigned int atqbal_flags; +}; + +struct ice_eth56g_params { + u8 num_phys; + bool onestep_ena; + bool sfd_ena; + u32 peer_delay; +}; + +union ice_phy_params { + struct ice_e810_params e810; + struct ice_eth56g_params eth56g; +}; + +/* Global Link Topology */ +enum ice_global_link_topo { + ICE_LINK_TOPO_UP_TO_2_LINKS, + ICE_LINK_TOPO_UP_TO_4_LINKS, + ICE_LINK_TOPO_UP_TO_8_LINKS, + ICE_LINK_TOPO_RESERVED, +}; + +struct ice_ptp_hw { + union ice_phy_params phy; + u8 num_lports; + u8 ports_per_phy; }; /* Port hardware description */ @@ -845,10 +906,12 @@ struct ice_hw { u8 revision_id; u8 pf_id; /* device profile info */ - enum ice_phy_model phy_model; + u8 logical_pf_id; u16 max_burst_size; /* driver sets this value */ + u8 recp_reuse:1; /* indicates whether FW supports recipe reuse */ + /* Tx Scheduler values */ u8 num_tx_sched_layers; u8 num_tx_sched_phys_layers; @@ -906,12 +969,8 @@ struct ice_hw { /* INTRL granularity in 1 us */ u8 intrl_gran; -#define ICE_MAX_QUAD 2 -#define ICE_QUADS_PER_PHY_E82X 2 -#define ICE_PORTS_PER_PHY_E82X 8 -#define ICE_PORTS_PER_QUAD 4 -#define ICE_PORTS_PER_PHY_E810 4 -#define ICE_NUM_EXTERNAL_PORTS (ICE_MAX_QUAD * ICE_PORTS_PER_QUAD) + struct ice_ptp_hw ptp; + s8 lane_num; /* Active package version (currently active) */ struct ice_pkg_ver active_pkg_ver; @@ -1084,17 +1143,13 @@ struct ice_aq_get_set_rss_lut_params { #define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800 /* CSS Header words */ +#define ICE_NVM_CSS_HDR_LEN_L 0x02 +#define ICE_NVM_CSS_HDR_LEN_H 0x03 #define ICE_NVM_CSS_SREV_L 0x14 #define ICE_NVM_CSS_SREV_H 0x15 -/* Length of CSS header section in words */ -#define ICE_CSS_HEADER_LENGTH 330 - -/* Offset of Shadow RAM copy in the NVM bank area. */ -#define ICE_NVM_SR_COPY_WORD_OFFSET roundup(ICE_CSS_HEADER_LENGTH, 32) - -/* Size in bytes of Option ROM trailer */ -#define ICE_NVM_OROM_TRAILER_LENGTH (2 * ICE_CSS_HEADER_LENGTH) +/* Length of Authentication header section in words */ +#define ICE_NVM_AUTH_HEADER_LEN 0x08 /* The Link Topology Netlist section is stored as a series of words. 
It is * stored in the NVM as a TLV, with the first two words containing the type @@ -1163,4 +1218,9 @@ struct ice_aq_get_set_rss_lut_params { #define ICE_FW_API_REPORT_DFLT_CFG_MIN 7 #define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3 +/* AQ API version for Health Status support */ +#define ICE_FW_API_HEALTH_REPORT_MAJ 1 +#define ICE_FW_API_HEALTH_REPORT_MIN 7 +#define ICE_FW_API_HEALTH_REPORT_PATCH 6 + #endif /* _ICE_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index d10a4be965b5..48cd533e93b7 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -226,6 +226,7 @@ static void ice_vf_clear_counters(struct ice_vf *vf) vsi->num_vlan = 0; vf->num_mac = 0; + vf->num_mac_lldp = 0; memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events)); memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events)); } @@ -256,23 +257,21 @@ static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf) * * It brings the VSI down and then reconfigures it with the hardware. */ -int ice_vf_reconfig_vsi(struct ice_vf *vf) +static int ice_vf_reconfig_vsi(struct ice_vf *vf) { struct ice_vsi *vsi = ice_get_vf_vsi(vf); - struct ice_vsi_cfg_params params = {}; struct ice_pf *pf = vf->pf; int err; if (WARN_ON(!vsi)) return -EINVAL; - params = ice_vsi_to_params(vsi); - params.flags = ICE_VSI_FLAG_NO_INIT; + vsi->flags = ICE_VSI_FLAG_NO_INIT; ice_vsi_decfg(vsi); ice_fltr_remove_all(vsi); - err = ice_vsi_cfg(vsi, ¶ms); + err = ice_vsi_cfg(vsi); if (err) { dev_err(ice_pf_to_dev(pf), "Failed to reconfigure the VF%u's VSI, error %d\n", @@ -337,6 +336,13 @@ static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi) err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info); } else { + /* clear possible previous port vlan config */ + err = ice_vsi_clear_port_vlan(vsi); + if (err) { + dev_err(dev, "failed to clear port VLAN via VSI parameters for VF %u, error %d\n", + vf->vf_id, err); + return err; + } err = ice_vsi_add_vlan_zero(vsi); } @@ -712,6 +718,23 @@ ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m) } /** + * ice_reset_vf_mbx_cnt - reset VF mailbox message count + * @vf: pointer to the VF structure + * + * This function clears the VF mailbox message count, and should be called on + * VF reset. 
+ */ +static void ice_reset_vf_mbx_cnt(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + + if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) + ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id); + else + ice_mbx_clear_malvf(&vf->mbx_info); +} + +/** * ice_reset_all_vfs - reset all allocated VFs in one go * @pf: pointer to the PF structure * @@ -737,7 +760,7 @@ void ice_reset_all_vfs(struct ice_pf *pf) /* clear all malicious info if the VFs are getting reset */ ice_for_each_vf(pf, bkt, vf) - ice_mbx_clear_malvf(&vf->mbx_info); + ice_reset_vf_mbx_cnt(vf); /* If VFs have been disabled, there is no need to reset */ if (test_and_set_bit(ICE_VF_DIS, pf->state)) { @@ -768,7 +791,7 @@ void ice_reset_all_vfs(struct ice_pf *pf) ice_for_each_vf(pf, bkt, vf) { mutex_lock(&vf->cfg_lock); - ice_eswitch_detach(pf, vf); + ice_eswitch_detach_vf(pf, vf); vf->driver_caps = 0; ice_vc_set_default_allowlist(vf); @@ -784,7 +807,7 @@ void ice_reset_all_vfs(struct ice_pf *pf) ice_vf_rebuild_vsi(vf); ice_vf_post_vsi_rebuild(vf); - ice_eswitch_attach(pf, vf); + ice_eswitch_attach_vf(pf, vf); mutex_unlock(&vf->cfg_lock); } @@ -950,10 +973,10 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) goto out_unlock; } - ice_eswitch_update_repr(vf->repr_id, vsi); + ice_eswitch_update_repr(&vf->repr_id, vsi); /* if the VF has been reset allow it to come up again */ - ice_mbx_clear_malvf(&vf->mbx_info); + ice_reset_vf_mbx_cnt(vf); out_unlock: if (lag && lag->bonded && lag->primary && @@ -992,10 +1015,13 @@ void ice_initialize_vf_entry(struct ice_vf *vf) /* assign default capabilities */ vf->spoofchk = true; - vf->num_vf_qs = vfs->num_qps_per; ice_vc_set_default_allowlist(vf); ice_virtchnl_set_dflt_ops(vf); + /* set default number of MSI-X */ + vf->num_msix = vfs->num_msix_per; + vf->num_vf_qs = vfs->num_qps_per; + /* ctrl_vsi_idx will be set to a valid value only when iAVF * creates its first fdir rule. */ @@ -1003,11 +1029,22 @@ void ice_initialize_vf_entry(struct ice_vf *vf) ice_vf_fdir_init(vf); /* Initialize mailbox info for this VF */ - ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info); + if (ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) + ice_mbx_vf_clear_cnt_e830(&pf->hw, vf->vf_id); + else + ice_mbx_init_vf_info(&pf->hw, &vf->mbx_info); mutex_init(&vf->cfg_lock); } +void ice_deinitialize_vf_entry(struct ice_vf *vf) +{ + struct ice_pf *pf = vf->pf; + + if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT)) + list_del(&vf->mbx_info.list_entry); +} + /** * ice_dis_vf_qs - Disable the VF queues * @vf: pointer to the VF structure @@ -1240,7 +1277,7 @@ struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf) struct ice_vsi *vsi; params.type = ICE_VSI_CTRL; - params.pi = ice_vf_get_port_info(vf); + params.port_info = ice_vf_get_port_info(vf); params.vf = vf; params.flags = ICE_VSI_FLAG_INIT; @@ -1365,3 +1402,28 @@ struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi) rcu_read_unlock(); return ctrl_vsi; } + +/** + * ice_vf_update_mac_lldp_num - update the VF's number of LLDP addresses + * @vf: a VF to add the address to + * @vsi: the corresponding VSI + * @incr: is the rule added or removed + */ +void ice_vf_update_mac_lldp_num(struct ice_vf *vf, struct ice_vsi *vsi, + bool incr) +{ + bool lldp_by_fw = test_bit(ICE_FLAG_FW_LLDP_AGENT, vsi->back->flags); + bool was_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw; + bool is_ena; + + if (WARN_ON(!vsi)) { + vf->num_mac_lldp = 0; + return; + } + + vf->num_mac_lldp += incr ? 
1 : -1; + is_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw; + + if (was_ena != is_ena) + ice_vsi_cfg_sw_lldp(vsi, false, is_ena); +} diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h index fec16919ec19..482f4285fd35 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h @@ -12,6 +12,7 @@ #include <net/devlink.h> #include <linux/avf/virtchnl.h> #include "ice_type.h" +#include "ice_flow.h" #include "ice_virtchnl_fdir.h" #include "ice_vsi_vlan_ops.h" @@ -52,6 +53,19 @@ struct ice_mdd_vf_events { u16 last_printed; }; +/* Structure to store fdir fv entry */ +struct ice_fdir_prof_info { + struct ice_parser_profile prof; + u64 fdir_active_cnt; +}; + +struct ice_vf_qs_bw { + u32 committed; + u32 peak; + u16 queue_id; + u8 tc; +}; + /* VF operations */ struct ice_vf_ops { enum ice_disq_rst_src reset_type; @@ -91,6 +105,7 @@ struct ice_vf { u16 lan_vsi_idx; /* index into PF struct */ u16 ctrl_vsi_idx; struct ice_vf_fdir fdir; + struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS]; /* first vector index of this VF in the PF space */ int first_vector_idx; struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */ @@ -109,6 +124,10 @@ struct ice_vf { u8 spoofchk:1; u8 link_forced:1; u8 link_up:1; /* only valid if VF link is forced */ + u8 lldp_tx_ena:1; + + u32 ptp_caps; + unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */ unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */ DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */ @@ -116,6 +135,7 @@ struct ice_vf { unsigned long vf_caps; /* VF's adv. capabilities */ u8 num_req_qs; /* num of queue pairs requested by VF */ u16 num_mac; + u16 num_mac_lldp; u16 num_vf_qs; /* num of queue configured per VF */ u8 vlan_strip_ena; /* Outer and Inner VLAN strip enable */ #define ICE_INNER_VLAN_STRIP_ENA BIT(0) @@ -131,7 +151,11 @@ struct ice_vf { /* devlink port data */ struct devlink_port devlink_port; + u16 lldp_recipe_id; + u16 lldp_rule_id; + u16 num_msix; /* num of MSI-X configured on this VF */ + struct ice_vf_qs_bw qs_bw[ICE_MAX_RSS_QS_PER_VF]; }; /* Flags for controlling behavior of ice_reset_vf */ @@ -161,6 +185,11 @@ static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf) return vf->port_vlan_info.tpid; } +static inline bool ice_vf_is_lldp_ena(struct ice_vf *vf) +{ + return vf->num_mac_lldp && vf->trusted; +} + /* VF Hash Table access functions * * These functions provide abstraction for interacting with the VF hash table. 
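ice_vf_is_lldp_ena() above only reports a VF as LLDP-capable while it is trusted and holds at least one LLDP MAC filter, and ice_vf_update_mac_lldp_num() reprograms software LLDP reception only when that state flips. A small standalone sketch of the address test used to count such filters (mirroring ice_is_mc_lldp_eth_addr() added later in this series of hunks); the three qualifying group addresses are the standard LLDP nearest-bridge, nearest non-TPMR bridge and nearest customer bridge destinations:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* true for 01:80:c2:00:00:{0e,03,00}, the LLDP multicast destinations */
    static bool is_mc_lldp_eth_addr(const uint8_t mac[6])
    {
        static const uint8_t base[5] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };

        if (memcmp(mac, base, sizeof(base)))
            return false;
        return mac[5] == 0x0e || mac[5] == 0x03 || mac[5] == 0x00;
    }

Untrusted VFs are refused these filters in ice_vc_can_add_mac(), so num_mac_lldp can only grow for trusted VFs.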
@@ -226,6 +255,8 @@ ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m); int ice_reset_vf(struct ice_vf *vf, u32 flags); void ice_reset_all_vfs(struct ice_pf *pf); struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi); +void ice_vf_update_mac_lldp_num(struct ice_vf *vf, struct ice_vsi *vsi, + bool incr); #else /* CONFIG_PCI_IOV */ static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id) { diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h index 91ba7fe0eaee..5392b0404986 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h @@ -23,8 +23,8 @@ #warning "Only include ice_vf_lib_private.h in CONFIG_PCI_IOV virtualization files" #endif -int ice_vf_reconfig_vsi(struct ice_vf *vf); void ice_initialize_vf_entry(struct ice_vf *vf); +void ice_deinitialize_vf_entry(struct ice_vf *vf); void ice_dis_vf_qs(struct ice_vf *vf); int ice_check_vf_init(struct ice_vf *vf); enum virtchnl_status_code ice_err_to_virt_err(int err); diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c index 40cb4ba0789c..75c8113e58ee 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c @@ -211,6 +211,38 @@ ice_mbx_detect_malvf(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info, } /** + * ice_mbx_vf_dec_trig_e830 - Decrements the VF mailbox queue counter + * @hw: pointer to the HW struct + * @event: pointer to the control queue receive event + * + * This function triggers to decrement the counter + * MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT when the driver replenishes + * the buffers at the PF mailbox queue. + */ +void ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw, + const struct ice_rq_event_info *event) +{ + u16 vfid = le16_to_cpu(event->desc.retval); + + wr32(hw, E830_MBX_VF_DEC_TRIG(vfid), 1); +} + +/** + * ice_mbx_vf_clear_cnt_e830 - Clear the VF mailbox queue count + * @hw: pointer to the HW struct + * @vf_id: VF ID in the PF space + * + * This function clears the counter MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT, and should + * be called when a VF is created and on VF reset. 
+ */ +void ice_mbx_vf_clear_cnt_e830(const struct ice_hw *hw, u16 vf_id) +{ + u32 reg = rd32(hw, E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(vf_id)); + + wr32(hw, E830_MBX_VF_DEC_TRIG(vf_id), reg); +} + +/** * ice_mbx_vf_state_handler - Handle states of the overflow algorithm * @hw: pointer to the HW struct * @mbx_data: pointer to structure containing mailbox data diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.h b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h index 44bc030d17e0..684de89e5c5e 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.h @@ -19,6 +19,9 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, struct ice_sq_cd *cd); u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed); +void ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw, + const struct ice_rq_event_info *event); +void ice_mbx_vf_clear_cnt_e830(const struct ice_hw *hw, u16 vf_id); int ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data, struct ice_mbx_vf_info *vf_info, bool *report_malvf); @@ -47,5 +50,11 @@ static inline void ice_mbx_init_snapshot(struct ice_hw *hw) { } +static inline void +ice_mbx_vf_dec_trig_e830(const struct ice_hw *hw, + const struct ice_rq_event_info *event) +{ +} + #endif /* CONFIG_PCI_IOV */ #endif /* _ICE_VF_MBX_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index 1ff9818b4c84..eeeb9968e477 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -461,6 +461,10 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_FDIR_PF; + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_TC_U32 && + vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_TC_U32; + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; @@ -491,6 +495,12 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_USO) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_USO; + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_QOS) + vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_QOS; + + if (vf->driver_caps & VIRTCHNL_VF_CAP_PTP) + vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_PTP; + vfres->num_vsis = 1; /* Tx and Rx queue are equal for VF */ vfres->num_queue_pairs = vsi->num_txq; @@ -555,7 +565,7 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) * * check for the valid queue ID */ -static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u8 qid) +static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u16 qid) { /* allocated Tx and Rx queues should be always equal for VF VSI */ return qid < vsi->alloc_txq; @@ -1031,6 +1041,191 @@ error_param: } /** + * ice_vc_get_qos_caps - Get current QoS caps from PF + * @vf: pointer to the VF info + * + * Get VF's QoS capabilities, such as TC number, arbiter and + * bandwidth from PF. + * + * Return: 0 on success or negative error value. 
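ice_vc_get_qos_caps() below reports one element per TC and folds the DCB UP2TC table into a per-TC bitmap of user priorities before filling those elements. A short worked sketch of that fold with a hypothetical priority table mapping priorities 0-3 to TC0 and 4-7 to TC1:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* hypothetical DCB UP2TC table: UP 0-3 -> TC 0, UP 4-7 -> TC 1 */
        uint8_t prio_table[8] = { 0, 0, 0, 0, 1, 1, 1, 1 };
        uint8_t tc_prio[8] = { 0 };

        for (int i = 0; i < 8; i++)
            tc_prio[prio_table[i]] |= 1u << i;

        /* prints 0x0f for TC0 and 0xf0 for TC1 */
        printf("TC0 0x%02x, TC1 0x%02x\n", tc_prio[0], tc_prio[1]);
        return 0;
    }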
+ */ +static int ice_vc_get_qos_caps(struct ice_vf *vf) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct virtchnl_qos_cap_list *cap_list = NULL; + u8 tc_prio[ICE_MAX_TRAFFIC_CLASS] = { 0 }; + struct virtchnl_qos_cap_elem *cfg = NULL; + struct ice_vsi_ctx *vsi_ctx; + struct ice_pf *pf = vf->pf; + struct ice_port_info *pi; + struct ice_vsi *vsi; + u8 numtc, tc; + u16 len = 0; + int ret, i; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + pi = pf->hw.port_info; + numtc = vsi->tc_cfg.numtc; + + vsi_ctx = ice_get_vsi_ctx(pi->hw, vf->lan_vsi_idx); + if (!vsi_ctx) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + len = struct_size(cap_list, cap, numtc); + cap_list = kzalloc(len, GFP_KERNEL); + if (!cap_list) { + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + len = 0; + goto err; + } + + cap_list->vsi_id = vsi->vsi_num; + cap_list->num_elem = numtc; + + /* Store the UP2TC configuration from DCB to a user priority bitmap + * of each TC. Each element of prio_of_tc represents one TC. Each + * bitmap indicates the user priorities belong to this TC. + */ + for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) { + tc = pi->qos_cfg.local_dcbx_cfg.etscfg.prio_table[i]; + tc_prio[tc] |= BIT(i); + } + + for (i = 0; i < numtc; i++) { + cfg = &cap_list->cap[i]; + cfg->tc_num = i; + cfg->tc_prio = tc_prio[i]; + cfg->arbiter = pi->qos_cfg.local_dcbx_cfg.etscfg.tsatable[i]; + cfg->weight = VIRTCHNL_STRICT_WEIGHT; + cfg->type = VIRTCHNL_BW_SHAPER; + cfg->shaper.committed = vsi_ctx->sched.bw_t_info[i].cir_bw.bw; + cfg->shaper.peak = vsi_ctx->sched.bw_t_info[i].eir_bw.bw; + } + +err: + ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_QOS_CAPS, v_ret, + (u8 *)cap_list, len); + kfree(cap_list); + return ret; +} + +/** + * ice_vf_cfg_qs_bw - Configure per queue bandwidth + * @vf: pointer to the VF info + * @num_queues: number of queues to be configured + * + * Configure per queue bandwidth. + * + * Return: 0 on success or negative error value. + */ +static int ice_vf_cfg_qs_bw(struct ice_vf *vf, u16 num_queues) +{ + struct ice_hw *hw = &vf->pf->hw; + struct ice_vsi *vsi; + int ret; + u16 i; + + vsi = ice_get_vf_vsi(vf); + if (!vsi) + return -EINVAL; + + for (i = 0; i < num_queues; i++) { + u32 p_rate, min_rate; + u8 tc; + + p_rate = vf->qs_bw[i].peak; + min_rate = vf->qs_bw[i].committed; + tc = vf->qs_bw[i].tc; + if (p_rate) + ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc, + vf->qs_bw[i].queue_id, + ICE_MAX_BW, p_rate); + else + ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc, + vf->qs_bw[i].queue_id, + ICE_MAX_BW); + if (ret) + return ret; + + if (min_rate) + ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc, + vf->qs_bw[i].queue_id, + ICE_MIN_BW, min_rate); + else + ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc, + vf->qs_bw[i].queue_id, + ICE_MIN_BW); + + if (ret) + return ret; + } + + return 0; +} + +/** + * ice_vf_cfg_q_quanta_profile - Configure quanta profile + * @vf: pointer to the VF info + * @quanta_prof_idx: pointer to the quanta profile index + * @quanta_size: quanta size to be set + * + * This function chooses available quanta profile and configures the register. + * The quanta profile is evenly divided by the number of device ports, and then + * available to the specific PF and VFs. The first profile for each PF is a + * reserved default profile. 
Only quanta size of the rest unused profile can be + * modified. + * + * Return: 0 on success or negative error value. + */ +static int ice_vf_cfg_q_quanta_profile(struct ice_vf *vf, u16 quanta_size, + u16 *quanta_prof_idx) +{ + const u16 n_desc = calc_quanta_desc(quanta_size); + struct ice_hw *hw = &vf->pf->hw; + const u16 n_cmd = 2 * n_desc; + struct ice_pf *pf = vf->pf; + u16 per_pf, begin_id; + u8 n_used; + u32 reg; + + begin_id = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) / hw->dev_caps.num_funcs * + hw->logical_pf_id; + + if (quanta_size == ICE_DFLT_QUANTA) { + *quanta_prof_idx = begin_id; + } else { + per_pf = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) / + hw->dev_caps.num_funcs; + n_used = pf->num_quanta_prof_used; + if (n_used < per_pf) { + *quanta_prof_idx = begin_id + 1 + n_used; + pf->num_quanta_prof_used++; + } else { + return -EINVAL; + } + } + + reg = FIELD_PREP(GLCOMM_QUANTA_PROF_QUANTA_SIZE_M, quanta_size) | + FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_CMD_M, n_cmd) | + FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_DESC_M, n_desc); + wr32(hw, GLCOMM_QUANTA_PROF(*quanta_prof_idx), reg); + + return 0; +} + +/** * ice_vc_cfg_promiscuous_mode_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -1505,13 +1700,12 @@ error_param: * ice_cfg_interrupt * @vf: pointer to the VF info * @vsi: the VSI being configured - * @vector_id: vector ID * @map: vector map for mapping vectors to queues * @q_vector: structure for interrupt vector * configure the IRQ to queue map */ -static int -ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id, +static enum virtchnl_status_code +ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, struct virtchnl_vector_map *map, struct ice_q_vector *q_vector) { @@ -1531,7 +1725,8 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id, q_vector->num_ring_rx++; q_vector->rx.itr_idx = map->rxitr_idx; vsi->rx_rings[vsi_q_id]->q_vector = q_vector; - ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id, + ice_cfg_rxq_interrupt(vsi, vsi_q_id, + q_vector->vf_reg_idx, q_vector->rx.itr_idx); } @@ -1545,7 +1740,8 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id, q_vector->num_ring_tx++; q_vector->tx.itr_idx = map->txitr_idx; vsi->tx_rings[vsi_q_id]->q_vector = q_vector; - ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id, + ice_cfg_txq_interrupt(vsi, vsi_q_id, + q_vector->vf_reg_idx, q_vector->tx.itr_idx); } @@ -1619,8 +1815,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) } /* lookout for the invalid queue index */ - v_ret = (enum virtchnl_status_code) - ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector); + v_ret = ice_cfg_interrupt(vf, vsi, map, q_vector); if (v_ret) goto error_param; } @@ -1632,6 +1827,164 @@ error_param: } /** + * ice_vc_cfg_q_bw - Configure per queue bandwidth + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer which holds the command descriptor + * + * Configure VF queues bandwidth. + * + * Return: 0 on success or negative error value. 
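ice_vc_cfg_q_bw() below cross-checks every per-queue shaper against the VF-level rate limits before committing anything: a peak above the VF's maximum Tx rate or a committed rate below its minimum is rejected, as are out-of-range queue IDs and traffic classes. A compact sketch of those checks; the struct is an illustrative stand-in for one virtchnl shaper entry, not the driver's type:

    #include <stdbool.h>
    #include <stdint.h>

    struct q_bw { /* illustrative per-queue entry */
        uint16_t queue_id;
        uint8_t tc;
        uint32_t peak;      /* 0 means no cap, same units as the VF limits */
        uint32_t committed; /* 0 means no guarantee */
    };

    /* mirrors the validation loop in ice_vc_cfg_q_bw() */
    static bool q_bw_valid(const struct q_bw *c, uint32_t vf_max, uint32_t vf_min,
                           uint16_t num_vf_qs, uint8_t num_tc)
    {
        if (c->peak && vf_max && c->peak > vf_max)
            return false;          /* would exceed the VF-wide cap */
        if (c->committed && vf_min && c->committed < vf_min)
            return false;          /* below the VF-wide guarantee */
        return c->queue_id <= num_vf_qs && c->tc < num_tc;
    }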
+ */ +static int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct virtchnl_queues_bw_cfg *qbw = + (struct virtchnl_queues_bw_cfg *)msg; + struct ice_vsi *vsi; + u16 i; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || + !ice_vc_isvalid_vsi_id(vf, qbw->vsi_id)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + if (qbw->num_queues > ICE_MAX_RSS_QS_PER_VF || + qbw->num_queues > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) { + dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n", + vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + for (i = 0; i < qbw->num_queues; i++) { + if (qbw->cfg[i].shaper.peak != 0 && vf->max_tx_rate != 0 && + qbw->cfg[i].shaper.peak > vf->max_tx_rate) { + dev_warn(ice_pf_to_dev(vf->pf), "The maximum queue %d rate limit configuration may not take effect because the maximum TX rate for VF-%d is %d\n", + qbw->cfg[i].queue_id, vf->vf_id, + vf->max_tx_rate); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + if (qbw->cfg[i].shaper.committed != 0 && vf->min_tx_rate != 0 && + qbw->cfg[i].shaper.committed < vf->min_tx_rate) { + dev_warn(ice_pf_to_dev(vf->pf), "The minimum queue %d rate limit configuration may not take effect because the minimum TX rate for VF-%d is %d\n", + qbw->cfg[i].queue_id, vf->vf_id, + vf->min_tx_rate); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + if (qbw->cfg[i].queue_id > vf->num_vf_qs) { + dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure invalid queue_id\n", + vf->vf_id); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + if (qbw->cfg[i].tc >= ICE_MAX_TRAFFIC_CLASS) { + dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure a traffic class higher than allowed\n", + vf->vf_id); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + } + + for (i = 0; i < qbw->num_queues; i++) { + vf->qs_bw[i].queue_id = qbw->cfg[i].queue_id; + vf->qs_bw[i].peak = qbw->cfg[i].shaper.peak; + vf->qs_bw[i].committed = qbw->cfg[i].shaper.committed; + vf->qs_bw[i].tc = qbw->cfg[i].tc; + } + + if (ice_vf_cfg_qs_bw(vf, qbw->num_queues)) + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + +err: + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUEUE_BW, + v_ret, NULL, 0); +} + +/** + * ice_vc_cfg_q_quanta - Configure per queue quanta + * @vf: pointer to the VF info + * @msg: pointer to the msg buffer which holds the command descriptor + * + * Configure VF queues quanta. + * + * Return: 0 on success or negative error value. 
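ice_vc_cfg_q_quanta() below only accepts a quanta size between ICE_MIN_QUANTA_SIZE and ICE_MAX_QUANTA_SIZE that is a multiple of 64, and then asks ice_vf_cfg_q_quanta_profile() above for a profile slot: the available profiles are split evenly across the device's functions, the first slot of each share is reserved for the default size, and further sizes take the next unused slot. A small sketch of that slot arithmetic with illustrative numbers (64 profiles, 8 functions, PF 2), not the hardware's actual counts:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t num_profiles = 64;  /* illustrative total, not the HW value */
        uint16_t num_funcs = 8;      /* illustrative function count */
        uint16_t logical_pf_id = 2;
        uint16_t per_pf = num_profiles / num_funcs;     /* 8 slots per PF */
        uint16_t begin_id = per_pf * logical_pf_id;     /* 16 */
        uint16_t n_used = 3;         /* non-default profiles already taken */

        printf("default-size profile: %u\n", begin_id);
        if (n_used < per_pf)                            /* next free slot: 20 */
            printf("new profile index:    %u\n", begin_id + 1 + n_used);
        return 0;
    }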
+ */ +static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg) +{ + u16 quanta_prof_id, quanta_size, start_qid, num_queues, end_qid, i; + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; + struct virtchnl_quanta_cfg *qquanta = + (struct virtchnl_quanta_cfg *)msg; + struct ice_vsi *vsi; + int ret; + + start_qid = qquanta->queue_select.start_queue_id; + num_queues = qquanta->queue_select.num_queues; + + if (check_add_overflow(start_qid, num_queues, &end_qid)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + vsi = ice_get_vf_vsi(vf); + if (!vsi) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + if (end_qid > ICE_MAX_RSS_QS_PER_VF || + end_qid > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) { + dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n", + vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + quanta_size = qquanta->quanta_size; + if (quanta_size > ICE_MAX_QUANTA_SIZE || + quanta_size < ICE_MIN_QUANTA_SIZE) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + if (quanta_size % 64) { + dev_err(ice_pf_to_dev(vf->pf), "quanta size should be the product of 64\n"); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err; + } + + ret = ice_vf_cfg_q_quanta_profile(vf, quanta_size, + &quanta_prof_id); + if (ret) { + v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED; + goto err; + } + + for (i = start_qid; i < end_qid; i++) + vsi->tx_rings[i]->quanta_prof_id = quanta_prof_id; + +err: + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUANTA, + v_ret, NULL, 0); +} + +/** * ice_vc_cfg_qs_msg * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -1648,6 +2001,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) struct ice_vsi *vsi; u8 act_prt, pri_prt; int i = -1, q_idx; + bool ena_ts; lag = pf->lag; mutex_lock(&pf->lag_mutex); @@ -1711,8 +2065,8 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) /* copy Tx queue info from VF into VSI */ if (qpi->txq.ring_len > 0) { - vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr; - vsi->tx_rings[i]->count = qpi->txq.ring_len; + vsi->tx_rings[q_idx]->dma = qpi->txq.dma_ring_addr; + vsi->tx_rings[q_idx]->count = qpi->txq.ring_len; /* Disable any existing queue first */ if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx)) @@ -1721,7 +2075,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) /* Configure a queue with the requested settings */ if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) { dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n", - vf->vf_id, i); + vf->vf_id, q_idx); goto error_param; } } @@ -1729,39 +2083,37 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) /* copy Rx queue info from VF into VSI */ if (qpi->rxq.ring_len > 0) { u16 max_frame_size = ice_vc_get_max_frame_size(vf); + struct ice_rx_ring *ring = vsi->rx_rings[q_idx]; u32 rxdid; - vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr; - vsi->rx_rings[i]->count = qpi->rxq.ring_len; + ring->dma = qpi->rxq.dma_ring_addr; + ring->count = qpi->rxq.ring_len; if (qpi->rxq.crc_disable) - vsi->rx_rings[q_idx]->flags |= - ICE_RX_FLAGS_CRC_STRIP_DIS; + ring->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS; else - vsi->rx_rings[q_idx]->flags &= - ~ICE_RX_FLAGS_CRC_STRIP_DIS; + ring->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS; if (qpi->rxq.databuffer_size != 0 && (qpi->rxq.databuffer_size 
> ((16 * 1024) - 128) || qpi->rxq.databuffer_size < 1024)) goto error_param; - vsi->rx_buf_len = qpi->rxq.databuffer_size; - vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len; + ring->rx_buf_len = qpi->rxq.databuffer_size; if (qpi->rxq.max_pkt_size > max_frame_size || qpi->rxq.max_pkt_size < 64) goto error_param; - vsi->max_frame = qpi->rxq.max_pkt_size; + ring->max_frame = qpi->rxq.max_pkt_size; /* add space for the port VLAN since the VF driver is * not expected to account for it in the MTU * calculation */ if (ice_vf_is_port_vlan_ena(vf)) - vsi->max_frame += VLAN_HLEN; + ring->max_frame += VLAN_HLEN; if (ice_vsi_cfg_single_rxq(vsi, q_idx)) { dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n", - vf->vf_id, i); + vf->vf_id, q_idx); goto error_param; } @@ -1779,9 +2131,14 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) rxdid = ICE_RXDID_LEGACY_1; } + ena_ts = ((vf->driver_caps & + VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) && + (vf->driver_caps & VIRTCHNL_VF_CAP_PTP) && + (qpi->rxq.flags & VIRTCHNL_PTP_RX_TSTAMP)); + ice_write_qrxflxp_cntxt(&vsi->back->hw, - vsi->rxq_map[q_idx], - rxdid, 0x03, false); + vsi->rxq_map[q_idx], rxdid, + ICE_RXDID_PRIO, ena_ts); } } @@ -1909,6 +2266,51 @@ ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr) } /** + * ice_is_mc_lldp_eth_addr - check if the given MAC is a multicast LLDP address + * @mac: address to check + * + * Return: true if the address is one of the three possible LLDP multicast + * addresses, false otherwise. + */ +static bool ice_is_mc_lldp_eth_addr(const u8 *mac) +{ + const u8 lldp_mac_base[] = {0x01, 0x80, 0xc2, 0x00, 0x00}; + + if (memcmp(mac, lldp_mac_base, sizeof(lldp_mac_base))) + return false; + + return (mac[5] == 0x0e || mac[5] == 0x03 || mac[5] == 0x00); +} + +/** + * ice_vc_can_add_mac - check if the VF is allowed to add a given MAC + * @vf: a VF to add the address to + * @mac: address to check + * + * Return: true if the VF is allowed to add such MAC address, false otherwise. 
+ */ +static bool ice_vc_can_add_mac(const struct ice_vf *vf, const u8 *mac) +{ + struct device *dev = ice_pf_to_dev(vf->pf); + + if (is_unicast_ether_addr(mac) && + !ice_can_vf_change_mac((struct ice_vf *)vf)) { + dev_err(dev, + "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); + return false; + } + + if (!vf->trusted && ice_is_mc_lldp_eth_addr(mac)) { + dev_warn(dev, + "An untrusted VF %u is attempting to configure an LLDP multicast address\n", + vf->vf_id); + return false; + } + + return true; +} + +/** * ice_vc_add_mac_addr - attempt to add the MAC address passed in * @vf: pointer to the VF info * @vsi: pointer to the VF's VSI @@ -1926,10 +2328,8 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, if (ether_addr_equal(mac_addr, vf->dev_lan_addr)) return 0; - if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) { - dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); + if (!ice_vc_can_add_mac(vf, mac_addr)) return -EPERM; - } ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI); if (ret == -EEXIST) { @@ -1944,6 +2344,8 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, return ret; } else { vf->num_mac++; + if (ice_is_mc_lldp_eth_addr(mac_addr)) + ice_vf_update_mac_lldp_num(vf, vsi, true); } ice_vfhw_mac_add(vf, vc_ether_addr); @@ -2038,6 +2440,8 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, ice_vfhw_mac_del(vf, vc_ether_addr); vf->num_mac--; + if (ice_is_mc_lldp_eth_addr(mac_addr)) + ice_vf_update_mac_lldp_num(vf, vsi, false); return 0; } @@ -2229,17 +2633,27 @@ static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf) /** * ice_vf_ena_vlan_promisc - Enable Tx/Rx VLAN promiscuous for the VLAN + * @vf: VF to enable VLAN promisc on * @vsi: VF's VSI used to enable VLAN promiscuous mode * @vlan: VLAN used to enable VLAN promiscuous * * This function should only be called if VLAN promiscuous mode is allowed, * which can be determined via ice_is_vlan_promisc_allowed(). 
*/ -static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan) +static int ice_vf_ena_vlan_promisc(struct ice_vf *vf, struct ice_vsi *vsi, + struct ice_vlan *vlan) { - u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX; + u8 promisc_m = 0; int status; + if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states)) + promisc_m |= ICE_UCAST_VLAN_PROMISC_BITS; + if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) + promisc_m |= ICE_MCAST_VLAN_PROMISC_BITS; + + if (!promisc_m) + return 0; + status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, vlan->vid); if (status && status != -EEXIST) @@ -2258,7 +2672,7 @@ static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan) */ static int ice_vf_dis_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan) { - u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX; + u8 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS | ICE_MCAST_VLAN_PROMISC_BITS; int status; status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m, @@ -2413,7 +2827,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v) goto error_param; } } else if (vlan_promisc) { - status = ice_vf_ena_vlan_promisc(vsi, &vlan); + status = ice_vf_ena_vlan_promisc(vf, vsi, &vlan); if (status) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n", @@ -2696,12 +3110,8 @@ err: static int ice_vc_query_rxdid(struct ice_vf *vf) { enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; - struct virtchnl_supported_rxdids *rxdid = NULL; - struct ice_hw *hw = &vf->pf->hw; struct ice_pf *pf = vf->pf; - int len = 0; - int ret, i; - u32 regval; + u64 rxdid; if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -2713,35 +3123,11 @@ static int ice_vc_query_rxdid(struct ice_vf *vf) goto err; } - len = sizeof(struct virtchnl_supported_rxdids); - rxdid = kzalloc(len, GFP_KERNEL); - if (!rxdid) { - v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; - len = 0; - goto err; - } - - /* RXDIDs supported by DDP package can be read from the register - * to get the supported RXDID bitmap. But the legacy 32byte RXDID - * is not listed in DDP package, add it in the bitmap manually. - * Legacy 16byte descriptor is not supported. 
- */ - rxdid->supported_rxdids |= BIT(ICE_RXDID_LEGACY_1); - - for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) { - regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0)); - if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) - & GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) - rxdid->supported_rxdids |= BIT(i); - } - - pf->supported_rxdids = rxdid->supported_rxdids; + rxdid = pf->supported_rxdids; err: - ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS, - v_ret, (u8 *)rxdid, len); - kfree(rxdid); - return ret; + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS, + v_ret, (u8 *)&rxdid, sizeof(rxdid)); } /** @@ -3250,7 +3636,7 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi, return err; if (vlan_promisc) { - err = ice_vf_ena_vlan_promisc(vsi, &vlan); + err = ice_vf_ena_vlan_promisc(vf, vsi, &vlan); if (err) return err; } @@ -3278,7 +3664,8 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi, */ if (!ice_is_dvm_ena(&vsi->back->hw)) { if (vlan_promisc) { - err = ice_vf_ena_vlan_promisc(vsi, &vlan); + err = ice_vf_ena_vlan_promisc(vf, vsi, + &vlan); if (err) return err; } @@ -3784,6 +4171,59 @@ out: v_ret, NULL, 0); } +static int ice_vc_get_ptp_cap(struct ice_vf *vf, + const struct virtchnl_ptp_caps *msg) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM; + u32 caps = VIRTCHNL_1588_PTP_CAP_RX_TSTAMP | + VIRTCHNL_1588_PTP_CAP_READ_PHC; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) + goto err; + + v_ret = VIRTCHNL_STATUS_SUCCESS; + + if (msg->caps & caps) + vf->ptp_caps = caps; + +err: + /* send the response back to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_CAPS, v_ret, + (u8 *)&vf->ptp_caps, + sizeof(struct virtchnl_ptp_caps)); +} + +static int ice_vc_get_phc_time(struct ice_vf *vf) +{ + enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM; + struct virtchnl_phc_time *phc_time = NULL; + struct ice_pf *pf = vf->pf; + u32 len = 0; + int ret; + + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) + goto err; + + v_ret = VIRTCHNL_STATUS_SUCCESS; + + phc_time = kzalloc(sizeof(*phc_time), GFP_KERNEL); + if (!phc_time) { + v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + goto err; + } + + len = sizeof(*phc_time); + + phc_time->time = ice_ptp_read_src_clk_reg(pf, NULL); + +err: + /* send the response back to the VF */ + ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_TIME, v_ret, + (u8 *)phc_time, len); + kfree(phc_time); + return ret; +} + static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = { .get_ver_msg = ice_vc_get_ver_msg, .get_vf_res_msg = ice_vc_get_vf_res_msg, @@ -3817,6 +4257,14 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = { .dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg, .ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg, .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg, + .get_qos_caps = ice_vc_get_qos_caps, + .cfg_q_bw = ice_vc_cfg_q_bw, + .cfg_q_quanta = ice_vc_cfg_q_quanta, + .get_ptp_cap = ice_vc_get_ptp_cap, + .get_phc_time = ice_vc_get_phc_time, + /* If you add a new op here please make sure to add it to + * ice_virtchnl_repr_ops as well. 
+ */ }; /** @@ -3874,7 +4322,6 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg) } ice_vfhw_mac_add(vf, &al->list[i]); - vf->num_mac++; break; } @@ -3947,6 +4394,11 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = { .dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg, .ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg, .dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg, + .get_qos_caps = ice_vc_get_qos_caps, + .cfg_q_bw = ice_vc_cfg_q_bw, + .cfg_q_quanta = ice_vc_cfg_q_quanta, + .get_ptp_cap = ice_vc_get_ptp_cap, + .get_phc_time = ice_vc_get_phc_time, }; /** @@ -4005,8 +4457,10 @@ ice_is_malicious_vf(struct ice_vf *vf, struct ice_mbx_data *mbxdata) * @event: pointer to the AQ event * @mbxdata: information used to detect VF attempting mailbox overflow * - * called from the common asq/arq handler to - * process request from VF + * Called from the common asq/arq handler to process request from VF. When this + * flow is used for devices with hardware VF to PF message queue overflow + * support (ICE_F_MBX_LIMIT) mbxdata is set to NULL and ice_is_malicious_vf + * check is skipped. */ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event, struct ice_mbx_data *mbxdata) @@ -4032,7 +4486,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event, mutex_lock(&vf->cfg_lock); /* Check if the VF is trying to overflow the mailbox */ - if (ice_is_malicious_vf(vf, mbxdata)) + if (mbxdata && ice_is_malicious_vf(vf, mbxdata)) goto finish; /* Check if VF is disabled. */ @@ -4173,6 +4627,21 @@ error_handler: case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: err = ops->dis_vlan_insertion_v2_msg(vf, msg); break; + case VIRTCHNL_OP_GET_QOS_CAPS: + err = ops->get_qos_caps(vf); + break; + case VIRTCHNL_OP_CONFIG_QUEUE_BW: + err = ops->cfg_q_bw(vf, msg); + break; + case VIRTCHNL_OP_CONFIG_QUANTA: + err = ops->cfg_q_quanta(vf, msg); + break; + case VIRTCHNL_OP_1588_PTP_GET_CAPS: + err = ops->get_ptp_cap(vf, (const void *)msg); + break; + case VIRTCHNL_OP_1588_PTP_GET_TIME: + err = ops->get_phc_time(vf); + break; case VIRTCHNL_OP_UNKNOWN: default: dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode, diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h index 3a4115869153..222990f229d5 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h @@ -13,12 +13,22 @@ /* Restrict number of MAC Addr and VLAN that non-trusted VF can programmed */ #define ICE_MAX_VLAN_PER_VF 8 +#define ICE_DFLT_QUANTA 1024 +#define ICE_MAX_QUANTA_SIZE 4096 +#define ICE_MIN_QUANTA_SIZE 256 + +#define calc_quanta_desc(x) \ + max_t(u16, 12, min_t(u16, 63, (((x) + 66) / 132) * 2 + 4)) + /* MAC filters: 1 is reserved for the VF's default/perm_addr/LAA MAC, 1 for * broadcast, and 16 for additional unicast/multicast filters */ #define ICE_MAX_MACADDR_PER_VF 18 #define ICE_FLEX_DESC_RXDID_MAX_NUM 64 +/* Priority to be compared against previous priority from the pipe */ +#define ICE_RXDID_PRIO 0x03 + /* VFs only get a single VSI. For ice hardware, the VF does not need to know * its VSI index. However, the virtchnl interface requires a VSI number, * mainly due to legacy hardware. 
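The calc_quanta_desc() helper added just above derives the per-profile descriptor budget from the quanta size, clamped to the 12..63 range; ice_vf_cfg_q_quanta_profile() then programs twice that value as the command budget. A worked check of the formula at the minimum, default and maximum sizes defined above:

    #include <stdint.h>
    #include <stdio.h>

    /* same arithmetic as calc_quanta_desc(x) above */
    static uint16_t quanta_desc(uint16_t x)
    {
        uint16_t v = ((x + 66) / 132) * 2 + 4;

        if (v < 12)
            v = 12;
        return v > 63 ? 63 : v;
    }

    int main(void)
    {
        const uint16_t sizes[] = { 256, 1024, 4096 }; /* min, default, max */

        for (int i = 0; i < 3; i++)
            printf("quanta %4u -> %2u descriptors, %3u commands\n",
                   sizes[i], quanta_desc(sizes[i]), 2 * quanta_desc(sizes[i]));
        return 0;
    }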
@@ -61,6 +71,13 @@ struct ice_virtchnl_ops { int (*dis_vlan_stripping_v2_msg)(struct ice_vf *vf, u8 *msg); int (*ena_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg); int (*dis_vlan_insertion_v2_msg)(struct ice_vf *vf, u8 *msg); + int (*get_qos_caps)(struct ice_vf *vf); + int (*cfg_q_tc_map)(struct ice_vf *vf, u8 *msg); + int (*cfg_q_bw)(struct ice_vf *vf, u8 *msg); + int (*cfg_q_quanta)(struct ice_vf *vf, u8 *msg); + int (*get_ptp_cap)(struct ice_vf *vf, + const struct virtchnl_ptp_caps *msg); + int (*get_phc_time)(struct ice_vf *vf); }; #ifdef CONFIG_PCI_IOV diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c index d796dbd2a440..a3d1579a619a 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c @@ -84,6 +84,17 @@ static const u32 fdir_pf_allowlist_opcodes[] = { VIRTCHNL_OP_ADD_FDIR_FILTER, VIRTCHNL_OP_DEL_FDIR_FILTER, }; +/* VIRTCHNL_VF_CAP_PTP */ +static const u32 ptp_allowlist_opcodes[] = { + VIRTCHNL_OP_1588_PTP_GET_CAPS, + VIRTCHNL_OP_1588_PTP_GET_TIME, +}; + +static const u32 tc_allowlist_opcodes[] = { + VIRTCHNL_OP_GET_QOS_CAPS, VIRTCHNL_OP_CONFIG_QUEUE_BW, + VIRTCHNL_OP_CONFIG_QUANTA, +}; + struct allowlist_opcode_info { const u32 *opcodes; size_t size; @@ -104,6 +115,8 @@ static const struct allowlist_opcode_info allowlist_opcodes[] = { ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF, adv_rss_pf_allowlist_opcodes), ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_FDIR_PF, fdir_pf_allowlist_opcodes), ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN_V2, vlan_v2_allowlist_opcodes), + ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_QOS, tc_allowlist_opcodes), + ALLOW_ITEM(VIRTCHNL_VF_CAP_PTP, ptp_allowlist_opcodes), }; /** diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c index 8e4ff3af86c6..1cca9b2262e8 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -26,6 +26,15 @@ enum ice_fdir_tunnel_type { ICE_FDIR_TUNNEL_TYPE_NONE = 0, ICE_FDIR_TUNNEL_TYPE_GTPU, ICE_FDIR_TUNNEL_TYPE_GTPU_EH, + ICE_FDIR_TUNNEL_TYPE_ECPRI, + ICE_FDIR_TUNNEL_TYPE_GTPU_INNER, + ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER, + ICE_FDIR_TUNNEL_TYPE_GRE, + ICE_FDIR_TUNNEL_TYPE_GTPOGRE, + ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER, + ICE_FDIR_TUNNEL_TYPE_GRE_INNER, + ICE_FDIR_TUNNEL_TYPE_L2TPV2, + ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER, }; struct virtchnl_fdir_fltr_conf { @@ -33,6 +42,11 @@ struct virtchnl_fdir_fltr_conf { enum ice_fdir_tunnel_type ttype; u64 inset_flag; u32 flow_id; + + struct ice_parser_profile *prof; + bool parser_ena; + u8 *pkt_buf; + u8 pkt_len; }; struct virtchnl_fdir_inset_map { @@ -536,6 +550,8 @@ static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir) fdir->fdir_fltr_cnt[flow][0] = 0; fdir->fdir_fltr_cnt[flow][1] = 0; } + + fdir->fdir_fltr_cnt_total = 0; } /** @@ -785,6 +801,113 @@ err_exit: } /** + * ice_vc_fdir_is_raw_flow - check if FDIR flow is raw (binary) + * @proto: virtchnl protocol headers + * + * Check if the FDIR rule is raw flow (protocol agnostic flow) or not. Note + * that common FDIR rule must have non-zero proto->count. Thus, we choose the + * tunnel_level and count of proto as the indicators. If both tunnel_level and + * count of proto are zero, this FDIR rule will be regarded as raw flow. + * + * Returns: true if headers describe raw flow, false otherwise. 
+ */ +static bool +ice_vc_fdir_is_raw_flow(struct virtchnl_proto_hdrs *proto) +{ + return (proto->tunnel_level == 0 && proto->count == 0); +} + +/** + * ice_vc_fdir_parse_raw - parse a virtchnl raw FDIR rule + * @vf: pointer to the VF info + * @proto: virtchnl protocol headers + * @conf: FDIR configuration for each filter + * + * Parse the virtual channel filter's raw flow and store it in @conf + * + * Return: 0 on success or negative errno on failure. + */ +static int +ice_vc_fdir_parse_raw(struct ice_vf *vf, + struct virtchnl_proto_hdrs *proto, + struct virtchnl_fdir_fltr_conf *conf) +{ + u8 *pkt_buf, *msk_buf __free(kfree) = NULL; + struct ice_parser_result rslt; + struct ice_pf *pf = vf->pf; + u16 pkt_len, udp_port = 0; + struct ice_parser *psr; + int status = -ENOMEM; + struct ice_hw *hw; + + pkt_len = proto->raw.pkt_len; + + if (!pkt_len || pkt_len > VIRTCHNL_MAX_SIZE_RAW_PACKET) + return -EINVAL; + + pkt_buf = kzalloc(pkt_len, GFP_KERNEL); + msk_buf = kzalloc(pkt_len, GFP_KERNEL); + + if (!pkt_buf || !msk_buf) + goto err_mem_alloc; + + memcpy(pkt_buf, proto->raw.spec, pkt_len); + memcpy(msk_buf, proto->raw.mask, pkt_len); + + hw = &pf->hw; + + /* Get raw profile info via Parser Lib */ + psr = ice_parser_create(hw); + if (IS_ERR(psr)) { + status = PTR_ERR(psr); + goto err_mem_alloc; + } + + ice_parser_dvm_set(psr, ice_is_dvm_ena(hw)); + + if (ice_get_open_tunnel_port(hw, &udp_port, TNL_VXLAN)) + ice_parser_vxlan_tunnel_set(psr, udp_port, true); + + status = ice_parser_run(psr, pkt_buf, pkt_len, &rslt); + if (status) + goto err_parser_destroy; + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_parser_result_dump(hw, &rslt); + + conf->prof = kzalloc(sizeof(*conf->prof), GFP_KERNEL); + if (!conf->prof) { + status = -ENOMEM; + goto err_parser_destroy; + } + + status = ice_parser_profile_init(&rslt, pkt_buf, msk_buf, + pkt_len, ICE_BLK_FD, + conf->prof); + if (status) + goto err_parser_profile_init; + + if (hw->debug_mask & ICE_DBG_PARSER) + ice_parser_profile_dump(hw, conf->prof); + + /* Store raw flow info into @conf */ + conf->pkt_len = pkt_len; + conf->pkt_buf = pkt_buf; + conf->parser_ena = true; + + ice_parser_destroy(psr); + return 0; + +err_parser_profile_init: + kfree(conf->prof); +err_parser_destroy: + ice_parser_destroy(psr); +err_mem_alloc: + kfree(pkt_buf); + return status; +} + +/** * ice_vc_fdir_parse_pattern * @vf: pointer to the VF info * @fltr: virtual channel add cmd buffer @@ -811,6 +934,10 @@ ice_vc_fdir_parse_pattern(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, return -EINVAL; } + /* For raw FDIR filters created by the parser */ + if (ice_vc_fdir_is_raw_flow(proto)) + return ice_vc_fdir_parse_raw(vf, proto, conf); + for (i = 0; i < proto->count; i++) { struct virtchnl_proto_hdr *hdr = &proto->proto_hdr[i]; struct ip_esp_hdr *esph; @@ -1099,8 +1226,10 @@ ice_vc_validate_fdir_fltr(struct ice_vf *vf, struct virtchnl_fdir_add *fltr, struct virtchnl_proto_hdrs *proto = &fltr->rule_cfg.proto_hdrs; int ret; - if (!ice_vc_validate_pattern(vf, proto)) - return -EINVAL; + /* For raw FDIR filters created by the parser */ + if (!ice_vc_fdir_is_raw_flow(proto)) + if (!ice_vc_validate_pattern(vf, proto)) + return -EINVAL; ret = ice_vc_fdir_parse_pattern(vf, fltr, conf); if (ret) @@ -1293,11 +1422,15 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf, return -ENOMEM; ice_fdir_get_prgm_desc(hw, input, &desc, add); - ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); - if (ret) { - dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n", - vf->vf_id, 
input->flow_type); - goto err_free_pkt; + if (conf->parser_ena) { + memcpy(pkt, conf->pkt_buf, conf->pkt_len); + } else { + ret = ice_fdir_get_gen_prgm_pkt(hw, input, pkt, false, is_tun); + if (ret) { + dev_dbg(dev, "Gen training pkt for VF %d ptype %d failed\n", + vf->vf_id, input->flow_type); + goto err_free_pkt; + } } ret = ice_prgm_fdir_fltr(ctrl_vsi, &desc, pkt); @@ -1388,7 +1521,7 @@ ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi, memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc)); spin_unlock_irqrestore(&fdir->ctx_lock, flags); - ret = del_timer(&ctx_irq->rx_tmr); + ret = timer_delete(&ctx_irq->rx_tmr); if (!ret) dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id); @@ -1519,6 +1652,16 @@ err_exit: return ret; } +static int ice_fdir_is_tunnel(enum ice_fdir_tunnel_type ttype) +{ + return (ttype == ICE_FDIR_TUNNEL_TYPE_GRE_INNER || + ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_INNER || + ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH_INNER || + ttype == ICE_FDIR_TUNNEL_TYPE_GTPOGRE_INNER || + ttype == ICE_FDIR_TUNNEL_TYPE_ECPRI || + ttype == ICE_FDIR_TUNNEL_TYPE_L2TPV2_INNER); +} + /** * ice_vc_add_fdir_fltr_post * @vf: pointer to the VF structure @@ -1560,6 +1703,7 @@ ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx, resp->status = status; resp->flow_id = conf->flow_id; vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++; + vf->fdir.fdir_fltr_cnt_total++; ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret, (u8 *)resp, len); @@ -1624,6 +1768,7 @@ ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx, resp->status = status; ice_vc_fdir_remove_entry(vf, conf, conf->flow_id); vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--; + vf->fdir.fdir_fltr_cnt_total--; ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret, (u8 *)resp, len); @@ -1771,13 +1916,165 @@ static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf) struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq; unsigned long flags; - del_timer(&ctx->rx_tmr); + timer_delete(&ctx->rx_tmr); spin_lock_irqsave(&vf->fdir.ctx_lock, flags); ctx->flags &= ~ICE_VF_FDIR_CTX_VALID; spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags); } /** + * ice_vc_parser_fv_check_diff - check two parsed FDIR profile fv context + * @fv_a: struct of parsed FDIR profile field vector + * @fv_b: struct of parsed FDIR profile field vector + * + * Check if the two parsed FDIR profile field vector context are different, + * including proto_id, offset and mask. + * + * Return: true on different, false on otherwise. + */ +static bool ice_vc_parser_fv_check_diff(struct ice_parser_fv *fv_a, + struct ice_parser_fv *fv_b) +{ + return (fv_a->proto_id != fv_b->proto_id || + fv_a->offset != fv_b->offset || + fv_a->msk != fv_b->msk); +} + +/** + * ice_vc_parser_fv_save - save parsed FDIR profile fv context + * @fv: struct of parsed FDIR profile field vector + * @fv_src: parsed FDIR profile field vector context to save + * + * Save the parsed FDIR profile field vector context, including proto_id, + * offset and mask. + * + * Return: Void. 
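ice_vc_parser_fv_check_diff() compares only the proto_id, offset and mask of two field-vector words, and ice_vc_parser_fv_save() (declared just above) stores exactly those three fields while forcing spec to zero, so a saved profile matches later rules regardless of the packet contents that created it. A standalone sketch of that compare-then-save idea, using a hypothetical struct in place of struct ice_parser_fv:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for struct ice_parser_fv */
struct fv_word {
	uint8_t proto_id;
	uint16_t offset;
	uint16_t spec;	/* per-rule match value, ignored when comparing */
	uint16_t msk;
};

static bool fv_differs(const struct fv_word *a, const struct fv_word *b)
{
	return a->proto_id != b->proto_id ||
	       a->offset != b->offset ||
	       a->msk != b->msk;
}

static void fv_save(struct fv_word *dst, const struct fv_word *src)
{
	dst->proto_id = src->proto_id;
	dst->offset = src->offset;
	dst->msk = src->msk;
	dst->spec = 0;	/* spec is rule data, not part of the profile */
}

int main(void)
{
	struct fv_word cached, parsed = { .proto_id = 4, .offset = 12,
					  .spec = 0xabcd, .msk = 0xffff };

	fv_save(&cached, &parsed);
	/* Same proto/offset/mask but a different spec still matches */
	parsed.spec = 0x1234;
	printf("profiles differ: %d\n", fv_differs(&cached, &parsed));	/* 0 */
	return 0;
}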
+ */ +static void ice_vc_parser_fv_save(struct ice_parser_fv *fv, + struct ice_parser_fv *fv_src) +{ + fv->proto_id = fv_src->proto_id; + fv->offset = fv_src->offset; + fv->msk = fv_src->msk; + fv->spec = 0; +} + +/** + * ice_vc_add_fdir_raw - add a raw FDIR filter for VF + * @vf: pointer to the VF info + * @conf: FDIR configuration for each filter + * @v_ret: the final VIRTCHNL code + * @stat: pointer to the VIRTCHNL_OP_ADD_FDIR_FILTER + * @len: length of the stat + * + * Return: 0 on success or negative errno on failure. + */ +static int +ice_vc_add_fdir_raw(struct ice_vf *vf, + struct virtchnl_fdir_fltr_conf *conf, + enum virtchnl_status_code *v_ret, + struct virtchnl_fdir_add *stat, int len) +{ + struct ice_vsi *vf_vsi, *ctrl_vsi; + struct ice_fdir_prof_info *pi; + struct ice_pf *pf = vf->pf; + int ret, ptg, id, i; + struct device *dev; + struct ice_hw *hw; + bool fv_found; + + dev = ice_pf_to_dev(pf); + hw = &pf->hw; + *v_ret = VIRTCHNL_STATUS_ERR_PARAM; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + + id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX); + ptg = hw->blk[ICE_BLK_FD].xlt1.t[id]; + + vf_vsi = ice_get_vf_vsi(vf); + if (!vf_vsi) { + dev_err(dev, "Can not get FDIR vf_vsi for VF %d\n", vf->vf_id); + return -ENODEV; + } + + ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx]; + if (!ctrl_vsi) { + dev_err(dev, "Can not get FDIR ctrl_vsi for VF %d\n", + vf->vf_id); + return -ENODEV; + } + + fv_found = false; + + /* Check if profile info already exists, then update the counter */ + pi = &vf->fdir_prof_info[ptg]; + if (pi->fdir_active_cnt != 0) { + for (i = 0; i < ICE_MAX_FV_WORDS; i++) + if (ice_vc_parser_fv_check_diff(&pi->prof.fv[i], + &conf->prof->fv[i])) + break; + if (i == ICE_MAX_FV_WORDS) { + fv_found = true; + pi->fdir_active_cnt++; + } + } + + /* HW profile setting is only required for the first time */ + if (!fv_found) { + ret = ice_flow_set_parser_prof(hw, vf_vsi->idx, + ctrl_vsi->idx, conf->prof, + ICE_BLK_FD); + + if (ret) { + *v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + dev_dbg(dev, "VF %d: insert hw prof failed\n", + vf->vf_id); + return ret; + } + } + + ret = ice_vc_fdir_insert_entry(vf, conf, &conf->flow_id); + if (ret) { + *v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY; + dev_dbg(dev, "VF %d: insert FDIR list failed\n", + vf->vf_id); + return ret; + } + + ret = ice_vc_fdir_set_irq_ctx(vf, conf, + VIRTCHNL_OP_ADD_FDIR_FILTER); + if (ret) { + dev_dbg(dev, "VF %d: set FDIR context failed\n", + vf->vf_id); + goto err_rem_entry; + } + + ret = ice_vc_fdir_write_fltr(vf, conf, true, false); + if (ret) { + dev_err(dev, "VF %d: adding FDIR raw flow rule failed, ret:%d\n", + vf->vf_id, ret); + goto err_clr_irq; + } + + /* Save parsed profile fv info of the FDIR rule for the first time */ + if (!fv_found) { + for (i = 0; i < conf->prof->fv_num; i++) + ice_vc_parser_fv_save(&pi->prof.fv[i], + &conf->prof->fv[i]); + pi->prof.fv_num = conf->prof->fv_num; + pi->fdir_active_cnt = 1; + } + + return 0; + +err_clr_irq: + ice_vc_fdir_clear_irq_ctx(vf); +err_rem_entry: + ice_vc_fdir_remove_entry(vf, conf, conf->flow_id); + return ret; +} + +/** * ice_vc_add_fdir_fltr - add a FDIR filter for VF by the msg buffer * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -1790,6 +2087,7 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg) struct virtchnl_fdir_add *stat = NULL; struct virtchnl_fdir_fltr_conf *conf; enum virtchnl_status_code v_ret; + struct ice_vsi *vf_vsi; struct device *dev; struct ice_pf *pf; int is_tun = 0; @@ -1798,6 +2096,22 @@ int ice_vc_add_fdir_fltr(struct 
ice_vf *vf, u8 *msg) pf = vf->pf; dev = ice_pf_to_dev(pf); + vf_vsi = ice_get_vf_vsi(vf); + if (!vf_vsi) { + dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id); + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + goto err_exit; + } + +#define ICE_VF_MAX_FDIR_FILTERS 128 + if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) || + vf->fdir.fdir_fltr_cnt_total >= ICE_VF_MAX_FDIR_FILTERS) { + v_ret = VIRTCHNL_STATUS_ERR_PARAM; + dev_err(dev, "Max number of FDIR filters for VF %d is reached\n", + vf->vf_id); + goto err_exit; + } + ret = ice_vc_fdir_param_check(vf, fltr->vsi_id); if (ret) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; @@ -1830,7 +2144,7 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg) len = sizeof(*stat); ret = ice_vc_validate_fdir_fltr(vf, fltr, conf); if (ret) { - v_ret = VIRTCHNL_STATUS_SUCCESS; + v_ret = VIRTCHNL_STATUS_ERR_PARAM; stat->status = VIRTCHNL_FDIR_FAILURE_RULE_INVALID; dev_dbg(dev, "Invalid FDIR filter from VF %d\n", vf->vf_id); goto err_free_conf; @@ -1845,6 +2159,15 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg) goto exit; } + /* For raw FDIR filters created by the parser */ + if (conf->parser_ena) { + ret = ice_vc_add_fdir_raw(vf, conf, &v_ret, stat, len); + if (ret) + goto err_free_conf; + goto exit; + } + + is_tun = ice_fdir_is_tunnel(conf->ttype); ret = ice_vc_fdir_config_input_set(vf, fltr, conf, is_tun); if (ret) { v_ret = VIRTCHNL_STATUS_SUCCESS; @@ -1906,6 +2229,78 @@ err_exit: } /** + * ice_vc_del_fdir_raw - delete a raw FDIR filter for VF + * @vf: pointer to the VF info + * @conf: FDIR configuration for each filter + * @v_ret: the final VIRTCHNL code + * @stat: pointer to the VIRTCHNL_OP_DEL_FDIR_FILTER + * @len: length of the stat + * + * Return: 0 on success or negative errno on failure. + */ +static int +ice_vc_del_fdir_raw(struct ice_vf *vf, + struct virtchnl_fdir_fltr_conf *conf, + enum virtchnl_status_code *v_ret, + struct virtchnl_fdir_del *stat, int len) +{ + struct ice_vsi *vf_vsi, *ctrl_vsi; + enum ice_block blk = ICE_BLK_FD; + struct ice_fdir_prof_info *pi; + struct ice_pf *pf = vf->pf; + struct device *dev; + struct ice_hw *hw; + unsigned long id; + u16 vsi_num; + int ptg; + int ret; + + dev = ice_pf_to_dev(pf); + hw = &pf->hw; + *v_ret = VIRTCHNL_STATUS_ERR_PARAM; + stat->status = VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE; + + id = find_first_bit(conf->prof->ptypes, ICE_FLOW_PTYPE_MAX); + ptg = hw->blk[ICE_BLK_FD].xlt1.t[id]; + + ret = ice_vc_fdir_write_fltr(vf, conf, false, false); + if (ret) { + dev_err(dev, "VF %u: deleting FDIR raw flow rule failed: %d\n", + vf->vf_id, ret); + return ret; + } + + vf_vsi = ice_get_vf_vsi(vf); + if (!vf_vsi) { + dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id); + return -ENODEV; + } + + ctrl_vsi = pf->vsi[vf->ctrl_vsi_idx]; + if (!ctrl_vsi) { + dev_err(dev, "Can not get FDIR ctrl_vsi for VF %u\n", + vf->vf_id); + return -ENODEV; + } + + pi = &vf->fdir_prof_info[ptg]; + if (pi->fdir_active_cnt != 0) { + pi->fdir_active_cnt--; + /* Remove the profile id flow if no active FDIR rule left */ + if (!pi->fdir_active_cnt) { + vsi_num = ice_get_hw_vsi_num(hw, ctrl_vsi->idx); + ice_rem_prof_id_flow(hw, blk, vsi_num, id); + + vsi_num = ice_get_hw_vsi_num(hw, vf_vsi->idx); + ice_rem_prof_id_flow(hw, blk, vsi_num, id); + } + } + + conf->parser_ena = false; + return 0; +} + +/** * ice_vc_del_fdir_fltr - delete a FDIR filter for VF by the msg buffer * @vf: pointer to the VF info * @msg: pointer to the msg buffer @@ -1917,7 +2312,10 @@ int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg) struct 
virtchnl_fdir_del *fltr = (struct virtchnl_fdir_del *)msg; struct virtchnl_fdir_del *stat = NULL; struct virtchnl_fdir_fltr_conf *conf; + struct ice_vf_fdir *fdir = &vf->fdir; enum virtchnl_status_code v_ret; + struct ice_fdir_fltr *input; + enum ice_fltr_ptype flow; struct device *dev; struct ice_pf *pf; int is_tun = 0; @@ -1967,6 +2365,15 @@ int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg) goto err_exit; } + /* For raw FDIR filters created by the parser */ + if (conf->parser_ena) { + ret = ice_vc_del_fdir_raw(vf, conf, &v_ret, stat, len); + if (ret) + goto err_del_tmr; + goto exit; + } + + is_tun = ice_fdir_is_tunnel(conf->ttype); ret = ice_vc_fdir_write_fltr(vf, conf, false, is_tun); if (ret) { v_ret = VIRTCHNL_STATUS_SUCCESS; @@ -1976,6 +2383,13 @@ int ice_vc_del_fdir_fltr(struct ice_vf *vf, u8 *msg) goto err_del_tmr; } + /* Remove unused profiles to avoid unexpected behaviors */ + input = &conf->input; + flow = input->flow_type; + if (fdir->fdir_fltr_cnt[flow][is_tun] == 1) + ice_vc_fdir_rem_prof(vf, flow, is_tun); + +exit: kfree(stat); return ret; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h index c5bcc8d7481c..ac6dcab454b4 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h @@ -29,6 +29,7 @@ struct ice_vf_fdir_ctx { struct ice_vf_fdir { u16 fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; int prof_entry_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX]; + u16 fdir_fltr_cnt_total; struct ice_fd_hw_prof **fdir_prof; struct idr fdir_rule_idr; diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c index 2e9ad27cb9d1..5291f2888ef8 100644 --- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c @@ -45,14 +45,15 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan) return -EINVAL; err = ice_fltr_add_vlan(vsi, vlan); - if (err && err != -EEXIST) { + if (!err) + vsi->num_vlan++; + else if (err == -EEXIST) + err = 0; + else dev_err(ice_pf_to_dev(vsi->back), "Failure Adding VLAN %d on VSI %i, status %d\n", vlan->vid, vsi->vsi_num, err); - return err; - } - vsi->num_vlan++; - return 0; + return err; } /** @@ -786,3 +787,60 @@ int ice_vsi_clear_outer_port_vlan(struct ice_vsi *vsi) kfree(ctxt); return err; } + +int ice_vsi_clear_port_vlan(struct ice_vsi *vsi) +{ + struct ice_hw *hw = &vsi->back->hw; + struct ice_vsi_ctx *ctxt; + int err; + + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); + if (!ctxt) + return -ENOMEM; + + ctxt->info = vsi->info; + + ctxt->info.port_based_outer_vlan = 0; + ctxt->info.port_based_inner_vlan = 0; + + ctxt->info.inner_vlan_flags = + FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_TX_MODE_M, + ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL); + if (ice_is_dvm_ena(hw)) { + ctxt->info.inner_vlan_flags |= + FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M, + ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING); + ctxt->info.outer_vlan_flags = + FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M, + ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL); + ctxt->info.outer_vlan_flags |= + FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M, + ICE_AQ_VSI_OUTER_TAG_VLAN_8100); + ctxt->info.outer_vlan_flags |= + ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING << + ICE_AQ_VSI_OUTER_VLAN_EMODE_S; + } + + ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; + ctxt->info.valid_sections = + cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID | + ICE_AQ_VSI_PROP_VLAN_VALID | + ICE_AQ_VSI_PROP_SW_VALID); + + err = 
ice_update_vsi(hw, vsi->idx, ctxt, NULL); + if (err) { + dev_err(ice_pf_to_dev(vsi->back), "update VSI for clearing port based VLAN failed, err %d aq_err %s\n", + err, ice_aq_str(hw->adminq.sq_last_status)); + } else { + vsi->info.port_based_outer_vlan = + ctxt->info.port_based_outer_vlan; + vsi->info.port_based_inner_vlan = + ctxt->info.port_based_inner_vlan; + vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags; + vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags; + vsi->info.sw_flags2 = ctxt->info.sw_flags2; + } + + kfree(ctxt); + return err; +} diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h index f0d84d11bd5b..12b227621a7d 100644 --- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h @@ -36,5 +36,6 @@ int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid); int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi); int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan); int ice_vsi_clear_outer_port_vlan(struct ice_vsi *vsi); +int ice_vsi_clear_port_vlan(struct ice_vsi *vsi); #endif /* _ICE_VSI_VLAN_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c index 4a6c850d83ac..8c7a9b41fb63 100644 --- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c +++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c @@ -3,6 +3,7 @@ #include "ice_pf_vsi_vlan_ops.h" #include "ice_vf_vsi_vlan_ops.h" +#include "ice_sf_vsi_vlan_ops.h" #include "ice_lib.h" #include "ice.h" @@ -72,12 +73,14 @@ void ice_vsi_init_vlan_ops(struct ice_vsi *vsi) switch (vsi->type) { case ICE_VSI_PF: - case ICE_VSI_SWITCHDEV_CTRL: ice_pf_vsi_init_vlan_ops(vsi); break; case ICE_VSI_VF: ice_vf_vsi_init_vlan_ops(vsi); break; + case ICE_VSI_SF: + ice_sf_vsi_init_vlan_ops(vsi); + break; default: dev_dbg(ice_pf_to_dev(vsi->back), "%s does not support VLAN operations\n", ice_vsi_type_str(vsi->type)); diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 1857220d27fe..a3a4eaa17739 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -2,6 +2,7 @@ /* Copyright (c) 2019, Intel Corporation. 
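ice_vsi_clear_port_vlan() above assembles the VSI context VLAN flags with FIELD_PREP(), which shifts a value into the position described by a contiguous bit mask. Below is a simplified, runtime-only approximation of that helper for illustration; the real macro lives in linux/bitfield.h and adds compile-time checking, and the masks and values used here are made up rather than the real ICE_AQ_VSI_* definitions. The ctz builtin is GCC/Clang specific.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for the kernel's FIELD_PREP(): shift val to the mask's low bit */
#define FIELD_PREP_SIMPLE(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

int main(void)
{
	/* Illustrative masks only, not the real ICE_AQ_VSI_* values */
	const uint32_t TX_MODE_M = 0x3 << 0;
	const uint32_t EMODE_M = 0x3 << 3;

	uint32_t flags = FIELD_PREP_SIMPLE(TX_MODE_M, 0x3) |
			 FIELD_PREP_SIMPLE(EMODE_M, 0x2);

	printf("vlan flags = 0x%02x\n", flags);	/* 0x13 */
	return 0;
}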
*/ #include <linux/bpf_trace.h> +#include <linux/unroll.h> #include <net/xdp_sock_drv.h> #include <net/xdp.h> #include "ice.h" @@ -39,7 +40,7 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx) sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats)); memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0, sizeof(vsi_stat->tx_ring_stats[q_idx]->stats)); - if (ice_is_xdp_ena_vsi(vsi)) + if (vsi->xdp_rings) memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0, sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats)); } @@ -52,10 +53,8 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx) static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx) { ice_clean_tx_ring(vsi->tx_rings[q_idx]); - if (ice_is_xdp_ena_vsi(vsi)) { - synchronize_rcu(); + if (vsi->xdp_rings) ice_clean_tx_ring(vsi->xdp_rings[q_idx]); - } ice_clean_rx_ring(vsi->rx_rings[q_idx]); } @@ -112,25 +111,29 @@ ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring, * ice_qvec_cfg_msix - Enable IRQ for given queue vector * @vsi: the VSI that contains queue vector * @q_vector: queue vector + * @qid: queue index */ static void -ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector) +ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid) { u16 reg_idx = q_vector->reg_idx; struct ice_pf *pf = vsi->back; struct ice_hw *hw = &pf->hw; - struct ice_tx_ring *tx_ring; - struct ice_rx_ring *rx_ring; + int q, _qid = qid; ice_cfg_itr(hw, q_vector); - ice_for_each_tx_ring(tx_ring, q_vector->tx) - ice_cfg_txq_interrupt(vsi, tx_ring->reg_idx, reg_idx, - q_vector->tx.itr_idx); + for (q = 0; q < q_vector->num_ring_tx; q++) { + ice_cfg_txq_interrupt(vsi, _qid, reg_idx, q_vector->tx.itr_idx); + _qid++; + } + + _qid = qid; - ice_for_each_rx_ring(rx_ring, q_vector->rx) - ice_cfg_rxq_interrupt(vsi, rx_ring->reg_idx, reg_idx, - q_vector->rx.itr_idx); + for (q = 0; q < q_vector->num_ring_rx; q++) { + ice_cfg_rxq_interrupt(vsi, _qid, reg_idx, q_vector->rx.itr_idx); + _qid++; + } ice_flush(hw); } @@ -163,7 +166,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) struct ice_q_vector *q_vector; struct ice_tx_ring *tx_ring; struct ice_rx_ring *rx_ring; - int timeout = 50; + int fail = 0; int err; if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq) @@ -173,40 +176,33 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) rx_ring = vsi->rx_rings[q_idx]; q_vector = rx_ring->q_vector; - while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) { - timeout--; - if (!timeout) - return -EBUSY; - usleep_range(1000, 2000); - } + synchronize_net(); + netif_carrier_off(vsi->netdev); + netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); ice_qvec_dis_irq(vsi, rx_ring, q_vector); ice_qvec_toggle_napi(vsi, q_vector, false); - netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); - ice_fill_txq_meta(vsi, tx_ring, &txq_meta); err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta); - if (err) - return err; - if (ice_is_xdp_ena_vsi(vsi)) { + if (!fail) + fail = err; + if (vsi->xdp_rings) { struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx]; memset(&txq_meta, 0, sizeof(txq_meta)); ice_fill_txq_meta(vsi, xdp_ring, &txq_meta); err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring, &txq_meta); - if (err) - return err; + if (!fail) + fail = err; } - err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true); - if (err) - return err; + ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false); ice_qp_clean_rings(vsi, q_idx); ice_qp_reset_stats(vsi, q_idx); - return 0; + return fail; } /** @@ 
-219,40 +215,47 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) { struct ice_q_vector *q_vector; + int fail = 0; + bool link_up; int err; err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx); - if (err) - return err; + if (!fail) + fail = err; if (ice_is_xdp_ena_vsi(vsi)) { struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx]; err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx); - if (err) - return err; + if (!fail) + fail = err; ice_set_ring_xdp(xdp_ring); ice_tx_xsk_pool(vsi, q_idx); } err = ice_vsi_cfg_single_rxq(vsi, q_idx); - if (err) - return err; + if (!fail) + fail = err; q_vector = vsi->rx_rings[q_idx]->q_vector; - ice_qvec_cfg_msix(vsi, q_vector); + ice_qvec_cfg_msix(vsi, q_vector, q_idx); err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true); - if (err) - return err; + if (!fail) + fail = err; ice_qvec_toggle_napi(vsi, q_vector, true); ice_qvec_ena_irq(vsi, q_vector); - netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); - clear_bit(ICE_CFG_BUSY, vsi->state); + /* make sure NAPI sees updated ice_{t,x}_ring::xsk_pool */ + synchronize_net(); + ice_get_link_status(vsi->port_info, &link_up); + if (link_up) { + netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); + netif_carrier_on(vsi->netdev); + } - return 0; + return fail; } /** @@ -269,7 +272,6 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid) if (!pool) return -EINVAL; - clear_bit(qid, vsi->af_xdp_zc_qps); xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR); return 0; @@ -288,7 +290,7 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) { int err; - if (vsi->type != ICE_VSI_PF) + if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF) return -EINVAL; if (qid >= vsi->netdev->real_num_rx_queues || @@ -300,8 +302,6 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) if (err) return err; - set_bit(qid, vsi->af_xdp_zc_qps); - return 0; } @@ -349,11 +349,13 @@ ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present) int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc) { struct ice_rx_ring *rx_ring; - unsigned long q; + uint i; + + ice_for_each_rxq(vsi, i) { + rx_ring = vsi->rx_rings[i]; + if (!rx_ring->xsk_pool) + continue; - for_each_set_bit(q, vsi->af_xdp_zc_qps, - max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) { - rx_ring = vsi->rx_rings[q]; if (ice_realloc_rx_xdp_bufs(rx_ring, zc)) return -ENOMEM; } @@ -380,7 +382,8 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) goto failure; } - if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi); + if_running = !test_bit(ICE_VSI_DOWN, vsi->state) && + ice_is_xdp_ena_vsi(vsi); if (if_running) { struct ice_rx_ring *rx_ring = vsi->rx_rings[qid]; @@ -460,6 +463,7 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp, /** * __ice_alloc_rx_bufs_zc - allocate a number of Rx buffers * @rx_ring: Rx ring + * @xsk_pool: XSK buffer pool to pick buffers to be filled by HW * @count: The number of buffers to allocate * * Place the @count of descriptors onto Rx ring. Handle the ring wrap @@ -468,7 +472,8 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp, * * Returns true if all allocations were successful, false if any fail. 
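Both ice_qp_dis() and ice_qp_ena() above were reworked to remember only the first error and carry on with the remaining teardown or bring-up steps instead of returning early, so a single failing step no longer leaves the queue pair half-configured. The pattern, reduced to a standalone sketch with hypothetical step functions:

#include <stdio.h>

/* Hypothetical steps; the second one fails */
static int step_a(void) { return 0; }
static int step_b(void) { return -5; /* e.g. an I/O error */ }
static int step_c(void) { return 0; }

int main(void)
{
	int fail = 0, err;

	/* Record the first failure but still run every step */
	err = step_a();
	if (!fail)
		fail = err;
	err = step_b();
	if (!fail)
		fail = err;
	err = step_c();
	if (!fail)
		fail = err;

	printf("first error: %d\n", fail);	/* -5, but step_c still ran */
	return 0;
}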
*/ -static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count) +static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, + struct xsk_buff_pool *xsk_pool, u16 count) { u32 nb_buffs_extra = 0, nb_buffs = 0; union ice_32b_rx_flex_desc *rx_desc; @@ -480,8 +485,7 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count) xdp = ice_xdp_buf(rx_ring, ntu); if (ntu + count >= rx_ring->count) { - nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, - rx_desc, + nb_buffs_extra = ice_fill_rx_descs(xsk_pool, xdp, rx_desc, rx_ring->count - ntu); if (nb_buffs_extra != rx_ring->count - ntu) { ntu += nb_buffs_extra; @@ -494,7 +498,7 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count) ice_release_rx_desc(rx_ring, 0); } - nb_buffs = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, count); + nb_buffs = ice_fill_rx_descs(xsk_pool, xdp, rx_desc, count); ntu += nb_buffs; if (ntu == rx_ring->count) @@ -510,6 +514,7 @@ exit: /** * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers * @rx_ring: Rx ring + * @xsk_pool: XSK buffer pool to pick buffers to be filled by HW * @count: The number of buffers to allocate * * Wrapper for internal allocation routine; figure out how many tail @@ -517,7 +522,8 @@ exit: * * Returns true if all calls to internal alloc routine succeeded */ -bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count) +bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, + struct xsk_buff_pool *xsk_pool, u16 count) { u16 rx_thresh = ICE_RING_QUARTER(rx_ring); u16 leftover, i, tail_bumps; @@ -526,9 +532,9 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count) leftover = count - (tail_bumps * rx_thresh); for (i = 0; i < tail_bumps; i++) - if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh)) + if (!__ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, rx_thresh)) return false; - return __ice_alloc_rx_bufs_zc(rx_ring, leftover); + return __ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, leftover); } /** @@ -555,8 +561,7 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) } net_prefetch(xdp->data_meta); - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize, - GFP_ATOMIC | __GFP_NOWARN); + skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize); if (unlikely(!skb)) return NULL; @@ -598,8 +603,10 @@ out: /** * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ * @xdp_ring: XDP Tx ring + * @xsk_pool: AF_XDP buffer pool pointer */ -static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring) +static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring, + struct xsk_buff_pool *xsk_pool) { u16 ntc = xdp_ring->next_to_clean; struct ice_tx_desc *tx_desc; @@ -650,7 +657,7 @@ skip: if (xdp_ring->next_to_clean >= cnt) xdp_ring->next_to_clean -= cnt; if (xsk_frames) - xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames); + xsk_tx_completed(xsk_pool, xsk_frames); return completed_frames; } @@ -659,6 +666,7 @@ skip: * ice_xmit_xdp_tx_zc - AF_XDP ZC handler for XDP_TX * @xdp: XDP buffer to xmit * @xdp_ring: XDP ring to produce descriptor onto + * @xsk_pool: AF_XDP buffer pool pointer * * note that this function works directly on xdp_buff, no need to convert * it to xdp_frame. 
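ice_alloc_rx_bufs_zc() above splits a request into whole quarter-ring chunks, bumping the hardware tail after each, and then issues one final call for the remainder. The arithmetic, worked through standalone for a hypothetical 512-entry ring and a 300-buffer request (the sizes are assumptions chosen only to make the numbers concrete):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint16_t ring_count = 512;		/* hypothetical ring size */
	const uint16_t count = 300;			/* buffers requested */
	const uint16_t rx_thresh = ring_count / 4;	/* ICE_RING_QUARTER */
	uint16_t tail_bumps = count / rx_thresh;
	uint16_t leftover = count - tail_bumps * rx_thresh;

	for (uint16_t i = 0; i < tail_bumps; i++)
		printf("fill %u descriptors, bump tail\n", rx_thresh);
	printf("fill final %u descriptors\n", leftover);
	/* two full chunks of 128 plus a final chunk of 44 */
	return 0;
}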
xdp_buff pointer is stored to ice_tx_buf so that cleaning @@ -668,7 +676,8 @@ skip: * was not enough space on XDP ring */ static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp, - struct ice_tx_ring *xdp_ring) + struct ice_tx_ring *xdp_ring, + struct xsk_buff_pool *xsk_pool) { struct skb_shared_info *sinfo = NULL; u32 size = xdp->data_end - xdp->data; @@ -682,7 +691,7 @@ static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp, free_space = ICE_DESC_UNUSED(xdp_ring); if (free_space < ICE_RING_QUARTER(xdp_ring)) - free_space += ice_clean_xdp_irq_zc(xdp_ring); + free_space += ice_clean_xdp_irq_zc(xdp_ring, xsk_pool); if (unlikely(!free_space)) goto busy; @@ -702,7 +711,7 @@ static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp, dma_addr_t dma; dma = xsk_buff_xdp_get_dma(xdp); - xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size); + xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, size); tx_buf->xdp = xdp; tx_buf->type = ICE_TX_BUF_XSK_TX; @@ -744,12 +753,14 @@ busy: * @xdp: xdp_buff used as input to the XDP program * @xdp_prog: XDP program to run * @xdp_ring: ring to be used for XDP_TX action + * @xsk_pool: AF_XDP buffer pool pointer * * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR} */ static int ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, - struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring) + struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring, + struct xsk_buff_pool *xsk_pool) { int err, result = ICE_XDP_PASS; u32 act; @@ -760,7 +771,7 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); if (!err) return ICE_XDP_REDIR; - if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS) + if (xsk_uses_need_wakeup(xsk_pool) && err == -ENOBUFS) result = ICE_XDP_EXIT; else result = ICE_XDP_CONSUMED; @@ -771,7 +782,7 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, case XDP_PASS: break; case XDP_TX: - result = ice_xmit_xdp_tx_zc(xdp, xdp_ring); + result = ice_xmit_xdp_tx_zc(xdp, xdp_ring, xsk_pool); if (result == ICE_XDP_CONSUMED) goto out_failure; break; @@ -791,46 +802,19 @@ out_failure: return result; } -static int -ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first, - struct xdp_buff *xdp, const unsigned int size) -{ - struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first); - - if (!size) - return 0; - - if (!xdp_buff_has_frags(first)) { - sinfo->nr_frags = 0; - sinfo->xdp_frags_size = 0; - xdp_buff_set_frags_flag(first); - } - - if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) { - xsk_buff_free(first); - return -ENOMEM; - } - - __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, - virt_to_page(xdp->data_hard_start), - XDP_PACKET_HEADROOM, size); - sinfo->xdp_frags_size += size; - xsk_buff_add_frag(xdp); - - return 0; -} - /** * ice_clean_rx_irq_zc - consumes packets from the hardware ring * @rx_ring: AF_XDP Rx ring + * @xsk_pool: AF_XDP buffer pool pointer * @budget: NAPI budget * * Returns number of processed packets on success, remaining budget on failure. 
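ice_run_xdp_zc() above translates the XDP program verdict into the driver's internal result codes, with the special case that a redirect failing with -ENOBUFS under need-wakeup mode becomes an "exit NAPI" result rather than a plain drop. A heavily reduced, self-contained sketch of that mapping follows; the result enum only mirrors the spirit of the ICE_XDP_* codes, the verdict numbers are the standard XDP action values, and the failure path for XDP_TX is omitted.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the driver's ICE_XDP_* result codes */
enum zc_result { ZC_PASS, ZC_CONSUMED, ZC_TX, ZC_REDIR, ZC_EXIT };

/* Hypothetical reduction of the verdict handling in ice_run_xdp_zc() */
static enum zc_result map_verdict(int act, int redirect_err, bool need_wakeup)
{
	switch (act) {
	case 4: /* XDP_REDIRECT */
		if (!redirect_err)
			return ZC_REDIR;
		/* Out of AF_XDP buffers: leave NAPI early and retry later */
		return (need_wakeup && redirect_err == -105 /* -ENOBUFS */) ?
		       ZC_EXIT : ZC_CONSUMED;
	case 3: /* XDP_TX */
		return ZC_TX;
	case 2: /* XDP_PASS */
		return ZC_PASS;
	default: /* XDP_ABORTED, XDP_DROP, unknown */
		return ZC_CONSUMED;
	}
}

int main(void)
{
	printf("redirect ok   -> %d\n", map_verdict(4, 0, true));	/* ZC_REDIR */
	printf("redirect full -> %d\n", map_verdict(4, -105, true));	/* ZC_EXIT */
	printf("drop          -> %d\n", map_verdict(1, 0, true));	/* ZC_CONSUMED */
	return 0;
}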
*/ -int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) +int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, + struct xsk_buff_pool *xsk_pool, + int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; - struct xsk_buff_pool *xsk_pool = rx_ring->xsk_pool; u32 ntc = rx_ring->next_to_clean; u32 ntu = rx_ring->next_to_use; struct xdp_buff *first = NULL; @@ -879,11 +863,12 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) ICE_RX_FLX_DESC_PKT_LEN_M; xsk_buff_set_size(xdp, size); - xsk_buff_dma_sync_for_cpu(xdp, xsk_pool); + xsk_buff_dma_sync_for_cpu(xdp); if (!first) { first = xdp; - } else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) { + } else if (likely(size) && !xsk_buff_add_frag(first, xdp)) { + xsk_buff_free(first); break; } @@ -893,7 +878,8 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) if (ice_is_non_eop(rx_ring, rx_desc)) continue; - xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring); + xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring, + xsk_pool); if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) { xdp_xmit |= xdp_res; } else if (xdp_res == ICE_XDP_EXIT) { @@ -942,7 +928,8 @@ construct_skb: rx_ring->next_to_clean = ntc; entries_to_alloc = ICE_RX_DESC_UNUSED(rx_ring); if (entries_to_alloc > ICE_RING_QUARTER(rx_ring)) - failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc); + failure |= !ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, + entries_to_alloc); ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0); ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes); @@ -965,17 +952,19 @@ construct_skb: /** * ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor * @xdp_ring: XDP ring to produce the HW Tx descriptor on + * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW * @desc: AF_XDP descriptor to pull the DMA address and length from * @total_bytes: bytes accumulator that will be used for stats update */ -static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc, +static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, + struct xsk_buff_pool *xsk_pool, struct xdp_desc *desc, unsigned int *total_bytes) { struct ice_tx_desc *tx_desc; dma_addr_t dma; - dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr); - xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len); + dma = xsk_buff_raw_get_dma(xsk_pool, desc->addr); + xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, desc->len); tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++); tx_desc->buf_addr = cpu_to_le64(dma); @@ -988,21 +977,25 @@ static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc, /** * ice_xmit_pkt_batch - produce a batch of HW Tx descriptors out of AF_XDP descriptors * @xdp_ring: XDP ring to produce the HW Tx descriptors on + * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from * @total_bytes: bytes accumulator that will be used for stats update */ -static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs, +static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, + struct xsk_buff_pool *xsk_pool, + struct xdp_desc *descs, unsigned int *total_bytes) { u16 ntu = xdp_ring->next_to_use; struct ice_tx_desc *tx_desc; u32 i; - loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) { + unrolled_count(PKTS_PER_BATCH) + for (i = 0; i < PKTS_PER_BATCH; i++) { dma_addr_t dma; - dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, descs[i].addr); - 
xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, descs[i].len); + dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr); + xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, descs[i].len); tx_desc = ICE_TX_DESC(xdp_ring, ntu++); tx_desc->buf_addr = cpu_to_le64(dma); @@ -1018,60 +1011,69 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de /** * ice_fill_tx_hw_ring - produce the number of Tx descriptors onto ring * @xdp_ring: XDP ring to produce the HW Tx descriptors on + * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW * @descs: AF_XDP descriptors to pull the DMA addresses and lengths from * @nb_pkts: count of packets to be send * @total_bytes: bytes accumulator that will be used for stats update */ -static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs, - u32 nb_pkts, unsigned int *total_bytes) +static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, + struct xsk_buff_pool *xsk_pool, + struct xdp_desc *descs, u32 nb_pkts, + unsigned int *total_bytes) { u32 batched, leftover, i; batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH); leftover = nb_pkts & (PKTS_PER_BATCH - 1); for (i = 0; i < batched; i += PKTS_PER_BATCH) - ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes); + ice_xmit_pkt_batch(xdp_ring, xsk_pool, &descs[i], total_bytes); for (; i < batched + leftover; i++) - ice_xmit_pkt(xdp_ring, &descs[i], total_bytes); + ice_xmit_pkt(xdp_ring, xsk_pool, &descs[i], total_bytes); } /** * ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring * @xdp_ring: XDP ring to produce the HW Tx descriptors on + * @xsk_pool: AF_XDP buffer pool pointer * * Returns true if there is no more work that needs to be done, false otherwise */ -bool ice_xmit_zc(struct ice_tx_ring *xdp_ring) +bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool) { - struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs; + struct xdp_desc *descs = xsk_pool->tx_descs; u32 nb_pkts, nb_processed = 0; unsigned int total_bytes = 0; int budget; - ice_clean_xdp_irq_zc(xdp_ring); + ice_clean_xdp_irq_zc(xdp_ring, xsk_pool); + + if (!netif_carrier_ok(xdp_ring->vsi->netdev) || + !netif_running(xdp_ring->vsi->netdev)) + return true; budget = ICE_DESC_UNUSED(xdp_ring); budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring)); - nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget); + nb_pkts = xsk_tx_peek_release_desc_batch(xsk_pool, budget); if (!nb_pkts) return true; if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) { nb_processed = xdp_ring->count - xdp_ring->next_to_use; - ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes); + ice_fill_tx_hw_ring(xdp_ring, xsk_pool, descs, nb_processed, + &total_bytes); xdp_ring->next_to_use = 0; } - ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed, - &total_bytes); + ice_fill_tx_hw_ring(xdp_ring, xsk_pool, &descs[nb_processed], + nb_pkts - nb_processed, &total_bytes); ice_set_rs_bit(xdp_ring); ice_xdp_ring_update_tail(xdp_ring); ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes); - if (xsk_uses_need_wakeup(xdp_ring->xsk_pool)) - xsk_set_tx_need_wakeup(xdp_ring->xsk_pool); + if (xsk_uses_need_wakeup(xsk_pool)) + xsk_set_tx_need_wakeup(xsk_pool); return nb_pkts < budget; } @@ -1093,7 +1095,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, struct ice_vsi *vsi = np->vsi; struct ice_tx_ring *ring; - if (test_bit(ICE_VSI_DOWN, vsi->state)) + if (test_bit(ICE_VSI_DOWN, vsi->state) || !netif_carrier_ok(netdev)) 
return -ENETDOWN; if (!ice_is_xdp_ena_vsi(vsi)) @@ -1104,7 +1106,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, ring = vsi->rx_rings[queue_id]->xdp_ring; - if (!ring->xsk_pool) + if (!READ_ONCE(ring->xsk_pool)) return -EINVAL; /* The idea here is that if NAPI is running, mark a miss, so diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h index 6fa181f080ef..8dc5d55e26c5 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.h +++ b/drivers/net/ethernet/intel/ice/ice_xsk.h @@ -7,29 +7,25 @@ #define PKTS_PER_BATCH 8 -#ifdef __clang__ -#define loop_unrolled_for _Pragma("clang loop unroll_count(8)") for -#elif __GNUC__ >= 8 -#define loop_unrolled_for _Pragma("GCC unroll 8") for -#else -#define loop_unrolled_for for -#endif - struct ice_vsi; #ifdef CONFIG_XDP_SOCKETS int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid); -int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget); +int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, + struct xsk_buff_pool *xsk_pool, + int budget); int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags); -bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count); +bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, + struct xsk_buff_pool *xsk_pool, u16 count); bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi); void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring); void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring); -bool ice_xmit_zc(struct ice_tx_ring *xdp_ring); +bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool); int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc); #else -static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring) +static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring, + struct xsk_buff_pool __always_unused *xsk_pool) { return false; } @@ -44,6 +40,7 @@ ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi, static inline int ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring, + struct xsk_buff_pool __always_unused *xsk_pool, int __always_unused budget) { return 0; @@ -51,6 +48,7 @@ ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring, static inline bool ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring, + struct xsk_buff_pool __always_unused *xsk_pool, u16 __always_unused count) { return false; |
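ice_fill_tx_hw_ring() shown earlier submits whole PKTS_PER_BATCH groups first and then the remainder one descriptor at a time, while ice_xmit_zc() additionally splits the work where the ring wraps back to index zero. The index arithmetic, worked through standalone for hypothetical sizes (ring size, tail position and packet count are assumptions chosen to exercise both the wrap and the leftover path):

#include <stdio.h>
#include <stdint.h>

#define PKTS_PER_BATCH 8
#define ALIGN_DOWN_U32(x, a) ((x) & ~((a) - 1))	/* a must be a power of two */

int main(void)
{
	const uint16_t ring_count = 256;	/* hypothetical XDP ring size */
	uint16_t next_to_use = 250;		/* close to the wrap point */
	uint32_t nb_pkts = 21;			/* descriptors peeked from the pool */
	uint32_t nb_processed = 0;

	/* ice_xmit_zc(): split the request at the end of the ring */
	if (next_to_use + nb_pkts >= ring_count) {
		nb_processed = ring_count - next_to_use;	/* 6 */
		next_to_use = 0;
	}
	printf("fill %u before the wrap, %u after\n",
	       nb_processed, nb_pkts - nb_processed);

	/* ice_fill_tx_hw_ring(): whole batches first, then the leftover */
	uint32_t chunk = nb_pkts - nb_processed;			/* 15 */
	uint32_t batched = ALIGN_DOWN_U32(chunk, PKTS_PER_BATCH);	/* 8 */
	uint32_t leftover = chunk & (PKTS_PER_BATCH - 1);		/* 7 */

	printf("%u descriptors in batches, %u singly\n", batched, leftover);
	return 0;
}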