Diffstat (limited to 'drivers/net/ethernet/intel/ice')
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile | 14
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/devlink.c | 213
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/health.c | 551
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/health.h | 71
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/port.c (renamed from drivers/net/ethernet/intel/ice/devlink/devlink_port.c) | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/port.h (renamed from drivers/net/ethernet/intel/ice/devlink/devlink_port.h) | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 147
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adapter.c | 69
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adapter.h | 11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 493
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_arfs.c | 83
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_arfs.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 577
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_cgu_regs.h | 181
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 1570
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 85
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c | 53
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb.c | 38
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 51
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.h | 9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_nl.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ddp.c | 99
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_debugfs.c | 633
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devids.h | 18
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dpll.c | 1475
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dpll.h | 33
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch.c | 21
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 348
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c | 21
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fdir.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 179
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_type.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.c | 318
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.h | 162
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fw_update.c | 54
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fwlog.c | 472
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fwlog.h | 79
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_gnss.c | 31
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_gnss.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 27
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_idc.c | 269
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_idc_int.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_irq.c | 275
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_irq.h | 13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.c | 1011
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.h | 24
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 102
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 219
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 16
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 871
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.c | 38
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_parser.h | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_parser_rt.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_protocol_type.h | 20
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c | 1051
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.h | 45
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_consts.h | 270
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 1458
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 164
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_repr.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sbq_cmd.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 199
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sf_eth.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.c | 191
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.h | 11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 62
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tc_lib.c | 266
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tc_lib.h | 11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_trace.h | 10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tspll.c | 626
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tspll.h | 31
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 929
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 154
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 91
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.h | 66
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 68
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.c | 58
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.h | 91
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib_private.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_mbx.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vlan_mode.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c | 24
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 335
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.h | 36
-rw-r--r--  drivers/net/ethernet/intel/ice/virt/allowlist.c (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c) | 11
-rw-r--r--  drivers/net/ethernet/intel/ice/virt/allowlist.h (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h) | 0
-rw-r--r--  drivers/net/ethernet/intel/ice/virt/fdir.c (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c) | 36
-rw-r--r--  drivers/net/ethernet/intel/ice/virt/fdir.h (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h) | 0
-rw-r--r--  drivers/net/ethernet/intel/ice/virt/queues.c | 975
-rw-r--r--  drivers/net/ethernet/intel/ice/virt/queues.h | 20
-rw-r--r--  drivers/net/ethernet/intel/ice/virt/rss.c | 1922
-rw-r--r--  drivers/net/ethernet/intel/ice/virt/rss.h | 18
-rw-r--r--  drivers/net/ethernet/intel/ice/virt/virtchnl.c (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl.c) | 1864
-rw-r--r--  drivers/net/ethernet/intel/ice/virt/virtchnl.h (renamed from drivers/net/ethernet/intel/ice/ice_virtchnl.h) | 29
99 files changed, 13227 insertions, 9041 deletions
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 3307d551f431..5b2c666496e7 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -32,7 +32,8 @@ ice-y := ice_main.o \
ice_parser_rt.o \
ice_idc.o \
devlink/devlink.o \
- devlink/devlink_port.o \
+ devlink/health.o \
+ devlink/port.o \
ice_sf_eth.o \
ice_sf_vsi_vlan_ops.o \
ice_ddp.o \
@@ -41,18 +42,19 @@ ice-y := ice_main.o \
ice_ethtool.o \
ice_repr.o \
ice_tc_lib.o \
- ice_fwlog.o \
ice_debugfs.o \
ice_adapter.o
ice-$(CONFIG_PCI_IOV) += \
ice_sriov.o \
- ice_virtchnl.o \
- ice_virtchnl_allowlist.o \
- ice_virtchnl_fdir.o \
+ virt/allowlist.o \
+ virt/fdir.o \
+ virt/queues.o \
+ virt/virtchnl.o \
+ virt/rss.o \
ice_vf_mbx.o \
ice_vf_vsi_vlan_ops.o \
ice_vf_lib.o
-ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o ice_dpll.o
+ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o ice_dpll.o ice_tspll.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index 415445cefdb2..d88b7f3fd1f9 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -6,7 +6,7 @@
#include "ice.h"
#include "ice_lib.h"
#include "devlink.h"
-#include "devlink_port.h"
+#include "port.h"
#include "ice_eswitch.h"
#include "ice_fw_update.h"
#include "ice_dcb_lib.h"
@@ -293,7 +293,7 @@ static int ice_devlink_info_get(struct devlink *devlink,
err = ice_discover_dev_caps(hw, &ctx->dev_caps);
if (err) {
dev_dbg(dev, "Failed to discover device capabilities, status %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Unable to discover device capabilities");
goto out_free_ctx;
}
@@ -302,7 +302,7 @@ static int ice_devlink_info_get(struct devlink *devlink,
err = ice_get_inactive_orom_ver(hw, &ctx->pending_orom);
if (err) {
dev_dbg(dev, "Unable to read inactive Option ROM version data, status %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
/* disable display of pending Option ROM */
ctx->dev_caps.common_cap.nvm_update_pending_orom = false;
@@ -313,7 +313,7 @@ static int ice_devlink_info_get(struct devlink *devlink,
err = ice_get_inactive_nvm_ver(hw, &ctx->pending_nvm);
if (err) {
dev_dbg(dev, "Unable to read inactive NVM version data, status %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
/* disable display of pending Option ROM */
ctx->dev_caps.common_cap.nvm_update_pending_nvm = false;
@@ -324,7 +324,7 @@ static int ice_devlink_info_get(struct devlink *devlink,
err = ice_get_inactive_netlist_ver(hw, &ctx->pending_netlist);
if (err) {
dev_dbg(dev, "Unable to read inactive Netlist version data, status %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
/* disable display of pending Option ROM */
ctx->dev_caps.common_cap.nvm_update_pending_netlist = false;
@@ -368,14 +368,18 @@ static int ice_devlink_info_get(struct devlink *devlink,
}
break;
case ICE_VERSION_RUNNING:
- err = devlink_info_version_running_put(req, key, ctx->buf);
+ err = devlink_info_version_running_put_ext(req, key,
+ ctx->buf,
+ DEVLINK_INFO_VERSION_TYPE_COMPONENT);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set running version");
goto out_free_ctx;
}
break;
case ICE_VERSION_STORED:
- err = devlink_info_version_stored_put(req, key, ctx->buf);
+ err = devlink_info_version_stored_put_ext(req, key,
+ ctx->buf,
+ DEVLINK_INFO_VERSION_TYPE_COMPONENT);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to set stored version");
goto out_free_ctx;
@@ -436,7 +440,7 @@ ice_devlink_reload_empr_start(struct ice_pf *pf,
err = ice_aq_nvm_update_empr(hw);
if (err) {
dev_err(dev, "Failed to trigger EMP device reset to reload firmware, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to trigger EMP device reset to reload firmware");
return err;
}
@@ -455,6 +459,7 @@ static void ice_devlink_reinit_down(struct ice_pf *pf)
rtnl_lock();
ice_vsi_decfg(ice_get_main_vsi(pf));
rtnl_unlock();
+ ice_deinit_pf(pf);
ice_deinit_dev(pf);
}
@@ -605,11 +610,13 @@ exit_release_res:
* @devlink: pointer to the devlink instance
* @id: the parameter ID to set
* @ctx: context to store the parameter value
+ * @extack: netlink extended ACK structure
*
* Return: zero on success and negative value on failure.
*/
static int ice_devlink_tx_sched_layers_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
int err;
@@ -977,6 +984,9 @@ static int ice_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv
/* preallocate memory for ice_sched_node */
node = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
*priv = node;
return 0;
@@ -1198,6 +1208,25 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
return status;
}
+static void ice_set_min_max_msix(struct ice_pf *pf)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ union devlink_param_value val;
+ int err;
+
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+ &val);
+ if (!err)
+ pf->msix.min = val.vu32;
+
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+ &val);
+ if (!err)
+ pf->msix.max = val.vu32;
+}
+
/**
* ice_devlink_reinit_up - do reinit of the given PF
* @pf: pointer to the PF struct
@@ -1205,11 +1234,28 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
static int ice_devlink_reinit_up(struct ice_pf *pf)
{
struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ struct device *dev = ice_pf_to_dev(pf);
+ bool need_dev_deinit = false;
int err;
+ err = ice_init_hw(&pf->hw);
+ if (err) {
+ dev_err(dev, "ice_init_hw failed: %d\n", err);
+ return err;
+ }
+
+ /* load MSI-X values */
+ ice_set_min_max_msix(pf);
+
err = ice_init_dev(pf);
if (err)
- return err;
+ goto unroll_hw_init;
+
+ err = ice_init_pf(pf);
+ if (err) {
+ dev_err(dev, "ice_init_pf failed: %d\n", err);
+ goto unroll_dev_init;
+ }
vsi->flags = ICE_VSI_FLAG_INIT;
@@ -1217,7 +1263,7 @@ static int ice_devlink_reinit_up(struct ice_pf *pf)
err = ice_vsi_cfg(vsi);
rtnl_unlock();
if (err)
- goto err_vsi_cfg;
+ goto unroll_pf_init;
/* No need to take devl_lock, it's already taken by devlink API */
err = ice_load(pf);
@@ -1230,8 +1276,14 @@ err_load:
rtnl_lock();
ice_vsi_decfg(vsi);
rtnl_unlock();
-err_vsi_cfg:
- ice_deinit_dev(pf);
+unroll_pf_init:
+ ice_deinit_pf(pf);
+unroll_dev_init:
+ need_dev_deinit = true;
+unroll_hw_init:
+ ice_deinit_hw(&pf->hw);
+ if (need_dev_deinit)
+ ice_deinit_dev(pf);
return err;
}
@@ -1299,11 +1351,17 @@ static const struct devlink_ops ice_sf_devlink_ops;
static int
ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
+
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
- ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? true : false;
+ ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2);
return 0;
}
@@ -1313,19 +1371,24 @@ static int ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
bool roce_ena = ctx->val.vbool;
int ret;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
+
if (!roce_ena) {
ice_unplug_aux_dev(pf);
- pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
+ cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
return 0;
}
- pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
+ cdev->rdma_protocol |= IIDC_RDMA_PROTOCOL_ROCEV2;
ret = ice_plug_aux_dev(pf);
if (ret)
- pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
+ cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_ROCEV2;
return ret;
}
@@ -1336,11 +1399,16 @@ ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
+
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
return -EOPNOTSUPP;
- if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP) {
+ if (cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP) {
NL_SET_ERR_MSG_MOD(extack, "iWARP is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
return -EOPNOTSUPP;
}
@@ -1350,11 +1418,17 @@ ice_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
static int
ice_devlink_enable_iw_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
- ctx->val.vbool = pf->rdma_mode & IIDC_RDMA_PROTOCOL_IWARP;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
+
+ ctx->val.vbool = !!(cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_IWARP);
return 0;
}
@@ -1364,19 +1438,24 @@ static int ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
+ struct iidc_rdma_core_dev_info *cdev;
bool iw_ena = ctx->val.vbool;
int ret;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
+
if (!iw_ena) {
ice_unplug_aux_dev(pf);
- pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
+ cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_IWARP;
return 0;
}
- pf->rdma_mode |= IIDC_RDMA_PROTOCOL_IWARP;
+ cdev->rdma_protocol |= IIDC_RDMA_PROTOCOL_IWARP;
ret = ice_plug_aux_dev(pf);
if (ret)
- pf->rdma_mode &= ~IIDC_RDMA_PROTOCOL_IWARP;
+ cdev->rdma_protocol &= ~IIDC_RDMA_PROTOCOL_IWARP;
return ret;
}
@@ -1391,7 +1470,7 @@ ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id,
if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
return -EOPNOTSUPP;
- if (pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2) {
+ if (pf->cdev_info->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2) {
NL_SET_ERR_MSG_MOD(extack, "RoCEv2 is currently enabled. This device cannot enable iWARP and RoCEv2 simultaneously");
return -EOPNOTSUPP;
}
@@ -1447,11 +1526,13 @@ static int ice_devlink_local_fwd_str_to_mode(const char *mode_str)
* @devlink: Pointer to the devlink instance.
* @id: The parameter ID to set.
* @ctx: Context to store the parameter value.
+ * @extack: netlink extended ACK structure
*
* Return: Zero.
*/
static int ice_devlink_local_fwd_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
struct ice_port_info *pi;
@@ -1518,6 +1599,43 @@ static int ice_devlink_local_fwd_validate(struct devlink *devlink, u32 id,
return 0;
}
+static int
+ice_devlink_msix_max_pf_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ if (val.vu32 > pf->hw.func_caps.common_cap.num_msix_vectors)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+ice_devlink_msix_min_pf_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if (val.vu32 < ICE_MIN_MSIX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ice_devlink_enable_rdma_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ bool new_state = val.vbool;
+
+ if (new_state && !test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
enum ice_param_id {
ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
@@ -1533,6 +1651,17 @@ static const struct devlink_param ice_dvl_rdma_params[] = {
ice_devlink_enable_iw_get,
ice_devlink_enable_iw_set,
ice_devlink_enable_iw_validate),
+ DEVLINK_PARAM_GENERIC(ENABLE_RDMA, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, ice_devlink_enable_rdma_validate),
+};
+
+static const struct devlink_param ice_dvl_msix_params[] = {
+ DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, ice_devlink_msix_max_pf_validate),
+ DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MIN,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, ice_devlink_msix_min_pf_validate),
};
static const struct devlink_param ice_dvl_sched_params[] = {
@@ -1636,6 +1765,7 @@ void ice_devlink_unregister(struct ice_pf *pf)
int ice_devlink_register_params(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
+ union devlink_param_value value;
struct ice_hw *hw = &pf->hw;
int status;
@@ -1644,10 +1774,39 @@ int ice_devlink_register_params(struct ice_pf *pf)
if (status)
return status;
+ status = devl_params_register(devlink, ice_dvl_msix_params,
+ ARRAY_SIZE(ice_dvl_msix_params));
+ if (status)
+ goto unregister_rdma_params;
+
if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
status = devl_params_register(devlink, ice_dvl_sched_params,
ARRAY_SIZE(ice_dvl_sched_params));
+ if (status)
+ goto unregister_msix_params;
+
+ value.vu32 = pf->msix.max;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+ value);
+ value.vu32 = pf->msix.min;
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+ value);
+
+ value.vbool = test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ value);
+ return 0;
+
+unregister_msix_params:
+ devl_params_unregister(devlink, ice_dvl_msix_params,
+ ARRAY_SIZE(ice_dvl_msix_params));
+unregister_rdma_params:
+ devl_params_unregister(devlink, ice_dvl_rdma_params,
+ ARRAY_SIZE(ice_dvl_rdma_params));
return status;
}
@@ -1658,6 +1817,8 @@ void ice_devlink_unregister_params(struct ice_pf *pf)
devl_params_unregister(devlink, ice_dvl_rdma_params,
ARRAY_SIZE(ice_dvl_rdma_params));
+ devl_params_unregister(devlink, ice_dvl_msix_params,
+ ARRAY_SIZE(ice_dvl_msix_params));
if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
devl_params_unregister(devlink, ice_dvl_sched_params,
diff --git a/drivers/net/ethernet/intel/ice/devlink/health.c b/drivers/net/ethernet/intel/ice/devlink/health.c
new file mode 100644
index 000000000000..8e9a8a8178d4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/devlink/health.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_adminq_cmd.h" /* for enum ice_aqc_health_status_elem */
+#include "health.h"
+
+#define ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, obj, name) \
+ devlink_fmsg_put(fmsg, #name, (obj)->name)
+
+#define ICE_HEALTH_STATUS_DATA_SIZE 2
+
+struct ice_health_status {
+ enum ice_aqc_health_status code;
+ const char *description;
+ const char *solution;
+ const char *data_label[ICE_HEALTH_STATUS_DATA_SIZE];
+};
+
+/*
+ * In addition to the health status codes provided below, the firmware might
+ * generate Health Status Codes that are not pertinent to the end-user.
+ * For instance, Health Code 0x1002 is triggered when the command fails.
+ * Such codes should be disregarded by the end-user.
+ * The below lookup requires to be sorted by code.
+ */
+
+static const char ice_common_port_solutions[] =
+ "Check your cable connection. Change or replace the module or cable. Manually set speed and duplex.";
+static const char ice_port_number_label[] = "Port Number";
+static const char ice_update_nvm_solution[] = "Update to the latest NVM image.";
+
+static const struct ice_health_status ice_health_status_lookup[] = {
+ {ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT, "An unsupported module was detected.",
+ ice_common_port_solutions, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE, "Module type is not supported.",
+ "Change or replace the module or cable.", {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL, "Module is not qualified.",
+ ice_common_port_solutions, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM,
+ "Device cannot communicate with the module.",
+ "Check your cable connection. Change or replace the module or cable. Manually set speed and duplex.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT, "Unresolved module conflict.",
+ "Manually set speed/duplex or change the port option. If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT, "Module is not present.",
+ "Check that the module is inserted correctly. If the problem persists, use a cable/module that is found in the supported modules and cables list for this device.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED, "Underutilized module.",
+ "Change or replace the module or cable. Change the port option.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT, "An unsupported module was detected.",
+ ice_common_port_solutions, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG, "Invalid link configuration.",
+ NULL, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS, "Port hardware access error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE, "A port is unreachable.",
+ "Change the port option. Update to the latest NVM image."},
+ {ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED, "Port speed is limited due to module.",
+ "Change the module or configure the port option to match the current module speed. Change the port option.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT,
+ "All configured link modes were attempted but failed to establish link. The device will restart the process to establish link.",
+ "Check link partner connection and configuration.",
+ {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED,
+ "Port speed is limited by PHY capabilities.",
+ "Change the module to align to port option.", {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO, "LOM topology netlist is corrupted.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_NETLIST, "Unrecoverable netlist error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT, "Port topology conflict.",
+ "Change the port option. Update to the latest NVM image."},
+ {ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS, "Unrecoverable hardware access error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME, "Unrecoverable runtime error.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT, "Link management engine failed to initialize.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_ERR_PHY_FW_LOAD,
+ "Failed to load the firmware image in the external PHY.",
+ ice_update_nvm_solution, {ice_port_number_label}},
+ {ICE_AQC_HEALTH_STATUS_INFO_RECOVERY, "The device is in firmware recovery mode.",
+ ice_update_nvm_solution, {"Extended Error"}},
+ {ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS, "The flash chip cannot be accessed.",
+ "If issue persists, call customer support.", {"Access Type"}},
+ {ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH, "NVM authentication failed.",
+ ice_update_nvm_solution},
+ {ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH, "Option ROM authentication failed.",
+ ice_update_nvm_solution},
+ {ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH, "DDP package authentication failed.",
+ "Update to latest base driver and DDP package."},
+ {ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT, "NVM image is incompatible.",
+ ice_update_nvm_solution},
+ {ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT, "Option ROM is incompatible.",
+ ice_update_nvm_solution, {"Expected PCI Device ID", "Expected Module ID"}},
+ {ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB,
+ "Supplied MIB file is invalid. DCB reverted to default configuration.",
+ "Disable FW-LLDP and check DCBx system configuration.",
+ {ice_port_number_label, "MIB ID"}},
+};
+
+static int ice_health_status_lookup_compare(const void *a, const void *b)
+{
+ return ((struct ice_health_status *)a)->code - ((struct ice_health_status *)b)->code;
+}
+
+static const struct ice_health_status *ice_get_health_status(u16 code)
+{
+ struct ice_health_status key = { .code = code };
+
+ return bsearch(&key, ice_health_status_lookup, ARRAY_SIZE(ice_health_status_lookup),
+ sizeof(struct ice_health_status), ice_health_status_lookup_compare);
+}
+
+static void ice_describe_status_code(struct devlink_fmsg *fmsg,
+ struct ice_aqc_health_status_elem *hse)
+{
+ static const char *const aux_label[] = { "Aux Data 1", "Aux Data 2" };
+ const struct ice_health_status *health_code;
+ u32 internal_data[2];
+ u16 status_code;
+
+ status_code = le16_to_cpu(hse->health_status_code);
+
+ devlink_fmsg_put(fmsg, "Syndrome", status_code);
+ if (status_code) {
+ internal_data[0] = le32_to_cpu(hse->internal_data1);
+ internal_data[1] = le32_to_cpu(hse->internal_data2);
+
+ health_code = ice_get_health_status(status_code);
+ if (!health_code)
+ return;
+
+ devlink_fmsg_string_pair_put(fmsg, "Description", health_code->description);
+ if (health_code->solution)
+ devlink_fmsg_string_pair_put(fmsg, "Possible Solution",
+ health_code->solution);
+
+ for (size_t i = 0; i < ICE_HEALTH_STATUS_DATA_SIZE; i++) {
+ if (internal_data[i] != ICE_AQC_HEALTH_STATUS_UNDEFINED_DATA)
+ devlink_fmsg_u32_pair_put(fmsg,
+ health_code->data_label[i] ?
+ health_code->data_label[i] :
+ aux_label[i],
+ internal_data[i]);
+ }
+ }
+}
+
+static int
+ice_port_reporter_diagnose(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.port_status);
+ return 0;
+}
+
+static int
+ice_port_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ void *priv_ctx, struct netlink_ext_ack __always_unused *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.port_status);
+ return 0;
+}
+
+static int
+ice_fw_reporter_diagnose(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.fw_status);
+ return 0;
+}
+
+static int
+ice_fw_reporter_dump(struct devlink_health_reporter *reporter, struct devlink_fmsg *fmsg,
+ void *priv_ctx, struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_health_reporter_priv(reporter);
+
+ ice_describe_status_code(fmsg, &pf->health_reporters.fw_status);
+ return 0;
+}
+
+static void ice_config_health_events(struct ice_pf *pf, bool enable)
+{
+ u8 enable_bits = 0;
+ int ret;
+
+ if (enable)
+ enable_bits = ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK |
+ ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK;
+
+ ret = ice_aq_set_health_status_cfg(&pf->hw, enable_bits);
+ if (ret)
+ dev_err(ice_pf_to_dev(pf), "Failed to %s firmware health events, err %d aq_err %s\n",
+ str_enable_disable(enable), ret,
+ libie_aq_str(pf->hw.adminq.sq_last_status));
+}
+
+/**
+ * ice_process_health_status_event - Process the health status event from FW
+ * @pf: pointer to the PF structure
+ * @event: event structure containing the Health Status Event opcode
+ *
+ * Decode the Health Status Events and print the associated messages
+ */
+void ice_process_health_status_event(struct ice_pf *pf, struct ice_rq_event_info *event)
+{
+ const struct ice_aqc_health_status_elem *health_info;
+ const struct ice_aqc_get_health_status *cmd;
+ u16 count;
+
+ health_info = (struct ice_aqc_health_status_elem *)event->msg_buf;
+ cmd = libie_aq_raw(&event->desc);
+ count = le16_to_cpu(cmd->health_status_count);
+
+ if (count > (event->buf_len / sizeof(*health_info))) {
+ dev_err(ice_pf_to_dev(pf), "Received a health status event with invalid element count\n");
+ return;
+ }
+
+ for (size_t i = 0; i < count; i++) {
+ const struct ice_health_status *health_code;
+ u16 status_code;
+
+ status_code = le16_to_cpu(health_info->health_status_code);
+ health_code = ice_get_health_status(status_code);
+
+ if (health_code) {
+ switch (le16_to_cpu(health_info->event_source)) {
+ case ICE_AQC_HEALTH_STATUS_GLOBAL:
+ pf->health_reporters.fw_status = *health_info;
+ devlink_health_report(pf->health_reporters.fw,
+ "FW syndrome reported", NULL);
+ break;
+ case ICE_AQC_HEALTH_STATUS_PF:
+ case ICE_AQC_HEALTH_STATUS_PORT:
+ pf->health_reporters.port_status = *health_info;
+ devlink_health_report(pf->health_reporters.port,
+ "Port syndrome reported", NULL);
+ break;
+ default:
+ dev_err(ice_pf_to_dev(pf), "Health code with unknown source\n");
+ }
+ } else {
+ u32 data1, data2;
+ u16 source;
+
+ source = le16_to_cpu(health_info->event_source);
+ data1 = le32_to_cpu(health_info->internal_data1);
+ data2 = le32_to_cpu(health_info->internal_data2);
+ dev_dbg(ice_pf_to_dev(pf),
+ "Received internal health status code 0x%08x, source: 0x%08x, data1: 0x%08x, data2: 0x%08x",
+ status_code, source, data1, data2);
+ }
+ health_info++;
+ }
+}
+
+/**
+ * ice_devlink_health_report - boilerplate to call given @reporter
+ *
+ * @reporter: devlink health reporter to call, do nothing on NULL
+ * @msg: message to pass up, "event name" is fine
+ * @priv_ctx: typically some event struct
+ */
+static void ice_devlink_health_report(struct devlink_health_reporter *reporter,
+ const char *msg, void *priv_ctx)
+{
+ if (!reporter)
+ return;
+
+ /* We do not do auto recovering, so return value of the below function
+ * will always be 0, thus we do ignore it.
+ */
+ devlink_health_report(reporter, msg, priv_ctx);
+}
+
+struct ice_mdd_event {
+ enum ice_mdd_src src;
+ u16 vf_num;
+ u16 queue;
+ u8 pf_num;
+ u8 event;
+};
+
+static const char *ice_mdd_src_to_str(enum ice_mdd_src src)
+{
+ switch (src) {
+ case ICE_MDD_SRC_TX_PQM:
+ return "tx_pqm";
+ case ICE_MDD_SRC_TX_TCLAN:
+ return "tx_tclan";
+ case ICE_MDD_SRC_TX_TDPU:
+ return "tx_tdpu";
+ case ICE_MDD_SRC_RX:
+ return "rx";
+ default:
+ return "invalid";
+ }
+}
+
+static int
+ice_mdd_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_mdd_event *mdd_event = priv_ctx;
+ const char *src;
+
+ if (!mdd_event)
+ return 0;
+
+ src = ice_mdd_src_to_str(mdd_event->src);
+
+ devlink_fmsg_obj_nest_start(fmsg);
+ devlink_fmsg_put(fmsg, "src", src);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, pf_num);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, vf_num);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, event);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, mdd_event, queue);
+ devlink_fmsg_obj_nest_end(fmsg);
+
+ return 0;
+}
+
+/**
+ * ice_report_mdd_event - Report an MDD event through devlink health
+ * @pf: the PF device structure
+ * @src: the HW block that was the source of this MDD event
+ * @pf_num: the pf_num on which the MDD event occurred
+ * @vf_num: the vf_num on which the MDD event occurred
+ * @event: the event type of the MDD event
+ * @queue: the queue on which the MDD event occurred
+ *
+ * Report an MDD event that has occurred on this PF.
+ */
+void ice_report_mdd_event(struct ice_pf *pf, enum ice_mdd_src src, u8 pf_num,
+ u16 vf_num, u8 event, u16 queue)
+{
+ struct ice_mdd_event ev = {
+ .src = src,
+ .pf_num = pf_num,
+ .vf_num = vf_num,
+ .event = event,
+ .queue = queue,
+ };
+
+ ice_devlink_health_report(pf->health_reporters.mdd, "MDD event", &ev);
+}
+
+/**
+ * ice_fmsg_put_ptr - put hex value of pointer into fmsg
+ *
+ * @fmsg: devlink fmsg under construction
+ * @name: name to pass
+ * @ptr: 64 bit value to print as hex and put into fmsg
+ */
+static void ice_fmsg_put_ptr(struct devlink_fmsg *fmsg, const char *name,
+ void *ptr)
+{
+ char buf[sizeof(ptr) * 3];
+
+ sprintf(buf, "%p", ptr);
+ devlink_fmsg_put(fmsg, name, buf);
+}
+
+struct ice_tx_hang_event {
+ u32 head;
+ u32 intr;
+ u16 vsi_num;
+ u16 queue;
+ u16 next_to_clean;
+ u16 next_to_use;
+ struct ice_tx_ring *tx_ring;
+};
+
+static int ice_tx_hang_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_tx_hang_event *event = priv_ctx;
+ struct sk_buff *skb;
+
+ if (!event)
+ return 0;
+
+ skb = event->tx_ring->tx_buf->skb;
+ devlink_fmsg_obj_nest_start(fmsg);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, head);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, intr);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, vsi_num);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, queue);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, next_to_clean);
+ ICE_DEVLINK_FMSG_PUT_FIELD(fmsg, event, next_to_use);
+ devlink_fmsg_put(fmsg, "irq-mapping", event->tx_ring->q_vector->name);
+ ice_fmsg_put_ptr(fmsg, "desc-ptr", event->tx_ring->desc);
+ ice_fmsg_put_ptr(fmsg, "dma-ptr", (void *)(long)event->tx_ring->dma);
+ ice_fmsg_put_ptr(fmsg, "skb-ptr", skb);
+ devlink_fmsg_binary_pair_put(fmsg, "desc", event->tx_ring->desc,
+ event->tx_ring->count * sizeof(struct ice_tx_desc));
+ devlink_fmsg_dump_skb(fmsg, skb);
+ devlink_fmsg_obj_nest_end(fmsg);
+
+ return 0;
+}
+
+void ice_prep_tx_hang_report(struct ice_pf *pf, struct ice_tx_ring *tx_ring,
+ u16 vsi_num, u32 head, u32 intr)
+{
+ struct ice_health_tx_hang_buf *buf = &pf->health_reporters.tx_hang_buf;
+
+ buf->tx_ring = tx_ring;
+ buf->vsi_num = vsi_num;
+ buf->head = head;
+ buf->intr = intr;
+}
+
+void ice_report_tx_hang(struct ice_pf *pf)
+{
+ struct ice_health_tx_hang_buf *buf = &pf->health_reporters.tx_hang_buf;
+ struct ice_tx_ring *tx_ring = buf->tx_ring;
+
+ struct ice_tx_hang_event ev = {
+ .head = buf->head,
+ .intr = buf->intr,
+ .vsi_num = buf->vsi_num,
+ .queue = tx_ring->q_index,
+ .next_to_clean = tx_ring->next_to_clean,
+ .next_to_use = tx_ring->next_to_use,
+ .tx_ring = tx_ring,
+ };
+
+ ice_devlink_health_report(pf->health_reporters.tx_hang, "Tx hang", &ev);
+}
+
+static struct devlink_health_reporter *
+ice_init_devlink_rep(struct ice_pf *pf,
+ const struct devlink_health_reporter_ops *ops)
+{
+ struct devlink *devlink = priv_to_devlink(pf);
+ struct devlink_health_reporter *rep;
+
+ rep = devl_health_reporter_create(devlink, ops, pf);
+ if (IS_ERR(rep)) {
+ struct device *dev = ice_pf_to_dev(pf);
+
+ dev_err(dev, "failed to create devlink %s health reporter",
+ ops->name);
+ return NULL;
+ }
+ return rep;
+}
+
+#define ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field) \
+ ._field = ice_##_name##_reporter_##_field,
+
+#define ICE_DEFINE_HEALTH_REPORTER_OPS_1(_name, _field1) \
+ static const struct devlink_health_reporter_ops ice_##_name##_reporter_ops = { \
+ .name = #_name, \
+ ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field1) \
+ }
+
+#define ICE_DEFINE_HEALTH_REPORTER_OPS_2(_name, _field1, _field2) \
+ static const struct devlink_health_reporter_ops ice_##_name##_reporter_ops = { \
+ .name = #_name, \
+ ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field1) \
+ ICE_HEALTH_REPORTER_OPS_FIELD(_name, _field2) \
+ }
+
+ICE_DEFINE_HEALTH_REPORTER_OPS_1(mdd, dump);
+ICE_DEFINE_HEALTH_REPORTER_OPS_1(tx_hang, dump);
+ICE_DEFINE_HEALTH_REPORTER_OPS_2(fw, dump, diagnose);
+ICE_DEFINE_HEALTH_REPORTER_OPS_2(port, dump, diagnose);
+
+/**
+ * ice_health_init - allocate and init all ice devlink health reporters and
+ * accompanied data
+ *
+ * @pf: PF struct
+ */
+void ice_health_init(struct ice_pf *pf)
+{
+ struct ice_health *reps = &pf->health_reporters;
+
+ reps->mdd = ice_init_devlink_rep(pf, &ice_mdd_reporter_ops);
+ reps->tx_hang = ice_init_devlink_rep(pf, &ice_tx_hang_reporter_ops);
+
+ if (ice_is_fw_health_report_supported(&pf->hw)) {
+ reps->fw = ice_init_devlink_rep(pf, &ice_fw_reporter_ops);
+ reps->port = ice_init_devlink_rep(pf, &ice_port_reporter_ops);
+ ice_config_health_events(pf, true);
+ }
+}
+
+/**
+ * ice_deinit_devl_reporter - destroy given devlink health reporter
+ * @reporter: reporter to destroy
+ */
+static void ice_deinit_devl_reporter(struct devlink_health_reporter *reporter)
+{
+ if (reporter)
+ devl_health_reporter_destroy(reporter);
+}
+
+/**
+ * ice_health_deinit - deallocate all ice devlink health reporters and
+ * accompanied data
+ *
+ * @pf: PF struct
+ */
+void ice_health_deinit(struct ice_pf *pf)
+{
+ ice_deinit_devl_reporter(pf->health_reporters.mdd);
+ ice_deinit_devl_reporter(pf->health_reporters.tx_hang);
+ if (ice_is_fw_health_report_supported(&pf->hw)) {
+ ice_deinit_devl_reporter(pf->health_reporters.fw);
+ ice_deinit_devl_reporter(pf->health_reporters.port);
+ ice_config_health_events(pf, false);
+ }
+}
+
+static
+void ice_health_assign_healthy_state(struct devlink_health_reporter *reporter)
+{
+ if (reporter)
+ devlink_health_reporter_state_update(reporter,
+ DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
+}
+
+/**
+ * ice_health_clear - clear devlink health issues after a reset
+ * @pf: the PF device structure
+ *
+ * Mark the PF in healthy state again after a reset has completed.
+ */
+void ice_health_clear(struct ice_pf *pf)
+{
+ ice_health_assign_healthy_state(pf->health_reporters.mdd);
+ ice_health_assign_healthy_state(pf->health_reporters.tx_hang);
+}
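[Editorial note: the following is a minimal user-space sketch, not driver code, of the lookup pattern used by ice_get_health_status() above: a table kept sorted by its numeric code and searched with the C library's bsearch(). The codes and strings below are invented for illustration and are not the driver's tables.]

#include <stdio.h>
#include <stdlib.h>

struct status_entry {
	unsigned short code;		/* lookup key; the table must stay sorted by it */
	const char *description;
};

/* Hypothetical table, sorted by code, standing in for ice_health_status_lookup[]. */
static const struct status_entry table[] = {
	{ 0x101, "Unsupported module detected" },
	{ 0x102, "Module type not supported" },
	{ 0x10A, "Link configuration invalid" },
};

static int cmp_entry(const void *a, const void *b)
{
	const struct status_entry *x = a, *y = b;

	return (int)x->code - (int)y->code;
}

static const struct status_entry *lookup(unsigned short code)
{
	struct status_entry key = { .code = code };

	return bsearch(&key, table, sizeof(table) / sizeof(table[0]),
		       sizeof(table[0]), cmp_entry);
}

int main(void)
{
	const struct status_entry *e = lookup(0x102);

	printf("%s\n", e ? e->description : "unknown code");
	return 0;
}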
diff --git a/drivers/net/ethernet/intel/ice/devlink/health.h b/drivers/net/ethernet/intel/ice/devlink/health.h
new file mode 100644
index 000000000000..5edfc4d2adce
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/devlink/health.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024, Intel Corporation. */
+
+#ifndef _HEALTH_H_
+#define _HEALTH_H_
+
+#include <linux/types.h>
+
+/**
+ * DOC: health.h
+ *
+ * This header file stores everything that is needed for broadly understood
+ * devlink health mechanism for ice driver.
+ */
+
+struct ice_aqc_health_status_elem;
+struct ice_pf;
+struct ice_tx_ring;
+struct ice_rq_event_info;
+
+enum ice_mdd_src {
+ ICE_MDD_SRC_TX_PQM,
+ ICE_MDD_SRC_TX_TCLAN,
+ ICE_MDD_SRC_TX_TDPU,
+ ICE_MDD_SRC_RX,
+};
+
+/**
+ * struct ice_health - stores ice devlink health reporters and accompanied data
+ * @fw: devlink health reporter for FW Health Status events
+ * @mdd: devlink health reporter for MDD detection event
+ * @port: devlink health reporter for Port Health Status events
+ * @tx_hang: devlink health reporter for tx_hang event
+ * @tx_hang_buf: pre-allocated place to put info for Tx hang reporter from
+ * non-sleeping context
+ * @tx_ring: ring that the hang occurred on
+ * @head: descriptor head
+ * @intr: interrupt register value
+ * @vsi_num: VSI owning the queue that the hang occurred on
+ * @fw_status: buffer for last received FW Status event
+ * @port_status: buffer for last received Port Status event
+ */
+struct ice_health {
+ struct devlink_health_reporter *fw;
+ struct devlink_health_reporter *mdd;
+ struct devlink_health_reporter *port;
+ struct devlink_health_reporter *tx_hang;
+ struct_group_tagged(ice_health_tx_hang_buf, tx_hang_buf,
+ struct ice_tx_ring *tx_ring;
+ u32 head;
+ u32 intr;
+ u16 vsi_num;
+ );
+ struct ice_aqc_health_status_elem fw_status;
+ struct ice_aqc_health_status_elem port_status;
+};
+
+void ice_process_health_status_event(struct ice_pf *pf,
+ struct ice_rq_event_info *event);
+
+void ice_health_init(struct ice_pf *pf);
+void ice_health_deinit(struct ice_pf *pf);
+void ice_health_clear(struct ice_pf *pf);
+
+void ice_prep_tx_hang_report(struct ice_pf *pf, struct ice_tx_ring *tx_ring,
+ u16 vsi_num, u32 head, u32 intr);
+void ice_report_mdd_event(struct ice_pf *pf, enum ice_mdd_src src, u8 pf_num,
+ u16 vf_num, u8 event, u16 queue);
+void ice_report_tx_hang(struct ice_pf *pf);
+
+#endif /* _HEALTH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c b/drivers/net/ethernet/intel/ice/devlink/port.c
index c6779d9dffff..63fb36fc4b3d 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
+++ b/drivers/net/ethernet/intel/ice/devlink/port.c
@@ -5,7 +5,7 @@
#include "ice.h"
#include "devlink.h"
-#include "devlink_port.h"
+#include "port.h"
#include "ice_lib.h"
#include "ice_fltr.h"
@@ -30,6 +30,8 @@ static const char *ice_devlink_port_opt_speed_str(u8 speed)
return "10";
case ICE_AQC_PORT_OPT_MAX_LANE_25G:
return "25";
+ case ICE_AQC_PORT_OPT_MAX_LANE_40G:
+ return "40";
case ICE_AQC_PORT_OPT_MAX_LANE_50G:
return "50";
case ICE_AQC_PORT_OPT_MAX_LANE_100G:
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h b/drivers/net/ethernet/intel/ice/devlink/port.h
index d60efc340945..e89ddd60eeac 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
+++ b/drivers/net/ethernet/intel/ice/devlink/port.h
@@ -11,7 +11,7 @@
* struct ice_dynamic_port - Track dynamically added devlink port instance
* @hw_addr: the HW address for this port
* @active: true if the port has been activated
- * @attached: true it the prot is attached
+ * @attached: true if the prot is attached
* @devlink_port: the associated devlink port structure
* @pf: pointer to the PF private structure
* @vsi: the VSI associated with this port
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 2f5d6f974185..147aaee192a7 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -67,6 +67,7 @@
#include "ice_sriov.h"
#include "ice_vf_mbx.h"
#include "ice_ptp.h"
+#include "ice_tspll.h"
#include "ice_fdir.h"
#include "ice_xsk.h"
#include "ice_arfs.h"
@@ -78,11 +79,16 @@
#include "ice_irq.h"
#include "ice_dpll.h"
#include "ice_adapter.h"
+#include "devlink/health.h"
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
#define ICE_MIN_NUM_DESC 64
-#define ICE_MAX_NUM_DESC 8160
+#define ICE_MAX_NUM_DESC_E810 8160
+#define ICE_MAX_NUM_DESC_E830 8096
+#define ICE_MAX_NUM_DESC_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \
+ ICE_MAX_NUM_DESC_E830 : \
+ ICE_MAX_NUM_DESC_E810)
#define ICE_DFLT_MIN_RX_DESC 512
#define ICE_DFLT_NUM_TX_DESC 256
#define ICE_DFLT_NUM_RX_DESC 2048
@@ -96,9 +102,6 @@
#define ICE_MIN_LAN_OICR_MSIX 1
#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_FDIR_MSIX 2
-#define ICE_RDMA_NUM_AEQ_MSIX 4
-#define ICE_MIN_RDMA_MSIX 2
-#define ICE_ESWITCH_MSIX 1
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
@@ -195,16 +198,17 @@
#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))
-#define ice_pf_src_tmr_owned(pf) ((pf)->hw.func_caps.ts_func_info.src_tmr_owned)
-
enum ice_feature {
ICE_F_DSCP,
ICE_F_PHY_RCLK,
ICE_F_SMA_CTRL,
ICE_F_CGU,
ICE_F_GNSS,
+ ICE_F_TXTIME,
+ ICE_F_GCS,
ICE_F_ROCE_LAG,
ICE_F_SRIOV_LAG,
+ ICE_F_SRIOV_AA_LAG,
ICE_F_MBX_LIMIT,
ICE_F_MAX
};
@@ -347,6 +351,7 @@ struct ice_vsi {
u16 num_q_vectors;
/* tell if only dynamic irq allocation is allowed */
bool irq_dyn_alloc;
+ bool hsplit:1;
u16 vsi_num; /* HW (absolute) index of this VSI */
u16 idx; /* software index in pf->vsi[] */
@@ -370,6 +375,8 @@ struct ice_vsi {
spinlock_t arfs_lock; /* protects aRFS hash table and filter state */
atomic_t *arfs_last_fltr_id;
+ u16 max_frame;
+
struct ice_aqc_vsi_props info; /* VSI properties */
struct ice_vsi_vlan_info vlan_info; /* vlan config to be restored */
@@ -402,7 +409,6 @@ struct ice_vsi {
u16 req_rxq; /* User requested Rx queues */
u16 num_rx_desc;
u16 num_tx_desc;
- u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
struct ice_tc_cfg tc_cfg;
struct bpf_prog *xdp_prog;
struct ice_tx_ring **xdp_rings; /* XDP ring array */
@@ -477,9 +483,6 @@ struct ice_q_vector {
struct ice_ring_container rx;
struct ice_ring_container tx;
- cpumask_t affinity_mask;
- struct irq_affinity_notify affinity_notify;
-
struct ice_channel *ch;
char name[ICE_INT_NAME_STR_LEN];
@@ -509,16 +512,17 @@ enum ice_pf_flags {
ICE_FLAG_MOD_POWER_UNSUPPORTED,
ICE_FLAG_PHY_FW_LOAD_FAILED,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
- ICE_FLAG_LEGACY_RX,
ICE_FLAG_VF_TRUE_PROMISC_ENA,
ICE_FLAG_MDD_AUTO_RESET_VF,
ICE_FLAG_VF_VLAN_PRUNING,
ICE_FLAG_LINK_LENIENT_MODE_ENA,
ICE_FLAG_PLUG_AUX_DEV,
ICE_FLAG_UNPLUG_AUX_DEV,
+ ICE_FLAG_AUX_DEV_CREATED,
ICE_FLAG_MTU_CHANGED,
ICE_FLAG_GNSS, /* GNSS successfully initialized */
ICE_FLAG_DPLL, /* SyncE/PTP dplls initialized */
+ ICE_FLAG_LLDP_AQ_FLTR,
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -541,6 +545,14 @@ struct ice_agg_node {
u8 valid;
};
+struct ice_pf_msix {
+ u32 cur;
+ u32 min;
+ u32 max;
+ u32 total;
+ u32 rest;
+};
+
struct ice_pf {
struct pci_dev *pdev;
struct ice_adapter *adapter;
@@ -553,15 +565,8 @@ struct ice_pf {
struct devlink_port devlink_port;
/* OS reserved IRQ details */
- struct msix_entry *msix_entries;
struct ice_irq_tracker irq_tracker;
- /* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
- * number of MSIX vectors needed for all SR-IOV VFs from the number of
- * MSIX vectors allowed on this PF.
- */
- u16 sriov_base_vector;
- unsigned long *sriov_irq_bm; /* bitmap to track irq usage */
- u16 sriov_irq_size; /* size of the irq_bm bitmap */
+ struct ice_virt_irq_tracker virt_irq_tracker;
u16 ctrl_vsi_idx; /* control VSI index in pf->vsi array */
@@ -570,9 +575,6 @@ struct ice_pf {
struct ice_sw *first_sw; /* first switch created by firmware */
u16 eswitch_mode; /* current mode of eswitch */
struct dentry *ice_debugfs_pf;
- struct dentry *ice_debugfs_pf_fwlog;
- /* keep track of all the dentrys for FW log modules */
- struct dentry **ice_debugfs_pf_fwlog_modules;
struct ice_vfs vfs;
DECLARE_BITMAP(features, ICE_F_MAX);
DECLARE_BITMAP(state, ICE_STATE_NBITS);
@@ -580,6 +582,7 @@ struct ice_pf {
DECLARE_BITMAP(misc_thread, ICE_MISC_THREAD_NBITS);
unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */
unsigned long *avail_rxqs; /* bitmap to track PF Rx queue usage */
+ unsigned long *txtime_txqs; /* bitmap to track PF Tx Time queue */
unsigned long serv_tmr_period;
unsigned long serv_tmr_prev;
struct timer_list serv_tmr;
@@ -594,7 +597,6 @@ struct ice_pf {
struct gnss_serial *gnss_serial;
struct gnss_device *gnss_dev;
u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */
- u16 rdma_base_vector;
/* spinlock to protect the AdminQ wait list */
spinlock_t aq_wait_lock;
@@ -611,7 +613,7 @@ struct ice_pf {
struct msi_map ll_ts_irq; /* LL_TS interrupt MSIX vector */
u16 max_pf_txqs; /* Total Tx queues PF wide */
u16 max_pf_rxqs; /* Total Rx queues PF wide */
- u16 num_lan_msix; /* Total MSIX vectors for base driver */
+ struct ice_pf_msix msix;
u16 num_lan_tx; /* num LAN Tx queues setup */
u16 num_lan_rx; /* num LAN Rx queues setup */
u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
@@ -620,6 +622,7 @@ struct ice_pf {
u16 globr_count; /* Global reset count */
u16 empr_count; /* EMP reset count */
u16 pfr_count; /* PF reset count */
+ u32 link_down_events;
u8 wol_ena : 1; /* software state of WoL */
u32 wakeup_reason; /* last wakeup reason */
@@ -627,14 +630,12 @@ struct ice_pf {
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
u8 stat_prev_loaded:1; /* has previous stats been loaded */
- u8 rdma_mode;
u16 dcbx_cap;
u32 tx_timeout_count;
unsigned long tx_timeout_last_recovery;
u32 tx_timeout_recovery_level;
char int_name[ICE_INT_NAME_STR_LEN];
char int_name_ll_ts[ICE_INT_NAME_STR_LEN];
- struct auxiliary_device *adev;
int aux_idx;
u32 sw_int_count;
/* count of tc_flower filters specific to channel (aka where filter
@@ -665,6 +666,8 @@ struct ice_pf {
struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
struct ice_dplls dplls;
struct device *hwmon_dev;
+ struct ice_health health_reporters;
+ struct iidc_rdma_core_dev_info *cdev_info;
u8 num_quanta_prof_used;
};
@@ -754,6 +757,31 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
}
/**
+ * ice_is_txtime_ena - check if Tx Time is enabled on the Tx ring
+ * @ring: pointer to Tx ring
+ *
+ * Return: true if the Tx ring has Tx Time enabled, false otherwise.
+ */
+static inline bool ice_is_txtime_ena(const struct ice_tx_ring *ring)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ struct ice_pf *pf = vsi->back;
+
+ return test_bit(ring->q_index, pf->txtime_txqs);
+}
+
+/**
+ * ice_is_txtime_cfg - check if Tx Time is configured on the Tx ring
+ * @ring: pointer to Tx ring
+ *
+ * Return: true if the Tx ring is configured for Tx ring, false otherwise.
+ */
+static inline bool ice_is_txtime_cfg(const struct ice_tx_ring *ring)
+{
+ return !!(ring->flags & ICE_TX_FLAGS_TXTIME);
+}
+
+/**
* ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID
* @vsi: pointer to VSI
* @qid: index of a queue to look at XSK buff pool presence
@@ -910,11 +938,10 @@ static inline bool ice_is_adq_active(struct ice_pf *pf)
return false;
}
-void ice_debugfs_fwlog_init(struct ice_pf *pf);
+int ice_debugfs_pf_init(struct ice_pf *pf);
void ice_debugfs_pf_deinit(struct ice_pf *pf);
void ice_debugfs_init(void);
void ice_debugfs_exit(void);
-void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module);
bool netif_is_ice(const struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
@@ -964,7 +991,6 @@ int ice_plug_aux_dev(struct ice_pf *pf);
void ice_unplug_aux_dev(struct ice_pf *pf);
int ice_init_rdma(struct ice_pf *pf);
void ice_deinit_rdma(struct ice_pf *pf);
-const char *ice_aq_str(enum ice_aq_err aq_err);
bool ice_is_wol_supported(struct ice_hw *hw);
void ice_fdir_del_all_fltrs(struct ice_vsi *vsi);
int
@@ -1005,11 +1031,15 @@ int ice_open(struct net_device *netdev);
int ice_open_internal(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
void ice_service_task_schedule(struct ice_pf *pf);
+void ice_start_service_task(struct ice_pf *pf);
int ice_load(struct ice_pf *pf);
void ice_unload(struct ice_pf *pf);
void ice_adv_lnk_speed_maps_init(void);
+void ice_init_dev_hw(struct ice_pf *pf);
int ice_init_dev(struct ice_pf *pf);
void ice_deinit_dev(struct ice_pf *pf);
+int ice_init_pf(struct ice_pf *pf);
+void ice_deinit_pf(struct ice_pf *pf);
int ice_change_mtu(struct net_device *netdev, int new_mtu);
void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue);
int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp);
@@ -1045,10 +1075,63 @@ static inline void ice_clear_rdma_cap(struct ice_pf *pf)
clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
}
-static inline enum ice_phy_model ice_get_phy_model(const struct ice_hw *hw)
+extern const struct xdp_metadata_ops ice_xdp_md_ops;
+
+/**
+ * ice_is_dual - Check if given config is multi-NAC
+ * @hw: pointer to HW structure
+ *
+ * Return: true if the device is running in mutli-NAC (Network
+ * Acceleration Complex) configuration variant, false otherwise
+ * (always false for non-E825 devices).
+ */
+static inline bool ice_is_dual(struct ice_hw *hw)
{
- return hw->ptp.phy_model;
+ return hw->mac_type == ICE_MAC_GENERIC_3K_E825 &&
+ (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M);
}
-extern const struct xdp_metadata_ops ice_xdp_md_ops;
+/**
+ * ice_is_primary - Check if given device belongs to the primary complex
+ * @hw: pointer to HW structure
+ *
+ * Check if given PF/HW is running on primary complex in multi-NAC
+ * configuration.
+ *
+ * Return: true if the device is dual, false otherwise (always true
+ * for non-E825 devices).
+ */
+static inline bool ice_is_primary(struct ice_hw *hw)
+{
+ return hw->mac_type != ICE_MAC_GENERIC_3K_E825 ||
+ !ice_is_dual(hw) ||
+ (hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M);
+}
+
+/**
+ * ice_pf_src_tmr_owned - Check if a primary timer is owned by PF
+ * @pf: pointer to PF structure
+ *
+ * Return: true if PF owns primary timer, false otherwise.
+ */
+static inline bool ice_pf_src_tmr_owned(struct ice_pf *pf)
+{
+ return pf->hw.func_caps.ts_func_info.src_tmr_owned &&
+ ice_is_primary(&pf->hw);
+}
+
+/**
+ * ice_get_primary_hw - Get pointer to primary ice_hw structure
+ * @pf: pointer to PF structure
+ *
+ * Return: A pointer to ice_hw structure with access to timesync
+ * register space.
+ */
+static inline struct ice_hw *ice_get_primary_hw(struct ice_pf *pf)
+{
+ if (!pf->adapter->ctrl_pf)
+ return &pf->hw;
+ else
+ return &pf->adapter->ctrl_pf->hw;
+}
#endif /* _ICE_H_ */
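[Editorial note: the ice_is_dual()/ice_is_primary() helpers added to ice.h above encode a small truth table over the NAC topology mode bits. Below is a standalone sketch of that logic only; the mask values are placeholders for ICE_NAC_TOPO_PRIMARY_M/ICE_NAC_TOPO_DUAL_M and a plain flag stands in for the E825C mac_type check.]

#include <stdbool.h>
#include <stdio.h>

#define NAC_TOPO_PRIMARY_M	0x1	/* placeholder for ICE_NAC_TOPO_PRIMARY_M */
#define NAC_TOPO_DUAL_M		0x2	/* placeholder for ICE_NAC_TOPO_DUAL_M */

static bool is_e825c;	/* stands in for hw->mac_type == ICE_MAC_GENERIC_3K_E825 */

static bool is_dual(unsigned int nac_topo_mode)
{
	/* Only E825C devices can run in the dual-NAC variant. */
	return is_e825c && (nac_topo_mode & NAC_TOPO_DUAL_M);
}

static bool is_primary(unsigned int nac_topo_mode)
{
	/* Non-E825C and single-NAC devices always count as "primary". */
	return !is_e825c || !is_dual(nac_topo_mode) ||
	       (nac_topo_mode & NAC_TOPO_PRIMARY_M);
}

int main(void)
{
	is_e825c = true;

	/* Dual-NAC secondary complex: dual=1 primary=0 */
	printf("dual=%d primary=%d\n", is_dual(NAC_TOPO_DUAL_M),
	       is_primary(NAC_TOPO_DUAL_M));
	/* Dual-NAC primary complex: dual=1 primary=1 */
	printf("dual=%d primary=%d\n",
	       is_dual(NAC_TOPO_DUAL_M | NAC_TOPO_PRIMARY_M),
	       is_primary(NAC_TOPO_DUAL_M | NAC_TOPO_PRIMARY_M));
	return 0;
}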
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c
index 01a08cfd0090..0a8a48cd4bce 100644
--- a/drivers/net/ethernet/intel/ice/ice_adapter.c
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
// SPDX-FileCopyrightText: Copyright Red Hat
-#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/pci.h>
@@ -14,32 +13,45 @@
static DEFINE_XARRAY(ice_adapters);
static DEFINE_MUTEX(ice_adapters_mutex);
-/* PCI bus number is 8 bits. Slot is 5 bits. Domain can have the rest. */
-#define INDEX_FIELD_DOMAIN GENMASK(BITS_PER_LONG - 1, 13)
-#define INDEX_FIELD_DEV GENMASK(31, 16)
-#define INDEX_FIELD_BUS GENMASK(12, 5)
-#define INDEX_FIELD_SLOT GENMASK(4, 0)
+#define ICE_ADAPTER_FIXED_INDEX BIT_ULL(63)
-static unsigned long ice_adapter_index(const struct pci_dev *pdev)
-{
- unsigned int domain = pci_domain_nr(pdev->bus);
-
- WARN_ON(domain > FIELD_MAX(INDEX_FIELD_DOMAIN));
+#define ICE_ADAPTER_INDEX_E825C \
+ (ICE_DEV_ID_E825C_BACKPLANE | ICE_ADAPTER_FIXED_INDEX)
+static u64 ice_adapter_index(struct pci_dev *pdev)
+{
switch (pdev->device) {
case ICE_DEV_ID_E825C_BACKPLANE:
case ICE_DEV_ID_E825C_QSFP:
case ICE_DEV_ID_E825C_SFP:
case ICE_DEV_ID_E825C_SGMII:
- return FIELD_PREP(INDEX_FIELD_DEV, pdev->device);
+ /* E825C devices have multiple NACs which are connected to the
+ * same clock source, and which must share the same
+ * ice_adapter structure. We can't use the serial number since
+ * each NAC has its own NVM generated with its own unique
+ * Device Serial Number. Instead, rely on the embedded nature
+ * of the E825C devices, and use a fixed index. This relies on
+ * the fact that all E825C physical functions in a given
+ * system are part of the same overall device.
+ */
+ return ICE_ADAPTER_INDEX_E825C;
default:
- return FIELD_PREP(INDEX_FIELD_DOMAIN, domain) |
- FIELD_PREP(INDEX_FIELD_BUS, pdev->bus->number) |
- FIELD_PREP(INDEX_FIELD_SLOT, PCI_SLOT(pdev->devfn));
+ return pci_get_dsn(pdev) & ~ICE_ADAPTER_FIXED_INDEX;
}
}
-static struct ice_adapter *ice_adapter_new(void)
+static unsigned long ice_adapter_xa_index(struct pci_dev *pdev)
+{
+ u64 index = ice_adapter_index(pdev);
+
+#if BITS_PER_LONG == 64
+ return index;
+#else
+ return (u32)index ^ (u32)(index >> 32);
+#endif
+}
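ice_adapter_index() gives all E825C parts one shared key with bit 63 set, while every other device uses its PCI DSN with bit 63 cleared, so the two namespaces cannot collide. ice_adapter_xa_index() then folds that 64-bit key into an unsigned long by XOR-ing the halves on 32-bit hosts, and the full 64-bit value is cached in adapter->index so a fold collision can still be flagged (see the WARN_ON_ONCE in ice_adapter_get() below). A minimal userspace model of the folding, using illustrative key values:

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>

#define FIXED_INDEX_BIT	(1ULL << 63)

/* Fold a 64-bit adapter key into a 32-bit xarray-style index. */
static uint32_t fold_index(uint64_t index)
{
	return (uint32_t)index ^ (uint32_t)(index >> 32);
}

int main(void)
{
	uint64_t e825c_key = 0x579cULL | FIXED_INDEX_BIT;	/* hypothetical device ID */
	uint64_t dsn_key = 0x0123456789abcdefULL & ~FIXED_INDEX_BIT;

	/* Bit 63 separates the fixed and DSN-derived namespaces. */
	assert(e825c_key != dsn_key);

	printf("folded DSN key:   0x%" PRIx32 "\n", fold_index(dsn_key));
	printf("folded E825C key: 0x%" PRIx32 "\n", fold_index(e825c_key));
	return 0;
}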
+
+static struct ice_adapter *ice_adapter_new(struct pci_dev *pdev)
{
struct ice_adapter *adapter;
@@ -47,7 +59,9 @@ static struct ice_adapter *ice_adapter_new(void)
if (!adapter)
return NULL;
+ adapter->index = ice_adapter_index(pdev);
spin_lock_init(&adapter->ptp_gltsyn_time_lock);
+ spin_lock_init(&adapter->txq_ctx_lock);
refcount_set(&adapter->refcount, 1);
mutex_init(&adapter->ports.lock);
@@ -77,25 +91,29 @@ static void ice_adapter_free(struct ice_adapter *adapter)
* Return: Pointer to ice_adapter on success.
* ERR_PTR() on error. -ENOMEM is the only possible error.
*/
-struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev)
+struct ice_adapter *ice_adapter_get(struct pci_dev *pdev)
{
- unsigned long index = ice_adapter_index(pdev);
struct ice_adapter *adapter;
+ unsigned long index;
int err;
+ index = ice_adapter_xa_index(pdev);
scoped_guard(mutex, &ice_adapters_mutex) {
- err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL);
- if (err == -EBUSY) {
- adapter = xa_load(&ice_adapters, index);
+ adapter = xa_load(&ice_adapters, index);
+ if (adapter) {
refcount_inc(&adapter->refcount);
+ WARN_ON_ONCE(adapter->index != ice_adapter_index(pdev));
return adapter;
}
+ err = xa_reserve(&ice_adapters, index, GFP_KERNEL);
if (err)
return ERR_PTR(err);
- adapter = ice_adapter_new();
- if (!adapter)
+ adapter = ice_adapter_new(pdev);
+ if (!adapter) {
+ xa_release(&ice_adapters, index);
return ERR_PTR(-ENOMEM);
+ }
xa_store(&ice_adapters, index, adapter, GFP_KERNEL);
}
return adapter;
@@ -110,11 +128,12 @@ struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev)
*
* Context: Process, may sleep.
*/
-void ice_adapter_put(const struct pci_dev *pdev)
+void ice_adapter_put(struct pci_dev *pdev)
{
- unsigned long index = ice_adapter_index(pdev);
struct ice_adapter *adapter;
+ unsigned long index;
+ index = ice_adapter_xa_index(pdev);
scoped_guard(mutex, &ice_adapters_mutex) {
adapter = xa_load(&ice_adapters, index);
if (WARN_ON(!adapter))
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h
index e233225848b3..e95266c7f20b 100644
--- a/drivers/net/ethernet/intel/ice/ice_adapter.h
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.h
@@ -27,22 +27,27 @@ struct ice_port_list {
/**
* struct ice_adapter - PCI adapter resources shared across PFs
+ * @refcount: Reference count. struct ice_pf objects hold the references.
* @ptp_gltsyn_time_lock: Spinlock protecting access to the GLTSYN_TIME
* register of the PTP clock.
- * @refcount: Reference count. struct ice_pf objects hold the references.
+ * @txq_ctx_lock: Spinlock protecting access to the GLCOMM_QTX_CNTX_CTL register
* @ctrl_pf: Control PF of the adapter
* @ports: Ports list
+ * @index: 64-bit index cached for collision detection on 32-bit systems
*/
struct ice_adapter {
refcount_t refcount;
/* For access to the GLTSYN_TIME register */
spinlock_t ptp_gltsyn_time_lock;
+ /* For access to GLCOMM_QTX_CNTX_CTL register */
+ spinlock_t txq_ctx_lock;
struct ice_pf *ctrl_pf;
struct ice_port_list ports;
+ u64 index;
};
-struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev);
-void ice_adapter_put(const struct pci_dev *pdev);
+struct ice_adapter *ice_adapter_get(struct pci_dev *pdev);
+void ice_adapter_put(struct pci_dev *pdev);
#endif /* _ICE_ADAPTER_H */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 1489a8ceec51..859e9c66f3e7 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -4,6 +4,8 @@
#ifndef _ICE_ADMINQ_CMD_H_
#define _ICE_ADMINQ_CMD_H_
+#include <linux/net/intel/libie/adminq.h>
+
/* This header file defines the Admin Queue commands, error codes and
* descriptor format. It is shared between Firmware and Software.
*/
@@ -12,37 +14,28 @@
#define ICE_AQC_TOPO_MAX_LEVEL_NUM 0x9
#define ICE_AQ_SET_MAC_FRAME_SIZE_MAX 9728
-struct ice_aqc_generic {
- __le32 param0;
- __le32 param1;
- __le32 addr_high;
- __le32 addr_low;
-};
+#define ICE_RXQ_CTX_SIZE_DWORDS 8
+#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
-/* Get version (direct 0x0001) */
-struct ice_aqc_get_ver {
- __le32 rom_ver;
- __le32 fw_build;
- u8 fw_branch;
- u8 fw_major;
- u8 fw_minor;
- u8 fw_patch;
- u8 api_branch;
- u8 api_major;
- u8 api_minor;
- u8 api_patch;
-};
-
-/* Send driver version (indirect 0x0002) */
-struct ice_aqc_driver_ver {
- u8 major_ver;
- u8 minor_ver;
- u8 build_ver;
- u8 subbuild_ver;
- u8 reserved[4];
- __le32 addr_high;
- __le32 addr_low;
-};
+typedef struct __packed { u8 buf[ICE_RXQ_CTX_SZ]; } ice_rxq_ctx_buf_t;
+
+/* The Tx queue context is 40 bytes, and includes some internal state. The
+ * Admin Queue buffers don't include the internal state, so only include the
+ * first 22 bytes of the context.
+ */
+#define ICE_TXQ_CTX_SZ 22
+
+typedef struct __packed { u8 buf[ICE_TXQ_CTX_SZ]; } ice_txq_ctx_buf_t;
+
+#define ICE_TXQ_CTX_FULL_SIZE_DWORDS 10
+#define ICE_TXQ_CTX_FULL_SZ \
+ (ICE_TXQ_CTX_FULL_SIZE_DWORDS * sizeof(u32))
+
+typedef struct __packed { u8 buf[ICE_TXQ_CTX_FULL_SZ]; } ice_txq_ctx_buf_full_t;
+
+#define ICE_TXTIME_CTX_SZ 25
+
+typedef struct __packed { u8 buf[ICE_TXTIME_CTX_SZ]; } ice_txtime_ctx_buf_t;
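The typedefs above pin the on-wire sizes of the queue contexts. A userspace sketch with compile-time checks (GCC attribute spelling for __packed, plain uint32_t for the dword size) makes the relationships explicit: the Rx context is 8 dwords, only the first 22 bytes of the 40-byte (10-dword) Tx context travel over the Admin Queue, and the Tx time context is 25 bytes.

#include <stdint.h>

#define ICE_RXQ_CTX_SIZE_DWORDS		8
#define ICE_RXQ_CTX_SZ			(ICE_RXQ_CTX_SIZE_DWORDS * sizeof(uint32_t))
#define ICE_TXQ_CTX_SZ			22
#define ICE_TXQ_CTX_FULL_SIZE_DWORDS	10
#define ICE_TXQ_CTX_FULL_SZ		(ICE_TXQ_CTX_FULL_SIZE_DWORDS * sizeof(uint32_t))
#define ICE_TXTIME_CTX_SZ		25

typedef struct __attribute__((packed)) { uint8_t buf[ICE_RXQ_CTX_SZ]; } rxq_ctx_buf_t;
typedef struct __attribute__((packed)) { uint8_t buf[ICE_TXQ_CTX_SZ]; } txq_ctx_buf_t;
typedef struct __attribute__((packed)) { uint8_t buf[ICE_TXQ_CTX_FULL_SZ]; } txq_ctx_buf_full_t;
typedef struct __attribute__((packed)) { uint8_t buf[ICE_TXTIME_CTX_SZ]; } txtime_ctx_buf_t;

_Static_assert(sizeof(rxq_ctx_buf_t) == 32, "Rx queue context is 8 dwords");
_Static_assert(sizeof(txq_ctx_buf_t) == 22, "AQ carries only 22 bytes of Tx context");
_Static_assert(sizeof(txq_ctx_buf_full_t) == 40, "full Tx context is 10 dwords");
_Static_assert(sizeof(txq_ctx_buf_t) < sizeof(txq_ctx_buf_full_t),
	       "internal state is not sent over the Admin Queue");
_Static_assert(sizeof(txtime_ctx_buf_t) == 25, "Tx time context is 25 bytes");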
/* Queue Shutdown (direct 0x0003) */
struct ice_aqc_q_shutdown {
@@ -51,94 +44,6 @@ struct ice_aqc_q_shutdown {
u8 reserved[15];
};
-/* Request resource ownership (direct 0x0008)
- * Release resource ownership (direct 0x0009)
- */
-struct ice_aqc_req_res {
- __le16 res_id;
-#define ICE_AQC_RES_ID_NVM 1
-#define ICE_AQC_RES_ID_SDP 2
-#define ICE_AQC_RES_ID_CHNG_LOCK 3
-#define ICE_AQC_RES_ID_GLBL_LOCK 4
- __le16 access_type;
-#define ICE_AQC_RES_ACCESS_READ 1
-#define ICE_AQC_RES_ACCESS_WRITE 2
-
- /* Upon successful completion, FW writes this value and driver is
- * expected to release resource before timeout. This value is provided
- * in milliseconds.
- */
- __le32 timeout;
-#define ICE_AQ_RES_NVM_READ_DFLT_TIMEOUT_MS 3000
-#define ICE_AQ_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
-#define ICE_AQ_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
-#define ICE_AQ_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
- /* For SDP: pin ID of the SDP */
- __le32 res_number;
- /* Status is only used for ICE_AQC_RES_ID_GLBL_LOCK */
- __le16 status;
-#define ICE_AQ_RES_GLBL_SUCCESS 0
-#define ICE_AQ_RES_GLBL_IN_PROG 1
-#define ICE_AQ_RES_GLBL_DONE 2
- u8 reserved[2];
-};
-
-/* Get function capabilities (indirect 0x000A)
- * Get device capabilities (indirect 0x000B)
- */
-struct ice_aqc_list_caps {
- u8 cmd_flags;
- u8 pf_index;
- u8 reserved[2];
- __le32 count;
- __le32 addr_high;
- __le32 addr_low;
-};
-
-/* Device/Function buffer entry, repeated per reported capability */
-struct ice_aqc_list_caps_elem {
- __le16 cap;
-#define ICE_AQC_CAPS_VALID_FUNCTIONS 0x0005
-#define ICE_AQC_CAPS_SRIOV 0x0012
-#define ICE_AQC_CAPS_VF 0x0013
-#define ICE_AQC_CAPS_VSI 0x0017
-#define ICE_AQC_CAPS_DCB 0x0018
-#define ICE_AQC_CAPS_RSS 0x0040
-#define ICE_AQC_CAPS_RXQS 0x0041
-#define ICE_AQC_CAPS_TXQS 0x0042
-#define ICE_AQC_CAPS_MSIX 0x0043
-#define ICE_AQC_CAPS_FD 0x0045
-#define ICE_AQC_CAPS_1588 0x0046
-#define ICE_AQC_CAPS_MAX_MTU 0x0047
-#define ICE_AQC_CAPS_NVM_VER 0x0048
-#define ICE_AQC_CAPS_PENDING_NVM_VER 0x0049
-#define ICE_AQC_CAPS_OROM_VER 0x004A
-#define ICE_AQC_CAPS_PENDING_OROM_VER 0x004B
-#define ICE_AQC_CAPS_NET_VER 0x004C
-#define ICE_AQC_CAPS_PENDING_NET_VER 0x004D
-#define ICE_AQC_CAPS_RDMA 0x0051
-#define ICE_AQC_CAPS_SENSOR_READING 0x0067
-#define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076
-#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
-#define ICE_AQC_CAPS_NVM_MGMT 0x0080
-#define ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE 0x0085
-#define ICE_AQC_CAPS_NAC_TOPOLOGY 0x0087
-#define ICE_AQC_CAPS_FW_LAG_SUPPORT 0x0092
-#define ICE_AQC_BIT_ROCEV2_LAG 0x01
-#define ICE_AQC_BIT_SRIOV_LAG 0x02
-
- u8 major_ver;
- u8 minor_ver;
- /* Number of resources described by this capability */
- __le32 number;
- /* Only meaningful for some types of resources */
- __le32 logical_id;
- /* Only meaningful for some types of resources */
- __le32 phys_id;
- __le64 rsvd1;
- __le64 rsvd2;
-};
-
/* Manage MAC address, read command - indirect (0x0107)
* This struct is also used for the response
*/
@@ -1491,7 +1396,6 @@ struct ice_aqc_dnl_equa_param {
#define ICE_AQC_RX_EQU_POST1 (0x12 << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_RX_EQU_BFLF (0x13 << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_RX_EQU_BFHF (0x14 << ICE_AQC_RX_EQU_SHIFT)
-#define ICE_AQC_RX_EQU_DRATE (0x15 << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_RX_EQU_CTLE_GAINHF (0x20 << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_RX_EQU_CTLE_GAINLF (0x21 << ICE_AQC_RX_EQU_SHIFT)
#define ICE_AQC_RX_EQU_CTLE_GAINDC (0x22 << ICE_AQC_RX_EQU_SHIFT)
@@ -1665,6 +1569,8 @@ struct ice_aqc_get_port_options_elem {
#define ICE_AQC_PORT_OPT_MAX_LANE_25G 5
#define ICE_AQC_PORT_OPT_MAX_LANE_50G 6
#define ICE_AQC_PORT_OPT_MAX_LANE_100G 7
+#define ICE_AQC_PORT_OPT_MAX_LANE_200G 8
+#define ICE_AQC_PORT_OPT_MAX_LANE_40G 9
u8 global_scid[2];
u8 phy_scid[2];
@@ -1807,6 +1713,7 @@ struct ice_aqc_nvm_pass_comp_tbl {
#define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED 0x0
#define ICE_AQ_NVM_PASS_COMP_CAN_MAY_BE_UPDATEABLE 0x1
#define ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED 0x2
+#define ICE_AQ_NVM_PASS_COMP_PARTIAL_CHECK 0x3
u8 component_response_code; /* Response only */
#define ICE_AQ_NVM_PASS_COMP_CAN_BE_UPDATED_CODE 0x0
#define ICE_AQ_NVM_PASS_COMP_STAMP_IDENTICAL_CODE 0x1
@@ -2084,10 +1991,10 @@ struct ice_aqc_add_txqs_perq {
__le16 txq_id;
u8 rsvd[2];
__le32 q_teid;
- u8 txq_ctx[22];
+ ice_txq_ctx_buf_t txq_ctx;
u8 rsvd2[2];
struct ice_aqc_txsched_elem info;
-};
+} __packed;
/* The format of the command buffer for Add Tx LAN Queues (0x0C30)
* is an array of the following structs. Please note that the length of
@@ -2157,6 +2064,10 @@ struct ice_aqc_cfg_txqs {
#define ICE_AQC_Q_CFG_SRC_PRT_M 0x7
#define ICE_AQC_Q_CFG_DST_PRT_S 3
#define ICE_AQC_Q_CFG_DST_PRT_M (0x7 << ICE_AQC_Q_CFG_DST_PRT_S)
+#define ICE_AQC_Q_CFG_MODE_M GENMASK(7, 6)
+#define ICE_AQC_Q_CFG_MODE_SAME_PF 0x0
+#define ICE_AQC_Q_CFG_MODE_GIVE_OWN 0x1
+#define ICE_AQC_Q_CFG_MODE_KEEP_OWN 0x2
u8 time_out;
#define ICE_AQC_Q_CFG_TIMEOUT_S 2
#define ICE_AQC_Q_CFG_TIMEOUT_M (0x1F << ICE_AQC_Q_CFG_TIMEOUT_S)
@@ -2210,6 +2121,34 @@ struct ice_aqc_add_rdma_qset_data {
struct ice_aqc_add_tx_rdma_qset_entry rdma_qsets[];
};
+/* Set Tx Time LAN Queue (indirect 0x0C35) */
+struct ice_aqc_set_txtimeqs {
+ __le16 q_id;
+ __le16 q_amount;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* This is the descriptor of each queue entry for the Set Tx Time Queue
+ * command (0x0C35). Only used within struct ice_aqc_set_txtime_qgrp.
+ */
+struct ice_aqc_set_txtimeqs_perq {
+ u8 reserved[4];
+ ice_txtime_ctx_buf_t txtime_ctx;
+ u8 reserved1[3];
+};
+
+/* The format of the command buffer for Set Tx Time Queue (0x0C35)
+ * is an array of the following structs. Please note that the length of
+ * each struct ice_aqc_set_txtime_qgrp is variable due to the variable
+ * number of queues in each group!
+ */
+struct ice_aqc_set_txtime_qgrp {
+ u8 reserved[8];
+ struct ice_aqc_set_txtimeqs_perq txtimeqs[];
+};
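Because txtimeqs[] is a flexible array, the buffer handed to the Set Tx Time Queue command is sized per group; the driver does this with struct_size() (see ice_cfg_tstamp() later in this diff). A simplified userspace sketch of the same sizing, ignoring struct_size()'s overflow checking and using placeholder field types:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct txtimeq_perq {
	uint8_t reserved[4];
	uint8_t txtime_ctx[25];
	uint8_t reserved1[3];
};

struct txtime_qgrp {
	uint8_t reserved[8];
	struct txtimeq_perq txtimeqs[];	/* one entry per queue in the group */
};

int main(void)
{
	unsigned int num_queues = 1;
	/* Equivalent of struct_size(qgrp, txtimeqs, num_queues). */
	size_t len = sizeof(struct txtime_qgrp) +
		     num_queues * sizeof(struct txtimeq_perq);
	struct txtime_qgrp *qgrp = calloc(1, len);

	if (!qgrp)
		return 1;
	printf("AQ buffer length for %u queue(s): %zu bytes\n", num_queues, len);
	free(qgrp);
	return 0;
}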
+
/* Download Package (indirect 0x0C40) */
/* Also used for Update Package (indirect 0x0C41 and 0x0C42) */
struct ice_aqc_download_pkg {
@@ -2264,6 +2203,24 @@ struct ice_aqc_get_pkg_info_resp {
struct ice_aqc_get_pkg_info pkg_info[];
};
+#define ICE_CGU_INPUT_PHASE_OFFSET_BYTES 6
+
+struct ice_cgu_input_measure {
+ u8 phase_offset[ICE_CGU_INPUT_PHASE_OFFSET_BYTES];
+ __le32 freq;
+} __packed __aligned(sizeof(__le16));
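The input-measure record is six bytes of phase offset followed by a 32-bit frequency, packed but held to 2-byte alignment. A userspace sketch with compile-time checks (GCC attribute spelling, uint32_t standing in for __le32) captures both properties:

#include <stdint.h>

struct cgu_input_measure {
	uint8_t phase_offset[6];
	uint32_t freq;
} __attribute__((packed, aligned(2)));

_Static_assert(sizeof(struct cgu_input_measure) == 10,
	       "packed layout: 6 + 4 bytes, no padding");
_Static_assert(_Alignof(struct cgu_input_measure) == 2,
	       "aligned(sizeof(__le16)) keeps 2-byte alignment");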
+
+#define ICE_AQC_GET_CGU_IN_MEAS_DPLL_IDX_M ICE_M(0xf, 0)
+
+/* Get CGU input measure command response data structure (indirect 0x0C59) */
+struct ice_aqc_get_cgu_input_measure {
+ u8 dpll_idx_opt;
+ u8 length;
+ u8 rsvd[6];
+};
+
+#define ICE_AQC_GET_CGU_MAX_PHASE_ADJ GENMASK(30, 0)
+
/* Get CGU abilities command response data structure (indirect 0x0C61) */
struct ice_aqc_get_cgu_abilities {
u8 num_inputs;
@@ -2278,6 +2235,8 @@ struct ice_aqc_get_cgu_abilities {
u8 rsvd[3];
};
+#define ICE_AQC_CGU_IN_CFG_FLG2_REFSYNC_EN BIT(7)
+
/* Set CGU input config (direct 0x0C62) */
struct ice_aqc_set_cgu_input_config {
u8 input_idx;
@@ -2472,225 +2431,85 @@ struct ice_aqc_event_lan_overflow {
u8 reserved[8];
};
-enum ice_aqc_fw_logging_mod {
- ICE_AQC_FW_LOG_ID_GENERAL = 0,
- ICE_AQC_FW_LOG_ID_CTRL,
- ICE_AQC_FW_LOG_ID_LINK,
- ICE_AQC_FW_LOG_ID_LINK_TOPO,
- ICE_AQC_FW_LOG_ID_DNL,
- ICE_AQC_FW_LOG_ID_I2C,
- ICE_AQC_FW_LOG_ID_SDP,
- ICE_AQC_FW_LOG_ID_MDIO,
- ICE_AQC_FW_LOG_ID_ADMINQ,
- ICE_AQC_FW_LOG_ID_HDMA,
- ICE_AQC_FW_LOG_ID_LLDP,
- ICE_AQC_FW_LOG_ID_DCBX,
- ICE_AQC_FW_LOG_ID_DCB,
- ICE_AQC_FW_LOG_ID_XLR,
- ICE_AQC_FW_LOG_ID_NVM,
- ICE_AQC_FW_LOG_ID_AUTH,
- ICE_AQC_FW_LOG_ID_VPD,
- ICE_AQC_FW_LOG_ID_IOSF,
- ICE_AQC_FW_LOG_ID_PARSER,
- ICE_AQC_FW_LOG_ID_SW,
- ICE_AQC_FW_LOG_ID_SCHEDULER,
- ICE_AQC_FW_LOG_ID_TXQ,
- ICE_AQC_FW_LOG_ID_RSVD,
- ICE_AQC_FW_LOG_ID_POST,
- ICE_AQC_FW_LOG_ID_WATCHDOG,
- ICE_AQC_FW_LOG_ID_TASK_DISPATCH,
- ICE_AQC_FW_LOG_ID_MNG,
- ICE_AQC_FW_LOG_ID_SYNCE,
- ICE_AQC_FW_LOG_ID_HEALTH,
- ICE_AQC_FW_LOG_ID_TSDRV,
- ICE_AQC_FW_LOG_ID_PFREG,
- ICE_AQC_FW_LOG_ID_MDLVER,
- ICE_AQC_FW_LOG_ID_MAX,
-};
-
-/* Set FW Logging configuration (indirect 0xFF30)
- * Register for FW Logging (indirect 0xFF31)
- * Query FW Logging (indirect 0xFF32)
- * FW Log Event (indirect 0xFF33)
- */
-struct ice_aqc_fw_log {
- u8 cmd_flags;
-#define ICE_AQC_FW_LOG_CONF_UART_EN BIT(0)
-#define ICE_AQC_FW_LOG_CONF_AQ_EN BIT(1)
-#define ICE_AQC_FW_LOG_QUERY_REGISTERED BIT(2)
-#define ICE_AQC_FW_LOG_CONF_SET_VALID BIT(3)
-#define ICE_AQC_FW_LOG_AQ_REGISTER BIT(0)
-#define ICE_AQC_FW_LOG_AQ_QUERY BIT(2)
-
- u8 rsp_flag;
- __le16 fw_rt_msb;
- union {
- struct {
- __le32 fw_rt_lsb;
- } sync;
- struct {
- __le16 log_resolution;
-#define ICE_AQC_FW_LOG_MIN_RESOLUTION (1)
-#define ICE_AQC_FW_LOG_MAX_RESOLUTION (128)
-
- __le16 mdl_cnt;
- } cfg;
- } ops;
+enum ice_aqc_health_status_mask {
+ ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK = BIT(0),
+ ICE_AQC_HEALTH_STATUS_SET_ALL_PF_MASK = BIT(1),
+ ICE_AQC_HEALTH_STATUS_SET_GLOBAL_MASK = BIT(2),
+};
+
+/* Set Health Status (direct 0xFF20) */
+struct ice_aqc_set_health_status_cfg {
+ u8 event_source;
+ u8 reserved[15];
+};
+
+enum ice_aqc_health_status {
+ ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT = 0x101,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_TYPE = 0x102,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_QUAL = 0x103,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_COMM = 0x104,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_CONFLICT = 0x105,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_NOT_PRESENT = 0x106,
+ ICE_AQC_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED = 0x107,
+ ICE_AQC_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT = 0x108,
+ ICE_AQC_HEALTH_STATUS_ERR_MOD_DIAGNOSTIC_FEATURE = 0x109,
+ ICE_AQC_HEALTH_STATUS_ERR_INVALID_LINK_CFG = 0x10B,
+ ICE_AQC_HEALTH_STATUS_ERR_PORT_ACCESS = 0x10C,
+ ICE_AQC_HEALTH_STATUS_ERR_PORT_UNREACHABLE = 0x10D,
+ ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED = 0x10F,
+ ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT = 0x110,
+ ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED = 0x111,
+ ICE_AQC_HEALTH_STATUS_ERR_NETLIST_TOPO = 0x112,
+ ICE_AQC_HEALTH_STATUS_ERR_NETLIST = 0x113,
+ ICE_AQC_HEALTH_STATUS_ERR_TOPO_CONFLICT = 0x114,
+ ICE_AQC_HEALTH_STATUS_ERR_LINK_HW_ACCESS = 0x115,
+ ICE_AQC_HEALTH_STATUS_ERR_LINK_RUNTIME = 0x116,
+ ICE_AQC_HEALTH_STATUS_ERR_DNL_INIT = 0x117,
+ ICE_AQC_HEALTH_STATUS_ERR_PHY_NVM_PROG = 0x120,
+ ICE_AQC_HEALTH_STATUS_ERR_PHY_FW_LOAD = 0x121,
+ ICE_AQC_HEALTH_STATUS_INFO_RECOVERY = 0x500,
+ ICE_AQC_HEALTH_STATUS_ERR_FLASH_ACCESS = 0x501,
+ ICE_AQC_HEALTH_STATUS_ERR_NVM_AUTH = 0x502,
+ ICE_AQC_HEALTH_STATUS_ERR_OROM_AUTH = 0x503,
+ ICE_AQC_HEALTH_STATUS_ERR_DDP_AUTH = 0x504,
+ ICE_AQC_HEALTH_STATUS_ERR_NVM_COMPAT = 0x505,
+ ICE_AQC_HEALTH_STATUS_ERR_OROM_COMPAT = 0x506,
+ ICE_AQC_HEALTH_STATUS_ERR_NVM_SEC_VIOLATION = 0x507,
+ ICE_AQC_HEALTH_STATUS_ERR_OROM_SEC_VIOLATION = 0x508,
+ ICE_AQC_HEALTH_STATUS_ERR_DCB_MIB = 0x509,
+ ICE_AQC_HEALTH_STATUS_ERR_MNG_TIMEOUT = 0x50A,
+ ICE_AQC_HEALTH_STATUS_ERR_BMC_RESET = 0x50B,
+ ICE_AQC_HEALTH_STATUS_ERR_LAST_MNG_FAIL = 0x50C,
+ ICE_AQC_HEALTH_STATUS_ERR_RESOURCE_ALLOC_FAIL = 0x50D,
+ ICE_AQC_HEALTH_STATUS_ERR_FW_LOOP = 0x1000,
+ ICE_AQC_HEALTH_STATUS_ERR_FW_PFR_FAIL = 0x1001,
+ ICE_AQC_HEALTH_STATUS_ERR_LAST_FAIL_AQ = 0x1002,
+};
+
+/* Get Health Status (indirect 0xFF22) */
+struct ice_aqc_get_health_status {
+ __le16 health_status_count;
+ u8 reserved[6];
__le32 addr_high;
__le32 addr_low;
};
-/* Response Buffer for:
- * Set Firmware Logging Configuration (0xFF30)
- * Query FW Logging (0xFF32)
- */
-struct ice_aqc_fw_log_cfg_resp {
- __le16 module_identifier;
- u8 log_level;
- u8 rsvd0;
+enum ice_aqc_health_status_scope {
+ ICE_AQC_HEALTH_STATUS_PF = 0x1,
+ ICE_AQC_HEALTH_STATUS_PORT = 0x2,
+ ICE_AQC_HEALTH_STATUS_GLOBAL = 0x3,
};
-/**
- * struct ice_aq_desc - Admin Queue (AQ) descriptor
- * @flags: ICE_AQ_FLAG_* flags
- * @opcode: AQ command opcode
- * @datalen: length in bytes of indirect/external data buffer
- * @retval: return value from firmware
- * @cookie_high: opaque data high-half
- * @cookie_low: opaque data low-half
- * @params: command-specific parameters
- *
- * Descriptor format for commands the driver posts on the Admin Transmit Queue
- * (ATQ). The firmware writes back onto the command descriptor and returns
- * the result of the command. Asynchronous events that are not an immediate
- * result of the command are written to the Admin Receive Queue (ARQ) using
- * the same descriptor format. Descriptors are in little-endian notation with
- * 32-bit words.
+#define ICE_AQC_HEALTH_STATUS_UNDEFINED_DATA 0xDEADBEEF
+
+/* Get Health Status event buffer entry (0xFF22),
+ * repeated per reported health status.
*/
-struct ice_aq_desc {
- __le16 flags;
- __le16 opcode;
- __le16 datalen;
- __le16 retval;
- __le32 cookie_high;
- __le32 cookie_low;
- union {
- u8 raw[16];
- struct ice_aqc_generic generic;
- struct ice_aqc_get_ver get_ver;
- struct ice_aqc_driver_ver driver_ver;
- struct ice_aqc_q_shutdown q_shutdown;
- struct ice_aqc_req_res res_owner;
- struct ice_aqc_manage_mac_read mac_read;
- struct ice_aqc_manage_mac_write mac_write;
- struct ice_aqc_clear_pxe clear_pxe;
- struct ice_aqc_list_caps get_cap;
- struct ice_aqc_get_phy_caps get_phy;
- struct ice_aqc_set_phy_cfg set_phy;
- struct ice_aqc_restart_an restart_an;
- struct ice_aqc_set_phy_rec_clk_out set_phy_rec_clk_out;
- struct ice_aqc_get_phy_rec_clk_out get_phy_rec_clk_out;
- struct ice_aqc_get_sensor_reading get_sensor_reading;
- struct ice_aqc_get_sensor_reading_resp get_sensor_reading_resp;
- struct ice_aqc_gpio read_write_gpio;
- struct ice_aqc_sff_eeprom read_write_sff_param;
- struct ice_aqc_set_port_id_led set_port_id_led;
- struct ice_aqc_get_port_options get_port_options;
- struct ice_aqc_set_port_option set_port_option;
- struct ice_aqc_get_sw_cfg get_sw_conf;
- struct ice_aqc_set_port_params set_port_params;
- struct ice_aqc_sw_rules sw_rules;
- struct ice_aqc_add_get_recipe add_get_recipe;
- struct ice_aqc_recipe_to_profile recipe_to_profile;
- struct ice_aqc_get_topo get_topo;
- struct ice_aqc_sched_elem_cmd sched_elem_cmd;
- struct ice_aqc_query_txsched_res query_sched_res;
- struct ice_aqc_query_port_ets port_ets;
- struct ice_aqc_rl_profile rl_profile;
- struct ice_aqc_nvm nvm;
- struct ice_aqc_nvm_checksum nvm_checksum;
- struct ice_aqc_nvm_pkg_data pkg_data;
- struct ice_aqc_nvm_pass_comp_tbl pass_comp_tbl;
- struct ice_aqc_pf_vf_msg virt;
- struct ice_aqc_set_query_pfc_mode set_query_pfc_mode;
- struct ice_aqc_lldp_get_mib lldp_get_mib;
- struct ice_aqc_lldp_set_mib_change lldp_set_event;
- struct ice_aqc_lldp_stop lldp_stop;
- struct ice_aqc_lldp_start lldp_start;
- struct ice_aqc_lldp_set_local_mib lldp_set_mib;
- struct ice_aqc_lldp_stop_start_specific_agent lldp_agent_ctrl;
- struct ice_aqc_lldp_filter_ctrl lldp_filter_ctrl;
- struct ice_aqc_get_set_rss_lut get_set_rss_lut;
- struct ice_aqc_get_set_rss_key get_set_rss_key;
- struct ice_aqc_neigh_dev_req neigh_dev;
- struct ice_aqc_add_txqs add_txqs;
- struct ice_aqc_dis_txqs dis_txqs;
- struct ice_aqc_cfg_txqs cfg_txqs;
- struct ice_aqc_add_rdma_qset add_rdma_qset;
- struct ice_aqc_add_get_update_free_vsi vsi_cmd;
- struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
- struct ice_aqc_download_pkg download_pkg;
- struct ice_aqc_set_cgu_input_config set_cgu_input_config;
- struct ice_aqc_get_cgu_input_config get_cgu_input_config;
- struct ice_aqc_set_cgu_output_config set_cgu_output_config;
- struct ice_aqc_get_cgu_output_config get_cgu_output_config;
- struct ice_aqc_get_cgu_dpll_status get_cgu_dpll_status;
- struct ice_aqc_set_cgu_dpll_config set_cgu_dpll_config;
- struct ice_aqc_set_cgu_ref_prio set_cgu_ref_prio;
- struct ice_aqc_get_cgu_ref_prio get_cgu_ref_prio;
- struct ice_aqc_get_cgu_info get_cgu_info;
- struct ice_aqc_driver_shared_params drv_shared_params;
- struct ice_aqc_fw_log fw_log;
- struct ice_aqc_set_mac_lb set_mac_lb;
- struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
- struct ice_aqc_set_mac_cfg set_mac_cfg;
- struct ice_aqc_set_event_mask set_event_mask;
- struct ice_aqc_get_link_status get_link_status;
- struct ice_aqc_event_lan_overflow lan_overflow;
- struct ice_aqc_get_link_topo get_link_topo;
- struct ice_aqc_dnl_call_command dnl_call;
- struct ice_aqc_i2c read_write_i2c;
- struct ice_aqc_read_i2c_resp read_i2c_resp;
- struct ice_aqc_get_set_tx_topo get_set_tx_topo;
- } params;
-};
-
-/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */
-#define ICE_AQ_LG_BUF 512
-
-#define ICE_AQ_FLAG_DD_S 0
-#define ICE_AQ_FLAG_CMP_S 1
-#define ICE_AQ_FLAG_ERR_S 2
-#define ICE_AQ_FLAG_LB_S 9
-#define ICE_AQ_FLAG_RD_S 10
-#define ICE_AQ_FLAG_BUF_S 12
-#define ICE_AQ_FLAG_SI_S 13
-
-#define ICE_AQ_FLAG_DD BIT(ICE_AQ_FLAG_DD_S) /* 0x1 */
-#define ICE_AQ_FLAG_CMP BIT(ICE_AQ_FLAG_CMP_S) /* 0x2 */
-#define ICE_AQ_FLAG_ERR BIT(ICE_AQ_FLAG_ERR_S) /* 0x4 */
-#define ICE_AQ_FLAG_LB BIT(ICE_AQ_FLAG_LB_S) /* 0x200 */
-#define ICE_AQ_FLAG_RD BIT(ICE_AQ_FLAG_RD_S) /* 0x400 */
-#define ICE_AQ_FLAG_BUF BIT(ICE_AQ_FLAG_BUF_S) /* 0x1000 */
-#define ICE_AQ_FLAG_SI BIT(ICE_AQ_FLAG_SI_S) /* 0x2000 */
-
-/* error codes */
-enum ice_aq_err {
- ICE_AQ_RC_OK = 0, /* Success */
- ICE_AQ_RC_EPERM = 1, /* Operation not permitted */
- ICE_AQ_RC_ENOENT = 2, /* No such element */
- ICE_AQ_RC_ENOMEM = 9, /* Out of memory */
- ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
- ICE_AQ_RC_EEXIST = 13, /* Object already exists */
- ICE_AQ_RC_EINVAL = 14, /* Invalid argument */
- ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
- ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */
- ICE_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
- ICE_AQ_RC_ENOSEC = 24, /* Missing security manifest */
- ICE_AQ_RC_EBADSIG = 25, /* Bad RSA signature */
- ICE_AQ_RC_ESVN = 26, /* SVN number prohibits this package */
- ICE_AQ_RC_EBADMAN = 27, /* Manifest hash mismatch */
- ICE_AQ_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */
+struct ice_aqc_health_status_elem {
+ __le16 health_status_code;
+ __le16 event_source;
+ __le32 internal_data1;
+ __le32 internal_data2;
};
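Get Health Status (0xFF22) returns health_status_count copies of this 12-byte element in the indirect buffer. A minimal userspace sketch of walking such a response and mapping a few of the codes defined above (host assumed little-endian, so the __le16/__le32 conversions are omitted; the sample values are made up):

#include <stdint.h>
#include <stdio.h>

struct health_status_elem {
	uint16_t health_status_code;
	uint16_t event_source;
	uint32_t internal_data1;
	uint32_t internal_data2;
};

static const char *health_code_str(uint16_t code)
{
	switch (code) {
	case 0x101: return "unknown module (strict)";
	case 0x10B: return "invalid link configuration";
	case 0x502: return "NVM authentication failure";
	default:    return "unrecognized health status code";
	}
}

static void dump_health_status(const struct health_status_elem *elems,
			       uint16_t count)
{
	for (uint16_t i = 0; i < count; i++)
		printf("event_source %#x: %s (code %#x)\n",
		       elems[i].event_source,
		       health_code_str(elems[i].health_status_code),
		       elems[i].health_status_code);
}

int main(void)
{
	struct health_status_elem sample[] = {
		{ .health_status_code = 0x101, .event_source = 0x2 },
		{ .health_status_code = 0x502, .event_source = 0x3 },
	};

	dump_health_status(sample, 2);
	return 0;
}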
/* Admin Queue command opcodes */
@@ -2827,6 +2646,9 @@ enum ice_adminq_opc {
ice_aqc_opc_cfg_txqs = 0x0C32,
ice_aqc_opc_add_rdma_qset = 0x0C33,
+ /* Tx Time queue commands */
+ ice_aqc_opc_set_txtimeqs = 0x0C35,
+
/* package commands */
ice_aqc_opc_download_pkg = 0x0C40,
ice_aqc_opc_upload_section = 0x0C41,
@@ -2834,6 +2656,7 @@ enum ice_adminq_opc {
ice_aqc_opc_get_pkg_info_list = 0x0C43,
/* 1588/SyncE commands/events */
+ ice_aqc_opc_get_cgu_input_measure = 0x0C59,
ice_aqc_opc_get_cgu_abilities = 0x0C61,
ice_aqc_opc_set_cgu_input_config = 0x0C62,
ice_aqc_opc_get_cgu_input_config = 0x0C63,
@@ -2850,6 +2673,10 @@ enum ice_adminq_opc {
/* Standalone Commands/Events */
ice_aqc_opc_event_lan_overflow = 0x1001,
+ /* System Diagnostic commands */
+ ice_aqc_opc_set_health_status_cfg = 0xFF20,
+ ice_aqc_opc_get_health_status = 0xFF22,
+
/* FW Logging Commands */
ice_aqc_opc_fw_logs_config = 0xFF30,
ice_aqc_opc_fw_logs_register = 0xFF31,
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index 7cee365cc7d1..1f7834c03550 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -378,6 +378,50 @@ ice_arfs_is_perfect_flow_set(struct ice_hw *hw, __be16 l3_proto, u8 l4_proto)
}
/**
+ * ice_arfs_cmp - Check if aRFS filter matches this flow.
+ * @fltr_info: filter info of the saved ARFS entry.
+ * @fk: flow dissector keys.
+ * @n_proto: One of htons(ETH_P_IP) or htons(ETH_P_IPV6).
+ * @ip_proto: One of IPPROTO_TCP or IPPROTO_UDP.
+ *
+ * Since this function assumes limited values for n_proto and ip_proto, it
+ * is meant to be called only from ice_rx_flow_steer().
+ *
+ * Return:
+ * * true - fltr_info refers to the same flow as fk.
+ * * false - fltr_info and fk refer to different flows.
+ */
+static bool
+ice_arfs_cmp(const struct ice_fdir_fltr *fltr_info, const struct flow_keys *fk,
+ __be16 n_proto, u8 ip_proto)
+{
+ /* Determine if the filter is for IPv4 or IPv6 based on flow_type,
+ * which is one of ICE_FLTR_PTYPE_NONF_IPV{4,6}_{TCP,UDP}.
+ */
+ bool is_v4 = fltr_info->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
+ fltr_info->flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+
+	/* The following checks compare the quickest and most discriminative
+	 * fields first so that non-matching flows fail early.
+ */
+ if (is_v4)
+ return n_proto == htons(ETH_P_IP) &&
+ fltr_info->ip.v4.src_port == fk->ports.src &&
+ fltr_info->ip.v4.dst_port == fk->ports.dst &&
+ fltr_info->ip.v4.src_ip == fk->addrs.v4addrs.src &&
+ fltr_info->ip.v4.dst_ip == fk->addrs.v4addrs.dst &&
+ fltr_info->ip.v4.proto == ip_proto;
+
+ return fltr_info->ip.v6.src_port == fk->ports.src &&
+ fltr_info->ip.v6.dst_port == fk->ports.dst &&
+ fltr_info->ip.v6.proto == ip_proto &&
+ !memcmp(&fltr_info->ip.v6.src_ip, &fk->addrs.v6addrs.src,
+ sizeof(struct in6_addr)) &&
+ !memcmp(&fltr_info->ip.v6.dst_ip, &fk->addrs.v6addrs.dst,
+ sizeof(struct in6_addr));
+}
+
+/**
* ice_rx_flow_steer - steer the Rx flow to where application is being run
* @netdev: ptr to the netdev being adjusted
* @skb: buffer with required header information
@@ -448,6 +492,10 @@ ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
continue;
fltr_info = &arfs_entry->fltr_info;
+
+ if (!ice_arfs_cmp(fltr_info, &fk, n_proto, ip_proto))
+ continue;
+
ret = fltr_info->fltr_id;
if (fltr_info->q_index == rxq_idx ||
@@ -511,7 +559,7 @@ void ice_init_arfs(struct ice_vsi *vsi)
struct hlist_head *arfs_fltr_list;
unsigned int i;
- if (!vsi || vsi->type != ICE_VSI_PF)
+ if (!vsi || vsi->type != ICE_VSI_PF || ice_is_arfs_active(vsi))
return;
arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
@@ -571,25 +619,6 @@ void ice_clear_arfs(struct ice_vsi *vsi)
}
/**
- * ice_free_cpu_rx_rmap - free setup CPU reverse map
- * @vsi: the VSI to be forwarded to
- */
-void ice_free_cpu_rx_rmap(struct ice_vsi *vsi)
-{
- struct net_device *netdev;
-
- if (!vsi || vsi->type != ICE_VSI_PF)
- return;
-
- netdev = vsi->netdev;
- if (!netdev || !netdev->rx_cpu_rmap)
- return;
-
- free_irq_cpu_rmap(netdev->rx_cpu_rmap);
- netdev->rx_cpu_rmap = NULL;
-}
-
-/**
* ice_set_cpu_rx_rmap - setup CPU reverse map for each queue
* @vsi: the VSI to be forwarded to
*/
@@ -597,7 +626,6 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
{
struct net_device *netdev;
struct ice_pf *pf;
- int i;
if (!vsi || vsi->type != ICE_VSI_PF)
return 0;
@@ -610,18 +638,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
netdev_dbg(netdev, "Setup CPU RMAP: vsi type 0x%x, ifname %s, q_vectors %d\n",
vsi->type, netdev->name, vsi->num_q_vectors);
- netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(vsi->num_q_vectors);
- if (unlikely(!netdev->rx_cpu_rmap))
- return -EINVAL;
-
- ice_for_each_q_vector(vsi, i)
- if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
- vsi->q_vectors[i]->irq.virq)) {
- ice_free_cpu_rx_rmap(vsi);
- return -EINVAL;
- }
-
- return 0;
+ return netif_enable_cpu_rmap(netdev, vsi->num_q_vectors);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.h b/drivers/net/ethernet/intel/ice/ice_arfs.h
index 9669ad9bf7b5..9706293128c3 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.h
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.h
@@ -45,7 +45,6 @@ int
ice_rx_flow_steer(struct net_device *netdev, const struct sk_buff *skb,
u16 rxq_idx, u32 flow_id);
void ice_clear_arfs(struct ice_vsi *vsi);
-void ice_free_cpu_rx_rmap(struct ice_vsi *vsi);
void ice_init_arfs(struct ice_vsi *vsi);
void ice_sync_arfs_fltrs(struct ice_pf *pf);
int ice_set_cpu_rx_rmap(struct ice_vsi *vsi);
@@ -56,7 +55,6 @@ ice_is_arfs_using_perfect_flow(struct ice_hw *hw,
enum ice_fltr_ptype flow_type);
#else
static inline void ice_clear_arfs(struct ice_vsi *vsi) { }
-static inline void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) { }
static inline void ice_init_arfs(struct ice_vsi *vsi) { }
static inline void ice_sync_arfs_fltrs(struct ice_pf *pf) { }
static inline void ice_remove_arfs(struct ice_pf *pf) { }
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 82a9cd4ec7ae..eadb1e3d12b3 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019, Intel Corporation. */
#include <net/xdp_sock_drv.h>
+#include <linux/net/intel/libie/rx.h>
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
@@ -147,10 +148,6 @@ skip_alloc:
q_vector->reg_idx = q_vector->irq.index;
q_vector->vf_reg_idx = q_vector->irq.index;
- /* only set affinity_mask if the CPU is online */
- if (cpu_online(v_idx))
- cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
-
/* This will not be called in the driver load path because the netdev
 * will not be created yet. All other cases will register the NAPI
* handler here (i.e. resume, reset/rebuild, etc.)
@@ -246,7 +243,8 @@ static void ice_cfg_itr_gran(struct ice_hw *hw)
* @ring: ring to get the absolute queue index
* @tc: traffic class number
*/
-static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
+static u16
+ice_calc_txq_handle(const struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
{
WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");
@@ -254,7 +252,7 @@ static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8
return ring->q_index - ring->ch->base_q;
/* Idea here for calculation is that we subtract the number of queue
- * count from TC that ring belongs to from it's absolute queue index
+ * count from TC that ring belongs to from its absolute queue index
* and as a result we get the queue's index within TC.
*/
return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
@@ -276,35 +274,26 @@ static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state))
return;
- netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask,
+ netif_set_xps_queue(ring->netdev,
+ &ring->q_vector->napi.config->affinity_mask,
ring->q_index);
}
/**
- * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
- * @ring: The Tx ring to configure
- * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
- * @pf_q: queue index in the PF space
+ * ice_set_txq_ctx_vmvf - set queue context VM/VF type and number by VSI type
+ * @ring: the Tx ring to configure
+ * @vmvf_type: VM/VF type
+ * @vmvf_num: VM/VF number
*
- * Configure the Tx descriptor ring in TLAN context.
+ * Return: 0 on success and a negative value on error.
*/
-static void
-ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+static int
+ice_set_txq_ctx_vmvf(struct ice_tx_ring *ring, u8 *vmvf_type, u16 *vmvf_num)
{
struct ice_vsi *vsi = ring->vsi;
- struct ice_hw *hw = &vsi->back->hw;
-
- tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
-
- tlan_ctx->port_num = vsi->port_info->lport;
-
- /* Transmit Queue Length */
- tlan_ctx->qlen = ring->count;
-
- ice_set_cgd_num(tlan_ctx, ring->dcb_tc);
+ struct ice_hw *hw;
- /* PF number */
- tlan_ctx->pf_num = hw->pf_id;
+ hw = &vsi->back->hw;
/* queue belongs to a specific VSI type
* VF / VM index should be programmed per vmvf_type setting:
@@ -317,21 +306,60 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
case ICE_VSI_CTRL:
case ICE_VSI_PF:
if (ring->ch)
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
+ *vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
else
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
+ *vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
case ICE_VSI_VF:
/* Firmware expects vmvf_num to be absolute VF ID */
- tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
+ *vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
+ *vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break;
case ICE_VSI_SF:
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
+ *vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
break;
default:
- return;
+ dev_info(ice_pf_to_dev(vsi->back),
+ "Unable to set VMVF type for VSI type %d\n",
+ vsi->type);
+ return -EINVAL;
}
+ return 0;
+}
+
+/**
+ * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
+ * @ring: the Tx ring to configure
+ * @tlan_ctx: pointer to the Tx LAN queue context structure to be initialized
+ * @pf_q: queue index in the PF space
+ *
+ * Configure the Tx descriptor ring in TLAN context.
+ *
+ * Return: 0 on success and a negative value on error.
+ */
+static int
+ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ struct ice_hw *hw;
+ int err;
+
+ hw = &vsi->back->hw;
+ tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
+ tlan_ctx->port_num = vsi->port_info->lport;
+
+ /* Transmit Queue Length */
+ tlan_ctx->qlen = ring->count;
+
+ ice_set_cgd_num(tlan_ctx, ring->dcb_tc);
+
+ /* PF number */
+ tlan_ctx->pf_num = hw->pf_id;
+
+ err = ice_set_txq_ctx_vmvf(ring, &tlan_ctx->vmvf_type,
+ &tlan_ctx->vmvf_num);
+ if (err)
+ return err;
/* make sure the context is associated with the right VSI */
if (ring->ch)
@@ -358,22 +386,83 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
* 1: Legacy Host Interface
*/
tlan_ctx->legacy_int = ICE_TX_LEGACY;
+
+ return 0;
}
/**
- * ice_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
+ * ice_setup_txtime_ctx - setup a struct ice_txtime_ctx instance
+ * @ring: the tstamp ring to configure
+ * @txtime_ctx: pointer to the Tx time queue context structure to be initialized
*
- * Returns the offset value for ring into the data buffer.
+ * Return: 0 on success and a negative value on error.
*/
-static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
+static int
+ice_setup_txtime_ctx(const struct ice_tstamp_ring *ring,
+ struct ice_txtime_ctx *txtime_ctx)
{
- if (ice_ring_uses_build_skb(rx_ring))
- return ICE_SKB_PAD;
+ struct ice_tx_ring *tx_ring = ring->tx_ring;
+ struct ice_vsi *vsi = tx_ring->vsi;
+ struct ice_hw *hw = &vsi->back->hw;
+ int err;
+
+ txtime_ctx->base = ring->dma >> ICE_TXTIME_CTX_BASE_S;
+
+ /* Tx time Queue Length */
+ txtime_ctx->qlen = ring->count;
+ txtime_ctx->txtime_ena_q = 1;
+
+ /* PF number */
+ txtime_ctx->pf_num = hw->pf_id;
+
+ err = ice_set_txq_ctx_vmvf(tx_ring, &txtime_ctx->vmvf_type,
+ &txtime_ctx->vmvf_num);
+ if (err)
+ return err;
+
+ /* make sure the context is associated with the right VSI */
+ if (tx_ring->ch)
+ txtime_ctx->src_vsi = tx_ring->ch->vsi_num;
+ else
+ txtime_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
+
+ txtime_ctx->ts_res = ICE_TXTIME_CTX_RESOLUTION_128NS;
+ txtime_ctx->drbell_mode_32 = ICE_TXTIME_CTX_DRBELL_MODE_32;
+ txtime_ctx->ts_fetch_prof_id = ICE_TXTIME_CTX_FETCH_PROF_ID_0;
+
return 0;
}
/**
+ * ice_calc_ts_ring_count - calculate the number of Tx time stamp descriptors
+ * @tx_ring: Tx ring to calculate the count for
+ *
+ * Return: the number of Tx time stamp descriptors.
+ */
+u16 ice_calc_ts_ring_count(struct ice_tx_ring *tx_ring)
+{
+ u16 prof = ICE_TXTIME_CTX_FETCH_PROF_ID_0;
+ struct ice_vsi *vsi = tx_ring->vsi;
+ struct ice_hw *hw = &vsi->back->hw;
+ u16 max_fetch_desc = 0, fetch, i;
+ u32 reg;
+
+ for (i = 0; i < ICE_TXTIME_FETCH_PROFILE_CNT; i++) {
+ reg = rd32(hw, E830_GLTXTIME_FETCH_PROFILE(prof, 0));
+ fetch = FIELD_GET(E830_GLTXTIME_FETCH_PROFILE_FETCH_TS_DESC_M,
+ reg);
+ max_fetch_desc = max(fetch, max_fetch_desc);
+ }
+
+ if (!max_fetch_desc)
+ max_fetch_desc = ICE_TXTIME_FETCH_TS_DESC_DFLT;
+
+ max_fetch_desc = ALIGN(max_fetch_desc, ICE_REQ_DESC_MULTIPLE);
+
+ return tx_ring->count + max_fetch_desc;
+}
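The timestamp ring is sized as the Tx ring length plus the largest fetch-profile descriptor count rounded up to the required descriptor multiple. A small sketch of that round-up; the multiple of 32 and the fallback of 64 descriptors are illustrative placeholders, not values read from hardware:

#include <assert.h>
#include <stdint.h>

/* ALIGN() as used in the kernel: round x up to a power-of-two multiple a. */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uint32_t)(a) - 1))

static uint16_t calc_ts_ring_count(uint16_t txq_count, uint16_t max_fetch_desc,
				   uint16_t desc_multiple)
{
	if (!max_fetch_desc)
		max_fetch_desc = 64;	/* placeholder default fetch count */

	max_fetch_desc = ALIGN_UP(max_fetch_desc, desc_multiple);

	return txq_count + max_fetch_desc;
}

int main(void)
{
	/* 100 fetch descriptors rounded up to a multiple of 32 -> 128. */
	assert(calc_ts_ring_count(512, 100, 32) == 512 + 128);
	/* A zero reading falls back to the (placeholder) default of 64. */
	assert(calc_ts_ring_count(512, 0, 32) == 512 + 64);
	return 0;
}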
+
+/**
* ice_setup_rx_ctx - Configure a receive ring context
* @ring: The Rx ring to configure
*
@@ -435,8 +524,29 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
else
rlan_ctx.l2tsel = 1;
- rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
- rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
+ if (ring->hdr_pp) {
+ rlan_ctx.hbuf = ring->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
+ rlan_ctx.dtype = ICE_RX_DTYPE_HEADER_SPLIT;
+
+ /*
+		 * If the frame is TCP/UDP/SCTP, it is split at the start of
+		 * the payload.
+		 * If not, but it's an IPv4/IPv6 frame, it is split after
+		 * the IP header.
+		 * If not IP, it is split after the Ethernet header.
+ *
+ * In any case, the header buffer will never be left empty.
+ */
+ rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_SPLIT_L2 |
+ ICE_RLAN_RX_HSPLIT_0_SPLIT_IP |
+ ICE_RLAN_RX_HSPLIT_0_SPLIT_TCP_UDP |
+ ICE_RLAN_RX_HSPLIT_0_SPLIT_SCTP;
+ } else {
+ rlan_ctx.hbuf = 0;
+ rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
+ rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
+ }
+
rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
/* This controls whether VLAN is stripped from inner headers
@@ -448,12 +558,15 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
/* Max packet size for this queue - must not be set to a larger value
* than 5 x DBUF
*/
- rlan_ctx.rxmax = min_t(u32, ring->max_frame,
+ rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len);
/* Rx queue threshold in units of 64 */
rlan_ctx.lrxqthresh = 1;
+ /* Enable descriptor prefetch */
+ rlan_ctx.prefena = 1;
+
/* PF acts as uplink for switchdev; set flex descriptor with src_vsi
* metadata and flags to allow redirecting to PR netdev
*/
@@ -470,9 +583,6 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
*/
if (vsi->type != ICE_VSI_VF)
ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
- else
- ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3,
- false);
/* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
@@ -485,14 +595,6 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
if (vsi->type == ICE_VSI_VF)
return 0;
- /* configure Rx buffer alignment */
- if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
- ice_clear_ring_build_skb_ena(ring);
- else
- ice_set_ring_build_skb_ena(ring);
-
- ring->rx_offset = ice_rx_offset(ring);
-
/* init queue specific tail register */
ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
writel(0, ring->tail);
@@ -500,36 +602,51 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
return 0;
}
-static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring)
+static int ice_rxq_pp_create(struct ice_rx_ring *rq)
{
- void *ctx_ptr = &ring->pkt_ctx;
- struct xsk_cb_desc desc = {};
-
- XSK_CHECK_PRIV_TYPE(struct ice_xdp_buff);
- desc.src = &ctx_ptr;
- desc.off = offsetof(struct ice_xdp_buff, pkt_ctx) -
- sizeof(struct xdp_buff);
- desc.bytes = sizeof(ctx_ptr);
- xsk_pool_fill_cb(ring->xsk_pool, &desc);
-}
+ struct libeth_fq fq = {
+ .count = rq->count,
+ .nid = NUMA_NO_NODE,
+ .hsplit = rq->vsi->hsplit,
+ .xdp = ice_is_xdp_ena_vsi(rq->vsi),
+ .buf_len = LIBIE_MAX_RX_BUF_LEN,
+ };
+ int err;
-/**
- * ice_get_frame_sz - calculate xdp_buff::frame_sz
- * @rx_ring: the ring being configured
- *
- * Return frame size based on underlying PAGE_SIZE
- */
-static unsigned int ice_get_frame_sz(struct ice_rx_ring *rx_ring)
-{
- unsigned int frame_sz;
+ err = libeth_rx_fq_create(&fq, &rq->q_vector->napi);
+ if (err)
+ return err;
+
+ rq->pp = fq.pp;
+ rq->rx_fqes = fq.fqes;
+ rq->truesize = fq.truesize;
+ rq->rx_buf_len = fq.buf_len;
+
+ if (!fq.hsplit)
+ return 0;
+
+ fq = (struct libeth_fq){
+ .count = rq->count,
+ .type = LIBETH_FQE_HDR,
+ .nid = NUMA_NO_NODE,
+ .xdp = ice_is_xdp_ena_vsi(rq->vsi),
+ };
-#if (PAGE_SIZE >= 8192)
- frame_sz = rx_ring->rx_buf_len;
-#else
- frame_sz = ice_rx_pg_size(rx_ring) / 2;
-#endif
+ err = libeth_rx_fq_create(&fq, &rq->q_vector->napi);
+ if (err)
+ goto destroy;
- return frame_sz;
+ rq->hdr_pp = fq.pp;
+ rq->hdr_fqes = fq.fqes;
+ rq->hdr_truesize = fq.truesize;
+ rq->rx_hdr_len = fq.buf_len;
+
+ return 0;
+
+destroy:
+ ice_rxq_pp_destroy(rq);
+
+ return err;
}
/**
@@ -541,7 +658,8 @@ static unsigned int ice_get_frame_sz(struct ice_rx_ring *rx_ring)
static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
{
struct device *dev = ice_pf_to_dev(ring->vsi->back);
- u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
+ u32 num_bufs = ICE_DESC_UNUSED(ring);
+ u32 rx_buf_len;
int err;
if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) {
@@ -555,15 +673,19 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
}
ice_rx_xsk_pool(ring);
+ err = ice_realloc_rx_xdp_bufs(ring, ring->xsk_pool);
+ if (err)
+ return err;
+
if (ring->xsk_pool) {
xdp_rxq_info_unreg(&ring->xdp_rxq);
- ring->rx_buf_len =
+ rx_buf_len =
xsk_pool_get_rx_frame_size(ring->xsk_pool);
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index,
ring->q_vector->napi.napi_id,
- ring->rx_buf_len);
+ rx_buf_len);
if (err)
return err;
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
@@ -572,36 +694,33 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
if (err)
return err;
xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
- ice_xsk_pool_fill_cb(ring);
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
} else {
+ err = ice_rxq_pp_create(ring);
+ if (err)
+ return err;
+
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index,
ring->q_vector->napi.napi_id,
ring->rx_buf_len);
if (err)
- return err;
+ goto err_destroy_fq;
}
-
- err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_PAGE_SHARED,
- NULL);
- if (err)
- return err;
+ xdp_rxq_info_attach_page_pool(&ring->xdp_rxq,
+ ring->pp);
}
}
- xdp_init_buff(&ring->xdp, ice_get_frame_sz(ring), &ring->xdp_rxq);
ring->xdp.data = NULL;
- ring->xdp_ext.pkt_ctx = &ring->pkt_ctx;
err = ice_setup_rx_ctx(ring);
if (err) {
dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
ring->q_index, err);
- return err;
+ goto err_destroy_fq;
}
if (ring->xsk_pool) {
@@ -626,9 +745,20 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
return 0;
}
- ice_alloc_rx_bufs(ring, num_bufs);
+ if (ring->vsi->type == ICE_VSI_CTRL)
+ ice_init_ctrl_rx_descs(ring, num_bufs);
+ else
+ err = ice_alloc_rx_bufs(ring, num_bufs);
+
+ if (err)
+ goto err_destroy_fq;
return 0;
+
+err_destroy_fq:
+ ice_rxq_pp_destroy(ring);
+
+ return err;
}
int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
@@ -649,18 +779,10 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
*/
static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi, struct ice_rx_ring *ring)
{
- if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
- ring->max_frame = ICE_MAX_FRAME_LEGACY_RX;
- ring->rx_buf_len = ICE_RXBUF_1664;
-#if (PAGE_SIZE < 8192)
- } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
- (vsi->netdev->mtu <= ETH_DATA_LEN)) {
- ring->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
- ring->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
-#endif
+ if (!vsi->netdev) {
+ vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
} else {
- ring->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
- ring->rx_buf_len = ICE_RXBUF_3072;
+ vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
}
}
@@ -798,13 +920,11 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
return 0;
err_out:
- while (v_idx--)
- ice_free_q_vector(vsi, v_idx);
- dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
- vsi->num_q_vectors, vsi->vsi_num, err);
- vsi->num_q_vectors = 0;
- return err;
+	dev_info(dev, "Failed to allocate %d q_vectors for VSI %d, new value %d\n",
+ vsi->num_q_vectors, vsi->vsi_num, v_idx);
+ vsi->num_q_vectors = v_idx;
+ return v_idx ? 0 : err;
}
/**
@@ -884,13 +1004,49 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
}
/**
+ * ice_cfg_tstamp - Configure Tx time stamp queue
+ * @tx_ring: Tx ring to be configured with timestamping
+ *
+ * Return: 0 on success and a negative value on error.
+ */
+static int
+ice_cfg_tstamp(struct ice_tx_ring *tx_ring)
+{
+ DEFINE_RAW_FLEX(struct ice_aqc_set_txtime_qgrp, txtime_qg_buf,
+ txtimeqs, 1);
+ u8 txtime_buf_len = struct_size(txtime_qg_buf, txtimeqs, 1);
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ struct ice_txtime_ctx txtime_ctx = {};
+ struct ice_vsi *vsi = tx_ring->vsi;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u16 pf_q = tx_ring->reg_idx;
+ int err;
+
+ err = ice_setup_txtime_ctx(tstamp_ring, &txtime_ctx);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "Failed to setup Tx time queue context for queue %d, error: %d\n",
+ pf_q, err);
+ return err;
+ }
+ ice_pack_txtime_ctx(&txtime_ctx,
+ &txtime_qg_buf->txtimeqs[0].txtime_ctx);
+
+ tstamp_ring->tail = hw->hw_addr + E830_GLQTX_TXTIME_DBELL_LSB(pf_q);
+ return ice_aq_set_txtimeq(hw, pf_q, 1, txtime_qg_buf,
+ txtime_buf_len, NULL);
+}
+
+/**
* ice_vsi_cfg_txq - Configure single Tx queue
* @vsi: the VSI that queue belongs to
* @ring: Tx ring to be configured
* @qg_buf: queue group buffer
+ *
+ * Return: 0 on success and a negative value on error.
*/
static int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
+ice_vsi_cfg_txq(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_aqc_add_tx_qgrp *qg_buf)
{
u8 buf_len = struct_size(qg_buf, txqs, 1);
@@ -899,19 +1055,23 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_channel *ch = ring->ch;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
+ u32 pf_q, vsi_idx;
int status;
- u16 pf_q;
u8 tc;
/* Configure XPS */
ice_cfg_xps_tx_ring(ring);
pf_q = ring->reg_idx;
- ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
+ status = ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "Failed to setup Tx context for queue %d, error: %d\n",
+ pf_q, status);
+ return status;
+ }
/* copy context contents into the qg_buf */
qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
- ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
- ice_tlan_ctx_info);
+ ice_pack_txq_ctx(&tlan_ctx, &qg_buf->txqs[0].txq_ctx);
/* init queue specific tail reg. It is referred as
* transmit comm scheduler queue doorbell.
@@ -928,14 +1088,15 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
*/
ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
- if (ch)
- status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
- ring->q_handle, 1, qg_buf, buf_len,
- NULL);
- else
- status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
- ring->q_handle, 1, qg_buf, buf_len,
- NULL);
+ if (ch) {
+ tc = 0;
+ vsi_idx = ch->ch_vsi->idx;
+ } else {
+ vsi_idx = vsi->idx;
+ }
+
+ status = ice_ena_vsi_txq(vsi->port_info, vsi_idx, tc, ring->q_handle,
+ 1, qg_buf, buf_len, NULL);
if (status) {
dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
status);
@@ -950,7 +1111,32 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
if (pf_q == le16_to_cpu(txq->txq_id))
ring->txq_teid = le32_to_cpu(txq->q_teid);
+ if (ice_is_txtime_ena(ring)) {
+ status = ice_alloc_setup_tstamp_ring(ring);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf),
+ "Failed to allocate Tx timestamp ring, error: %d\n",
+ status);
+ goto err_setup_tstamp;
+ }
+
+ status = ice_cfg_tstamp(ring);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "Failed to set Tx Time queue context, error: %d\n",
+ status);
+ goto err_cfg_tstamp;
+ }
+ }
return 0;
+
+err_cfg_tstamp:
+ ice_free_tx_tstamp_ring(ring);
+err_setup_tstamp:
+ ice_dis_vsi_txq(vsi->port_info, vsi_idx, tc, 1, &ring->q_handle,
+ &ring->reg_idx, &ring->txq_teid, ICE_NO_RESET,
+ tlan_ctx.vmvf_num, NULL);
+
+ return status;
}
int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
@@ -1209,3 +1395,148 @@ ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
txq_meta->tc = tc;
}
}
+
+/**
+ * ice_qp_reset_stats - Resets all stats for rings of given index
+ * @vsi: VSI that contains rings of interest
+ * @q_idx: ring index in array
+ */
+static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
+{
+ struct ice_vsi_stats *vsi_stat;
+ struct ice_pf *pf;
+
+ pf = vsi->back;
+ if (!pf->vsi_stats)
+ return;
+
+ vsi_stat = pf->vsi_stats[vsi->idx];
+ if (!vsi_stat)
+ return;
+
+ memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
+ sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
+ memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
+ sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
+ if (vsi->xdp_rings)
+ memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
+ sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
+}
+
+/**
+ * ice_qp_clean_rings - Cleans all the rings of a given index
+ * @vsi: VSI that contains rings of interest
+ * @q_idx: ring index in array
+ */
+static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
+{
+ ice_clean_tx_ring(vsi->tx_rings[q_idx]);
+ if (vsi->xdp_rings)
+ ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
+ ice_clean_rx_ring(vsi->rx_rings[q_idx]);
+}
+
+/**
+ * ice_qp_dis - Disables a queue pair
+ * @vsi: VSI of interest
+ * @q_idx: ring index in array
+ *
+ * Returns 0 on success, negative on failure.
+ */
+int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
+{
+ struct ice_txq_meta txq_meta = { };
+ struct ice_q_vector *q_vector;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
+ int fail = 0;
+ int err;
+
+ if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
+ return -EINVAL;
+
+ tx_ring = vsi->tx_rings[q_idx];
+ rx_ring = vsi->rx_rings[q_idx];
+ q_vector = rx_ring->q_vector;
+
+ synchronize_net();
+ netif_carrier_off(vsi->netdev);
+ netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+
+ ice_qvec_dis_irq(vsi, rx_ring, q_vector);
+ ice_qvec_toggle_napi(vsi, q_vector, false);
+
+ ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
+ if (!fail)
+ fail = err;
+ if (vsi->xdp_rings) {
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
+
+ memset(&txq_meta, 0, sizeof(txq_meta));
+ ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
+ &txq_meta);
+ if (!fail)
+ fail = err;
+ }
+
+ ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false);
+ ice_qp_clean_rings(vsi, q_idx);
+ ice_qp_reset_stats(vsi, q_idx);
+
+ return fail;
+}
+
+/**
+ * ice_qp_ena - Enables a queue pair
+ * @vsi: VSI of interest
+ * @q_idx: ring index in array
+ *
+ * Returns 0 on success, negative on failure.
+ */
+int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
+{
+ struct ice_q_vector *q_vector;
+ int fail = 0;
+ bool link_up;
+ int err;
+
+ err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
+ if (!fail)
+ fail = err;
+
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
+
+ err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
+ if (!fail)
+ fail = err;
+ ice_set_ring_xdp(xdp_ring);
+ ice_tx_xsk_pool(vsi, q_idx);
+ }
+
+ err = ice_vsi_cfg_single_rxq(vsi, q_idx);
+ if (!fail)
+ fail = err;
+
+ q_vector = vsi->rx_rings[q_idx]->q_vector;
+ ice_qvec_cfg_msix(vsi, q_vector, q_idx);
+
+ err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
+ if (!fail)
+ fail = err;
+
+ ice_qvec_toggle_napi(vsi, q_vector, true);
+ ice_qvec_ena_irq(vsi, q_vector);
+
+ /* make sure NAPI sees updated ice_{t,x}_ring::xsk_pool */
+ synchronize_net();
+ ice_get_link_status(vsi->port_info, &link_up);
+ if (link_up) {
+ netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+ netif_carrier_on(vsi->netdev);
+ }
+
+ return fail;
+}
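Both ice_qp_dis() and ice_qp_ena() follow the same error policy: remember the first failure but keep executing the remaining steps, so a partly failed disable or enable still leaves the queue pair as consistent as possible. The idiom in isolation, as a generic userspace sketch with stand-in step functions:

#include <assert.h>
#include <stdio.h>

static int step_ok(void)   { return 0; }
static int step_fail(void) { return -5; }	/* e.g. -EIO */

/* Run every step, but report only the first error that occurred. */
static int run_all_steps(void)
{
	int fail = 0;
	int err;

	err = step_ok();
	if (!fail)
		fail = err;

	err = step_fail();
	if (!fail)
		fail = err;

	err = step_ok();	/* still executed after the failure */
	if (!fail)
		fail = err;

	return fail;
}

int main(void)
{
	assert(run_all_steps() == -5);
	printf("first error preserved: %d\n", run_all_steps());
	return 0;
}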
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
index b711bc921928..d28294247599 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.h
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -32,4 +32,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
void
ice_fill_txq_meta(const struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta);
+int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx);
+int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx);
+u16 ice_calc_ts_ring_count(struct ice_tx_ring *tx_ring);
#endif /* _ICE_BASE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
deleted file mode 100644
index 10d9d74f3545..000000000000
--- a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
+++ /dev/null
@@ -1,181 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2018-2021, Intel Corporation. */
-
-#ifndef _ICE_CGU_REGS_H_
-#define _ICE_CGU_REGS_H_
-
-#define NAC_CGU_DWORD9 0x24
-union nac_cgu_dword9 {
- struct {
- u32 time_ref_freq_sel : 3;
- u32 clk_eref1_en : 1;
- u32 clk_eref0_en : 1;
- u32 time_ref_en : 1;
- u32 time_sync_en : 1;
- u32 one_pps_out_en : 1;
- u32 clk_ref_synce_en : 1;
- u32 clk_synce1_en : 1;
- u32 clk_synce0_en : 1;
- u32 net_clk_ref1_en : 1;
- u32 net_clk_ref0_en : 1;
- u32 clk_synce1_amp : 2;
- u32 misc6 : 1;
- u32 clk_synce0_amp : 2;
- u32 one_pps_out_amp : 2;
- u32 misc24 : 12;
- };
- u32 val;
-};
-
-#define NAC_CGU_DWORD16_E825C 0x40
-union nac_cgu_dword16_e825c {
- struct {
- u32 synce_remndr : 6;
- u32 synce_phlmt_en : 1;
- u32 misc13 : 17;
- u32 tspll_ck_refclkfreq : 8;
- };
- u32 val;
-};
-
-#define NAC_CGU_DWORD19 0x4c
-union nac_cgu_dword19 {
- struct {
- u32 tspll_fbdiv_intgr : 8;
- u32 fdpll_ulck_thr : 5;
- u32 misc15 : 3;
- u32 tspll_ndivratio : 4;
- u32 tspll_iref_ndivratio : 3;
- u32 misc19 : 1;
- u32 japll_ndivratio : 4;
- u32 japll_iref_ndivratio : 3;
- u32 misc27 : 1;
- };
- u32 val;
-};
-
-#define NAC_CGU_DWORD22 0x58
-union nac_cgu_dword22 {
- struct {
- u32 fdpll_frac_div_out_nc : 2;
- u32 fdpll_lock_int_for : 1;
- u32 synce_hdov_int_for : 1;
- u32 synce_lock_int_for : 1;
- u32 fdpll_phlead_slip_nc : 1;
- u32 fdpll_acc1_ovfl_nc : 1;
- u32 fdpll_acc2_ovfl_nc : 1;
- u32 synce_status_nc : 6;
- u32 fdpll_acc1f_ovfl : 1;
- u32 misc18 : 1;
- u32 fdpllclk_div : 4;
- u32 time1588clk_div : 4;
- u32 synceclk_div : 4;
- u32 synceclk_sel_div2 : 1;
- u32 fdpllclk_sel_div2 : 1;
- u32 time1588clk_sel_div2 : 1;
- u32 misc3 : 1;
- };
- u32 val;
-};
-
-#define NAC_CGU_DWORD23_E825C 0x5C
-union nac_cgu_dword23_e825c {
- struct {
- u32 cgupll_fbdiv_intgr : 10;
- u32 ux56pll_fbdiv_intgr : 10;
- u32 misc20 : 4;
- u32 ts_pll_enable : 1;
- u32 time_sync_tspll_align_sel : 1;
- u32 ext_synce_sel : 1;
- u32 ref1588_ck_div : 4;
- u32 time_ref_sel : 1;
-
- };
- u32 val;
-};
-
-#define NAC_CGU_DWORD24 0x60
-union nac_cgu_dword24 {
- struct {
- u32 tspll_fbdiv_frac : 22;
- u32 misc20 : 2;
- u32 ts_pll_enable : 1;
- u32 time_sync_tspll_align_sel : 1;
- u32 ext_synce_sel : 1;
- u32 ref1588_ck_div : 4;
- u32 time_ref_sel : 1;
- };
- u32 val;
-};
-
-#define TSPLL_CNTR_BIST_SETTINGS 0x344
-union tspll_cntr_bist_settings {
- struct {
- u32 i_irefgen_settling_time_cntr_7_0 : 8;
- u32 i_irefgen_settling_time_ro_standby_1_0 : 2;
- u32 reserved195 : 5;
- u32 i_plllock_sel_0 : 1;
- u32 i_plllock_sel_1 : 1;
- u32 i_plllock_cnt_6_0 : 7;
- u32 i_plllock_cnt_10_7 : 4;
- u32 reserved200 : 4;
- };
- u32 val;
-};
-
-#define TSPLL_RO_BWM_LF 0x370
-union tspll_ro_bwm_lf {
- struct {
- u32 bw_freqov_high_cri_7_0 : 8;
- u32 bw_freqov_high_cri_9_8 : 2;
- u32 biascaldone_cri : 1;
- u32 plllock_gain_tran_cri : 1;
- u32 plllock_true_lock_cri : 1;
- u32 pllunlock_flag_cri : 1;
- u32 afcerr_cri : 1;
- u32 afcdone_cri : 1;
- u32 feedfwrdgain_cal_cri_7_0 : 8;
- u32 m2fbdivmod_cri_7_0 : 8;
- };
- u32 val;
-};
-
-#define TSPLL_RO_LOCK_E825C 0x3f0
-union tspll_ro_lock_e825c {
- struct {
- u32 bw_freqov_high_cri_7_0 : 8;
- u32 bw_freqov_high_cri_9_8 : 2;
- u32 reserved455 : 1;
- u32 plllock_gain_tran_cri : 1;
- u32 plllock_true_lock_cri : 1;
- u32 pllunlock_flag_cri : 1;
- u32 afcerr_cri : 1;
- u32 afcdone_cri : 1;
- u32 feedfwrdgain_cal_cri_7_0 : 8;
- u32 reserved462 : 8;
- };
- u32 val;
-};
-
-#define TSPLL_BW_TDC_E825C 0x31c
-union tspll_bw_tdc_e825c {
- struct {
- u32 i_tdc_offset_lock_1_0 : 2;
- u32 i_bbthresh1_2_0 : 3;
- u32 i_bbthresh2_2_0 : 3;
- u32 i_tdcsel_1_0 : 2;
- u32 i_tdcovccorr_en_h : 1;
- u32 i_divretimeren : 1;
- u32 i_bw_ampmeas_window : 1;
- u32 i_bw_lowerbound_2_0 : 3;
- u32 i_bw_upperbound_2_0 : 3;
- u32 i_bw_mode_1_0 : 2;
- u32 i_ft_mode_sel_2_0 : 3;
- u32 i_bwphase_4_0 : 5;
- u32 i_plllock_sel_1_0 : 2;
- u32 i_afc_divratio : 1;
- };
- u32 val;
-};
-
-#endif /* _ICE_CGU_REGS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index b22e71dc59d4..046bc9c65c51 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -6,6 +6,7 @@
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
#include "ice_ptp_hw.h"
+#include <linux/packing.h>
#define ICE_PF_RESET_WAIT_COUNT 300
#define ICE_MAX_NETLIST_SIZE 10
@@ -170,6 +171,15 @@ static int ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E830_XXV_QSFP:
case ICE_DEV_ID_E830C_SFP:
case ICE_DEV_ID_E830_XXV_SFP:
+ case ICE_DEV_ID_E835CC_BACKPLANE:
+ case ICE_DEV_ID_E835CC_QSFP56:
+ case ICE_DEV_ID_E835CC_SFP:
+ case ICE_DEV_ID_E835C_BACKPLANE:
+ case ICE_DEV_ID_E835C_QSFP:
+ case ICE_DEV_ID_E835C_SFP:
+ case ICE_DEV_ID_E835_L_BACKPLANE:
+ case ICE_DEV_ID_E835_L_QSFP:
+ case ICE_DEV_ID_E835_L_SFP:
hw->mac_type = ICE_MAC_E830;
break;
default:
@@ -185,7 +195,7 @@ static int ice_set_mac_type(struct ice_hw *hw)
* ice_is_generic_mac - check if device's mac_type is generic
* @hw: pointer to the hardware structure
*
- * Return: true if mac_type is generic (with SBQ support), false if not
+ * Return: true if mac_type is ICE_MAC_GENERIC*, false otherwise.
*/
bool ice_is_generic_mac(struct ice_hw *hw)
{
@@ -194,117 +204,39 @@ bool ice_is_generic_mac(struct ice_hw *hw)
}
/**
- * ice_is_e810
- * @hw: pointer to the hardware structure
- *
- * returns true if the device is E810 based, false if not.
- */
-bool ice_is_e810(struct ice_hw *hw)
-{
- return hw->mac_type == ICE_MAC_E810;
-}
-
-/**
- * ice_is_e810t
- * @hw: pointer to the hardware structure
+ * ice_is_pf_c827 - check if pf contains c827 phy
+ * @hw: pointer to the hw struct
*
- * returns true if the device is E810T based, false if not.
+ * Return: true if the device has c827 phy.
*/
-bool ice_is_e810t(struct ice_hw *hw)
+static bool ice_is_pf_c827(struct ice_hw *hw)
{
- switch (hw->device_id) {
- case ICE_DEV_ID_E810C_SFP:
- switch (hw->subsystem_device_id) {
- case ICE_SUBDEV_ID_E810T:
- case ICE_SUBDEV_ID_E810T2:
- case ICE_SUBDEV_ID_E810T3:
- case ICE_SUBDEV_ID_E810T4:
- case ICE_SUBDEV_ID_E810T6:
- case ICE_SUBDEV_ID_E810T7:
- return true;
- }
- break;
- case ICE_DEV_ID_E810C_QSFP:
- switch (hw->subsystem_device_id) {
- case ICE_SUBDEV_ID_E810T2:
- case ICE_SUBDEV_ID_E810T3:
- case ICE_SUBDEV_ID_E810T5:
- return true;
- }
- break;
- default:
- break;
- }
-
- return false;
-}
+ struct ice_aqc_get_link_topo cmd = {};
+ u8 node_part_number;
+ u16 node_handle;
+ int status;
-/**
- * ice_is_e822 - Check if a device is E822 family device
- * @hw: pointer to the hardware structure
- *
- * Return: true if the device is E822 based, false if not.
- */
-bool ice_is_e822(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E822C_BACKPLANE:
- case ICE_DEV_ID_E822C_QSFP:
- case ICE_DEV_ID_E822C_SFP:
- case ICE_DEV_ID_E822C_10G_BASE_T:
- case ICE_DEV_ID_E822C_SGMII:
- case ICE_DEV_ID_E822L_BACKPLANE:
- case ICE_DEV_ID_E822L_SFP:
- case ICE_DEV_ID_E822L_10G_BASE_T:
- case ICE_DEV_ID_E822L_SGMII:
- return true;
- default:
+ if (hw->mac_type != ICE_MAC_E810)
return false;
- }
-}
-/**
- * ice_is_e823
- * @hw: pointer to the hardware structure
- *
- * returns true if the device is E823-L or E823-C based, false if not.
- */
-bool ice_is_e823(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E823L_BACKPLANE:
- case ICE_DEV_ID_E823L_SFP:
- case ICE_DEV_ID_E823L_10G_BASE_T:
- case ICE_DEV_ID_E823L_1GBE:
- case ICE_DEV_ID_E823L_QSFP:
- case ICE_DEV_ID_E823C_BACKPLANE:
- case ICE_DEV_ID_E823C_QSFP:
- case ICE_DEV_ID_E823C_SFP:
- case ICE_DEV_ID_E823C_10G_BASE_T:
- case ICE_DEV_ID_E823C_SGMII:
+ if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
return true;
- default:
+
+ cmd.addr.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
+ cmd.addr.topo_params.index = 0;
+
+ status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
+ &node_handle);
+
+ if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
return false;
- }
-}
-/**
- * ice_is_e825c - Check if a device is E825C family device
- * @hw: pointer to the hardware structure
- *
- * Return: true if the device is E825-C based, false if not.
- */
-bool ice_is_e825c(struct ice_hw *hw)
-{
- switch (hw->device_id) {
- case ICE_DEV_ID_E825C_BACKPLANE:
- case ICE_DEV_ID_E825C_QSFP:
- case ICE_DEV_ID_E825C_SFP:
- case ICE_DEV_ID_E825C_SGMII:
+ if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
return true;
- default:
- return false;
- }
+
+ return false;
}
/**
@@ -316,7 +248,7 @@ bool ice_is_e825c(struct ice_hw *hw)
*/
int ice_clear_pf_cfg(struct ice_hw *hw)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
@@ -344,12 +276,12 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
{
struct ice_aqc_manage_mac_read_resp *resp;
struct ice_aqc_manage_mac_read *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
u16 flags;
u8 i;
- cmd = &desc.params.mac_read;
+ cmd = libie_aq_raw(&desc);
if (buf_size < sizeof(*resp))
return -EINVAL;
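
From this hunk onward the patch swaps struct ice_aq_desc for the generic struct libie_aq_desc and obtains the opcode-specific parameter view through libie_aq_raw() instead of the old desc.params.<member> union members. A minimal sketch of the converted pattern, assuming a hypothetical command struct and opcode that stand in for any of the real ones touched below:

static int ice_send_example_cmd(struct ice_hw *hw, u8 value)
{
	struct ice_aqc_example_cmd *cmd;	/* hypothetical opcode struct */
	struct libie_aq_desc desc;

	/* hypothetical opcode used only for illustration */
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_example);

	/* libie_aq_raw() hands back the raw parameter area of the generic
	 * descriptor; callers cast it to their opcode-specific layout.
	 */
	cmd = libie_aq_raw(&desc);
	cmd->value = value;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}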
@@ -398,12 +330,12 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
{
struct ice_aqc_get_phy_caps *cmd;
u16 pcaps_size = sizeof(*pcaps);
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
const char *prefix;
struct ice_hw *hw;
int status;
- cmd = &desc.params.get_phy;
+ cmd = libie_aq_raw(&desc);
if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
return -EINVAL;
@@ -492,9 +424,9 @@ ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
struct ice_sq_cd *cd)
{
struct ice_aqc_get_link_topo *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.get_link_topo;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
@@ -522,19 +454,20 @@ int
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
u8 *node_part_number, u16 *node_handle)
{
- struct ice_aq_desc desc;
+ struct ice_aqc_get_link_topo *resp;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
- desc.params.get_link_topo = *cmd;
+ resp = libie_aq_raw(&desc);
+ *resp = *cmd;
if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
return -EINTR;
if (node_handle)
- *node_handle =
- le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ *node_handle = le16_to_cpu(resp->addr.handle);
if (node_part_number)
- *node_part_number = desc.params.get_link_topo.node_part_num;
+ *node_part_number = resp->node_part_num;
return 0;
}
@@ -542,7 +475,8 @@ ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
/**
* ice_find_netlist_node
* @hw: pointer to the hw struct
- * @node_type_ctx: type of netlist node to look for
+ * @node_type: type of netlist node to look for
+ * @ctx: context of the search
* @node_part_number: node part number to look for
* @node_handle: output parameter if node found - optional
*
@@ -552,10 +486,12 @@ ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
* valid if the function returns zero, and should be ignored on any non-zero
* return value.
*
- * Returns: 0 if the node is found, -ENOENT if no handle was found, and
- * a negative error code on failure to access the AQ.
+ * Return:
+ * * 0 if the node is found,
+ * * -ENOENT if no handle was found,
+ * * negative error code on failure to access the AQ.
*/
-static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
+static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type, u8 ctx,
u8 node_part_number, u16 *node_handle)
{
u8 idx;
@@ -566,8 +502,8 @@ static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
int status;
cmd.addr.topo_params.node_type_ctx =
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M,
- node_type_ctx);
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, node_type) |
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ctx);
cmd.addr.topo_params.index = idx;
status = ice_aq_get_netlist_node(hw, &cmd,
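
With the node type and search context now passed separately, callers compose the combined node_type_ctx byte from two FIELD_PREP()s, as the hunk above shows. A small illustration of that composition; the helper name is made up, while the masks come from the surrounding code:

static u8 ice_example_topo_node_byte(u8 node_type, u8 ctx)
{
	/* Pack the node type and search context into the single byte that
	 * ice_aqc_get_link_topo expects in addr.topo_params.node_type_ctx.
	 */
	return FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, node_type) |
	       FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ctx);
}

For example, the CGU probe later in this patch builds the byte from ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL and ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL.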
@@ -754,8 +690,8 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *li_old, *li;
enum ice_media_type *hw_media_type;
struct ice_fc_info *hw_fc_info;
+ struct libie_aq_desc desc;
bool tx_pause, rx_pause;
- struct ice_aq_desc desc;
struct ice_hw *hw;
u16 cmd_flags;
int status;
@@ -770,7 +706,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
- resp = &desc.params.get_link_status;
+ resp = libie_aq_raw(&desc);
resp->cmd_flags = cpu_to_le16(cmd_flags);
resp->lport_num = pi->lport;
@@ -899,9 +835,9 @@ int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
struct ice_aqc_set_mac_cfg *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.set_mac_cfg;
+ cmd = libie_aq_raw(&desc);
if (max_frame_size == 0)
return -EINVAL;
@@ -1022,6 +958,64 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
}
/**
+ * ice_wait_for_fw - wait for full FW readiness
+ * @hw: pointer to the hardware structure
+ * @timeout: milliseconds that can elapse before timing out
+ *
+ * Return: 0 on success, -ETIMEDOUT on timeout.
+ */
+static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
+{
+ int fw_loading;
+ u32 elapsed = 0;
+
+ while (elapsed <= timeout) {
+ fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
+
+ /* firmware not loaded yet, keep waiting */
+ if (fw_loading) {
+ elapsed += 100;
+ msleep(100);
+ continue;
+ }
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
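
The loop above re-reads GL_MNG_FWSM every 100 ms until the FW_LOADING bit clears. For comparison only, an equivalent wait could be expressed with the read_poll_timeout() helper from <linux/iopoll.h>; this is a hedged sketch, not what the patch adds:

static int ice_wait_for_fw_alt(struct ice_hw *hw, u32 timeout_ms)
{
	u32 fwsm;

	/* Re-read GL_MNG_FWSM roughly every 100 ms until FW_LOADING clears
	 * or the timeout expires; returns 0 or -ETIMEDOUT like the original.
	 */
	return read_poll_timeout(rd32, fwsm,
				 !(fwsm & GL_MNG_FWSM_FW_LOADING_M),
				 100 * USEC_PER_MSEC,
				 timeout_ms * USEC_PER_MSEC,
				 false, hw, GL_MNG_FWSM);
}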
+
+static int __fwlog_send_cmd(void *priv, struct libie_aq_desc *desc, void *buf,
+ u16 size)
+{
+ struct ice_hw *hw = priv;
+
+ return ice_aq_send_cmd(hw, desc, buf, size, NULL);
+}
+
+static int __fwlog_init(struct ice_hw *hw)
+{
+ struct ice_pf *pf = hw->back;
+ struct libie_fwlog_api api = {
+ .pdev = pf->pdev,
+ .send_cmd = __fwlog_send_cmd,
+ .priv = hw,
+ };
+ int err;
+
+ /* only support fw log commands on PF 0 */
+ if (hw->bus.func)
+ return -EINVAL;
+
+ err = ice_debugfs_pf_init(pf);
+ if (err)
+ return err;
+
+ api.debugfs_root = pf->ice_debugfs_pf;
+
+ return libie_fwlog_init(&hw->fwlog, &api);
+}
+
+/**
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
*/
@@ -1049,7 +1043,7 @@ int ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_cqinit;
- status = ice_fwlog_init(hw);
+ status = __fwlog_init(hw);
if (status)
ice_debug(hw, ICE_DBG_FW_LOG, "Error initializing FW logging: %d\n",
status);
@@ -1167,11 +1161,27 @@ int ice_init_hw(struct ice_hw *hw)
status = ice_init_hw_tbls(hw);
if (status)
goto err_unroll_fltr_mgmt_struct;
+
+ ice_init_dev_hw(hw->back);
+
mutex_init(&hw->tnl_lock);
ice_init_chk_recipe_reuse_support(hw);
- return 0;
+ /* Some cards require longer initialization times
+ * because they must load FW from an external source.
+ * This can take up to half a minute.
+ */
+ if (ice_is_pf_c827(hw)) {
+ status = ice_wait_for_fw(hw, 30000);
+ if (status) {
+ dev_err(ice_hw_to_dev(hw), "ice_wait_for_fw timed out\n");
+ goto err_unroll_fltr_mgmt_struct;
+ }
+ }
+ hw->lane_num = ice_get_phy_lane_number(hw);
+
+ return 0;
err_unroll_fltr_mgmt_struct:
ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
@@ -1183,6 +1193,16 @@ err_unroll_cqinit:
return status;
}
+static void __fwlog_deinit(struct ice_hw *hw)
+{
+ /* only support fw log commands on PF 0 */
+ if (hw->bus.func)
+ return;
+
+ ice_debugfs_pf_deinit(hw->back);
+ libie_fwlog_deinit(&hw->fwlog);
+}
+
/**
* ice_deinit_hw - unroll initialization operations done by ice_init_hw
* @hw: pointer to the hardware structure
@@ -1201,8 +1221,7 @@ void ice_deinit_hw(struct ice_hw *hw)
ice_free_seg(hw);
ice_free_hw_tbls(hw);
mutex_destroy(&hw->tnl_lock);
-
- ice_fwlog_deinit(hw);
+ __fwlog_deinit(hw);
ice_destroy_all_ctrlq(hw);
/* Clear VSI contexts if not already cleared */
@@ -1357,39 +1376,51 @@ int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
}
/**
- * ice_copy_rxq_ctx_to_hw
+ * ice_copy_rxq_ctx_to_hw - Copy packed Rx queue context to HW registers
* @hw: pointer to the hardware structure
- * @ice_rxq_ctx: pointer to the rxq context
+ * @rxq_ctx: pointer to the packed Rx queue context
* @rxq_index: the index of the Rx queue
- *
- * Copies rxq context from dense structure to HW register space
*/
-static int
-ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
+static void ice_copy_rxq_ctx_to_hw(struct ice_hw *hw,
+ const ice_rxq_ctx_buf_t *rxq_ctx,
+ u32 rxq_index)
{
- u8 i;
+ /* Copy each dword separately to HW */
+ for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
+ u32 ctx = ((const u32 *)rxq_ctx)[i];
- if (!ice_rxq_ctx)
- return -EINVAL;
+ wr32(hw, QRX_CONTEXT(i, rxq_index), ctx);
- if (rxq_index > QRX_CTRL_MAX_INDEX)
- return -EINVAL;
+ ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, ctx);
+ }
+}
- /* Copy each dword separately to HW */
- for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
- wr32(hw, QRX_CONTEXT(i, rxq_index),
- *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
+/**
+ * ice_copy_rxq_ctx_from_hw - Copy packed Rx Queue context from HW registers
+ * @hw: pointer to the hardware structure
+ * @rxq_ctx: pointer to the packed Rx queue context
+ * @rxq_index: the index of the Rx queue
+ */
+static void ice_copy_rxq_ctx_from_hw(struct ice_hw *hw,
+ ice_rxq_ctx_buf_t *rxq_ctx,
+ u32 rxq_index)
+{
+ u32 *ctx = (u32 *)rxq_ctx;
- ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
- *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
- }
+ /* Copy each dword separately from HW */
+ for (int i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++, ctx++) {
+ *ctx = rd32(hw, QRX_CONTEXT(i, rxq_index));
- return 0;
+ ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, *ctx);
+ }
}
+#define ICE_CTX_STORE(struct_name, struct_field, width, lsb) \
+ PACKED_FIELD((lsb) + (width) - 1, (lsb), struct struct_name, struct_field)
+
/* LAN Rx Queue Context */
-static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
- /* Field Width LSB */
+static const struct packed_field_u8 ice_rlan_ctx_fields[] = {
+ /* Field Width LSB */
ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
@@ -1410,35 +1441,90 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
- { 0 }
};
/**
- * ice_write_rxq_ctx
+ * ice_pack_rxq_ctx - Pack Rx queue context into a HW buffer
+ * @ctx: the Rx queue context to pack
+ * @buf: the HW buffer to pack into
+ *
+ * Pack the Rx queue context from the CPU-friendly unpacked buffer into its
+ * bit-packed HW layout.
+ */
+static void ice_pack_rxq_ctx(const struct ice_rlan_ctx *ctx,
+ ice_rxq_ctx_buf_t *buf)
+{
+ pack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
+/**
+ * ice_unpack_rxq_ctx - Unpack Rx queue context from a HW buffer
+ * @buf: the HW buffer to unpack from
+ * @ctx: the Rx queue context to unpack
+ *
+ * Unpack the Rx queue context from the HW buffer into the CPU-friendly
+ * structure.
+ */
+static void ice_unpack_rxq_ctx(const ice_rxq_ctx_buf_t *buf,
+ struct ice_rlan_ctx *ctx)
+{
+ unpack_fields(buf, sizeof(*buf), ctx, ice_rlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
+/**
+ * ice_write_rxq_ctx - Write Rx Queue context to hardware
* @hw: pointer to the hardware structure
- * @rlan_ctx: pointer to the rxq context
+ * @rlan_ctx: pointer to the unpacked Rx queue context
* @rxq_index: the index of the Rx queue
*
- * Converts rxq context from sparse to dense structure and then writes
- * it to HW register space and enables the hardware to prefetch descriptors
- * instead of only fetching them on demand
+ * Pack the sparse Rx Queue context into dense hardware format and write it
+ * into the HW register space.
+ *
+ * Return: 0 on success, or -EINVAL if the Rx queue index is invalid.
*/
int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index)
{
- u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
+ ice_rxq_ctx_buf_t buf = {};
+
+ if (rxq_index > QRX_CTRL_MAX_INDEX)
+ return -EINVAL;
+
+ ice_pack_rxq_ctx(rlan_ctx, &buf);
+ ice_copy_rxq_ctx_to_hw(hw, &buf, rxq_index);
+
+ return 0;
+}
+
+/**
+ * ice_read_rxq_ctx - Read Rx queue context from HW
+ * @hw: pointer to the hardware structure
+ * @rlan_ctx: pointer to the Rx queue context
+ * @rxq_index: the index of the Rx queue
+ *
+ * Read the Rx queue context from the hardware registers, and unpack it into
+ * the sparse Rx queue context structure.
+ *
+ * Return: 0 on success, or -EINVAL if the Rx queue index is invalid.
+ */
+int ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+ u32 rxq_index)
+{
+ ice_rxq_ctx_buf_t buf = {};
- if (!rlan_ctx)
+ if (rxq_index > QRX_CTRL_MAX_INDEX)
return -EINVAL;
- rlan_ctx->prefena = 1;
+ ice_copy_rxq_ctx_from_hw(hw, &buf, rxq_index);
+ ice_unpack_rxq_ctx(&buf, rlan_ctx);
- ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
- return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
+ return 0;
}
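
ice_read_rxq_ctx() is the new counterpart to the existing write path: it copies the packed dwords out of QRX_CONTEXT and unpacks them into struct ice_rlan_ctx. A hedged usage sketch; the caller and the field being dumped are illustrative, not part of the patch:

static void ice_example_dump_rxq_head(struct ice_hw *hw, u32 rxq_index)
{
	struct ice_rlan_ctx ctx = {};

	/* Read back and unpack the live Rx queue context from HW */
	if (ice_read_rxq_ctx(hw, &ctx, rxq_index))
		return;

	ice_debug(hw, ICE_DBG_QCTX, "rxq %u head = %u\n", rxq_index, ctx.head);
}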
/* LAN Tx Queue Context */
-const struct ice_ctx_ele ice_tlan_ctx_info[] = {
+static const struct packed_field_u8 ice_tlan_ctx_fields[] = {
/* Field Width LSB */
ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
@@ -1467,10 +1553,227 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
- ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
- { 0 }
};
+/**
+ * ice_pack_txq_ctx - Pack Tx queue context into Admin Queue buffer
+ * @ctx: the Tx queue context to pack
+ * @buf: the Admin Queue HW buffer to pack into
+ *
+ * Pack the Tx queue context from the CPU-friendly unpacked buffer into its
+ * bit-packed Admin Queue layout.
+ */
+void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf)
+{
+ pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
+/**
+ * ice_pack_txq_ctx_full - Pack Tx queue context into a HW buffer
+ * @ctx: the Tx queue context to pack
+ * @buf: the HW buffer to pack into
+ *
+ * Pack the Tx queue context from the CPU-friendly unpacked buffer into its
+ * bit-packed HW layout, including the internal data portion.
+ */
+static void ice_pack_txq_ctx_full(const struct ice_tlan_ctx *ctx,
+ ice_txq_ctx_buf_full_t *buf)
+{
+ pack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
+/**
+ * ice_unpack_txq_ctx_full - Unpack Tx queue context from a HW buffer
+ * @buf: the HW buffer to unpack from
+ * @ctx: the Tx queue context to unpack
+ *
+ * Unpack the Tx queue context from the HW buffer (including the full internal
+ * state) into the CPU-friendly structure.
+ */
+static void ice_unpack_txq_ctx_full(const ice_txq_ctx_buf_full_t *buf,
+ struct ice_tlan_ctx *ctx)
+{
+ unpack_fields(buf, sizeof(*buf), ctx, ice_tlan_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
+/**
+ * ice_copy_txq_ctx_from_hw - Copy Tx Queue context from HW registers
+ * @hw: pointer to the hardware structure
+ * @txq_ctx: pointer to the packed Tx queue context, including internal state
+ * @txq_index: the index of the Tx queue
+ *
+ * Copy Tx Queue context from HW register space to dense structure
+ */
+static void ice_copy_txq_ctx_from_hw(struct ice_hw *hw,
+ ice_txq_ctx_buf_full_t *txq_ctx,
+ u32 txq_index)
+{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+ u32 *ctx = (u32 *)txq_ctx;
+ u32 txq_base, reg;
+
+ /* Get Tx queue base within card space */
+ txq_base = rd32(hw, PFLAN_TX_QALLOC(hw->pf_id));
+ txq_base = FIELD_GET(PFLAN_TX_QALLOC_FIRSTQ_M, txq_base);
+
+ reg = FIELD_PREP(GLCOMM_QTX_CNTX_CTL_CMD_M,
+ GLCOMM_QTX_CNTX_CTL_CMD_READ) |
+ FIELD_PREP(GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M,
+ txq_base + txq_index) |
+ GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M;
+
+ /* Prevent other PFs on the same adapter from accessing the Tx queue
+ * context interface concurrently.
+ */
+ spin_lock(&pf->adapter->txq_ctx_lock);
+
+ wr32(hw, GLCOMM_QTX_CNTX_CTL, reg);
+ ice_flush(hw);
+
+ /* Copy each dword separately from HW */
+ for (int i = 0; i < ICE_TXQ_CTX_FULL_SIZE_DWORDS; i++, ctx++) {
+ *ctx = rd32(hw, GLCOMM_QTX_CNTX_DATA(i));
+
+ ice_debug(hw, ICE_DBG_QCTX, "qtxdata[%d]: %08X\n", i, *ctx);
+ }
+
+ spin_unlock(&pf->adapter->txq_ctx_lock);
+}
+
+/**
+ * ice_copy_txq_ctx_to_hw - Copy Tx Queue context into HW registers
+ * @hw: pointer to the hardware structure
+ * @txq_ctx: pointer to the packed Tx queue context, including internal state
+ * @txq_index: the index of the Tx queue
+ */
+static void ice_copy_txq_ctx_to_hw(struct ice_hw *hw,
+ const ice_txq_ctx_buf_full_t *txq_ctx,
+ u32 txq_index)
+{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+ u32 txq_base, reg;
+
+ /* Get Tx queue base within card space */
+ txq_base = rd32(hw, PFLAN_TX_QALLOC(hw->pf_id));
+ txq_base = FIELD_GET(PFLAN_TX_QALLOC_FIRSTQ_M, txq_base);
+
+ reg = FIELD_PREP(GLCOMM_QTX_CNTX_CTL_CMD_M,
+ GLCOMM_QTX_CNTX_CTL_CMD_WRITE_NO_DYN) |
+ FIELD_PREP(GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M,
+ txq_base + txq_index) |
+ GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M;
+
+ /* Prevent other PFs on the same adapter from accessing the Tx queue
+ * context interface concurrently.
+ */
+ spin_lock(&pf->adapter->txq_ctx_lock);
+
+ /* Copy each dword separately to HW */
+ for (int i = 0; i < ICE_TXQ_CTX_FULL_SIZE_DWORDS; i++) {
+ u32 ctx = ((const u32 *)txq_ctx)[i];
+
+ wr32(hw, GLCOMM_QTX_CNTX_DATA(i), ctx);
+
+ ice_debug(hw, ICE_DBG_QCTX, "qtxdata[%d]: %08X\n", i, ctx);
+ }
+
+ wr32(hw, GLCOMM_QTX_CNTX_CTL, reg);
+ ice_flush(hw);
+
+ spin_unlock(&pf->adapter->txq_ctx_lock);
+}
+
+/**
+ * ice_read_txq_ctx - Read Tx queue context from HW
+ * @hw: pointer to the hardware structure
+ * @tlan_ctx: pointer to the Tx queue context
+ * @txq_index: the index of the Tx queue
+ *
+ * Read the Tx queue context from the HW registers, then unpack it into the
+ * ice_tlan_ctx structure for use.
+ *
+ * Return: 0 on success, or -EINVAL on an invalid Tx queue index.
+ */
+int ice_read_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
+ u32 txq_index)
+{
+ ice_txq_ctx_buf_full_t buf = {};
+
+ if (txq_index > QTX_COMM_HEAD_MAX_INDEX)
+ return -EINVAL;
+
+ ice_copy_txq_ctx_from_hw(hw, &buf, txq_index);
+ ice_unpack_txq_ctx_full(&buf, tlan_ctx);
+
+ return 0;
+}
+
+/**
+ * ice_write_txq_ctx - Write Tx queue context to HW
+ * @hw: pointer to the hardware structure
+ * @tlan_ctx: pointer to the Tx queue context
+ * @txq_index: the index of the Tx queue
+ *
+ * Pack the Tx queue context into the dense HW layout, then write it into the
+ * HW registers.
+ *
+ * Return: 0 on success, or -EINVAL on an invalid Tx queue index.
+ */
+int ice_write_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
+ u32 txq_index)
+{
+ ice_txq_ctx_buf_full_t buf = {};
+
+ if (txq_index > QTX_COMM_HEAD_MAX_INDEX)
+ return -EINVAL;
+
+ ice_pack_txq_ctx_full(tlan_ctx, &buf);
+ ice_copy_txq_ctx_to_hw(hw, &buf, txq_index);
+
+ return 0;
+}
+
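
Together, ice_read_txq_ctx() and ice_write_txq_ctx() give the driver a read-modify-write path through the indirect GLCOMM_QTX_CNTX_CTL interface, serialized per adapter by txq_ctx_lock. A hedged sketch of how a caller might flip a single field; the helper is hypothetical, and a real user would also have to quiesce the queue first:

static int ice_example_set_txq_drop_ena(struct ice_hw *hw, u32 txq_index,
					 bool ena)
{
	struct ice_tlan_ctx ctx = {};
	int err;

	/* Fetch the full context, including the HW-internal portion */
	err = ice_read_txq_ctx(hw, &ctx, txq_index);
	if (err)
		return err;

	ctx.drop_ena = ena;

	/* Write the modified context back through the same interface */
	return ice_write_txq_ctx(hw, &ctx, txq_index);
}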
+/* Tx time Queue Context */
+static const struct packed_field_u8 ice_txtime_ctx_fields[] = {
+ /* Field Width LSB */
+ ICE_CTX_STORE(ice_txtime_ctx, base, 57, 0),
+ ICE_CTX_STORE(ice_txtime_ctx, pf_num, 3, 57),
+ ICE_CTX_STORE(ice_txtime_ctx, vmvf_num, 10, 60),
+ ICE_CTX_STORE(ice_txtime_ctx, vmvf_type, 2, 70),
+ ICE_CTX_STORE(ice_txtime_ctx, src_vsi, 10, 72),
+ ICE_CTX_STORE(ice_txtime_ctx, cpuid, 8, 82),
+ ICE_CTX_STORE(ice_txtime_ctx, tphrd_desc, 1, 90),
+ ICE_CTX_STORE(ice_txtime_ctx, qlen, 13, 91),
+ ICE_CTX_STORE(ice_txtime_ctx, timer_num, 1, 104),
+ ICE_CTX_STORE(ice_txtime_ctx, txtime_ena_q, 1, 105),
+ ICE_CTX_STORE(ice_txtime_ctx, drbell_mode_32, 1, 106),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_res, 4, 107),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_round_type, 2, 111),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_pacing_slot, 3, 113),
+ ICE_CTX_STORE(ice_txtime_ctx, merging_ena, 1, 116),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_fetch_prof_id, 4, 117),
+ ICE_CTX_STORE(ice_txtime_ctx, ts_fetch_cache_line_aln_thld, 4, 121),
+ ICE_CTX_STORE(ice_txtime_ctx, tx_pipe_delay_mode, 1, 125),
+};
+
+/**
+ * ice_pack_txtime_ctx - pack Tx time queue context into a HW buffer
+ * @ctx: the Tx time queue context to pack
+ * @buf: the HW buffer to pack into
+ *
+ * Pack the Tx time queue context from the CPU-friendly unpacked buffer into
+ * its bit-packed HW layout.
+ */
+void ice_pack_txtime_ctx(const struct ice_txtime_ctx *ctx,
+ ice_txtime_ctx_buf_t *buf)
+{
+ pack_fields(buf, sizeof(*buf), ctx, ice_txtime_ctx_fields,
+ QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST);
+}
+
/* Sideband Queue command wrappers */
/**
@@ -1486,7 +1789,7 @@ ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd)
{
return ice_sq_send_cmd(hw, ice_get_sbq(hw),
- (struct ice_aq_desc *)desc, buf, buf_size, cd);
+ (struct libie_aq_desc *)desc, buf, buf_size, cd);
}
/**
@@ -1571,10 +1874,10 @@ static bool ice_should_retry_sq_send_cmd(u16 opcode)
*/
static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
- struct ice_aq_desc *desc, void *buf, u16 buf_size,
+ struct libie_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc_cpy;
+ struct libie_aq_desc desc_cpy;
bool is_cmd_for_retry;
u8 idx = 0;
u16 opcode;
@@ -1595,7 +1898,7 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
if (!is_cmd_for_retry || !status ||
- hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
+ hw->adminq.sq_last_status != LIBIE_AQ_RC_EBUSY)
break;
memcpy(desc, &desc_cpy, sizeof(desc_cpy));
@@ -1618,10 +1921,10 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
* Helper function to send FW Admin Queue commands to the FW Admin Queue.
*/
int
-ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
+ice_aq_send_cmd(struct ice_hw *hw, struct libie_aq_desc *desc, void *buf,
u16 buf_size, struct ice_sq_cd *cd)
{
- struct ice_aqc_req_res *cmd = &desc->params.res_owner;
+ struct libie_aqc_req_res *cmd = libie_aq_raw(desc);
bool lock_acquired = false;
int status;
@@ -1652,7 +1955,7 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
case ice_aqc_opc_get_recipe_to_profile:
break;
case ice_aqc_opc_release_res:
- if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
+ if (le16_to_cpu(cmd->res_id) == LIBIE_AQC_RES_ID_GLBL_LOCK)
break;
fallthrough;
default:
@@ -1677,8 +1980,8 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
*/
int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
- struct ice_aqc_get_ver *resp;
- struct ice_aq_desc desc;
+ struct libie_aqc_get_ver *resp;
+ struct libie_aq_desc desc;
int status;
resp = &desc.params.get_ver;
@@ -1714,8 +2017,8 @@ int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
struct ice_sq_cd *cd)
{
- struct ice_aqc_driver_ver *cmd;
- struct ice_aq_desc desc;
+ struct libie_aqc_driver_ver *cmd;
+ struct libie_aq_desc desc;
u16 len;
cmd = &desc.params.driver_ver;
@@ -1725,7 +2028,7 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->major_ver = dv->major_ver;
cmd->minor_ver = dv->minor_ver;
cmd->build_ver = dv->build_ver;
@@ -1750,9 +2053,9 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
struct ice_aqc_q_shutdown *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.q_shutdown;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
@@ -1793,8 +2096,8 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
struct ice_sq_cd *cd)
{
- struct ice_aqc_req_res *cmd_resp;
- struct ice_aq_desc desc;
+ struct libie_aqc_req_res *cmd_resp;
+ struct libie_aq_desc desc;
int status;
cmd_resp = &desc.params.res_owner;
@@ -1816,20 +2119,20 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
/* Global config lock response utilizes an additional status field.
*
* If the Global config lock resource is held by some other driver, the
- * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
+ * command completes with LIBIE_AQ_RES_GLBL_IN_PROG in the status field
* and the timeout field indicates the maximum time the current owner
* of the resource has to free it.
*/
if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
- if (le16_to_cpu(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
+ if (le16_to_cpu(cmd_resp->status) == LIBIE_AQ_RES_GLBL_SUCCESS) {
*timeout = le32_to_cpu(cmd_resp->timeout);
return 0;
} else if (le16_to_cpu(cmd_resp->status) ==
- ICE_AQ_RES_GLBL_IN_PROG) {
+ LIBIE_AQ_RES_GLBL_IN_PROG) {
*timeout = le32_to_cpu(cmd_resp->timeout);
return -EIO;
} else if (le16_to_cpu(cmd_resp->status) ==
- ICE_AQ_RES_GLBL_DONE) {
+ LIBIE_AQ_RES_GLBL_DONE) {
return -EALREADY;
}
@@ -1842,7 +2145,7 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
* with a busy return value and the timeout field indicates the maximum
* time the current owner of the resource has to free it.
*/
- if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
+ if (!status || hw->adminq.sq_last_status == LIBIE_AQ_RC_EBUSY)
*timeout = le32_to_cpu(cmd_resp->timeout);
return status;
@@ -1861,8 +2164,8 @@ static int
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
struct ice_sq_cd *cd)
{
- struct ice_aqc_req_res *cmd;
- struct ice_aq_desc desc;
+ struct libie_aqc_req_res *cmd;
+ struct libie_aq_desc desc;
cmd = &desc.params.res_owner;
@@ -1971,16 +2274,16 @@ int ice_aq_alloc_free_res(struct ice_hw *hw,
enum ice_adminq_opc opc)
{
struct ice_aqc_alloc_free_res_cmd *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.sw_res_ctrl;
+ cmd = libie_aq_raw(&desc);
if (!buf || buf_size < flex_array_size(buf, elem, 1))
return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->num_entries = cpu_to_le16(1);
@@ -2094,7 +2397,7 @@ static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
*/
static bool
ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
- struct ice_aqc_list_caps_elem *elem, const char *prefix)
+ struct libie_aqc_list_caps_elem *elem, const char *prefix)
{
u32 logical_id = le32_to_cpu(elem->logical_id);
u32 phys_id = le32_to_cpu(elem->phys_id);
@@ -2103,17 +2406,17 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
bool found = true;
switch (cap) {
- case ICE_AQC_CAPS_VALID_FUNCTIONS:
+ case LIBIE_AQC_CAPS_VALID_FUNCTIONS:
caps->valid_functions = number;
ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
caps->valid_functions);
break;
- case ICE_AQC_CAPS_SRIOV:
+ case LIBIE_AQC_CAPS_SRIOV:
caps->sr_iov_1_1 = (number == 1);
ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
caps->sr_iov_1_1);
break;
- case ICE_AQC_CAPS_DCB:
+ case LIBIE_AQC_CAPS_DCB:
caps->dcb = (number == 1);
caps->active_tc_bitmap = logical_id;
caps->maxtc = phys_id;
@@ -2122,7 +2425,7 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
caps->active_tc_bitmap);
ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
break;
- case ICE_AQC_CAPS_RSS:
+ case LIBIE_AQC_CAPS_RSS:
caps->rss_table_size = number;
caps->rss_table_entry_width = logical_id;
ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
@@ -2130,7 +2433,7 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
caps->rss_table_entry_width);
break;
- case ICE_AQC_CAPS_RXQS:
+ case LIBIE_AQC_CAPS_RXQS:
caps->num_rxq = number;
caps->rxq_first_id = phys_id;
ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
@@ -2138,7 +2441,7 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
caps->rxq_first_id);
break;
- case ICE_AQC_CAPS_TXQS:
+ case LIBIE_AQC_CAPS_TXQS:
caps->num_txq = number;
caps->txq_first_id = phys_id;
ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
@@ -2146,7 +2449,7 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
caps->txq_first_id);
break;
- case ICE_AQC_CAPS_MSIX:
+ case LIBIE_AQC_CAPS_MSIX:
caps->num_msix_vectors = number;
caps->msix_vector_first_id = phys_id;
ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
@@ -2154,55 +2457,59 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
caps->msix_vector_first_id);
break;
- case ICE_AQC_CAPS_PENDING_NVM_VER:
+ case LIBIE_AQC_CAPS_PENDING_NVM_VER:
caps->nvm_update_pending_nvm = true;
ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_nvm\n", prefix);
break;
- case ICE_AQC_CAPS_PENDING_OROM_VER:
+ case LIBIE_AQC_CAPS_PENDING_OROM_VER:
caps->nvm_update_pending_orom = true;
ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_orom\n", prefix);
break;
- case ICE_AQC_CAPS_PENDING_NET_VER:
+ case LIBIE_AQC_CAPS_PENDING_NET_VER:
caps->nvm_update_pending_netlist = true;
ice_debug(hw, ICE_DBG_INIT, "%s: update_pending_netlist\n", prefix);
break;
- case ICE_AQC_CAPS_NVM_MGMT:
+ case LIBIE_AQC_CAPS_NVM_MGMT:
caps->nvm_unified_update =
(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
true : false;
ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
caps->nvm_unified_update);
break;
- case ICE_AQC_CAPS_RDMA:
- caps->rdma = (number == 1);
+ case LIBIE_AQC_CAPS_RDMA:
+ if (IS_ENABLED(CONFIG_INFINIBAND_IRDMA))
+ caps->rdma = (number == 1);
ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
break;
- case ICE_AQC_CAPS_MAX_MTU:
+ case LIBIE_AQC_CAPS_MAX_MTU:
caps->max_mtu = number;
ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
prefix, caps->max_mtu);
break;
- case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
+ case LIBIE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
caps->pcie_reset_avoidance = (number > 0);
ice_debug(hw, ICE_DBG_INIT,
"%s: pcie_reset_avoidance = %d\n", prefix,
caps->pcie_reset_avoidance);
break;
- case ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
+ case LIBIE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT:
caps->reset_restrict_support = (number == 1);
ice_debug(hw, ICE_DBG_INIT,
"%s: reset_restrict_support = %d\n", prefix,
caps->reset_restrict_support);
break;
- case ICE_AQC_CAPS_FW_LAG_SUPPORT:
- caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG);
+ case LIBIE_AQC_CAPS_FW_LAG_SUPPORT:
+ caps->roce_lag = number & LIBIE_AQC_BIT_ROCEV2_LAG;
ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n",
prefix, caps->roce_lag);
- caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG);
+ caps->sriov_lag = number & LIBIE_AQC_BIT_SRIOV_LAG;
ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n",
prefix, caps->sriov_lag);
+ caps->sriov_aa_lag = number & LIBIE_AQC_BIT_SRIOV_AA_LAG;
+ ice_debug(hw, ICE_DBG_INIT, "%s: sriov_aa_lag = %u\n",
+ prefix, caps->sriov_aa_lag);
break;
- case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE:
+ case LIBIE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE:
caps->tx_sched_topo_comp_mode_en = (number == 1);
break;
default:
@@ -2256,7 +2563,7 @@ ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
*/
static void
ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
u32 logical_id = le32_to_cpu(cap->logical_id);
u32 number = le32_to_cpu(cap->number);
@@ -2279,7 +2586,7 @@ ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
*/
static void
ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
@@ -2298,7 +2605,7 @@ ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
*/
static void
ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
struct ice_ts_func_info *info = &func_p->ts_func_info;
u32 number = le32_to_cpu(cap->number);
@@ -2311,16 +2618,16 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
- if (!ice_is_e825c(hw)) {
+ if (hw->mac_type != ICE_MAC_GENERIC_3K_E825) {
info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
} else {
- info->clk_freq = ICE_TIME_REF_FREQ_156_250;
- info->clk_src = ICE_CLK_SRC_TCXO;
+ info->clk_freq = ICE_TSPLL_FREQ_156_250;
+ info->clk_src = ICE_CLK_SRC_TIME_REF;
}
- if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
- info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
+ if (info->clk_freq < NUM_ICE_TSPLL_FREQ) {
+ info->time_ref = (enum ice_tspll_freq)info->clk_freq;
} else {
/* Unknown clock frequency, so assume a (probably incorrect)
* default to avoid out-of-bounds look ups of frequency
@@ -2328,7 +2635,7 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
*/
ice_debug(hw, ICE_DBG_INIT, "1588 func caps: unknown clock frequency %u\n",
info->clk_freq);
- info->time_ref = ICE_TIME_REF_FREQ_25_000;
+ info->time_ref = ICE_TSPLL_FREQ_25_000;
}
ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
@@ -2397,7 +2704,7 @@ static void
ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
void *buf, u32 cap_count)
{
- struct ice_aqc_list_caps_elem *cap_resp;
+ struct libie_aqc_list_caps_elem *cap_resp;
u32 i;
cap_resp = buf;
@@ -2412,16 +2719,16 @@ ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
&cap_resp[i], "func caps");
switch (cap) {
- case ICE_AQC_CAPS_VF:
+ case LIBIE_AQC_CAPS_VF:
ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_VSI:
+ case LIBIE_AQC_CAPS_VSI:
ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_1588:
+ case LIBIE_AQC_CAPS_1588:
ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_FD:
+ case LIBIE_AQC_CAPS_FD:
ice_parse_fdir_func_caps(hw, func_p);
break;
default:
@@ -2465,7 +2772,7 @@ static int ice_func_id_to_logical_id(u32 active_function_bitmap, u8 pf_id)
*/
static void
ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
u32 number = le32_to_cpu(cap->number);
@@ -2486,7 +2793,7 @@ ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
*/
static void
ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
u32 number = le32_to_cpu(cap->number);
@@ -2505,7 +2812,7 @@ ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
*/
static void
ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
u32 number = le32_to_cpu(cap->number);
@@ -2524,7 +2831,7 @@ ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
*/
static void
ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
u32 logical_id = le32_to_cpu(cap->logical_id);
@@ -2544,6 +2851,7 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0);
info->ts_ll_int_read = ((number & ICE_TS_LL_TX_TS_INT_READ_M) != 0);
+ info->ll_phy_tmr_update = ((number & ICE_TS_LL_PHY_TMR_UPDATE_M) != 0);
info->ena_ports = logical_id;
info->tmr_own_map = phys_id;
@@ -2566,6 +2874,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->ts_ll_read);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_int_read = %u\n",
info->ts_ll_int_read);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: ll_phy_tmr_update = %u\n",
+ info->ll_phy_tmr_update);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
info->ena_ports);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
@@ -2582,7 +2892,7 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
*/
static void
ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
u32 number = le32_to_cpu(cap->number);
@@ -2602,7 +2912,7 @@ ice_parse_fdir_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
*/
static void
ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
dev_p->supported_sensors = le32_to_cpu(cap->number);
@@ -2621,7 +2931,7 @@ ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
*/
static void ice_parse_nac_topo_dev_caps(struct ice_hw *hw,
struct ice_hw_dev_caps *dev_p,
- struct ice_aqc_list_caps_elem *cap)
+ struct libie_aqc_list_caps_elem *cap)
{
dev_p->nac_topo.mode = le32_to_cpu(cap->number);
dev_p->nac_topo.id = le32_to_cpu(cap->phys_id) & ICE_NAC_TOPO_ID_M;
@@ -2657,7 +2967,7 @@ static void
ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
void *buf, u32 cap_count)
{
- struct ice_aqc_list_caps_elem *cap_resp;
+ struct libie_aqc_list_caps_elem *cap_resp;
u32 i;
cap_resp = buf;
@@ -2672,25 +2982,25 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
&cap_resp[i], "dev caps");
switch (cap) {
- case ICE_AQC_CAPS_VALID_FUNCTIONS:
+ case LIBIE_AQC_CAPS_VALID_FUNCTIONS:
ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_VF:
+ case LIBIE_AQC_CAPS_VF:
ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_VSI:
+ case LIBIE_AQC_CAPS_VSI:
ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_1588:
+ case LIBIE_AQC_CAPS_1588:
ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_FD:
+ case LIBIE_AQC_CAPS_FD:
ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_SENSOR_READING:
+ case LIBIE_AQC_CAPS_SENSOR_READING:
ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]);
break;
- case ICE_AQC_CAPS_NAC_TOPOLOGY:
+ case LIBIE_AQC_CAPS_NAC_TOPOLOGY:
ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]);
break;
default:
@@ -2706,40 +3016,6 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
- * ice_is_pf_c827 - check if pf contains c827 phy
- * @hw: pointer to the hw struct
- */
-bool ice_is_pf_c827(struct ice_hw *hw)
-{
- struct ice_aqc_get_link_topo cmd = {};
- u8 node_part_number;
- u16 node_handle;
- int status;
-
- if (hw->mac_type != ICE_MAC_E810)
- return false;
-
- if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
- return true;
-
- cmd.addr.topo_params.node_type_ctx =
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
- cmd.addr.topo_params.index = 0;
-
- status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
- &node_handle);
-
- if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
- return false;
-
- if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
- return true;
-
- return false;
-}
-
-/**
* ice_is_phy_rclk_in_netlist
* @hw: pointer to the hw struct
*
@@ -2747,9 +3023,11 @@ bool ice_is_pf_c827(struct ice_hw *hw)
*/
bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw)
{
- if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY,
+ ICE_AQC_LINK_TOPO_NODE_CTX_PORT,
ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) &&
- ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY,
+ ICE_AQC_LINK_TOPO_NODE_CTX_PORT,
ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL))
return false;
@@ -2765,6 +3043,7 @@ bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw)
bool ice_is_clock_mux_in_netlist(struct ice_hw *hw)
{
if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX,
+ ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX,
NULL))
return false;
@@ -2785,12 +3064,14 @@ bool ice_is_clock_mux_in_netlist(struct ice_hw *hw)
bool ice_is_cgu_in_netlist(struct ice_hw *hw)
{
if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032,
NULL)) {
hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032;
return true;
} else if (!ice_find_netlist_node(hw,
ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384,
NULL)) {
hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384;
@@ -2809,6 +3090,7 @@ bool ice_is_cgu_in_netlist(struct ice_hw *hw)
bool ice_is_gps_in_netlist(struct ice_hw *hw)
{
if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS,
+ ICE_AQC_LINK_TOPO_NODE_CTX_GLOBAL,
ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL))
return false;
@@ -2838,8 +3120,8 @@ int
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
- struct ice_aqc_list_caps *cmd;
- struct ice_aq_desc desc;
+ struct libie_aqc_list_caps *cmd;
+ struct libie_aq_desc desc;
int status;
cmd = &desc.params.get_cap;
@@ -2880,7 +3162,7 @@ ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
* device will return, we can simply send a 4KB buffer, the maximum
* possible size that firmware can return.
*/
- cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
+ cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct libie_aqc_list_caps_elem);
status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
ice_aqc_opc_list_dev_caps, NULL);
@@ -2914,7 +3196,7 @@ ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
* device will return, we can simply send a 4KB buffer, the maximum
* possible size that firmware can return.
*/
- cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
+ cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct libie_aqc_list_caps_elem);
status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
ice_aqc_opc_list_func_caps, NULL);
@@ -3023,9 +3305,9 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd)
{
struct ice_aqc_manage_mac_write *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.mac_write;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
cmd->flags = flags;
@@ -3042,10 +3324,12 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
*/
static int ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
- struct ice_aq_desc desc;
+ struct ice_aqc_clear_pxe *cmd;
+ struct libie_aq_desc desc;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
- desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
+ cmd->rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
@@ -3078,10 +3362,10 @@ ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
{
struct ice_aqc_set_port_params *cmd;
struct ice_hw *hw = pi->hw;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 cmd_flags = 0;
- cmd = &desc.params.set_port_params;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
if (double_vlan)
@@ -3108,6 +3392,7 @@ bool ice_is_100m_speed_supported(struct ice_hw *hw)
case ICE_DEV_ID_E822L_SGMII:
case ICE_DEV_ID_E823L_1GBE:
case ICE_DEV_ID_E823C_SGMII:
+ case ICE_DEV_ID_E825C_SGMII:
return true;
default:
return false;
@@ -3318,7 +3603,8 @@ int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct ice_aqc_set_phy_cfg *cmd;
+ struct libie_aq_desc desc;
int status;
if (!cfg)
@@ -3333,8 +3619,9 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
}
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
- desc.params.set_phy.lport_num = pi->lport;
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ cmd = libie_aq_raw(&desc);
+ cmd->lport_num = pi->lport;
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
@@ -3350,7 +3637,7 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
cfg->link_fec_opt);
status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
- if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
+ if (hw->adminq.sq_last_status == LIBIE_AQ_RC_EMODE)
status = 0;
if (!status)
@@ -3407,17 +3694,17 @@ int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
{
struct ice_aqc_dnl_call_command *cmd;
struct ice_aqc_dnl_call buf = {};
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int err;
buf.sto.txrx_equa_reqs.data_in = cpu_to_le16(data_in);
buf.sto.txrx_equa_reqs.op_code_serdes_sel =
cpu_to_le16(op_code | (serdes_num & 0xF));
- cmd = &desc.params.dnl_call;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dnl_call);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF |
- ICE_AQ_FLAG_RD |
- ICE_AQ_FLAG_SI);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF |
+ LIBIE_AQ_FLAG_RD |
+ LIBIE_AQ_FLAG_SI);
desc.datalen = cpu_to_le16(sizeof(struct ice_aqc_dnl_call));
cmd->activity_id = cpu_to_le16(ICE_AQC_ACT_ID_DNL);
@@ -3455,7 +3742,7 @@ static const u32 fec_reg[][ICE_FEC_MAX] = {
int ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
enum ice_fec_stats_types fec_type, u32 *output)
{
- u16 flag = (ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF | ICE_AQ_FLAG_SI);
+ u16 flag = (LIBIE_AQ_FLAG_RD | LIBIE_AQ_FLAG_BUF | LIBIE_AQ_FLAG_SI);
struct ice_sbq_msg_input msg = {};
u32 receiver_id, reg_offset;
int err;
@@ -3475,7 +3762,7 @@ int ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
msg.msg_addr_low = lower_16_bits(reg_offset);
msg.msg_addr_high = receiver_id;
msg.opcode = ice_sbq_msg_rd;
- msg.dest_dev = rmn_0;
+ msg.dest_dev = ice_sbq_dev_phy_0;
err = ice_sbq_rw_reg(hw, &msg, flag);
if (err)
@@ -3878,9 +4165,9 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd)
{
struct ice_aqc_restart_an *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.restart_an;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
@@ -3908,9 +4195,9 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
struct ice_sq_cd *cd)
{
struct ice_aqc_set_event_mask *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.set_event_mask;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
@@ -3932,9 +4219,9 @@ int
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
struct ice_aqc_set_mac_lb *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.set_mac_lb;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
if (ena_lpbk)
@@ -3957,9 +4244,9 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
{
struct ice_aqc_set_port_id_led *cmd;
struct ice_hw *hw = pi->hw;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.set_port_id_led;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
@@ -3995,7 +4282,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
u8 *pending_option_idx, bool *pending_option_valid)
{
struct ice_aqc_get_port_options *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
u8 i;
@@ -4003,7 +4290,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
return -EINVAL;
- cmd = &desc.params.get_port_options;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);
if (lport_valid)
@@ -4069,12 +4356,12 @@ ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
u8 new_option)
{
struct ice_aqc_set_port_option *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
if (new_option > ICE_AQC_PORT_OPT_COUNT_M)
return -EINVAL;
- cmd = &desc.params.set_port_option;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);
if (lport_valid)
@@ -4087,6 +4374,68 @@ ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
}
/**
+ * ice_get_phy_lane_number - Get PHY lane number for current adapter
+ * @hw: pointer to the hw struct
+ *
+ * Return: PHY lane number on success, negative error code otherwise.
+ */
+int ice_get_phy_lane_number(struct ice_hw *hw)
+{
+ struct ice_aqc_get_port_options_elem *options;
+ unsigned int lport = 0;
+ unsigned int lane;
+ int err;
+
+ /* E82X does not have sequential IDs; the lane number is the PF ID.
+ * For the E825 device, the exception is the variant with an external
+ * PHY (0x579F), which also has a 1:1 pf_id -> lane_number
+ * mapping.
+ */
+ if (hw->mac_type == ICE_MAC_GENERIC ||
+ hw->device_id == ICE_DEV_ID_E825C_SGMII)
+ return hw->pf_id;
+
+ options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL);
+ if (!options)
+ return -ENOMEM;
+
+ for (lane = 0; lane < ICE_MAX_PORT_PER_PCI_DEV; lane++) {
+ u8 options_count = ICE_AQC_PORT_OPT_MAX;
+ u8 speed, active_idx, pending_idx;
+ bool active_valid, pending_valid;
+
+ err = ice_aq_get_port_options(hw, options, &options_count, lane,
+ true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (err)
+ goto err;
+
+ if (!active_valid)
+ continue;
+
+ speed = options[active_idx].max_lane_speed;
+ /* If we don't get speed for this lane, it's unoccupied */
+ if (speed > ICE_AQC_PORT_OPT_MAX_LANE_40G)
+ continue;
+
+ if (hw->pf_id == lport) {
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 &&
+ ice_is_dual(hw) && !ice_is_primary(hw))
+ lane += ICE_PORTS_PER_QUAD;
+ kfree(options);
+ return lane;
+ }
+ lport++;
+ }
+
+ /* PHY lane not found */
+ err = -ENXIO;
+err:
+ kfree(options);
+ return err;
+}
+
+/**
* ice_aq_sff_eeprom
* @hw: pointer to the HW struct
* @lport: bits [7:0] = logical port, bit [8] = logical port valid
@@ -4107,7 +4456,7 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
bool write, struct ice_sq_cd *cd)
{
struct ice_aqc_sff_eeprom *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 i2c_bus_addr;
int status;
@@ -4115,8 +4464,8 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
- cmd = &desc.params.read_write_sff_param;
- desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
+ cmd = libie_aq_raw(&desc);
+ desc.flags = cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->lport_num = (u8)(lport & 0xff);
cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
i2c_bus_addr = FIELD_PREP(ICE_AQC_SFF_I2CBUS_7BIT_M, bus_addr >> 1) |
@@ -4176,7 +4525,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw,
struct ice_aqc_get_set_rss_lut *desc_params;
enum ice_aqc_lut_flags flags;
enum ice_lut_size lut_size;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u8 *lut = params->lut;
@@ -4192,9 +4541,9 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw,
opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut;
ice_fill_dflt_direct_cmd_desc(&desc, opcode);
if (set)
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
- desc_params = &desc.params.get_set_rss_lut;
+ desc_params = libie_aq_raw(&desc);
vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);
@@ -4249,16 +4598,16 @@ __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
{
struct ice_aqc_get_set_rss_key *desc_params;
u16 key_size = sizeof(*key);
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
if (set) {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
} else {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
}
- desc_params = &desc.params.get_set_rss_key;
+ desc_params = libie_aq_raw(&desc);
desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);
return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
@@ -4330,10 +4679,10 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
{
struct ice_aqc_add_tx_qgrp *list;
struct ice_aqc_add_txqs *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 i, sum_size = 0;
- cmd = &desc.params.add_txqs;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
@@ -4352,7 +4701,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
if (buf_size != sum_size)
return -EINVAL;
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->num_qgrps = num_qgrps;
@@ -4379,12 +4728,12 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
{
struct ice_aqc_dis_txq_item *item;
struct ice_aqc_dis_txqs *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 vmvf_and_timeout;
u16 i, sz = 0;
int status;
- cmd = &desc.params.dis_txqs;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
/* qg_list can be NULL only in VM/VF reset flow */
@@ -4425,7 +4774,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
/* set RD bit to indicate that command buffer is provided by the driver
* and it needs to be read by the firmware
*/
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
for (i = 0, item = qg_list; i < num_qgrps; i++) {
u16 item_size = struct_size(item, q_id, item->num_qs);
@@ -4457,40 +4806,42 @@ do_aq:
}
/**
- * ice_aq_cfg_lan_txq
+ * ice_aq_cfg_lan_txq - send AQ command 0x0C32 to FW
* @hw: pointer to the hardware structure
* @buf: buffer for command
* @buf_size: size of buffer in bytes
* @num_qs: number of queues being configured
* @oldport: origination lport
* @newport: destination lport
+ * @mode: cmd_type selecting the move/configure operation
* @cd: pointer to command details structure or NULL
*
* Move/Configure LAN Tx queue (0x0C32)
*
- * There is a better AQ command to use for moving nodes, so only coding
- * this one for configuring the node.
+ * Return: Zero on success, associated error code on failure.
*/
int
ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
u16 buf_size, u16 num_qs, u8 oldport, u8 newport,
- struct ice_sq_cd *cd)
+ u8 mode, struct ice_sq_cd *cd)
{
struct ice_aqc_cfg_txqs *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.cfg_txqs;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
if (!buf)
return -EINVAL;
- cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG;
+ cmd->cmd_type = mode;
cmd->num_qs = num_qs;
cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M);
cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_DST_PRT_M, newport);
+ cmd->port_num_chng |= FIELD_PREP(ICE_AQC_Q_CFG_MODE_M,
+ ICE_AQC_Q_CFG_MODE_KEEP_OWN);
cmd->time_out = FIELD_PREP(ICE_AQC_Q_CFG_TIMEOUT_M, 5);
cmd->blocked_cgds = 0;
@@ -4518,10 +4869,10 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
{
struct ice_aqc_add_rdma_qset_data *list;
struct ice_aqc_add_rdma_qset *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 i, sum_size = 0;
- cmd = &desc.params.add_rdma_qset;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);
@@ -4539,213 +4890,54 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
if (buf_size != sum_size)
return -EINVAL;
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->num_qset_grps = num_qset_grps;
return ice_aq_send_cmd(hw, &desc, qset_list, buf_size, cd);
}
-/* End of FW Admin Queue command wrappers */
-
-/**
- * ice_pack_ctx_byte - write a byte to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
- */
-static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- u8 src_byte, dest_byte, mask;
- u8 *from, *dest;
- u16 shift_width;
-
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
-
- src_byte = *from;
- src_byte <<= shift_width;
- src_byte &= mask;
-
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
-
- memcpy(&dest_byte, dest, sizeof(dest_byte));
-
- dest_byte &= ~mask; /* get the bits not changing */
- dest_byte |= src_byte; /* add in the new bits */
-
- /* put it all back */
- memcpy(dest, &dest_byte, sizeof(dest_byte));
-}
-
-/**
- * ice_pack_ctx_word - write a word to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
- */
-static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- u16 src_word, mask;
- __le16 dest_word;
- u8 *from, *dest;
- u16 shift_width;
-
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
-
- /* don't swizzle the bits until after the mask because the mask bits
- * will be in a different bit position on big endian machines
- */
- src_word = *(u16 *)from;
- src_word <<= shift_width;
- src_word &= mask;
-
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
-
- memcpy(&dest_word, dest, sizeof(dest_word));
-
- dest_word &= ~(cpu_to_le16(mask)); /* get the bits not changing */
- dest_word |= cpu_to_le16(src_word); /* add in the new bits */
-
- /* put it all back */
- memcpy(dest, &dest_word, sizeof(dest_word));
-}
-
-/**
- * ice_pack_ctx_dword - write a dword to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
- */
-static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- u32 src_dword, mask;
- __le32 dest_dword;
- u8 *from, *dest;
- u16 shift_width;
-
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK(ce_info->width - 1 + shift_width, shift_width);
-
- /* don't swizzle the bits until after the mask because the mask bits
- * will be in a different bit position on big endian machines
- */
- src_dword = *(u32 *)from;
- src_dword <<= shift_width;
- src_dword &= mask;
-
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
-
- memcpy(&dest_dword, dest, sizeof(dest_dword));
-
- dest_dword &= ~(cpu_to_le32(mask)); /* get the bits not changing */
- dest_dword |= cpu_to_le32(src_dword); /* add in the new bits */
-
- /* put it all back */
- memcpy(dest, &dest_dword, sizeof(dest_dword));
-}
-
/**
- * ice_pack_ctx_qword - write a qword to a packed context structure
- * @src_ctx: unpacked source context structure
- * @dest_ctx: packed destination context data
- * @ce_info: context element description
+ * ice_aq_set_txtimeq - set Tx time queues
+ * @hw: pointer to the hardware structure
+ * @txtimeq: first Tx time queue id to configure
+ * @q_count: number of queues to configure
+ * @txtime_qg: queue group to be set
+ * @buf_size: size of buffer for indirect command
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set Tx Time queue (0x0C35)
+ * Return: 0 on success or negative value on failure.
*/
-static void ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
+int
+ice_aq_set_txtimeq(struct ice_hw *hw, u16 txtimeq, u8 q_count,
+ struct ice_aqc_set_txtime_qgrp *txtime_qg, u16 buf_size,
+ struct ice_sq_cd *cd)
{
- u64 src_qword, mask;
- __le64 dest_qword;
- u8 *from, *dest;
- u16 shift_width;
+ struct ice_aqc_set_txtimeqs *cmd;
+ struct libie_aq_desc desc;
+ u16 size;
- /* copy from the next struct field */
- from = src_ctx + ce_info->offset;
-
- /* prepare the bits and mask */
- shift_width = ce_info->lsb % 8;
- mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width);
+ if (!txtime_qg || txtimeq > ICE_TXTIME_MAX_QUEUE ||
+ q_count < 1 || q_count > ICE_SET_TXTIME_MAX_Q_AMOUNT)
+ return -EINVAL;
- /* don't swizzle the bits until after the mask because the mask bits
- * will be in a different bit position on big endian machines
- */
- src_qword = *(u64 *)from;
- src_qword <<= shift_width;
- src_qword &= mask;
+ size = struct_size(txtime_qg, txtimeqs, q_count);
+ if (buf_size != size)
+ return -EINVAL;
- /* get the current bits from the target bit string */
- dest = dest_ctx + (ce_info->lsb / 8);
+ cmd = libie_aq_raw(&desc);
- memcpy(&dest_qword, dest, sizeof(dest_qword));
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_txtimeqs);
- dest_qword &= ~(cpu_to_le64(mask)); /* get the bits not changing */
- dest_qword |= cpu_to_le64(src_qword); /* add in the new bits */
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
- /* put it all back */
- memcpy(dest, &dest_qword, sizeof(dest_qword));
+ cmd->q_id = cpu_to_le16(txtimeq);
+ cmd->q_amount = cpu_to_le16(q_count);
+ return ice_aq_send_cmd(hw, &desc, txtime_qg, buf_size, cd);
}
-/**
- * ice_set_ctx - set context bits in packed structure
- * @hw: pointer to the hardware structure
- * @src_ctx: pointer to a generic non-packed context structure
- * @dest_ctx: pointer to memory for the packed structure
- * @ce_info: List of Rx context elements
- */
-int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
-{
- int f;
-
- for (f = 0; ce_info[f].width; f++) {
- /* We have to deal with each element of the FW response
- * using the correct size so that we are correct regardless
- * of the endianness of the machine.
- */
- if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
- ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
- f, ce_info[f].width, ce_info[f].size_of);
- continue;
- }
- switch (ce_info[f].size_of) {
- case sizeof(u8):
- ice_pack_ctx_byte(src_ctx, dest_ctx, &ce_info[f]);
- break;
- case sizeof(u16):
- ice_pack_ctx_word(src_ctx, dest_ctx, &ce_info[f]);
- break;
- case sizeof(u32):
- ice_pack_ctx_dword(src_ctx, dest_ctx, &ce_info[f]);
- break;
- case sizeof(u64):
- ice_pack_ctx_qword(src_ctx, dest_ctx, &ce_info[f]);
- break;
- default:
- return -EINVAL;
- }
- }
-
- return 0;
-}
+/* End of FW Admin Queue command wrappers */
/**
* ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
@@ -5156,6 +5348,32 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
}
/**
+ * ice_aq_get_cgu_input_pin_measure - get input pin signal measurements
+ * @hw: pointer to the HW struct
+ * @dpll_idx: index of dpll to be measured
+ * @meas: array to be filled with results
+ * @meas_num: maximum number of results the array can hold
+ *
+ * Get CGU measurements (0x0C59) of phase and frequency offsets for
+ * input pins on the given dpll.
+ *
+ * Return: 0 on success or negative value on failure.
+ */
+int ice_aq_get_cgu_input_pin_measure(struct ice_hw *hw, u8 dpll_idx,
+ struct ice_cgu_input_measure *meas,
+ u16 meas_num)
+{
+ struct ice_aqc_get_cgu_input_measure *cmd;
+ struct libie_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_measure);
+ cmd = libie_aq_raw(&desc);
+ cmd->dpll_idx_opt = dpll_idx & ICE_AQC_GET_CGU_IN_MEAS_DPLL_IDX_M;
+
+ return ice_aq_send_cmd(hw, &desc, meas, meas_num * sizeof(*meas), NULL);
+}
+
+/**
* ice_aq_get_cgu_abilities - get cgu abilities
* @hw: pointer to the HW struct
* @abilities: CGU abilities
@@ -5167,7 +5385,7 @@ int
ice_aq_get_cgu_abilities(struct ice_hw *hw,
struct ice_aqc_get_cgu_abilities *abilities)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities);
return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL);
@@ -5190,10 +5408,10 @@ ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2,
u32 freq, s32 phase_delay)
{
struct ice_aqc_set_cgu_input_config *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config);
- cmd = &desc.params.set_cgu_input_config;
+ cmd = libie_aq_raw(&desc);
cmd->input_idx = input_idx;
cmd->flags1 = flags1;
cmd->flags2 = flags2;
@@ -5222,11 +5440,11 @@ ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type,
u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay)
{
struct ice_aqc_get_cgu_input_config *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int ret;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config);
- cmd = &desc.params.get_cgu_input_config;
+ cmd = libie_aq_raw(&desc);
cmd->input_idx = input_idx;
ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
@@ -5265,10 +5483,10 @@ ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags,
u8 src_sel, u32 freq, s32 phase_delay)
{
struct ice_aqc_set_cgu_output_config *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config);
- cmd = &desc.params.set_cgu_output_config;
+ cmd = libie_aq_raw(&desc);
cmd->output_idx = output_idx;
cmd->flags = flags;
cmd->src_sel = src_sel;
@@ -5295,11 +5513,11 @@ ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags,
u8 *src_sel, u32 *freq, u32 *src_freq)
{
struct ice_aqc_get_cgu_output_config *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int ret;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config);
- cmd = &desc.params.get_cgu_output_config;
+ cmd = libie_aq_raw(&desc);
cmd->output_idx = output_idx;
ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
@@ -5336,11 +5554,11 @@ ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
u8 *eec_mode)
{
struct ice_aqc_get_cgu_dpll_status *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status);
- cmd = &desc.params.get_cgu_dpll_status;
+ cmd = libie_aq_raw(&desc);
cmd->dpll_num = dpll_num;
status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
@@ -5374,10 +5592,10 @@ ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state,
u8 config, u8 eec_mode)
{
struct ice_aqc_set_cgu_dpll_config *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config);
- cmd = &desc.params.set_cgu_dpll_config;
+ cmd = libie_aq_raw(&desc);
cmd->dpll_num = dpll_num;
cmd->ref_state = ref_state;
cmd->config = config;
@@ -5401,10 +5619,10 @@ ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
u8 ref_priority)
{
struct ice_aqc_set_cgu_ref_prio *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio);
- cmd = &desc.params.set_cgu_ref_prio;
+ cmd = libie_aq_raw(&desc);
cmd->dpll_num = dpll_num;
cmd->ref_idx = ref_idx;
cmd->ref_priority = ref_priority;
@@ -5427,11 +5645,11 @@ ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
u8 *ref_prio)
{
struct ice_aqc_get_cgu_ref_prio *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio);
- cmd = &desc.params.get_cgu_ref_prio;
+ cmd = libie_aq_raw(&desc);
cmd->dpll_num = dpll_num;
cmd->ref_idx = ref_idx;
@@ -5457,11 +5675,11 @@ ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver,
u32 *cgu_fw_ver)
{
struct ice_aqc_get_cgu_info *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_info);
- cmd = &desc.params.get_cgu_info;
+ cmd = libie_aq_raw(&desc);
status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
if (!status) {
@@ -5488,11 +5706,11 @@ ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
u32 *freq)
{
struct ice_aqc_set_phy_rec_clk_out *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out);
- cmd = &desc.params.set_phy_rec_clk_out;
+ cmd = libie_aq_raw(&desc);
cmd->phy_output = phy_output;
cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT;
cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN;
@@ -5521,11 +5739,11 @@ ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
u8 *flags, u16 *node_handle)
{
struct ice_aqc_get_phy_rec_clk_out *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out);
- cmd = &desc.params.get_phy_rec_clk_out;
+ cmd = libie_aq_raw(&desc);
cmd->phy_output = *phy_output;
status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
@@ -5553,11 +5771,11 @@ int ice_aq_get_sensor_reading(struct ice_hw *hw,
struct ice_aqc_get_sensor_reading_resp *data)
{
struct ice_aqc_get_sensor_reading *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sensor_reading);
- cmd = &desc.params.get_sensor_reading;
+ cmd = libie_aq_raw(&desc);
#define ICE_INTERNAL_TEMP_SENSOR_FORMAT 0
#define ICE_INTERNAL_TEMP_SENSOR 0
cmd->sensor = ICE_INTERNAL_TEMP_SENSOR;
@@ -5565,7 +5783,7 @@ int ice_aq_get_sensor_reading(struct ice_hw *hw,
status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
if (!status)
- memcpy(data, &desc.params.get_sensor_reading_resp,
+ memcpy(data, &desc.params.raw,
sizeof(*data));
return status;
@@ -5762,13 +5980,13 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, u8 *data,
struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc = { 0 };
+ struct libie_aq_desc desc = { 0 };
struct ice_aqc_i2c *cmd;
u8 data_size;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
- cmd = &desc.params.read_write_i2c;
+ cmd = libie_aq_raw(&desc);
if (!data)
return -EINVAL;
@@ -5785,7 +6003,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
struct ice_aqc_read_i2c_resp *resp;
u8 i;
- resp = &desc.params.read_i2c_resp;
+ resp = libie_aq_raw(&desc);
for (i = 0; i < data_size; i++) {
*data = resp->i2c_data[i];
data++;
@@ -5817,12 +6035,12 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc = { 0 };
+ struct libie_aq_desc desc = { 0 };
struct ice_aqc_i2c *cmd;
u8 data_size;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_i2c);
- cmd = &desc.params.read_write_i2c;
+ cmd = libie_aq_raw(&desc);
data_size = FIELD_GET(ICE_AQC_I2C_DATA_SIZE_M, params);
@@ -5841,6 +6059,95 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
}
/**
+ * ice_get_pca9575_handle - find and return the PCA9575 controller
+ * @hw: pointer to the hw struct
+ * @pca9575_handle: GPIO controller's handle
+ *
+ * Find and return the GPIO controller's handle in the netlist.
+ * Once found, the value is cached in the hw structure and subsequent
+ * calls return the cached value.
+ *
+ * Return: 0 on success, -ENXIO when there's no PCA9575 present.
+ */
+int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
+{
+ struct ice_aqc_get_link_topo *cmd;
+ struct libie_aq_desc desc;
+ int err;
+ u8 idx;
+
+ /* If the handle was read previously, return the cached value */
+ if (hw->io_expander_handle) {
+ *pca9575_handle = hw->io_expander_handle;
+ return 0;
+ }
+
+#define SW_PCA9575_SFP_TOPO_IDX 2
+#define SW_PCA9575_QSFP_TOPO_IDX 1
+
+ /* Check if the SW IO expander controlling SMA exists in the netlist. */
+ if (hw->device_id == ICE_DEV_ID_E810C_SFP)
+ idx = SW_PCA9575_SFP_TOPO_IDX;
+ else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
+ idx = SW_PCA9575_QSFP_TOPO_IDX;
+ else
+ return -ENXIO;
+
+ /* If the handle has not been detected yet, read it from the netlist */
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+ cmd = libie_aq_raw(&desc);
+ cmd->addr.topo_params.node_type_ctx =
+ ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL;
+ cmd->addr.topo_params.index = idx;
+
+ err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (err)
+ return -ENXIO;
+
+ /* Verify if we found the right IO expander type */
+ if (cmd->node_part_num != ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
+ return -ENXIO;
+
+ /* If present save the handle and return it */
+ hw->io_expander_handle =
+ le16_to_cpu(cmd->addr.handle);
+ *pca9575_handle = hw->io_expander_handle;
+
+ return 0;
+}
+
+/**
+ * ice_read_pca9575_reg - read the register from the PCA9575 controller
+ * @hw: pointer to the hw struct
+ * @offset: GPIO controller register offset
+ * @data: pointer to data to be read from the GPIO controller
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data)
+{
+ struct ice_aqc_link_topo_addr link_topo;
+ __le16 addr;
+ u16 handle;
+ int err;
+
+ memset(&link_topo, 0, sizeof(link_topo));
+
+ err = ice_get_pca9575_handle(hw, &handle);
+ if (err)
+ return err;
+
+ link_topo.handle = cpu_to_le16(handle);
+ link_topo.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
+ ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
+
+ addr = cpu_to_le16((u16)offset);
+
+ return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
+}
+
+/**
* ice_aq_set_gpio
* @hw: pointer to the hw struct
* @gpio_ctrl_handle: GPIO controller node handle
@@ -5854,11 +6161,11 @@ int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
struct ice_sq_cd *cd)
{
+ struct libie_aq_desc desc;
struct ice_aqc_gpio *cmd;
- struct ice_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
- cmd = &desc.params.read_write_gpio;
+ cmd = libie_aq_raw(&desc);
cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
cmd->gpio_num = pin_idx;
cmd->gpio_val = value ? 1 : 0;
@@ -5881,12 +6188,12 @@ int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
bool *value, struct ice_sq_cd *cd)
{
+ struct libie_aq_desc desc;
struct ice_aqc_gpio *cmd;
- struct ice_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
- cmd = &desc.params.read_write_gpio;
+ cmd = libie_aq_raw(&desc);
cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
cmd->gpio_num = pin_idx;
@@ -6023,6 +6330,44 @@ bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
}
/**
+ * ice_is_fw_health_report_supported - checks if firmware supports health events
+ * @hw: pointer to the hardware structure
+ *
+ * Return: true if firmware supports health status reports,
+ * false otherwise
+ */
+bool ice_is_fw_health_report_supported(struct ice_hw *hw)
+{
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_HEALTH_REPORT_MAJ,
+ ICE_FW_API_HEALTH_REPORT_MIN,
+ ICE_FW_API_HEALTH_REPORT_PATCH);
+}
+
+/**
+ * ice_aq_set_health_status_cfg - Configure FW health events
+ * @hw: pointer to the HW struct
+ * @event_source: type of diagnostic events to enable
+ *
+ * Configure the health status event types that the firmware will send to this
+ * PF. The supported event types are: PF-specific, all PFs, and global.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int ice_aq_set_health_status_cfg(struct ice_hw *hw, u8 event_source)
+{
+ struct ice_aqc_set_health_status_cfg *cmd;
+ struct libie_aq_desc desc;
+
+ cmd = libie_aq_raw(&desc);
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_health_status_cfg);
+
+ cmd->event_source = event_source;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
* ice_aq_set_lldp_mib - Set the LLDP MIB
* @hw: pointer to the HW struct
* @mib_type: Local, Remote or both Local and Remote MIBs
@@ -6037,16 +6382,16 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_set_local_mib *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.lldp_set_mib;
+ cmd = libie_aq_raw(&desc);
if (buf_size == 0 || !buf)
return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
- desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16((u16)LIBIE_AQ_FLAG_RD);
desc.datalen = cpu_to_le16(buf_size);
cmd->type = mib_type;
@@ -6072,16 +6417,22 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
/**
* ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
* @hw: pointer to HW struct
- * @vsi_num: absolute HW index for VSI
+ * @vsi: VSI to add the filter to
* @add: boolean for if adding or removing a filter
+ *
+ * Return: 0 on success, -EOPNOTSUPP if the operation cannot be performed
+ * with this HW or VSI, otherwise an error corresponding to
+ * the AQ transaction result.
*/
-int
-ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
+int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add)
{
struct ice_aqc_lldp_filter_ctrl *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.lldp_filter_ctrl;
+ if (vsi->type != ICE_VSI_PF || !ice_fw_supports_lldp_fltr_ctrl(hw))
+ return -EOPNOTSUPP;
+
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
@@ -6090,7 +6441,7 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
else
cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
- cmd->vsi_num = cpu_to_le16(vsi_num);
+ cmd->vsi_num = cpu_to_le16(vsi->vsi_num);
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
@@ -6101,7 +6452,7 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
*/
int ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib);
@@ -6157,3 +6508,86 @@ u32 ice_get_link_speed(u16 index)
return ice_aq_to_link_speed[index];
}
+
+/**
+ * ice_get_dest_cgu - get destination CGU dev for given HW
+ * @hw: pointer to the HW struct
+ *
+ * Get CGU client id for CGU register read/write operations.
+ *
+ * Return: CGU device id to use in SBQ transactions.
+ */
+static enum ice_sbq_dev_id ice_get_dest_cgu(struct ice_hw *hw)
+{
+ /* On dual-complex E825, only complex 0 has a functional CGU powering
+ * all the PHYs.
+ * The SBQ destination device cgu points to the CGU on the current
+ * complex; to access the primary CGU from the secondary complex, the
+ * driver should use cgu_peer as the destination device.
+ */
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) &&
+ !ice_is_primary(hw))
+ return ice_sbq_dev_cgu_peer;
+ return ice_sbq_dev_cgu;
+}
+
+/**
+ * ice_read_cgu_reg - Read a CGU register
+ * @hw: Pointer to the HW struct
+ * @addr: Register address to read
+ * @val: Storage for register value read
+ *
+ * Read the contents of a register of the Clock Generation Unit. Only
+ * applicable to E82X devices.
+ *
+ * Return: 0 on success, other error codes when failed to read from CGU.
+ */
+int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val)
+{
+ struct ice_sbq_msg_input cgu_msg = {
+ .dest_dev = ice_get_dest_cgu(hw),
+ .opcode = ice_sbq_msg_rd,
+ .msg_addr_low = addr
+ };
+ int err;
+
+ err = ice_sbq_rw_reg(hw, &cgu_msg, LIBIE_AQ_FLAG_RD);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
+ addr, err);
+ return err;
+ }
+
+ *val = cgu_msg.data;
+
+ return 0;
+}
+
+/**
+ * ice_write_cgu_reg - Write a CGU register
+ * @hw: Pointer to the HW struct
+ * @addr: Register address to write
+ * @val: Value to write into the register
+ *
+ * Write the specified value to a register of the Clock Generation Unit. Only
+ * applicable to E82X devices.
+ *
+ * Return: 0 on success, other error codes when failed to write to CGU.
+ */
+int ice_write_cgu_reg(struct ice_hw *hw, u32 addr, u32 val)
+{
+ struct ice_sbq_msg_input cgu_msg = {
+ .dest_dev = ice_get_dest_cgu(hw),
+ .opcode = ice_sbq_msg_wr,
+ .msg_addr_low = addr,
+ .data = val
+ };
+ int err;
+
+ err = ice_sbq_rw_reg(hw, &cgu_msg, LIBIE_AQ_FLAG_RD);
+ if (err)
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
+ addr, err);
+
+ return err;
+}
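A minimal read-modify-write sketch for the new CGU register helpers above, for illustration only: it assumes an already-initialized struct ice_hw on an E82X device and uses the ICE_CGU_R9 definitions added to ice_common.h below; the function name is hypothetical.

/* Hypothetical example, not part of the patch */
static int ice_example_enable_time_ref(struct ice_hw *hw)
{
	u32 val;
	int err;

	/* Fetch the current register contents over the sideband queue */
	err = ice_read_cgu_reg(hw, ICE_CGU_R9, &val);
	if (err)
		return err;

	/* Set the TIME_REF enable bit and write the register back */
	val |= ICE_CGU_R9_TIME_REF_EN;

	return ice_write_cgu_reg(hw, ICE_CGU_R9, val);
}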
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 27208a60cece..e700ac0dc347 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -39,6 +39,47 @@
#define FEC_RECEIVER_ID_PCS0 (0x33 << FEC_RECV_ID_SHIFT)
#define FEC_RECEIVER_ID_PCS1 (0x34 << FEC_RECV_ID_SHIFT)
+#define ICE_CGU_R9 0x24
+#define ICE_CGU_R9_TIME_REF_FREQ_SEL GENMASK(2, 0)
+#define ICE_CGU_R9_CLK_EREF0_EN BIT(4)
+#define ICE_CGU_R9_TIME_REF_EN BIT(5)
+#define ICE_CGU_R9_TIME_SYNC_EN BIT(6)
+#define ICE_CGU_R9_ONE_PPS_OUT_EN BIT(7)
+#define ICE_CGU_R9_ONE_PPS_OUT_AMP GENMASK(19, 18)
+
+#define ICE_CGU_R16 0x40
+#define ICE_CGU_R16_TSPLL_CK_REFCLKFREQ GENMASK(31, 24)
+
+#define ICE_CGU_R19 0x4C
+#define ICE_CGU_R19_TSPLL_FBDIV_INTGR_E82X GENMASK(7, 0)
+#define ICE_CGU_R19_TSPLL_FBDIV_INTGR_E825 GENMASK(9, 0)
+#define ICE_CGU_R19_TSPLL_NDIVRATIO GENMASK(19, 16)
+
+#define ICE_CGU_R22 0x58
+#define ICE_CGU_R22_TIME1588CLK_DIV GENMASK(23, 20)
+#define ICE_CGU_R22_TIME1588CLK_DIV2 BIT(30)
+
+#define ICE_CGU_R23 0x5C
+#define ICE_CGU_R24 0x60
+#define ICE_CGU_R24_FBDIV_FRAC GENMASK(21, 0)
+#define ICE_CGU_R23_R24_TSPLL_ENABLE BIT(24)
+#define ICE_CGU_R23_R24_REF1588_CK_DIV GENMASK(30, 27)
+#define ICE_CGU_R23_R24_TIME_REF_SEL BIT(31)
+
+#define ICE_CGU_BW_TDC 0x31C
+#define ICE_CGU_BW_TDC_PLLLOCK_SEL GENMASK(30, 29)
+
+#define ICE_CGU_RO_LOCK 0x3F0
+#define ICE_CGU_RO_LOCK_TRUE_LOCK BIT(12)
+#define ICE_CGU_RO_LOCK_UNLOCK BIT(13)
+
+#define ICE_CGU_CNTR_BIST 0x344
+#define ICE_CGU_CNTR_BIST_PLLLOCK_SEL_0 BIT(15)
+#define ICE_CGU_CNTR_BIST_PLLLOCK_SEL_1 BIT(16)
+
+#define ICE_CGU_RO_BWM_LF 0x370
+#define ICE_CGU_RO_BWM_LF_TRUE_LOCK BIT(12)
+
int ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
int ice_check_reset(struct ice_hw *hw);
@@ -68,7 +109,7 @@ bool ice_is_sbq_supported(struct ice_hw *hw);
struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw);
int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
- struct ice_aq_desc *desc, void *buf, u16 buf_size,
+ struct libie_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
void ice_clear_pxe_mode(struct ice_hw *hw);
int ice_get_caps(struct ice_hw *hw);
@@ -77,6 +118,12 @@ void ice_set_safe_mode_caps(struct ice_hw *hw);
int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
+int ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
+ u32 rxq_index);
+int ice_read_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
+ u32 txq_index);
+int ice_write_txq_ctx(struct ice_hw *hw, struct ice_tlan_ctx *tlan_ctx,
+ u32 txq_index);
int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params);
@@ -91,15 +138,14 @@ ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
-void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
-extern const struct ice_ctx_ele ice_tlan_ctx_info[];
-int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info);
+void ice_fill_dflt_direct_cmd_desc(struct libie_aq_desc *desc, u16 opcode);
+
+void ice_pack_txq_ctx(const struct ice_tlan_ctx *ctx, ice_txq_ctx_buf_t *buf);
extern struct mutex ice_global_cfg_lock_sw;
int
-ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
+ice_aq_send_cmd(struct ice_hw *hw, struct libie_aq_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd);
int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
@@ -113,7 +159,6 @@ int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
-bool ice_is_pf_c827(struct ice_hw *hw);
bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw);
bool ice_is_clock_mux_in_netlist(struct ice_hw *hw);
bool ice_is_cgu_in_netlist(struct ice_hw *hw);
@@ -133,7 +178,6 @@ int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
bool ice_is_generic_mac(struct ice_hw *hw);
-bool ice_is_e810(struct ice_hw *hw);
int ice_clear_pf_cfg(struct ice_hw *hw);
int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
@@ -143,6 +187,8 @@ int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi);
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps);
+bool ice_is_fw_health_report_supported(struct ice_hw *hw);
+int ice_aq_set_health_status_cfg(struct ice_hw *hw, u8 event_source);
int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
u8 serdes_num, int *output);
int
@@ -193,6 +239,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
int
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
u8 new_option);
+int ice_get_phy_lane_number(struct ice_hw *hw);
int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
@@ -223,12 +270,21 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
int
ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
u16 buf_size, u16 num_qs, u8 oldport, u8 newport,
- struct ice_sq_cd *cd);
+ u8 mode, struct ice_sq_cd *cd);
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
+int
+ice_aq_set_txtimeq(struct ice_hw *hw, u16 txtimeq, u8 q_count,
+ struct ice_aqc_set_txtime_qgrp *txtime_qg,
+ u16 buf_size, struct ice_sq_cd *cd);
+void ice_pack_txtime_ctx(const struct ice_txtime_ctx *ctx,
+ ice_txtime_ctx_buf_t *buf);
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flag);
+int ice_aq_get_cgu_input_pin_measure(struct ice_hw *hw, u8 dpll_idx,
+ struct ice_cgu_input_measure *meas,
+ u16 meas_num);
int
ice_aq_get_cgu_abilities(struct ice_hw *hw,
struct ice_aqc_get_cgu_abilities *abilities);
@@ -275,10 +331,6 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
-bool ice_is_e810t(struct ice_hw *hw);
-bool ice_is_e822(struct ice_hw *hw);
-bool ice_is_e823(struct ice_hw *hw);
-bool ice_is_e825c(struct ice_hw *hw);
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
@@ -294,8 +346,7 @@ int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
-int
-ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
+int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add);
int ice_lldp_execute_pending_mib(struct ice_hw *hw);
int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
@@ -305,5 +356,9 @@ int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd);
+int ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle);
+int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
+int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val);
+int ice_write_cgu_reg(struct ice_hw *hw, u32 addr, u32 val);
#endif /* _ICE_COMMON_H_ */
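A short usage sketch for the health-report helpers declared above, for illustration only: it assumes an already-initialized struct ice_hw and a caller-chosen event_source value (the valid event-source encodings belong to the set_health_status_cfg command and are not shown here); the function name is hypothetical.

/* Hypothetical example, not part of the patch */
static int ice_example_enable_health_events(struct ice_hw *hw, u8 event_source)
{
	/* Older firmware does not implement the health-report AQ commands */
	if (!ice_is_fw_health_report_supported(hw))
		return -EOPNOTSUPP;

	/* Ask firmware to deliver the selected class of health events */
	return ice_aq_set_health_status_cfg(hw, event_source);
}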
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index e3959ad442a2..dcb837cadd18 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -90,7 +90,7 @@ bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
static int
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
+ size_t size = cq->num_sq_entries * sizeof(struct libie_aq_desc);
cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
&cq->sq.desc_buf.pa,
@@ -110,7 +110,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
static int
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
+ size_t size = cq->num_rq_entries * sizeof(struct libie_aq_desc);
cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
&cq->rq.desc_buf.pa,
@@ -159,7 +159,7 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
/* allocate the mapped buffers */
for (i = 0; i < cq->num_rq_entries; i++) {
- struct ice_aq_desc *desc;
+ struct libie_aq_desc *desc;
struct ice_dma_mem *bi;
bi = &cq->rq.r.rq_bi[i];
@@ -173,9 +173,9 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
/* now configure the descriptors for use */
desc = ICE_CTL_Q_DESC(cq->rq, i);
- desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
- if (cq->rq_buf_size > ICE_AQ_LG_BUF)
- desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
+ if (cq->rq_buf_size > LIBIE_AQ_LG_BUF)
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
desc->opcode = 0;
/* This is in accordance with control queue design, there is no
* register for buffer size configuration
@@ -858,7 +858,7 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
struct ice_ctl_q_ring *sq = &cq->sq;
u16 ntc = sq->next_to_clean;
- struct ice_aq_desc *desc;
+ struct libie_aq_desc *desc;
desc = ICE_CTL_Q_DESC(*sq, ntc);
@@ -912,7 +912,7 @@ static const char *ice_ctl_q_str(enum ice_ctl_q qtype)
static void ice_debug_cq(struct ice_hw *hw, struct ice_ctl_q_info *cq,
void *desc, void *buf, u16 buf_len, bool response)
{
- struct ice_aq_desc *cq_desc = desc;
+ struct libie_aq_desc *cq_desc = desc;
u16 datalen, flags;
if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
@@ -939,7 +939,8 @@ static void ice_debug_cq(struct ice_hw *hw, struct ice_ctl_q_info *cq,
* by the DD and/or CMP flag set or a command with the RD flag set.
*/
if (buf && cq_desc->datalen &&
- (flags & (ICE_AQ_FLAG_DD | ICE_AQ_FLAG_CMP | ICE_AQ_FLAG_RD))) {
+ (flags & (LIBIE_AQ_FLAG_DD | LIBIE_AQ_FLAG_CMP |
+ LIBIE_AQ_FLAG_RD))) {
char prefix[] = KBUILD_MODNAME " 0x12341234 0x12341234 ";
sprintf(prefix, KBUILD_MODNAME " 0x%08X 0x%08X ",
@@ -992,11 +993,11 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
*/
int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
- struct ice_aq_desc *desc, void *buf, u16 buf_size,
+ struct libie_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_dma_mem *dma_buf = NULL;
- struct ice_aq_desc *desc_on_ring;
+ struct libie_aq_desc *desc_on_ring;
bool cmd_completed = false;
int status = 0;
u16 retval = 0;
@@ -1007,7 +1008,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
return -EBUSY;
mutex_lock(&cq->sq_lock);
- cq->sq_last_status = ICE_AQ_RC_OK;
+ cq->sq_last_status = LIBIE_AQ_RC_OK;
if (!cq->sq.count) {
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Send queue not initialized.\n");
@@ -1028,9 +1029,9 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
goto sq_send_command_error;
}
- desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
- if (buf_size > ICE_AQ_LG_BUF)
- desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_BUF);
+ if (buf_size > LIBIE_AQ_LG_BUF)
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
}
val = rd32(hw, cq->sq.head);
@@ -1112,9 +1113,9 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
retval &= 0xff;
}
cmd_completed = true;
- if (!status && retval != ICE_AQ_RC_OK)
+ if (!status && retval != LIBIE_AQ_RC_OK)
status = -EIO;
- cq->sq_last_status = (enum ice_aq_err)retval;
+ cq->sq_last_status = (enum libie_aq_err)retval;
}
ice_debug(hw, ICE_DBG_AQ_MSG, "ATQ: desc and buffer writeback:\n");
@@ -1149,12 +1150,12 @@ sq_send_command_error:
*
* Fill the desc with default values
*/
-void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
+void ice_fill_dflt_direct_cmd_desc(struct libie_aq_desc *desc, u16 opcode)
{
/* zero out the desc */
memset(desc, 0, sizeof(*desc));
desc->opcode = cpu_to_le16(opcode);
- desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_SI);
}
/**
@@ -1172,9 +1173,9 @@ int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending)
{
+ enum libie_aq_err rq_last_status;
u16 ntc = cq->rq.next_to_clean;
- enum ice_aq_err rq_last_status;
- struct ice_aq_desc *desc;
+ struct libie_aq_desc *desc;
struct ice_dma_mem *bi;
int ret_code = 0;
u16 desc_idx;
@@ -1207,9 +1208,9 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
desc = ICE_CTL_Q_DESC(cq->rq, ntc);
desc_idx = ntc;
- rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
+ rq_last_status = (enum libie_aq_err)le16_to_cpu(desc->retval);
flags = le16_to_cpu(desc->flags);
- if (flags & ICE_AQ_FLAG_ERR) {
+ if (flags & LIBIE_AQ_FLAG_ERR) {
ret_code = -EIO;
ice_debug(hw, ICE_DBG_AQ_MSG, "Control Receive Queue Event 0x%04X received with error 0x%X\n",
le16_to_cpu(desc->opcode), rq_last_status);
@@ -1230,9 +1231,9 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
bi = &cq->rq.r.rq_bi[ntc];
memset(desc, 0, sizeof(*desc));
- desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
- if (cq->rq_buf_size > ICE_AQ_LG_BUF)
- desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
+ desc->flags = cpu_to_le16(LIBIE_AQ_FLAG_BUF);
+ if (cq->rq_buf_size > LIBIE_AQ_LG_BUF)
+ desc->flags |= cpu_to_le16(LIBIE_AQ_FLAG_LB);
desc->datalen = cpu_to_le16(bi->size);
desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index ca97b7365a1b..788040dd662e 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -12,7 +12,7 @@
#define ICE_SBQ_MAX_BUF_LEN 512
#define ICE_CTL_Q_DESC(R, i) \
- (&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
+ (&(((struct libie_aq_desc *)((R).desc_buf.va))[i]))
#define ICE_CTL_Q_DESC_UNUSED(R) \
((u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
@@ -76,12 +76,12 @@ struct ice_ctl_q_ring {
/* sq transaction details */
struct ice_sq_cd {
- struct ice_aq_desc *wb_desc;
+ struct libie_aq_desc *wb_desc;
};
/* rq event information */
struct ice_rq_event_info {
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 msg_len;
u16 buf_len;
u8 *msg_buf;
@@ -96,7 +96,7 @@ struct ice_ctl_q_info {
u16 num_sq_entries; /* send queue depth */
u16 rq_buf_size; /* receive queue buffer size */
u16 sq_buf_size; /* send queue buffer size */
- enum ice_aq_err sq_last_status; /* last status on send queue */
+ enum libie_aq_err sq_last_status; /* last status on send queue */
struct mutex sq_lock; /* Send queue lock */
struct mutex rq_lock; /* Receive queue lock */
};
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 74418c445cc4..abea84f14658 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -24,10 +24,10 @@ ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_get_mib *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.lldp_get_mib;
+ cmd = libie_aq_raw(&desc);
if (buf_size == 0 || !buf)
return -EINVAL;
@@ -64,9 +64,9 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_set_mib_change *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.lldp_set_event;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_mib_change);
@@ -95,9 +95,9 @@ ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_stop *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.lldp_stop;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_stop);
@@ -121,9 +121,9 @@ ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
int ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_start *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.lldp_start;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_start);
@@ -677,11 +677,11 @@ ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
bool *dcbx_agent_status, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_stop_start_specific_agent *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 opcode;
int status;
- cmd = &desc.params.lldp_agent_ctrl;
+ cmd = libie_aq_raw(&desc);
opcode = ice_aqc_opc_lldp_stop_start_specific_agent;
@@ -714,7 +714,7 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
struct ice_aqc_get_cee_dcb_cfg_resp *buff,
struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cee_dcb_cfg);
@@ -733,13 +733,13 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
{
struct ice_aqc_set_query_pfc_mode *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (pfc_mode > ICE_AQC_PFC_DSCP_BASED_PFC)
return -EINVAL;
- cmd = &desc.params.set_query_pfc_mode;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_pfc_mode);
@@ -914,7 +914,7 @@ static int ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg);
/* Don't treat ENOENT as an error for Remote MIBs */
- if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
+ if (pi->hw->adminq.sq_last_status == LIBIE_AQ_RC_ENOENT)
ret = 0;
out:
@@ -941,7 +941,7 @@ int ice_get_dcb_cfg(struct ice_port_info *pi)
/* CEE mode */
ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE);
ice_cee_to_dcb_cfg(&cee_cfg, pi);
- } else if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) {
+ } else if (pi->hw->adminq.sq_last_status == LIBIE_AQ_RC_ENOENT) {
/* CEE mode not enabled try querying IEEE data */
dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
@@ -965,7 +965,7 @@ void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
struct ice_aqc_lldp_get_mib *mib;
u8 change_type, dcbx_mode;
- mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
+ mib = libie_aq_raw(&event->desc);
change_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type);
if (change_type == ICE_AQ_LLDP_MIB_REMOTE)
@@ -1288,7 +1288,7 @@ ice_add_dscp_up_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
tlv->ouisubtype = htonl(ouisubtype);
/* bytes 0 - 63 - IPv4 DSCP2UP LUT */
- for (i = 0; i < ICE_DSCP_NUM_VAL; i++) {
+ for (i = 0; i < DSCP_MAX; i++) {
/* IPv4 mapping */
buf[i] = dcbcfg->dscp_map[i];
/* IPv6 mapping */
@@ -1537,12 +1537,12 @@ ice_aq_query_port_ets(struct ice_port_info *pi,
struct ice_sq_cd *cd)
{
struct ice_aqc_query_port_ets *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (!pi)
return -EINVAL;
- cmd = &desc.params.port_ets;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_port_ets);
cmd->port_teid = pi->root->info.node_teid;
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index a7c510832824..9fc8681cc58e 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -352,8 +352,8 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
struct ice_aqc_port_ets_elem buf = { 0 };
struct ice_dcbx_cfg *old_cfg, *curr_cfg;
struct device *dev = ice_pf_to_dev(pf);
+ struct iidc_rdma_event *event;
int ret = ICE_DCB_NO_HW_CHG;
- struct iidc_event *event;
struct ice_vsi *pf_vsi;
curr_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
@@ -405,7 +405,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
goto free_cfg;
}
- set_bit(IIDC_EVENT_BEFORE_TC_CHANGE, event->type);
+ set_bit(IIDC_RDMA_EVENT_BEFORE_TC_CHANGE, event->type);
ice_send_event_to_aux(pf, event);
kfree(event);
@@ -740,7 +740,9 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked)
{
struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
- struct iidc_event *event;
+ struct iidc_rdma_priv_dev_info *privd;
+ struct iidc_rdma_core_dev_info *cdev;
+ struct iidc_rdma_event *event;
u8 tc_map = 0;
int v, ret;
@@ -783,13 +785,17 @@ void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked)
if (vsi->type == ICE_VSI_PF)
ice_dcbnl_set_all(vsi);
}
- if (!locked) {
+
+ cdev = pf->cdev_info;
+ if (cdev && !locked) {
+ privd = cdev->iidc_priv;
+ ice_setup_dcb_qos_info(pf, &privd->qos_info);
/* Notify the AUX drivers that TC change is finished */
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (!event)
return;
- set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
+ set_bit(IIDC_RDMA_EVENT_AFTER_TC_CHANGE, event->type);
ice_send_event_to_aux(pf, event);
kfree(event);
}
@@ -846,7 +852,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
goto dcb_init_err;
}
- ice_cfg_sw_lldp(pf_vsi, false, true);
+ ice_cfg_sw_rx_lldp(pf, true);
pf->dcbx_cap = ice_dcb_get_mode(port_info, true);
return 0;
@@ -945,6 +951,37 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
}
/**
+ * ice_setup_dcb_qos_info - Setup DCB QoS information
+ * @pf: ptr to ice_pf
+ * @qos_info: QoS param instance
+ */
+void ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_rdma_qos_params *qos_info)
+{
+ struct ice_dcbx_cfg *dcbx_cfg;
+ unsigned int i;
+ u32 up2tc;
+
+ if (!pf || !qos_info)
+ return;
+
+ dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);
+
+ qos_info->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
+
+ for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
+ qos_info->up2tc[i] = (up2tc >> (i * 3)) & 0x7;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ qos_info->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];
+
+ qos_info->pfc_mode = dcbx_cfg->pfc_mode;
+ if (qos_info->pfc_mode == IIDC_DSCP_PFC_MODE)
+ for (i = 0; i < DSCP_MAX; i++)
+ qos_info->dscp_map[i] = dcbx_cfg->dscp_map[i];
+}
+
+/**
* ice_dcb_is_mib_change_pending - Check if MIB change is pending
* @state: MIB change state
*/
@@ -983,7 +1020,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
}
pi = pf->hw.port_info;
- mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
+ mib = libie_aq_raw(&event->desc);
/* Ignore if event is not for Nearest Bridge */
mib_type = FIELD_GET(ICE_AQ_LLDP_BRID_TYPE_M, mib->type);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 800879a88c5e..da9ba814b4e8 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -31,6 +31,9 @@ void
ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
struct ice_tx_buf *first);
void
+ice_setup_dcb_qos_info(struct ice_pf *pf,
+ struct iidc_rdma_qos_params *qos_info);
+void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_rq_event_info *event);
/**
@@ -134,5 +137,11 @@ static inline void ice_update_dcb_stats(struct ice_pf *pf) { }
static inline void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event) { }
static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc) { }
+static inline void
+ice_setup_dcb_qos_info(struct ice_pf *pf, struct iidc_rdma_qos_params *qos_info)
+{
+ qos_info->num_tc = 1;
+ qos_info->tc_info[0].rel_bw = 100;
+}
#endif /* CONFIG_DCB */
#endif /* _ICE_DCB_LIB_H_ */
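A minimal caller sketch for ice_setup_dcb_qos_info() added above, for illustration only: it assumes an already-initialized struct ice_pf and fills a stack-allocated iidc_rdma_qos_params for an auxiliary (RDMA) consumer; the function name is hypothetical.

/* Hypothetical example, not part of the patch */
static void ice_example_fill_qos(struct ice_pf *pf)
{
	struct iidc_rdma_qos_params qos_info = {};

	/* Populate TC count, UP-to-TC and DSCP mappings from the local DCBX config */
	ice_setup_dcb_qos_info(pf, &qos_info);

	/* qos_info can now be handed to the RDMA auxiliary driver */
}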
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index 6d50b90a7359..a10c1c8d8697 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -754,7 +754,7 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
if (!ice_is_feature_supported(pf, ICE_F_DSCP))
return -EOPNOTSUPP;
- if (app->protocol >= ICE_DSCP_NUM_VAL) {
+ if (app->protocol >= DSCP_MAX) {
netdev_err(netdev, "DSCP value 0x%04X out of range\n",
app->protocol);
return -EINVAL;
@@ -931,7 +931,7 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
/* if the last DSCP mapping just got deleted, need to switch
* to L2 VLAN QoS mode
*/
- if (bitmap_empty(new_cfg->dscp_mapped, ICE_DSCP_NUM_VAL) &&
+ if (bitmap_empty(new_cfg->dscp_mapped, DSCP_MAX) &&
new_cfg->pfc_mode == ICE_QOS_MODE_DSCP) {
ret = ice_aq_set_pfc_mode(&pf->hw,
ICE_AQC_PFC_VLAN_BASED_PFC,
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index 03988be03729..3b2d9c436979 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -1101,16 +1101,16 @@ struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
return &bld->buf;
}
-static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err)
+static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum libie_aq_err aq_err)
{
switch (aq_err) {
- case ICE_AQ_RC_ENOSEC:
- case ICE_AQ_RC_EBADSIG:
+ case LIBIE_AQ_RC_ENOSEC:
+ case LIBIE_AQ_RC_EBADSIG:
return ICE_DDP_PKG_FILE_SIGNATURE_INVALID;
- case ICE_AQ_RC_ESVN:
+ case LIBIE_AQ_RC_ESVN:
return ICE_DDP_PKG_FILE_REVISION_TOO_LOW;
- case ICE_AQ_RC_EBADMAN:
- case ICE_AQ_RC_EBADBUF:
+ case LIBIE_AQ_RC_EBADMAN:
+ case LIBIE_AQ_RC_EBADBUF:
return ICE_DDP_PKG_LOAD_ERROR;
default:
return ICE_DDP_PKG_ERR;
@@ -1180,7 +1180,7 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u32 *error_info, struct ice_sq_cd *cd)
{
struct ice_aqc_download_pkg *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (error_offset)
@@ -1188,9 +1188,9 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
if (error_info)
*error_info = 0;
- cmd = &desc.params.download_pkg;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
if (last_buf)
cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
@@ -1259,7 +1259,7 @@ static enum ice_ddp_state ice_ddp_send_hunk(struct ice_ddp_send_ctx *ctx,
struct ice_buf_hdr *prev_hunk = ctx->hdr;
struct ice_hw *hw = ctx->hw;
bool prev_was_last = !hunk;
- enum ice_aq_err aq_err;
+ enum libie_aq_err aq_err;
u32 offset, info;
int attempt, err;
@@ -1278,7 +1278,8 @@ static enum ice_ddp_state ice_ddp_send_hunk(struct ice_ddp_send_ctx *ctx,
prev_was_last, &offset, &info, NULL);
aq_err = hw->adminq.sq_last_status;
- if (aq_err != ICE_AQ_RC_ENOSEC && aq_err != ICE_AQ_RC_EBADSIG)
+ if (aq_err != LIBIE_AQ_RC_ENOSEC &&
+ aq_err != LIBIE_AQ_RC_EBADSIG)
break;
}
@@ -1537,7 +1538,7 @@ ice_post_dwnld_pkg_actions(struct ice_hw *hw)
static enum ice_ddp_state
ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
- enum ice_aq_err aq_err = hw->adminq.sq_last_status;
+ enum libie_aq_err aq_err = hw->adminq.sq_last_status;
enum ice_ddp_state state = ICE_DDP_PKG_ERR;
struct ice_ddp_send_ctx ctx = { .hw = hw };
int status;
@@ -1687,7 +1688,7 @@ static int ice_aq_get_pkg_info_list(struct ice_hw *hw,
struct ice_aqc_get_pkg_info_resp *pkg_info,
u16 buf_size, struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
@@ -1711,7 +1712,7 @@ static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u32 *error_info, struct ice_sq_cd *cd)
{
struct ice_aqc_download_pkg *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (error_offset)
@@ -1719,9 +1720,9 @@ static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
if (error_info)
*error_info = 0;
- cmd = &desc.params.download_pkg;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
if (last_buf)
cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
@@ -1753,10 +1754,10 @@ static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
}
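
The descriptor changes in this file follow the libie admin-queue conversion: rather than selecting a typed member from the descriptor's params union, the command struct is overlaid on the raw parameter area via libie_aq_raw(). A user-space sketch of that access pattern (struct layouts here are simplified and hypothetical; only the style of access mirrors the patch):

#include <stdint.h>
#include <stdio.h>

/* simplified stand-ins; the real descriptor lives in the libie headers */
struct aq_desc { uint16_t flags; uint16_t opcode; uint8_t params[16]; };
struct download_pkg_cmd { uint8_t flags; uint8_t rsvd[3]; uint32_t addr; };

/* analogous to libie_aq_raw(): hand out the raw parameter area */
static void *aq_raw(struct aq_desc *desc)
{
        return desc->params;
}

int main(void)
{
        struct aq_desc desc = { 0 };
        struct download_pkg_cmd *cmd = aq_raw(&desc);

        cmd->flags = 0x01;      /* e.g. a "last buffer" flag */
        printf("raw param byte 0: 0x%02x\n", desc.params[0]);
        return 0;
}
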
@@ -2301,6 +2302,8 @@ enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf,
return ICE_DDP_PKG_ERR;
buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
+ if (!buf_copy)
+ return ICE_DDP_PKG_ERR;
state = ice_init_pkg(hw, buf_copy, len);
if (!ice_is_init_pkg_successful(state)) {
@@ -2333,10 +2336,10 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
struct ice_sq_cd *cd, u8 *flags, bool set)
{
struct ice_aqc_get_set_tx_topo *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.get_set_tx_topo;
+ cmd = libie_aq_raw(&desc);
if (set) {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo);
cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED;
@@ -2345,22 +2348,22 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
- if (ice_is_e825c(hw))
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
} else {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
- }
- if (!ice_is_e825c(hw))
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ if (hw->mac_type == ICE_MAC_E810 ||
+ hw->mac_type == ICE_MAC_GENERIC)
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
+ }
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
if (status)
return status;
/* read the return flag values (first byte) for get operation */
if (!set && flags)
- *flags = desc.params.get_set_tx_topo.set_flags;
+ *flags = cmd->set_flags;
return 0;
}
@@ -2374,7 +2377,13 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
* The function will apply the new Tx topology from the package buffer
* if available.
*
- * Return: zero when update was successful, negative values otherwise.
+ * Return:
+ * * 0 - Successfully applied topology configuration.
+ * * -EBUSY - Failed to acquire global configuration lock.
+ * * -EEXIST - Topology configuration has already been applied.
+ * * -EIO - Unable to apply topology configuration.
+ * * -ENODEV - Failed to re-initialize device after applying configuration.
+ * * Other negative error codes indicate unexpected failures.
*/
int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len)
{
@@ -2407,7 +2416,7 @@ int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len)
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n");
- return status;
+ return -EIO;
}
/* Is default topology already applied ? */
@@ -2494,31 +2503,45 @@ update_topo:
ICE_GLOBAL_CFG_LOCK_TIMEOUT);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n");
- return status;
+ return -EBUSY;
}
/* Check if reset was triggered already. */
reg = rd32(hw, GLGEN_RSTAT);
if (reg & GLGEN_RSTAT_DEVSTATE_M) {
- /* Reset is in progress, re-init the HW again */
ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. Layer topology might be applied already\n");
ice_check_reset(hw);
- return 0;
+ /* Reset is in progress, re-init the HW again */
+ goto reinit_hw;
}
/* Set new topology */
status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true);
if (status) {
- ice_debug(hw, ICE_DBG_INIT, "Failed setting Tx topology\n");
- return status;
+ ice_debug(hw, ICE_DBG_INIT, "Failed to set Tx topology, status %pe\n",
+ ERR_PTR(status));
+ /* only report -EIO here as the caller checks the error value
+ * and prints an informational message stating that the driver
+ * failed to program the Tx topology.
+ */
+ status = -EIO;
}
- /* New topology is updated, delay 1 second before issuing the CORER */
+ /* Even if the Tx topology config failed, a CORER is needed here to
+ * clear the global configuration lock. Delay 1 second to allow the
+ * hardware to settle, then issue the CORER.
+ */
msleep(1000);
ice_reset(hw, ICE_RESET_CORER);
- /* CORER will clear the global lock, so no explicit call
- * required for release.
- */
+ ice_check_reset(hw);
+
+reinit_hw:
+ /* Since we triggered a CORER, re-initialize hardware */
+ ice_deinit_hw(hw);
+ if (ice_init_hw(hw)) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to re-init hardware after setting Tx topology\n");
+ return -ENODEV;
+ }
- return 0;
+ return status;
}
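
Given the return values enumerated above, a caller can distinguish "already applied" and "lock busy" from hard failures. A runnable sketch of that decision logic (the stub below only stands in for ice_cfg_tx_topo(); the real caller lives in the driver's probe path):

#include <errno.h>
#include <stdio.h>

/* stand-in for ice_cfg_tx_topo(); returns one of the codes documented above */
static int cfg_tx_topo_stub(int outcome)
{
        return outcome;
}

int main(void)
{
        switch (cfg_tx_topo_stub(-EEXIST)) {
        case 0:
                puts("new Tx topology applied");
                break;
        case -EEXIST:
                puts("topology already applied, nothing to do");
                break;
        case -EBUSY:
                puts("global config lock busy, retry later");
                break;
        case -ENODEV:
                puts("HW re-init failed, treat as fatal");
                break;
        default:
                puts("apply failed, keep the default topology");
                break;
        }
        return 0;
}
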
diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c
index 9fc0fd95a13d..f450250fc827 100644
--- a/drivers/net/ethernet/intel/ice/ice_debugfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c
@@ -1,647 +1,20 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022, Intel Corporation. */
-#include <linux/fs.h>
#include <linux/debugfs.h>
-#include <linux/random.h>
-#include <linux/vmalloc.h>
#include "ice.h"
static struct dentry *ice_debugfs_root;
-/* create a define that has an extra module that doesn't really exist. this
- * is so we can add a module 'all' to easily enable/disable all the modules
- */
-#define ICE_NR_FW_LOG_MODULES (ICE_AQC_FW_LOG_ID_MAX + 1)
-
-/* the ordering in this array is important. it matches the ordering of the
- * values in the FW so the index is the same value as in ice_aqc_fw_logging_mod
- */
-static const char * const ice_fwlog_module_string[] = {
- "general",
- "ctrl",
- "link",
- "link_topo",
- "dnl",
- "i2c",
- "sdp",
- "mdio",
- "adminq",
- "hdma",
- "lldp",
- "dcbx",
- "dcb",
- "xlr",
- "nvm",
- "auth",
- "vpd",
- "iosf",
- "parser",
- "sw",
- "scheduler",
- "txq",
- "rsvd",
- "post",
- "watchdog",
- "task_dispatch",
- "mng",
- "synce",
- "health",
- "tsdrv",
- "pfreg",
- "mdlver",
- "all",
-};
-
-/* the ordering in this array is important. it matches the ordering of the
- * values in the FW so the index is the same value as in ice_fwlog_level
- */
-static const char * const ice_fwlog_level_string[] = {
- "none",
- "error",
- "warning",
- "normal",
- "verbose",
-};
-
-static const char * const ice_fwlog_log_size[] = {
- "128K",
- "256K",
- "512K",
- "1M",
- "2M",
-};
-
-/**
- * ice_fwlog_print_module_cfg - print current FW logging module configuration
- * @hw: pointer to the HW structure
- * @module: module to print
- * @s: the seq file to put data into
- */
-static void
-ice_fwlog_print_module_cfg(struct ice_hw *hw, int module, struct seq_file *s)
-{
- struct ice_fwlog_cfg *cfg = &hw->fwlog_cfg;
- struct ice_fwlog_module_entry *entry;
-
- if (module != ICE_AQC_FW_LOG_ID_MAX) {
- entry = &cfg->module_entries[module];
-
- seq_printf(s, "\tModule: %s, Log Level: %s\n",
- ice_fwlog_module_string[entry->module_id],
- ice_fwlog_level_string[entry->log_level]);
- } else {
- int i;
-
- for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
- entry = &cfg->module_entries[i];
-
- seq_printf(s, "\tModule: %s, Log Level: %s\n",
- ice_fwlog_module_string[entry->module_id],
- ice_fwlog_level_string[entry->log_level]);
- }
- }
-}
-
-static int ice_find_module_by_dentry(struct ice_pf *pf, struct dentry *d)
-{
- int i, module;
-
- module = -1;
- /* find the module based on the dentry */
- for (i = 0; i < ICE_NR_FW_LOG_MODULES; i++) {
- if (d == pf->ice_debugfs_pf_fwlog_modules[i]) {
- module = i;
- break;
- }
- }
-
- return module;
-}
-
-/**
- * ice_debugfs_module_show - read from 'module' file
- * @s: the opened file
- * @v: pointer to the offset
- */
-static int ice_debugfs_module_show(struct seq_file *s, void *v)
-{
- const struct file *filp = s->file;
- struct dentry *dentry;
- struct ice_pf *pf;
- int module;
-
- dentry = file_dentry(filp);
- pf = s->private;
-
- module = ice_find_module_by_dentry(pf, dentry);
- if (module < 0) {
- dev_info(ice_pf_to_dev(pf), "unknown module\n");
- return -EINVAL;
- }
-
- ice_fwlog_print_module_cfg(&pf->hw, module, s);
-
- return 0;
-}
-
-static int ice_debugfs_module_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, ice_debugfs_module_show, inode->i_private);
-}
-
-/**
- * ice_debugfs_module_write - write into 'module' file
- * @filp: the opened file
- * @buf: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- */
-static ssize_t
-ice_debugfs_module_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ice_pf *pf = file_inode(filp)->i_private;
- struct dentry *dentry = file_dentry(filp);
- struct device *dev = ice_pf_to_dev(pf);
- char user_val[16], *cmd_buf;
- int module, log_level, cnt;
-
- /* don't allow partial writes or invalid input */
- if (*ppos != 0 || count > 8)
- return -EINVAL;
-
- cmd_buf = memdup_user_nul(buf, count);
- if (IS_ERR(cmd_buf))
- return PTR_ERR(cmd_buf);
-
- module = ice_find_module_by_dentry(pf, dentry);
- if (module < 0) {
- dev_info(dev, "unknown module\n");
- return -EINVAL;
- }
-
- cnt = sscanf(cmd_buf, "%s", user_val);
- if (cnt != 1)
- return -EINVAL;
-
- log_level = sysfs_match_string(ice_fwlog_level_string, user_val);
- if (log_level < 0) {
- dev_info(dev, "unknown log level '%s'\n", user_val);
- return -EINVAL;
- }
-
- if (module != ICE_AQC_FW_LOG_ID_MAX) {
- ice_pf_fwlog_update_module(pf, log_level, module);
- } else {
- /* the module 'all' is a shortcut so that we can set
- * all of the modules to the same level quickly
- */
- int i;
-
- for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++)
- ice_pf_fwlog_update_module(pf, log_level, i);
- }
-
- return count;
-}
-
-static const struct file_operations ice_debugfs_module_fops = {
- .owner = THIS_MODULE,
- .open = ice_debugfs_module_open,
- .read = seq_read,
- .release = single_release,
- .write = ice_debugfs_module_write,
-};
-
-/**
- * ice_debugfs_nr_messages_read - read from 'nr_messages' file
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- */
-static ssize_t ice_debugfs_nr_messages_read(struct file *filp,
- char __user *buffer, size_t count,
- loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct ice_hw *hw = &pf->hw;
- char buff[32] = {};
-
- snprintf(buff, sizeof(buff), "%d\n",
- hw->fwlog_cfg.log_resolution);
-
- return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
-}
-
-/**
- * ice_debugfs_nr_messages_write - write into 'nr_messages' file
- * @filp: the opened file
- * @buf: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- */
-static ssize_t
-ice_debugfs_nr_messages_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- char user_val[8], *cmd_buf;
- s16 nr_messages;
- ssize_t ret;
-
- /* don't allow partial writes or invalid input */
- if (*ppos != 0 || count > 4)
- return -EINVAL;
-
- cmd_buf = memdup_user_nul(buf, count);
- if (IS_ERR(cmd_buf))
- return PTR_ERR(cmd_buf);
-
- ret = sscanf(cmd_buf, "%s", user_val);
- if (ret != 1)
- return -EINVAL;
-
- ret = kstrtos16(user_val, 0, &nr_messages);
- if (ret)
- return ret;
-
- if (nr_messages < ICE_AQC_FW_LOG_MIN_RESOLUTION ||
- nr_messages > ICE_AQC_FW_LOG_MAX_RESOLUTION) {
- dev_err(dev, "Invalid FW log number of messages %d, value must be between %d - %d\n",
- nr_messages, ICE_AQC_FW_LOG_MIN_RESOLUTION,
- ICE_AQC_FW_LOG_MAX_RESOLUTION);
- return -EINVAL;
- }
-
- hw->fwlog_cfg.log_resolution = nr_messages;
-
- return count;
-}
-
-static const struct file_operations ice_debugfs_nr_messages_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = ice_debugfs_nr_messages_read,
- .write = ice_debugfs_nr_messages_write,
-};
-
-/**
- * ice_debugfs_enable_read - read from 'enable' file
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- */
-static ssize_t ice_debugfs_enable_read(struct file *filp,
- char __user *buffer, size_t count,
- loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct ice_hw *hw = &pf->hw;
- char buff[32] = {};
-
- snprintf(buff, sizeof(buff), "%u\n",
- (u16)(hw->fwlog_cfg.options &
- ICE_FWLOG_OPTION_IS_REGISTERED) >> 3);
-
- return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
-}
-
-/**
- * ice_debugfs_enable_write - write into 'enable' file
- * @filp: the opened file
- * @buf: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- */
-static ssize_t
-ice_debugfs_enable_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct ice_hw *hw = &pf->hw;
- char user_val[8], *cmd_buf;
- bool enable;
- ssize_t ret;
-
- /* don't allow partial writes or invalid input */
- if (*ppos != 0 || count > 2)
- return -EINVAL;
-
- cmd_buf = memdup_user_nul(buf, count);
- if (IS_ERR(cmd_buf))
- return PTR_ERR(cmd_buf);
-
- ret = sscanf(cmd_buf, "%s", user_val);
- if (ret != 1)
- return -EINVAL;
-
- ret = kstrtobool(user_val, &enable);
- if (ret)
- goto enable_write_error;
-
- if (enable)
- hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_ARQ_ENA;
- else
- hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_ARQ_ENA;
-
- ret = ice_fwlog_set(hw, &hw->fwlog_cfg);
- if (ret)
- goto enable_write_error;
-
- if (enable)
- ret = ice_fwlog_register(hw);
- else
- ret = ice_fwlog_unregister(hw);
-
- if (ret)
- goto enable_write_error;
-
- /* if we get here, nothing went wrong; return count since we didn't
- * really write anything
- */
- ret = (ssize_t)count;
-
-enable_write_error:
- /* This function always consumes all of the written input, or produces
- * an error. Check and enforce this. Otherwise, the write operation
- * won't complete properly.
- */
- if (WARN_ON(ret != (ssize_t)count && ret >= 0))
- ret = -EIO;
-
- return ret;
-}
-
-static const struct file_operations ice_debugfs_enable_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = ice_debugfs_enable_read,
- .write = ice_debugfs_enable_write,
-};
-
-/**
- * ice_debugfs_log_size_read - read from 'log_size' file
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- */
-static ssize_t ice_debugfs_log_size_read(struct file *filp,
- char __user *buffer, size_t count,
- loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct ice_hw *hw = &pf->hw;
- char buff[32] = {};
- int index;
-
- index = hw->fwlog_ring.index;
- snprintf(buff, sizeof(buff), "%s\n", ice_fwlog_log_size[index]);
-
- return simple_read_from_buffer(buffer, count, ppos, buff, strlen(buff));
-}
-
-/**
- * ice_debugfs_log_size_write - write into 'log_size' file
- * @filp: the opened file
- * @buf: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- */
-static ssize_t
-ice_debugfs_log_size_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- char user_val[8], *cmd_buf;
- ssize_t ret;
- int index;
-
- /* don't allow partial writes or invalid input */
- if (*ppos != 0 || count > 5)
- return -EINVAL;
-
- cmd_buf = memdup_user_nul(buf, count);
- if (IS_ERR(cmd_buf))
- return PTR_ERR(cmd_buf);
-
- ret = sscanf(cmd_buf, "%s", user_val);
- if (ret != 1)
- return -EINVAL;
-
- index = sysfs_match_string(ice_fwlog_log_size, user_val);
- if (index < 0) {
- dev_info(dev, "Invalid log size '%s'. The value must be one of 128K, 256K, 512K, 1M, 2M\n",
- user_val);
- ret = -EINVAL;
- goto log_size_write_error;
- } else if (hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED) {
- dev_info(dev, "FW logging is currently running. Please disable FW logging to change log_size\n");
- ret = -EINVAL;
- goto log_size_write_error;
- }
-
- /* free all the buffers and the tracking info and resize */
- ice_fwlog_realloc_rings(hw, index);
-
- /* if we get here, nothing went wrong; return count since we didn't
- * really write anything
- */
- ret = (ssize_t)count;
-
-log_size_write_error:
- /* This function always consumes all of the written input, or produces
- * an error. Check and enforce this. Otherwise, the write operation
- * won't complete properly.
- */
- if (WARN_ON(ret != (ssize_t)count && ret >= 0))
- ret = -EIO;
-
- return ret;
-}
-
-static const struct file_operations ice_debugfs_log_size_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = ice_debugfs_log_size_read,
- .write = ice_debugfs_log_size_write,
-};
-
-/**
- * ice_debugfs_data_read - read from 'data' file
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- */
-static ssize_t ice_debugfs_data_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct ice_hw *hw = &pf->hw;
- int data_copied = 0;
- bool done = false;
-
- if (ice_fwlog_ring_empty(&hw->fwlog_ring))
- return 0;
-
- while (!ice_fwlog_ring_empty(&hw->fwlog_ring) && !done) {
- struct ice_fwlog_data *log;
- u16 cur_buf_len;
-
- log = &hw->fwlog_ring.rings[hw->fwlog_ring.head];
- cur_buf_len = log->data_size;
- if (cur_buf_len >= count) {
- done = true;
- continue;
- }
-
- if (copy_to_user(buffer, log->data, cur_buf_len)) {
- /* if there is an error then bail and return whatever
- * the driver has copied so far
- */
- done = true;
- continue;
- }
-
- data_copied += cur_buf_len;
- buffer += cur_buf_len;
- count -= cur_buf_len;
- *ppos += cur_buf_len;
- ice_fwlog_ring_increment(&hw->fwlog_ring.head,
- hw->fwlog_ring.size);
- }
-
- return data_copied;
-}
-
-/**
- * ice_debugfs_data_write - write into 'data' file
- * @filp: the opened file
- * @buf: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- */
-static ssize_t
-ice_debugfs_data_write(struct file *filp, const char __user *buf, size_t count,
- loff_t *ppos)
-{
- struct ice_pf *pf = filp->private_data;
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
- ssize_t ret;
-
- /* don't allow partial writes */
- if (*ppos != 0)
- return 0;
-
- /* any value is allowed to clear the buffer so no need to even look at
- * what the value is
- */
- if (!(hw->fwlog_cfg.options & ICE_FWLOG_OPTION_IS_REGISTERED)) {
- hw->fwlog_ring.head = 0;
- hw->fwlog_ring.tail = 0;
- } else {
- dev_info(dev, "Can't clear FW log data while FW log running\n");
- ret = -EINVAL;
- goto nr_buffs_write_error;
- }
-
- /* if we get here, nothing went wrong; return count since we didn't
- * really write anything
- */
- ret = (ssize_t)count;
-
-nr_buffs_write_error:
- /* This function always consumes all of the written input, or produces
- * an error. Check and enforce this. Otherwise, the write operation
- * won't complete properly.
- */
- if (WARN_ON(ret != (ssize_t)count && ret >= 0))
- ret = -EIO;
-
- return ret;
-}
-
-static const struct file_operations ice_debugfs_data_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = ice_debugfs_data_read,
- .write = ice_debugfs_data_write,
-};
-
-/**
- * ice_debugfs_fwlog_init - setup the debugfs directory
- * @pf: the ice that is starting up
- */
-void ice_debugfs_fwlog_init(struct ice_pf *pf)
+int ice_debugfs_pf_init(struct ice_pf *pf)
{
const char *name = pci_name(pf->pdev);
- struct dentry *fw_modules_dir;
- struct dentry **fw_modules;
- int i;
-
- /* only support fw log commands on PF 0 */
- if (pf->hw.bus.func)
- return;
-
- /* allocate space for this first because if it fails then we don't
- * need to unwind
- */
- fw_modules = kcalloc(ICE_NR_FW_LOG_MODULES, sizeof(*fw_modules),
- GFP_KERNEL);
- if (!fw_modules)
- return;
pf->ice_debugfs_pf = debugfs_create_dir(name, ice_debugfs_root);
if (IS_ERR(pf->ice_debugfs_pf))
- goto err_create_module_files;
-
- pf->ice_debugfs_pf_fwlog = debugfs_create_dir("fwlog",
- pf->ice_debugfs_pf);
- if (IS_ERR(pf->ice_debugfs_pf))
- goto err_create_module_files;
+ return PTR_ERR(pf->ice_debugfs_pf);
- fw_modules_dir = debugfs_create_dir("modules",
- pf->ice_debugfs_pf_fwlog);
- if (IS_ERR(fw_modules_dir))
- goto err_create_module_files;
-
- for (i = 0; i < ICE_NR_FW_LOG_MODULES; i++) {
- fw_modules[i] = debugfs_create_file(ice_fwlog_module_string[i],
- 0600, fw_modules_dir, pf,
- &ice_debugfs_module_fops);
- if (IS_ERR(fw_modules[i]))
- goto err_create_module_files;
- }
-
- debugfs_create_file("nr_messages", 0600,
- pf->ice_debugfs_pf_fwlog, pf,
- &ice_debugfs_nr_messages_fops);
-
- pf->ice_debugfs_pf_fwlog_modules = fw_modules;
-
- debugfs_create_file("enable", 0600, pf->ice_debugfs_pf_fwlog,
- pf, &ice_debugfs_enable_fops);
-
- debugfs_create_file("log_size", 0600, pf->ice_debugfs_pf_fwlog,
- pf, &ice_debugfs_log_size_fops);
-
- debugfs_create_file("data", 0600, pf->ice_debugfs_pf_fwlog,
- pf, &ice_debugfs_data_fops);
-
- return;
-
-err_create_module_files:
- debugfs_remove_recursive(pf->ice_debugfs_pf_fwlog);
- kfree(fw_modules);
+ return 0;
}
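
debugfs_create_dir() reports failure through the kernel's ERR_PTR encoding, which is why the trimmed-down helper above can return PTR_ERR() directly. A standalone illustration of that encoding, using simplified local copies of the linux/err.h helpers:

#include <stdio.h>

/* simplified user-space copies of the linux/err.h helpers */
#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(long)(err))
#define PTR_ERR(ptr)    ((long)(ptr))
#define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
        void *dir = ERR_PTR(-12);       /* e.g. -ENOMEM from debugfs_create_dir() */

        if (IS_ERR(dir))
                printf("directory creation failed, err=%ld\n", PTR_ERR(dir));
        return 0;
}
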
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index 34fd604132f5..bd4e66df0372 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -6,6 +6,24 @@
/* Device IDs */
#define ICE_DEV_ID_E822_SI_DFLT 0x1888
+/* Intel(R) Ethernet Controller E835-CC for backplane */
+#define ICE_DEV_ID_E835CC_BACKPLANE 0x1248
+/* Intel(R) Ethernet Controller E835-CC for QSFP */
+#define ICE_DEV_ID_E835CC_QSFP56 0x1249
+/* Intel(R) Ethernet Controller E835-CC for SFP */
+#define ICE_DEV_ID_E835CC_SFP 0x124A
+/* Intel(R) Ethernet Controller E835-C for backplane */
+#define ICE_DEV_ID_E835C_BACKPLANE 0x1261
+/* Intel(R) Ethernet Controller E835-C for QSFP */
+#define ICE_DEV_ID_E835C_QSFP 0x1262
+/* Intel(R) Ethernet Controller E835-C for SFP */
+#define ICE_DEV_ID_E835C_SFP 0x1263
+/* Intel(R) Ethernet Controller E835-L for backplane */
+#define ICE_DEV_ID_E835_L_BACKPLANE 0x1265
+/* Intel(R) Ethernet Controller E835-L for QSFP */
+#define ICE_DEV_ID_E835_L_QSFP 0x1266
+/* Intel(R) Ethernet Controller E835-L for SFP */
+#define ICE_DEV_ID_E835_L_SFP 0x1267
/* Intel(R) Ethernet Connection E823-L for backplane */
#define ICE_DEV_ID_E823L_BACKPLANE 0x124C
/* Intel(R) Ethernet Connection E823-L for SFP */
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
index d5ad6d84007c..53b54e395a2e 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.c
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -11,6 +11,43 @@
#define ICE_DPLL_RCLK_NUM_PER_PF 1
#define ICE_DPLL_PIN_ESYNC_PULSE_HIGH_PERCENT 25
#define ICE_DPLL_PIN_GEN_RCLK_FREQ 1953125
+#define ICE_DPLL_PIN_PRIO_OUTPUT 0xff
+#define ICE_DPLL_INPUT_REF_NUM 10
+#define ICE_DPLL_PHASE_OFFSET_PERIOD 2
+#define ICE_DPLL_SW_PIN_INPUT_BASE_SFP 4
+#define ICE_DPLL_SW_PIN_INPUT_BASE_QSFP 6
+#define ICE_DPLL_SW_PIN_OUTPUT_BASE 0
+
+#define ICE_DPLL_PIN_SW_INPUT_ABS(in_idx) \
+ (ICE_DPLL_SW_PIN_INPUT_BASE_SFP + (in_idx))
+
+#define ICE_DPLL_PIN_SW_1_INPUT_ABS_IDX \
+ (ICE_DPLL_PIN_SW_INPUT_ABS(ICE_DPLL_PIN_SW_1_IDX))
+
+#define ICE_DPLL_PIN_SW_2_INPUT_ABS_IDX \
+ (ICE_DPLL_PIN_SW_INPUT_ABS(ICE_DPLL_PIN_SW_2_IDX))
+
+#define ICE_DPLL_PIN_SW_OUTPUT_ABS(out_idx) \
+ (ICE_DPLL_SW_PIN_OUTPUT_BASE + (out_idx))
+
+#define ICE_DPLL_PIN_SW_1_OUTPUT_ABS_IDX \
+ (ICE_DPLL_PIN_SW_OUTPUT_ABS(ICE_DPLL_PIN_SW_1_IDX))
+
+#define ICE_DPLL_PIN_SW_2_OUTPUT_ABS_IDX \
+ (ICE_DPLL_PIN_SW_OUTPUT_ABS(ICE_DPLL_PIN_SW_2_IDX))
+
+#define ICE_SR_PFA_DPLL_DEFAULTS 0x152
+#define ICE_DPLL_PFA_REF_SYNC_TYPE 0x2420
+#define ICE_DPLL_PFA_REF_SYNC_TYPE2 0x2424
+#define ICE_DPLL_PFA_END 0xFFFF
+#define ICE_DPLL_PFA_HEADER_LEN 4
+#define ICE_DPLL_PFA_ENTRY_LEN 3
+#define ICE_DPLL_PFA_MAILBOX_REF_SYNC_PIN_S 4
+#define ICE_DPLL_PFA_MASK_OFFSET 1
+#define ICE_DPLL_PFA_VALUE_OFFSET 2
+
+#define ICE_DPLL_E810C_SFP_NC_PINS 2
+#define ICE_DPLL_E810C_SFP_NC_START 4
/**
* enum ice_dpll_pin_type - enumerate ice pin types:
@@ -18,25 +55,61 @@
* @ICE_DPLL_PIN_TYPE_INPUT: input pin
* @ICE_DPLL_PIN_TYPE_OUTPUT: output pin
* @ICE_DPLL_PIN_TYPE_RCLK_INPUT: recovery clock input pin
+ * @ICE_DPLL_PIN_TYPE_SOFTWARE: software controlled SMA/U.FL pins
*/
enum ice_dpll_pin_type {
ICE_DPLL_PIN_INVALID,
ICE_DPLL_PIN_TYPE_INPUT,
ICE_DPLL_PIN_TYPE_OUTPUT,
ICE_DPLL_PIN_TYPE_RCLK_INPUT,
+ ICE_DPLL_PIN_TYPE_SOFTWARE,
};
static const char * const pin_type_name[] = {
[ICE_DPLL_PIN_TYPE_INPUT] = "input",
[ICE_DPLL_PIN_TYPE_OUTPUT] = "output",
[ICE_DPLL_PIN_TYPE_RCLK_INPUT] = "rclk-input",
+ [ICE_DPLL_PIN_TYPE_SOFTWARE] = "software",
};
+static const char * const ice_dpll_sw_pin_sma[] = { "SMA1", "SMA2" };
+static const char * const ice_dpll_sw_pin_ufl[] = { "U.FL1", "U.FL2" };
+
static const struct dpll_pin_frequency ice_esync_range[] = {
DPLL_PIN_FREQUENCY_RANGE(0, DPLL_PIN_FREQUENCY_1_HZ),
};
/**
+ * ice_dpll_is_sw_pin - check if given pin shall be controlled by SW
+ * @pf: private board structure
+ * @index: index of a pin as understood by FW
+ * @input: true for input, false for output
+ *
+ * Check if the pin shall be controlled by SW instead of providing raw access
+ * for pin control. On the E810 NIC with a dpll there is additional MUX-related
+ * logic between the SMA/U.FL pins/connectors and the dpll device, so it is best
+ * to give the user access through a series of wrapper functions: from the
+ * user's perspective they convey a single function rather than separate pins.
+ *
+ * Return:
+ * * true - pin controlled by SW
+ * * false - pin not controlled by SW
+ */
+static bool ice_dpll_is_sw_pin(struct ice_pf *pf, u8 index, bool input)
+{
+ if (input && pf->hw.device_id == ICE_DEV_ID_E810C_QSFP)
+ index -= ICE_DPLL_SW_PIN_INPUT_BASE_QSFP -
+ ICE_DPLL_SW_PIN_INPUT_BASE_SFP;
+
+ if ((input && (index == ICE_DPLL_PIN_SW_1_INPUT_ABS_IDX ||
+ index == ICE_DPLL_PIN_SW_2_INPUT_ABS_IDX)) ||
+ (!input && (index == ICE_DPLL_PIN_SW_1_OUTPUT_ABS_IDX ||
+ index == ICE_DPLL_PIN_SW_2_OUTPUT_ABS_IDX)))
+ return true;
+ return false;
+}
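
To make the index arithmetic above concrete: on E810-C QSFP boards the FW numbers the SW-controllable inputs starting at 6, so they are rebased onto the SFP base of 4 before comparing against the two SW pin slots. A runnable sketch using the base constants from the patch (the SW pin indices 0 and 1 are assumed here for illustration):

#include <stdbool.h>
#include <stdio.h>

/* base constants as defined in the patch; SW pin indices assumed 0 and 1 */
#define SW_PIN_INPUT_BASE_SFP   4
#define SW_PIN_INPUT_BASE_QSFP  6
#define SW_PIN_OUTPUT_BASE      0
#define SW_1_IDX                0
#define SW_2_IDX                1

static bool is_sw_pin(unsigned int index, bool input, bool qsfp)
{
        /* QSFP boards expose the same SW inputs two indices higher in FW */
        if (input && qsfp)
                index -= SW_PIN_INPUT_BASE_QSFP - SW_PIN_INPUT_BASE_SFP;

        if (input)
                return index == SW_PIN_INPUT_BASE_SFP + SW_1_IDX ||
                       index == SW_PIN_INPUT_BASE_SFP + SW_2_IDX;
        return index == SW_PIN_OUTPUT_BASE + SW_1_IDX ||
               index == SW_PIN_OUTPUT_BASE + SW_2_IDX;
}

int main(void)
{
        printf("QSFP input 6 -> SW pin? %d\n", is_sw_pin(6, true, true));   /* 1 */
        printf("SFP  input 6 -> SW pin? %d\n", is_sw_pin(6, true, false));  /* 0 */
        printf("output 1     -> SW pin? %d\n", is_sw_pin(1, false, false)); /* 1 */
        return 0;
}
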
+
+/**
* ice_dpll_is_reset - check if reset is in progress
* @pf: private board structure
* @extack: error reporting
@@ -95,9 +168,9 @@ ice_dpll_pin_freq_set(struct ice_pf *pf, struct ice_dpll_pin *pin,
}
if (ret) {
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin freq:%u on pin:%u\n",
+ "err:%d %s failed to set pin freq:%u on pin:%u",
ret,
- ice_aq_str(pf->hw.adminq.sq_last_status),
+ libie_aq_str(pf->hw.adminq.sq_last_status),
freq, pin->idx);
return ret;
}
@@ -280,6 +353,87 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv,
}
/**
+ * ice_dpll_sw_pin_frequency_set - callback to set frequency of SW pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: frequency to be set on the pin
+ * @extack: error reporting
+ *
+ * Calls the set frequency command for the corresponding active input/output pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - pin not active or failed to set in hw
+ */
+static int
+ice_dpll_sw_pin_frequency_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 frequency, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *sma = pin_priv;
+ int ret;
+
+ if (!sma->active) {
+ NL_SET_ERR_MSG(extack, "pin is not active");
+ return -EINVAL;
+ }
+ if (sma->direction == DPLL_PIN_DIRECTION_INPUT)
+ ret = ice_dpll_input_frequency_set(NULL, sma->input, dpll,
+ dpll_priv, frequency,
+ extack);
+ else
+ ret = ice_dpll_output_frequency_set(NULL, sma->output, dpll,
+ dpll_priv, frequency,
+ extack);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_sw_pin_frequency_get - callback for get frequency of SW pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: on success holds pin's frequency
+ * @extack: error reporting
+ *
+ * Calls the get frequency command for the corresponding active input/output pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failed to get the frequency from hw
+ */
+static int
+ice_dpll_sw_pin_frequency_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 *frequency, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *sma = pin_priv;
+ int ret;
+
+ if (!sma->active) {
+ *frequency = 0;
+ return 0;
+ }
+ if (sma->direction == DPLL_PIN_DIRECTION_INPUT) {
+ ret = ice_dpll_input_frequency_get(NULL, sma->input, dpll,
+ dpll_priv, frequency,
+ extack);
+ } else {
+ ret = ice_dpll_output_frequency_get(NULL, sma->output, dpll,
+ dpll_priv, frequency,
+ extack);
+ }
+
+ return ret;
+}
+
+/**
* ice_dpll_pin_enable - enable a pin on dplls
* @hw: board private hw structure
* @pin: pointer to a pin
@@ -322,8 +476,8 @@ ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
}
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to enable %s pin:%u\n",
- ret, ice_aq_str(hw->adminq.sq_last_status),
+ "err:%d %s failed to enable %s pin:%u",
+ ret, libie_aq_str(hw->adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
return ret;
@@ -367,14 +521,75 @@ ice_dpll_pin_disable(struct ice_hw *hw, struct ice_dpll_pin *pin,
}
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to disable %s pin:%u\n",
- ret, ice_aq_str(hw->adminq.sq_last_status),
+ "err:%d %s failed to disable %s pin:%u",
+ ret, libie_aq_str(hw->adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
return ret;
}
/**
+ * ice_dpll_sw_pins_update - update status of all SW pins
+ * @pf: private board struct
+ *
+ * Determine the current direction/active values of all the SW-controlled pins
+ * and update the corresponding pin struct fields.
+ *
+ * Context: Call with pf->dplls.lock held
+ * Return:
+ * * 0 - OK
+ * * negative - error
+ */
+static int
+ice_dpll_sw_pins_update(struct ice_pf *pf)
+{
+ struct ice_dplls *d = &pf->dplls;
+ struct ice_dpll_pin *p;
+ u8 data = 0;
+ int ret;
+
+ ret = ice_read_sma_ctrl(&pf->hw, &data);
+ if (ret)
+ return ret;
+ /* no change since last check */
+ if (d->sma_data == data)
+ return 0;
+
+ /*
+ * SMA1/U.FL1 and SMA2/U.FL2 use different bit schemes to decide
+ * on their direction and whether they are active
+ */
+ p = &d->sma[ICE_DPLL_PIN_SW_1_IDX];
+ p->active = true;
+ p->direction = DPLL_PIN_DIRECTION_INPUT;
+ if (data & ICE_SMA1_DIR_EN) {
+ p->direction = DPLL_PIN_DIRECTION_OUTPUT;
+ if (data & ICE_SMA1_TX_EN)
+ p->active = false;
+ }
+
+ p = &d->sma[ICE_DPLL_PIN_SW_2_IDX];
+ p->active = true;
+ p->direction = DPLL_PIN_DIRECTION_INPUT;
+ if ((data & ICE_SMA2_INACTIVE_MASK) == ICE_SMA2_INACTIVE_MASK)
+ p->active = false;
+ else if (data & ICE_SMA2_DIR_EN)
+ p->direction = DPLL_PIN_DIRECTION_OUTPUT;
+
+ p = &d->ufl[ICE_DPLL_PIN_SW_1_IDX];
+ if (!(data & (ICE_SMA1_DIR_EN | ICE_SMA1_TX_EN)))
+ p->active = true;
+ else
+ p->active = false;
+
+ p = &d->ufl[ICE_DPLL_PIN_SW_2_IDX];
+ p->active = (data & ICE_SMA2_DIR_EN) && !(data & ICE_SMA2_UFL2_RX_DIS);
+ d->sma_data = data;
+
+ return 0;
+}
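
The comment in ice_dpll_sw_pins_update() notes that the two SMA/U.FL pairs encode their state differently in the SMA control byte. Below is a standalone decoder with the same decision logic; the bit positions are purely hypothetical placeholders, the real ICE_SMA*_ masks are defined in the driver headers:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical bit positions, for illustration only */
#define SMA1_DIR_EN             0x01
#define SMA1_TX_EN              0x02
#define SMA2_DIR_EN             0x04
#define SMA2_TX_EN              0x08
#define SMA2_UFL2_RX_DIS        0x10
#define SMA2_INACTIVE_MASK      (SMA2_DIR_EN | SMA2_TX_EN)

struct sw_pin { bool active; bool output; };

static void decode_sma(unsigned char data, struct sw_pin *sma1,
                       struct sw_pin *sma2, struct sw_pin *ufl1,
                       struct sw_pin *ufl2)
{
        /* SMA1: input by default; DIR_EN flips to output, TX_EN then disables it */
        sma1->active = true;
        sma1->output = false;
        if (data & SMA1_DIR_EN) {
                sma1->output = true;
                if (data & SMA1_TX_EN)
                        sma1->active = false;
        }

        /* SMA2: the whole inactive mask being set means the pin is unused */
        sma2->active = (data & SMA2_INACTIVE_MASK) != SMA2_INACTIVE_MASK;
        sma2->output = sma2->active && (data & SMA2_DIR_EN);

        /* U.FL1 rides on the SMA1 output path, U.FL2 on the SMA2 input path */
        ufl1->active = !(data & (SMA1_DIR_EN | SMA1_TX_EN));
        ufl1->output = true;
        ufl2->active = (data & SMA2_DIR_EN) && !(data & SMA2_UFL2_RX_DIS);
        ufl2->output = false;
}

int main(void)
{
        struct sw_pin sma1, sma2, ufl1, ufl2;

        decode_sma(SMA1_DIR_EN, &sma1, &sma2, &ufl1, &ufl2);
        printf("SMA1: %s %s\n", sma1.active ? "active" : "inactive",
               sma1.output ? "output" : "input");
        return 0;
}
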
+
+/**
* ice_dpll_pin_state_update - update pin's state
* @pf: private board struct
* @pin: structure with pin attributes to be updated
@@ -471,6 +686,11 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
DPLL_PIN_STATE_DISCONNECTED;
}
break;
+ case ICE_DPLL_PIN_TYPE_SOFTWARE:
+ ret = ice_dpll_sw_pins_update(pf);
+ if (ret)
+ goto err;
+ break;
default:
return -EINVAL;
}
@@ -479,15 +699,15 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
err:
if (extack)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to update %s pin:%u\n",
+ "err:%d %s failed to update %s pin:%u",
ret,
- ice_aq_str(pf->hw.adminq.sq_last_status),
+ libie_aq_str(pf->hw.adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
else
dev_err_ratelimited(ice_pf_to_dev(pf),
"err:%d %s failed to update %s pin:%u\n",
ret,
- ice_aq_str(pf->hw.adminq.sq_last_status),
+ libie_aq_str(pf->hw.adminq.sq_last_status),
pin_type_name[pin_type], pin->idx);
return ret;
}
@@ -518,9 +738,9 @@ ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll,
(u8)prio);
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin prio:%u on pin:%u\n",
+ "err:%d %s failed to set pin prio:%u on pin:%u",
ret,
- ice_aq_str(pf->hw.adminq.sq_last_status),
+ libie_aq_str(pf->hw.adminq.sq_last_status),
prio, pin->idx);
else
dpll->input_prio[pin->idx] = prio;
@@ -588,6 +808,67 @@ static int ice_dpll_mode_get(const struct dpll_device *dpll, void *dpll_priv,
}
/**
+ * ice_dpll_phase_offset_monitor_set - set phase offset monitor state
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: feature state to be set
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Enable/disable phase offset monitor feature of dpll.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return: 0 - success
+ */
+static int ice_dpll_phase_offset_monitor_set(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ if (state == DPLL_FEATURE_STATE_ENABLE)
+ d->phase_offset_monitor_period = ICE_DPLL_PHASE_OFFSET_PERIOD;
+ else
+ d->phase_offset_monitor_period = 0;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_phase_offset_monitor_get - get phase offset monitor state
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: on success holds current state of phase offset monitor
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Provides current state of phase offset monitor
+ * feature on the dpll device.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return: 0 - success
+ */
+static int ice_dpll_phase_offset_monitor_get(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_feature_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ if (d->phase_offset_monitor_period)
+ *state = DPLL_FEATURE_STATE_ENABLE;
+ else
+ *state = DPLL_FEATURE_STATE_DISABLE;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
* ice_dpll_pin_state_set - set pin's state on dpll
* @pin: pointer to a pin
* @pin_priv: private data pointer passed on pin registration
@@ -793,6 +1074,270 @@ ice_dpll_input_state_get(const struct dpll_pin *pin, void *pin_priv,
}
/**
+ * ice_dpll_sma_direction_set - set direction of SMA pin
+ * @p: pointer to a pin
+ * @direction: requested direction of the pin
+ * @extack: error reporting
+ *
+ * Wrapper for dpll subsystem callback. Set direction of a SMA pin.
+ *
+ * Context: Call with pf->dplls.lock held
+ * Return:
+ * * 0 - success
+ * * negative - failed to set direction
+ */
+static int ice_dpll_sma_direction_set(struct ice_dpll_pin *p,
+ enum dpll_pin_direction direction,
+ struct netlink_ext_ack *extack)
+{
+ u8 data;
+ int ret;
+
+ if (p->direction == direction && p->active)
+ return 0;
+ ret = ice_read_sma_ctrl(&p->pf->hw, &data);
+ if (ret)
+ return ret;
+
+ switch (p->idx) {
+ case ICE_DPLL_PIN_SW_1_IDX:
+ data &= ~ICE_SMA1_MASK;
+ if (direction == DPLL_PIN_DIRECTION_OUTPUT)
+ data |= ICE_SMA1_DIR_EN;
+ break;
+ case ICE_DPLL_PIN_SW_2_IDX:
+ if (direction == DPLL_PIN_DIRECTION_INPUT) {
+ data &= ~ICE_SMA2_DIR_EN;
+ } else {
+ data &= ~ICE_SMA2_TX_EN;
+ data |= ICE_SMA2_DIR_EN;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = ice_write_sma_ctrl(&p->pf->hw, data);
+ if (!ret)
+ ret = ice_dpll_pin_state_update(p->pf, p,
+ ICE_DPLL_PIN_TYPE_SOFTWARE,
+ extack);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_ufl_pin_state_set - set U.FL pin state on dpll device
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: requested state of the pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Set the state of a pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_ufl_pin_state_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv, *target;
+ struct ice_dpll *d = dpll_priv;
+ enum ice_dpll_pin_type type;
+ struct ice_pf *pf = p->pf;
+ struct ice_hw *hw;
+ bool enable;
+ u8 data;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
+ mutex_lock(&pf->dplls.lock);
+ hw = &pf->hw;
+ ret = ice_read_sma_ctrl(hw, &data);
+ if (ret)
+ goto unlock;
+
+ ret = -EINVAL;
+ switch (p->idx) {
+ case ICE_DPLL_PIN_SW_1_IDX:
+ if (state == DPLL_PIN_STATE_CONNECTED) {
+ data &= ~ICE_SMA1_MASK;
+ enable = true;
+ } else if (state == DPLL_PIN_STATE_DISCONNECTED) {
+ data |= ICE_SMA1_TX_EN;
+ enable = false;
+ } else {
+ goto unlock;
+ }
+ target = p->output;
+ type = ICE_DPLL_PIN_TYPE_OUTPUT;
+ break;
+ case ICE_DPLL_PIN_SW_2_IDX:
+ if (state == DPLL_PIN_STATE_SELECTABLE) {
+ data |= ICE_SMA2_DIR_EN;
+ data &= ~ICE_SMA2_UFL2_RX_DIS;
+ enable = true;
+ } else if (state == DPLL_PIN_STATE_DISCONNECTED) {
+ data |= ICE_SMA2_UFL2_RX_DIS;
+ enable = false;
+ } else {
+ goto unlock;
+ }
+ target = p->input;
+ type = ICE_DPLL_PIN_TYPE_INPUT;
+ break;
+ default:
+ goto unlock;
+ }
+
+ ret = ice_write_sma_ctrl(hw, data);
+ if (ret)
+ goto unlock;
+ ret = ice_dpll_pin_state_update(pf, p, ICE_DPLL_PIN_TYPE_SOFTWARE,
+ extack);
+ if (ret)
+ goto unlock;
+
+ if (enable)
+ ret = ice_dpll_pin_enable(hw, target, d->dpll_idx, type, extack);
+ else
+ ret = ice_dpll_pin_disable(hw, target, type, extack);
+ if (!ret)
+ ret = ice_dpll_pin_state_update(pf, target, type, extack);
+
+unlock:
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_sw_pin_state_get - get SW pin state
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: on success holds state of the pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Check state of a SW pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_pin_state_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = p->pf;
+ int ret = 0;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ if (!p->active) {
+ *state = DPLL_PIN_STATE_DISCONNECTED;
+ goto unlock;
+ }
+
+ if (p->direction == DPLL_PIN_DIRECTION_INPUT) {
+ ret = ice_dpll_pin_state_update(pf, p->input,
+ ICE_DPLL_PIN_TYPE_INPUT,
+ extack);
+ if (ret)
+ goto unlock;
+ *state = p->input->state[d->dpll_idx];
+ } else {
+ ret = ice_dpll_pin_state_update(pf, p->output,
+ ICE_DPLL_PIN_TYPE_OUTPUT,
+ extack);
+ if (ret)
+ goto unlock;
+ *state = p->output->state[d->dpll_idx];
+ }
+unlock:
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_sma_pin_state_set - set SMA pin state on dpll device
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: requested state of the pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Set state of a pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failed to set state
+ */
+static int
+ice_dpll_sma_pin_state_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *sma = pin_priv, *target;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = sma->pf;
+ enum ice_dpll_pin_type type;
+ bool enable;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
+ mutex_lock(&pf->dplls.lock);
+ if (!sma->active) {
+ ret = ice_dpll_sma_direction_set(sma, sma->direction, extack);
+ if (ret)
+ goto unlock;
+ }
+ if (sma->direction == DPLL_PIN_DIRECTION_INPUT) {
+ enable = state == DPLL_PIN_STATE_SELECTABLE;
+ target = sma->input;
+ type = ICE_DPLL_PIN_TYPE_INPUT;
+ } else {
+ enable = state == DPLL_PIN_STATE_CONNECTED;
+ target = sma->output;
+ type = ICE_DPLL_PIN_TYPE_OUTPUT;
+ }
+
+ if (enable)
+ ret = ice_dpll_pin_enable(&pf->hw, target, d->dpll_idx, type,
+ extack);
+ else
+ ret = ice_dpll_pin_disable(&pf->hw, target, type, extack);
+ if (!ret)
+ ret = ice_dpll_pin_state_update(pf, target, type, extack);
+
+unlock:
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
* ice_dpll_input_prio_get - get dpll's input prio
* @pin: pointer to a pin
* @pin_priv: private data pointer passed on pin registration
@@ -860,6 +1405,47 @@ ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv,
return ret;
}
+static int
+ice_dpll_sw_input_prio_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 *prio, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ if (p->input && p->direction == DPLL_PIN_DIRECTION_INPUT)
+ *prio = d->input_prio[p->input->idx];
+ else
+ *prio = ICE_DPLL_PIN_PRIO_OUTPUT;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+static int
+ice_dpll_sw_input_prio_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 prio, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ int ret;
+
+ if (!p->input || p->direction != DPLL_PIN_DIRECTION_INPUT)
+ return -EINVAL;
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
+ mutex_lock(&pf->dplls.lock);
+ ret = ice_dpll_hw_input_prio_set(pf, d, p->input, prio, extack);
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
/**
* ice_dpll_input_direction - callback for get input pin direction
* @pin: pointer to a pin
@@ -911,6 +1497,76 @@ ice_dpll_output_direction(const struct dpll_pin *pin, void *pin_priv,
}
/**
+ * ice_dpll_pin_sma_direction_set - callback for set SMA pin direction
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @direction: requested pin direction
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting direction of a SMA pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_pin_sma_direction_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_direction direction,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_pf *pf = p->pf;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+
+ mutex_lock(&pf->dplls.lock);
+ ret = ice_dpll_sma_direction_set(p, direction, extack);
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_pin_sw_direction_get - callback for get SW pin direction
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @direction: on success holds pin direction
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting direction of a SW pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_pin_sw_direction_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_direction *direction,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_pf *pf = p->pf;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ *direction = p->direction;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
* ice_dpll_pin_phase_adjust_get - callback for get pin phase adjust value
* @pin: pointer to a pin
* @pin_priv: private data pointer passed on pin registration
@@ -1004,9 +1660,9 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
mutex_unlock(&pf->dplls.lock);
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin phase_adjust:%d for pin:%u on dpll:%u\n",
+ "err:%d %s failed to set pin phase_adjust:%d for pin:%u on dpll:%u",
ret,
- ice_aq_str(pf->hw.adminq.sq_last_status),
+ libie_aq_str(pf->hw.adminq.sq_last_status),
phase_adjust, p->idx, d->dpll_idx);
return ret;
@@ -1024,7 +1680,7 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
* Dpll subsystem callback. Wraps a handler for setting phase adjust on input
* pin.
*
- * Context: Calls a function which acquires pf->dplls.lock
+ * Context: Calls a function which acquires and releases pf->dplls.lock
* Return:
* * 0 - success
* * negative - error
@@ -1068,6 +1724,82 @@ ice_dpll_output_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
ICE_DPLL_PIN_TYPE_OUTPUT);
}
+/**
+ * ice_dpll_sw_phase_adjust_get - callback for get SW pin phase adjust
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @phase_adjust: on success holds phase adjust value
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Wraps a handler for getting phase adjust on sw
+ * pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_phase_adjust_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s32 *phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ if (p->direction == DPLL_PIN_DIRECTION_INPUT)
+ return ice_dpll_pin_phase_adjust_get(p->input->pin, p->input,
+ dpll, dpll_priv,
+ phase_adjust, extack);
+ else
+ return ice_dpll_pin_phase_adjust_get(p->output->pin, p->output,
+ dpll, dpll_priv,
+ phase_adjust, extack);
+}
+
+/**
+ * ice_dpll_sw_phase_adjust_set - callback for set SW pin phase adjust value
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @phase_adjust: phase_adjust to be set
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Wraps a handler for setting phase adjust on a SW
+ * pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s32 phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ if (!p->active) {
+ NL_SET_ERR_MSG(extack, "pin is not active");
+ return -EINVAL;
+ }
+ if (p->direction == DPLL_PIN_DIRECTION_INPUT)
+ return ice_dpll_pin_phase_adjust_set(p->input->pin, p->input,
+ dpll, dpll_priv,
+ phase_adjust, extack,
+ ICE_DPLL_PIN_TYPE_INPUT);
+ else
+ return ice_dpll_pin_phase_adjust_set(p->output->pin, p->output,
+ dpll, dpll_priv,
+ phase_adjust, extack,
+ ICE_DPLL_PIN_TYPE_OUTPUT);
+}
+
#define ICE_DPLL_PHASE_OFFSET_DIVIDER 100
#define ICE_DPLL_PHASE_OFFSET_FACTOR \
(DPLL_PHASE_OFFSET_DIVIDER / ICE_DPLL_PHASE_OFFSET_DIVIDER)
@@ -1093,12 +1825,16 @@ ice_dpll_phase_offset_get(const struct dpll_pin *pin, void *pin_priv,
const struct dpll_device *dpll, void *dpll_priv,
s64 *phase_offset, struct netlink_ext_ack *extack)
{
+ struct ice_dpll_pin *p = pin_priv;
struct ice_dpll *d = dpll_priv;
struct ice_pf *pf = d->pf;
mutex_lock(&pf->dplls.lock);
- if (d->active_input == pin)
+ if (d->active_input == pin || (p->input &&
+ d->active_input == p->input->pin))
*phase_offset = d->phase_offset * ICE_DPLL_PHASE_OFFSET_FACTOR;
+ else if (d->phase_offset_monitor_period)
+ *phase_offset = p->phase_offset * ICE_DPLL_PHASE_OFFSET_FACTOR;
else
*phase_offset = 0;
mutex_unlock(&pf->dplls.lock);
@@ -1315,6 +2051,219 @@ ice_dpll_input_esync_get(const struct dpll_pin *pin, void *pin_priv,
}
/**
+ * ice_dpll_sw_esync_set - callback for setting embedded sync on SW pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @freq: requested embedded sync frequency
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting embedded sync frequency value
+ * on SW pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_esync_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 freq, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ if (!p->active) {
+ NL_SET_ERR_MSG(extack, "pin is not active");
+ return -EINVAL;
+ }
+ if (p->direction == DPLL_PIN_DIRECTION_INPUT)
+ return ice_dpll_input_esync_set(p->input->pin, p->input, dpll,
+ dpll_priv, freq, extack);
+ else
+ return ice_dpll_output_esync_set(p->output->pin, p->output,
+ dpll, dpll_priv, freq, extack);
+}
+
+/**
+ * ice_dpll_sw_esync_get - callback for getting embedded sync on SW pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @esync: on success holds embedded sync frequency and properties
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting embedded sync frequency value
+ * of SW pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_esync_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ struct dpll_pin_esync *esync,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ if (p->direction == DPLL_PIN_DIRECTION_INPUT)
+ return ice_dpll_input_esync_get(p->input->pin, p->input, dpll,
+ dpll_priv, esync, extack);
+ else
+ return ice_dpll_output_esync_get(p->output->pin, p->output,
+ dpll, dpll_priv, esync,
+ extack);
+}
+
+/**
+ * ice_dpll_input_ref_sync_set - callback for setting reference sync feature
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @ref_pin: pin pointer for reference sync pair
+ * @ref_pin_priv: private data pointer of ref_pin
+ * @state: requested state for reference sync for pin pair
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting the reference sync feature
+ * for an input pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_input_ref_sync_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *ref_pin, void *ref_pin_priv,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_pf *pf = p->pf;
+ u8 flags_en = 0;
+ int ret;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+
+ if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN)
+ flags_en = ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN;
+ if (state == DPLL_PIN_STATE_CONNECTED)
+ flags_en |= ICE_AQC_CGU_IN_CFG_FLG2_REFSYNC_EN;
+ ret = ice_aq_set_input_pin_cfg(&pf->hw, p->idx, 0, flags_en, 0, 0);
+ if (!ret)
+ ret = ice_dpll_pin_state_update(pf, p, ICE_DPLL_PIN_TYPE_INPUT,
+ extack);
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_input_ref_sync_get - callback for getting reference sync config
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @ref_pin: pin pointer for reference sync pair
+ * @ref_pin_priv: private data pointer of ref_pin
+ * @state: on success holds reference sync state for pin pair
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting the reference sync state
+ * of an input pin.
+ *
+ * Context: Acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_input_ref_sync_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *ref_pin, void *ref_pin_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_pf *pf = p->pf;
+
+ if (ice_dpll_is_reset(pf, extack))
+ return -EBUSY;
+ mutex_lock(&pf->dplls.lock);
+ if (p->flags[0] & ICE_AQC_CGU_IN_CFG_FLG2_REFSYNC_EN)
+ *state = DPLL_PIN_STATE_CONNECTED;
+ else
+ *state = DPLL_PIN_STATE_DISCONNECTED;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_sw_input_ref_sync_set - callback for setting reference sync feature
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @ref_pin: pin pointer for reference sync pair
+ * @ref_pin_priv: private data pointer of ref_pin
+ * @state: requested state for reference sync for pin pair
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting reference sync
+ * feature for input pins.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_input_ref_sync_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *ref_pin,
+ void *ref_pin_priv,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ return ice_dpll_input_ref_sync_set(pin, p->input, ref_pin, ref_pin_priv,
+ state, extack);
+}
+
+/**
+ * ice_dpll_sw_input_ref_sync_get - callback for getting reference sync config
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @ref_pin: pin pointer for reference sync pair
+ * @ref_pin_priv: private data pointer of ref_pin
+ * @state: on success holds reference sync state for pin pair
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting the reference sync state of
+ * an input pin.
+ *
+ * Context: Calls a function which acquires and releases pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_sw_input_ref_sync_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *ref_pin,
+ void *ref_pin_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+
+ return ice_dpll_input_ref_sync_get(pin, p->input, ref_pin, ref_pin_priv,
+ state, extack);
+}
+
+/**
* ice_dpll_rclk_state_on_pin_set - set a state on rclk pin
* @pin: pointer to a pin
* @pin_priv: private data pointer passed on pin registration
@@ -1362,9 +2311,9 @@ ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv,
&p->freq);
if (ret)
NL_SET_ERR_MSG_FMT(extack,
- "err:%d %s failed to set pin state:%u for pin:%u on parent:%u\n",
+ "err:%d %s failed to set pin state:%u for pin:%u on parent:%u",
ret,
- ice_aq_str(pf->hw.adminq.sq_last_status),
+ libie_aq_str(pf->hw.adminq.sq_last_status),
state, p->idx, parent->idx);
unlock:
mutex_unlock(&pf->dplls.lock);
@@ -1427,6 +2376,37 @@ static const struct dpll_pin_ops ice_dpll_rclk_ops = {
.direction_get = ice_dpll_input_direction,
};
+static const struct dpll_pin_ops ice_dpll_pin_sma_ops = {
+ .state_on_dpll_set = ice_dpll_sma_pin_state_set,
+ .state_on_dpll_get = ice_dpll_sw_pin_state_get,
+ .direction_get = ice_dpll_pin_sw_direction_get,
+ .direction_set = ice_dpll_pin_sma_direction_set,
+ .prio_get = ice_dpll_sw_input_prio_get,
+ .prio_set = ice_dpll_sw_input_prio_set,
+ .frequency_get = ice_dpll_sw_pin_frequency_get,
+ .frequency_set = ice_dpll_sw_pin_frequency_set,
+ .phase_adjust_get = ice_dpll_sw_phase_adjust_get,
+ .phase_adjust_set = ice_dpll_sw_phase_adjust_set,
+ .phase_offset_get = ice_dpll_phase_offset_get,
+ .esync_set = ice_dpll_sw_esync_set,
+ .esync_get = ice_dpll_sw_esync_get,
+ .ref_sync_set = ice_dpll_sw_input_ref_sync_set,
+ .ref_sync_get = ice_dpll_sw_input_ref_sync_get,
+};
+
+static const struct dpll_pin_ops ice_dpll_pin_ufl_ops = {
+ .state_on_dpll_set = ice_dpll_ufl_pin_state_set,
+ .state_on_dpll_get = ice_dpll_sw_pin_state_get,
+ .direction_get = ice_dpll_pin_sw_direction_get,
+ .frequency_get = ice_dpll_sw_pin_frequency_get,
+ .frequency_set = ice_dpll_sw_pin_frequency_set,
+ .esync_set = ice_dpll_sw_esync_set,
+ .esync_get = ice_dpll_sw_esync_get,
+ .phase_adjust_get = ice_dpll_sw_phase_adjust_get,
+ .phase_adjust_set = ice_dpll_sw_phase_adjust_set,
+ .phase_offset_get = ice_dpll_phase_offset_get,
+};
+
static const struct dpll_pin_ops ice_dpll_input_ops = {
.frequency_get = ice_dpll_input_frequency_get,
.frequency_set = ice_dpll_input_frequency_set,
@@ -1440,6 +2420,8 @@ static const struct dpll_pin_ops ice_dpll_input_ops = {
.phase_offset_get = ice_dpll_phase_offset_get,
.esync_set = ice_dpll_input_esync_set,
.esync_get = ice_dpll_input_esync_get,
+ .ref_sync_set = ice_dpll_input_ref_sync_set,
+ .ref_sync_get = ice_dpll_input_ref_sync_get,
};
static const struct dpll_pin_ops ice_dpll_output_ops = {
@@ -1459,6 +2441,13 @@ static const struct dpll_device_ops ice_dpll_ops = {
.mode_get = ice_dpll_mode_get,
};
+static const struct dpll_device_ops ice_dpll_pom_ops = {
+ .lock_status_get = ice_dpll_lock_status_get,
+ .mode_get = ice_dpll_mode_get,
+ .phase_offset_monitor_set = ice_dpll_phase_offset_monitor_set,
+ .phase_offset_monitor_get = ice_dpll_phase_offset_monitor_get,
+};
+
/**
* ice_generate_clock_id - generates unique clock_id for registering dpll.
* @pf: board private structure
@@ -1504,6 +2493,110 @@ static void ice_dpll_notify_changes(struct ice_dpll *d)
}
/**
+ * ice_dpll_is_pps_phase_monitor - check if dpll is capable of phase offset monitoring
+ * @pf: pf private structure
+ *
+ * Check if the firmware supports the admin command providing phase offset
+ * monitoring on all input pins of the PPS dpll.
+ *
+ * Return:
+ * * true - PPS dpll phase offset monitoring is supported
+ * * false - PPS dpll phase offset monitoring is not supported
+ */
+static bool ice_dpll_is_pps_phase_monitor(struct ice_pf *pf)
+{
+ struct ice_cgu_input_measure meas[ICE_DPLL_INPUT_REF_NUM];
+ int ret = ice_aq_get_cgu_input_pin_measure(&pf->hw, DPLL_TYPE_PPS, meas,
+ ARRAY_SIZE(meas));
+
+ if (ret && pf->hw.adminq.sq_last_status == LIBIE_AQ_RC_ESRCH)
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_dpll_pins_notify_mask - notify dpll subsystem about bulk pin changes
+ * @pins: array of ice_dpll_pin pointers registered within dpll subsystem
+ * @pin_num: number of pins
+ * @phase_offset_ntf_mask: bitmask of pin indexes to notify
+ *
+ * Iterate over the pin array and notify the dpll subsystem about each pin
+ * whose index is set in the bitmask.
+ *
+ * Context: Must be called without holding pf->dplls.lock.
+ */
+static void ice_dpll_pins_notify_mask(struct ice_dpll_pin *pins,
+ u8 pin_num,
+ u32 phase_offset_ntf_mask)
+{
+ int i = 0;
+
+ for (i = 0; i < pin_num; i++)
+ if (phase_offset_ntf_mask & (1 << i))
+ dpll_pin_change_ntf(pins[i].pin);
+}
+
+/**
+ * ice_dpll_pps_update_phase_offsets - update phase offset measurements
+ * @pf: pf private structure
+ * @phase_offset_pins_updated: returns mask of updated input pin indexes
+ *
+ * Read phase offset measurements for the PPS dpll device and store the values
+ * in the input pins array. On success, phase_offset_pins_updated holds a
+ * bitmask of updated input pin indexes; those pins shall be notified.
+ *
+ * Context: Shall be called with pf->dplls.lock held.
+ * Return:
+ * * 0 - success or no data available
+ * * negative - AQ failure
+ */
+static int ice_dpll_pps_update_phase_offsets(struct ice_pf *pf,
+ u32 *phase_offset_pins_updated)
+{
+ struct ice_cgu_input_measure meas[ICE_DPLL_INPUT_REF_NUM];
+ struct ice_dpll_pin *p;
+ s64 phase_offset, tmp;
+ int i, j, ret;
+
+ *phase_offset_pins_updated = 0;
+ ret = ice_aq_get_cgu_input_pin_measure(&pf->hw, DPLL_TYPE_PPS, meas,
+ ARRAY_SIZE(meas));
+ if (ret && pf->hw.adminq.sq_last_status == LIBIE_AQ_RC_EAGAIN) {
+ return 0;
+ } else if (ret) {
+ dev_err(ice_pf_to_dev(pf),
+ "failed to get input pin measurements dpll=%d, ret=%d %s\n",
+ DPLL_TYPE_PPS, ret,
+ libie_aq_str(pf->hw.adminq.sq_last_status));
+ return ret;
+ }
+ for (i = 0; i < pf->dplls.num_inputs; i++) {
+ p = &pf->dplls.inputs[i];
+ phase_offset = 0;
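+ /* assemble the 48-bit phase offset measurement from its raw bytes */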
+ for (j = 0; j < ICE_CGU_INPUT_PHASE_OFFSET_BYTES; j++) {
+ tmp = meas[i].phase_offset[j];
+#ifdef __LITTLE_ENDIAN
+ phase_offset += tmp << 8 * j;
+#else
+ phase_offset += tmp << 8 *
+ (ICE_CGU_INPUT_PHASE_OFFSET_BYTES - 1 - j);
+#endif
+ }
+ phase_offset = sign_extend64(phase_offset, 47);
+ if (p->phase_offset != phase_offset) {
+ dev_dbg(ice_pf_to_dev(pf),
+ "phase offset changed for pin:%d old:%llx, new:%llx\n",
+ p->idx, p->phase_offset, phase_offset);
+ p->phase_offset = phase_offset;
+ *phase_offset_pins_updated |= (1 << i);
+ }
+ }
+
+ return 0;
+}
+
+/**
* ice_dpll_update_state - update dpll state
* @pf: pf private structure
* @d: pointer to queried dpll device
@@ -1534,7 +2627,7 @@ ice_dpll_update_state(struct ice_pf *pf, struct ice_dpll *d, bool init)
dev_err(ice_pf_to_dev(pf),
"update dpll=%d state failed, ret=%d %s\n",
d->dpll_idx, ret,
- ice_aq_str(pf->hw.adminq.sq_last_status));
+ libie_aq_str(pf->hw.adminq.sq_last_status));
return ret;
}
if (init) {
@@ -1589,14 +2682,19 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
struct ice_pf *pf = container_of(d, struct ice_pf, dplls);
struct ice_dpll *de = &pf->dplls.eec;
struct ice_dpll *dp = &pf->dplls.pps;
+ u32 phase_offset_ntf = 0;
int ret = 0;
if (ice_is_reset_in_progress(pf->state))
goto resched;
mutex_lock(&pf->dplls.lock);
+ d->periodic_counter++;
ret = ice_dpll_update_state(pf, de, false);
if (!ret)
ret = ice_dpll_update_state(pf, dp, false);
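+ /* read phase offsets only every phase_offset_monitor_period iterations */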
+ if (!ret && dp->phase_offset_monitor_period &&
+ d->periodic_counter % dp->phase_offset_monitor_period == 0)
+ ret = ice_dpll_pps_update_phase_offsets(pf, &phase_offset_ntf);
if (ret) {
d->cgu_state_acq_err_num++;
/* stop rescheduling this worker */
@@ -1611,6 +2709,9 @@ static void ice_dpll_periodic_work(struct kthread_work *work)
mutex_unlock(&pf->dplls.lock);
ice_dpll_notify_changes(de);
ice_dpll_notify_changes(dp);
+ if (phase_offset_ntf)
+ ice_dpll_pins_notify_mask(d->inputs, d->num_inputs,
+ phase_offset_ntf);
resched:
/* Run twice a second or reschedule if update failed */
@@ -1620,6 +2721,88 @@ resched:
}
/**
+ * ice_dpll_init_ref_sync_inputs - initialize reference sync pin pairs
+ * @pf: pf private structure
+ *
+ * Read the DPLL TLV capabilities and initialize reference sync pin pairs in
+ * the dpll subsystem.
+ *
+ * Return:
+ * * 0 - success or nothing to do (no ref-sync TLVs present)
+ * * negative - AQ failure
+ */
+static int ice_dpll_init_ref_sync_inputs(struct ice_pf *pf)
+{
+ struct ice_dpll_pin *inputs = pf->dplls.inputs;
+ struct ice_hw *hw = &pf->hw;
+ u16 addr, len, end, hdr;
+ int ret;
+
+ ret = ice_get_pfa_module_tlv(hw, &hdr, &len, ICE_SR_PFA_DPLL_DEFAULTS);
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf),
+ "Failed to read PFA dpll defaults TLV ret=%d\n", ret);
+ return ret;
+ }
+ end = hdr + len;
+
+ for (addr = hdr + ICE_DPLL_PFA_HEADER_LEN; addr < end;
+ addr += ICE_DPLL_PFA_ENTRY_LEN) {
+ unsigned long bit, ul_mask, offset;
+ u16 pin, mask, buf;
+ bool valid = false;
+
+ ret = ice_read_sr_word(hw, addr, &buf);
+ if (ret)
+ return ret;
+
+ switch (buf) {
+ case ICE_DPLL_PFA_REF_SYNC_TYPE:
+ case ICE_DPLL_PFA_REF_SYNC_TYPE2:
+ {
+ u16 mask_addr = addr + ICE_DPLL_PFA_MASK_OFFSET;
+ u16 val_addr = addr + ICE_DPLL_PFA_VALUE_OFFSET;
+
+ ret = ice_read_sr_word(hw, mask_addr, &mask);
+ if (ret)
+ return ret;
+ ret = ice_read_sr_word(hw, val_addr, &pin);
+ if (ret)
+ return ret;
+ if (buf == ICE_DPLL_PFA_REF_SYNC_TYPE)
+ pin >>= ICE_DPLL_PFA_MAILBOX_REF_SYNC_PIN_S;
+ valid = true;
+ break;
+ }
+ case ICE_DPLL_PFA_END:
+ addr = end;
+ break;
+ default:
+ continue;
+ }
+ if (!valid)
+ continue;
+
+ ul_mask = mask;
+ offset = 0;
+ for_each_set_bit(bit, &ul_mask, BITS_PER_TYPE(u16)) {
+ int i, j;
+
+ if (hw->device_id == ICE_DEV_ID_E810C_SFP &&
+ pin > ICE_DPLL_E810C_SFP_NC_START)
+ offset = -ICE_DPLL_E810C_SFP_NC_PINS;
+ i = pin + offset;
+ j = bit + offset;
+ if (i < 0 || j < 0)
+ return -ERANGE;
+ inputs[i].ref_sync = j;
+ }
+ }
+
+ return 0;
+}
+
+/**
* ice_dpll_release_pins - release pins resources from dpll subsystem
* @pins: pointer to pins array
* @count: number of pins
@@ -1689,7 +2872,38 @@ ice_dpll_unregister_pins(struct dpll_device *dpll, struct ice_dpll_pin *pins,
int i;
for (i = 0; i < count; i++)
- dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
+ if (!pins[i].hidden)
+ dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
+}
+
+/**
+ * ice_dpll_pin_ref_sync_register - register reference sync pins
+ * @pins: pointer to pins array
+ * @count: number of pins
+ *
+ * Register reference sync pins in dpll subsystem.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - registration failure reason
+ */
+static int
+ice_dpll_pin_ref_sync_register(struct ice_dpll_pin *pins, int count)
+{
+ int ret, i;
+
+ for (i = 0; i < count; i++) {
+ if (!pins[i].hidden && pins[i].ref_sync) {
+ int j = pins[i].ref_sync;
+
+ ret = dpll_pin_ref_sync_pair_add(pins[i].pin,
+ pins[j].pin);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
}
/**
@@ -1712,16 +2926,19 @@ ice_dpll_register_pins(struct dpll_device *dpll, struct ice_dpll_pin *pins,
int ret, i;
for (i = 0; i < count; i++) {
- ret = dpll_pin_register(dpll, pins[i].pin, ops, &pins[i]);
- if (ret)
- goto unregister_pins;
+ if (!pins[i].hidden) {
+ ret = dpll_pin_register(dpll, pins[i].pin, ops, &pins[i]);
+ if (ret)
+ goto unregister_pins;
+ }
}
return 0;
unregister_pins:
while (--i >= 0)
- dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
+ if (!pins[i].hidden)
+ dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
return ret;
}
@@ -1909,6 +3126,18 @@ static void ice_dpll_deinit_pins(struct ice_pf *pf, bool cgu)
ice_dpll_unregister_pins(de->dpll, outputs,
&ice_dpll_output_ops, num_outputs);
ice_dpll_release_pins(outputs, num_outputs);
+ if (!pf->dplls.generic) {
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.ufl,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_ufl_ops,
+ pf->dplls.pps.dpll,
+ pf->dplls.eec.dpll);
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.sma,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_sma_ops,
+ pf->dplls.pps.dpll,
+ pf->dplls.eec.dpll);
+ }
}
}
@@ -1926,8 +3155,7 @@ static void ice_dpll_deinit_pins(struct ice_pf *pf, bool cgu)
*/
static int ice_dpll_init_pins(struct ice_pf *pf, bool cgu)
{
- u32 rclk_idx;
- int ret;
+ int ret, count;
ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.inputs, 0,
pf->dplls.num_inputs,
@@ -1935,23 +3163,64 @@ static int ice_dpll_init_pins(struct ice_pf *pf, bool cgu)
pf->dplls.eec.dpll, pf->dplls.pps.dpll);
if (ret)
return ret;
+ count = pf->dplls.num_inputs;
if (cgu) {
ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.outputs,
- pf->dplls.num_inputs,
+ count,
pf->dplls.num_outputs,
&ice_dpll_output_ops,
pf->dplls.eec.dpll,
pf->dplls.pps.dpll);
if (ret)
goto deinit_inputs;
+ count += pf->dplls.num_outputs;
+ if (!pf->dplls.generic) {
+ ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.sma,
+ count,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_sma_ops,
+ pf->dplls.eec.dpll,
+ pf->dplls.pps.dpll);
+ if (ret)
+ goto deinit_outputs;
+ count += ICE_DPLL_PIN_SW_NUM;
+ ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.ufl,
+ count,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_ufl_ops,
+ pf->dplls.eec.dpll,
+ pf->dplls.pps.dpll);
+ if (ret)
+ goto deinit_sma;
+ count += ICE_DPLL_PIN_SW_NUM;
+ }
+ ret = ice_dpll_pin_ref_sync_register(pf->dplls.inputs,
+ pf->dplls.num_inputs);
+ if (ret)
+ goto deinit_ufl;
+ ret = ice_dpll_pin_ref_sync_register(pf->dplls.sma,
+ ICE_DPLL_PIN_SW_NUM);
+ if (ret)
+ goto deinit_ufl;
+ } else {
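+ /* keep the rclk pin index base consistent with the CGU-owning PF */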
+ count += pf->dplls.num_outputs + 2 * ICE_DPLL_PIN_SW_NUM;
}
- rclk_idx = pf->dplls.num_inputs + pf->dplls.num_outputs + pf->hw.pf_id;
- ret = ice_dpll_init_rclk_pins(pf, &pf->dplls.rclk, rclk_idx,
+ ret = ice_dpll_init_rclk_pins(pf, &pf->dplls.rclk, count + pf->hw.pf_id,
&ice_dpll_rclk_ops);
if (ret)
- goto deinit_outputs;
+ goto deinit_ufl;
return 0;
+deinit_ufl:
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.ufl,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_ufl_ops,
+ pf->dplls.pps.dpll, pf->dplls.eec.dpll);
+deinit_sma:
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.sma,
+ ICE_DPLL_PIN_SW_NUM,
+ &ice_dpll_pin_sma_ops,
+ pf->dplls.pps.dpll, pf->dplls.eec.dpll);
deinit_outputs:
ice_dpll_deinit_direct_pins(cgu, pf->dplls.outputs,
pf->dplls.num_outputs,
@@ -1977,7 +3246,7 @@ static void
ice_dpll_deinit_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu)
{
if (cgu)
- dpll_device_unregister(d->dpll, &ice_dpll_ops, d);
+ dpll_device_unregister(d->dpll, d->ops, d);
dpll_device_put(d->dpll);
}
@@ -2011,12 +3280,17 @@ ice_dpll_init_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu,
}
d->pf = pf;
if (cgu) {
+ const struct dpll_device_ops *ops = &ice_dpll_ops;
+
+ if (type == DPLL_TYPE_PPS && ice_dpll_is_pps_phase_monitor(pf))
+ ops = &ice_dpll_pom_ops;
ice_dpll_update_state(pf, d, true);
- ret = dpll_device_register(d->dpll, type, &ice_dpll_ops, d);
+ ret = dpll_device_register(d->dpll, type, ops, d);
if (ret) {
dpll_device_put(d->dpll);
return ret;
}
+ d->ops = ops;
}
return 0;
@@ -2053,7 +3327,7 @@ static int ice_dpll_init_worker(struct ice_pf *pf)
struct kthread_worker *kworker;
kthread_init_delayed_work(&d->work, ice_dpll_periodic_work);
- kworker = kthread_create_worker(0, "ice-dplls-%s",
+ kworker = kthread_run_worker(0, "ice-dplls-%s",
dev_name(ice_pf_to_dev(pf)));
if (IS_ERR(kworker))
return PTR_ERR(kworker);
@@ -2065,6 +3339,18 @@ static int ice_dpll_init_worker(struct ice_pf *pf)
}
/**
+ * ice_dpll_phase_range_set - helper to initialize a phase adjust range
+ * @range: pointer to phase adjust range struct to be initialized
+ * @phase_adj: value used as the negative (min) and positive (max) boundary
+ */
+static void ice_dpll_phase_range_set(struct dpll_pin_phase_adjust_range *range,
+ u32 phase_adj)
+{
+ range->min = -phase_adj;
+ range->max = phase_adj;
+}
+
+/**
* ice_dpll_init_info_pins_generic - initializes generic pins info
* @pf: board private structure
* @input: if input pins initialized
@@ -2105,8 +3391,8 @@ static int ice_dpll_init_info_pins_generic(struct ice_pf *pf, bool input)
for (i = 0; i < pin_num; i++) {
pins[i].idx = i;
pins[i].prop.board_label = labels[i];
- pins[i].prop.phase_range.min = phase_adj_max;
- pins[i].prop.phase_range.max = -phase_adj_max;
+ ice_dpll_phase_range_set(&pins[i].prop.phase_range,
+ phase_adj_max);
pins[i].prop.capabilities = cap;
pins[i].pf = pf;
ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
@@ -2152,6 +3438,7 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
struct ice_hw *hw = &pf->hw;
struct ice_dpll_pin *pins;
unsigned long caps;
+ u32 phase_adj_max;
u8 freq_supp_num;
bool input;
@@ -2159,18 +3446,22 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
case ICE_DPLL_PIN_TYPE_INPUT:
pins = pf->dplls.inputs;
num_pins = pf->dplls.num_inputs;
+ phase_adj_max = pf->dplls.input_phase_adj_max;
input = true;
break;
case ICE_DPLL_PIN_TYPE_OUTPUT:
pins = pf->dplls.outputs;
num_pins = pf->dplls.num_outputs;
+ phase_adj_max = pf->dplls.output_phase_adj_max;
input = false;
break;
default:
return -EINVAL;
}
- if (num_pins != ice_cgu_get_num_pins(hw, input))
+ if (num_pins != ice_cgu_get_num_pins(hw, input)) {
+ pf->dplls.generic = true;
return ice_dpll_init_info_pins_generic(pf, input);
+ }
for (i = 0; i < num_pins; i++) {
caps = 0;
@@ -2188,19 +3479,17 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
return ret;
caps |= (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE);
- pins[i].prop.phase_range.min =
- pf->dplls.input_phase_adj_max;
- pins[i].prop.phase_range.max =
- -pf->dplls.input_phase_adj_max;
+ if (ice_dpll_is_sw_pin(pf, i, true))
+ pins[i].hidden = true;
} else {
- pins[i].prop.phase_range.min =
- pf->dplls.output_phase_adj_max;
- pins[i].prop.phase_range.max =
- -pf->dplls.output_phase_adj_max;
ret = ice_cgu_get_output_pin_state_caps(hw, i, &caps);
if (ret)
return ret;
+ if (ice_dpll_is_sw_pin(pf, i, false))
+ pins[i].hidden = true;
}
+ ice_dpll_phase_range_set(&pins[i].prop.phase_range,
+ phase_adj_max);
pins[i].prop.capabilities = caps;
ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
if (ret)
@@ -2210,6 +3499,8 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
pins[i].prop.freq_supported_num = freq_supp_num;
pins[i].pf = pf;
}
+ if (input)
+ ret = ice_dpll_init_ref_sync_inputs(pf);
return ret;
}
@@ -2237,6 +3528,91 @@ static int ice_dpll_init_info_rclk_pin(struct ice_pf *pf)
}
/**
+ * ice_dpll_init_info_sw_pins - initializes software controlled pin information
+ * @pf: board private structure
+ *
+ * Initialize information for software controlled pins and cache it in
+ * pf->dplls.sma and pf->dplls.ufl.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - init failure reason
+ */
+static int ice_dpll_init_info_sw_pins(struct ice_pf *pf)
+{
+ u8 freq_supp_num, pin_abs_idx, input_idx_offset = 0;
+ struct ice_dplls *d = &pf->dplls;
+ struct ice_dpll_pin *pin;
+ u32 phase_adj_max, caps;
+ int i, ret;
+
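+ /* on E810-C QSFP the recovered clock pins precede the SMA inputs */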
+ if (pf->hw.device_id == ICE_DEV_ID_E810C_QSFP)
+ input_idx_offset = ICE_E810_RCLK_PINS_NUM;
+ phase_adj_max = max(d->input_phase_adj_max, d->output_phase_adj_max);
+ caps = DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
+ for (i = 0; i < ICE_DPLL_PIN_SW_NUM; i++) {
+ pin = &d->sma[i];
+ pin->idx = i;
+ pin->prop.type = DPLL_PIN_TYPE_EXT;
+ pin_abs_idx = ICE_DPLL_PIN_SW_INPUT_ABS(i) + input_idx_offset;
+ pin->prop.freq_supported =
+ ice_cgu_get_pin_freq_supp(&pf->hw, pin_abs_idx,
+ true, &freq_supp_num);
+ pin->prop.freq_supported_num = freq_supp_num;
+ pin->prop.capabilities =
+ (DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE |
+ DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
+ caps);
+ pin->pf = pf;
+ pin->prop.board_label = ice_dpll_sw_pin_sma[i];
+ pin->input = &d->inputs[pin_abs_idx];
+ if (pin->input->ref_sync)
+ pin->ref_sync = pin->input->ref_sync - pin_abs_idx;
+ pin->output = &d->outputs[ICE_DPLL_PIN_SW_OUTPUT_ABS(i)];
+ ice_dpll_phase_range_set(&pin->prop.phase_range, phase_adj_max);
+ }
+ for (i = 0; i < ICE_DPLL_PIN_SW_NUM; i++) {
+ pin = &d->ufl[i];
+ pin->idx = i;
+ pin->prop.type = DPLL_PIN_TYPE_EXT;
+ pin->prop.capabilities = caps;
+ pin->pf = pf;
+ pin->prop.board_label = ice_dpll_sw_pin_ufl[i];
+ if (i == ICE_DPLL_PIN_SW_1_IDX) {
+ pin->direction = DPLL_PIN_DIRECTION_OUTPUT;
+ pin_abs_idx = ICE_DPLL_PIN_SW_OUTPUT_ABS(i);
+ pin->prop.freq_supported =
+ ice_cgu_get_pin_freq_supp(&pf->hw, pin_abs_idx,
+ false,
+ &freq_supp_num);
+ pin->prop.freq_supported_num = freq_supp_num;
+ pin->input = NULL;
+ pin->output = &d->outputs[pin_abs_idx];
+ } else if (i == ICE_DPLL_PIN_SW_2_IDX) {
+ pin->direction = DPLL_PIN_DIRECTION_INPUT;
+ pin_abs_idx = ICE_DPLL_PIN_SW_INPUT_ABS(i) +
+ input_idx_offset;
+ pin->output = NULL;
+ pin->input = &d->inputs[pin_abs_idx];
+ pin->prop.freq_supported =
+ ice_cgu_get_pin_freq_supp(&pf->hw, pin_abs_idx,
+ true, &freq_supp_num);
+ pin->prop.freq_supported_num = freq_supp_num;
+ pin->prop.capabilities =
+ (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
+ caps);
+ }
+ ice_dpll_phase_range_set(&pin->prop.phase_range, phase_adj_max);
+ }
+ ret = ice_dpll_pin_state_update(pf, pin, ICE_DPLL_PIN_TYPE_SOFTWARE,
+ NULL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
* ice_dpll_init_pins_info - init pins info wrapper
* @pf: board private structure
* @pin_type: type of pins being initialized
@@ -2256,6 +3632,8 @@ ice_dpll_init_pins_info(struct ice_pf *pf, enum ice_dpll_pin_type pin_type)
return ice_dpll_init_info_direct_pins(pf, pin_type);
case ICE_DPLL_PIN_TYPE_RCLK_INPUT:
return ice_dpll_init_info_rclk_pin(pf);
+ case ICE_DPLL_PIN_TYPE_SOFTWARE:
+ return ice_dpll_init_info_sw_pins(pf);
default:
return -EINVAL;
}
@@ -2300,7 +3678,7 @@ static int ice_dpll_init_info(struct ice_pf *pf, bool cgu)
if (ret) {
dev_err(ice_pf_to_dev(pf),
"err:%d %s failed to read cgu abilities\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
+ ret, libie_aq_str(hw->adminq.sq_last_status));
return ret;
}
@@ -2308,8 +3686,10 @@ static int ice_dpll_init_info(struct ice_pf *pf, bool cgu)
dp->dpll_idx = abilities.pps_dpll_idx;
d->num_inputs = abilities.num_inputs;
d->num_outputs = abilities.num_outputs;
- d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj);
- d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj);
+ d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj) &
+ ICE_AQC_GET_CGU_MAX_PHASE_ADJ;
+ d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj) &
+ ICE_AQC_GET_CGU_MAX_PHASE_ADJ;
alloc_size = sizeof(*d->inputs) * d->num_inputs;
d->inputs = kzalloc(alloc_size, GFP_KERNEL);
@@ -2340,6 +3720,9 @@ static int ice_dpll_init_info(struct ice_pf *pf, bool cgu)
ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_OUTPUT);
if (ret)
goto deinit_info;
+ ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_SOFTWARE);
+ if (ret)
+ goto deinit_info;
}
ret = ice_get_cgu_rclk_pin_info(&pf->hw, &d->base_rclk_idx,
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.h b/drivers/net/ethernet/intel/ice/ice_dpll.h
index c320f1bf7d6d..c0da03384ce9 100644
--- a/drivers/net/ethernet/intel/ice/ice_dpll.h
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.h
@@ -8,6 +8,18 @@
#define ICE_DPLL_RCLK_NUM_MAX 4
+/**
+ * enum ice_dpll_pin_sw - enumerate ice software pin indices
+ * @ICE_DPLL_PIN_SW_1_IDX: index of first SW pin
+ * @ICE_DPLL_PIN_SW_2_IDX: index of second SW pin
+ * @ICE_DPLL_PIN_SW_NUM: number of SW pins in a pair
+ */
+enum ice_dpll_pin_sw {
+ ICE_DPLL_PIN_SW_1_IDX,
+ ICE_DPLL_PIN_SW_2_IDX,
+ ICE_DPLL_PIN_SW_NUM
+};
+
/** ice_dpll_pin - store info about pins
* @pin: dpll pin structure
* @pf: pointer to pf, which has registered the dpll_pin
@@ -19,6 +31,8 @@
* @prop: pin properties
* @freq: current frequency of a pin
* @phase_adjust: current phase adjust value
+ * @phase_offset: monitored phase offset value
+ * @ref_sync: id of the paired reference sync pin
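+ * @input: pointer to the associated input pin (SW pins only)
+ * @output: pointer to the associated output pin (SW pins only)
+ * @direction: direction of a SW pin
+ * @active: indicates if the pin is active
+ * @hidden: indicates if the pin is hidden from the dpll subsystem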
*/
struct ice_dpll_pin {
struct dpll_pin *pin;
@@ -31,7 +45,14 @@ struct ice_dpll_pin {
struct dpll_pin_properties prop;
u32 freq;
s32 phase_adjust;
+ struct ice_dpll_pin *input;
+ struct ice_dpll_pin *output;
+ enum dpll_pin_direction direction;
+ s64 phase_offset;
u8 status;
+ u8 ref_sync;
+ bool active;
+ bool hidden;
};
/** ice_dpll - store info required for DPLL control
@@ -47,8 +68,10 @@ struct ice_dpll_pin {
* @input_prio: priorities of each input
* @dpll_state: current dpll sync state
* @prev_dpll_state: last dpll sync state
+ * @phase_offset_monitor_period: period between phase offset monitor reads
* @active_input: pointer to active input pin
* @prev_input: pointer to previous active input pin
+ * @ops: holds the registered ops
*/
struct ice_dpll {
struct dpll_device *dpll;
@@ -64,8 +87,10 @@ struct ice_dpll {
enum dpll_lock_status dpll_state;
enum dpll_lock_status prev_dpll_state;
enum dpll_mode mode;
+ u32 phase_offset_monitor_period;
struct dpll_pin *active_input;
struct dpll_pin *prev_input;
+ const struct dpll_device_ops *ops;
};
/** ice_dplls - store info required for CCU (clock controlling unit)
@@ -84,6 +109,7 @@ struct ice_dpll {
* @clock_id: clock_id of dplls
* @input_phase_adj_max: max phase adjust value for an input pins
* @output_phase_adj_max: max phase adjust value for an output pins
+ * @periodic_counter: counter of periodic work executions
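+ * @sma: array of SMA connector SW pins
+ * @ufl: array of U.FL connector SW pins
+ * @sma_data: SMA pins data cache
+ * @generic: indicates if generic pin info is in use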
*/
struct ice_dplls {
struct kthread_worker *kworker;
@@ -93,14 +119,19 @@ struct ice_dplls {
struct ice_dpll pps;
struct ice_dpll_pin *inputs;
struct ice_dpll_pin *outputs;
+ struct ice_dpll_pin sma[ICE_DPLL_PIN_SW_NUM];
+ struct ice_dpll_pin ufl[ICE_DPLL_PIN_SW_NUM];
struct ice_dpll_pin rclk;
u8 num_inputs;
u8 num_outputs;
- int cgu_state_acq_err_num;
+ u8 sma_data;
u8 base_rclk_idx;
+ int cgu_state_acq_err_num;
u64 clock_id;
s32 input_phase_adj_max;
s32 output_phase_adj_max;
+ u32 periodic_counter;
+ bool generic;
};
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index fb527434b58b..2e4f0969035f 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -29,6 +29,7 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
return -ENODEV;
ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
+ ice_vsi_cfg_sw_lldp(uplink_vsi, true, false);
netif_addr_lock_bh(netdev);
__dev_uc_unsync(netdev, NULL);
@@ -38,8 +39,7 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
if (ice_vsi_add_vlan_zero(uplink_vsi))
goto err_vlan_zero;
- if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
- ICE_FLTR_RX))
+ if (ice_set_dflt_vsi(uplink_vsi))
goto err_def_rx;
if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
@@ -50,9 +50,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
if (vlan_ops->dis_rx_filtering(uplink_vsi))
goto err_vlan_filtering;
- if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
- goto err_override_uplink;
-
if (ice_vsi_update_local_lb(uplink_vsi, true))
goto err_override_local_lb;
@@ -64,8 +61,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
err_up:
ice_vsi_update_local_lb(uplink_vsi, false);
err_override_local_lb:
- ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
-err_override_uplink:
vlan_ops->ena_rx_filtering(uplink_vsi);
err_vlan_filtering:
ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
@@ -251,6 +246,10 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
u64 cd_cmd, dst_vsi;
if (!dst) {
+ struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
+
+ if (unlikely(eth->h_proto == htons(ETH_P_LLDP)))
+ return;
cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
} else {
@@ -276,7 +275,6 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
ice_vsi_update_local_lb(uplink_vsi, false);
- ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
vlan_ops->ena_rx_filtering(uplink_vsi);
ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
ICE_FLTR_TX);
@@ -285,6 +283,7 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
ICE_FWD_TO_VSI);
+ ice_vsi_cfg_sw_lldp(uplink_vsi, true, true);
}
/**
@@ -509,10 +508,14 @@ err_create_repr:
*/
int ice_eswitch_attach_vf(struct ice_pf *pf, struct ice_vf *vf)
{
- struct ice_repr *repr = ice_repr_create_vf(vf);
struct devlink *devlink = priv_to_devlink(pf);
+ struct ice_repr *repr;
int err;
+ if (!ice_is_eswitch_mode_switchdev(pf))
+ return 0;
+
+ repr = ice_repr_create_vf(vf);
if (IS_ERR(repr))
return PTR_ERR(repr);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index ac7db100e2cd..5c7dcf21b222 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -5,7 +5,7 @@
#define _ICE_ESWITCH_H_
#include <net/devlink.h>
-#include "devlink/devlink_port.h"
+#include "devlink/port.h"
#ifdef CONFIG_ICE_SWITCHDEV
void ice_eswitch_detach_vf(struct ice_pf *pf, struct ice_vf *vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 3072634bf049..969d4f8f9c02 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -10,6 +10,7 @@
#include "ice_lib.h"
#include "ice_dcb_lib.h"
#include <net/dcbnl.h>
+#include <net/libeth/rx.h>
struct ice_stats {
char stat_string[ETH_GSTRING_LEN];
@@ -340,7 +341,6 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
ICE_FLAG_VF_TRUE_PROMISC_ENA),
ICE_PRIV_FLAG("mdd-auto-reset-vf", ICE_FLAG_MDD_AUTO_RESET_VF),
ICE_PRIV_FLAG("vf-vlan-pruning", ICE_FLAG_VF_VLAN_PRUNING),
- ICE_PRIV_FLAG("legacy-rx", ICE_FLAG_LEGACY_RX),
};
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
@@ -667,7 +667,8 @@ static int ice_get_port_topology(struct ice_hw *hw, u8 lport,
if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_100G)
port_topology->serdes_lane_count = 4;
- else if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_50G)
+ else if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_50G ||
+ max_speed == ICE_AQC_PORT_OPT_MAX_LANE_40G)
port_topology->serdes_lane_count = 2;
else
port_topology->serdes_lane_count = 1;
@@ -710,7 +711,6 @@ static int ice_get_tx_rx_equa(struct ice_hw *hw, u8 serdes_num,
{ ICE_AQC_RX_EQU_POST1, rx, &ptr->rx_equ_post1 },
{ ICE_AQC_RX_EQU_BFLF, rx, &ptr->rx_equ_bflf },
{ ICE_AQC_RX_EQU_BFHF, rx, &ptr->rx_equ_bfhf },
- { ICE_AQC_RX_EQU_DRATE, rx, &ptr->rx_equ_drate },
{ ICE_AQC_RX_EQU_CTLE_GAINHF, rx, &ptr->rx_equ_ctle_gainhf },
{ ICE_AQC_RX_EQU_CTLE_GAINLF, rx, &ptr->rx_equ_ctle_gainlf },
{ ICE_AQC_RX_EQU_CTLE_GAINDC, rx, &ptr->rx_equ_ctle_gaindc },
@@ -794,8 +794,7 @@ static int ice_get_extended_regs(struct net_device *netdev, void *p)
static void
ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_hw *hw = &pf->hw;
u32 *regs_buf = (u32 *)p;
unsigned int i;
@@ -810,8 +809,7 @@ ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
static u32 ice_get_msglevel(struct net_device *netdev)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
#ifndef CONFIG_DYNAMIC_DEBUG
if (pf->hw.debug_mask)
@@ -824,8 +822,7 @@ static u32 ice_get_msglevel(struct net_device *netdev)
static void ice_set_msglevel(struct net_device *netdev, u32 data)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
#ifndef CONFIG_DYNAMIC_DEBUG
if (ICE_DBG_USER & data)
@@ -837,10 +834,17 @@ static void ice_set_msglevel(struct net_device *netdev, u32 data)
#endif /* !CONFIG_DYNAMIC_DEBUG */
}
+static void ice_get_link_ext_stats(struct net_device *netdev,
+ struct ethtool_link_ext_stats *stats)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ stats->link_down_events = pf->link_down_events;
+}
+
static int ice_get_eeprom_len(struct net_device *netdev)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
return (int)pf->hw.flash.flash_size;
}
@@ -849,9 +853,7 @@ static int
ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
u8 *bytes)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_hw *hw = &pf->hw;
struct device *dev;
int ret;
@@ -870,7 +872,7 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
ret = ice_acquire_nvm(hw, ICE_RES_READ);
if (ret) {
dev_err(dev, "ice_acquire_nvm failed, err %d aq_err %s\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
+ ret, libie_aq_str(hw->adminq.sq_last_status));
goto out;
}
@@ -878,7 +880,7 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
false);
if (ret) {
dev_err(dev, "ice_read_flat_nvm failed, err %d aq_err %s\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
+ ret, libie_aq_str(hw->adminq.sq_last_status));
goto release;
}
@@ -950,8 +952,7 @@ static u64 ice_link_test(struct net_device *netdev)
*/
static u64 ice_eeprom_test(struct net_device *netdev)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
netdev_info(netdev, "EEPROM test\n");
return !!(ice_nvm_validate_checksum(&pf->hw));
@@ -1230,8 +1231,9 @@ static int ice_diag_send(struct ice_tx_ring *tx_ring, u8 *data, u16 size)
*/
static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
{
- struct ice_rx_buf *rx_buf;
+ struct libeth_fqe *rx_buf;
int valid_frames, i;
+ struct page *page;
u8 *received_buf;
valid_frames = 0;
@@ -1246,8 +1248,10 @@ static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
cpu_to_le16(BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)))))
continue;
- rx_buf = &rx_ring->rx_buf[i];
- received_buf = page_address(rx_buf->page) + rx_buf->page_offset;
+ rx_buf = &rx_ring->rx_fqes[i];
+ page = __netmem_to_page(rx_buf->netmem);
+ received_buf = page_address(page) + rx_buf->offset +
+ page->pp->p.offset;
if (ice_lbtest_check_frame(received_buf))
valid_frames++;
@@ -1265,9 +1269,8 @@ static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
*/
static u64 ice_loopback_test(struct net_device *netdev)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *orig_vsi = np->vsi, *test_vsi;
- struct ice_pf *pf = orig_vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vsi *test_vsi;
u8 *tx_frame __free(kfree) = NULL;
u8 broadcast[ETH_ALEN], ret = 0;
int num_frames, valid_frames;
@@ -1356,8 +1359,7 @@ lbtest_vsi_close:
*/
static u64 ice_intr_test(struct net_device *netdev)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
u16 swic_old = pf->sw_int_count;
netdev_info(netdev, "interrupt test\n");
@@ -1385,9 +1387,8 @@ static void
ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
u64 *data)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
bool if_running = netif_running(netdev);
- struct ice_pf *pf = np->vsi->back;
struct device *dev;
dev = ice_pf_to_dev(pf);
@@ -1711,9 +1712,7 @@ static int ice_nway_reset(struct net_device *netdev)
*/
static u32 ice_get_priv_flags(struct net_device *netdev)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
u32 i, ret_flags = 0;
for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++) {
@@ -1819,7 +1818,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
/* Remove rule to direct LLDP packets to default VSI.
* The FW LLDP engine will now be consuming them.
*/
- ice_cfg_sw_lldp(vsi, false, false);
+ ice_cfg_sw_rx_lldp(vsi->back, false);
/* AQ command to start FW LLDP agent will return an
* error if the agent is already started
@@ -1860,10 +1859,6 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
ice_nway_reset(netdev);
}
}
- if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
- /* down and up VSI so that changes of Rx cfg are reflected. */
- ice_down_up(vsi);
- }
/* don't allow modification of this flag when a single VF is in
* promiscuous mode because it's not supported
*/
@@ -2789,14 +2784,7 @@ done:
return err;
}
-/**
- * ice_parse_hdrs - parses headers from RSS hash input
- * @nfc: ethtool rxnfc command
- *
- * This function parses the rxnfc command and returns intended
- * header types for RSS configuration
- */
-static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
+static u32 ice_parse_hdrs(const struct ethtool_rxfh_fields *nfc)
{
u32 hdrs = ICE_FLOW_SEG_HDR_NONE;
@@ -2861,15 +2849,7 @@ static u32 ice_parse_hdrs(struct ethtool_rxnfc *nfc)
return hdrs;
}
-/**
- * ice_parse_hash_flds - parses hash fields from RSS hash input
- * @nfc: ethtool rxnfc command
- * @symm: true if Symmetric Topelitz is set
- *
- * This function parses the rxnfc command and returns intended
- * hash fields for RSS configuration
- */
-static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm)
+static u64 ice_parse_hash_flds(const struct ethtool_rxfh_fields *nfc, bool symm)
{
u64 hfld = ICE_HASH_INVALID;
@@ -2966,16 +2946,13 @@ static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm)
return hfld;
}
-/**
- * ice_set_rss_hash_opt - Enable/Disable flow types for RSS hash
- * @vsi: the VSI being configured
- * @nfc: ethtool rxnfc command
- *
- * Returns Success if the flow input set is supported.
- */
static int
-ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
+ice_set_rxfh_fields(struct net_device *netdev,
+ const struct ethtool_rxfh_fields *nfc,
+ struct netlink_ext_ack *extack)
{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_rss_hash_cfg cfg;
struct device *dev;
@@ -3021,14 +2998,11 @@ ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
return 0;
}
-/**
- * ice_get_rss_hash_opt - Retrieve hash fields for a given flow-type
- * @vsi: the VSI being configured
- * @nfc: ethtool rxnfc command
- */
-static void
-ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
+static int
+ice_get_rxfh_fields(struct net_device *netdev, struct ethtool_rxfh_fields *nfc)
{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct device *dev;
u64 hash_flds;
@@ -3041,21 +3015,21 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
if (ice_is_safe_mode(pf)) {
dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
vsi->vsi_num);
- return;
+ return 0;
}
hdrs = ice_parse_hdrs(nfc);
if (hdrs == ICE_FLOW_SEG_HDR_NONE) {
dev_dbg(dev, "Header type is not valid, vsi num = %d\n",
vsi->vsi_num);
- return;
+ return 0;
}
hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs, &symm);
if (hash_flds == ICE_HASH_INVALID) {
dev_dbg(dev, "No hash fields found for the given header type, vsi num = %d\n",
vsi->vsi_num);
- return;
+ return 0;
}
if (hash_flds & ICE_FLOW_HASH_FLD_IPV4_SA ||
@@ -3082,6 +3056,8 @@ ice_get_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
hash_flds & ICE_FLOW_HASH_FLD_GTPU_UP_TEID ||
hash_flds & ICE_FLOW_HASH_FLD_GTPU_DWN_TEID)
nfc->data |= (u64)RXH_GTP_TEID;
+
+ return 0;
}
/**
@@ -3101,8 +3077,6 @@ static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
return ice_add_fdir_ethtool(vsi, cmd);
case ETHTOOL_SRXCLSRLDEL:
return ice_del_fdir_ethtool(vsi, cmd);
- case ETHTOOL_SRXFH:
- return ice_set_rss_hash_opt(vsi, cmd);
default:
break;
}
@@ -3110,6 +3084,20 @@ static int ice_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
}
/**
+ * ice_get_rx_ring_count - get RX ring count
+ * @netdev: network interface device structure
+ *
+ * Return: number of RX rings.
+ */
+static u32 ice_get_rx_ring_count(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+
+ return vsi->rss_size;
+}
+
+/**
* ice_get_rxnfc - command to get Rx flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -3129,10 +3117,6 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
hw = &vsi->back->hw;
switch (cmd->cmd) {
- case ETHTOOL_GRXRINGS:
- cmd->data = vsi->rss_size;
- ret = 0;
- break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = hw->fdir_active_fltr;
/* report total rule count */
@@ -3145,10 +3129,6 @@ ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLALL:
ret = ice_get_fdir_fltr_ids(hw, cmd, (u32 *)rule_locs);
break;
- case ETHTOOL_GRXFH:
- ice_get_rss_hash_opt(vsi, cmd);
- ret = 0;
- break;
default:
break;
}
@@ -3163,9 +3143,11 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ struct ice_hw *hw;
- ring->rx_max_pending = ICE_MAX_NUM_DESC;
- ring->tx_max_pending = ICE_MAX_NUM_DESC;
+ hw = &vsi->back->hw;
+ ring->rx_max_pending = ICE_MAX_NUM_DESC_BY_MAC(hw);
+ ring->tx_max_pending = ICE_MAX_NUM_DESC_BY_MAC(hw);
if (vsi->tx_rings && vsi->rx_rings) {
ring->rx_pending = vsi->rx_rings[0]->count;
ring->tx_pending = vsi->tx_rings[0]->count;
@@ -3179,6 +3161,10 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
ring->rx_jumbo_max_pending = 0;
ring->rx_mini_pending = 0;
ring->rx_jumbo_pending = 0;
+
+ kernel_ring->tcp_data_split = vsi->hsplit ?
+ ETHTOOL_TCP_DATA_SPLIT_ENABLED :
+ ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
static int
@@ -3193,15 +3179,17 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int i, timeout = 50, err = 0;
+ struct ice_hw *hw = &pf->hw;
u16 new_rx_cnt, new_tx_cnt;
+ bool hsplit;
- if (ring->tx_pending > ICE_MAX_NUM_DESC ||
+ if (ring->tx_pending > ICE_MAX_NUM_DESC_BY_MAC(hw) ||
ring->tx_pending < ICE_MIN_NUM_DESC ||
- ring->rx_pending > ICE_MAX_NUM_DESC ||
+ ring->rx_pending > ICE_MAX_NUM_DESC_BY_MAC(hw) ||
ring->rx_pending < ICE_MIN_NUM_DESC) {
netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
ring->tx_pending, ring->rx_pending,
- ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC,
+ ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC_BY_MAC(hw),
ICE_REQ_DESC_MULTIPLE);
return -EINVAL;
}
@@ -3219,9 +3207,12 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
new_rx_cnt);
+ hsplit = kernel_ring->tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED;
+
/* if nothing to do return success */
if (new_tx_cnt == vsi->tx_rings[0]->count &&
- new_rx_cnt == vsi->rx_rings[0]->count) {
+ new_rx_cnt == vsi->rx_rings[0]->count &&
+ hsplit == vsi->hsplit) {
netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
return 0;
}
@@ -3251,6 +3242,8 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
vsi->xdp_rings[i]->count = new_tx_cnt;
vsi->num_tx_desc = (u16)new_tx_cnt;
vsi->num_rx_desc = (u16)new_rx_cnt;
+ vsi->hsplit = hsplit;
+
netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
goto done;
}
@@ -3274,6 +3267,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
tx_rings[i].count = new_tx_cnt;
tx_rings[i].desc = NULL;
tx_rings[i].tx_buf = NULL;
+ tx_rings[i].tstamp_ring = NULL;
tx_rings[i].tx_tstamps = &pf->ptp.port.tx;
err = ice_setup_tx_ring(&tx_rings[i]);
if (err) {
@@ -3333,7 +3327,8 @@ process_rx:
rx_rings[i].count = new_rx_cnt;
rx_rings[i].cached_phctime = pf->ptp.cached_phc_time;
rx_rings[i].desc = NULL;
- rx_rings[i].rx_buf = NULL;
+ rx_rings[i].xdp_buf = NULL;
+
/* this is to allow wr32 to have something to write to
* during early allocation of Rx buffers
*/
@@ -3342,10 +3337,6 @@ process_rx:
err = ice_setup_rx_ring(&rx_rings[i]);
if (err)
goto rx_unwind;
-
- /* allocate Rx buffers */
- err = ice_alloc_rx_bufs(&rx_rings[i],
- ICE_RX_DESC_UNUSED(&rx_rings[i]));
rx_unwind:
if (err) {
while (i) {
@@ -3359,6 +3350,8 @@ rx_unwind:
}
process_link:
+ vsi->hsplit = hsplit;
+
/* Bring interface down, copy in the new ring info, then restore the
* interface. if VSI is up, bring it down and then back up
*/
@@ -3558,15 +3551,15 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
} else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
err = -EAGAIN;
}
@@ -3608,11 +3601,10 @@ static int
ice_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- u32 rss_context = rxfh->rss_context;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
u16 qcount, offset;
- int err, num_tc, i;
+ int err, i;
u8 *lut;
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
@@ -3620,24 +3612,8 @@ ice_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
return -EOPNOTSUPP;
}
- if (rss_context && !ice_is_adq_active(pf)) {
- netdev_err(netdev, "RSS context cannot be non-zero when ADQ is not configured.\n");
- return -EINVAL;
- }
-
- qcount = vsi->mqprio_qopt.qopt.count[rss_context];
- offset = vsi->mqprio_qopt.qopt.offset[rss_context];
-
- if (rss_context && ice_is_adq_active(pf)) {
- num_tc = vsi->mqprio_qopt.qopt.num_tc;
- if (rss_context >= num_tc) {
- netdev_err(netdev, "RSS context:%d > num_tc:%d\n",
- rss_context, num_tc);
- return -EINVAL;
- }
- /* Use channel VSI of given TC */
- vsi = vsi->tc_map_vsi[rss_context];
- }
+ qcount = vsi->mqprio_qopt.qopt.count[0];
+ offset = vsi->mqprio_qopt.qopt.offset[0];
rxfh->hfunc = ETH_RSS_HASH_TOP;
if (vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
@@ -3697,9 +3673,6 @@ ice_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (rxfh->rss_context)
- return -EOPNOTSUPP;
-
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
/* RSS not supported return error here */
netdev_warn(netdev, "RSS is not configured on this VSI!\n");
@@ -3789,8 +3762,7 @@ ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
*/
static int ice_get_max_txq(struct ice_pf *pf)
{
- return min3(pf->num_lan_msix, (u16)num_online_cpus(),
- (u16)pf->hw.func_caps.common_cap.num_txq);
+ return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_txq);
}
/**
@@ -3799,8 +3771,7 @@ static int ice_get_max_txq(struct ice_pf *pf)
*/
static int ice_get_max_rxq(struct ice_pf *pf)
{
- return min3(pf->num_lan_msix, (u16)num_online_cpus(),
- (u16)pf->hw.func_caps.common_cap.num_rxq);
+ return min(num_online_cpus(), pf->hw.func_caps.common_cap.num_rxq);
}
/**
@@ -3818,8 +3789,7 @@ static u32 ice_get_combined_cnt(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
- if (q_vector->rx.rx_ring && q_vector->tx.tx_ring)
- combined++;
+ combined += min(q_vector->num_ring_tx, q_vector->num_ring_rx);
}
return combined;
@@ -3900,7 +3870,7 @@ static int ice_vsi_set_dflt_rss_lut(struct ice_vsi *vsi, int req_rss_size)
err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
if (err)
dev_err(dev, "Cannot set RSS lut, err %d aq_err %s\n", err,
- ice_aq_str(hw->adminq.sq_last_status));
+ libie_aq_str(hw->adminq.sq_last_status));
kfree(lut);
return err;
@@ -3968,11 +3938,11 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
return -EINVAL;
}
- if (pf->adev) {
+ if (pf->cdev_info && pf->cdev_info->adev) {
mutex_lock(&pf->adev_mutex);
- device_lock(&pf->adev->dev);
+ device_lock(&pf->cdev_info->adev->dev);
locked = true;
- if (pf->adev->dev.driver) {
+ if (pf->cdev_info->adev->dev.driver) {
netdev_err(dev, "Cannot change channels when RDMA is active\n");
ret = -EBUSY;
goto adev_unlock;
@@ -3991,7 +3961,7 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
adev_unlock:
if (locked) {
- device_unlock(&pf->adev->dev);
+ device_unlock(&pf->cdev_info->adev->dev);
mutex_unlock(&pf->adev_mutex);
}
return ret;
@@ -4452,9 +4422,7 @@ static int
ice_get_module_info(struct net_device *netdev,
struct ethtool_modinfo *modinfo)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_hw *hw = &pf->hw;
u8 sff8472_comp = 0;
u8 sff8472_swap = 0;
@@ -4526,12 +4494,10 @@ static int
ice_get_module_eeprom(struct net_device *netdev,
struct ethtool_eeprom *ee, u8 *data)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
#define SFF_READ_BLOCK_SIZE 8
u8 value[SFF_READ_BLOCK_SIZE] = { 0 };
u8 addr = ICE_I2C_EEPROM_DEV_ADDR;
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
bool is_sfp = false;
unsigned int i, j;
@@ -4659,10 +4625,12 @@ static int ice_get_port_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
* ice_get_fec_stats - returns FEC correctable, uncorrectable stats per netdev
* @netdev: network interface device structure
* @fec_stats: buffer to hold FEC statistics for given port
+ * @hist: buffer to hold FEC histogram statistics for given port
*
*/
static void ice_get_fec_stats(struct net_device *netdev,
- struct ethtool_fec_stats *fec_stats)
+ struct ethtool_fec_stats *fec_stats,
+ struct ethtool_fec_hist *hist)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_port_topology port_topology;
@@ -4694,6 +4662,98 @@ static void ice_get_fec_stats(struct net_device *netdev,
pi->lport, err);
}
+static void ice_get_eth_mac_stats(struct net_device *netdev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_hw_port_stats *ps = &pf->stats;
+
+ mac_stats->FramesTransmittedOK = ps->eth.tx_unicast +
+ ps->eth.tx_multicast +
+ ps->eth.tx_broadcast;
+ mac_stats->FramesReceivedOK = ps->eth.rx_unicast +
+ ps->eth.rx_multicast +
+ ps->eth.rx_broadcast;
+ mac_stats->FrameCheckSequenceErrors = ps->crc_errors;
+ mac_stats->OctetsTransmittedOK = ps->eth.tx_bytes;
+ mac_stats->OctetsReceivedOK = ps->eth.rx_bytes;
+ mac_stats->MulticastFramesXmittedOK = ps->eth.tx_multicast;
+ mac_stats->BroadcastFramesXmittedOK = ps->eth.tx_broadcast;
+ mac_stats->MulticastFramesReceivedOK = ps->eth.rx_multicast;
+ mac_stats->BroadcastFramesReceivedOK = ps->eth.rx_broadcast;
+ mac_stats->InRangeLengthErrors = ps->rx_len_errors;
+ mac_stats->FrameTooLongErrors = ps->rx_oversize;
+}
+
+static void ice_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_hw_port_stats *ps = &pf->stats;
+
+ pause_stats->tx_pause_frames = ps->link_xon_tx + ps->link_xoff_tx;
+ pause_stats->rx_pause_frames = ps->link_xon_rx + ps->link_xoff_rx;
+}
+
+static const struct ethtool_rmon_hist_range ice_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1522 },
+ { 1523, 9522 },
+ {}
+};
+
+static void ice_get_rmon_stats(struct net_device *netdev,
+ struct ethtool_rmon_stats *rmon,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_hw_port_stats *ps = &pf->stats;
+
+ rmon->undersize_pkts = ps->rx_undersize;
+ rmon->oversize_pkts = ps->rx_oversize;
+ rmon->fragments = ps->rx_fragments;
+ rmon->jabbers = ps->rx_jabber;
+
+ rmon->hist[0] = ps->rx_size_64;
+ rmon->hist[1] = ps->rx_size_127;
+ rmon->hist[2] = ps->rx_size_255;
+ rmon->hist[3] = ps->rx_size_511;
+ rmon->hist[4] = ps->rx_size_1023;
+ rmon->hist[5] = ps->rx_size_1522;
+ rmon->hist[6] = ps->rx_size_big;
+
+ rmon->hist_tx[0] = ps->tx_size_64;
+ rmon->hist_tx[1] = ps->tx_size_127;
+ rmon->hist_tx[2] = ps->tx_size_255;
+ rmon->hist_tx[3] = ps->tx_size_511;
+ rmon->hist_tx[4] = ps->tx_size_1023;
+ rmon->hist_tx[5] = ps->tx_size_1522;
+ rmon->hist_tx[6] = ps->tx_size_big;
+
+ *ranges = ice_rmon_ranges;
+}
+
+/**
+ * ice_get_ts_stats - provide timestamping stats
+ * @netdev: the netdevice pointer from ethtool
+ * @ts_stats: the ethtool data structure to fill in
+ */
+static void ice_get_ts_stats(struct net_device *netdev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_ptp *ptp = &pf->ptp;
+
+ ts_stats->pkts = ptp->tx_hwtstamp_good;
+ ts_stats->err = ptp->tx_hwtstamp_skipped +
+ ptp->tx_hwtstamp_flushed +
+ ptp->tx_hwtstamp_discarded;
+ ts_stats->lost = ptp->tx_hwtstamp_timeouts;
+}
+
#define ICE_ETHTOOL_PFR (ETH_RESET_IRQ | ETH_RESET_DMA | \
ETH_RESET_FILTER | ETH_RESET_OFFLOAD)
@@ -4715,8 +4775,7 @@ static void ice_get_fec_stats(struct net_device *netdev,
*/
static int ice_ethtool_reset(struct net_device *dev, u32 *flags)
{
- struct ice_netdev_priv *np = netdev_priv(dev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(dev);
enum ice_reset_req reset;
switch (*flags) {
@@ -4770,15 +4829,18 @@ static int ice_repr_ethtool_reset(struct net_device *dev, u32 *flags)
}
static const struct ethtool_ops ice_ethtool_ops = {
- .cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE |
ETHTOOL_COALESCE_RX_USECS_HIGH,
- .cap_rss_sym_xor_supported = true,
- .rxfh_per_ctx_key = true,
+ .supported_input_xfrm = RXH_XFRM_SYM_XOR,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
.get_fec_stats = ice_get_fec_stats,
+ .get_eth_mac_stats = ice_get_eth_mac_stats,
+ .get_pause_stats = ice_get_pause_stats,
+ .get_rmon_stats = ice_get_rmon_stats,
+ .get_ts_stats = ice_get_ts_stats,
.get_drvinfo = ice_get_drvinfo,
.get_regs_len = ice_get_regs_len,
.get_regs = ice_get_regs,
@@ -4788,6 +4850,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.set_msglevel = ice_set_msglevel,
.self_test = ice_self_test,
.get_link = ethtool_op_get_link,
+ .get_link_ext_stats = ice_get_link_ext_stats,
.get_eeprom_len = ice_get_eeprom_len,
.get_eeprom = ice_get_eeprom,
.get_coalesce = ice_get_coalesce,
@@ -4800,6 +4863,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_sset_count = ice_get_sset_count,
.get_rxnfc = ice_get_rxnfc,
.set_rxnfc = ice_set_rxnfc,
+ .get_rx_ring_count = ice_get_rx_ring_count,
.get_ringparam = ice_get_ringparam,
.set_ringparam = ice_set_ringparam,
.nway_reset = ice_nway_reset,
@@ -4810,6 +4874,8 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_rxfh_indir_size = ice_get_rxfh_indir_size,
.get_rxfh = ice_get_rxfh,
.set_rxfh = ice_set_rxfh,
+ .get_rxfh_fields = ice_get_rxfh_fields,
+ .set_rxfh_fields = ice_set_rxfh_fields,
.get_channels = ice_get_channels,
.set_channels = ice_set_channels,
.get_ts_info = ice_get_ts_info,
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.h b/drivers/net/ethernet/intel/ice/ice_ethtool.h
index 8f2ad1c172c0..23b2cfbc9684 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.h
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.h
@@ -15,7 +15,6 @@ struct ice_serdes_equalization_to_ethtool {
int rx_equ_post1;
int rx_equ_bflf;
int rx_equ_bfhf;
- int rx_equ_drate;
int rx_equ_ctle_gainhf;
int rx_equ_ctle_gainlf;
int rx_equ_ctle_gaindc;
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index ee9862ddfe15..aceec184e89b 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -1605,22 +1605,19 @@ void ice_fdir_replay_fltrs(struct ice_pf *pf)
*/
int ice_fdir_create_dflt_rules(struct ice_pf *pf)
{
+ static const enum ice_fltr_ptype dflt_rules[] = {
+ ICE_FLTR_PTYPE_NONF_IPV4_TCP, ICE_FLTR_PTYPE_NONF_IPV4_UDP,
+ ICE_FLTR_PTYPE_NONF_IPV6_TCP, ICE_FLTR_PTYPE_NONF_IPV6_UDP,
+ };
int err;
/* Create perfect TCP and UDP rules in hardware. */
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
- if (err)
- return err;
-
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
- if (err)
- return err;
+ for (int i = 0; i < ARRAY_SIZE(dflt_rules); i++) {
+ err = ice_create_init_fdir_rule(pf, dflt_rules[i]);
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_TCP);
- if (err)
- return err;
-
- err = ice_create_init_fdir_rule(pf, ICE_FLTR_PTYPE_NONF_IPV6_UDP);
+ if (err)
+ break;
+ }
return err;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index 26b357c0ae15..b29fbdec9442 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -1121,7 +1121,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
* ice_fdir_has_frag - does flow type have 2 ptypes
* @flow: flow ptype
*
- * returns true is there is a fragment packet for this ptype
+ * Return: true if there is a fragment packet for this ptype
*/
bool ice_fdir_has_frag(enum ice_fltr_ptype flow)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index ed95072ca6e3..c0dbec369366 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -574,9 +574,7 @@ ice_destroy_tunnel_end:
int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
enum ice_tunnel_type tnl_type;
int status;
u16 index;
@@ -598,9 +596,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
unsigned int idx, struct udp_tunnel_info *ti)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
enum ice_tunnel_type tnl_type;
int status;
@@ -1479,7 +1475,7 @@ static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;
hw->blk[blk].masks.count = per_pf;
- hw->blk[blk].masks.first = hw->pf_id * per_pf;
+ hw->blk[blk].masks.first = hw->logical_pf_id * per_pf;
memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks));
@@ -3043,16 +3039,16 @@ ice_disable_fd_swap(struct ice_hw *hw, u8 prof_id)
* the ID value used here.
*/
int
-ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
- const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap)
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
+ unsigned long *ptypes, const struct ice_ptype_attributes *attr,
+ u16 attr_cnt, struct ice_fv_word *es, u16 *masks, bool symm,
+ bool fd_swap)
{
- u32 bytes = DIV_ROUND_UP(ICE_FLOW_PTYPE_MAX, BITS_PER_BYTE);
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
struct ice_prof_map *prof;
- u8 byte = 0;
- u8 prof_id;
int status;
+ u8 prof_id;
+ u16 ptype;
bitmap_zero(ptgs_used, ICE_XLT1_CNT);
@@ -3102,57 +3098,35 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
prof->context = 0;
/* build list of ptgs */
- while (bytes && prof->ptg_cnt < ICE_MAX_PTG_PER_PROFILE) {
- u8 bit;
+ for_each_set_bit(ptype, ptypes, ICE_FLOW_PTYPE_MAX) {
+ u8 ptg;
- if (!ptypes[byte]) {
- bytes--;
- byte++;
+ /* The package should place all ptypes in a non-zero
+ * PTG, so the following call should never fail.
+ */
+ if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
continue;
- }
-
- /* Examine 8 bits per byte */
- for_each_set_bit(bit, (unsigned long *)&ptypes[byte],
- BITS_PER_BYTE) {
- u16 ptype;
- u8 ptg;
- ptype = byte * BITS_PER_BYTE + bit;
-
- /* The package should place all ptypes in a non-zero
- * PTG, so the following call should never fail.
- */
- if (ice_ptg_find_ptype(hw, blk, ptype, &ptg))
- continue;
+ /* If PTG is already added, skip and continue */
+ if (test_bit(ptg, ptgs_used))
+ continue;
- /* If PTG is already added, skip and continue */
- if (test_bit(ptg, ptgs_used))
- continue;
+ set_bit(ptg, ptgs_used);
+ /* Check to see if there are any attributes for this ptype, and
+ * add them if found.
+ */
+ status = ice_add_prof_attrib(prof, ptg, ptype, attr, attr_cnt);
+ if (status == -ENOSPC)
+ break;
+ if (status) {
+ /* This is simply a ptype/PTG with no attribute */
+ prof->ptg[prof->ptg_cnt] = ptg;
+ prof->attr[prof->ptg_cnt].flags = 0;
+ prof->attr[prof->ptg_cnt].mask = 0;
- __set_bit(ptg, ptgs_used);
- /* Check to see there are any attributes for
- * this PTYPE, and add them if found.
- */
- status = ice_add_prof_attrib(prof, ptg, ptype,
- attr, attr_cnt);
- if (status == -ENOSPC)
+ if (++prof->ptg_cnt >= ICE_MAX_PTG_PER_PROFILE)
break;
- if (status) {
- /* This is simple a PTYPE/PTG with no
- * attribute
- */
- prof->ptg[prof->ptg_cnt] = ptg;
- prof->attr[prof->ptg_cnt].flags = 0;
- prof->attr[prof->ptg_cnt].mask = 0;
-
- if (++prof->ptg_cnt >=
- ICE_MAX_PTG_PER_PROFILE)
- break;
- }
}
-
- bytes--;
- byte++;
}
list_add(&prof->list, &hw->blk[blk].es.prof_map);
@@ -3604,6 +3578,19 @@ ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
}
/**
+ * ice_set_tcam_flags - set TCAM flag don't care mask
+ * @mask: mask for flags
+ * @dc_mask: pointer to the don't care mask
+ */
+static void ice_set_tcam_flags(u16 mask, u8 dc_mask[ICE_TCAM_KEY_VAL_SZ])
+{
+ u16 inverted_mask = ~mask;
+
+ /* flags are lowest u16 */
+ put_unaligned_le16(inverted_mask, dc_mask);
+}
+
+/**
* ice_rem_chg_tcam_ent - remove a specific TCAM entry from change list
* @hw: pointer to the HW struct
* @idx: the index of the TCAM entry to remove
@@ -3673,6 +3660,9 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
if (!p)
return -ENOMEM;
+ /* set don't care masks for TCAM flags */
+ ice_set_tcam_flags(tcam->attr.mask, dc_msk);
+
status = ice_tcam_write_entry(hw, blk, tcam->tcam_idx, tcam->prof_id,
tcam->ptg, vsig, 0, tcam->attr.flags,
vl_msk, dc_msk, nm_msk);
@@ -3699,6 +3689,34 @@ err_ice_prof_tcam_ena_dis:
}
/**
+ * ice_ptg_attr_in_use - determine if PTG and attribute pair is in use
+ * @ptg_attr: pointer to the PTG and attribute pair to check
+ * @ptgs_used: bitmap that denotes which PTGs are in use
+ * @attr_used: array of PTG and attributes pairs already used
+ * @attr_cnt: count of entries in the attr_used array
+ *
+ * Return: true if the PTG and attribute pair is in use, false otherwise.
+ */
+static bool
+ice_ptg_attr_in_use(struct ice_tcam_inf *ptg_attr, unsigned long *ptgs_used,
+ struct ice_tcam_inf *attr_used[], u16 attr_cnt)
+{
+ u16 i;
+
+ if (!test_bit(ptg_attr->ptg, ptgs_used))
+ return false;
+
+ /* the PTG is used, so now look for correct attributes */
+ for (i = 0; i < attr_cnt; i++)
+ if (attr_used[i]->ptg == ptg_attr->ptg &&
+ attr_used[i]->attr.flags == ptg_attr->attr.flags &&
+ attr_used[i]->attr.mask == ptg_attr->attr.mask)
+ return true;
+
+ return false;
+}
+
+/**
* ice_adj_prof_priorities - adjust profile based on priorities
* @hw: pointer to the HW struct
* @blk: hardware block
@@ -3710,10 +3728,16 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
struct list_head *chg)
{
DECLARE_BITMAP(ptgs_used, ICE_XLT1_CNT);
+ struct ice_tcam_inf **attr_used;
struct ice_vsig_prof *t;
- int status;
+ u16 attr_used_cnt = 0;
+ int status = 0;
u16 idx;
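+ /* ICE_MAX_PTG_ATTRS (1024) pointers are too large for the stack, so
+ * track the PTG/attribute pairs already in use on the heap.
+ */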
+ attr_used = kcalloc(ICE_MAX_PTG_ATTRS, sizeof(*attr_used), GFP_KERNEL);
+ if (!attr_used)
+ return -ENOMEM;
+
bitmap_zero(ptgs_used, ICE_XLT1_CNT);
idx = vsig & ICE_VSIG_IDX_M;
@@ -3731,11 +3755,15 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
u16 i;
for (i = 0; i < t->tcam_count; i++) {
+ bool used;
+
/* Scan the priorities from newest to oldest.
* Make sure that the newest profiles take priority.
*/
- if (test_bit(t->tcam[i].ptg, ptgs_used) &&
- t->tcam[i].in_use) {
+ used = ice_ptg_attr_in_use(&t->tcam[i], ptgs_used,
+ attr_used, attr_used_cnt);
+
+ if (used && t->tcam[i].in_use) {
/* need to mark this PTG as never match, as it
* was already in use and therefore duplicate
* (and lower priority)
@@ -3745,9 +3773,8 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
&t->tcam[i],
chg);
if (status)
- return status;
- } else if (!test_bit(t->tcam[i].ptg, ptgs_used) &&
- !t->tcam[i].in_use) {
+ goto free_attr_used;
+ } else if (!used && !t->tcam[i].in_use) {
/* need to enable this PTG, as it is not in use
* and not enabled (highest priority)
*/
@@ -3756,15 +3783,21 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
&t->tcam[i],
chg);
if (status)
- return status;
+ goto free_attr_used;
}
/* keep track of used ptgs */
- __set_bit(t->tcam[i].ptg, ptgs_used);
+ set_bit(t->tcam[i].ptg, ptgs_used);
+ if (attr_used_cnt < ICE_MAX_PTG_ATTRS)
+ attr_used[attr_used_cnt++] = &t->tcam[i];
+ else
+ ice_debug(hw, ICE_DBG_INIT, "Warn: ICE_MAX_PTG_ATTRS exceeded\n");
}
}
- return 0;
+free_attr_used:
+ kfree(attr_used);
+ return status;
}
/**
@@ -3847,11 +3880,15 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
p->vsig = vsig;
p->tcam_idx = t->tcam[i].tcam_idx;
+ /* set don't care masks for TCAM flags */
+ ice_set_tcam_flags(t->tcam[i].attr.mask, dc_msk);
+
/* write the TCAM entry */
status = ice_tcam_write_entry(hw, blk, t->tcam[i].tcam_idx,
t->tcam[i].prof_id,
- t->tcam[i].ptg, vsig, 0, 0,
- vl_msk, dc_msk, nm_msk);
+ t->tcam[i].ptg, vsig, 0,
+ t->tcam[i].attr.flags, vl_msk,
+ dc_msk, nm_msk);
if (status) {
devm_kfree(ice_hw_to_dev(hw), p);
goto err_ice_add_prof_id_vsig;
@@ -4165,9 +4202,6 @@ ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk,
u16 vsi_num;
int status;
- if (blk != ICE_BLK_FD)
- return -EINVAL;
-
vsi_num = ice_get_hw_vsi_num(hw, dest_vsi);
status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl);
if (status) {
@@ -4176,6 +4210,9 @@ ice_flow_assoc_fdir_prof(struct ice_hw *hw, enum ice_block blk,
return status;
}
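+ /* Only the FD block also needs the profile added to the FDIR VSI;
+ * other blocks are done once the destination VSI is programmed.
+ */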
+ if (blk != ICE_BLK_FD)
+ return 0;
+
vsi_num = ice_get_hw_vsi_num(hw, fdir_vsi);
status = ice_add_prof_id_flow(hw, blk, vsi_num, hdl);
if (status) {
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 28b0897adf32..ee5d9f9c9d53 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -39,9 +39,10 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype);
/* XLT2/VSI group functions */
int
-ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
- const struct ice_ptype_attributes *attr, u16 attr_cnt,
- struct ice_fv_word *es, u16 *masks, bool symm, bool fd_swap);
+ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
+ unsigned long *ptypes, const struct ice_ptype_attributes *attr,
+ u16 attr_cnt, struct ice_fv_word *es, u16 *masks, bool symm,
+ bool fd_swap);
struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id);
int
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index 817beca591e0..80c9e7c749c2 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -187,6 +187,7 @@ struct ice_prof_map {
};
#define ICE_INVALID_TCAM 0xFFFF
+#define ICE_MAX_PTG_ATTRS 1024
struct ice_tcam_inf {
u16 tcam_idx;
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index d97b751052f2..c9b6d0a84bd1 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -5,6 +5,38 @@
#include "ice_flow.h"
#include <net/gre.h>
+/* Size of known protocol header fields */
+#define ICE_FLOW_FLD_SZ_ETH_TYPE 2
+#define ICE_FLOW_FLD_SZ_VLAN 2
+#define ICE_FLOW_FLD_SZ_IPV4_ADDR 4
+#define ICE_FLOW_FLD_SZ_IPV6_ADDR 16
+#define ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR 4
+#define ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR 6
+#define ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR 8
+#define ICE_FLOW_FLD_SZ_IPV4_ID 2
+#define ICE_FLOW_FLD_SZ_IPV6_ID 4
+#define ICE_FLOW_FLD_SZ_IP_CHKSUM 2
+#define ICE_FLOW_FLD_SZ_TCP_CHKSUM 2
+#define ICE_FLOW_FLD_SZ_UDP_CHKSUM 2
+#define ICE_FLOW_FLD_SZ_SCTP_CHKSUM 4
+#define ICE_FLOW_FLD_SZ_IP_DSCP 1
+#define ICE_FLOW_FLD_SZ_IP_TTL 1
+#define ICE_FLOW_FLD_SZ_IP_PROT 1
+#define ICE_FLOW_FLD_SZ_PORT 2
+#define ICE_FLOW_FLD_SZ_TCP_FLAGS 1
+#define ICE_FLOW_FLD_SZ_ICMP_TYPE 1
+#define ICE_FLOW_FLD_SZ_ICMP_CODE 1
+#define ICE_FLOW_FLD_SZ_ARP_OPER 2
+#define ICE_FLOW_FLD_SZ_GRE_KEYID 4
+#define ICE_FLOW_FLD_SZ_GTP_TEID 4
+#define ICE_FLOW_FLD_SZ_GTP_QFI 2
+#define ICE_FLOW_FLD_SZ_PFCP_SEID 8
+#define ICE_FLOW_FLD_SZ_ESP_SPI 4
+#define ICE_FLOW_FLD_SZ_AH_SPI 4
+#define ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI 4
+#define ICE_FLOW_FLD_SZ_L2TPV2_SESS_ID 2
+#define ICE_FLOW_FLD_SZ_L2TPV2_LEN_SESS_ID 2
+
/* Describe properties of a protocol header field */
struct ice_flow_field_info {
enum ice_flow_seg_hdr hdr;
@@ -20,6 +52,7 @@ struct ice_flow_field_info {
.mask = 0, \
}
+/* QFI: 6-bit field in GTP-U PDU Session Container (3GPP TS 38.415) */
#define ICE_FLOW_FLD_INFO_MSK(_hdr, _offset_bytes, _size_bytes, _mask) { \
.hdr = _hdr, \
.off = (_offset_bytes) * BITS_PER_BYTE, \
@@ -61,7 +94,33 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
/* ICE_FLOW_FIELD_IDX_IPV6_SA */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8, sizeof(struct in6_addr)),
/* ICE_FLOW_FIELD_IDX_IPV6_DA */
- ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, sizeof(struct in6_addr)),
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24, ICE_FLOW_FLD_SZ_IPV6_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV4_CHKSUM */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV4, 10, ICE_FLOW_FLD_SZ_IP_CHKSUM),
+ /* ICE_FLOW_FIELD_IDX_IPV4_FRAG */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
+ ICE_FLOW_FLD_SZ_IPV4_ID),
+ /* ICE_FLOW_FIELD_IDX_IPV6_FRAG */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV_FRAG, 4,
+ ICE_FLOW_FLD_SZ_IPV6_ID),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
+ ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
+ ICE_FLOW_FLD_SZ_IPV6_PRE32_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
+ ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
+ ICE_FLOW_FLD_SZ_IPV6_PRE48_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 8,
+ ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
+ /* ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_IPV6, 24,
+ ICE_FLOW_FLD_SZ_IPV6_PRE64_ADDR),
/* Transport */
/* ICE_FLOW_FIELD_IDX_TCP_SRC_PORT */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 0, sizeof(__be16)),
@@ -76,7 +135,14 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
/* ICE_FLOW_FIELD_IDX_SCTP_DST_PORT */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 2, sizeof(__be16)),
/* ICE_FLOW_FIELD_IDX_TCP_FLAGS */
- ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, 1),
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 13, ICE_FLOW_FLD_SZ_TCP_FLAGS),
+ /* ICE_FLOW_FIELD_IDX_TCP_CHKSUM */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_TCP, 16, ICE_FLOW_FLD_SZ_TCP_CHKSUM),
+ /* ICE_FLOW_FIELD_IDX_UDP_CHKSUM */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_UDP, 6, ICE_FLOW_FLD_SZ_UDP_CHKSUM),
+ /* ICE_FLOW_FIELD_IDX_SCTP_CHKSUM */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_SCTP, 8,
+ ICE_FLOW_FLD_SZ_SCTP_CHKSUM),
/* ARP */
/* ICE_FLOW_FIELD_IDX_ARP_SIP */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_ARP, 14, sizeof(struct in_addr)),
@@ -108,9 +174,17 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_EH, 22, sizeof(__be16),
0x3f00),
/* ICE_FLOW_FIELD_IDX_GTPU_UP_TEID */
- ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12, sizeof(__be32)),
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_UP, 12,
+ ICE_FLOW_FLD_SZ_GTP_TEID),
+ /* ICE_FLOW_FIELD_IDX_GTPU_UP_QFI */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_UP, 22,
+ ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
/* ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID */
- ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12, sizeof(__be32)),
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_GTPU_DWN, 12,
+ ICE_FLOW_FLD_SZ_GTP_TEID),
+ /* ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI */
+ ICE_FLOW_FLD_INFO_MSK(ICE_FLOW_SEG_HDR_GTPU_DWN, 22,
+ ICE_FLOW_FLD_SZ_GTP_QFI, 0x3f00),
/* PPPoE */
/* ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID */
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_PPPOE, 2, sizeof(__be16)),
@@ -128,7 +202,16 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_AH, 4, sizeof(__be32)),
/* NAT_T_ESP */
/* ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI */
- ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8, sizeof(__be32)),
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_NAT_T_ESP, 8,
+ ICE_FLOW_FLD_SZ_NAT_T_ESP_SPI),
+ /* L2TPV2 */
+ /* ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV2, 12,
+ ICE_FLOW_FLD_SZ_L2TPV2_SESS_ID),
+ /* L2TPV2_LEN */
+ /* ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID */
+ ICE_FLOW_FLD_INFO(ICE_FLOW_SEG_HDR_L2TPV2, 14,
+ ICE_FLOW_FLD_SZ_L2TPV2_LEN_SESS_ID),
};
/* Bitmaps indicating relevant packet types for a particular protocol header
@@ -137,9 +220,9 @@ struct ice_flow_field_info ice_flds_info[ICE_FLOW_FIELD_IDX_MAX] = {
*/
static const u32 ice_ptypes_mac_ofos[] = {
0xFDC00846, 0xBFBF7F7E, 0xF70001DF, 0xFEFDFDFB,
- 0x0000077E, 0x00000000, 0x00000000, 0x00000000,
- 0x00400000, 0x03FFF000, 0x7FFFFFE0, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x0000077E, 0x000003FF, 0x00000000, 0x00000000,
+ 0x00400000, 0x03FFF000, 0xFFFFFFE0, 0x00000707,
+ 0xFFFFF000, 0x000003FF, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -162,10 +245,10 @@ static const u32 ice_ptypes_macvlan_il[] = {
* include IPv4 other PTYPEs
*/
static const u32 ice_ptypes_ipv4_ofos[] = {
- 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x1D800000, 0xBFBF7800, 0x000001DF, 0x00000000,
0x00000000, 0x00000155, 0x00000000, 0x00000000,
- 0x00000000, 0x000FC000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x000FC000, 0x000002A0, 0x00000000,
+ 0x00015000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -176,10 +259,10 @@ static const u32 ice_ptypes_ipv4_ofos[] = {
* IPv4 other PTYPEs
*/
static const u32 ice_ptypes_ipv4_ofos_all[] = {
- 0x1DC00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x1D800000, 0x27BF7800, 0x00000000, 0x00000000,
0x00000000, 0x00000155, 0x00000000, 0x00000000,
- 0x00000000, 0x000FC000, 0x83E0F800, 0x00000101,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x000FC000, 0x83E0FAA0, 0x00000101,
+ 0x3FFD5000, 0x00000000, 0x02FBEFBC, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -191,7 +274,7 @@ static const u32 ice_ptypes_ipv4_il[] = {
0xE0000000, 0xB807700E, 0x80000003, 0xE01DC03B,
0x0000000E, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x001FF800, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0xC0FC0000, 0x0000000F, 0xBC0BC0BC, 0x00000BC0,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -202,10 +285,10 @@ static const u32 ice_ptypes_ipv4_il[] = {
* include IPv6 other PTYPEs
*/
static const u32 ice_ptypes_ipv6_ofos[] = {
- 0x00000000, 0x00000000, 0x77000000, 0x10002000,
+ 0x00000000, 0x00000000, 0x76000000, 0x10002000,
0x00000000, 0x000002AA, 0x00000000, 0x00000000,
- 0x00000000, 0x03F00000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x03F00000, 0x00000540, 0x00000000,
+ 0x0002A000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -216,10 +299,10 @@ static const u32 ice_ptypes_ipv6_ofos[] = {
* IPv6 other PTYPEs
*/
static const u32 ice_ptypes_ipv6_ofos_all[] = {
- 0x00000000, 0x00000000, 0x77000000, 0x10002000,
- 0x00000000, 0x000002AA, 0x00000000, 0x00000000,
- 0x00080F00, 0x03F00000, 0x7C1F0000, 0x00000206,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x76000000, 0xFEFDE000,
+ 0x0000077E, 0x000002AA, 0x00000000, 0x00000000,
+ 0x00000000, 0x03F00000, 0x7C1F0540, 0x00000206,
+ 0xC002A000, 0x000003FF, 0xBC000000, 0x0002FBEF,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -231,7 +314,7 @@ static const u32 ice_ptypes_ipv6_il[] = {
0x00000000, 0x03B80770, 0x000001DC, 0x0EE00000,
0x00000770, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x7FE00000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x3F000000, 0x000003F0, 0x02F02F00, 0x0002F02F,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -304,8 +387,8 @@ static const u32 ice_ptypes_ipv6_il_no_l4[] = {
static const u32 ice_ptypes_udp_il[] = {
0x81000000, 0x20204040, 0x04000010, 0x80810102,
0x00000040, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00410000, 0x90842000, 0x00000007,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00410000, 0x908427E0, 0x00000007,
+ 0x0413F000, 0x00000041, 0x10410410, 0x00004104,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -317,7 +400,7 @@ static const u32 ice_ptypes_tcp_il[] = {
0x04000000, 0x80810102, 0x10000040, 0x02040408,
0x00000102, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00820000, 0x21084000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x08200000, 0x00000082, 0x20820820, 0x00008208,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -329,7 +412,7 @@ static const u32 ice_ptypes_sctp_il[] = {
0x08000000, 0x01020204, 0x20000081, 0x04080810,
0x00000204, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x01040000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x10400000, 0x00000104, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -353,7 +436,7 @@ static const u32 ice_ptypes_icmp_il[] = {
0x00000000, 0x02040408, 0x40000102, 0x08101020,
0x00000408, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x42108000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x20800000, 0x00000208, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -365,7 +448,7 @@ static const u32 ice_ptypes_gre_of[] = {
0x00000000, 0xBFBF7800, 0x000001DF, 0xFEFDE000,
0x0000017E, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0xBEFBEFBC, 0x0002FBEF,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -374,7 +457,7 @@ static const u32 ice_ptypes_gre_of[] = {
/* Packet types for packets with an Innermost/Last MAC header */
static const u32 ice_ptypes_mac_il[] = {
- 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x20000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -388,7 +471,7 @@ static const u32 ice_ptypes_mac_il[] = {
static const u32 ice_ptypes_gtpc[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
- 0x00000000, 0x00000000, 0x00000180, 0x00000000,
+ 0x00000000, 0x00000000, 0x000001E0, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -1421,7 +1504,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
}
/* Add a HW profile for this flow profile */
- status = ice_add_prof(hw, blk, prof_id, (u8 *)params->ptypes,
+ status = ice_add_prof(hw, blk, prof_id, params->ptypes,
params->attr, params->attr_cnt, params->es,
params->mask, symm, true);
if (status) {
@@ -1617,7 +1700,7 @@ ice_flow_set_parser_prof(struct ice_hw *hw, u16 dest_vsi, u16 fdir_vsi,
break;
}
- status = ice_add_prof(hw, blk, id, (u8 *)prof->ptypes,
+ status = ice_add_prof(hw, blk, id, prof->ptypes,
params->attr, params->attr_cnt,
params->es, params->mask, false, false);
if (status)
@@ -2325,6 +2408,130 @@ static void ice_rss_set_symm(struct ice_hw *hw, struct ice_flow_prof *prof)
}
/**
+ * ice_rss_cfg_raw_symm - Configure symmetric RSS for a raw parser profile
+ * @hw: pointer to the HW struct
+ * @prof: parser profile describing extracted FV (field vector) entries
+ * @prof_id: RSS profile identifier used to program symmetry registers
+ *
+ * The routine scans the parser profile's FV entries and looks for
+ * direction-sensitive pairs (L3 src/dst, L4 src/dst). When a pair is found,
+ * it programs XOR-based symmetry so that flows hash identically regardless
+ * of packet direction. This preserves CPU affinity for the same 5-tuple.
+ *
+ * Notes:
+ * - The size of each logical field (IPv4/IPv6 address, L4 port) is expressed
+ * in units of ICE_FLOW_FV_EXTRACT_SZ so we can step across fv[] correctly.
+ * - We guard against out-of-bounds access before looking at fv[i + len].
+ */
+static void ice_rss_cfg_raw_symm(struct ice_hw *hw,
+ const struct ice_parser_profile *prof,
+ u64 prof_id)
+{
+ for (size_t i = 0; i < prof->fv_num; i++) {
+ u8 proto_id = prof->fv[i].proto_id;
+ u16 src_off = 0, dst_off = 0;
+ size_t src_idx, dst_idx;
+ bool is_matched = false;
+ unsigned int len = 0;
+
+ switch (proto_id) {
+ /* IPv4 address pairs (outer/inner variants) */
+ case ICE_PROT_IPV4_OF_OR_S:
+ case ICE_PROT_IPV4_IL:
+ case ICE_PROT_IPV4_IL_IL:
+ len = ICE_FLOW_FLD_SZ_IPV4_ADDR /
+ ICE_FLOW_FV_EXTRACT_SZ;
+ src_off = ICE_FLOW_FIELD_IPV4_SRC_OFFSET;
+ dst_off = ICE_FLOW_FIELD_IPV4_DST_OFFSET;
+ break;
+
+ /* IPv6 address pairs (outer/inner variants) */
+ case ICE_PROT_IPV6_OF_OR_S:
+ case ICE_PROT_IPV6_IL:
+ case ICE_PROT_IPV6_IL_IL:
+ len = ICE_FLOW_FLD_SZ_IPV6_ADDR /
+ ICE_FLOW_FV_EXTRACT_SZ;
+ src_off = ICE_FLOW_FIELD_IPV6_SRC_OFFSET;
+ dst_off = ICE_FLOW_FIELD_IPV6_DST_OFFSET;
+ break;
+
+ /* L4 port pairs (TCP/UDP/SCTP) */
+ case ICE_PROT_TCP_IL:
+ case ICE_PROT_UDP_IL_OR_S:
+ case ICE_PROT_SCTP_IL:
+ len = ICE_FLOW_FLD_SZ_PORT / ICE_FLOW_FV_EXTRACT_SZ;
+ src_off = ICE_FLOW_FIELD_SRC_PORT_OFFSET;
+ dst_off = ICE_FLOW_FIELD_DST_PORT_OFFSET;
+ break;
+
+ default:
+ continue;
+ }
+
+ /* Bounds check before accessing fv[i + len]. */
+ if (i + len >= prof->fv_num)
+ continue;
+
+ /* Verify src/dst pairing for this protocol id. */
+ is_matched = prof->fv[i].offset == src_off &&
+ prof->fv[i + len].proto_id == proto_id &&
+ prof->fv[i + len].offset == dst_off;
+ if (!is_matched)
+ continue;
+
+ /* Program XOR symmetry for this field pair. */
+ src_idx = i;
+ dst_idx = i + len;
+
+ ice_rss_config_xor(hw, prof_id, src_idx, dst_idx, len);
+
+ /* Skip past the pair just handled; the loop's ++i then lands on
+ * the first entry after the pair.
+ */
+ i += 2 * len - 1;
+ }
+}
+
+/* Max number of symmetric hash (GLQF_HSYMM) registers per packet profile */
+#define ICE_SYMM_REG_INDEX_MAX 6
+
+/**
+ * ice_rss_update_raw_symm - update symmetric hash configuration for raw pattern
+ * @hw: pointer to the hardware structure
+ * @cfg: configure parameters for raw pattern
+ * @id: profile tracking ID
+ *
+ * Update symmetric hash configuration for raw pattern if required.
+ * Otherwise only clear to default.
+ */
+void
+ice_rss_update_raw_symm(struct ice_hw *hw,
+ struct ice_rss_raw_cfg *cfg, u64 id)
+{
+ struct ice_prof_map *map;
+ u8 prof_id, m;
+
+ mutex_lock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
+ map = ice_search_prof_id(hw, ICE_BLK_RSS, id);
+ if (map)
+ prof_id = map->prof_id;
+ mutex_unlock(&hw->blk[ICE_BLK_RSS].es.prof_map_lock);
+ if (!map)
+ return;
+ /* clear to default */
+ for (m = 0; m < ICE_SYMM_REG_INDEX_MAX; m++)
+ wr32(hw, GLQF_HSYMM(prof_id, m), 0);
+
+ if (cfg->symm)
+ ice_rss_cfg_raw_symm(hw, &cfg->prof, prof_id);
+}
+
+/**
* ice_add_rss_cfg_sync - add an RSS configuration
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
@@ -2573,38 +2780,38 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
* convert its values to their appropriate flow L3, L4 values.
*/
#define ICE_FLOW_AVF_RSS_IPV4_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4))
#define ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP))
#define ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP))
#define ICE_FLOW_AVF_RSS_ALL_IPV4_MASKS \
(ICE_FLOW_AVF_RSS_TCP_IPV4_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS | \
- ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP))
+ ICE_FLOW_AVF_RSS_IPV4_MASKS | BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP))
#define ICE_FLOW_AVF_RSS_IPV6_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6))
#define ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP))
#define ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS \
- (BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP))
+ (BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP))
#define ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS \
(ICE_FLOW_AVF_RSS_TCP_IPV6_MASKS | ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS | \
- ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP))
+ ICE_FLOW_AVF_RSS_IPV6_MASKS | BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP))
/**
* ice_add_avf_rss_cfg - add an RSS configuration for AVF driver
* @hw: pointer to the hardware structure
* @vsi: VF's VSI
- * @avf_hash: hash bit fields (ICE_AVF_FLOW_FIELD_*) to configure
+ * @avf_hash: hash bit fields (LIBIE_FILTER_PCTYPE_*) to configure
*
* This function will take the hash bitmap provided by the AVF driver via a
* message, convert it to ICE-compatible values, and configure RSS flow
@@ -2621,8 +2828,7 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash)
return -EINVAL;
vsi_handle = vsi->idx;
- if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
- !ice_is_vsi_valid(hw, vsi_handle))
+ if (!avf_hash || !ice_is_vsi_valid(hw, vsi_handle))
return -EINVAL;
/* Make sure no unsupported bits are specified */
@@ -2658,11 +2864,11 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash)
ICE_FLOW_HASH_UDP_PORT;
hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV4_MASKS;
} else if (hash_flds &
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP)) {
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP)) {
rss_hash = ICE_FLOW_HASH_IPV4 |
ICE_FLOW_HASH_SCTP_PORT;
hash_flds &=
- ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP);
+ ~BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP);
}
} else if (hash_flds & ICE_FLOW_AVF_RSS_ALL_IPV6_MASKS) {
if (hash_flds & ICE_FLOW_AVF_RSS_IPV6_MASKS) {
@@ -2679,11 +2885,11 @@ int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 avf_hash)
ICE_FLOW_HASH_UDP_PORT;
hash_flds &= ~ICE_FLOW_AVF_RSS_UDP_IPV6_MASKS;
} else if (hash_flds &
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP)) {
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP)) {
rss_hash = ICE_FLOW_HASH_IPV6 |
ICE_FLOW_HASH_SCTP_PORT;
hash_flds &=
- ~BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP);
+ ~BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP);
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 6cb7bb879c98..6c6cdc8addb1 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -4,6 +4,8 @@
#ifndef _ICE_FLOW_H_
#define _ICE_FLOW_H_
+#include <linux/net/intel/libie/pctype.h>
+
#include "ice_flex_type.h"
#include "ice_parser.h"
@@ -20,6 +22,15 @@
#define ICE_FLOW_HASH_IPV6 \
(BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \
BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA))
+#define ICE_FLOW_HASH_IPV6_PRE32 \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA))
+#define ICE_FLOW_HASH_IPV6_PRE48 \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA))
+#define ICE_FLOW_HASH_IPV6_PRE64 \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA) | \
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA))
#define ICE_FLOW_HASH_TCP_PORT \
(BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \
BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT))
@@ -38,6 +49,33 @@
#define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT)
#define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT)
+#define ICE_HASH_TCP_IPV6_PRE32 \
+ (ICE_FLOW_HASH_IPV6_PRE32 | ICE_FLOW_HASH_TCP_PORT)
+#define ICE_HASH_UDP_IPV6_PRE32 \
+ (ICE_FLOW_HASH_IPV6_PRE32 | ICE_FLOW_HASH_UDP_PORT)
+#define ICE_HASH_SCTP_IPV6_PRE32 \
+ (ICE_FLOW_HASH_IPV6_PRE32 | ICE_FLOW_HASH_SCTP_PORT)
+#define ICE_HASH_TCP_IPV6_PRE48 \
+ (ICE_FLOW_HASH_IPV6_PRE48 | ICE_FLOW_HASH_TCP_PORT)
+#define ICE_HASH_UDP_IPV6_PRE48 \
+ (ICE_FLOW_HASH_IPV6_PRE48 | ICE_FLOW_HASH_UDP_PORT)
+#define ICE_HASH_SCTP_IPV6_PRE48 \
+ (ICE_FLOW_HASH_IPV6_PRE48 | ICE_FLOW_HASH_SCTP_PORT)
+#define ICE_HASH_TCP_IPV6_PRE64 \
+ (ICE_FLOW_HASH_IPV6_PRE64 | ICE_FLOW_HASH_TCP_PORT)
+#define ICE_HASH_UDP_IPV6_PRE64 \
+ (ICE_FLOW_HASH_IPV6_PRE64 | ICE_FLOW_HASH_UDP_PORT)
+#define ICE_HASH_SCTP_IPV6_PRE64 \
+ (ICE_FLOW_HASH_IPV6_PRE64 | ICE_FLOW_HASH_SCTP_PORT)
+
+#define ICE_FLOW_HASH_GTP_TEID \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))
+
+#define ICE_FLOW_HASH_GTP_IPV4_TEID \
+ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_TEID)
+#define ICE_FLOW_HASH_GTP_IPV6_TEID \
+ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_TEID)
+
#define ICE_FLOW_HASH_GTP_C_TEID \
(BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID))
@@ -126,6 +164,23 @@
#define ICE_FLOW_HASH_NAT_T_ESP_IPV6_SPI \
(ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_NAT_T_ESP_SPI)
+#define ICE_FLOW_HASH_L2TPV2_SESS_ID \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID))
+#define ICE_FLOW_HASH_L2TPV2_SESS_ID_ETH \
+ (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_L2TPV2_SESS_ID)
+
+#define ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID \
+ (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID))
+#define ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID_ETH \
+ (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_L2TPV2_LEN_SESS_ID)
+
+#define ICE_FLOW_FIELD_IPV4_SRC_OFFSET 12
+#define ICE_FLOW_FIELD_IPV4_DST_OFFSET 16
+#define ICE_FLOW_FIELD_IPV6_SRC_OFFSET 8
+#define ICE_FLOW_FIELD_IPV6_DST_OFFSET 24
+#define ICE_FLOW_FIELD_SRC_PORT_OFFSET 0
+#define ICE_FLOW_FIELD_DST_PORT_OFFSET 2
+
/* Protocol header fields within a packet segment. A segment consists of one or
* more protocol headers that make up a logical group of protocol headers. Each
* logical group of protocol headers encapsulates or is encapsulated using/by
@@ -158,10 +213,13 @@ enum ice_flow_seg_hdr {
ICE_FLOW_SEG_HDR_AH = 0x00200000,
ICE_FLOW_SEG_HDR_NAT_T_ESP = 0x00400000,
ICE_FLOW_SEG_HDR_ETH_NON_IP = 0x00800000,
+ ICE_FLOW_SEG_HDR_GTPU_NON_IP = 0x01000000,
+ ICE_FLOW_SEG_HDR_L2TPV2 = 0x10000000,
/* The following is an additive bit for ICE_FLOW_SEG_HDR_IPV4 and
- * ICE_FLOW_SEG_HDR_IPV6 which include the IPV4 other PTYPEs
+ * ICE_FLOW_SEG_HDR_IPV6.
*/
- ICE_FLOW_SEG_HDR_IPV_OTHER = 0x20000000,
+ ICE_FLOW_SEG_HDR_IPV_FRAG = 0x40000000,
+ ICE_FLOW_SEG_HDR_IPV_OTHER = 0x80000000,
};
/* These segments all have the same PTYPES, but are otherwise distinguished by
@@ -198,6 +256,15 @@ enum ice_flow_field {
ICE_FLOW_FIELD_IDX_IPV4_DA,
ICE_FLOW_FIELD_IDX_IPV6_SA,
ICE_FLOW_FIELD_IDX_IPV6_DA,
+ ICE_FLOW_FIELD_IDX_IPV4_CHKSUM,
+ ICE_FLOW_FIELD_IDX_IPV4_ID,
+ ICE_FLOW_FIELD_IDX_IPV6_ID,
+ ICE_FLOW_FIELD_IDX_IPV6_PRE32_SA,
+ ICE_FLOW_FIELD_IDX_IPV6_PRE32_DA,
+ ICE_FLOW_FIELD_IDX_IPV6_PRE48_SA,
+ ICE_FLOW_FIELD_IDX_IPV6_PRE48_DA,
+ ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA,
+ ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA,
/* L4 */
ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
ICE_FLOW_FIELD_IDX_TCP_DST_PORT,
@@ -206,6 +273,9 @@ enum ice_flow_field {
ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
ICE_FLOW_FIELD_IDX_SCTP_DST_PORT,
ICE_FLOW_FIELD_IDX_TCP_FLAGS,
+ ICE_FLOW_FIELD_IDX_TCP_CHKSUM,
+ ICE_FLOW_FIELD_IDX_UDP_CHKSUM,
+ ICE_FLOW_FIELD_IDX_SCTP_CHKSUM,
/* ARP */
ICE_FLOW_FIELD_IDX_ARP_SIP,
ICE_FLOW_FIELD_IDX_ARP_DIP,
@@ -226,13 +296,13 @@ enum ice_flow_field {
ICE_FLOW_FIELD_IDX_GTPU_EH_QFI,
/* GTPU_UP */
ICE_FLOW_FIELD_IDX_GTPU_UP_TEID,
+ ICE_FLOW_FIELD_IDX_GTPU_UP_QFI,
/* GTPU_DWN */
ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID,
- /* PPPoE */
+ ICE_FLOW_FIELD_IDX_GTPU_DWN_QFI,
ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID,
/* PFCP */
ICE_FLOW_FIELD_IDX_PFCP_SEID,
- /* L2TPv3 */
ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID,
/* ESP */
ICE_FLOW_FIELD_IDX_ESP_SPI,
@@ -240,10 +310,16 @@ enum ice_flow_field {
ICE_FLOW_FIELD_IDX_AH_SPI,
/* NAT_T ESP */
ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI,
+ /* L2TPV2 SESSION ID */
+ ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID,
+ /* L2TPV2_LEN SESSION ID */
+ ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID,
/* The total number of enums must not exceed 64 */
ICE_FLOW_FIELD_IDX_MAX
};
+static_assert(ICE_FLOW_FIELD_IDX_MAX <= 64, "The total number of enums must not exceed 64");
+
#define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)
#define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)
#define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)
@@ -264,57 +340,27 @@ enum ice_flow_field {
#define ICE_FLOW_HASH_FLD_GTPU_DWN_TEID \
BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID)
-/* Flow headers and fields for AVF support */
-enum ice_flow_avf_hdr_field {
- /* Values 0 - 28 are reserved for future use */
- ICE_AVF_FLOW_FIELD_INVALID = 0,
- ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP = 29,
- ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP,
- ICE_AVF_FLOW_FIELD_IPV4_UDP,
- ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK,
- ICE_AVF_FLOW_FIELD_IPV4_TCP,
- ICE_AVF_FLOW_FIELD_IPV4_SCTP,
- ICE_AVF_FLOW_FIELD_IPV4_OTHER,
- ICE_AVF_FLOW_FIELD_FRAG_IPV4,
- /* Values 37-38 are reserved */
- ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP = 39,
- ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP,
- ICE_AVF_FLOW_FIELD_IPV6_UDP,
- ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK,
- ICE_AVF_FLOW_FIELD_IPV6_TCP,
- ICE_AVF_FLOW_FIELD_IPV6_SCTP,
- ICE_AVF_FLOW_FIELD_IPV6_OTHER,
- ICE_AVF_FLOW_FIELD_FRAG_IPV6,
- ICE_AVF_FLOW_FIELD_RSVD47,
- ICE_AVF_FLOW_FIELD_FCOE_OX,
- ICE_AVF_FLOW_FIELD_FCOE_RX,
- ICE_AVF_FLOW_FIELD_FCOE_OTHER,
- /* Values 51-62 are reserved */
- ICE_AVF_FLOW_FIELD_L2_PAYLOAD = 63,
- ICE_AVF_FLOW_FIELD_MAX
-};
-
/* Supported RSS offloads. This macro is defined to support
- * VIRTCHNL_OP_GET_RSS_HENA_CAPS ops. PF driver sends the RSS hardware
+ * VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS ops. PF driver sends the RSS hardware
* capabilities to the caller of these ops.
*/
-#define ICE_DEFAULT_RSS_HENA ( \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \
- BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP))
+#define ICE_DEFAULT_RSS_HASHCFG ( \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
+ BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
enum ice_rss_cfg_hdr_type {
ICE_RSS_OUTER_HEADERS, /* take outer headers as inputset. */
@@ -324,6 +370,10 @@ enum ice_rss_cfg_hdr_type {
/* take inner headers as inputset for packet with outer ipv6. */
ICE_RSS_INNER_HEADERS_W_OUTER_IPV6,
/* take outer headers first then inner headers as inputset */
+ /* take inner as inputset for GTPoGRE with outer IPv4 + GRE. */
+ ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE,
+ /* take inner as inputset for GTPoGRE with outer IPv6 + GRE. */
+ ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE,
ICE_RSS_ANY_HEADERS
};
@@ -434,6 +484,12 @@ struct ice_flow_prof {
bool symm; /* Symmetric Hash for RSS */
};
+struct ice_rss_raw_cfg {
+ struct ice_parser_profile prof;
+ bool raw_ena;
+ bool symm;
+};
+
struct ice_rss_cfg {
struct list_head l_entry;
/* bitmap of VSIs added to the RSS entry */
@@ -472,4 +528,6 @@ int ice_add_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi,
int ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
const struct ice_rss_hash_cfg *cfg);
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm);
+void ice_rss_update_raw_symm(struct ice_hw *hw,
+ struct ice_rss_raw_cfg *cfg, u64 id);
#endif /* _ICE_FLOW_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index 2702a0da5c3e..973a13d3d92a 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -6,6 +6,7 @@
#include <linux/crc32.h>
#include <linux/pldmfw.h>
#include "ice.h"
+#include "ice_lib.h"
#include "ice_fw_update.h"
struct ice_fwu_priv {
@@ -67,7 +68,7 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length)
if (status) {
dev_err(dev, "Failed to send record package data to firmware, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to record package data to firmware");
return -EIO;
}
@@ -125,6 +126,10 @@ ice_check_component_response(struct ice_pf *pf, u16 id, u8 response, u8 code,
case ICE_AQ_NVM_PASS_COMP_CAN_NOT_BE_UPDATED:
dev_info(dev, "firmware has rejected updating %s\n", component);
break;
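+ /* In recovery mode the firmware can only partially validate the
+ * component, so a partial check result is acceptable there.
+ */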
+ case ICE_AQ_NVM_PASS_COMP_PARTIAL_CHECK:
+ if (ice_is_recovery_mode(&pf->hw))
+ return 0;
+ break;
}
switch (code) {
@@ -252,7 +257,7 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
if (status) {
dev_err(dev, "Failed to transfer component table to firmware, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to transfer component table to firmware");
return -EIO;
}
@@ -294,7 +299,8 @@ int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
struct device *dev = ice_pf_to_dev(pf);
struct ice_aq_task task = {};
struct ice_hw *hw = &pf->hw;
- struct ice_aq_desc *desc;
+ struct libie_aq_desc *desc;
+ struct ice_aqc_nvm *cmd;
u32 completion_offset;
int err;
@@ -308,7 +314,7 @@ int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
if (err) {
dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %d aq_err %s\n",
module, block_size, offset, err,
- ice_aq_str(hw->adminq.sq_last_status));
+ libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to program flash module");
return -EIO;
}
@@ -328,11 +334,12 @@ int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
}
desc = &task.event.desc;
- completion_module = le16_to_cpu(desc->params.nvm.module_typeid);
+ cmd = libie_aq_raw(desc);
+ completion_module = le16_to_cpu(cmd->module_typeid);
completion_retval = le16_to_cpu(desc->retval);
- completion_offset = le16_to_cpu(desc->params.nvm.offset_low);
- completion_offset |= desc->params.nvm.offset_high << 16;
+ completion_offset = le16_to_cpu(cmd->offset_low);
+ completion_offset |= cmd->offset_high << 16;
if (completion_module != module) {
dev_err(dev, "Unexpected module_typeid in write completion: got 0x%x, expected 0x%x\n",
@@ -351,7 +358,7 @@ int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
if (completion_retval) {
dev_err(dev, "Firmware failed to flash module 0x%02x with block of size %u at offset %u, err %s\n",
module, block_size, offset,
- ice_aq_str((enum ice_aq_err)completion_retval));
+ libie_aq_str((enum libie_aq_err)completion_retval));
NL_SET_ERR_MSG_MOD(extack, "Firmware failed to program flash module");
return -EIO;
}
@@ -364,7 +371,7 @@ int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
*/
if (reset_level && last_cmd && module == ICE_SR_1ST_NVM_BANK_PTR) {
if (hw->dev_caps.common_cap.pcie_reset_avoidance) {
- *reset_level = desc->params.nvm.cmd_flags &
+ *reset_level = cmd->cmd_flags &
ICE_AQC_NVM_RESET_LVL_M;
dev_dbg(dev, "Firmware reported required reset level as %u\n",
*reset_level);
@@ -482,7 +489,8 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
struct device *dev = ice_pf_to_dev(pf);
struct ice_aq_task task = {};
struct ice_hw *hw = &pf->hw;
- struct ice_aq_desc *desc;
+ struct libie_aq_desc *desc;
+ struct ice_aqc_nvm *cmd;
struct devlink *devlink;
int err;
@@ -498,7 +506,7 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
if (err) {
dev_err(dev, "Failed to erase %s (module 0x%02x), err %d aq_err %s\n",
component, module, err,
- ice_aq_str(hw->adminq.sq_last_status));
+ libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to erase flash module");
err = -EIO;
goto out_notify_devlink;
@@ -513,7 +521,8 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
}
desc = &task.event.desc;
- completion_module = le16_to_cpu(desc->params.nvm.module_typeid);
+ cmd = libie_aq_raw(desc);
+ completion_module = le16_to_cpu(cmd->module_typeid);
completion_retval = le16_to_cpu(desc->retval);
if (completion_module != module) {
@@ -525,9 +534,9 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
}
if (completion_retval) {
- dev_err(dev, "Firmware failed to erase %s (module 0x02%x), aq_err %s\n",
+ dev_err(dev, "Firmware failed to erase %s (module 0x%02x), aq_err %s\n",
component, module,
- ice_aq_str((enum ice_aq_err)completion_retval));
+ libie_aq_str((enum libie_aq_err)completion_retval));
NL_SET_ERR_MSG_MOD(extack, "Firmware failed to erase flash");
err = -EIO;
goto out_notify_devlink;
@@ -574,7 +583,7 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
err = ice_nvm_write_activate(hw, activate_flags, &response_flags);
if (err) {
dev_err(dev, "Failed to switch active flash banks, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to switch active flash banks");
return -EIO;
}
@@ -606,7 +615,7 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
completion_retval = le16_to_cpu(task.event.desc.retval);
if (completion_retval) {
dev_err(dev, "Firmware failed to switch active flash banks aq_err %s\n",
- ice_aq_str((enum ice_aq_err)completion_retval));
+ libie_aq_str((enum libie_aq_err)completion_retval));
NL_SET_ERR_MSG_MOD(extack, "Firmware failed to switch active flash banks");
return -EIO;
}
@@ -944,7 +953,7 @@ ice_cancel_pending_update(struct ice_pf *pf, const char *component,
err = ice_acquire_nvm(hw, ICE_RES_WRITE);
if (err) {
dev_err(dev, "Failed to acquire device flash lock, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
return err;
}
@@ -1004,13 +1013,20 @@ int ice_devlink_flash_update(struct devlink *devlink,
return -EOPNOTSUPP;
}
- if (!hw->dev_caps.common_cap.nvm_unified_update) {
+ if (!hw->dev_caps.common_cap.nvm_unified_update && !ice_is_recovery_mode(hw)) {
NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update");
return -EOPNOTSUPP;
}
memset(&priv, 0, sizeof(priv));
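+ /* Only the "fw.mgmt" component can be flashed on its own; any other
+ * named component is rejected.
+ */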
+ if (params->component && strcmp(params->component, "fw.mgmt") == 0) {
+ priv.context.mode = PLDMFW_UPDATE_MODE_SINGLE_COMPONENT;
+ priv.context.component_identifier = NVM_COMP_ID_NVM;
+ } else if (params->component) {
+ return -EOPNOTSUPP;
+ }
+
/* the E822 device needs a slightly different ops */
if (hw->mac_type == ICE_MAC_GENERIC)
priv.context.ops = &ice_fwu_ops_e822;
@@ -1030,7 +1046,7 @@ int ice_devlink_flash_update(struct devlink *devlink,
err = ice_acquire_nvm(hw, ICE_RES_WRITE);
if (err) {
dev_err(dev, "Failed to acquire device flash lock, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to acquire device flash lock");
return err;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.c b/drivers/net/ethernet/intel/ice/ice_fwlog.c
deleted file mode 100644
index 4fd15387a7e5..000000000000
--- a/drivers/net/ethernet/intel/ice/ice_fwlog.c
+++ /dev/null
@@ -1,472 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2022, Intel Corporation. */
-
-#include <linux/vmalloc.h>
-#include "ice.h"
-#include "ice_common.h"
-#include "ice_fwlog.h"
-
-bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings)
-{
- u16 head, tail;
-
- head = rings->head;
- tail = rings->tail;
-
- if (head < tail && (tail - head == (rings->size - 1)))
- return true;
- else if (head > tail && (tail == (head - 1)))
- return true;
-
- return false;
-}
-
-bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings)
-{
- return rings->head == rings->tail;
-}
-
-void ice_fwlog_ring_increment(u16 *item, u16 size)
-{
- *item = (*item + 1) & (size - 1);
-}
-
-static int ice_fwlog_alloc_ring_buffs(struct ice_fwlog_ring *rings)
-{
- int i, nr_bytes;
- u8 *mem;
-
- nr_bytes = rings->size * ICE_AQ_MAX_BUF_LEN;
- mem = vzalloc(nr_bytes);
- if (!mem)
- return -ENOMEM;
-
- for (i = 0; i < rings->size; i++) {
- struct ice_fwlog_data *ring = &rings->rings[i];
-
- ring->data_size = ICE_AQ_MAX_BUF_LEN;
- ring->data = mem;
- mem += ICE_AQ_MAX_BUF_LEN;
- }
-
- return 0;
-}
-
-static void ice_fwlog_free_ring_buffs(struct ice_fwlog_ring *rings)
-{
- int i;
-
- for (i = 0; i < rings->size; i++) {
- struct ice_fwlog_data *ring = &rings->rings[i];
-
- /* the first ring is the base memory for the whole range so
- * free it
- */
- if (!i)
- vfree(ring->data);
-
- ring->data = NULL;
- ring->data_size = 0;
- }
-}
-
-#define ICE_FWLOG_INDEX_TO_BYTES(n) ((128 * 1024) << (n))
-/**
- * ice_fwlog_realloc_rings - reallocate the FW log rings
- * @hw: pointer to the HW structure
- * @index: the new index to use to allocate memory for the log data
- *
- */
-void ice_fwlog_realloc_rings(struct ice_hw *hw, int index)
-{
- struct ice_fwlog_ring ring;
- int status, ring_size;
-
- /* convert the number of bytes into a number of 4K buffers. externally
- * the driver presents the interface to the FW log data as a number of
- * bytes because that's easy for users to understand. internally the
- * driver uses a ring of buffers because the driver doesn't know where
- * the beginning and end of any line of log data is so the driver has
- * to overwrite data as complete blocks. when the data is returned to
- * the user the driver knows that the data is correct and the FW log
- * can be correctly parsed by the tools
- */
- ring_size = ICE_FWLOG_INDEX_TO_BYTES(index) / ICE_AQ_MAX_BUF_LEN;
- if (ring_size == hw->fwlog_ring.size)
- return;
-
- /* allocate space for the new rings and buffers then release the
- * old rings and buffers. that way if we don't have enough
- * memory then we at least have what we had before
- */
- ring.rings = kcalloc(ring_size, sizeof(*ring.rings), GFP_KERNEL);
- if (!ring.rings)
- return;
-
- ring.size = ring_size;
-
- status = ice_fwlog_alloc_ring_buffs(&ring);
- if (status) {
- dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log ring data buffers\n");
- ice_fwlog_free_ring_buffs(&ring);
- kfree(ring.rings);
- return;
- }
-
- ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
- kfree(hw->fwlog_ring.rings);
-
- hw->fwlog_ring.rings = ring.rings;
- hw->fwlog_ring.size = ring.size;
- hw->fwlog_ring.index = index;
- hw->fwlog_ring.head = 0;
- hw->fwlog_ring.tail = 0;
-}
-
-/**
- * ice_fwlog_init - Initialize FW logging configuration
- * @hw: pointer to the HW structure
- *
- * This function should be called on driver initialization during
- * ice_init_hw().
- */
-int ice_fwlog_init(struct ice_hw *hw)
-{
- /* only support fw log commands on PF 0 */
- if (hw->bus.func)
- return -EINVAL;
-
- ice_fwlog_set_supported(hw);
-
- if (ice_fwlog_supported(hw)) {
- int status;
-
- /* read the current config from the FW and store it */
- status = ice_fwlog_get(hw, &hw->fwlog_cfg);
- if (status)
- return status;
-
- hw->fwlog_ring.rings = kcalloc(ICE_FWLOG_RING_SIZE_DFLT,
- sizeof(*hw->fwlog_ring.rings),
- GFP_KERNEL);
- if (!hw->fwlog_ring.rings) {
- dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log rings\n");
- return -ENOMEM;
- }
-
- hw->fwlog_ring.size = ICE_FWLOG_RING_SIZE_DFLT;
- hw->fwlog_ring.index = ICE_FWLOG_RING_SIZE_INDEX_DFLT;
-
- status = ice_fwlog_alloc_ring_buffs(&hw->fwlog_ring);
- if (status) {
- dev_warn(ice_hw_to_dev(hw), "Unable to allocate memory for FW log ring data buffers\n");
- ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
- kfree(hw->fwlog_ring.rings);
- return status;
- }
-
- ice_debugfs_fwlog_init(hw->back);
- } else {
- dev_warn(ice_hw_to_dev(hw), "FW logging is not supported in this NVM image. Please update the NVM to get FW log support\n");
- }
-
- return 0;
-}
-
-/**
- * ice_fwlog_deinit - unroll FW logging configuration
- * @hw: pointer to the HW structure
- *
- * This function should be called in ice_deinit_hw().
- */
-void ice_fwlog_deinit(struct ice_hw *hw)
-{
- struct ice_pf *pf = hw->back;
- int status;
-
- /* only support fw log commands on PF 0 */
- if (hw->bus.func)
- return;
-
- ice_debugfs_pf_deinit(hw->back);
-
- /* make sure FW logging is disabled to not put the FW in a weird state
- * for the next driver load
- */
- hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_ARQ_ENA;
- status = ice_fwlog_set(hw, &hw->fwlog_cfg);
- if (status)
- dev_warn(ice_hw_to_dev(hw), "Unable to turn off FW logging, status: %d\n",
- status);
-
- kfree(pf->ice_debugfs_pf_fwlog_modules);
-
- pf->ice_debugfs_pf_fwlog_modules = NULL;
-
- status = ice_fwlog_unregister(hw);
- if (status)
- dev_warn(ice_hw_to_dev(hw), "Unable to unregister FW logging, status: %d\n",
- status);
-
- if (hw->fwlog_ring.rings) {
- ice_fwlog_free_ring_buffs(&hw->fwlog_ring);
- kfree(hw->fwlog_ring.rings);
- }
-}
-
-/**
- * ice_fwlog_supported - Cached for whether FW supports FW logging or not
- * @hw: pointer to the HW structure
- *
- * This will always return false if called before ice_init_hw(), so it must be
- * called after ice_init_hw().
- */
-bool ice_fwlog_supported(struct ice_hw *hw)
-{
- return hw->fwlog_supported;
-}
-
-/**
- * ice_aq_fwlog_set - Set FW logging configuration AQ command (0xFF30)
- * @hw: pointer to the HW structure
- * @entries: entries to configure
- * @num_entries: number of @entries
- * @options: options from ice_fwlog_cfg->options structure
- * @log_resolution: logging resolution
- */
-static int
-ice_aq_fwlog_set(struct ice_hw *hw, struct ice_fwlog_module_entry *entries,
- u16 num_entries, u16 options, u16 log_resolution)
-{
- struct ice_aqc_fw_log_cfg_resp *fw_modules;
- struct ice_aqc_fw_log *cmd;
- struct ice_aq_desc desc;
- int status;
- int i;
-
- fw_modules = kcalloc(num_entries, sizeof(*fw_modules), GFP_KERNEL);
- if (!fw_modules)
- return -ENOMEM;
-
- for (i = 0; i < num_entries; i++) {
- fw_modules[i].module_identifier =
- cpu_to_le16(entries[i].module_id);
- fw_modules[i].log_level = entries[i].log_level;
- }
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_config);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
-
- cmd = &desc.params.fw_log;
-
- cmd->cmd_flags = ICE_AQC_FW_LOG_CONF_SET_VALID;
- cmd->ops.cfg.log_resolution = cpu_to_le16(log_resolution);
- cmd->ops.cfg.mdl_cnt = cpu_to_le16(num_entries);
-
- if (options & ICE_FWLOG_OPTION_ARQ_ENA)
- cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_AQ_EN;
- if (options & ICE_FWLOG_OPTION_UART_ENA)
- cmd->cmd_flags |= ICE_AQC_FW_LOG_CONF_UART_EN;
-
- status = ice_aq_send_cmd(hw, &desc, fw_modules,
- sizeof(*fw_modules) * num_entries,
- NULL);
-
- kfree(fw_modules);
-
- return status;
-}
-
-/**
- * ice_fwlog_set - Set the firmware logging settings
- * @hw: pointer to the HW structure
- * @cfg: config used to set firmware logging
- *
- * This function should be called whenever the driver needs to set the firmware
- * logging configuration. It can be called on initialization, reset, or during
- * runtime.
- *
- * If the PF wishes to receive FW logging then it must register via
- * ice_fwlog_register. Note, that ice_fwlog_register does not need to be called
- * for init.
- */
-int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
-{
- if (!ice_fwlog_supported(hw))
- return -EOPNOTSUPP;
-
- return ice_aq_fwlog_set(hw, cfg->module_entries,
- ICE_AQC_FW_LOG_ID_MAX, cfg->options,
- cfg->log_resolution);
-}
-
-/**
- * ice_aq_fwlog_get - Get the current firmware logging configuration (0xFF32)
- * @hw: pointer to the HW structure
- * @cfg: firmware logging configuration to populate
- */
-static int ice_aq_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
-{
- struct ice_aqc_fw_log_cfg_resp *fw_modules;
- struct ice_aqc_fw_log *cmd;
- struct ice_aq_desc desc;
- u16 module_id_cnt;
- int status;
- void *buf;
- int i;
-
- memset(cfg, 0, sizeof(*cfg));
-
- buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_query);
- cmd = &desc.params.fw_log;
-
- cmd->cmd_flags = ICE_AQC_FW_LOG_AQ_QUERY;
-
- status = ice_aq_send_cmd(hw, &desc, buf, ICE_AQ_MAX_BUF_LEN, NULL);
- if (status) {
- ice_debug(hw, ICE_DBG_FW_LOG, "Failed to get FW log configuration\n");
- goto status_out;
- }
-
- module_id_cnt = le16_to_cpu(cmd->ops.cfg.mdl_cnt);
- if (module_id_cnt < ICE_AQC_FW_LOG_ID_MAX) {
- ice_debug(hw, ICE_DBG_FW_LOG, "FW returned less than the expected number of FW log module IDs\n");
- } else if (module_id_cnt > ICE_AQC_FW_LOG_ID_MAX) {
- ice_debug(hw, ICE_DBG_FW_LOG, "FW returned more than expected number of FW log module IDs, setting module_id_cnt to software expected max %u\n",
- ICE_AQC_FW_LOG_ID_MAX);
- module_id_cnt = ICE_AQC_FW_LOG_ID_MAX;
- }
-
- cfg->log_resolution = le16_to_cpu(cmd->ops.cfg.log_resolution);
- if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_AQ_EN)
- cfg->options |= ICE_FWLOG_OPTION_ARQ_ENA;
- if (cmd->cmd_flags & ICE_AQC_FW_LOG_CONF_UART_EN)
- cfg->options |= ICE_FWLOG_OPTION_UART_ENA;
- if (cmd->cmd_flags & ICE_AQC_FW_LOG_QUERY_REGISTERED)
- cfg->options |= ICE_FWLOG_OPTION_IS_REGISTERED;
-
- fw_modules = (struct ice_aqc_fw_log_cfg_resp *)buf;
-
- for (i = 0; i < module_id_cnt; i++) {
- struct ice_aqc_fw_log_cfg_resp *fw_module = &fw_modules[i];
-
- cfg->module_entries[i].module_id =
- le16_to_cpu(fw_module->module_identifier);
- cfg->module_entries[i].log_level = fw_module->log_level;
- }
-
-status_out:
- kfree(buf);
- return status;
-}
-
-/**
- * ice_fwlog_get - Get the firmware logging settings
- * @hw: pointer to the HW structure
- * @cfg: config to populate based on current firmware logging settings
- */
-int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
-{
- if (!ice_fwlog_supported(hw))
- return -EOPNOTSUPP;
-
- return ice_aq_fwlog_get(hw, cfg);
-}
-
-/**
- * ice_aq_fwlog_register - Register PF for firmware logging events (0xFF31)
- * @hw: pointer to the HW structure
- * @reg: true to register and false to unregister
- */
-static int ice_aq_fwlog_register(struct ice_hw *hw, bool reg)
-{
- struct ice_aq_desc desc;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logs_register);
-
- if (reg)
- desc.params.fw_log.cmd_flags = ICE_AQC_FW_LOG_AQ_REGISTER;
-
- return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
-}
-
-/**
- * ice_fwlog_register - Register the PF for firmware logging
- * @hw: pointer to the HW structure
- *
- * After this call the PF will start to receive firmware logging based on the
- * configuration set in ice_fwlog_set.
- */
-int ice_fwlog_register(struct ice_hw *hw)
-{
- int status;
-
- if (!ice_fwlog_supported(hw))
- return -EOPNOTSUPP;
-
- status = ice_aq_fwlog_register(hw, true);
- if (status)
- ice_debug(hw, ICE_DBG_FW_LOG, "Failed to register for firmware logging events over ARQ\n");
- else
- hw->fwlog_cfg.options |= ICE_FWLOG_OPTION_IS_REGISTERED;
-
- return status;
-}
-
-/**
- * ice_fwlog_unregister - Unregister the PF from firmware logging
- * @hw: pointer to the HW structure
- */
-int ice_fwlog_unregister(struct ice_hw *hw)
-{
- int status;
-
- if (!ice_fwlog_supported(hw))
- return -EOPNOTSUPP;
-
- status = ice_aq_fwlog_register(hw, false);
- if (status)
- ice_debug(hw, ICE_DBG_FW_LOG, "Failed to unregister from firmware logging events over ARQ\n");
- else
- hw->fwlog_cfg.options &= ~ICE_FWLOG_OPTION_IS_REGISTERED;
-
- return status;
-}
-
-/**
- * ice_fwlog_set_supported - Set if FW logging is supported by FW
- * @hw: pointer to the HW struct
- *
- * If FW returns success to the ice_aq_fwlog_get call then it supports FW
- * logging, else it doesn't. Set the fwlog_supported flag accordingly.
- *
- * This function is only meant to be called during driver init to determine if
- * the FW supports FW logging.
- */
-void ice_fwlog_set_supported(struct ice_hw *hw)
-{
- struct ice_fwlog_cfg *cfg;
- int status;
-
- hw->fwlog_supported = false;
-
- cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
- if (!cfg)
- return;
-
- /* don't call ice_fwlog_get() because that would check to see if FW
- * logging is supported which is what the driver is determining now
- */
- status = ice_aq_fwlog_get(hw, cfg);
- if (status)
- ice_debug(hw, ICE_DBG_FW_LOG, "ice_aq_fwlog_get failed, FW logging is not supported on this version of FW, status %d\n",
- status);
- else
- hw->fwlog_supported = true;
-
- kfree(cfg);
-}
diff --git a/drivers/net/ethernet/intel/ice/ice_fwlog.h b/drivers/net/ethernet/intel/ice/ice_fwlog.h
deleted file mode 100644
index 287e71fa4b86..000000000000
--- a/drivers/net/ethernet/intel/ice/ice_fwlog.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2022, Intel Corporation. */
-
-#ifndef _ICE_FWLOG_H_
-#define _ICE_FWLOG_H_
-#include "ice_adminq_cmd.h"
-
-struct ice_hw;
-
-/* Only a single log level should be set and all log levels under the set value
- * are enabled, e.g. if log level is set to ICE_FWLOG_LEVEL_VERBOSE, then all
- * other log levels are included (except ICE_FWLOG_LEVEL_NONE)
- */
-enum ice_fwlog_level {
- ICE_FWLOG_LEVEL_NONE = 0,
- ICE_FWLOG_LEVEL_ERROR = 1,
- ICE_FWLOG_LEVEL_WARNING = 2,
- ICE_FWLOG_LEVEL_NORMAL = 3,
- ICE_FWLOG_LEVEL_VERBOSE = 4,
- ICE_FWLOG_LEVEL_INVALID, /* all values >= this entry are invalid */
-};
-
-struct ice_fwlog_module_entry {
- /* module ID for the corresponding firmware logging event */
- u16 module_id;
- /* verbosity level for the module_id */
- u8 log_level;
-};
-
-struct ice_fwlog_cfg {
- /* list of modules for configuring log level */
- struct ice_fwlog_module_entry module_entries[ICE_AQC_FW_LOG_ID_MAX];
- /* options used to configure firmware logging */
- u16 options;
-#define ICE_FWLOG_OPTION_ARQ_ENA BIT(0)
-#define ICE_FWLOG_OPTION_UART_ENA BIT(1)
- /* set before calling ice_fwlog_init() so the PF registers for firmware
- * logging on initialization
- */
-#define ICE_FWLOG_OPTION_REGISTER_ON_INIT BIT(2)
- /* set in the ice_fwlog_get() response if the PF is registered for FW
- * logging events over ARQ
- */
-#define ICE_FWLOG_OPTION_IS_REGISTERED BIT(3)
-
- /* minimum number of log events sent per Admin Receive Queue event */
- u16 log_resolution;
-};
-
-struct ice_fwlog_data {
- u16 data_size;
- u8 *data;
-};
-
-struct ice_fwlog_ring {
- struct ice_fwlog_data *rings;
- u16 index;
- u16 size;
- u16 head;
- u16 tail;
-};
-
-#define ICE_FWLOG_RING_SIZE_INDEX_DFLT 3
-#define ICE_FWLOG_RING_SIZE_DFLT 256
-#define ICE_FWLOG_RING_SIZE_MAX 512
-
-bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings);
-bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings);
-void ice_fwlog_ring_increment(u16 *item, u16 size);
-void ice_fwlog_set_supported(struct ice_hw *hw);
-bool ice_fwlog_supported(struct ice_hw *hw);
-int ice_fwlog_init(struct ice_hw *hw);
-void ice_fwlog_deinit(struct ice_hw *hw);
-int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
-int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
-int ice_fwlog_register(struct ice_hw *hw);
-int ice_fwlog_unregister(struct ice_hw *hw);
-void ice_fwlog_realloc_rings(struct ice_hw *hw, int index);
-#endif /* _ICE_FWLOG_H_ */
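For context on what this deletion removes, here is a hedged sketch of how a caller of the now-removed interface would have driven it, using only the functions and fields declared above; the helper itself is hypothetical and the log_resolution value is an arbitrary example:

/* hypothetical consumer, not part of the driver */
static int example_enable_fwlog(struct ice_hw *hw)
{
	struct ice_fwlog_cfg *cfg = &hw->fwlog_cfg;
	int err, i;

	err = ice_fwlog_get(hw, cfg);			/* read current FW state */
	if (err)
		return err;

	for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++)
		cfg->module_entries[i].log_level = ICE_FWLOG_LEVEL_NORMAL;

	cfg->options |= ICE_FWLOG_OPTION_ARQ_ENA;	/* deliver events over ARQ */
	cfg->log_resolution = 10;			/* example value */

	err = ice_fwlog_set(hw, cfg);			/* push configuration to FW */
	if (err)
		return err;

	return ice_fwlog_register(hw);			/* start receiving events */
}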
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
index f02e8ca55375..6b26290452d4 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -182,7 +182,7 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf)
pf->gnss_serial = gnss;
kthread_init_delayed_work(&gnss->read_work, ice_gnss_read);
- kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev));
+ kworker = kthread_run_worker(0, "ice-gnss-%s", dev_name(dev));
if (IS_ERR(kworker)) {
kfree(gnss);
return NULL;
@@ -381,32 +381,23 @@ void ice_gnss_exit(struct ice_pf *pf)
}
/**
- * ice_gnss_is_gps_present - Check if GPS HW is present
+ * ice_gnss_is_module_present - Check if GNSS HW is present
* @hw: pointer to HW struct
+ *
+ * Return: true when GNSS is present, false otherwise.
*/
-bool ice_gnss_is_gps_present(struct ice_hw *hw)
+bool ice_gnss_is_module_present(struct ice_hw *hw)
{
- if (!hw->func_caps.ts_func_info.src_tmr_owned)
- return false;
+ int err;
+ u8 data;
- if (!ice_is_gps_in_netlist(hw))
+ if (!hw->func_caps.ts_func_info.src_tmr_owned ||
+ !ice_is_gps_in_netlist(hw))
return false;
-#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
- if (ice_is_e810t(hw)) {
- int err;
- u8 data;
-
- err = ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data);
- if (err || !!(data & ICE_P0_GNSS_PRSNT_N))
- return false;
- } else {
- return false;
- }
-#else
- if (!ice_is_e810t(hw))
+ err = ice_read_pca9575_reg(hw, ICE_PCA9575_P0_IN, &data);
+ if (err || !!(data & ICE_P0_GNSS_PRSNT_N))
return false;
-#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
return true;
}
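The presence bit read from the PCA9575 expander is active low, which is why the helper returns true only when the bit is clear. A standalone sketch of that check, with the bit position assumed purely for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_GNSS_PRSNT_N	(1U << 5)	/* assumed position of ICE_P0_GNSS_PRSNT_N */

static bool gnss_present(uint8_t p0_in)
{
	return !(p0_in & EX_GNSS_PRSNT_N);	/* active low: 0 means present */
}

int main(void)
{
	printf("0x00 -> %d, 0x20 -> %d\n", gnss_present(0x00), gnss_present(0x20));
	return 0;
}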
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h
index 75e567ad7059..15daf603ed7b 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.h
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.h
@@ -37,11 +37,11 @@ struct gnss_serial {
#if IS_ENABLED(CONFIG_GNSS)
void ice_gnss_init(struct ice_pf *pf);
void ice_gnss_exit(struct ice_pf *pf);
-bool ice_gnss_is_gps_present(struct ice_hw *hw);
+bool ice_gnss_is_module_present(struct ice_hw *hw);
#else
static inline void ice_gnss_init(struct ice_pf *pf) { }
static inline void ice_gnss_exit(struct ice_pf *pf) { }
-static inline bool ice_gnss_is_gps_present(struct ice_hw *hw)
+static inline bool ice_gnss_is_module_present(struct ice_hw *hw)
{
return false;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index dc88aea9f473..082ad33c53dc 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -16,8 +16,10 @@
#define GLCOMM_QUANTA_PROF_MAX_DESC_M ICE_M(0x3F, 24)
#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
#define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4))
+#define QTX_COMM_HEAD_MAX_INDEX 16383
#define QTX_COMM_HEAD_HEAD_S 0
#define QTX_COMM_HEAD_HEAD_M ICE_M(0x1FFF, 0)
+#define E830_GLQTX_TXTIME_DBELL_LSB(_DBQM) (0x002E0000 + ((_DBQM) * 8))
#define PF_FW_ARQBAH 0x00080180
#define PF_FW_ARQBAL 0x00080080
#define PF_FW_ARQH 0x00080380
@@ -272,6 +274,8 @@
#define VPINT_ALLOC_PCI_VALID_M BIT(31)
#define VPINT_MBX_CTL(_VSI) (0x0016A000 + ((_VSI) * 4))
#define VPINT_MBX_CTL_CAUSE_ENA_M BIT(30)
+#define PFLAN_TX_QALLOC(_PF) (0x001D2580 + ((_PF) * 4))
+#define PFLAN_TX_QALLOC_FIRSTQ_M GENMASK(13, 0)
#define GLLAN_RCTL_0 0x002941F8
#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
@@ -376,6 +380,15 @@
#define GLNVM_ULD_POR_DONE_1_M BIT(8)
#define GLNVM_ULD_PCIER_DONE_2_M BIT(9)
#define GLNVM_ULD_PE_DONE_M BIT(10)
+#define GLCOMM_QTX_CNTX_CTL 0x002D2DC8
+#define GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M GENMASK(13, 0)
+#define GLCOMM_QTX_CNTX_CTL_CMD_M GENMASK(18, 16)
+#define GLCOMM_QTX_CNTX_CTL_CMD_READ 0
+#define GLCOMM_QTX_CNTX_CTL_CMD_WRITE 1
+#define GLCOMM_QTX_CNTX_CTL_CMD_RESET 3
+#define GLCOMM_QTX_CNTX_CTL_CMD_WRITE_NO_DYN 4
+#define GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M BIT(19)
+#define GLCOMM_QTX_CNTX_DATA(_i) (0x002D2D40 + ((_i) * 4))
#define GLPCI_CNF2 0x000BE004
#define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1)
#define PF_FUNC_RID 0x0009E880
@@ -541,12 +554,26 @@
#define PFPM_WUS_MAG_M BIT(1)
#define PFPM_WUS_MNG_M BIT(3)
#define PFPM_WUS_FW_RST_WK_M BIT(31)
+#define E830_PRTMAC_TS_TX_MEM_VALID_H 0x001E2020
+#define E830_PRTMAC_TS_TX_MEM_VALID_L 0x001E2000
#define E830_PRTMAC_CL01_PS_QNT 0x001E32A0
#define E830_PRTMAC_CL01_PS_QNT_CL0_M GENMASK(15, 0)
#define E830_PRTMAC_CL01_QNT_THR 0x001E3320
#define E830_PRTMAC_CL01_QNT_THR_CL0_M GENMASK(15, 0)
+#define E830_PRTTSYN_TXTIME_H(_i) (0x001E5800 + ((_i) * 32))
+#define E830_PRTTSYN_TXTIME_L(_i) (0x001E5000 + ((_i) * 32))
+#define E830_GLPTM_ART_CTL 0x00088B50
+#define E830_GLPTM_ART_CTL_ACTIVE_M BIT(0)
+#define E830_GLPTM_ART_TIME_H 0x00088B54
+#define E830_GLPTM_ART_TIME_L 0x00088B58
+#define E830_GLTSYN_PTMTIME_H(_i) (0x00088B48 + ((_i) * 4))
+#define E830_GLTSYN_PTMTIME_L(_i) (0x00088B40 + ((_i) * 4))
+#define E830_PFPTM_SEM 0x00088B00
+#define E830_PFPTM_SEM_BUSY_M BIT(0)
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
+#define E830_GLTXTIME_FETCH_PROFILE(_i, _j) (0x002D3500 + ((_i) * 4 + (_j) * 64))
+#define E830_GLTXTIME_FETCH_PROFILE_FETCH_TS_DESC_M ICE_M(0x1FF, 0)
#define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH 0x00234000
#define E830_MBX_VF_DEC_TRIG(_VF) (0x00233800 + (_VF) * 4)
#define E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(_VF) (0x00233000 + (_VF) * 4)
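The new GLCOMM_QTX_CNTX_CTL/GLCOMM_QTX_CNTX_DATA registers appear intended for the usual indirect-context access pattern: compose a command word from the field masks, write it, then read the context back from the data registers. A hedged sketch assuming linux/bitfield.h's FIELD_PREP() and the driver's wr32() helper; the readback sequence is an assumption, not documented behavior:

static void example_qtx_cntx_read(struct ice_hw *hw, u16 queue_id)
{
	u32 cmd;

	cmd = FIELD_PREP(GLCOMM_QTX_CNTX_CTL_QUEUE_ID_M, queue_id) |
	      FIELD_PREP(GLCOMM_QTX_CNTX_CTL_CMD_M, GLCOMM_QTX_CNTX_CTL_CMD_READ) |
	      GLCOMM_QTX_CNTX_CTL_CMD_EXEC_M;

	wr32(hw, GLCOMM_QTX_CNTX_CTL, cmd);

	/* context words would then be read back from GLCOMM_QTX_CNTX_DATA(i) */
}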
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index 145b27f2a4ce..420d45c2558b 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -9,22 +9,25 @@
static DEFINE_XARRAY_ALLOC1(ice_aux_id);
/**
- * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct
- * @pf: pointer to PF struct
+ * ice_get_auxiliary_drv - retrieve iidc_rdma_core_auxiliary_drv struct
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
*
* This function has to be called with a device_lock on the
- * pf->adev.dev to avoid race conditions.
+ * cdev->adev.dev to avoid race conditions.
+ *
+ * Return: pointer to the matched auxiliary driver struct, or NULL if none is
+ * attached
*/
-static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
+static struct iidc_rdma_core_auxiliary_drv *
+ice_get_auxiliary_drv(struct iidc_rdma_core_dev_info *cdev)
{
struct auxiliary_device *adev;
- adev = pf->adev;
+ adev = cdev->adev;
if (!adev || !adev->dev.driver)
return NULL;
- return container_of(adev->dev.driver, struct iidc_auxiliary_drv,
- adrv.driver);
+ return container_of(adev->dev.driver,
+ struct iidc_rdma_core_auxiliary_drv, adrv.driver);
}
/**
@@ -32,44 +35,54 @@ static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
* @pf: pointer to PF struct
* @event: event struct
*/
-void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
+void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_rdma_event *event)
{
- struct iidc_auxiliary_drv *iadrv;
+ struct iidc_rdma_core_auxiliary_drv *iadrv;
+ struct iidc_rdma_core_dev_info *cdev;
if (WARN_ON_ONCE(!in_task()))
return;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return;
+
mutex_lock(&pf->adev_mutex);
- if (!pf->adev)
+ if (!cdev->adev)
goto finish;
- device_lock(&pf->adev->dev);
- iadrv = ice_get_auxiliary_drv(pf);
+ device_lock(&cdev->adev->dev);
+ iadrv = ice_get_auxiliary_drv(cdev);
if (iadrv && iadrv->event_handler)
- iadrv->event_handler(pf, event);
- device_unlock(&pf->adev->dev);
+ iadrv->event_handler(cdev, event);
+ device_unlock(&cdev->adev->dev);
finish:
mutex_unlock(&pf->adev_mutex);
}
/**
* ice_add_rdma_qset - Add Leaf Node for RDMA Qset
- * @pf: PF struct
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @qset: Resource to be allocated
+ *
+ * Return: Zero on success or error code encountered
*/
-int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
+int ice_add_rdma_qset(struct iidc_rdma_core_dev_info *cdev,
+ struct iidc_rdma_qset_params *qset)
{
u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS];
struct ice_vsi *vsi;
struct device *dev;
+ struct ice_pf *pf;
u32 qset_teid;
u16 qs_handle;
int status;
int i;
- if (WARN_ON(!pf || !qset))
+ if (WARN_ON(!cdev || !qset))
return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
dev = ice_pf_to_dev(pf);
if (!ice_is_rdma_ena(pf))
@@ -100,7 +113,6 @@ int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
dev_err(dev, "Failed VSI RDMA Qset enable\n");
return status;
}
- vsi->qset_handle[qset->tc] = qset->qs_handle;
qset->teid = qset_teid;
return 0;
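A hedged sketch of the consumer side of this reworked API: an auxiliary driver filling a qset request and calling the exported helper. The iidc_rdma_qset_params field names are taken from their use in this file; everything else is hypothetical:

static int example_add_qset(struct iidc_rdma_core_dev_info *cdev, u16 vport_id)
{
	struct iidc_rdma_qset_params qset = {
		.vport_id = vport_id,	/* VSI the qset is attached to */
		.tc = 0,		/* traffic class for the leaf node */
		.qs_handle = 0,		/* caller-chosen qset handle */
	};
	int err;

	err = ice_add_rdma_qset(cdev, &qset);
	if (err)
		return err;

	/* qset.teid now holds the scheduler node TEID needed for later removal */
	return 0;
}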
@@ -109,18 +121,23 @@ EXPORT_SYMBOL_GPL(ice_add_rdma_qset);
/**
* ice_del_rdma_qset - Delete leaf node for RDMA Qset
- * @pf: PF struct
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @qset: Resource to be freed
+ *
+ * Return: Zero on success, error code on failure
*/
-int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
+int ice_del_rdma_qset(struct iidc_rdma_core_dev_info *cdev,
+ struct iidc_rdma_qset_params *qset)
{
struct ice_vsi *vsi;
+ struct ice_pf *pf;
u32 teid;
u16 q_id;
- if (WARN_ON(!pf || !qset))
+ if (WARN_ON(!cdev || !qset))
return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
vsi = ice_find_vsi(pf, qset->vport_id);
if (!vsi) {
dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n");
@@ -130,36 +147,36 @@ int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
q_id = qset->qs_handle;
teid = qset->teid;
- vsi->qset_handle[qset->tc] = 0;
-
return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id);
}
EXPORT_SYMBOL_GPL(ice_del_rdma_qset);
/**
* ice_rdma_request_reset - accept request from RDMA to perform a reset
- * @pf: struct for PF
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @reset_type: type of reset
+ *
+ * Return: Zero on success, error code on failure
*/
-int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type)
+int ice_rdma_request_reset(struct iidc_rdma_core_dev_info *cdev,
+ enum iidc_rdma_reset_type reset_type)
{
enum ice_reset_req reset;
+ struct ice_pf *pf;
- if (WARN_ON(!pf))
+ if (WARN_ON(!cdev))
return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
+
switch (reset_type) {
- case IIDC_PFR:
+ case IIDC_FUNC_RESET:
reset = ICE_RESET_PFR;
break;
- case IIDC_CORER:
+ case IIDC_DEV_RESET:
reset = ICE_RESET_CORER;
break;
- case IIDC_GLOBR:
- reset = ICE_RESET_GLOBR;
- break;
default:
- dev_err(ice_pf_to_dev(pf), "incorrect reset request\n");
return -EINVAL;
}
@@ -169,18 +186,23 @@ EXPORT_SYMBOL_GPL(ice_rdma_request_reset);
/**
* ice_rdma_update_vsi_filter - update main VSI filters for RDMA
- * @pf: pointer to struct for PF
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
* @vsi_id: VSI HW idx to update filter on
* @enable: bool whether to enable or disable filters
+ *
+ * Return: Zero on success, error code on failure
*/
-int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
+int ice_rdma_update_vsi_filter(struct iidc_rdma_core_dev_info *cdev,
+ u16 vsi_id, bool enable)
{
struct ice_vsi *vsi;
+ struct ice_pf *pf;
int status;
- if (WARN_ON(!pf))
+ if (WARN_ON(!cdev))
return -EINVAL;
+ pf = pci_get_drvdata(cdev->pdev);
vsi = ice_find_vsi(pf, vsi_id);
if (!vsi)
return -EINVAL;
@@ -201,88 +223,54 @@ int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
EXPORT_SYMBOL_GPL(ice_rdma_update_vsi_filter);
/**
- * ice_get_qos_params - parse QoS params for RDMA consumption
- * @pf: pointer to PF struct
- * @qos: set of QoS values
- */
-void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
-{
- struct ice_dcbx_cfg *dcbx_cfg;
- unsigned int i;
- u32 up2tc;
-
- dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
- up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);
-
- qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
- for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
- qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7;
-
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];
-
- qos->pfc_mode = dcbx_cfg->pfc_mode;
- if (qos->pfc_mode == IIDC_DSCP_PFC_MODE)
- for (i = 0; i < IIDC_MAX_DSCP_MAPPING; i++)
- qos->dscp_map[i] = dcbx_cfg->dscp_map[i];
-}
-EXPORT_SYMBOL_GPL(ice_get_qos_params);
-
-/**
- * ice_alloc_rdma_qvectors - Allocate vector resources for RDMA driver
- * @pf: board private structure to initialize
+ * ice_alloc_rdma_qvector - alloc vector resources reserved for RDMA driver
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
+ * @entry: MSI-X entry to be removed
+ *
+ * Return: Zero on success, error code on failure
*/
-static int ice_alloc_rdma_qvectors(struct ice_pf *pf)
+int ice_alloc_rdma_qvector(struct iidc_rdma_core_dev_info *cdev,
+ struct msix_entry *entry)
{
- if (ice_is_rdma_ena(pf)) {
- int i;
-
- pf->msix_entries = kcalloc(pf->num_rdma_msix,
- sizeof(*pf->msix_entries),
- GFP_KERNEL);
- if (!pf->msix_entries)
- return -ENOMEM;
+ struct msi_map map;
+ struct ice_pf *pf;
- /* RDMA is the only user of pf->msix_entries array */
- pf->rdma_base_vector = 0;
+ if (WARN_ON(!cdev))
+ return -EINVAL;
- for (i = 0; i < pf->num_rdma_msix; i++) {
- struct msix_entry *entry = &pf->msix_entries[i];
- struct msi_map map;
+ pf = pci_get_drvdata(cdev->pdev);
+ map = ice_alloc_irq(pf, true);
+ if (map.index < 0)
+ return -ENOMEM;
- map = ice_alloc_irq(pf, false);
- if (map.index < 0)
- break;
+ entry->entry = map.index;
+ entry->vector = map.virq;
- entry->entry = map.index;
- entry->vector = map.virq;
- }
- }
return 0;
}
+EXPORT_SYMBOL_GPL(ice_alloc_rdma_qvector);
/**
* ice_free_rdma_qvector - free vector resources reserved for RDMA driver
- * @pf: board private structure to initialize
+ * @cdev: pointer to iidc_rdma_core_dev_info struct
+ * @entry: MSI-X entry to be removed
*/
-static void ice_free_rdma_qvector(struct ice_pf *pf)
+void ice_free_rdma_qvector(struct iidc_rdma_core_dev_info *cdev,
+ struct msix_entry *entry)
{
- int i;
+ struct msi_map map;
+ struct ice_pf *pf;
- if (!pf->msix_entries)
+ if (WARN_ON(!cdev || !entry))
return;
- for (i = 0; i < pf->num_rdma_msix; i++) {
- struct msi_map map;
-
- map.index = pf->msix_entries[i].entry;
- map.virq = pf->msix_entries[i].vector;
- ice_free_irq(pf, map);
- }
+ pf = pci_get_drvdata(cdev->pdev);
- kfree(pf->msix_entries);
- pf->msix_entries = NULL;
+ map.index = entry->entry;
+ map.virq = entry->vector;
+ ice_free_irq(pf, map);
}
+EXPORT_SYMBOL_GPL(ice_free_rdma_qvector);
/**
* ice_adev_release - function to be mapped to AUX dev's release op
@@ -290,19 +278,23 @@ static void ice_free_rdma_qvector(struct ice_pf *pf)
*/
static void ice_adev_release(struct device *dev)
{
- struct iidc_auxiliary_dev *iadev;
+ struct iidc_rdma_core_auxiliary_dev *iadev;
- iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev);
+ iadev = container_of(dev, struct iidc_rdma_core_auxiliary_dev,
+ adev.dev);
kfree(iadev);
}
/**
* ice_plug_aux_dev - allocate and register AUX device
* @pf: pointer to pf struct
+ *
+ * Return: Zero on success, error code on failure
*/
int ice_plug_aux_dev(struct ice_pf *pf)
{
- struct iidc_auxiliary_dev *iadev;
+ struct iidc_rdma_core_auxiliary_dev *iadev;
+ struct iidc_rdma_core_dev_info *cdev;
struct auxiliary_device *adev;
int ret;
@@ -312,17 +304,22 @@ int ice_plug_aux_dev(struct ice_pf *pf)
if (!ice_is_rdma_ena(pf))
return 0;
+ cdev = pf->cdev_info;
+ if (!cdev)
+ return -ENODEV;
+
iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
if (!iadev)
return -ENOMEM;
adev = &iadev->adev;
- iadev->pf = pf;
+ iadev->cdev_info = cdev;
adev->id = pf->aux_idx;
adev->dev.release = ice_adev_release;
adev->dev.parent = &pf->pdev->dev;
- adev->name = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? "roce" : "iwarp";
+ adev->name = cdev->rdma_protocol & IIDC_RDMA_PROTOCOL_ROCEV2 ?
+ "roce" : "iwarp";
ret = auxiliary_device_init(adev);
if (ret) {
@@ -337,8 +334,9 @@ int ice_plug_aux_dev(struct ice_pf *pf)
}
mutex_lock(&pf->adev_mutex);
- pf->adev = adev;
+ cdev->adev = adev;
mutex_unlock(&pf->adev_mutex);
+ set_bit(ICE_FLAG_AUX_DEV_CREATED, pf->flags);
return 0;
}
@@ -350,15 +348,16 @@ void ice_unplug_aux_dev(struct ice_pf *pf)
{
struct auxiliary_device *adev;
+ if (!test_and_clear_bit(ICE_FLAG_AUX_DEV_CREATED, pf->flags))
+ return;
+
mutex_lock(&pf->adev_mutex);
- adev = pf->adev;
- pf->adev = NULL;
+ adev = pf->cdev_info->adev;
+ pf->cdev_info->adev = NULL;
mutex_unlock(&pf->adev_mutex);
- if (adev) {
- auxiliary_device_delete(adev);
- auxiliary_device_uninit(adev);
- }
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
}
/**
@@ -367,7 +366,9 @@ void ice_unplug_aux_dev(struct ice_pf *pf)
*/
int ice_init_rdma(struct ice_pf *pf)
{
+ struct iidc_rdma_priv_dev_info *privd;
struct device *dev = &pf->pdev->dev;
+ struct iidc_rdma_core_dev_info *cdev;
int ret;
if (!ice_is_rdma_ena(pf)) {
@@ -375,30 +376,50 @@ int ice_init_rdma(struct ice_pf *pf)
return 0;
}
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return -ENOMEM;
+
+ pf->cdev_info = cdev;
+
+ privd = kzalloc(sizeof(*privd), GFP_KERNEL);
+ if (!privd) {
+ ret = -ENOMEM;
+ goto err_privd_alloc;
+ }
+
+ privd->pf_id = pf->hw.pf_id;
ret = xa_alloc(&ice_aux_id, &pf->aux_idx, NULL, XA_LIMIT(1, INT_MAX),
GFP_KERNEL);
if (ret) {
dev_err(dev, "Failed to allocate device ID for AUX driver\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_alloc_xa;
}
- /* Reserve vector resources */
- ret = ice_alloc_rdma_qvectors(pf);
- if (ret < 0) {
- dev_err(dev, "failed to reserve vectors for RDMA\n");
- goto err_reserve_rdma_qvector;
- }
- pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
+ cdev->iidc_priv = privd;
+ privd->netdev = pf->vsi[0]->netdev;
+
+ privd->hw_addr = (u8 __iomem *)pf->hw.hw_addr;
+ cdev->pdev = pf->pdev;
+ privd->vport_id = pf->vsi[0]->vsi_num;
+
+ pf->cdev_info->rdma_protocol |= IIDC_RDMA_PROTOCOL_ROCEV2;
+ ice_setup_dcb_qos_info(pf, &privd->qos_info);
ret = ice_plug_aux_dev(pf);
if (ret)
goto err_plug_aux_dev;
return 0;
err_plug_aux_dev:
- ice_free_rdma_qvector(pf);
-err_reserve_rdma_qvector:
- pf->adev = NULL;
+ pf->cdev_info->adev = NULL;
xa_erase(&ice_aux_id, pf->aux_idx);
+err_alloc_xa:
+ kfree(privd);
+err_privd_alloc:
+ kfree(cdev);
+ pf->cdev_info = NULL;
+
return ret;
}
@@ -412,6 +433,8 @@ void ice_deinit_rdma(struct ice_pf *pf)
return;
ice_unplug_aux_dev(pf);
- ice_free_rdma_qvector(pf);
xa_erase(&ice_aux_id, pf->aux_idx);
+ kfree(pf->cdev_info->iidc_priv);
+ kfree(pf->cdev_info);
+ pf->cdev_info = NULL;
}
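On the consumer side, the auxiliary driver bound to the "roce"/"iwarp" device recovers the core device info by walking back from the auxiliary_device it is probed with. A minimal sketch assuming only the structures used above (the probe signature is the standard auxiliary bus one); the function itself is hypothetical:

static int example_aux_probe(struct auxiliary_device *adev,
			     const struct auxiliary_device_id *id)
{
	struct iidc_rdma_core_auxiliary_dev *iadev;
	struct iidc_rdma_core_dev_info *cdev;

	iadev = container_of(adev, struct iidc_rdma_core_auxiliary_dev, adev);
	cdev = iadev->cdev_info;	/* filled in by ice_plug_aux_dev() */
	if (!cdev)
		return -ENODEV;

	/* cdev->pdev, cdev->iidc_priv, etc. are now available to the consumer */
	return 0;
}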
diff --git a/drivers/net/ethernet/intel/ice/ice_idc_int.h b/drivers/net/ethernet/intel/ice/ice_idc_int.h
index 4b0c86757df9..17dbfcfb6a2a 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc_int.h
+++ b/drivers/net/ethernet/intel/ice/ice_idc_int.h
@@ -4,10 +4,11 @@
#ifndef _ICE_IDC_INT_H_
#define _ICE_IDC_INT_H_
-#include <linux/net/intel/iidc.h>
+#include <linux/net/intel/iidc_rdma.h>
+#include <linux/net/intel/iidc_rdma_ice.h>
struct ice_pf;
-void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event);
+void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_rdma_event *event);
#endif /* !_ICE_IDC_INT_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_irq.c b/drivers/net/ethernet/intel/ice/ice_irq.c
index ad82ff7d1995..30801fd375f0 100644
--- a/drivers/net/ethernet/intel/ice/ice_irq.c
+++ b/drivers/net/ethernet/intel/ice/ice_irq.c
@@ -20,6 +20,19 @@ ice_init_irq_tracker(struct ice_pf *pf, unsigned int max_vectors,
xa_init_flags(&pf->irq_tracker.entries, XA_FLAGS_ALLOC);
}
+static int
+ice_init_virt_irq_tracker(struct ice_pf *pf, u32 base, u32 num_entries)
+{
+ pf->virt_irq_tracker.bm = bitmap_zalloc(num_entries, GFP_KERNEL);
+ if (!pf->virt_irq_tracker.bm)
+ return -ENOMEM;
+
+ pf->virt_irq_tracker.num_entries = num_entries;
+ pf->virt_irq_tracker.base = base;
+
+ return 0;
+}
+
/**
* ice_deinit_irq_tracker - free xarray tracker
* @pf: board private structure
@@ -29,6 +42,11 @@ static void ice_deinit_irq_tracker(struct ice_pf *pf)
xa_destroy(&pf->irq_tracker.entries);
}
+static void ice_deinit_virt_irq_tracker(struct ice_pf *pf)
+{
+ bitmap_free(pf->virt_irq_tracker.bm);
+}
+
/**
* ice_free_irq_res - free a block of resources
* @pf: board private structure
@@ -45,7 +63,7 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index)
/**
* ice_get_irq_res - get an interrupt resource
* @pf: board private structure
- * @dyn_only: force entry to be dynamically allocated
+ * @dyn_allowed: allow entry to be dynamically allocated
*
* Allocate new irq entry in the free slot of the tracker. Since xarray
* is used, always allocate new entry at the lowest possible index. Set
@@ -53,11 +71,12 @@ static void ice_free_irq_res(struct ice_pf *pf, u16 index)
*
* Returns allocated irq entry or NULL on failure.
*/
-static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
+static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf,
+ bool dyn_allowed)
{
- struct xa_limit limit = { .max = pf->irq_tracker.num_entries,
+ struct xa_limit limit = { .max = pf->irq_tracker.num_entries - 1,
.min = 0 };
- unsigned int num_static = pf->irq_tracker.num_static;
+ unsigned int num_static = pf->irq_tracker.num_static - 1;
struct ice_irq_entry *entry;
unsigned int index;
int ret;
@@ -66,9 +85,9 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
if (!entry)
return NULL;
- /* skip preallocated entries if the caller says so */
- if (dyn_only)
- limit.min = num_static;
+ /* restrict to preallocated entries when dynamic allocation is not allowed */
+ if (!dyn_allowed)
+ limit.max = num_static;
ret = xa_alloc(&pf->irq_tracker.entries, &index, entry, limit,
GFP_KERNEL);
@@ -78,161 +97,18 @@ static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf, bool dyn_only)
entry = NULL;
} else {
entry->index = index;
- entry->dynamic = index >= num_static;
+ entry->dynamic = index > num_static;
}
return entry;
}
-/**
- * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
- * @pf: board private structure
- * @v_remain: number of remaining MSI-X vectors to be distributed
- *
- * Reduce the usage of MSI-X vectors when entire request cannot be fulfilled.
- * pf->num_lan_msix and pf->num_rdma_msix values are set based on number of
- * remaining vectors.
- */
-static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
-{
- int v_rdma;
-
- if (!ice_is_rdma_ena(pf)) {
- pf->num_lan_msix = v_remain;
- return;
- }
-
- /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */
- v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
-
- if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
- dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
- clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
-
- pf->num_rdma_msix = 0;
- pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
- } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
- (v_remain - v_rdma < v_rdma)) {
- /* Support minimum RDMA and give remaining vectors to LAN MSIX
- */
- pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
- pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
- } else {
- /* Split remaining MSIX with RDMA after accounting for AEQ MSIX
- */
- pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
- ICE_RDMA_NUM_AEQ_MSIX;
- pf->num_lan_msix = v_remain - pf->num_rdma_msix;
- }
-}
-
-/**
- * ice_ena_msix_range - Request a range of MSIX vectors from the OS
- * @pf: board private structure
- *
- * Compute the number of MSIX vectors wanted and request from the OS. Adjust
- * device usage if there are not enough vectors. Return the number of vectors
- * reserved or negative on failure.
- */
-static int ice_ena_msix_range(struct ice_pf *pf)
+#define ICE_RDMA_AEQ_MSIX 1
+static int ice_get_default_msix_amount(struct ice_pf *pf)
{
- int num_cpus, hw_num_msix, v_other, v_wanted, v_actual;
- struct device *dev = ice_pf_to_dev(pf);
- int err;
-
- hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
- num_cpus = num_online_cpus();
-
- /* LAN miscellaneous handler */
- v_other = ICE_MIN_LAN_OICR_MSIX;
-
- /* Flow Director */
- if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
- v_other += ICE_FDIR_MSIX;
-
- /* switchdev */
- v_other += ICE_ESWITCH_MSIX;
-
- v_wanted = v_other;
-
- /* LAN traffic */
- pf->num_lan_msix = num_cpus;
- v_wanted += pf->num_lan_msix;
-
- /* RDMA auxiliary driver */
- if (ice_is_rdma_ena(pf)) {
- pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
- v_wanted += pf->num_rdma_msix;
- }
-
- if (v_wanted > hw_num_msix) {
- int v_remain;
-
- dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n",
- v_wanted, hw_num_msix);
-
- if (hw_num_msix < ICE_MIN_MSIX) {
- err = -ERANGE;
- goto exit_err;
- }
-
- v_remain = hw_num_msix - v_other;
- if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
- v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX;
- v_remain = ICE_MIN_LAN_TXRX_MSIX;
- }
-
- ice_reduce_msix_usage(pf, v_remain);
- v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;
-
- dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n",
- pf->num_lan_msix);
- if (ice_is_rdma_ena(pf))
- dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n",
- pf->num_rdma_msix);
- }
-
- /* actually reserve the vectors */
- v_actual = pci_alloc_irq_vectors(pf->pdev, ICE_MIN_MSIX, v_wanted,
- PCI_IRQ_MSIX);
- if (v_actual < 0) {
- dev_err(dev, "unable to reserve MSI-X vectors\n");
- err = v_actual;
- goto exit_err;
- }
-
- if (v_actual < v_wanted) {
- dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
- v_wanted, v_actual);
-
- if (v_actual < ICE_MIN_MSIX) {
- /* error if we can't get minimum vectors */
- pci_free_irq_vectors(pf->pdev);
- err = -ERANGE;
- goto exit_err;
- } else {
- int v_remain = v_actual - v_other;
-
- if (v_remain < ICE_MIN_LAN_TXRX_MSIX)
- v_remain = ICE_MIN_LAN_TXRX_MSIX;
-
- ice_reduce_msix_usage(pf, v_remain);
-
- dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
- pf->num_lan_msix);
-
- if (ice_is_rdma_ena(pf))
- dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
- pf->num_rdma_msix);
- }
- }
-
- return v_actual;
-
-exit_err:
- pf->num_rdma_msix = 0;
- pf->num_lan_msix = 0;
- return err;
+ return ICE_MIN_LAN_OICR_MSIX + num_online_cpus() +
+ (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? ICE_FDIR_MSIX : 0) +
+ (ice_is_rdma_ena(pf) ? num_online_cpus() + ICE_RDMA_AEQ_MSIX : 0);
}
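A worked example of the sum computed above for a 16-CPU system with flow director and RDMA enabled; the per-feature constants are assumptions for illustration (ICE_MIN_LAN_OICR_MSIX and ICE_FDIR_MSIX are defined elsewhere in the driver):

#include <stdio.h>

int main(void)
{
	int cpus = 16;
	int oicr = 1;		/* assumed ICE_MIN_LAN_OICR_MSIX */
	int fdir = 2;		/* assumed ICE_FDIR_MSIX */
	int rdma_aeq = 1;	/* ICE_RDMA_AEQ_MSIX from above */

	/* misc OICR + per-CPU LAN vectors + FDIR + per-CPU RDMA vectors + AEQ */
	printf("default MSI-X = %d\n", oicr + cpus + fdir + cpus + rdma_aeq);
	return 0;
}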
/**
@@ -243,6 +119,7 @@ void ice_clear_interrupt_scheme(struct ice_pf *pf)
{
pci_free_irq_vectors(pf->pdev);
ice_deinit_irq_tracker(pf);
+ ice_deinit_virt_irq_tracker(pf);
}
/**
@@ -252,27 +129,38 @@ void ice_clear_interrupt_scheme(struct ice_pf *pf)
int ice_init_interrupt_scheme(struct ice_pf *pf)
{
int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
- int vectors, max_vectors;
+ int vectors;
- vectors = ice_ena_msix_range(pf);
+ /* load default PF MSI-X range */
+ if (!pf->msix.min)
+ pf->msix.min = ICE_MIN_MSIX;
- if (vectors < 0)
- return -ENOMEM;
+ if (!pf->msix.max)
+ pf->msix.max = min(total_vectors,
+ ice_get_default_msix_amount(pf));
+
+ pf->msix.total = total_vectors;
+ pf->msix.rest = total_vectors - pf->msix.max;
if (pci_msix_can_alloc_dyn(pf->pdev))
- max_vectors = total_vectors;
+ vectors = pf->msix.min;
else
- max_vectors = vectors;
+ vectors = pf->msix.max;
+
+ vectors = pci_alloc_irq_vectors(pf->pdev, pf->msix.min, vectors,
+ PCI_IRQ_MSIX);
+ if (vectors < 0)
+ return vectors;
- ice_init_irq_tracker(pf, max_vectors, vectors);
+ ice_init_irq_tracker(pf, pf->msix.max, vectors);
- return 0;
+ return ice_init_virt_irq_tracker(pf, pf->msix.max, pf->msix.rest);
}
/**
* ice_alloc_irq - Allocate new interrupt vector
* @pf: board private structure
- * @dyn_only: force dynamic allocation of the interrupt
+ * @dyn_allowed: allow dynamic allocation of the interrupt
*
* Allocate new interrupt vector for a given owner id.
* return struct msi_map with interrupt details and track
@@ -285,27 +173,22 @@ int ice_init_interrupt_scheme(struct ice_pf *pf)
* interrupt will be allocated with pci_msix_alloc_irq_at.
*
* Some callers may only support dynamically allocated interrupts.
- * This is indicated with dyn_only flag.
+ * This is indicated with dyn_allowed flag.
*
* On failure, return map with negative .index. The caller
* is expected to check returned map index.
*
*/
-struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only)
+struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_allowed)
{
- int sriov_base_vector = pf->sriov_base_vector;
struct msi_map map = { .index = -ENOENT };
struct device *dev = ice_pf_to_dev(pf);
struct ice_irq_entry *entry;
- entry = ice_get_irq_res(pf, dyn_only);
+ entry = ice_get_irq_res(pf, dyn_allowed);
if (!entry)
return map;
- /* fail if we're about to violate SRIOV vectors space */
- if (sriov_base_vector && entry->index >= sriov_base_vector)
- goto exit_free_res;
-
if (pci_msix_can_alloc_dyn(pf->pdev) && entry->dynamic) {
map = pci_msix_alloc_irq_at(pf->pdev, entry->index, NULL);
if (map.index < 0)
@@ -353,26 +236,40 @@ void ice_free_irq(struct ice_pf *pf, struct msi_map map)
}
/**
- * ice_get_max_used_msix_vector - Get the max used interrupt vector
- * @pf: board private structure
+ * ice_virt_get_irqs - get irqs for SR-IOV use case
+ * @pf: pointer to PF structure
+ * @needed: number of irqs to get
*
- * Return index of maximum used interrupt vectors with respect to the
- * beginning of the MSIX table. Take into account that some interrupts
- * may have been dynamically allocated after MSIX was initially enabled.
+ * Reserve a contiguous block of @needed entries in the SR-IOV MSI-X bitmap
+ * and return the first MSI-X vector index in PF space used by this VF. This
+ * index is used when accessing PF relative registers such as
+ * GLINT_VECT2FUNC and GLINT_DYN_CTL, and will always be the OICR index seen
+ * by the AVF driver, so any functionality deriving queue configuration from
+ * vf->first_vector_idx relies on it as well.
*/
-int ice_get_max_used_msix_vector(struct ice_pf *pf)
+int ice_virt_get_irqs(struct ice_pf *pf, u32 needed)
{
- unsigned long start, index, max_idx;
- void *entry;
+ int res = bitmap_find_next_zero_area(pf->virt_irq_tracker.bm,
+ pf->virt_irq_tracker.num_entries,
+ 0, needed, 0);
- /* Treat all preallocated interrupts as used */
- start = pf->irq_tracker.num_static;
- max_idx = start - 1;
+ if (res >= pf->virt_irq_tracker.num_entries)
+ return -ENOENT;
- xa_for_each_start(&pf->irq_tracker.entries, index, entry, start) {
- if (index > max_idx)
- max_idx = index;
- }
+ bitmap_set(pf->virt_irq_tracker.bm, res, needed);
+
+ /* conversion from number in bitmap to global irq index */
+ return res + pf->virt_irq_tracker.base;
+}
- return max_idx;
+/**
+ * ice_virt_free_irqs - free irqs used by the VF
+ * @pf: pointer to PF structure
+ * @index: first index to be freed
+ * @irqs: number of irqs to free
+ */
+void ice_virt_free_irqs(struct ice_pf *pf, u32 index, u32 irqs)
+{
+ bitmap_clear(pf->virt_irq_tracker.bm, index - pf->virt_irq_tracker.base,
+ irqs);
}
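A hedged sketch of how an SR-IOV caller pairs the two new helpers; only their signatures come from this file, the surrounding flow and register programming are hypothetical:

static int example_assign_vf_irqs(struct ice_pf *pf, u32 num_msix)
{
	int first;

	first = ice_virt_get_irqs(pf, num_msix);	/* reserve a contiguous block */
	if (first < 0)
		return first;

	/* program GLINT_VECT2FUNC etc. using indexes first .. first + num_msix - 1 */

	ice_virt_free_irqs(pf, first, num_msix);	/* release on teardown */
	return 0;
}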
diff --git a/drivers/net/ethernet/intel/ice/ice_irq.h b/drivers/net/ethernet/intel/ice/ice_irq.h
index f35efc08575e..b2f9dbafd57e 100644
--- a/drivers/net/ethernet/intel/ice/ice_irq.h
+++ b/drivers/net/ethernet/intel/ice/ice_irq.h
@@ -15,11 +15,22 @@ struct ice_irq_tracker {
u16 num_static; /* preallocated entries */
};
+struct ice_virt_irq_tracker {
+ unsigned long *bm; /* bitmap to track irq usage */
+ u32 num_entries;
+ /* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
+ * number of MSIX vectors needed for all SR-IOV VFs from the number of
+ * MSIX vectors allowed on this PF.
+ */
+ u32 base;
+};
+
int ice_init_interrupt_scheme(struct ice_pf *pf);
void ice_clear_interrupt_scheme(struct ice_pf *pf);
struct msi_map ice_alloc_irq(struct ice_pf *pf, bool dyn_only);
void ice_free_irq(struct ice_pf *pf, struct msi_map map);
-int ice_get_max_used_msix_vector(struct ice_pf *pf);
+int ice_virt_get_irqs(struct ice_pf *pf, u32 needed);
+void ice_virt_free_irqs(struct ice_pf *pf, u32 index, u32 irqs);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 1ccb572ce285..d2576d606e10 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -10,12 +10,17 @@
#define ICE_LAG_RES_SHARED BIT(14)
#define ICE_LAG_RES_VALID BIT(15)
-#define LACP_TRAIN_PKT_LEN 16
-static const u8 lacp_train_pkt[LACP_TRAIN_PKT_LEN] = { 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0,
- 0x88, 0x09, 0, 0 };
+#define ICE_TRAIN_PKT_LEN 16
+static const u8 lacp_train_pkt[ICE_TRAIN_PKT_LEN] = { 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0x88, 0x09, 0, 0 };
+static const u8 act_act_train_pkt[ICE_TRAIN_PKT_LEN] = { 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0 };
#define ICE_RECIPE_LEN 64
+#define ICE_LAG_SRIOV_CP_RECIPE 10
+
static const u8 ice_dflt_vsi_rcp[ICE_RECIPE_LEN] = {
0x05, 0, 0, 0, 0x20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0x85, 0, 0x01, 0, 0, 0, 0xff, 0xff, 0x08, 0, 0, 0, 0, 0, 0, 0,
@@ -46,10 +51,10 @@ static void ice_lag_set_primary(struct ice_lag *lag)
}
/**
- * ice_lag_set_backup - set PF LAG state to Backup
+ * ice_lag_set_bkup - set PF LAG state to Backup
* @lag: LAG info struct
*/
-static void ice_lag_set_backup(struct ice_lag *lag)
+static void ice_lag_set_bkup(struct ice_lag *lag)
{
struct ice_pf *pf = lag->pf;
@@ -99,6 +104,28 @@ static bool netif_is_same_ice(struct ice_pf *pf, struct net_device *netdev)
}
/**
+ * ice_lag_config_eswitch - configure eswitch to work with LAG
+ * @lag: lag info struct
+ * @netdev: active network interface device struct
+ *
+ * Updates all port representors in eswitch to use @netdev for Tx.
+ *
+ * Configures the netdev to keep dst metadata (also used in representor Tx).
+ * This is required for an uplink without switchdev mode configured.
+ */
+static void ice_lag_config_eswitch(struct ice_lag *lag,
+ struct net_device *netdev)
+{
+ struct ice_repr *repr;
+ unsigned long id;
+
+ xa_for_each(&lag->pf->eswitch.reprs, id, repr)
+ repr->dst->u.port_info.lower_dev = netdev;
+
+ netif_keep_dst(netdev);
+}
+
+/**
* ice_netdev_to_lag - return pointer to associated lag struct from netdev
* @netdev: pointer to net_device struct to query
*/
@@ -210,13 +237,12 @@ ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx,
u8 direction, bool add)
{
struct ice_sw_rule_lkup_rx_tx *s_rule;
+ struct ice_hw *hw = &lag->pf->hw;
u16 s_rule_sz, vsi_num;
- struct ice_hw *hw;
u8 *eth_hdr;
u32 opc;
int err;
- hw = &lag->pf->hw;
vsi_num = ice_get_hw_vsi_num(hw, 0);
s_rule_sz = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule);
@@ -314,26 +340,15 @@ ice_lag_cfg_drop_fltr(struct ice_lag *lag, bool add)
}
/**
- * ice_lag_cfg_pf_fltrs - set filters up for new active port
+ * ice_lag_cfg_pf_fltrs_act_bkup - set filters up for new active port
* @lag: local interfaces lag struct
- * @ptr: opaque data containing notifier event
+ * @bonding_info: netdev event bonding info
*/
static void
-ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
+ice_lag_cfg_pf_fltrs_act_bkup(struct ice_lag *lag,
+ struct netdev_bonding_info *bonding_info)
{
- struct netdev_notifier_bonding_info *info;
- struct netdev_bonding_info *bonding_info;
- struct net_device *event_netdev;
- struct device *dev;
-
- event_netdev = netdev_notifier_info_to_dev(ptr);
- /* not for this netdev */
- if (event_netdev != lag->netdev)
- return;
-
- info = (struct netdev_notifier_bonding_info *)ptr;
- bonding_info = &info->bonding_info;
- dev = ice_pf_to_dev(lag->pf);
+ struct device *dev = ice_pf_to_dev(lag->pf);
/* interface not active - remove old default VSI rule */
if (bonding_info->slave.state && lag->pf_rx_rule_id) {
@@ -354,6 +369,105 @@ ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
}
/**
+ * ice_lag_cfg_lp_fltr - configure lport filters
+ * @lag: local interface's lag struct
+ * @add: true to add the rule, false to remove it
+ * @cp: true for the LACP control-packet rule, false for the general PF lport rule
+ */
+static void
+ice_lag_cfg_lp_fltr(struct ice_lag *lag, bool add, bool cp)
+{
+ struct ice_sw_rule_lkup_rx_tx *s_rule;
+ struct ice_vsi *vsi = lag->pf->vsi[0];
+ u16 buf_len, opc;
+
+ buf_len = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule, ICE_TRAIN_PKT_LEN);
+ s_rule = kzalloc(buf_len, GFP_KERNEL);
+ if (!s_rule) {
+ netdev_warn(lag->netdev, "-ENOMEM error configuring CP filter\n");
+ return;
+ }
+
+ if (add) {
+ if (cp) {
+ s_rule->recipe_id =
+ cpu_to_le16(ICE_LAG_SRIOV_CP_RECIPE);
+ memcpy(s_rule->hdr_data, lacp_train_pkt,
+ ICE_TRAIN_PKT_LEN);
+ } else {
+ s_rule->recipe_id = cpu_to_le16(lag->act_act_recipe);
+ memcpy(s_rule->hdr_data, act_act_train_pkt,
+ ICE_TRAIN_PKT_LEN);
+ }
+
+ s_rule->src = cpu_to_le16(vsi->port_info->lport);
+ s_rule->act = cpu_to_le32(ICE_FWD_TO_VSI |
+ ICE_SINGLE_ACT_LAN_ENABLE |
+ ICE_SINGLE_ACT_VALID_BIT |
+ FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
+ vsi->vsi_num));
+ s_rule->hdr_len = cpu_to_le16(ICE_TRAIN_PKT_LEN);
+ s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+ opc = ice_aqc_opc_add_sw_rules;
+ } else {
+ opc = ice_aqc_opc_remove_sw_rules;
+ if (cp)
+ s_rule->index = cpu_to_le16(lag->cp_rule_idx);
+ else
+ s_rule->index = cpu_to_le16(lag->act_act_rule_idx);
+ }
+ if (ice_aq_sw_rules(&lag->pf->hw, s_rule, buf_len, 1, opc, NULL)) {
+ netdev_warn(lag->netdev, "Error %s %s rule for aggregate\n",
+ add ? "ADDING" : "REMOVING",
+ cp ? "CONTROL PACKET" : "LPORT");
+ goto err_cp_free;
+ }
+
+ if (add) {
+ if (cp)
+ lag->cp_rule_idx = le16_to_cpu(s_rule->index);
+ else
+ lag->act_act_rule_idx = le16_to_cpu(s_rule->index);
+ } else {
+ if (cp)
+ lag->cp_rule_idx = 0;
+ else
+ lag->act_act_rule_idx = 0;
+ }
+
+err_cp_free:
+ kfree(s_rule);
+}
+
+/**
+ * ice_lag_cfg_pf_fltrs - set filters up for PF traffic
+ * @lag: local interfaces lag struct
+ * @ptr: opaque data containing notifier event
+ */
+static void
+ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
+{
+ struct netdev_notifier_bonding_info *info = ptr;
+ struct netdev_bonding_info *bonding_info;
+ struct net_device *event_netdev;
+
+ event_netdev = netdev_notifier_info_to_dev(ptr);
+ if (event_netdev != lag->netdev)
+ return;
+
+ bonding_info = &info->bonding_info;
+
+ if (lag->bond_aa) {
+ if (lag->need_fltr_cfg) {
+ ice_lag_cfg_lp_fltr(lag, true, false);
+ lag->need_fltr_cfg = false;
+ }
+ } else {
+ ice_lag_cfg_pf_fltrs_act_bkup(lag, bonding_info);
+ }
+}
+
+/**
* ice_display_lag_info - print LAG info
* @lag: LAG info struct
*/
@@ -402,12 +516,11 @@ static u16
ice_lag_qbuf_recfg(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *qbuf,
u16 vsi_num, u16 numq, u8 tc)
{
+ struct ice_pf *pf = hw->back;
struct ice_q_ctx *q_ctx;
u16 qid, count = 0;
- struct ice_pf *pf;
int i;
- pf = hw->back;
for (i = 0; i < numq; i++) {
q_ctx = ice_get_lan_q_ctx(hw, vsi_num, tc, i);
if (!q_ctx) {
@@ -577,7 +690,7 @@ ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
}
if (ice_aq_cfg_lan_txq(&lag->pf->hw, qbuf, qbuf_size, valq, oldport,
- newport, NULL)) {
+ newport, ICE_AQC_Q_CFG_TC_CHNG, NULL)) {
dev_warn(dev, "Failure to configure queues for LAG failover\n");
goto qbuf_err;
}
@@ -677,54 +790,6 @@ ice_lag_move_single_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport,
}
/**
- * ice_lag_move_new_vf_nodes - Move Tx scheduling nodes for a VF if required
- * @vf: the VF to move Tx nodes for
- *
- * Called just after configuring new VF queues. Check whether the VF Tx
- * scheduling nodes need to be updated to fail over to the active port. If so,
- * move them now.
- */
-void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
-{
- struct ice_lag_netdev_list ndlist;
- u8 pri_port, act_port;
- struct ice_lag *lag;
- struct ice_vsi *vsi;
- struct ice_pf *pf;
-
- vsi = ice_get_vf_vsi(vf);
-
- if (WARN_ON(!vsi))
- return;
-
- if (WARN_ON(vsi->type != ICE_VSI_VF))
- return;
-
- pf = vf->pf;
- lag = pf->lag;
-
- mutex_lock(&pf->lag_mutex);
- if (!lag->bonded)
- goto new_vf_unlock;
-
- pri_port = pf->hw.port_info->lport;
- act_port = lag->active_port;
-
- if (lag->upper_netdev)
- ice_lag_build_netdev_list(lag, &ndlist);
-
- if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) &&
- lag->bonded && lag->primary && pri_port != act_port &&
- !list_empty(lag->netdev_head))
- ice_lag_move_single_vf_nodes(lag, pri_port, act_port, vsi->idx);
-
- ice_lag_destroy_netdev_list(lag, &ndlist);
-
-new_vf_unlock:
- mutex_unlock(&pf->lag_mutex);
-}
-
-/**
* ice_lag_move_vf_nodes - move Tx scheduling nodes for all VFs to new port
* @lag: lag info struct
* @oldport: lport of previous interface
@@ -767,59 +832,60 @@ void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt)
ice_lag_destroy_netdev_list(lag, &ndlist);
}
-#define ICE_LAG_SRIOV_CP_RECIPE 10
-#define ICE_LAG_SRIOV_TRAIN_PKT_LEN 16
-
/**
- * ice_lag_cfg_cp_fltr - configure filter for control packets
- * @lag: local interface's lag struct
- * @add: add or remove rule
+ * ice_lag_prepare_vf_reset - helper to adjust vf lag for reset
+ * @lag: lag struct for interface that owns VF
+ *
+ * Context: must be called with the lag_mutex lock held.
+ *
+ * Return: active lport value or ICE_LAG_INVALID_PORT if nothing moved.
*/
-static void
-ice_lag_cfg_cp_fltr(struct ice_lag *lag, bool add)
+u8 ice_lag_prepare_vf_reset(struct ice_lag *lag)
{
- struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
- struct ice_vsi *vsi;
- u16 buf_len, opc;
-
- vsi = lag->pf->vsi[0];
-
- buf_len = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule,
- ICE_LAG_SRIOV_TRAIN_PKT_LEN);
- s_rule = kzalloc(buf_len, GFP_KERNEL);
- if (!s_rule) {
- netdev_warn(lag->netdev, "-ENOMEM error configuring CP filter\n");
- return;
- }
-
- if (add) {
- s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
- s_rule->recipe_id = cpu_to_le16(ICE_LAG_SRIOV_CP_RECIPE);
- s_rule->src = cpu_to_le16(vsi->port_info->lport);
- s_rule->act = cpu_to_le32(ICE_FWD_TO_VSI |
- ICE_SINGLE_ACT_LAN_ENABLE |
- ICE_SINGLE_ACT_VALID_BIT |
- FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, vsi->vsi_num));
- s_rule->hdr_len = cpu_to_le16(ICE_LAG_SRIOV_TRAIN_PKT_LEN);
- memcpy(s_rule->hdr_data, lacp_train_pkt, LACP_TRAIN_PKT_LEN);
- opc = ice_aqc_opc_add_sw_rules;
- } else {
- opc = ice_aqc_opc_remove_sw_rules;
- s_rule->index = cpu_to_le16(lag->cp_rule_idx);
- }
- if (ice_aq_sw_rules(&lag->pf->hw, s_rule, buf_len, 1, opc, NULL)) {
- netdev_warn(lag->netdev, "Error %s CP rule for fail-over\n",
- add ? "ADDING" : "REMOVING");
- goto cp_free;
+ u8 pri_prt, act_prt;
+
+ if (lag && lag->bonded && lag->primary && lag->upper_netdev) {
+ if (!lag->bond_aa) {
+ pri_prt = lag->pf->hw.port_info->lport;
+ act_prt = lag->active_port;
+ if (act_prt != pri_prt &&
+ act_prt != ICE_LAG_INVALID_PORT) {
+ ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
+ return act_prt;
+ }
+ } else {
+ if (lag->port_bitmap & ICE_LAGS_M) {
+ lag->port_bitmap &= ~ICE_LAGS_M;
+ ice_lag_aa_failover(lag, ICE_LAGP_IDX, NULL);
+ lag->port_bitmap |= ICE_LAGS_M;
+ }
+ }
}
- if (add)
- lag->cp_rule_idx = le16_to_cpu(s_rule->index);
- else
- lag->cp_rule_idx = 0;
+ return ICE_LAG_INVALID_PORT;
+}
-cp_free:
- kfree(s_rule);
+/**
+ * ice_lag_complete_vf_reset - helper for lag after reset
+ * @lag: lag struct for primary interface
+ * @act_prt: which port should be active for lag
+ *
+ * Context: must be called while holding the lag_mutex.
+ */
+void ice_lag_complete_vf_reset(struct ice_lag *lag, u8 act_prt)
+{
+ u8 pri_prt;
+
+ if (lag && lag->bonded && lag->primary) {
+ if (!lag->bond_aa) {
+ pri_prt = lag->pf->hw.port_info->lport;
+ if (act_prt != ICE_LAG_INVALID_PORT)
+ ice_lag_move_vf_nodes_cfg(lag, pri_prt,
+ act_prt);
+ } else {
+ ice_lag_aa_failover(lag, ICE_LAGS_IDX, NULL);
+ }
+ }
}
/**
@@ -831,13 +897,12 @@ cp_free:
*/
static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
{
- struct netdev_notifier_bonding_info *info;
+ struct netdev_notifier_bonding_info *info = ptr;
struct netdev_bonding_info *bonding_info;
struct net_device *event_netdev;
const char *lag_netdev_name;
event_netdev = netdev_notifier_info_to_dev(ptr);
- info = ptr;
lag_netdev_name = netdev_name(lag->netdev);
bonding_info = &info->bonding_info;
@@ -855,7 +920,7 @@ static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
}
if (bonding_info->slave.state)
- ice_lag_set_backup(lag);
+ ice_lag_set_bkup(lag);
else
ice_lag_set_primary(lag);
@@ -864,6 +929,295 @@ lag_out:
}
/**
+ * ice_lag_aa_qbuf_recfg - fill a single queue buffer for recfg cmd
+ * @hw: HW struct that contains the queue context
+ * @qbuf: pointer to single queue buffer
+ * @vsi_num: index of the VF VSI in PF space
+ * @qnum: queue index
+ *
+ * Return: Zero on success, error code on failure.
+ */
+static int
+ice_lag_aa_qbuf_recfg(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *qbuf,
+ u16 vsi_num, int qnum)
+{
+ struct ice_pf *pf = hw->back;
+ struct ice_q_ctx *q_ctx;
+ u16 q_id;
+
+ q_ctx = ice_get_lan_q_ctx(hw, vsi_num, 0, qnum);
+ if (!q_ctx) {
+ dev_dbg(ice_hw_to_dev(hw), "LAG queue %d no Q context\n", qnum);
+ return -ENOENT;
+ }
+
+ if (q_ctx->q_teid == ICE_INVAL_TEID) {
+ dev_dbg(ice_hw_to_dev(hw), "LAG queue %d INVAL TEID\n", qnum);
+ return -EINVAL;
+ }
+
+ if (q_ctx->q_handle == ICE_INVAL_Q_HANDLE) {
+ dev_dbg(ice_hw_to_dev(hw), "LAG queue %d INVAL Q HANDLE\n", qnum);
+ return -EINVAL;
+ }
+
+ q_id = pf->vsi[vsi_num]->txq_map[q_ctx->q_handle];
+ qbuf->queue_info[0].q_handle = cpu_to_le16(q_id);
+ qbuf->queue_info[0].tc = 0;
+ qbuf->queue_info[0].q_teid = cpu_to_le32(q_ctx->q_teid);
+
+ return 0;
+}
+
+/**
+ * ice_lag_aa_move_vf_qs - Move some/all VF queues to destination
+ * @lag: primary interface's lag struct
+ * @dest: index of destination port
+ * @vsi_num: index of VF VSI in PF space
+ * @all: if true move all queues to destination
+ * @odd: VF wide q indicator for odd/even
+ * @e_pf: PF struct for the event interface
+ *
+ * The @all parameter controls whether the queues are split between the two
+ * interfaces or moved in their entirety to the destination interface.
+ */
+static void ice_lag_aa_move_vf_qs(struct ice_lag *lag, u8 dest, u16 vsi_num,
+ bool all, bool *odd, struct ice_pf *e_pf)
+{
+ DEFINE_RAW_FLEX(struct ice_aqc_cfg_txqs_buf, qbuf, queue_info, 1);
+ struct ice_hw *old_hw, *new_hw, *pri_hw, *sec_hw;
+ struct device *dev = ice_pf_to_dev(lag->pf);
+ struct ice_vsi_ctx *pv_ctx, *sv_ctx;
+ struct ice_lag_netdev_list ndlist;
+ u16 num_q, qbuf_size, sec_vsi_num;
+ u8 pri_lport, sec_lport;
+ u32 pvf_teid, svf_teid;
+ u16 vf_id;
+
+ vf_id = lag->pf->vsi[vsi_num]->vf->vf_id;
+ /* If sec_vf[] not defined, then no second interface to share with */
+ if (lag->sec_vf[vf_id])
+ sec_vsi_num = lag->sec_vf[vf_id]->idx;
+ else
+ return;
+
+ pri_lport = lag->bond_lport_pri;
+ sec_lport = lag->bond_lport_sec;
+
+ if (pri_lport == ICE_LAG_INVALID_PORT ||
+ sec_lport == ICE_LAG_INVALID_PORT)
+ return;
+
+ if (!e_pf)
+ ice_lag_build_netdev_list(lag, &ndlist);
+
+ pri_hw = &lag->pf->hw;
+ if (e_pf && lag->pf != e_pf)
+ sec_hw = &e_pf->hw;
+ else
+ sec_hw = ice_lag_find_hw_by_lport(lag, sec_lport);
+
+ if (!pri_hw || !sec_hw)
+ return;
+
+ if (dest == ICE_LAGP_IDX) {
+ struct ice_vsi *vsi;
+
+ vsi = ice_get_main_vsi(lag->pf);
+ if (!vsi)
+ return;
+
+ old_hw = sec_hw;
+ new_hw = pri_hw;
+ ice_lag_config_eswitch(lag, vsi->netdev);
+ } else {
+ struct ice_pf *sec_pf = sec_hw->back;
+ struct ice_vsi *vsi;
+
+ vsi = ice_get_main_vsi(sec_pf);
+ if (!vsi)
+ return;
+
+ old_hw = pri_hw;
+ new_hw = sec_hw;
+ ice_lag_config_eswitch(lag, vsi->netdev);
+ }
+
+ pv_ctx = ice_get_vsi_ctx(pri_hw, vsi_num);
+ if (!pv_ctx) {
+ dev_warn(dev, "Unable to locate primary VSI %d context for LAG failover\n",
+ vsi_num);
+ return;
+ }
+
+ sv_ctx = ice_get_vsi_ctx(sec_hw, sec_vsi_num);
+ if (!sv_ctx) {
+ dev_warn(dev, "Unable to locate secondary VSI %d context for LAG failover\n",
+ vsi_num);
+ return;
+ }
+
+ num_q = pv_ctx->num_lan_q_entries[0];
+ qbuf_size = __struct_size(qbuf);
+
+ /* Suspend traffic for primary VSI VF */
+ pvf_teid = le32_to_cpu(pv_ctx->sched.vsi_node[0]->info.node_teid);
+ ice_sched_suspend_resume_elems(pri_hw, 1, &pvf_teid, true);
+
+ /* Suspend traffic for secondary VSI VF */
+ svf_teid = le32_to_cpu(sv_ctx->sched.vsi_node[0]->info.node_teid);
+ ice_sched_suspend_resume_elems(sec_hw, 1, &svf_teid, true);
+
+ for (int i = 0; i < num_q; i++) {
+ struct ice_sched_node *n_prt, *q_node, *parent;
+ struct ice_port_info *pi, *new_pi;
+ struct ice_vsi_ctx *src_ctx;
+ struct ice_sched_node *p;
+ struct ice_q_ctx *q_ctx;
+ u16 dst_vsi_num;
+
+ pi = old_hw->port_info;
+ new_pi = new_hw->port_info;
+
+ *odd = !(*odd);
+ if ((dest == ICE_LAGP_IDX && *odd && !all) ||
+ (dest == ICE_LAGS_IDX && !(*odd) && !all) ||
+ lag->q_home[vf_id][i] == dest)
+ continue;
+
+ if (dest == ICE_LAGP_IDX)
+ dst_vsi_num = vsi_num;
+ else
+ dst_vsi_num = sec_vsi_num;
+
+ n_prt = ice_sched_get_free_qparent(new_hw->port_info,
+ dst_vsi_num, 0,
+ ICE_SCHED_NODE_OWNER_LAN);
+ if (!n_prt)
+ continue;
+
+ q_ctx = ice_get_lan_q_ctx(pri_hw, vsi_num, 0, i);
+ if (!q_ctx)
+ continue;
+
+ if (dest == ICE_LAGP_IDX)
+ src_ctx = sv_ctx;
+ else
+ src_ctx = pv_ctx;
+
+ q_node = ice_sched_find_node_by_teid(src_ctx->sched.vsi_node[0],
+ q_ctx->q_teid);
+ if (!q_node)
+ continue;
+
+ qbuf->src_parent_teid = q_node->info.parent_teid;
+ qbuf->dst_parent_teid = n_prt->info.node_teid;
+
+ /* Move the node in the HW/FW */
+ if (ice_lag_aa_qbuf_recfg(pri_hw, qbuf, vsi_num, i))
+ continue;
+
+ if (dest == ICE_LAGP_IDX)
+ ice_aq_cfg_lan_txq(pri_hw, qbuf, qbuf_size, 1,
+ sec_lport, pri_lport,
+ ICE_AQC_Q_CFG_MOVE_TC_CHNG,
+ NULL);
+ else
+ ice_aq_cfg_lan_txq(pri_hw, qbuf, qbuf_size, 1,
+ pri_lport, sec_lport,
+ ICE_AQC_Q_CFG_MOVE_TC_CHNG,
+ NULL);
+
+ /* Move the node in the SW */
+ parent = q_node->parent;
+ if (!parent)
+ continue;
+
+ for (int n = 0; n < parent->num_children; n++) {
+ int j;
+
+ if (parent->children[n] != q_node)
+ continue;
+
+ for (j = n + 1; j < parent->num_children;
+ j++) {
+ parent->children[j - 1] =
+ parent->children[j];
+ }
+ parent->children[j] = NULL;
+ parent->num_children--;
+ break;
+ }
+
+ p = pi->sib_head[0][q_node->tx_sched_layer];
+ while (p) {
+ if (p->sibling == q_node) {
+ p->sibling = q_node->sibling;
+ break;
+ }
+ p = p->sibling;
+ }
+
+ if (pi->sib_head[0][q_node->tx_sched_layer] == q_node)
+ pi->sib_head[0][q_node->tx_sched_layer] =
+ q_node->sibling;
+
+ q_node->parent = n_prt;
+ q_node->info.parent_teid = n_prt->info.node_teid;
+ q_node->sibling = NULL;
+ p = new_pi->sib_head[0][q_node->tx_sched_layer];
+ if (p) {
+ while (p) {
+ if (!p->sibling) {
+ p->sibling = q_node;
+ break;
+ }
+ p = p->sibling;
+ }
+ } else {
+ new_pi->sib_head[0][q_node->tx_sched_layer] =
+ q_node;
+ }
+
+ n_prt->children[n_prt->num_children++] = q_node;
+ lag->q_home[vf_id][i] = dest;
+ }
+
+ ice_sched_suspend_resume_elems(pri_hw, 1, &pvf_teid, false);
+ ice_sched_suspend_resume_elems(sec_hw, 1, &svf_teid, false);
+
+ if (!e_pf)
+ ice_lag_destroy_netdev_list(lag, &ndlist);
+}
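
The odd/even split described in the kernel-doc above reduces to the skip test
at the top of the per-queue loop; restated here as a standalone predicate
purely for illustration (this helper does not exist in the driver):

	/* illustrative only: true when the queue should stay where it is */
	static bool ice_lag_aa_skip_queue(u8 dest, bool odd, bool all, u8 q_home)
	{
		return (dest == ICE_LAGP_IDX && odd && !all) ||
		       (dest == ICE_LAGS_IDX && !odd && !all) ||
		       q_home == dest;
	}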
+
+/**
+ * ice_lag_aa_failover - move VF queues in A/A mode
+ * @lag: primary lag struct
+ * @dest: index of destination port
+ * @e_pf: PF struct for event port
+ */
+void ice_lag_aa_failover(struct ice_lag *lag, u8 dest, struct ice_pf *e_pf)
+{
+ bool odd = true, all = false;
+ int i;
+
+ /* Primary can be a target if down (cleanup), but secondary can't */
+ if (dest == ICE_LAGS_IDX && !(lag->port_bitmap & ICE_LAGS_M))
+ return;
+
+ /* Move all queues to a destination if only one port is active,
+ * or no ports are active and dest is primary.
+ */
+ if ((lag->port_bitmap ^ (ICE_LAGP_M | ICE_LAGS_M)) ||
+ (!lag->port_bitmap && dest == ICE_LAGP_IDX))
+ all = true;
+
+ ice_for_each_vsi(lag->pf, i)
+ if (lag->pf->vsi[i] && lag->pf->vsi[i]->type == ICE_VSI_VF)
+ ice_lag_aa_move_vf_qs(lag, dest, i, all, &odd, e_pf);
+}
+
+/**
* ice_lag_reclaim_vf_tc - move scheduling nodes back to primary interface
* @lag: primary interface lag struct
* @src_hw: HW struct current node location
@@ -879,13 +1233,12 @@ ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
struct ice_aqc_cfg_txqs_buf *qbuf;
+ struct ice_hw *hw = &lag->pf->hw;
struct ice_sched_node *n_prt;
__le32 teid, parent_teid;
struct ice_vsi_ctx *ctx;
- struct ice_hw *hw;
u32 tmp_teid;
- hw = &lag->pf->hw;
ctx = ice_get_vsi_ctx(hw, vsi_num);
if (!ctx) {
dev_warn(dev, "Unable to locate VSI context for LAG reclaim\n");
@@ -926,7 +1279,7 @@ ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
if (ice_aq_cfg_lan_txq(hw, qbuf, qbuf_size, numq,
src_hw->port_info->lport, hw->port_info->lport,
- NULL)) {
+ ICE_AQC_Q_CFG_TC_CHNG, NULL)) {
dev_warn(dev, "Failure to configure queues for LAG failover\n");
goto reclaim_qerr;
}
@@ -997,14 +1350,15 @@ static void ice_lag_link(struct ice_lag *lag)
lag->bonded = true;
lag->role = ICE_LAG_UNSET;
+ lag->need_fltr_cfg = true;
netdev_info(lag->netdev, "Shared SR-IOV resources in bond are active\n");
}
/**
- * ice_lag_unlink - handle unlink event
+ * ice_lag_act_bkup_unlink - handle unlink event for A/B bond
* @lag: LAG info struct
*/
-static void ice_lag_unlink(struct ice_lag *lag)
+static void ice_lag_act_bkup_unlink(struct ice_lag *lag)
{
u8 pri_port, act_port, loc_port;
struct ice_pf *pf = lag->pf;
@@ -1021,6 +1375,9 @@ static void ice_lag_unlink(struct ice_lag *lag)
ice_lag_move_vf_nodes(lag, act_port, pri_port);
lag->primary = false;
lag->active_port = ICE_LAG_INVALID_PORT;
+
+ /* Config primary's eswitch back to normal operation. */
+ ice_lag_config_eswitch(lag, lag->netdev);
} else {
struct ice_lag *primary_lag;
@@ -1037,10 +1394,32 @@ static void ice_lag_unlink(struct ice_lag *lag)
}
}
}
+}
- lag->bonded = false;
- lag->role = ICE_LAG_NONE;
- lag->upper_netdev = NULL;
+/**
+ * ice_lag_aa_unlink - handle unlink event for Active-Active bond
+ * @lag: LAG info struct
+ */
+static void ice_lag_aa_unlink(struct ice_lag *lag)
+{
+ struct ice_lag *pri_lag;
+
+ if (lag->primary) {
+ pri_lag = lag;
+ lag->port_bitmap &= ~ICE_LAGP_M;
+ } else {
+ pri_lag = ice_lag_find_primary(lag);
+ if (pri_lag)
+ pri_lag->port_bitmap &= ~ICE_LAGS_M;
+ }
+
+ if (pri_lag) {
+ ice_lag_aa_failover(pri_lag, ICE_LAGP_IDX, lag->pf);
+ if (lag->primary)
+ pri_lag->bond_lport_pri = ICE_LAG_INVALID_PORT;
+ else
+ pri_lag->bond_lport_sec = ICE_LAG_INVALID_PORT;
+ }
}
/**
@@ -1056,10 +1435,20 @@ static void ice_lag_link_unlink(struct ice_lag *lag, void *ptr)
if (netdev != lag->netdev)
return;
- if (info->linking)
+ if (info->linking) {
ice_lag_link(lag);
- else
- ice_lag_unlink(lag);
+ } else {
+ if (lag->bond_aa)
+ ice_lag_aa_unlink(lag);
+ else
+ ice_lag_act_bkup_unlink(lag);
+
+ lag->bonded = false;
+ lag->role = ICE_LAG_NONE;
+ lag->upper_netdev = NULL;
+ lag->bond_aa = false;
+ lag->need_fltr_cfg = false;
+ }
}
/**
@@ -1077,7 +1466,7 @@ ice_lag_set_swid(u16 primary_swid, struct ice_lag *local_lag,
{
struct ice_aqc_alloc_free_res_elem *buf;
struct ice_aqc_set_port_params *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 buf_len, swid;
int status, i;
@@ -1125,7 +1514,7 @@ ice_lag_set_swid(u16 primary_swid, struct ice_lag *local_lag,
else
swid = local_lag->pf->hw.port_info->sw_id;
- cmd = &desc.params.set_port_params;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
cmd->swid = cpu_to_le16(ICE_AQC_PORT_SWID_VALID | swid);
@@ -1157,11 +1546,8 @@ ice_lag_set_swid(u16 primary_swid, struct ice_lag *local_lag,
*/
static void ice_lag_primary_swid(struct ice_lag *lag, bool link)
{
- struct ice_hw *hw;
- u16 swid;
-
- hw = &lag->pf->hw;
- swid = hw->port_info->sw_id;
+ struct ice_hw *hw = &lag->pf->hw;
+ u16 swid = hw->port_info->sw_id;
if (ice_share_res(hw, ICE_AQC_RES_TYPE_SWID, link, swid))
dev_warn(ice_pf_to_dev(lag->pf), "Failure to set primary interface shared status\n");
@@ -1174,12 +1560,10 @@ static void ice_lag_primary_swid(struct ice_lag *lag, bool link)
*/
static void ice_lag_add_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
{
- u16 num_vsi, rule_buf_sz, vsi_list_id, event_vsi_num, prim_vsi_idx;
- struct ice_sw_rule_vsi_list *s_rule = NULL;
+ u16 rule_buf_sz, vsi_list_id, event_vsi_num, prim_vsi_idx, num_vsi = 1;
+ struct ice_sw_rule_vsi_list *s_rule;
struct device *dev;
- num_vsi = 1;
-
dev = ice_pf_to_dev(lag->pf);
event_vsi_num = event_pf->vsi[0]->vsi_num;
prim_vsi_idx = lag->pf->vsi[0]->idx;
@@ -1215,12 +1599,10 @@ static void ice_lag_add_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
*/
static void ice_lag_del_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
{
- u16 num_vsi, vsi_num, vsi_idx, rule_buf_sz, vsi_list_id;
- struct ice_sw_rule_vsi_list *s_rule = NULL;
+ u16 vsi_num, vsi_idx, rule_buf_sz, vsi_list_id, num_vsi = 1;
+ struct ice_sw_rule_vsi_list *s_rule;
struct device *dev;
- num_vsi = 1;
-
dev = ice_pf_to_dev(lag->pf);
vsi_num = event_pf->vsi[0]->vsi_num;
vsi_idx = lag->pf->vsi[0]->idx;
@@ -1268,6 +1650,11 @@ static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
ice_set_feature_support(pf, ICE_F_SRIOV_LAG);
else
ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
+
+ if (caps->sriov_aa_lag && ice_pkg_has_lport_extract(&pf->hw))
+ ice_set_feature_support(pf, ICE_F_SRIOV_AA_LAG);
+ else
+ ice_clear_feature_support(pf, ICE_F_SRIOV_AA_LAG);
}
/**
@@ -1277,11 +1664,10 @@ static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
*/
static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
{
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *info = ptr;
struct ice_lag *primary_lag;
struct net_device *netdev;
- info = ptr;
netdev = netdev_notifier_info_to_dev(ptr);
/* not for this netdev */
@@ -1296,25 +1682,47 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
*/
if (!primary_lag) {
lag->primary = true;
+ if (!ice_is_switchdev_running(lag->pf))
+ return;
+
/* Configure primary's SWID to be shared */
ice_lag_primary_swid(lag, true);
primary_lag = lag;
+ lag->bond_lport_pri = lag->pf->hw.port_info->lport;
+ lag->bond_lport_sec = ICE_LAG_INVALID_PORT;
+ lag->port_bitmap = 0;
} else {
u16 swid;
+ if (!ice_is_switchdev_running(primary_lag->pf))
+ return;
+
swid = primary_lag->pf->hw.port_info->sw_id;
ice_lag_set_swid(swid, lag, true);
ice_lag_add_prune_list(primary_lag, lag->pf);
- ice_lag_cfg_drop_fltr(lag, true);
+ primary_lag->bond_lport_sec =
+ lag->pf->hw.port_info->lport;
}
/* add filter for primary control packets */
- ice_lag_cfg_cp_fltr(lag, true);
+ ice_lag_cfg_lp_fltr(lag, true, true);
} else {
if (!primary_lag && lag->primary)
primary_lag = lag;
+ if (primary_lag) {
+ for (int i = 0; i < ICE_MAX_SRIOV_VFS; i++) {
+ if (primary_lag->sec_vf[i]) {
+ ice_vsi_release(primary_lag->sec_vf[i]);
+ primary_lag->sec_vf[i] = NULL;
+ }
+ }
+ }
+
if (!lag->primary) {
ice_lag_set_swid(0, lag, false);
+ if (primary_lag)
+ primary_lag->bond_lport_sec =
+ ICE_LAG_INVALID_PORT;
} else {
if (primary_lag && lag->primary) {
ice_lag_primary_swid(lag, false);
@@ -1322,7 +1730,7 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
}
}
/* remove filter for control packets */
- ice_lag_cfg_cp_fltr(lag, false);
+ ice_lag_cfg_lp_fltr(lag, false, !lag->bond_aa);
}
}
@@ -1335,7 +1743,7 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
*/
static void ice_lag_monitor_link(struct ice_lag *lag, void *ptr)
{
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *info = ptr;
struct ice_hw *prim_hw, *active_hw;
struct net_device *event_netdev;
struct ice_pf *pf;
@@ -1348,19 +1756,34 @@ static void ice_lag_monitor_link(struct ice_lag *lag, void *ptr)
if (!netif_is_same_ice(lag->pf, event_netdev))
return;
+ if (info->upper_dev != lag->upper_netdev)
+ return;
+
+ if (info->linking)
+ return;
+
pf = lag->pf;
prim_hw = &pf->hw;
prim_port = prim_hw->port_info->lport;
- info = (struct netdev_notifier_changeupper_info *)ptr;
- if (info->upper_dev != lag->upper_netdev)
- return;
-
- if (!info->linking) {
- /* Since there are only two interfaces allowed in SRIOV+LAG, if
- * one port is leaving, then nodes need to be on primary
- * interface.
- */
+ /* Since there are only two interfaces allowed in SRIOV+LAG, if
+ * one port is leaving, then nodes need to be on primary
+ * interface.
+ */
+ if (lag->bond_aa) {
+ struct ice_netdev_priv *e_ndp;
+ struct ice_pf *e_pf;
+
+ e_ndp = netdev_priv(event_netdev);
+ e_pf = e_ndp->vsi->back;
+
+ if (lag->bond_lport_pri != ICE_LAG_INVALID_PORT &&
+ lag->port_bitmap & ICE_LAGS_M) {
+ lag->port_bitmap &= ~ICE_LAGS_M;
+ ice_lag_aa_failover(lag, ICE_LAGP_IDX, e_pf);
+ lag->bond_lport_sec = ICE_LAG_INVALID_PORT;
+ }
+ } else {
if (prim_port != lag->active_port &&
lag->active_port != ICE_LAG_INVALID_PORT) {
active_hw = ice_lag_find_hw_by_lport(lag,
@@ -1372,45 +1795,32 @@ static void ice_lag_monitor_link(struct ice_lag *lag, void *ptr)
}
/**
- * ice_lag_monitor_active - main PF keep track of which port is active
+ * ice_lag_monitor_act_bkup - keep track of which port is active in A/B LAG
* @lag: lag info struct
- * @ptr: opaque data containing notifier event
+ * @b_info: bonding info
+ * @event_netdev: net_device got target netdev
*
* This function is for the primary PF to monitor changes in which port is
* active and handle changes for SRIOV VF functionality
*/
-static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
+static void ice_lag_monitor_act_bkup(struct ice_lag *lag,
+ struct netdev_bonding_info *b_info,
+ struct net_device *event_netdev)
{
- struct net_device *event_netdev, *event_upper;
- struct netdev_notifier_bonding_info *info;
- struct netdev_bonding_info *bonding_info;
struct ice_netdev_priv *event_np;
struct ice_pf *pf, *event_pf;
u8 prim_port, event_port;
- if (!lag->primary)
- return;
-
pf = lag->pf;
if (!pf)
return;
- event_netdev = netdev_notifier_info_to_dev(ptr);
- rcu_read_lock();
- event_upper = netdev_master_upper_dev_get_rcu(event_netdev);
- rcu_read_unlock();
- if (!netif_is_ice(event_netdev) || event_upper != lag->upper_netdev)
- return;
-
event_np = netdev_priv(event_netdev);
event_pf = event_np->vsi->back;
event_port = event_pf->hw.port_info->lport;
prim_port = pf->hw.port_info->lport;
- info = (struct netdev_notifier_bonding_info *)ptr;
- bonding_info = &info->bonding_info;
-
- if (!bonding_info->slave.state) {
+ if (!b_info->slave.state) {
/* if no port is currently active, then nodes and filters exist
* on primary port, check if we need to move them
*/
@@ -1419,6 +1829,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
ice_lag_move_vf_nodes(lag, prim_port,
event_port);
lag->active_port = event_port;
+ ice_lag_config_eswitch(lag, event_netdev);
return;
}
@@ -1428,6 +1839,7 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
/* new active port */
ice_lag_move_vf_nodes(lag, lag->active_port, event_port);
lag->active_port = event_port;
+ ice_lag_config_eswitch(lag, event_netdev);
} else {
/* port not set as currently active (e.g. new active port
* has already claimed the nodes and filters
@@ -1445,6 +1857,128 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
}
/**
+ * ice_lag_aa_clear_spoof - adjust the placeholder VSI spoofing for A/A LAG
+ * @vsi: placeholder VSI to adjust
+ */
+static void ice_lag_aa_clear_spoof(struct ice_vsi *vsi)
+{
+ ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
+}
+
+/**
+ * ice_lag_monitor_act_act - Keep track of active ports in A/A LAG
+ * @lag: lag struct for primary interface
+ * @b_info: bonding_info for event
+ * @event_netdev: net_device for target netdev
+ */
+static void ice_lag_monitor_act_act(struct ice_lag *lag,
+ struct netdev_bonding_info *b_info,
+ struct net_device *event_netdev)
+{
+ struct ice_netdev_priv *event_np;
+ u8 prim_port, event_port;
+ struct ice_pf *event_pf;
+
+ event_np = netdev_priv(event_netdev);
+ event_pf = event_np->vsi->back;
+ event_port = event_pf->hw.port_info->lport;
+ prim_port = lag->pf->hw.port_info->lport;
+
+ if (b_info->slave.link == BOND_LINK_UP) {
+ /* Port is coming up */
+ if (prim_port == event_port) {
+ /* Processing event for primary interface */
+ if (lag->bond_lport_pri == ICE_LAG_INVALID_PORT)
+ return;
+
+ if (!(lag->port_bitmap & ICE_LAGP_M)) {
+ /* Primary port was not marked up before, move
+ * some|all VF queues to it and mark as up
+ */
+ lag->port_bitmap |= ICE_LAGP_M;
+ ice_lag_aa_failover(lag, ICE_LAGP_IDX, event_pf);
+ }
+ } else {
+ if (lag->bond_lport_sec == ICE_LAG_INVALID_PORT)
+ return;
+
+ /* Create placeholder VSIs on secondary PF.
+ * The placeholder is necessary so that we have
+ * an element that represents the VF on the secondary
+ * interface's scheduling tree. This will be a tree
+ * root for scheduling nodes when they are moved to
+ * the secondary interface.
+ */
+ if (!lag->sec_vf[0]) {
+ struct ice_vsi_cfg_params params = {};
+ struct ice_vsi *nvsi;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ params.type = ICE_VSI_VF;
+ params.port_info = event_pf->hw.port_info;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ ice_for_each_vf(lag->pf, bkt, vf) {
+ params.vf = vf;
+ nvsi = ice_vsi_setup(event_pf,
+ &params);
+ ice_lag_aa_clear_spoof(nvsi);
+ lag->sec_vf[vf->vf_id] = nvsi;
+ }
+ }
+
+ if (!(lag->port_bitmap & ICE_LAGS_M)) {
+ /* Secondary port was not marked up before,
+ * move some|all VF queues to it and mark as up
+ */
+ lag->port_bitmap |= ICE_LAGS_M;
+ ice_lag_aa_failover(lag, ICE_LAGS_IDX, event_pf);
+ }
+ }
+ } else {
+ /* Port is going down */
+ if (prim_port == event_port) {
+ lag->port_bitmap &= ~ICE_LAGP_M;
+ ice_lag_aa_failover(lag, ICE_LAGS_IDX, event_pf);
+ } else {
+ lag->port_bitmap &= ~ICE_LAGS_M;
+ ice_lag_aa_failover(lag, ICE_LAGP_IDX, event_pf);
+ }
+ }
+}
+
+/**
+ * ice_lag_monitor_info - Calls relevant A/A or A/B monitoring function
+ * @lag: lag info struct
+ * @ptr: opaque data containing notifier event
+ *
+ * This function is for the primary PF to monitor changes in which port is
+ * active and handle changes for SRIOV VF functionality
+ */
+static void ice_lag_monitor_info(struct ice_lag *lag, void *ptr)
+{
+ struct netdev_notifier_bonding_info *info = ptr;
+ struct net_device *event_netdev, *event_upper;
+ struct netdev_bonding_info *bonding_info;
+
+ if (!lag->primary)
+ return;
+
+ event_netdev = netdev_notifier_info_to_dev(ptr);
+ bonding_info = &info->bonding_info;
+ rcu_read_lock();
+ event_upper = netdev_master_upper_dev_get_rcu(event_netdev);
+ rcu_read_unlock();
+ if (!netif_is_ice(event_netdev) || event_upper != lag->upper_netdev)
+ return;
+
+ if (lag->bond_aa)
+ ice_lag_monitor_act_act(lag, bonding_info, event_netdev);
+ else
+ ice_lag_monitor_act_bkup(lag, bonding_info, event_netdev);
+}
+/**
* ice_lag_chk_comp - evaluate bonded interface for feature support
* @lag: lag info struct
* @ptr: opaque data for netdev event info
@@ -1452,13 +1986,21 @@ static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
static bool
ice_lag_chk_comp(struct ice_lag *lag, void *ptr)
{
+ struct netdev_notifier_bonding_info *info = ptr;
struct net_device *event_netdev, *event_upper;
- struct netdev_notifier_bonding_info *info;
struct netdev_bonding_info *bonding_info;
struct list_head *tmp;
struct device *dev;
int count = 0;
+ /* All members need to know if the bond is A/A or A/B */
+ bonding_info = &info->bonding_info;
+ lag->bond_mode = bonding_info->master.bond_mode;
+ if (lag->bond_mode != BOND_MODE_ACTIVEBACKUP)
+ lag->bond_aa = true;
+ else
+ lag->bond_aa = false;
+
if (!lag->primary)
return true;
@@ -1479,13 +2021,9 @@ ice_lag_chk_comp(struct ice_lag *lag, void *ptr)
return false;
}
- info = (struct netdev_notifier_bonding_info *)ptr;
- bonding_info = &info->bonding_info;
- lag->bond_mode = bonding_info->master.bond_mode;
- if (lag->bond_mode != BOND_MODE_ACTIVEBACKUP) {
- dev_info(dev, "Bond Mode not ACTIVE-BACKUP - VF LAG disabled\n");
+ if (lag->bond_aa && !ice_is_feature_supported(lag->pf,
+ ICE_F_SRIOV_AA_LAG))
return false;
- }
list_for_each(tmp, lag->netdev_head) {
struct ice_dcbx_cfg *dcb_cfg, *peer_dcb_cfg;
@@ -1589,10 +2127,9 @@ ice_lag_unregister(struct ice_lag *lag, struct net_device *event_netdev)
static void
ice_lag_monitor_rdma(struct ice_lag *lag, void *ptr)
{
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *info = ptr;
struct net_device *netdev;
- info = ptr;
netdev = netdev_notifier_info_to_dev(ptr);
if (netdev != lag->netdev)
@@ -1640,12 +2177,29 @@ static void ice_lag_chk_disabled_bond(struct ice_lag *lag, void *ptr)
*/
static void ice_lag_disable_sriov_bond(struct ice_lag *lag)
{
- struct ice_netdev_priv *np;
- struct ice_pf *pf;
+ struct ice_pf *pf = ice_netdev_to_pf(lag->netdev);
- np = netdev_priv(lag->netdev);
- pf = np->vsi->back;
ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
+ ice_clear_feature_support(pf, ICE_F_SRIOV_AA_LAG);
+}
+
+/**
+ * ice_lag_preset_drop_fltr - preset drop filter for A/B bonds
+ * @lag: local lag struct
+ * @ptr: opaque data containing event
+ *
+ * Sets the initial drop filter for secondary interface in an
+ * active-backup bond
+ */
+static void ice_lag_preset_drop_fltr(struct ice_lag *lag, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+
+ if (netdev != lag->netdev || lag->primary || !lag->need_fltr_cfg)
+ return;
+
+ ice_lag_cfg_drop_fltr(lag, true);
+ lag->need_fltr_cfg = false;
}
/**
@@ -1686,10 +2240,12 @@ static void ice_lag_process_event(struct work_struct *work)
ice_lag_unregister(lag_work->lag, netdev);
goto lag_cleanup;
}
- ice_lag_monitor_active(lag_work->lag,
- &lag_work->info.bonding_info);
ice_lag_cfg_pf_fltrs(lag_work->lag,
&lag_work->info.bonding_info);
+ ice_lag_preset_drop_fltr(lag_work->lag,
+ &lag_work->info.bonding_info);
+ ice_lag_monitor_info(lag_work->lag,
+ &lag_work->info.bonding_info);
}
ice_lag_info_event(lag_work->lag, &lag_work->info.bonding_info);
break;
@@ -1762,9 +2318,8 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
lag_work->lag = lag;
lag_work->event = event;
if (event == NETDEV_CHANGEUPPER) {
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *info = ptr;
- info = ptr;
upper_netdev = info->upper_dev;
} else {
upper_netdev = netdev_master_upper_dev_get(netdev);
@@ -1814,10 +2369,8 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
*/
static int ice_register_lag_handler(struct ice_lag *lag)
{
+ struct notifier_block *notif_blk = &lag->notif_block;
struct device *dev = ice_pf_to_dev(lag->pf);
- struct notifier_block *notif_blk;
-
- notif_blk = &lag->notif_block;
if (!notif_blk->notifier_call) {
notif_blk->notifier_call = ice_lag_event_handler;
@@ -1837,10 +2390,9 @@ static int ice_register_lag_handler(struct ice_lag *lag)
*/
static void ice_unregister_lag_handler(struct ice_lag *lag)
{
+ struct notifier_block *notif_blk = &lag->notif_block;
struct device *dev = ice_pf_to_dev(lag->pf);
- struct notifier_block *notif_blk;
- notif_blk = &lag->notif_block;
if (notif_blk->notifier_call) {
unregister_netdevice_notifier(notif_blk);
dev_dbg(dev, "LAG event handler unregistered\n");
@@ -1902,13 +2454,12 @@ ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
struct ice_aqc_cfg_txqs_buf *qbuf;
+ struct ice_hw *hw = &lag->pf->hw;
struct ice_sched_node *n_prt;
__le32 teid, parent_teid;
struct ice_vsi_ctx *ctx;
- struct ice_hw *hw;
u32 tmp_teid;
- hw = &lag->pf->hw;
ctx = ice_get_vsi_ctx(hw, vsi_num);
if (!ctx) {
dev_warn(dev, "LAG rebuild failed after reset due to VSI Context failure\n");
@@ -1945,7 +2496,8 @@ ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
}
if (ice_aq_cfg_lan_txq(hw, qbuf, qbuf_size, numq, hw->port_info->lport,
- dest_hw->port_info->lport, NULL)) {
+ dest_hw->port_info->lport,
+ ICE_AQC_Q_CFG_TC_CHNG, NULL)) {
dev_warn(dev, "Failure to configure queues for LAG reset rebuild\n");
goto sync_qerr;
}
@@ -2041,9 +2593,13 @@ int ice_init_lag(struct ice_pf *pf)
lag->netdev = vsi->netdev;
lag->role = ICE_LAG_NONE;
lag->active_port = ICE_LAG_INVALID_PORT;
+ lag->port_bitmap = 0x0;
lag->bonded = false;
+ lag->bond_aa = false;
+ lag->need_fltr_cfg = false;
lag->upper_netdev = NULL;
lag->notif_block.notifier_call = NULL;
+ memset(lag->sec_vf, 0, sizeof(lag->sec_vf));
err = ice_register_lag_handler(lag);
if (err) {
@@ -2061,6 +2617,11 @@ int ice_init_lag(struct ice_pf *pf)
if (err)
goto free_rcp_res;
+ err = ice_create_lag_recipe(&pf->hw, &lag->act_act_recipe,
+ ice_lport_rcp, 1);
+ if (err)
+ goto free_lport_res;
+
/* associate recipes to profiles */
for (n = 0; n < ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER; n++) {
err = ice_aq_get_recipe_to_profile(&pf->hw, n,
@@ -2070,7 +2631,8 @@ int ice_init_lag(struct ice_pf *pf)
if (recipe_bits & BIT(ICE_SW_LKUP_DFLT)) {
recipe_bits |= BIT(lag->pf_recipe) |
- BIT(lag->lport_recipe);
+ BIT(lag->lport_recipe) |
+ BIT(lag->act_act_recipe);
ice_aq_map_recipe_to_profile(&pf->hw, n,
recipe_bits, NULL);
}
@@ -2081,9 +2643,13 @@ int ice_init_lag(struct ice_pf *pf)
dev_dbg(dev, "INIT LAG complete\n");
return 0;
+free_lport_res:
+ ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1,
+ &lag->lport_recipe);
+
free_rcp_res:
ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1,
- &pf->lag->pf_recipe);
+ &lag->pf_recipe);
lag_error:
kfree(lag);
pf->lag = NULL;
@@ -2099,9 +2665,7 @@ lag_error:
*/
void ice_deinit_lag(struct ice_pf *pf)
{
- struct ice_lag *lag;
-
- lag = pf->lag;
+ struct ice_lag *lag = pf->lag;
if (!lag)
return;
@@ -2170,11 +2734,15 @@ void ice_lag_rebuild(struct ice_pf *pf)
ice_lag_move_vf_nodes_sync(prim_lag, &pf->hw);
}
- ice_lag_cfg_cp_fltr(lag, true);
+ if (!lag->bond_aa) {
+ ice_lag_cfg_lp_fltr(lag, true, true);
+ if (lag->pf_rx_rule_id)
+ if (ice_lag_cfg_dflt_fltr(lag, true))
+ dev_err(ice_pf_to_dev(pf), "Error adding default VSI rule in rebuild\n");
+ } else {
+ ice_lag_cfg_lp_fltr(lag, true, false);
+ }
- if (lag->pf_rx_rule_id)
- if (ice_lag_cfg_dflt_fltr(lag, true))
- dev_err(ice_pf_to_dev(pf), "Error adding default VSI rule in rebuild\n");
ice_clear_rdma_cap(pf);
lag_rebuild_out:
@@ -2193,7 +2761,8 @@ bool ice_lag_is_switchdev_running(struct ice_pf *pf)
struct ice_lag *lag = pf->lag;
struct net_device *tmp_nd;
- if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) || !lag)
+ if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) ||
+ !lag || !lag->upper_netdev)
return false;
rcu_read_lock();
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index bab2c83142a1..f77ebcd61042 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -14,7 +14,11 @@ enum ice_lag_role {
ICE_LAG_UNSET
};
-#define ICE_LAG_INVALID_PORT 0xFF
+#define ICE_LAG_INVALID_PORT 0xFF
+#define ICE_LAGP_IDX 0
+#define ICE_LAGS_IDX 1
+#define ICE_LAGP_M 0x1
+#define ICE_LAGS_M 0x2
#define ICE_LAG_RESET_RETRIES 5
#define ICE_SW_DEFAULT_PROFILE 0
@@ -41,12 +45,26 @@ struct ice_lag {
u8 active_port; /* lport value for the current active port */
u8 bonded:1; /* currently bonded */
u8 primary:1; /* this is primary */
+ u8 bond_aa:1; /* is this bond active-active */
+ u8 need_fltr_cfg:1; /* fltrs for A/A bond still need to be made */
+ u8 port_bitmap:2; /* bitmap of active ports */
+ u8 bond_lport_pri; /* lport value for primary PF */
+ u8 bond_lport_sec; /* lport value for secondary PF */
+
+ /* q_home keeps track of which interface each queue is currently on */
+ u8 q_home[ICE_MAX_SRIOV_VFS][ICE_MAX_RSS_QS_PER_VF];
+
+ /* placeholder VSIs on the secondary interface to hang VF queues from */
+ struct ice_vsi *sec_vf[ICE_MAX_SRIOV_VFS];
+
u16 pf_recipe;
u16 lport_recipe;
+ u16 act_act_recipe;
u16 pf_rx_rule_id;
u16 pf_tx_rule_id;
u16 cp_rule_idx;
u16 lport_rule_idx;
+ u16 act_act_rule_idx;
u8 role;
};
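
The ICE_LAGP_M/ICE_LAGS_M bits in port_bitmap record which ports currently
carry VF queues; a short interpretation sketch, not driver code:

	/* illustrative only: both bits set means queues are split across the
	 * two ports, a single bit means all queues live on that port
	 */
	bool both_active = (lag->port_bitmap & (ICE_LAGP_M | ICE_LAGS_M)) ==
			   (ICE_LAGP_M | ICE_LAGS_M);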
@@ -64,10 +82,12 @@ struct ice_lag_work {
} info;
};
-void ice_lag_move_new_vf_nodes(struct ice_vf *vf);
+void ice_lag_aa_failover(struct ice_lag *lag, u8 dest, struct ice_pf *e_pf);
int ice_init_lag(struct ice_pf *pf);
void ice_deinit_lag(struct ice_pf *pf);
void ice_lag_rebuild(struct ice_pf *pf);
bool ice_lag_is_switchdev_running(struct ice_pf *pf);
void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt);
+u8 ice_lag_prepare_vf_reset(struct ice_lag *lag);
+void ice_lag_complete_vf_reset(struct ice_lag *lag, u8 act_prt);
#endif /* _ICE_LAG_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 611577ebc29d..185672c7e17d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -229,7 +229,7 @@ struct ice_32b_rx_flex_desc_nic {
__le16 status_error1;
u8 flexi_flags2;
u8 ts_low;
- __le16 l2tag2_1st;
+ __le16 raw_csum;
__le16 l2tag2_2nd;
/* Qword 3 */
@@ -342,6 +342,9 @@ enum ice_flg64_bits {
/* for ice_32byte_rx_flex_desc.pkt_length member */
#define ICE_RX_FLX_DESC_PKT_LEN_M (0x3FFF) /* 14-bits */
+/* ice_32byte_rx_flex_desc::hdr_len_sph_flex_flags1 */
+#define ICE_RX_FLEX_DESC_HDR_LEN_M GENMASK(10, 0)
+
enum ice_rx_flex_desc_status_error_0_bits {
/* Note: These are predefined bit offsets */
ICE_RX_FLEX_DESC_STATUS0_DD_S = 0,
@@ -371,29 +374,21 @@ enum ice_rx_flex_desc_status_error_1_bits {
ICE_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
};
-#define ICE_RXQ_CTX_SIZE_DWORDS 8
-#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22
#define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS 5
#define GLTCLAN_CQ_CNTX(i, CQ) (GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800))
-/* RLAN Rx queue context data
- *
- * The sizes of the variables may be larger than needed due to crossing byte
- * boundaries. If we do not have the width of the variable set to the correct
- * size then we could end up shifting bits off the top of the variable when the
- * variable is at the top of a byte and crosses over into the next byte.
- */
+/* RLAN Rx queue context data */
struct ice_rlan_ctx {
u16 head;
- u16 cpuid; /* bigger than needed, see above for reason */
+ u8 cpuid;
#define ICE_RLAN_BASE_S 7
u64 base;
u16 qlen;
#define ICE_RLAN_CTX_DBUF_S 7
- u16 dbuf; /* bigger than needed, see above for reason */
+ u8 dbuf;
#define ICE_RLAN_CTX_HBUF_S 6
- u16 hbuf; /* bigger than needed, see above for reason */
+ u8 hbuf;
u8 dtype;
u8 dsize;
u8 crcstrip;
@@ -401,29 +396,15 @@ struct ice_rlan_ctx {
u8 hsplit_0;
u8 hsplit_1;
u8 showiv;
- u32 rxmax; /* bigger than needed, see above for reason */
+ u16 rxmax;
u8 tphrdesc_ena;
u8 tphwdesc_ena;
u8 tphdata_ena;
u8 tphhead_ena;
- u16 lrxqthresh; /* bigger than needed, see above for reason */
+ u8 lrxqthresh;
u8 prefena; /* NOTE: normally must be set to 1 at init */
};
-struct ice_ctx_ele {
- u16 offset;
- u16 size_of;
- u16 width;
- u16 lsb;
-};
-
-#define ICE_CTX_STORE(_struct, _ele, _width, _lsb) { \
- .offset = offsetof(struct _struct, _ele), \
- .size_of = sizeof_field(struct _struct, _ele), \
- .width = _width, \
- .lsb = _lsb, \
-}
-
/* for hsplit_0 field of Rx RLAN context */
enum ice_rlan_ctx_rx_hsplit_0 {
ICE_RLAN_RX_HSPLIT_0_NO_SPLIT = 0,
@@ -500,10 +481,15 @@ enum ice_tx_desc_len_fields {
struct ice_tx_ctx_desc {
__le32 tunneling_params;
__le16 l2tag2;
- __le16 rsvd;
+ __le16 gcs;
__le64 qw1;
};
+#define ICE_TX_GCS_DESC_START_M GENMASK(7, 0)
+#define ICE_TX_GCS_DESC_OFFSET_M GENMASK(11, 8)
+#define ICE_TX_GCS_DESC_TYPE_M GENMASK(14, 12)
+#define ICE_TX_GCS_DESC_CSUM_PSH 1
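
The new gcs word of the Tx context descriptor is assembled from these three
fields; a hedged packing sketch, assuming FIELD_PREP is used as elsewhere in
the driver (the helper name is hypothetical):

	/* illustrative only: pack checksum start, offset and type */
	static inline __le16 ice_tx_gcs_pack(u8 start, u8 offset, u8 type)
	{
		return cpu_to_le16(FIELD_PREP(ICE_TX_GCS_DESC_START_M, start) |
				   FIELD_PREP(ICE_TX_GCS_DESC_OFFSET_M, offset) |
				   FIELD_PREP(ICE_TX_GCS_DESC_TYPE_M, type));
	}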
+
#define ICE_TXD_CTX_QW1_CMD_S 4
#define ICE_TXD_CTX_QW1_CMD_M (0x7FUL << ICE_TXD_CTX_QW1_CMD_S)
@@ -551,18 +537,12 @@ enum ice_tx_ctx_desc_eipt_offload {
#define ICE_LAN_TXQ_MAX_QGRPS 127
#define ICE_LAN_TXQ_MAX_QDIS 1023
-/* Tx queue context data
- *
- * The sizes of the variables may be larger than needed due to crossing byte
- * boundaries. If we do not have the width of the variable set to the correct
- * size then we could end up shifting bits off the top of the variable when the
- * variable is at the top of a byte and crosses over into the next byte.
- */
+/* Tx queue context data */
struct ice_tlan_ctx {
#define ICE_TLAN_CTX_BASE_S 7
u64 base; /* base is defined in 128-byte units */
u8 port_num;
- u16 cgd_num; /* bigger than needed, see above for reason */
+ u8 cgd_num;
u8 pf_num;
u16 vmvf_num;
u8 vmvf_type;
@@ -573,7 +553,7 @@ struct ice_tlan_ctx {
u8 tsyn_ena;
u8 internal_usage_flag;
u8 alt_vlan;
- u16 cpuid; /* bigger than needed, see above for reason */
+ u8 cpuid;
u8 wb_mode;
u8 tphrd_desc;
u8 tphrd;
@@ -582,7 +562,7 @@ struct ice_tlan_ctx {
u16 qnum_in_func;
u8 itr_notification_mode;
u8 adjust_prof_id;
- u32 qlen; /* bigger than needed, see above for reason */
+ u16 qlen;
u8 quanta_prof_idx;
u8 tso_ena;
u16 tso_qnum;
@@ -590,7 +570,47 @@ struct ice_tlan_ctx {
u8 drop_ena;
u8 cache_prof_idx;
u8 pkt_shaper_prof_idx;
- u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! */
+};
+
+#define ICE_TXTIME_TX_DESC_IDX_M GENMASK(12, 0)
+#define ICE_TXTIME_STAMP_M GENMASK(31, 13)
+
+/* Tx time stamp descriptor */
+struct ice_ts_desc {
+ __le32 tx_desc_idx_tstamp;
+};
+
+#define ICE_TS_DESC(R, i) (&(((struct ice_ts_desc *)((R)->desc))[i]))
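
Each ice_ts_desc word carries the Tx descriptor index in its low 13 bits and
the time stamp in the upper 19; a minimal packing sketch (the helper name is
an assumption, not a driver API):

	/* illustrative only: build one Tx time stamp descriptor word */
	static inline __le32 ice_ts_desc_pack(u16 tx_desc_idx, u32 tstamp)
	{
		return cpu_to_le32(FIELD_PREP(ICE_TXTIME_TX_DESC_IDX_M, tx_desc_idx) |
				   FIELD_PREP(ICE_TXTIME_STAMP_M, tstamp));
	}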
+
+#define ICE_TXTIME_MAX_QUEUE 2047
+#define ICE_SET_TXTIME_MAX_Q_AMOUNT 127
+#define ICE_TXTIME_FETCH_TS_DESC_DFLT 8
+#define ICE_TXTIME_FETCH_PROFILE_CNT 16
+
+/* Tx Time queue context data */
+struct ice_txtime_ctx {
+#define ICE_TXTIME_CTX_BASE_S 7
+ u64 base; /* base is defined in 128-byte units */
+ u8 pf_num;
+ u16 vmvf_num;
+ u8 vmvf_type;
+ u16 src_vsi;
+ u8 cpuid;
+ u8 tphrd_desc;
+ u16 qlen;
+ u8 timer_num;
+ u8 txtime_ena_q;
+ u8 drbell_mode_32;
+#define ICE_TXTIME_CTX_DRBELL_MODE_32 1
+ u8 ts_res;
+#define ICE_TXTIME_CTX_RESOLUTION_128NS 7
+ u8 ts_round_type;
+ u8 ts_pacing_slot;
+#define ICE_TXTIME_CTX_FETCH_PROF_ID_0 0
+ u8 merging_ena;
+ u8 ts_fetch_prof_id;
+ u8 ts_fetch_cache_line_aln_thld;
+ u8 tx_pipe_delay_mode;
};
#endif /* _ICE_LAN_TX_RX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index a7d45a8ce7ac..15621707fbf8 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -157,6 +157,16 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
}
}
+static u16 ice_get_rxq_count(struct ice_pf *pf)
+{
+ return min(ice_get_avail_rxq_count(pf), num_online_cpus());
+}
+
+static u16 ice_get_txq_count(struct ice_pf *pf)
+{
+ return min(ice_get_avail_txq_count(pf), num_online_cpus());
+}
+
/**
* ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
* @vsi: the VSI being configured
@@ -178,9 +188,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->alloc_txq = vsi->req_txq;
vsi->num_txq = vsi->req_txq;
} else {
- vsi->alloc_txq = min3(pf->num_lan_msix,
- ice_get_avail_txq_count(pf),
- (u16)num_online_cpus());
+ vsi->alloc_txq = ice_get_txq_count(pf);
}
pf->num_lan_tx = vsi->alloc_txq;
@@ -193,17 +201,13 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->alloc_rxq = vsi->req_rxq;
vsi->num_rxq = vsi->req_rxq;
} else {
- vsi->alloc_rxq = min3(pf->num_lan_msix,
- ice_get_avail_rxq_count(pf),
- (u16)num_online_cpus());
+ vsi->alloc_rxq = ice_get_rxq_count(pf);
}
}
pf->num_lan_rx = vsi->alloc_rxq;
- vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
- max_t(int, vsi->alloc_rxq,
- vsi->alloc_txq));
+ vsi->num_q_vectors = max(vsi->alloc_rxq, vsi->alloc_txq);
break;
case ICE_VSI_SF:
vsi->alloc_txq = 1;
@@ -480,8 +484,7 @@ static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
if (!q_vector->tx.tx_ring)
return IRQ_HANDLED;
-#define FDIR_RX_DESC_CLEAN_BUDGET 64
- ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET);
+ ice_clean_ctrl_rx_irq(q_vector->rx.rx_ring);
ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring);
return IRQ_HANDLED;
@@ -567,6 +570,8 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
return -ENOMEM;
}
+ vsi->irq_dyn_alloc = pci_msix_can_alloc_dyn(vsi->back->pdev);
+
switch (vsi->type) {
case ICE_VSI_PF:
case ICE_VSI_SF:
@@ -827,7 +832,13 @@ bool ice_is_safe_mode(struct ice_pf *pf)
*/
bool ice_is_rdma_ena(struct ice_pf *pf)
{
- return test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ union devlink_param_value value;
+ int err;
+
+ err = devl_param_driverinit_value_get(priv_to_devlink(pf),
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ &value);
+ return err ? test_bit(ICE_FLAG_RDMA_ENA, pf->flags) : value.vbool;
}
/**
@@ -1173,12 +1184,11 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
static void
ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
- struct ice_pf *pf = vsi->back;
u16 qcount, qmap;
u8 offset = 0;
int pow;
- qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);
+ qcount = vsi->num_rxq;
pow = order_base_2(qcount);
qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
@@ -1417,9 +1427,12 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->reg_idx = vsi->rxq_map[i];
ring->vsi = vsi;
ring->netdev = vsi->netdev;
- ring->dev = dev;
ring->count = vsi->num_rx_desc;
ring->cached_phctime = pf->ptp.cached_phc_time;
+
+ if (ice_is_feature_supported(pf, ICE_F_GCS))
+ ring->flags |= ICE_RX_FLAGS_RING_GCS;
+
WRITE_ONCE(vsi->rx_rings[i], ring);
}
@@ -1564,7 +1577,7 @@ static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
return;
}
- status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HENA);
+ status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HASHCFG);
if (status)
dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
vsi->vsi_num, status);
@@ -1700,6 +1713,12 @@ bool ice_pf_state_is_nominal(struct ice_pf *pf)
return true;
}
+#define ICE_FW_MODE_REC_M BIT(1)
+bool ice_is_recovery_mode(struct ice_hw *hw)
+{
+ return rd32(hw, GL_MNG_FWSM) & ICE_FW_MODE_REC_M;
+}
+
/**
* ice_update_eth_stats - Update VSI-specific ethernet statistics counters
* @vsi: the VSI to be updated
@@ -1758,9 +1777,8 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
* @prio: priority for the RXDID for this queue
* @ena_ts: true to enable timestamp and false to disable timestamp
*/
-void
-ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
- bool ena_ts)
+void ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
+ bool ena_ts)
{
int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
@@ -2045,12 +2063,15 @@ static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
}
/**
- * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
+ * ice_vsi_cfg_sw_lldp - Config switch rules for LLDP packet handling
* @vsi: the VSI being configured
* @tx: bool to determine Tx or Rx rule
* @create: bool to determine create or remove Rule
+ *
+ * Adding an ethtype Tx rule to the uplink VSI results in it being applied
+ * to the whole port, so LLDP transmission for VFs will be blocked too.
*/
-void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
+void ice_vsi_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
{
int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
enum ice_sw_fwd_act_type act);
@@ -2065,19 +2086,59 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX,
ICE_DROP_PACKET);
} else {
- if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) {
- status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num,
- create);
- } else {
+ if (!test_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags)) {
status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX,
ICE_FWD_TO_VSI);
+ if (!status || !create)
+ goto report;
+
+ dev_info(dev,
+ "Failed to add generic LLDP Rx filter on VSI %i error: %d, falling back to specialized AQ control\n",
+ vsi->vsi_num, status);
}
+
+ status = ice_lldp_fltr_add_remove(&pf->hw, vsi, create);
+ if (!status)
+ set_bit(ICE_FLAG_LLDP_AQ_FLTR, pf->flags);
+
}
+report:
if (status)
- dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
- create ? "adding" : "removing", tx ? "TX" : "RX",
- vsi->vsi_num, status);
+ dev_warn(dev, "Failed to %s %s LLDP rule on VSI %i error: %d\n",
+ create ? "add" : "remove", tx ? "Tx" : "Rx",
+ vsi->vsi_num, status);
+}
+
+/**
+ * ice_cfg_sw_rx_lldp - Enable/disable software handling of LLDP
+ * @pf: the PF being configured
+ * @enable: enable or disable
+ *
+ * Configure switch rules to enable/disable LLDP handling by software
+ * across PF.
+ */
+void ice_cfg_sw_rx_lldp(struct ice_pf *pf, bool enable)
+{
+ struct ice_vsi *vsi;
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ vsi = ice_get_main_vsi(pf);
+ ice_vsi_cfg_sw_lldp(vsi, false, enable);
+
+ if (!test_bit(ICE_FLAG_SRIOV_ENA, pf->flags))
+ return;
+
+ ice_for_each_vf(pf, bkt, vf) {
+ vsi = ice_get_vf_vsi(vf);
+
+ if (WARN_ON(!vsi))
+ continue;
+
+ if (ice_vf_is_lldp_ena(vf))
+ ice_vsi_cfg_sw_lldp(vsi, false, enable);
+ }
}
/**
@@ -2508,7 +2569,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) {
ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
ICE_DROP_PACKET);
- ice_cfg_sw_lldp(vsi, true, true);
+ ice_vsi_cfg_sw_lldp(vsi, true, true);
}
if (!vsi->agg_node)
@@ -2576,7 +2637,6 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
return;
vsi->irqs_ready = false;
- ice_free_cpu_rx_rmap(vsi);
ice_for_each_q_vector(vsi, i) {
int irq_num;
@@ -2589,12 +2649,6 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
vsi->q_vectors[i]->num_ring_rx))
continue;
- /* clear the affinity notifier in the IRQ descriptor */
- if (!IS_ENABLED(CONFIG_RFS_ACCEL))
- irq_set_affinity_notifier(irq_num, NULL);
-
- /* clear the affinity_hint in the IRQ descriptor */
- irq_update_affinity_hint(irq_num, NULL);
synchronize_irq(irq_num);
devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
}
@@ -2714,7 +2768,6 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
* @vsi: VSI pointer
*
* Associate queue[s] with napi for all vectors.
- * The caller must hold rtnl_lock.
*/
void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
{
@@ -2724,6 +2777,7 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
if (!netdev)
return;
+ ASSERT_RTNL();
ice_for_each_rxq(vsi, q_idx)
netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX,
&vsi->rx_rings[q_idx]->q_vector->napi);
@@ -2744,16 +2798,23 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi)
* @vsi: VSI pointer
*
* Clear the association between all VSI queues queue[s] and napi.
- * The caller must hold rtnl_lock.
*/
void ice_vsi_clear_napi_queues(struct ice_vsi *vsi)
{
struct net_device *netdev = vsi->netdev;
- int q_idx;
+ int q_idx, v_idx;
if (!netdev)
return;
+ ASSERT_RTNL();
+ /* Clear the NAPI's interrupt number */
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+
+ netif_napi_set_irq(&q_vector->napi, -1);
+ }
+
ice_for_each_txq(vsi, q_idx)
netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, NULL);
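
With the ASSERT_RTNL() checks above, callers are expected to hold the RTNL
lock around both NAPI queue helpers; a minimal usage sketch with an
illustrative call site:

	/* illustrative only: (re)associate NAPI instances under RTNL */
	rtnl_lock();
	ice_vsi_set_napi_queues(vsi);
	rtnl_unlock();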
@@ -2805,9 +2866,11 @@ int ice_vsi_release(struct ice_vsi *vsi)
/* The Rx rule will only exist to remove if the LLDP FW
* engine is currently stopped
*/
- if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
- !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
- ice_cfg_sw_lldp(vsi, false, false);
+ if (!ice_is_safe_mode(pf) &&
+ !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) &&
+ (vsi->type == ICE_VSI_PF || (vsi->type == ICE_VSI_VF &&
+ ice_vf_is_lldp_ena(vsi->vf))))
+ ice_vsi_cfg_sw_lldp(vsi, false, false);
ice_vsi_decfg(vsi);
@@ -3135,7 +3198,7 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
if (!netdev)
return;
- /* CHNL VSI doesn't have it's own netdev, hence, no netdev_tc */
+ /* CHNL VSI doesn't have its own netdev, hence, no netdev_tc */
if (vsi->type == ICE_VSI_CHNL)
return;
@@ -3672,20 +3735,20 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
status = ice_aq_set_link_restart_an(pi, ena, NULL);
- /* if link is owned by manageability, FW will return ICE_AQ_RC_EMODE.
+ /* if link is owned by manageability, FW will return LIBIE_AQ_RC_EMODE.
* this is not a fatal error, so print a warning message and return
* a success code. Return an error if FW returns an error code other
- * than ICE_AQ_RC_EMODE
+ * than LIBIE_AQ_RC_EMODE
*/
if (status == -EIO) {
- if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
+ if (hw->adminq.sq_last_status == LIBIE_AQ_RC_EMODE)
dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
(ena ? "ON" : "OFF"), status,
- ice_aq_str(hw->adminq.sq_last_status));
+ libie_aq_str(hw->adminq.sq_last_status));
} else if (status) {
dev_err(dev, "can't set link to %s, err %d aq_err %s\n",
(ena ? "ON" : "OFF"), status,
- ice_aq_str(hw->adminq.sq_last_status));
+ libie_aq_str(hw->adminq.sq_last_status));
return status;
}
@@ -3876,15 +3939,18 @@ void ice_init_feature_support(struct ice_pf *pf)
ice_set_feature_support(pf, ICE_F_CGU);
if (ice_is_clock_mux_in_netlist(&pf->hw))
ice_set_feature_support(pf, ICE_F_SMA_CTRL);
- if (ice_gnss_is_gps_present(&pf->hw))
+ if (ice_gnss_is_module_present(&pf->hw))
ice_set_feature_support(pf, ICE_F_GNSS);
break;
default:
break;
}
- if (pf->hw.mac_type == ICE_MAC_E830)
+ if (pf->hw.mac_type == ICE_MAC_E830) {
ice_set_feature_support(pf, ICE_F_MBX_LIMIT);
+ ice_set_feature_support(pf, ICE_F_GCS);
+ ice_set_feature_support(pf, ICE_F_TXTIME);
+ }
}
/**
@@ -3931,24 +3997,6 @@ void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
}
/**
- * ice_vsi_ctx_set_allow_override - allow destination override on VSI
- * @ctx: pointer to VSI ctx structure
- */
-void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx)
-{
- ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
-}
-
-/**
- * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI
- * @ctx: pointer to VSI ctx structure
- */
-void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
-{
- ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
-}
-
-/**
* ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
* @vsi: pointer to VSI structure
* @set: set or unset the bit
@@ -3972,3 +4020,38 @@ ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set)
vsi->info = ctx.info;
return 0;
}
+
+/**
+ * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
+ * @vsi: VSI used to update l2tsel on
+ * @l2tsel: l2tsel setting requested
+ *
+ * Use the l2tsel setting to update all of the Rx queue context bits for l2tsel.
+ * This will modify which descriptor field the first offloaded VLAN will be
+ * stripped into.
+ */
+void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 l2tsel_bit;
+ int i;
+
+ if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
+ l2tsel_bit = 0;
+ else
+ l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);
+
+ for (i = 0; i < vsi->alloc_rxq; i++) {
+ u16 pfq = vsi->rxq_map[i];
+ u32 qrx_context_offset;
+ u32 regval;
+
+ qrx_context_offset =
+ QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);
+
+ regval = rd32(hw, qrx_context_offset);
+ regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
+ regval |= l2tsel_bit;
+ wr32(hw, qrx_context_offset, regval);
+ }
+}
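
A one-line usage sketch for the helper above (the call site is illustrative):

	/* illustrative only: strip the first offloaded VLAN tag into L2TAG1 */
	ice_vsi_update_l2tsel(vsi, ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1);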
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 10d6fc479a32..2cb1eb98b9da 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -11,6 +11,13 @@
#define ICE_VSI_FLAG_INIT BIT(0)
#define ICE_VSI_FLAG_NO_INIT 0
+#define ICE_L2TSEL_QRX_CONTEXT_REG_IDX 3
+#define ICE_L2TSEL_BIT_OFFSET 23
+enum ice_l2tsel {
+ ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND,
+ ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1,
+};
+
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
bool ice_pf_state_is_nominal(struct ice_pf *pf);
@@ -29,7 +36,8 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
-void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
+void ice_vsi_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
+void ice_cfg_sw_rx_lldp(struct ice_pf *pf, bool enable);
int ice_set_link(struct ice_vsi *vsi, bool ena);
@@ -90,6 +98,7 @@ void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
bool ice_is_safe_mode(struct ice_pf *pf);
bool ice_is_rdma_ena(struct ice_pf *pf);
+bool ice_is_recovery_mode(struct ice_hw *hw);
bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi);
bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi);
int ice_set_dflt_vsi(struct ice_vsi *vsi);
@@ -104,10 +113,6 @@ ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx);
void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx);
-
-void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx);
-
-void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx);
int ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set);
int ice_vsi_add_vlan_zero(struct ice_vsi *vsi);
int ice_vsi_del_vlan_zero(struct ice_vsi *vsi);
@@ -118,4 +123,5 @@ void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f);
void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f);
void ice_init_feature_support(struct ice_pf *pf);
bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi);
+void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel);
#endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 1eaa4428fd24..4bb68e7a00f5 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -14,7 +14,7 @@
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#include "devlink/devlink.h"
-#include "devlink/devlink_port.h"
+#include "devlink/port.h"
#include "ice_sf_eth.h"
#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
@@ -37,7 +37,11 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
#define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg"
MODULE_DESCRIPTION(DRV_SUMMARY);
-MODULE_IMPORT_NS(LIBIE);
+MODULE_IMPORT_NS("LIBETH");
+MODULE_IMPORT_NS("LIBETH_XDP");
+MODULE_IMPORT_NS("LIBIE");
+MODULE_IMPORT_NS("LIBIE_ADMINQ");
+MODULE_IMPORT_NS("LIBIE_FWLOG");
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
@@ -379,7 +383,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
* should go into promiscuous mode. There should be some
* space reserved for promiscuous filters.
*/
- if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
+ if (hw->adminq.sq_last_status == LIBIE_AQ_RC_ENOSPC &&
!test_and_set_bit(ICE_FLTR_OVERFLOW_PROMISC,
vsi->state)) {
promisc_forced_on = true;
@@ -1119,7 +1123,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (status)
dev_dbg(dev, "Failed to update link status on port %d, err %d aq_err %s\n",
pi->lport, status,
- ice_aq_str(pi->hw->adminq.sq_last_status));
+ libie_aq_str(pi->hw->adminq.sq_last_status));
ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
@@ -1144,7 +1148,10 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (link_up == old_link && link_speed == old_link_speed)
return 0;
- ice_ptp_link_change(pf, pf->hw.pf_id, link_up);
+ if (!link_up && old_link)
+ pf->link_down_events++;
+
+ ice_ptp_link_change(pf, link_up);
if (ice_is_dcb_active(pf)) {
if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
@@ -1247,32 +1254,6 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
}
/**
- * ice_get_fwlog_data - copy the FW log data from ARQ event
- * @pf: PF that the FW log event is associated with
- * @event: event structure containing FW log data
- */
-static void
-ice_get_fwlog_data(struct ice_pf *pf, struct ice_rq_event_info *event)
-{
- struct ice_fwlog_data *fwlog;
- struct ice_hw *hw = &pf->hw;
-
- fwlog = &hw->fwlog_ring.rings[hw->fwlog_ring.tail];
-
- memset(fwlog->data, 0, PAGE_SIZE);
- fwlog->data_size = le16_to_cpu(event->desc.datalen);
-
- memcpy(fwlog->data, event->msg_buf, fwlog->data_size);
- ice_fwlog_ring_increment(&hw->fwlog_ring.tail, hw->fwlog_ring.size);
-
- if (ice_fwlog_ring_full(&hw->fwlog_ring)) {
- /* the rings are full so bump the head to create room */
- ice_fwlog_ring_increment(&hw->fwlog_ring.head,
- hw->fwlog_ring.size);
- }
-}
-
-/**
* ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
* @pf: pointer to the PF private structure
* @task: intermediate helper storage and identifier for waiting
@@ -1562,11 +1543,15 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
}
break;
case ice_aqc_opc_fw_logs_event:
- ice_get_fwlog_data(pf, &event);
+ libie_get_fwlog_data(&hw->fwlog, event.msg_buf,
+ le16_to_cpu(event.desc.datalen));
break;
case ice_aqc_opc_lldp_set_mib_change:
ice_dcb_process_lldp_set_mib_change(pf, &event);
break;
+ case ice_aqc_opc_get_health_status:
+ ice_process_health_status_event(pf, &event);
+ break;
default:
dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
qtype, opcode);
@@ -1714,7 +1699,7 @@ static int ice_service_task_stop(struct ice_pf *pf)
ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);
if (pf->serv_tmr.function)
- del_timer_sync(&pf->serv_tmr);
+ timer_delete_sync(&pf->serv_tmr);
if (pf->serv_task.func)
cancel_work_sync(&pf->serv_task);
@@ -1740,7 +1725,7 @@ static void ice_service_task_restart(struct ice_pf *pf)
*/
static void ice_service_timer(struct timer_list *t)
{
- struct ice_pf *pf = from_timer(pf, t, serv_tmr);
+ struct ice_pf *pf = timer_container_of(pf, t, serv_tmr);
mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
ice_service_task_schedule(pf);
@@ -1816,6 +1801,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
+ ice_report_mdd_event(pf, ICE_MDD_SRC_TX_PQM, pf_num, vf_num,
+ event, queue);
wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
}
@@ -1829,6 +1816,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
+ ice_report_mdd_event(pf, ICE_MDD_SRC_TX_TCLAN, pf_num, vf_num,
+ event, queue);
wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX);
}
@@ -1842,6 +1831,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_rx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on RX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
+ ice_report_mdd_event(pf, ICE_MDD_SRC_RX, pf_num, vf_num, event,
+ queue);
wr32(hw, GL_MDET_RX, 0xffffffff);
}
@@ -2355,6 +2346,18 @@ static void ice_check_media_subtask(struct ice_pf *pf)
}
}
+static void ice_service_task_recovery_mode(struct work_struct *work)
+{
+ struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
+
+ set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
+ ice_clean_adminq_subtask(pf);
+
+ ice_service_task_complete(pf);
+
+ mod_timer(&pf->serv_tmr, jiffies + msecs_to_jiffies(100));
+}
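
This stripped-down service task pairs with the new ice_is_recovery_mode()
helper added in ice_lib.c; a hedged sketch of how a probe path might select it
(the surrounding flow is an assumption, not taken from this patch):

	/* illustrative only: use the minimal service task when firmware
	 * reports recovery mode
	 */
	if (ice_is_recovery_mode(&pf->hw))
		INIT_WORK(&pf->serv_task, ice_service_task_recovery_mode);
	else
		INIT_WORK(&pf->serv_task, ice_service_task);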
+
/**
* ice_service_task - manage and run subtasks
* @work: pointer to work_struct contained by the PF struct
@@ -2364,9 +2367,11 @@ static void ice_service_task(struct work_struct *work)
struct ice_pf *pf = container_of(work, struct ice_pf, serv_task);
unsigned long start_time = jiffies;
- /* subtasks */
+ if (pf->health_reporters.tx_hang_buf.tx_ring) {
+ ice_report_tx_hang(pf);
+ pf->health_reporters.tx_hang_buf.tx_ring = NULL;
+ }
- /* process reset requests first */
ice_reset_subtask(pf);
/* bail if a reset/recovery cycle is pending or rebuild failed */
@@ -2378,11 +2383,11 @@ static void ice_service_task(struct work_struct *work)
}
if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
- struct iidc_event *event;
+ struct iidc_rdma_event *event;
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (event) {
- set_bit(IIDC_EVENT_CRIT_ERR, event->type);
+ set_bit(IIDC_RDMA_EVENT_CRIT_ERR, event->type);
/* report the entire OICR value to AUX driver */
swap(event->reg, pf->oicr_err_reg);
ice_send_event_to_aux(pf, event);
@@ -2401,11 +2406,11 @@ static void ice_service_task(struct work_struct *work)
ice_plug_aux_dev(pf);
if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
- struct iidc_event *event;
+ struct iidc_rdma_event *event;
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (event) {
- set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
+ set_bit(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE, event->type);
ice_send_event_to_aux(pf, event);
kfree(event);
}
@@ -2505,34 +2510,6 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
}
/**
- * ice_irq_affinity_notify - Callback for affinity changes
- * @notify: context as to what irq was changed
- * @mask: the new affinity mask
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * so that we may register to receive changes to the irq affinity masks.
- */
-static void
-ice_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct ice_q_vector *q_vector =
- container_of(notify, struct ice_q_vector, affinity_notify);
-
- cpumask_copy(&q_vector->affinity_mask, mask);
-}
-
-/**
- * ice_irq_affinity_release - Callback for affinity notifier release
- * @ref: internal core kernel usage
- *
- * This is a callback function used by the irq_set_affinity_notifier function
- * to inform the current notification subscriber that they will no longer
- * receive notifications.
- */
-static void ice_irq_affinity_release(struct kref __always_unused *ref) {}
-
-/**
* ice_vsi_ena_irq - Enable IRQ for the given VSI
* @vsi: the VSI being configured
*/
@@ -2595,19 +2572,6 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
err);
goto free_q_irqs;
}
-
- /* register for affinity change notifications */
- if (!IS_ENABLED(CONFIG_RFS_ACCEL)) {
- struct irq_affinity_notify *affinity_notify;
-
- affinity_notify = &q_vector->affinity_notify;
- affinity_notify->notify = ice_irq_affinity_notify;
- affinity_notify->release = ice_irq_affinity_release;
- irq_set_affinity_notifier(irq_num, affinity_notify);
- }
-
- /* assign the mask for this irq */
- irq_update_affinity_hint(irq_num, &q_vector->affinity_mask);
}
err = ice_set_cpu_rx_rmap(vsi);
@@ -2623,9 +2587,6 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
free_q_irqs:
while (vector--) {
irq_num = vsi->q_vectors[vector]->irq.virq;
- if (!IS_ENABLED(CONFIG_RFS_ACCEL))
- irq_set_affinity_notifier(irq_num, NULL);
- irq_update_affinity_hint(irq_num, NULL);
devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
}
return err;
@@ -2762,6 +2723,27 @@ void ice_map_xdp_rings(struct ice_vsi *vsi)
}
/**
+ * ice_unmap_xdp_rings - Unmap XDP rings from interrupt vectors
+ * @vsi: the VSI with XDP rings being unmapped
+ */
+static void ice_unmap_xdp_rings(struct ice_vsi *vsi)
+{
+ int v_idx;
+
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+ struct ice_tx_ring *ring;
+
+ ice_for_each_tx_ring(ring, q_vector->tx)
+ if (!ring->tx_buf || !ice_ring_is_xdp(ring))
+ break;
+
+ /* restore the value of last node prior to XDP setup */
+ q_vector->tx.tx_ring = ring;
+ }
+}
+
+/**
* ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
* @vsi: VSI to bring up Tx rings used by XDP
* @prog: bpf program that will be assigned to VSI
@@ -2824,7 +2806,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
if (status) {
dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
status);
- goto clear_xdp_rings;
+ goto unmap_xdp_rings;
}
/* assign the prog only when it's not already present on VSI;
@@ -2840,6 +2822,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
ice_vsi_assign_bpf_prog(vsi, prog);
return 0;
+unmap_xdp_rings:
+ ice_unmap_xdp_rings(vsi);
clear_xdp_rings:
ice_for_each_xdp_txq(vsi, i)
if (vsi->xdp_rings[i]) {
@@ -2856,6 +2840,8 @@ err_map_xdp:
mutex_unlock(&pf->avail_q_mutex);
devm_kfree(dev, vsi->xdp_rings);
+ vsi->xdp_rings = NULL;
+
return -ENOMEM;
}
@@ -2871,7 +2857,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_pf *pf = vsi->back;
- int i, v_idx;
+ int i;
/* q_vectors are freed in reset path so there's no point in detaching
* rings
@@ -2879,17 +2865,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
if (cfg_type == ICE_XDP_CFG_PART)
goto free_qmap;
- ice_for_each_q_vector(vsi, v_idx) {
- struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
- struct ice_tx_ring *ring;
-
- ice_for_each_tx_ring(ring, q_vector->tx)
- if (!ring->tx_buf || !ice_ring_is_xdp(ring))
- break;
-
- /* restore the value of last node prior to XDP setup */
- q_vector->tx.tx_ring = ring;
- }
+ ice_unmap_xdp_rings(vsi);
free_qmap:
mutex_lock(&pf->avail_q_mutex);
@@ -2983,10 +2959,7 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
*/
static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
{
- if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
- return ICE_RXBUF_1664;
- else
- return ICE_RXBUF_3072;
+ return ICE_RXBUF_3072;
}
/**
@@ -3034,28 +3007,24 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
if (xdp_ring_err) {
NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
+ goto resume_if;
} else {
xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
ICE_XDP_CFG_FULL);
- if (xdp_ring_err)
+ if (xdp_ring_err) {
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
+ goto resume_if;
+ }
}
xdp_features_set_redirect_target(vsi->netdev, true);
- /* reallocate Rx queues that are used for zero-copy */
- xdp_ring_err = ice_realloc_zc_buf(vsi, true);
- if (xdp_ring_err)
- NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
xdp_features_clear_redirect_target(vsi->netdev);
xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
- /* reallocate Rx queues that were used for zero-copy */
- xdp_ring_err = ice_realloc_zc_buf(vsi, false);
- if (xdp_ring_err)
- NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
}
+resume_if:
if (if_running)
ret = ice_up(vsi);
@@ -3174,12 +3143,14 @@ static irqreturn_t ice_ll_ts_intr(int __always_unused irq, void *data)
hw = &pf->hw;
tx = &pf->ptp.port.tx;
spin_lock_irqsave(&tx->lock, flags);
- ice_ptp_complete_tx_single_tstamp(tx);
+ if (tx->init) {
+ ice_ptp_complete_tx_single_tstamp(tx);
- idx = find_next_bit_wrap(tx->in_use, tx->len,
- tx->last_ll_ts_idx_read + 1);
- if (idx != tx->len)
- ice_ptp_req_tx_single_tstamp(tx, idx);
+ idx = find_next_bit_wrap(tx->in_use, tx->len,
+ tx->last_ll_ts_idx_read + 1);
+ if (idx != tx->len)
+ ice_ptp_req_tx_single_tstamp(tx, idx);
+ }
spin_unlock_irqrestore(&tx->lock, flags);
val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
@@ -3281,22 +3252,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & PFINT_OICR_TSYN_TX_M) {
ena_mask &= ~PFINT_OICR_TSYN_TX_M;
- if (ice_pf_state_is_nominal(pf) &&
- pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) {
- struct ice_ptp_tx *tx = &pf->ptp.port.tx;
- unsigned long flags;
- u8 idx;
-
- spin_lock_irqsave(&tx->lock, flags);
- idx = find_next_bit_wrap(tx->in_use, tx->len,
- tx->last_ll_ts_idx_read + 1);
- if (idx != tx->len)
- ice_ptp_req_tx_single_tstamp(tx, idx);
- spin_unlock_irqrestore(&tx->lock, flags);
- } else if (ice_ptp_pf_handles_tx_interrupt(pf)) {
- set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
- ret = IRQ_WAKE_THREAD;
- }
+
+ ret = ice_ptp_ts_irq(pf);
}
if (oicr & PFINT_OICR_TSYN_EVNT_M) {
@@ -3666,6 +3623,15 @@ void ice_set_netdev_features(struct net_device *netdev)
*/
netdev->hw_features |= NETIF_F_RXFCS;
+ /* Allow core to manage IRQs affinity */
+ netif_set_affinity_auto(netdev);
+
+ /* Mutual exclusivity for TSO and GCS is enforced by the set features
+ * ndo callback.
+ */
+ if (ice_is_feature_supported(pf, ICE_F_GCS))
+ netdev->hw_features |= NETIF_F_HW_CSUM;
+
netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
}
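
The netif_set_affinity_auto() call added above is what makes the driver-local IRQ affinity plumbing removed earlier in this patch (ice_irq_affinity_notify/ice_irq_affinity_release and the irq_update_affinity_hint() calls) unnecessary: the net core now tracks and restores per-queue IRQ affinity itself. A one-line sketch of the opt-in, for reference (illustrative placement only):

	/* called once while configuring the netdev, before queue IRQs are
	 * requested; the core then manages their affinity from here on
	 */
	netif_set_affinity_auto(netdev);
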
@@ -3974,9 +3940,10 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf)
 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
* @pf: board private structure to initialize
*/
-static void ice_deinit_pf(struct ice_pf *pf)
+void ice_deinit_pf(struct ice_pf *pf)
{
- ice_service_task_stop(pf);
+ /* note that this also unrolls a partially failed ice_init_pf() */
+
mutex_destroy(&pf->lag_mutex);
mutex_destroy(&pf->adev_mutex);
mutex_destroy(&pf->sw_mutex);
@@ -3994,9 +3961,17 @@ static void ice_deinit_pf(struct ice_pf *pf)
pf->avail_rxqs = NULL;
}
+ if (pf->txtime_txqs) {
+ bitmap_free(pf->txtime_txqs);
+ pf->txtime_txqs = NULL;
+ }
+
if (pf->ptp.clock)
ptp_clock_unregister(pf->ptp.clock);
+ if (!xa_empty(&pf->irq_tracker.entries))
+ ice_free_irq_msix_misc(pf);
+
xa_destroy(&pf->dyn_ports);
xa_destroy(&pf->sf_nums);
}
@@ -4043,21 +4018,32 @@ static void ice_set_pf_caps(struct ice_pf *pf)
}
clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
- if (func_caps->common_cap.ieee_1588 &&
- !(pf->hw.mac_type == ICE_MAC_E830))
+ if (func_caps->common_cap.ieee_1588)
set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
pf->max_pf_txqs = func_caps->common_cap.num_txq;
pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
}
+void ice_start_service_task(struct ice_pf *pf)
+{
+ timer_setup(&pf->serv_tmr, ice_service_timer, 0);
+ pf->serv_tmr_period = HZ;
+ INIT_WORK(&pf->serv_task, ice_service_task);
+ clear_bit(ICE_SERVICE_SCHED, pf->state);
+}
+
/**
* ice_init_pf - Initialize general software structures (struct ice_pf)
* @pf: board private structure to initialize
+ * Return: 0 on success, negative errno otherwise.
*/
-static int ice_init_pf(struct ice_pf *pf)
+int ice_init_pf(struct ice_pf *pf)
{
- ice_set_pf_caps(pf);
+ struct udp_tunnel_nic_info *udp_tunnel_nic = &pf->hw.udp_tunnel_nic;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ int err = -ENOMEM;
mutex_init(&pf->sw_mutex);
mutex_init(&pf->tc_mutex);
@@ -4070,23 +4056,7 @@ static int ice_init_pf(struct ice_pf *pf)
init_waitqueue_head(&pf->reset_wait_queue);
- /* setup service timer and periodic service task */
- timer_setup(&pf->serv_tmr, ice_service_timer, 0);
- pf->serv_tmr_period = HZ;
- INIT_WORK(&pf->serv_task, ice_service_task);
- clear_bit(ICE_SERVICE_SCHED, pf->state);
-
mutex_init(&pf->avail_q_mutex);
- pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
- if (!pf->avail_txqs)
- return -ENOMEM;
-
- pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
- if (!pf->avail_rxqs) {
- bitmap_free(pf->avail_txqs);
- pf->avail_txqs = NULL;
- return -ENOMEM;
- }
mutex_init(&pf->vfs.table_lock);
hash_init(pf->vfs.table);
@@ -4099,7 +4069,36 @@ static int ice_init_pf(struct ice_pf *pf)
xa_init(&pf->dyn_ports);
xa_init(&pf->sf_nums);
+ pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
+ pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
+ pf->txtime_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
+ if (!pf->avail_txqs || !pf->avail_rxqs || !pf->txtime_txqs)
+ goto undo_init;
+
+ udp_tunnel_nic->set_port = ice_udp_tunnel_set_port;
+ udp_tunnel_nic->unset_port = ice_udp_tunnel_unset_port;
+ udp_tunnel_nic->shared = &hw->udp_tunnel_shared;
+ udp_tunnel_nic->tables[0].n_entries = hw->tnl.valid_count[TNL_VXLAN];
+ udp_tunnel_nic->tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN;
+ udp_tunnel_nic->tables[1].n_entries = hw->tnl.valid_count[TNL_GENEVE];
+ udp_tunnel_nic->tables[1].tunnel_types = UDP_TUNNEL_TYPE_GENEVE;
+
+ /* In case of MSIX we are going to set up the misc vector right here
+ * to handle admin queue events etc. In case of legacy and MSI
+ * the misc functionality and queue processing is combined in
+ * the same vector and that gets set up at open.
+ */
+ err = ice_req_irq_msix_misc(pf);
+ if (err) {
+ dev_err(dev, "setup of misc vector failed: %d\n", err);
+ goto undo_init;
+ }
+
return 0;
+undo_init:
+ /* deinit handles half-initialized pf just fine */
+ ice_deinit_pf(pf);
+ return err;
}
/**
@@ -4229,7 +4228,7 @@ static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
} else {
vsi->info.sec_flags = ctxt->info.sec_flags;
vsi->info.sw_flags2 = ctxt->info.sw_flags2;
@@ -4540,17 +4539,23 @@ ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware)
dev_info(dev, "Tx scheduling layers switching feature disabled\n");
else
dev_info(dev, "Tx scheduling layers switching feature enabled\n");
- /* if there was a change in topology ice_cfg_tx_topo triggered
- * a CORER and we need to re-init hw
+ return 0;
+ } else if (err == -ENODEV) {
+ /* If we failed to re-initialize the device, we can no longer
+ * continue loading.
*/
- ice_deinit_hw(hw);
- err = ice_init_hw(hw);
-
+ dev_warn(dev, "Failed to initialize hardware after applying Tx scheduling configuration.\n");
return err;
} else if (err == -EIO) {
dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n");
+ return 0;
+ } else if (err == -EEXIST) {
+ return 0;
}
+ /* Do not treat this as a fatal error. */
+ dev_info(dev, "Failed to apply Tx scheduling configuration, err %pe\n",
+ ERR_PTR(err));
return 0;
}
@@ -4650,19 +4655,6 @@ static void ice_print_wake_reason(struct ice_pf *pf)
}
/**
- * ice_pf_fwlog_update_module - update 1 module
- * @pf: pointer to the PF struct
- * @log_level: log_level to use for the @module
- * @module: module to update
- */
-void ice_pf_fwlog_update_module(struct ice_pf *pf, int log_level, int module)
-{
- struct ice_hw *hw = &pf->hw;
-
- hw->fwlog_cfg.module_entries[module].log_level = log_level;
-}
-
-/**
* ice_register_netdev - register netdev
* @vsi: pointer to the VSI struct
*/
@@ -4741,55 +4733,11 @@ static void ice_decfg_netdev(struct ice_vsi *vsi)
vsi->netdev = NULL;
}
-/**
- * ice_wait_for_fw - wait for full FW readiness
- * @hw: pointer to the hardware structure
- * @timeout: milliseconds that can elapse before timing out
- */
-static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
-{
- int fw_loading;
- u32 elapsed = 0;
-
- while (elapsed <= timeout) {
- fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
-
- /* firmware was not yet loaded, we have to wait more */
- if (fw_loading) {
- elapsed += 100;
- msleep(100);
- continue;
- }
- return 0;
- }
-
- return -ETIMEDOUT;
-}
-
-int ice_init_dev(struct ice_pf *pf)
+void ice_init_dev_hw(struct ice_pf *pf)
{
- struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
int err;
- err = ice_init_hw(hw);
- if (err) {
- dev_err(dev, "ice_init_hw failed: %d\n", err);
- return err;
- }
-
- /* Some cards require longer initialization times
- * due to necessity of loading FW from an external source.
- * This can take even half a minute.
- */
- if (ice_is_pf_c827(hw)) {
- err = ice_wait_for_fw(hw, 30000);
- if (err) {
- dev_err(dev, "ice_wait_for_fw timed out");
- return err;
- }
- }
-
ice_init_feature_support(pf);
err = ice_init_ddp_config(hw, pf);
@@ -4806,64 +4754,28 @@ int ice_init_dev(struct ice_pf *pf)
*/
ice_set_safe_mode_caps(hw);
}
+}
- err = ice_init_pf(pf);
- if (err) {
- dev_err(dev, "ice_init_pf failed: %d\n", err);
- goto err_init_pf;
- }
-
- pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
- pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
- pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
- pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
- if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
- pf->hw.udp_tunnel_nic.tables[0].n_entries =
- pf->hw.tnl.valid_count[TNL_VXLAN];
- pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
- UDP_TUNNEL_TYPE_VXLAN;
- }
- if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
- pf->hw.udp_tunnel_nic.tables[1].n_entries =
- pf->hw.tnl.valid_count[TNL_GENEVE];
- pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
- UDP_TUNNEL_TYPE_GENEVE;
- }
+int ice_init_dev(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+ ice_set_pf_caps(pf);
err = ice_init_interrupt_scheme(pf);
if (err) {
dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
- err = -EIO;
- goto err_init_interrupt_scheme;
+ return -EIO;
}
- /* In case of MSIX we are going to setup the misc vector right here
- * to handle admin queue events etc. In case of legacy and MSI
- * the misc functionality and queue processing is combined in
- * the same vector and that gets setup at open.
- */
- err = ice_req_irq_msix_misc(pf);
- if (err) {
- dev_err(dev, "setup of misc vector failed: %d\n", err);
- goto err_req_irq_msix_misc;
- }
+ ice_start_service_task(pf);
return 0;
-
-err_req_irq_msix_misc:
- ice_clear_interrupt_scheme(pf);
-err_init_interrupt_scheme:
- ice_deinit_pf(pf);
-err_init_pf:
- ice_deinit_hw(hw);
- return err;
}
void ice_deinit_dev(struct ice_pf *pf)
{
- ice_free_irq_msix_misc(pf);
- ice_deinit_pf(pf);
- ice_deinit_hw(&pf->hw);
+ ice_service_task_stop(pf);
/* Service task is already stopped, so call reset directly. */
ice_reset(&pf->hw, ICE_RESET_PFR);
@@ -5088,12 +5000,14 @@ static int ice_init_devlink(struct ice_pf *pf)
ice_devlink_init_regions(pf);
ice_devlink_register(pf);
+ ice_health_init(pf);
return 0;
}
static void ice_deinit_devlink(struct ice_pf *pf)
{
+ ice_health_deinit(pf);
ice_devlink_unregister(pf);
ice_devlink_destroy_regions(pf);
ice_devlink_unregister_params(pf);
@@ -5101,15 +5015,24 @@ static void ice_deinit_devlink(struct ice_pf *pf)
static int ice_init(struct ice_pf *pf)
{
+ struct device *dev = ice_pf_to_dev(pf);
int err;
- err = ice_init_dev(pf);
- if (err)
+ err = ice_init_pf(pf);
+ if (err) {
+ dev_err(dev, "ice_init_pf failed: %d\n", err);
return err;
+ }
+
+ if (pf->hw.mac_type == ICE_MAC_E830) {
+ err = pci_enable_ptm(pf->pdev, NULL);
+ if (err)
+ dev_dbg(dev, "PCIe PTM not supported by PCIe bus/controller\n");
+ }
err = ice_alloc_vsis(pf);
if (err)
- goto err_alloc_vsis;
+ goto unroll_pf_init;
err = ice_init_pf_sw(pf);
if (err)
@@ -5146,8 +5069,8 @@ err_init_link:
ice_deinit_pf_sw(pf);
err_init_pf_sw:
ice_dealloc_vsis(pf);
-err_alloc_vsis:
- ice_deinit_dev(pf);
+unroll_pf_init:
+ ice_deinit_pf(pf);
return err;
}
@@ -5158,7 +5081,7 @@ static void ice_deinit(struct ice_pf *pf)
ice_deinit_pf_sw(pf);
ice_dealloc_vsis(pf);
- ice_deinit_dev(pf);
+ ice_deinit_pf(pf);
}
/**
@@ -5206,11 +5129,12 @@ int ice_load(struct ice_pf *pf)
ice_napi_add(vsi);
+ ice_init_features(pf);
+
err = ice_init_rdma(pf);
if (err)
goto err_init_rdma;
- ice_init_features(pf);
ice_service_task_restart(pf);
clear_bit(ICE_DOWN, pf->state);
@@ -5218,6 +5142,7 @@ int ice_load(struct ice_pf *pf)
return 0;
err_init_rdma:
+ ice_deinit_features(pf);
ice_tc_indir_block_unregister(vsi);
err_tc_indir_block_register:
ice_unregister_netdev(vsi);
@@ -5241,14 +5166,44 @@ void ice_unload(struct ice_pf *pf)
devl_assert_locked(priv_to_devlink(pf));
- ice_deinit_features(pf);
ice_deinit_rdma(pf);
+ ice_deinit_features(pf);
ice_tc_indir_block_unregister(vsi);
ice_unregister_netdev(vsi);
ice_devlink_destroy_pf_port(pf);
ice_decfg_netdev(vsi);
}
+static int ice_probe_recovery_mode(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ dev_err(dev, "Firmware recovery mode detected. Limiting functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode\n");
+
+ INIT_HLIST_HEAD(&pf->aq_wait_list);
+ spin_lock_init(&pf->aq_wait_lock);
+ init_waitqueue_head(&pf->aq_wait_queue);
+
+ timer_setup(&pf->serv_tmr, ice_service_timer, 0);
+ pf->serv_tmr_period = HZ;
+ INIT_WORK(&pf->serv_task, ice_service_task_recovery_mode);
+ clear_bit(ICE_SERVICE_SCHED, pf->state);
+ err = ice_create_all_ctrlq(&pf->hw);
+ if (err)
+ return err;
+
+ scoped_guard(devl, priv_to_devlink(pf)) {
+ err = ice_init_devlink(pf);
+ if (err)
+ return err;
+ }
+
+ ice_service_task_restart(pf);
+
+ return 0;
+}
+
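
ice_probe_recovery_mode() relies on scoped_guard() from <linux/cleanup.h>: the devlink instance lock is taken for the duration of the braced block and dropped automatically on every exit path, including the early return on error. A hedged sketch of the same pattern with a plain mutex (my_lock, my_do_work and my_locked_op are hypothetical names):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_lock);

static int my_do_work(void)
{
	return 0;	/* placeholder */
}

static int my_locked_op(void)
{
	scoped_guard(mutex, &my_lock) {
		if (my_do_work() < 0)
			return -EIO;	/* my_lock is released automatically */
	}

	/* my_lock was already released when the scope above ended */
	return 0;
}
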
/**
* ice_probe - Device initialization routine
* @pdev: PCI device information struct
@@ -5260,6 +5215,7 @@ static int
ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
{
struct device *dev = &pdev->dev;
+ bool need_dev_deinit = false;
struct ice_adapter *adapter;
struct ice_pf *pf;
struct ice_hw *hw;
@@ -5312,13 +5268,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
}
pci_set_master(pdev);
-
- adapter = ice_adapter_get(pdev);
- if (IS_ERR(adapter))
- return PTR_ERR(adapter);
-
pf->pdev = pdev;
- pf->adapter = adapter;
pci_set_drvdata(pdev, pf);
set_bit(ICE_DOWN, pf->state);
/* Disable service task until DOWN bit is cleared */
@@ -5346,29 +5296,55 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
hw->debug_mask = debug;
#endif
+ if (ice_is_recovery_mode(hw))
+ return ice_probe_recovery_mode(pf);
+
+ err = ice_init_hw(hw);
+ if (err) {
+ dev_err(dev, "ice_init_hw failed: %d\n", err);
+ return err;
+ }
+
+ adapter = ice_adapter_get(pdev);
+ if (IS_ERR(adapter)) {
+ err = PTR_ERR(adapter);
+ goto unroll_hw_init;
+ }
+ pf->adapter = adapter;
+
+ err = ice_init_dev(pf);
+ if (err)
+ goto unroll_adapter;
+
err = ice_init(pf);
if (err)
- goto err_init;
+ goto unroll_dev_init;
devl_lock(priv_to_devlink(pf));
err = ice_load(pf);
if (err)
- goto err_load;
+ goto unroll_init;
err = ice_init_devlink(pf);
if (err)
- goto err_init_devlink;
+ goto unroll_load;
devl_unlock(priv_to_devlink(pf));
return 0;
-err_init_devlink:
+unroll_load:
ice_unload(pf);
-err_load:
+unroll_init:
devl_unlock(priv_to_devlink(pf));
ice_deinit(pf);
-err_init:
+unroll_dev_init:
+ need_dev_deinit = true;
+unroll_adapter:
ice_adapter_put(pdev);
+unroll_hw_init:
+ ice_deinit_hw(hw);
+ if (need_dev_deinit)
+ ice_deinit_dev(pf);
return err;
}
@@ -5430,7 +5406,7 @@ static void ice_setup_mc_magic_wake(struct ice_pf *pf)
status = ice_aq_manage_mac_write(hw, mac_addr, flags, NULL);
if (status)
dev_err(dev, "Failed to enable Multicast Magic Packet wake, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
}
/**
@@ -5448,6 +5424,14 @@ static void ice_remove(struct pci_dev *pdev)
msleep(100);
}
+ if (ice_is_recovery_mode(&pf->hw)) {
+ ice_service_task_stop(pf);
+ scoped_guard(devl, priv_to_devlink(pf)) {
+ ice_deinit_devlink(pf);
+ }
+ return;
+ }
+
if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
set_bit(ICE_VF_RESETS_DISABLED, pf->state);
ice_free_vfs(pf);
@@ -5455,10 +5439,6 @@ static void ice_remove(struct pci_dev *pdev)
ice_hwmon_exit(pf);
- ice_service_task_stop(pf);
- ice_aq_cancel_waiting_tasks(pf);
- set_bit(ICE_DOWN, pf->state);
-
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
@@ -5476,6 +5456,11 @@ static void ice_remove(struct pci_dev *pdev)
ice_set_wake(pf);
ice_adapter_put(pdev);
+ ice_deinit_hw(&pf->hw);
+
+ ice_deinit_dev(pf);
+ ice_aq_cancel_waiting_tasks(pf);
+ set_bit(ICE_DOWN, pf->state);
}
/**
@@ -5668,7 +5653,6 @@ static int ice_resume(struct device *dev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- pci_save_state(pdev);
if (!pci_device_is_present(pdev))
return -ENODEV;
@@ -5768,7 +5752,6 @@ static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
- pci_save_state(pdev);
pci_wake_from_d3(pdev, false);
/* Check for life */
@@ -5888,6 +5871,15 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_QSFP), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_SFP), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_SFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_QSFP56), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835CC_SFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_QSFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835C_SFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_QSFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E835_L_SFP), },
/* required last entry */
{}
};
@@ -6408,10 +6400,12 @@ ice_set_vlan_filtering_features(struct ice_vsi *vsi, netdev_features_t features)
int err = 0;
/* support Single VLAN Mode (SVM) and Double VLAN Mode (DVM) by checking
- * if either bit is set
+ * if either bit is set. In switchdev mode Rx filtering should never be
+ * enabled.
*/
- if (features &
- (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER))
+ if ((features &
+ (NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)) &&
+ !ice_is_eswitch_mode_switchdev(vsi->back))
err = vlan_ops->ena_rx_filtering(vsi);
else
err = vlan_ops->dis_rx_filtering(vsi);
@@ -6565,6 +6559,18 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
if (changed & NETIF_F_LOOPBACK)
ret = ice_set_loopback(vsi, !!(features & NETIF_F_LOOPBACK));
+ /* Due to E830 hardware limitations, TSO (NETIF_F_ALL_TSO) with GCS
+ * (NETIF_F_HW_CSUM) is not supported.
+ */
+ if (ice_is_feature_supported(pf, ICE_F_GCS) &&
+ ((features & NETIF_F_HW_CSUM) && (features & NETIF_F_ALL_TSO))) {
+ if (netdev->features & NETIF_F_HW_CSUM)
+ dev_err(ice_pf_to_dev(pf), "To enable TSO, you must first disable HW checksum.\n");
+ else
+ dev_err(ice_pf_to_dev(pf), "To enable HW checksum, you must first disable TSO.\n");
+ return -EIO;
+ }
+
return ret;
}
@@ -6788,7 +6794,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
ice_print_link_msg(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
- ice_ptp_link_change(pf, pf->hw.pf_id, true);
+ ice_ptp_link_change(pf, true);
}
/* Perform an initial read of the statistics registers now to
@@ -7120,6 +7126,9 @@ void ice_update_pf_stats(struct ice_pf *pf)
&prev_ps->mac_remote_faults,
&cur_ps->mac_remote_faults);
+ ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
+ &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
+
ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
&prev_ps->rx_undersize, &cur_ps->rx_undersize);
@@ -7258,7 +7267,7 @@ int ice_down(struct ice_vsi *vsi)
if (vsi->netdev) {
vlan_err = ice_vsi_del_vlan_zero(vsi);
- ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
+ ice_ptp_link_change(vsi->back, false);
netif_carrier_off(vsi->netdev);
netif_tx_disable(vsi->netdev);
}
@@ -7480,7 +7489,8 @@ int ice_vsi_open(struct ice_vsi *vsi)
if (err)
goto err_setup_rx;
- ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
+ if (bitmap_empty(pf->txtime_txqs, pf->max_pf_txqs))
+ ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_SF) {
/* Notify the stack of the actual queue counts. */
@@ -7791,6 +7801,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
/* if we get here, reset flow is successful */
clear_bit(ICE_RESET_FAILED, pf->state);
+ ice_health_clear(pf);
+
ice_plug_aux_dev(pf);
if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
ice_lag_rebuild(pf);
@@ -7841,12 +7853,6 @@ int ice_change_mtu(struct net_device *netdev, int new_mtu)
frame_size - ICE_ETH_PKT_HDR_PAD);
return -EINVAL;
}
- } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
- if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
- netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
- ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
- return -EINVAL;
- }
}
/* if a reset is in progress, wait for some time for it to complete */
@@ -7877,69 +7883,6 @@ int ice_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
- * ice_eth_ioctl - Access the hwtstamp interface
- * @netdev: network interface device structure
- * @ifr: interface request data
- * @cmd: ioctl command
- */
-static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
-
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return ice_ptp_get_ts_config(pf, ifr);
- case SIOCSHWTSTAMP:
- return ice_ptp_set_ts_config(pf, ifr);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/**
- * ice_aq_str - convert AQ err code to a string
- * @aq_err: the AQ error code to convert
- */
-const char *ice_aq_str(enum ice_aq_err aq_err)
-{
- switch (aq_err) {
- case ICE_AQ_RC_OK:
- return "OK";
- case ICE_AQ_RC_EPERM:
- return "ICE_AQ_RC_EPERM";
- case ICE_AQ_RC_ENOENT:
- return "ICE_AQ_RC_ENOENT";
- case ICE_AQ_RC_ENOMEM:
- return "ICE_AQ_RC_ENOMEM";
- case ICE_AQ_RC_EBUSY:
- return "ICE_AQ_RC_EBUSY";
- case ICE_AQ_RC_EEXIST:
- return "ICE_AQ_RC_EEXIST";
- case ICE_AQ_RC_EINVAL:
- return "ICE_AQ_RC_EINVAL";
- case ICE_AQ_RC_ENOSPC:
- return "ICE_AQ_RC_ENOSPC";
- case ICE_AQ_RC_ENOSYS:
- return "ICE_AQ_RC_ENOSYS";
- case ICE_AQ_RC_EMODE:
- return "ICE_AQ_RC_EMODE";
- case ICE_AQ_RC_ENOSEC:
- return "ICE_AQ_RC_ENOSEC";
- case ICE_AQ_RC_EBADSIG:
- return "ICE_AQ_RC_EBADSIG";
- case ICE_AQ_RC_ESVN:
- return "ICE_AQ_RC_ESVN";
- case ICE_AQ_RC_EBADMAN:
- return "ICE_AQ_RC_EBADMAN";
- case ICE_AQ_RC_EBADBUF:
- return "ICE_AQ_RC_EBADBUF";
- }
-
- return "ICE_AQ_RC_UNKNOWN";
-}
-
-/**
* ice_set_rss_lut - Set RSS LUT
* @vsi: Pointer to VSI structure
* @lut: Lookup table
@@ -7964,7 +7907,7 @@ int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
status = ice_aq_set_rss_lut(hw, &params);
if (status)
dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
return status;
}
@@ -7987,7 +7930,7 @@ int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed)
status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
if (status)
dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
return status;
}
@@ -8017,7 +7960,7 @@ int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size)
status = ice_aq_get_rss_lut(hw, &params);
if (status)
dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
return status;
}
@@ -8040,7 +7983,7 @@ int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed)
status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed);
if (status)
dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
+ status, libie_aq_str(hw->adminq.sq_last_status));
return status;
}
@@ -8113,9 +8056,7 @@ static int
ice_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, u32 filter_mask, int nlflags)
{
- struct ice_netdev_priv *np = netdev_priv(dev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(dev);
u16 bmode;
bmode = pf->first_sw->bridge_mode;
@@ -8157,7 +8098,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (ret) {
dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\n",
- bmode, ret, ice_aq_str(hw->adminq.sq_last_status));
+ bmode, ret, libie_aq_str(hw->adminq.sq_last_status));
goto out;
}
 /* Update sw flags for bookkeeping */
@@ -8185,8 +8126,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
u16 __always_unused flags,
struct netlink_ext_ack __always_unused *extack)
{
- struct ice_netdev_priv *np = netdev_priv(dev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(dev);
struct nlattr *attr, *br_spec;
struct ice_hw *hw = &pf->hw;
struct ice_sw *pf_sw;
@@ -8225,7 +8165,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (err) {
netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %s\n",
mode, err,
- ice_aq_str(hw->adminq.sq_last_status));
+ libie_aq_str(hw->adminq.sq_last_status));
/* revert hw->evb_veb */
hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
return err;
@@ -8281,16 +8221,18 @@ void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
if (tx_ring) {
struct ice_hw *hw = &pf->hw;
- u32 head, val = 0;
+ u32 head, intr = 0;
head = FIELD_GET(QTX_COMM_HEAD_HEAD_M,
rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue])));
/* Read interrupt register */
- val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
+ intr = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx));
netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %u, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
vsi->vsi_num, txqueue, tx_ring->next_to_clean,
- head, tx_ring->next_to_use, val);
+ head, tx_ring->next_to_use, intr);
+
+ ice_prep_tx_hang_report(pf, tx_ring, vsi->vsi_num, head, intr);
}
pf->tx_timeout_last_recovery = jiffies;
@@ -8324,11 +8266,16 @@ void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
* @np: net device to configure
* @filter_dev: device on which filter is added
* @cls_flower: offload data
+ * @ingress: if the rule is added to an ingress block
+ *
+ * Return: 0 if the flower was successfully added or deleted,
+ * negative error code otherwise.
*/
static int
ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
struct net_device *filter_dev,
- struct flow_cls_offload *cls_flower)
+ struct flow_cls_offload *cls_flower,
+ bool ingress)
{
struct ice_vsi *vsi = np->vsi;
@@ -8337,7 +8284,7 @@ ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
switch (cls_flower->command) {
case FLOW_CLS_REPLACE:
- return ice_add_cls_flower(filter_dev, vsi, cls_flower);
+ return ice_add_cls_flower(filter_dev, vsi, cls_flower, ingress);
case FLOW_CLS_DESTROY:
return ice_del_cls_flower(vsi, cls_flower);
default:
@@ -8346,20 +8293,46 @@ ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
}
/**
- * ice_setup_tc_block_cb - callback handler registered for TC block
+ * ice_setup_tc_block_cb_ingress - callback handler for ingress TC block
* @type: TC SETUP type
* @type_data: TC flower offload data that contains user input
* @cb_priv: netdev private data
+ *
+ * Return: 0 if the setup was successful, negative error code otherwise.
*/
static int
-ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+ice_setup_tc_block_cb_ingress(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
{
struct ice_netdev_priv *np = cb_priv;
switch (type) {
case TC_SETUP_CLSFLOWER:
return ice_setup_tc_cls_flower(np, np->vsi->netdev,
- type_data);
+ type_data, true);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * ice_setup_tc_block_cb_egress - callback handler for egress TC block
+ * @type: TC SETUP type
+ * @type_data: TC flower offload data that contains user input
+ * @cb_priv: netdev private data
+ *
+ * Return: 0 if the setup was successful, negative error code otherwise.
+ */
+static int
+ice_setup_tc_block_cb_egress(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct ice_netdev_priv *np = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return ice_setup_tc_cls_flower(np, np->vsi->netdev,
+ type_data, false);
default:
return -EOPNOTSUPP;
}
@@ -9112,7 +9085,7 @@ static int ice_create_q_channels(struct ice_vsi *vsi)
list_add_tail(&ch->list, &vsi->ch_list);
vsi->tc_map_vsi[i] = ch->ch_vsi;
dev_dbg(ice_pf_to_dev(pf),
- "successfully created channel: VSI %pK\n", ch->ch_vsi);
+ "successfully created channel: VSI %p\n", ch->ch_vsi);
}
return 0;
@@ -9297,6 +9270,96 @@ exit:
return ret;
}
+/**
+ * ice_cfg_txtime - configure Tx Time for the Tx ring
+ * @tx_ring: pointer to the Tx ring structure
+ *
+ * Return: 0 on success, negative value on failure.
+ */
+static int ice_cfg_txtime(struct ice_tx_ring *tx_ring)
+{
+ int err, timeout = 50;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ struct ice_pf *pf;
+ u32 queue;
+
+ if (!tx_ring)
+ return -EINVAL;
+
+ vsi = tx_ring->vsi;
+ pf = vsi->back;
+ while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+ usleep_range(1000, 2000);
+ }
+
+ queue = tx_ring->q_index;
+ dev = ice_pf_to_dev(pf);
+
+ /* Ignore return value, and always attempt to enable queue. */
+ ice_qp_dis(vsi, queue);
+
+ err = ice_qp_ena(vsi, queue);
+ if (err)
+ dev_err(dev, "Failed to enable Tx queue %d for TxTime configuration\n",
+ queue);
+
+ clear_bit(ICE_CFG_BUSY, pf->state);
+ return err;
+}
+
+/**
+ * ice_offload_txtime - set earliest TxTime first
+ * @netdev: network interface device structure
+ * @qopt_off: etf queue option offload from the skb to set
+ *
+ * Return: 0 on success, negative value on failure.
+ */
+static int ice_offload_txtime(struct net_device *netdev,
+ void *qopt_off)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ struct tc_etf_qopt_offload *qopt;
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_tx_ring *tx_ring;
+ int ret = 0;
+
+ if (!ice_is_feature_supported(pf, ICE_F_TXTIME))
+ return -EOPNOTSUPP;
+
+ qopt = qopt_off;
+ if (!qopt_off || qopt->queue < 0 || qopt->queue >= vsi->num_txq)
+ return -EINVAL;
+
+ if (qopt->enable)
+ set_bit(qopt->queue, pf->txtime_txqs);
+ else
+ clear_bit(qopt->queue, pf->txtime_txqs);
+
+ if (netif_running(vsi->netdev)) {
+ tx_ring = vsi->tx_rings[qopt->queue];
+ ret = ice_cfg_txtime(tx_ring);
+ if (ret)
+ goto err;
+ }
+
+ netdev_info(netdev, "%s TxTime on queue: %i\n",
+ str_enable_disable(qopt->enable), qopt->queue);
+ return 0;
+
+err:
+ netdev_err(netdev, "Failed to %s TxTime on queue: %i\n",
+ str_enable_disable(qopt->enable), qopt->queue);
+
+ if (qopt->enable)
+ clear_bit(qopt->queue, pf->txtime_txqs);
+ return ret;
+}
+
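
For context on the TC_SETUP_QDISC_ETF case wired up in ice_setup_tc() further down: when an offloaded etf qdisc is installed on (or removed from) a Tx queue, the scheduler core passes the driver a struct tc_etf_qopt_offload, which ice_offload_txtime() above translates into the txtime_txqs bitmap and a queue reconfiguration. A sketch of that core-side contract (field names are from include/net/pkt_sched.h; the helper itself is illustrative):

#include <linux/netdevice.h>
#include <net/pkt_sched.h>

/* roughly what sch_etf hands to ndo_setup_tc() for an offloaded qdisc */
static int my_etf_offload_request(struct net_device *dev, s32 queue, bool enable)
{
	struct tc_etf_qopt_offload etf = {
		.enable = enable,	/* true on install, false on destroy */
		.queue = queue,		/* Tx queue the qdisc is attached to */
	};

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETF, &etf);
}
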
static LIST_HEAD(ice_block_cb_list);
static int
@@ -9304,27 +9367,45 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ enum flow_block_binder_type binder_type;
+ struct iidc_rdma_core_dev_info *cdev;
struct ice_pf *pf = np->vsi->back;
+ flow_setup_cb_t *flower_handler;
bool locked = false;
int err;
switch (type) {
case TC_SETUP_BLOCK:
+ binder_type =
+ ((struct flow_block_offload *)type_data)->binder_type;
+
+ switch (binder_type) {
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
+ flower_handler = ice_setup_tc_block_cb_ingress;
+ break;
+ case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
+ flower_handler = ice_setup_tc_block_cb_egress;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
return flow_block_cb_setup_simple(type_data,
&ice_block_cb_list,
- ice_setup_tc_block_cb,
- np, np, true);
+ flower_handler,
+ np, np, false);
case TC_SETUP_QDISC_MQPRIO:
if (ice_is_eswitch_mode_switchdev(pf)) {
netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
return -EOPNOTSUPP;
}
- if (pf->adev) {
+ cdev = pf->cdev_info;
+ if (cdev && cdev->adev) {
mutex_lock(&pf->adev_mutex);
- device_lock(&pf->adev->dev);
+ device_lock(&cdev->adev->dev);
locked = true;
- if (pf->adev->dev.driver) {
+ if (cdev->adev->dev.driver) {
netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
err = -EBUSY;
goto adev_unlock;
@@ -9338,10 +9419,12 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
adev_unlock:
if (locked) {
- device_unlock(&pf->adev->dev);
+ device_unlock(&cdev->adev->dev);
mutex_unlock(&pf->adev_mutex);
}
return err;
+ case TC_SETUP_QDISC_ETF:
+ return ice_offload_txtime(netdev, type_data);
default:
return -EOPNOTSUPP;
}
@@ -9374,7 +9457,7 @@ ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
case TC_SETUP_CLSFLOWER:
return ice_setup_tc_cls_flower(np, priv->netdev,
(struct flow_cls_offload *)
- type_data);
+ type_data, false);
default:
return -EOPNOTSUPP;
}
@@ -9477,8 +9560,7 @@ ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
*/
int ice_open(struct net_device *netdev)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
if (ice_is_reset_in_progress(pf->state)) {
netdev_err(netdev, "can't open net device while reset is in progress");
@@ -9681,7 +9763,6 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
.ndo_set_tx_maxrate = ice_set_tx_maxrate,
- .ndo_eth_ioctl = ice_eth_ioctl,
.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
.ndo_set_vf_mac = ice_set_vf_mac,
.ndo_get_vf_config = ice_get_vf_cfg,
@@ -9705,4 +9786,6 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_bpf = ice_xdp,
.ndo_xdp_xmit = ice_xdp_xmit,
.ndo_xsk_wakeup = ice_xsk_wakeup,
+ .ndo_hwtstamp_get = ice_ptp_hwtstamp_get,
+ .ndo_hwtstamp_set = ice_ptp_hwtstamp_set,
};
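
The dropped ice_eth_ioctl() path (SIOCGHWTSTAMP/SIOCSHWTSTAMP) is superseded by the dedicated hwtstamp callbacks registered above; the core translates the legacy ioctls into these ops on its own. For reference, the prototypes in struct net_device_ops are roughly:

	int (*ndo_hwtstamp_get)(struct net_device *dev,
				struct kernel_hwtstamp_config *kernel_config);
	int (*ndo_hwtstamp_set)(struct net_device *dev,
				struct kernel_hwtstamp_config *kernel_config,
				struct netlink_ext_ack *extack);
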
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 59e8879ac059..7e187a804dfa 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -22,10 +22,10 @@ int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command,
bool read_shadow_ram, struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
struct ice_aqc_nvm *cmd;
- cmd = &desc.params.nvm;
+ cmd = libie_aq_raw(&desc);
if (offset > ICE_AQC_NVM_MAX_OFFSET)
return -EINVAL;
@@ -125,10 +125,10 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command, u8 command_flags,
struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
struct ice_aqc_nvm *cmd;
- cmd = &desc.params.nvm;
+ cmd = libie_aq_raw(&desc);
/* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000)
@@ -146,7 +146,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
cmd->offset_high = (offset >> 16) & 0xFF;
cmd->length = cpu_to_le16(length);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
return ice_aq_send_cmd(hw, &desc, data, length, cd);
}
@@ -161,10 +161,10 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
*/
int ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
struct ice_aqc_nvm *cmd;
- cmd = &desc.params.nvm;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_erase);
@@ -869,7 +869,7 @@ static int ice_discover_flash_size(struct ice_hw *hw)
status = ice_read_flat_nvm(hw, offset, &len, &data, false);
if (status == -EIO &&
- hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
+ hw->adminq.sq_last_status == LIBIE_AQ_RC_EINVAL) {
ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n",
__func__, offset);
status = 0;
@@ -1182,14 +1182,14 @@ int ice_init_nvm(struct ice_hw *hw)
int ice_nvm_validate_checksum(struct ice_hw *hw)
{
struct ice_aqc_nvm_checksum *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status)
return status;
- cmd = &desc.params.nvm_checksum;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_checksum);
cmd->flags = ICE_AQC_NVM_CHECKSUM_VERIFY;
@@ -1226,11 +1226,11 @@ int ice_nvm_validate_checksum(struct ice_hw *hw)
*/
int ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags)
{
+ struct libie_aq_desc desc;
struct ice_aqc_nvm *cmd;
- struct ice_aq_desc desc;
int err;
- cmd = &desc.params.nvm;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);
cmd->cmd_flags = (u8)(cmd_flags & 0xFF);
@@ -1252,7 +1252,7 @@ int ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags)
*/
int ice_aq_nvm_update_empr(struct ice_hw *hw)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_update_empr);
@@ -1278,15 +1278,15 @@ ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
u16 length, struct ice_sq_cd *cd)
{
struct ice_aqc_nvm_pkg_data *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
if (length != 0 && !data)
return -EINVAL;
- cmd = &desc.params.pkg_data;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_pkg_data);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
if (del_pkg_data_flag)
cmd->cmd_flags |= ICE_AQC_NVM_PKG_DELETE;
@@ -1316,17 +1316,17 @@ ice_nvm_pass_component_tbl(struct ice_hw *hw, u8 *data, u16 length,
u8 *comp_response_code, struct ice_sq_cd *cd)
{
struct ice_aqc_nvm_pass_comp_tbl *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
if (!data || !comp_response || !comp_response_code)
return -EINVAL;
- cmd = &desc.params.pass_comp_tbl;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc,
ice_aqc_opc_nvm_pass_component_tbl);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->transfer_flag = transfer_flag;
status = ice_aq_send_cmd(hw, &desc, data, length, cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_parser.h b/drivers/net/ethernet/intel/ice/ice_parser.h
index 6509d807627c..4f56d53d56b9 100644
--- a/drivers/net/ethernet/intel/ice/ice_parser.h
+++ b/drivers/net/ethernet/intel/ice/ice_parser.h
@@ -257,7 +257,6 @@ ice_pg_nm_cam_match(struct ice_pg_nm_cam_item *table, int size,
/*** ICE_SID_RXPARSER_BOOST_TCAM and ICE_SID_LBL_RXPARSER_TMEM sections ***/
#define ICE_BST_TCAM_TABLE_SIZE 256
#define ICE_BST_TCAM_KEY_SIZE 20
-#define ICE_BST_KEY_TCAM_SIZE 19
/* Boost TCAM item */
struct ice_bst_tcam_item {
@@ -401,7 +400,6 @@ u16 ice_xlt_kb_flag_get(struct ice_xlt_kb *kb, u64 pkt_flag);
#define ICE_PARSER_GPR_NUM 128
#define ICE_PARSER_FLG_NUM 64
#define ICE_PARSER_ERR_NUM 16
-#define ICE_BST_KEY_SIZE 10
#define ICE_MARKER_ID_SIZE 9
#define ICE_MARKER_MAX_SIZE \
(ICE_MARKER_ID_SIZE * BITS_PER_BYTE - 1)
@@ -431,13 +429,13 @@ struct ice_parser_rt {
u8 pkt_buf[ICE_PARSER_MAX_PKT_LEN + ICE_PARSER_PKT_REV];
u16 pkt_len;
u16 po;
- u8 bst_key[ICE_BST_KEY_SIZE];
+ u8 bst_key[ICE_BST_TCAM_KEY_SIZE];
struct ice_pg_cam_key pg_key;
+ u8 pg_prio;
struct ice_alu *alu0;
struct ice_alu *alu1;
struct ice_alu *alu2;
struct ice_pg_cam_action *action;
- u8 pg_prio;
struct ice_gpr_pu pu;
u8 markers[ICE_MARKER_ID_SIZE];
bool protocols[ICE_PO_PAIR_SIZE];
diff --git a/drivers/net/ethernet/intel/ice/ice_parser_rt.c b/drivers/net/ethernet/intel/ice/ice_parser_rt.c
index dedf5e854e4b..3995d662e050 100644
--- a/drivers/net/ethernet/intel/ice/ice_parser_rt.c
+++ b/drivers/net/ethernet/intel/ice/ice_parser_rt.c
@@ -125,22 +125,20 @@ static void ice_bst_key_init(struct ice_parser_rt *rt,
else
key[idd] = imem->b_kb.prio;
- idd = ICE_BST_KEY_TCAM_SIZE - 1;
+ idd = ICE_BST_TCAM_KEY_SIZE - 2;
for (i = idd; i >= 0; i--) {
int j;
j = ho + idd - i;
if (j < ICE_PARSER_MAX_PKT_LEN)
- key[i] = rt->pkt_buf[ho + idd - i];
+ key[i] = rt->pkt_buf[j];
else
key[i] = 0;
}
- ice_debug(rt->psr->hw, ICE_DBG_PARSER, "Generated Boost TCAM Key:\n");
- ice_debug(rt->psr->hw, ICE_DBG_PARSER, "%02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
- key[0], key[1], key[2], key[3], key[4],
- key[5], key[6], key[7], key[8], key[9]);
- ice_debug(rt->psr->hw, ICE_DBG_PARSER, "\n");
+ ice_debug_array_w_prefix(rt->psr->hw, ICE_DBG_PARSER,
+ KBUILD_MODNAME ": Generated Boost TCAM Key",
+ key, ICE_BST_TCAM_KEY_SIZE);
}
static u16 ice_bit_rev_u16(u16 v, int len)
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 7c09ea0f03ba..725167d557a8 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -82,26 +82,46 @@ enum ice_sw_tunnel_type {
enum ice_prot_id {
ICE_PROT_ID_INVAL = 0,
ICE_PROT_MAC_OF_OR_S = 1,
+ ICE_PROT_MAC_O2 = 2,
ICE_PROT_MAC_IL = 4,
+ ICE_PROT_MAC_IN_MAC = 7,
ICE_PROT_ETYPE_OL = 9,
ICE_PROT_ETYPE_IL = 10,
+ ICE_PROT_PAY = 15,
+ ICE_PROT_EVLAN_O = 16,
+ ICE_PROT_VLAN_O = 17,
+ ICE_PROT_VLAN_IF = 18,
+ ICE_PROT_MPLS_OL_MINUS_1 = 27,
+ ICE_PROT_MPLS_OL_OR_OS = 28,
+ ICE_PROT_MPLS_IL = 29,
ICE_PROT_IPV4_OF_OR_S = 32,
ICE_PROT_IPV4_IL = 33,
+ ICE_PROT_IPV4_IL_IL = 34,
ICE_PROT_IPV6_OF_OR_S = 40,
ICE_PROT_IPV6_IL = 41,
+ ICE_PROT_IPV6_IL_IL = 42,
+ ICE_PROT_IPV6_NEXT_PROTO = 43,
+ ICE_PROT_IPV6_FRAG = 47,
ICE_PROT_TCP_IL = 49,
ICE_PROT_UDP_OF = 52,
ICE_PROT_UDP_IL_OR_S = 53,
ICE_PROT_GRE_OF = 64,
+ ICE_PROT_NSH_F = 84,
ICE_PROT_ESP_F = 88,
ICE_PROT_ESP_2 = 89,
ICE_PROT_SCTP_IL = 96,
ICE_PROT_ICMP_IL = 98,
ICE_PROT_ICMPV6_IL = 100,
+ ICE_PROT_VRRP_F = 101,
+ ICE_PROT_OSPF = 102,
ICE_PROT_PPPOE = 103,
ICE_PROT_L2TPV3 = 104,
+ ICE_PROT_ATAOE_OF = 114,
+ ICE_PROT_CTRL_OF = 116,
+ ICE_PROT_LLDP_OF = 117,
ICE_PROT_ARP_OF = 118,
+ ICE_PROT_EAPOL_OF = 120,
 ICE_PROT_META_ID = 255, /* when offset == metadata */
ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
};
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index a999fface272..4c8d20f2d2c0 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -4,7 +4,6 @@
#include "ice.h"
#include "ice_lib.h"
#include "ice_trace.h"
-#include "ice_cgu_regs.h"
static const char ice_pin_names[][64] = {
"SDP0",
@@ -16,45 +15,43 @@ static const char ice_pin_names[][64] = {
};
static const struct ice_ptp_pin_desc ice_pin_desc_e82x[] = {
- /* name, gpio */
- { TIME_SYNC, { 4, -1 }},
- { ONE_PPS, { -1, 5 }},
+ /* name, gpio, delay */
+ { TIME_SYNC, { 4, -1 }, { 0, 0 }},
+ { ONE_PPS, { -1, 5 }, { 0, 11 }},
};
static const struct ice_ptp_pin_desc ice_pin_desc_e825c[] = {
- /* name, gpio */
- { SDP0, { 0, 0 }},
- { SDP1, { 1, 1 }},
- { SDP2, { 2, 2 }},
- { SDP3, { 3, 3 }},
- { TIME_SYNC, { 4, -1 }},
- { ONE_PPS, { -1, 5 }},
+ /* name, gpio, delay */
+ { SDP0, { 0, 0 }, { 15, 14 }},
+ { SDP1, { 1, 1 }, { 15, 14 }},
+ { SDP2, { 2, 2 }, { 15, 14 }},
+ { SDP3, { 3, 3 }, { 15, 14 }},
+ { TIME_SYNC, { 4, -1 }, { 11, 0 }},
+ { ONE_PPS, { -1, 5 }, { 0, 9 }},
};
static const struct ice_ptp_pin_desc ice_pin_desc_e810[] = {
- /* name, gpio */
- { SDP0, { 0, 0 }},
- { SDP1, { 1, 1 }},
- { SDP2, { 2, 2 }},
- { SDP3, { 3, 3 }},
- { ONE_PPS, { -1, 5 }},
+ /* name, gpio, delay */
+ { SDP0, { 0, 0 }, { 0, 1 }},
+ { SDP1, { 1, 1 }, { 0, 1 }},
+ { SDP2, { 2, 2 }, { 0, 1 }},
+ { SDP3, { 3, 3 }, { 0, 1 }},
+ { ONE_PPS, { -1, 5 }, { 0, 1 }},
};
-static const char ice_pin_names_nvm[][64] = {
- "GNSS",
- "SMA1",
- "U.FL1",
- "SMA2",
- "U.FL2",
+static const char ice_pin_names_dpll[][64] = {
+ "SDP20",
+ "SDP21",
+ "SDP22",
+ "SDP23",
};
-static const struct ice_ptp_pin_desc ice_pin_desc_e810_sma[] = {
- /* name, gpio */
- { GNSS, { 1, -1 }},
- { SMA1, { 1, 0 }},
- { UFL1, { -1, 0 }},
- { SMA2, { 3, 2 }},
- { UFL2, { 3, -1 }},
+static const struct ice_ptp_pin_desc ice_pin_desc_dpll[] = {
+ /* name, gpio, delay */
+ { SDP0, { -1, 0 }, { 0, 1 }},
+ { SDP1, { 1, -1 }, { 0, 0 }},
+ { SDP2, { -1, 2 }, { 0, 1 }},
+ { SDP3, { 3, -1 }, { 0, 0 }},
};
static struct ice_pf *ice_get_ctrl_pf(struct ice_pf *pf)
@@ -93,101 +90,6 @@ static int ice_ptp_find_pin_idx(struct ice_pf *pf, enum ptp_pin_function func,
}
/**
- * ice_ptp_update_sma_data - update SMA pins data according to pins setup
- * @pf: Board private structure
- * @sma_pins: parsed SMA pins status
- * @data: SMA data to update
- */
-static void ice_ptp_update_sma_data(struct ice_pf *pf, unsigned int sma_pins[],
- u8 *data)
-{
- const char *state1, *state2;
-
- /* Set the right state based on the desired configuration.
- * When bit is set, functionality is disabled.
- */
- *data &= ~ICE_ALL_SMA_MASK;
- if (!sma_pins[UFL1 - 1]) {
- if (sma_pins[SMA1 - 1] == PTP_PF_EXTTS) {
- state1 = "SMA1 Rx, U.FL1 disabled";
- *data |= ICE_SMA1_TX_EN;
- } else if (sma_pins[SMA1 - 1] == PTP_PF_PEROUT) {
- state1 = "SMA1 Tx U.FL1 disabled";
- *data |= ICE_SMA1_DIR_EN;
- } else {
- state1 = "SMA1 disabled, U.FL1 disabled";
- *data |= ICE_SMA1_MASK;
- }
- } else {
- /* U.FL1 Tx will always enable SMA1 Rx */
- state1 = "SMA1 Rx, U.FL1 Tx";
- }
-
- if (!sma_pins[UFL2 - 1]) {
- if (sma_pins[SMA2 - 1] == PTP_PF_EXTTS) {
- state2 = "SMA2 Rx, U.FL2 disabled";
- *data |= ICE_SMA2_TX_EN | ICE_SMA2_UFL2_RX_DIS;
- } else if (sma_pins[SMA2 - 1] == PTP_PF_PEROUT) {
- state2 = "SMA2 Tx, U.FL2 disabled";
- *data |= ICE_SMA2_DIR_EN | ICE_SMA2_UFL2_RX_DIS;
- } else {
- state2 = "SMA2 disabled, U.FL2 disabled";
- *data |= ICE_SMA2_MASK;
- }
- } else {
- if (!sma_pins[SMA2 - 1]) {
- state2 = "SMA2 disabled, U.FL2 Rx";
- *data |= ICE_SMA2_DIR_EN | ICE_SMA2_TX_EN;
- } else {
- state2 = "SMA2 Tx, U.FL2 Rx";
- *data |= ICE_SMA2_DIR_EN;
- }
- }
-
- dev_dbg(ice_pf_to_dev(pf), "%s, %s\n", state1, state2);
-}
-
-/**
- * ice_ptp_set_sma_cfg - set the configuration of the SMA control logic
- * @pf: Board private structure
- *
- * Return: 0 on success, negative error code otherwise
- */
-static int ice_ptp_set_sma_cfg(struct ice_pf *pf)
-{
- const struct ice_ptp_pin_desc *ice_pins = pf->ptp.ice_pin_desc;
- struct ptp_pin_desc *pins = pf->ptp.pin_desc;
- unsigned int sma_pins[ICE_SMA_PINS_NUM] = {};
- int err;
- u8 data;
-
- /* Read initial pin state value */
- err = ice_read_sma_ctrl(&pf->hw, &data);
- if (err)
- return err;
-
- /* Get SMA/U.FL pins states */
- for (int i = 0; i < pf->ptp.info.n_pins; i++)
- if (pins[i].func) {
- int name_idx = ice_pins[i].name_idx;
-
- switch (name_idx) {
- case SMA1:
- case UFL1:
- case SMA2:
- case UFL2:
- sma_pins[name_idx - 1] = pins[i].func;
- break;
- default:
- continue;
- }
- }
-
- ice_ptp_update_sma_data(pf, sma_pins, &data);
- return ice_write_sma_ctrl(&pf->hw, data);
-}
-
-/**
* ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
* @pf: Board private structure
*
@@ -298,18 +200,30 @@ void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
* @sts: Optional parameter for holding a pair of system timestamps from
* the system clock. Will be ignored if NULL is given.
*/
-static u64
-ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
+u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
+ struct ptp_system_timestamp *sts)
{
struct ice_hw *hw = &pf->hw;
u32 hi, lo, lo2;
u8 tmr_idx;
+ if (!ice_is_primary(hw))
+ hw = ice_get_primary_hw(pf);
+
tmr_idx = ice_get_ptp_src_clock_index(hw);
guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
/* Read the system timestamp pre PHC read */
ptp_read_system_prets(sts);
+ if (hw->mac_type == ICE_MAC_E830) {
+ u64 clk_time = rd64(hw, E830_GLTSYN_TIME_L(tmr_idx));
+
+ /* Read the system timestamp post PHC read */
+ ptp_read_system_postts(sts);
+
+ return clk_time;
+ }
+
lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
/* Read the system timestamp post PHC read */
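
The E830 branch added above can read the PHC with a single rd64() because that hardware latches the full 64-bit time; the pre-existing path (continuing past this hunk) reads two 32-bit halves and guards against rollover between them. A sketch of that classic split read, for contrast (illustrative, not the exact driver code; my_read_split_time is a made-up name):

static u64 my_read_split_time(struct ice_hw *hw, u8 tmr_idx)
{
	u32 lo, hi, lo2;

	lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
	hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));
	if (lo2 < lo) {
		/* the low half wrapped between the reads, hi may be stale */
		lo = lo2;
		hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
	}

	return ((u64)hi << 32) | lo;
}
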
@@ -464,7 +378,9 @@ ice_ptp_is_tx_tracker_up(struct ice_ptp_tx *tx)
*/
void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
{
+ struct ice_e810_params *params;
struct ice_ptp_port *ptp_port;
+ unsigned long flags;
struct sk_buff *skb;
struct ice_pf *pf;
@@ -473,6 +389,7 @@ void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
ptp_port = container_of(tx, struct ice_ptp_port, tx);
pf = ptp_port_to_pf(ptp_port);
+ params = &pf->hw.ptp.phy.e810;
/* Drop packets which have waited for more than 2 seconds */
if (time_is_before_jiffies(tx->tstamps[idx].start + 2 * HZ)) {
@@ -489,11 +406,17 @@ void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx)
ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
+ spin_lock_irqsave(&params->atqbal_wq.lock, flags);
+
+ params->atqbal_flags |= ATQBAL_FLAGS_INTR_IN_PROGRESS;
+
/* Write TS index to read to the PF register so the FW can read it */
- wr32(&pf->hw, PF_SB_ATQBAL,
- TS_LL_READ_TS_INTR | FIELD_PREP(TS_LL_READ_TS_IDX, idx) |
- TS_LL_READ_TS);
+ wr32(&pf->hw, REG_LL_PROXY_H,
+ REG_LL_PROXY_H_TS_INTR_ENA | FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) |
+ REG_LL_PROXY_H_EXEC);
tx->last_ll_ts_idx_read = idx;
+
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
}
/**
@@ -504,35 +427,52 @@ void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
{
struct skb_shared_hwtstamps shhwtstamps = {};
u8 idx = tx->last_ll_ts_idx_read;
+ struct ice_e810_params *params;
struct ice_ptp_port *ptp_port;
u64 raw_tstamp, tstamp;
bool drop_ts = false;
struct sk_buff *skb;
+ unsigned long flags;
+ struct device *dev;
struct ice_pf *pf;
- u32 val;
+ u32 reg_ll_high;
if (!tx->init || tx->last_ll_ts_idx_read < 0)
return;
ptp_port = container_of(tx, struct ice_ptp_port, tx);
pf = ptp_port_to_pf(ptp_port);
+ dev = ice_pf_to_dev(pf);
+ params = &pf->hw.ptp.phy.e810;
ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
- val = rd32(&pf->hw, PF_SB_ATQBAL);
+ spin_lock_irqsave(&params->atqbal_wq.lock, flags);
+
+ if (!(params->atqbal_flags & ATQBAL_FLAGS_INTR_IN_PROGRESS))
+ dev_dbg(dev, "%s: low latency interrupt request not in progress?\n",
+ __func__);
+
+ /* Read the low 32 bit value */
+ raw_tstamp = rd32(&pf->hw, REG_LL_PROXY_L);
+ /* Read the status together with high TS part */
+ reg_ll_high = rd32(&pf->hw, REG_LL_PROXY_H);
+
+ /* Wake up threads waiting on low latency interface */
+ params->atqbal_flags &= ~ATQBAL_FLAGS_INTR_IN_PROGRESS;
+
+ wake_up_locked(&params->atqbal_wq);
+
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
/* When the bit is cleared, the TS is ready in the register */
- if (val & TS_LL_READ_TS) {
+ if (reg_ll_high & REG_LL_PROXY_H_EXEC) {
dev_err(ice_pf_to_dev(pf), "Failed to get the Tx tstamp - FW not ready");
return;
}
/* High 8 bit value of the TS is on the bits 16:23 */
- raw_tstamp = FIELD_GET(TS_LL_READ_TS_HIGH, val);
- raw_tstamp <<= 32;
-
- /* Read the low 32 bit value */
- raw_tstamp |= (u64)rd32(&pf->hw, PF_SB_ATQBAH);
+ raw_tstamp |= ((u64)FIELD_GET(REG_LL_PROXY_H_TS_HIGH, reg_ll_high)) << 32;
/* Devices using this interface always verify the timestamp differs
* relative to the last cached timestamp value.
@@ -560,6 +500,9 @@ void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx)
if (tstamp) {
shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
ice_trace(tx_tstamp_complete, skb, idx);
+
+ /* Count the number of Tx timestamps that succeeded */
+ pf->ptp.tx_hwtstamp_good++;
}
skb_tstamp_tx(skb, &shhwtstamps);
@@ -618,6 +561,7 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
{
struct ice_ptp_port *ptp_port;
unsigned long flags;
+ u32 tstamp_good = 0;
struct ice_pf *pf;
struct ice_hw *hw;
u64 tstamp_ready;
@@ -718,11 +662,16 @@ skip_ts_read:
if (tstamp) {
shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
ice_trace(tx_tstamp_complete, skb, idx);
+
+ /* Count the number of Tx timestamps that succeeded */
+ tstamp_good++;
}
skb_tstamp_tx(skb, &shhwtstamps);
dev_kfree_skb_any(skb);
}
+
+ pf->ptp.tx_hwtstamp_good += tstamp_good;
}
/**
@@ -946,28 +895,6 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
}
/**
- * ice_ptp_init_tx_eth56g - Initialize tracking for Tx timestamps
- * @pf: Board private structure
- * @tx: the Tx tracking structure to initialize
- * @port: the port this structure tracks
- *
- * Initialize the Tx timestamp tracker for this port. ETH56G PHYs
- * have independent memory blocks for all ports.
- *
- * Return: 0 for success, -ENOMEM when failed to allocate Tx tracker
- */
-static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx,
- u8 port)
-{
- tx->block = port;
- tx->offset = 0;
- tx->len = INDEX_PER_PORT_ETH56G;
- tx->has_ready_bitmap = 1;
-
- return ice_ptp_alloc_tx_tracker(tx);
-}
-
-/**
* ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
* @pf: Board private structure
* @tx: the Tx tracking structure to initialize
@@ -977,9 +904,11 @@ static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx,
* the timestamp block is shared for all ports in the same quad. To avoid
* ports using the same timestamp index, logically break the block of
* registers into chunks based on the port number.
+ *
+ * Return: 0 on success, -ENOMEM when out of memory
*/
-static int
-ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
+static int ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx,
+ u8 port)
{
tx->block = ICE_GET_QUAD_NUM(port);
tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
@@ -990,24 +919,27 @@ ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
}
/**
- * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
+ * ice_ptp_init_tx - Initialize tracking for Tx timestamps
* @pf: Board private structure
* @tx: the Tx tracking structure to initialize
+ * @port: the port this structure tracks
*
- * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
- * port has its own block of timestamps, independent of the other ports.
+ * Initialize the Tx timestamp tracker for this PF. For all PHYs except E82X,
+ * each port has its own block of timestamps, independent of the other ports.
+ *
+ * Return: 0 on success, -ENOMEM when out of memory
*/
-static int
-ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
+static int ice_ptp_init_tx(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
- tx->block = pf->hw.port_info->lport;
+ tx->block = port;
tx->offset = 0;
- tx->len = INDEX_PER_PORT_E810;
+ tx->len = INDEX_PER_PORT;
+
/* The E810 PHY does not provide a timestamp ready bitmap. Instead,
* verify new timestamps against cached copy of the last read
* timestamp.
*/
- tx->has_ready_bitmap = 0;
+ tx->has_ready_bitmap = pf->hw.mac_type != ICE_MAC_E810;
return ice_ptp_alloc_tx_tracker(tx);
}
@@ -1292,20 +1224,21 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
struct ice_hw *hw = &pf->hw;
int err;
- if (ice_is_e810(hw))
- return 0;
-
mutex_lock(&ptp_port->ps_lock);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_stop_phy_timer_eth56g(hw, port, true);
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ err = 0;
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
err = ice_stop_phy_timer_e82x(hw, port, true);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_stop_phy_timer_eth56g(hw, port, true);
+ break;
default:
err = -ENODEV;
}
@@ -1335,19 +1268,17 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
unsigned long flags;
int err;
- if (ice_is_e810(hw))
- return 0;
-
if (!ptp_port->link_up)
return ice_ptp_port_phy_stop(ptp_port);
mutex_lock(&ptp_port->ps_lock);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_start_phy_timer_eth56g(hw, port);
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ err = 0;
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
/* Start the PHY timer in Vernier mode */
kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
@@ -1372,6 +1303,9 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
0);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_start_phy_timer_eth56g(hw, port);
+ break;
default:
err = -ENODEV;
}
@@ -1388,10 +1322,9 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
/**
* ice_ptp_link_change - Reconfigure PTP after link status change
* @pf: Board private structure
- * @port: Port for which the PHY start is set
* @linkup: Link is up or down
*/
-void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
struct ice_ptp_port *ptp_port;
struct ice_hw *hw = &pf->hw;
@@ -1399,14 +1332,7 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
if (pf->ptp.state != ICE_PTP_READY)
return;
- if (WARN_ON_ONCE(port >= hw->ptp.num_lports))
- return;
-
ptp_port = &pf->ptp.port;
- if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
- port *= 2;
- if (WARN_ON_ONCE(ptp_port->port_num != port))
- return;
/* Update cached link status for this port immediately */
ptp_port->link_up = linkup;
@@ -1414,12 +1340,14 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
/* Skip HW writes if reset is in progress */
if (pf->hw.reset_ongoing)
return;
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_E810:
- /* Do not reconfigure E810 PHY */
+
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ /* Do not reconfigure E810 or E830 PHY */
return;
- case ICE_PHY_ETH56G:
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
ice_ptp_port_phy_restart(ptp_port);
return;
default:
@@ -1447,46 +1375,45 @@ static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
ice_ptp_reset_ts_memory(hw);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G: {
- int port;
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ return 0;
+ case ICE_MAC_GENERIC: {
+ int quad;
- for (port = 0; port < hw->ptp.num_lports; port++) {
+ for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
+ quad++) {
int err;
- err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
+ err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
if (err) {
- dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
- port, err);
+ dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
+ quad, err);
return err;
}
}
return 0;
}
- case ICE_PHY_E82X: {
- int quad;
+ case ICE_MAC_GENERIC_3K_E825: {
+ int port;
- for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
- quad++) {
+ for (port = 0; port < hw->ptp.num_lports; port++) {
int err;
- err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
+ err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
if (err) {
- dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
- quad, err);
+ dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
+ port, err);
return err;
}
}
return 0;
}
- case ICE_PHY_E810:
- return 0;
- case ICE_PHY_UNSUP:
+ case ICE_MAC_UNKNOWN:
default:
- dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__,
- ice_get_phy_model(hw));
return -EOPNOTSUPP;
}
}
@@ -1566,18 +1493,29 @@ void ice_ptp_extts_event(struct ice_pf *pf)
* Event is defined in GLTSYN_EVNT_0 register
*/
for (chan = 0; chan < GLTSYN_EVNT_H_IDX_MAX; chan++) {
+ int pin_desc_idx;
+
/* Check if channel is enabled */
- if (pf->ptp.ext_ts_irq & (1 << chan)) {
- lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
- hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
- event.timestamp = (((u64)hi) << 32) | lo;
- event.type = PTP_CLOCK_EXTTS;
- event.index = chan;
-
- /* Fire event */
- ptp_clock_event(pf->ptp.clock, &event);
- pf->ptp.ext_ts_irq &= ~(1 << chan);
+ if (!(pf->ptp.ext_ts_irq & (1 << chan)))
+ continue;
+
+ lo = rd32(hw, GLTSYN_EVNT_L(chan, tmr_idx));
+ hi = rd32(hw, GLTSYN_EVNT_H(chan, tmr_idx));
+ event.timestamp = (u64)hi << 32 | lo;
+
+ /* Add delay compensation */
+ pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_EXTTS, chan);
+ if (pin_desc_idx >= 0) {
+ const struct ice_ptp_pin_desc *desc;
+
+ desc = &pf->ptp.ice_pin_desc[pin_desc_idx];
+ event.timestamp -= desc->delay[0];
}
+
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = chan;
+ pf->ptp.ext_ts_irq &= ~(1 << chan);
+ ptp_clock_event(pf->ptp.clock, &event);
}
}
@@ -1600,14 +1538,6 @@ static int ice_ptp_cfg_extts(struct ice_pf *pf, struct ptp_extts_request *rq,
int pin_desc_idx;
u8 tmr_idx;
- /* Reject requests with unsupported flags */
-
- if (rq->flags & ~(PTP_ENABLE_FEATURE |
- PTP_RISING_EDGE |
- PTP_FALLING_EDGE |
- PTP_STRICT_FLAGS))
- return -EOPNOTSUPP;
-
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
chan = rq->index;
@@ -1711,11 +1641,11 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
/* 0. Reset mode & out_en in AUX_OUT */
wr32(hw, GLTSYN_AUX_OUT(chan, tmr_idx), 0);
- if (ice_is_e825c(hw)) {
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825) {
int err;
/* Enable/disable CGU 1PPS output for E825C */
- err = ice_cgu_cfg_pps_out(hw, !!period);
+ err = ice_tspll_cfg_pps_out_e825c(hw, !!period);
if (err)
return err;
}
@@ -1754,6 +1684,7 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
8 + chan + (tmr_idx * 4));
wr32(hw, GLGEN_GPIO_CTL(gpio_pin), val);
+ ice_flush(hw);
return 0;
}
@@ -1772,19 +1703,17 @@ static int ice_ptp_write_perout(struct ice_hw *hw, unsigned int chan,
static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
int on)
{
+ unsigned int gpio_pin, prop_delay_ns;
u64 clk, period, start, phase;
struct ice_hw *hw = &pf->hw;
- unsigned int gpio_pin;
int pin_desc_idx;
- if (rq->flags & ~PTP_PEROUT_PHASE)
- return -EOPNOTSUPP;
-
pin_desc_idx = ice_ptp_find_pin_idx(pf, PTP_PF_PEROUT, rq->index);
if (pin_desc_idx < 0)
return -EIO;
gpio_pin = pf->ptp.ice_pin_desc[pin_desc_idx].gpio[1];
+ prop_delay_ns = pf->ptp.ice_pin_desc[pin_desc_idx].delay[1];
period = rq->period.sec * NSEC_PER_SEC + rq->period.nsec;
/* If we're disabling the output or period is 0, clear out CLKO and TGT
@@ -1794,7 +1723,7 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
return ice_ptp_write_perout(hw, rq->index, gpio_pin, 0, 0);
if (strncmp(pf->ptp.pin_desc[pin_desc_idx].name, "1PPS", 64) == 0 &&
- period != NSEC_PER_SEC && hw->ptp.phy_model == ICE_PHY_E82X) {
+ period != NSEC_PER_SEC && hw->mac_type == ICE_MAC_GENERIC) {
dev_err(ice_pf_to_dev(pf), "1PPS pin supports only 1 s period\n");
return -EOPNOTSUPP;
}
@@ -1813,14 +1742,15 @@ static int ice_ptp_cfg_perout(struct ice_pf *pf, struct ptp_perout_request *rq,
div64_u64_rem(start, period, &phase);
/* If we have only phase or start time is in the past, start the timer
- * at the next multiple of period, maintaining phase.
+ * at the next multiple of period that maintains the phase and is at least
+ * 0.5 second from now, so we have time to write it to HW.
*/
- clk = ice_ptp_read_src_clk_reg(pf, NULL);
- if (rq->flags & PTP_PEROUT_PHASE || start <= clk - ice_prop_delay(hw))
+ clk = ice_ptp_read_src_clk_reg(pf, NULL) + NSEC_PER_MSEC * 500;
+ if (rq->flags & PTP_PEROUT_PHASE || start <= clk - prop_delay_ns)
start = div64_u64(clk + period - 1, period) * period + phase;
/* Compensate for propagation delay from the generator to the pin. */
- start -= ice_prop_delay(hw);
+ start -= prop_delay_ns;
return ice_ptp_write_perout(hw, rq->index, gpio_pin, start, period);
}
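/* Worked example (illustrative numbers, not part of the patch): how the block
 * above rounds the start time to the next period boundary while preserving
 * the requested phase.
 *
 *   period = 1 000 000 000 ns   (1 s output)
 *   phase  =   250 000 000 ns   (start % period)
 *   clk    = 12 400 000 000 ns  (PHC now plus the 0.5 s write margin)
 *
 *   start = div64_u64(clk + period - 1, period) * period + phase
 *         = ceil(12.4 s / 1 s) * 1 s + 0.25 s
 *         = 13.25 s
 *
 * prop_delay_ns is then subtracted so the edge appears on the pin at the
 * requested time.
 */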
@@ -1860,63 +1790,6 @@ static void ice_ptp_enable_all_perout(struct ice_pf *pf)
}
/**
- * ice_ptp_disable_shared_pin - Disable enabled pin that shares GPIO
- * @pf: Board private structure
- * @pin: Pin index
- * @func: Assigned function
- *
- * Return: 0 on success, negative error code otherwise
- */
-static int ice_ptp_disable_shared_pin(struct ice_pf *pf, unsigned int pin,
- enum ptp_pin_function func)
-{
- unsigned int gpio_pin;
-
- switch (func) {
- case PTP_PF_PEROUT:
- gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[1];
- break;
- case PTP_PF_EXTTS:
- gpio_pin = pf->ptp.ice_pin_desc[pin].gpio[0];
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
- struct ptp_pin_desc *pin_desc = &pf->ptp.pin_desc[i];
- unsigned int chan = pin_desc->chan;
-
- /* Skip pin idx from the request */
- if (i == pin)
- continue;
-
- if (pin_desc->func == PTP_PF_PEROUT &&
- pf->ptp.ice_pin_desc[i].gpio[1] == gpio_pin) {
- pf->ptp.perout_rqs[chan].period.sec = 0;
- pf->ptp.perout_rqs[chan].period.nsec = 0;
- pin_desc->func = PTP_PF_NONE;
- pin_desc->chan = 0;
- dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared output GPIO pin %u\n",
- i, gpio_pin);
- return ice_ptp_cfg_perout(pf, &pf->ptp.perout_rqs[chan],
- false);
- } else if (pf->ptp.pin_desc->func == PTP_PF_EXTTS &&
- pf->ptp.ice_pin_desc[i].gpio[0] == gpio_pin) {
- pf->ptp.extts_rqs[chan].flags &= ~PTP_ENABLE_FEATURE;
- pin_desc->func = PTP_PF_NONE;
- pin_desc->chan = 0;
- dev_dbg(ice_pf_to_dev(pf), "Disabling pin %u with shared input GPIO pin %u\n",
- i, gpio_pin);
- return ice_ptp_cfg_extts(pf, &pf->ptp.extts_rqs[chan],
- false);
- }
- }
-
- return 0;
-}
-
-/**
* ice_verify_pin - verify if pin supports requested pin function
* @info: the driver's PTP info structure
* @pin: Pin index
@@ -1950,14 +1823,6 @@ static int ice_verify_pin(struct ptp_clock_info *info, unsigned int pin,
return -EOPNOTSUPP;
}
- /* On adapters with SMA_CTRL disable other pins that share same GPIO */
- if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
- ice_ptp_disable_shared_pin(pf, pin, func);
- pf->ptp.pin_desc[pin].func = func;
- pf->ptp.pin_desc[pin].chan = chan;
- return ice_ptp_set_sma_cfg(pf);
- }
-
return 0;
}
@@ -2048,7 +1913,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
/* For Vernier mode on E82X, we need to recalibrate after new settime.
* Start with marking timestamps as invalid.
*/
- if (ice_get_phy_model(hw) == ICE_PHY_E82X) {
+ if (hw->mac_type == ICE_MAC_GENERIC) {
err = ice_ptp_clear_phy_offset_ready_e82x(hw);
if (err)
dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
@@ -2072,7 +1937,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
ice_ptp_enable_all_perout(pf);
/* Recalibrate and re-enable timestamp blocks for E822/E823 */
- if (ice_get_phy_model(hw) == ICE_PHY_E82X)
+ if (hw->mac_type == ICE_MAC_GENERIC)
ice_ptp_restart_all_phy(pf);
exit:
if (err) {
@@ -2150,93 +2015,158 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
return 0;
}
+/**
+ * struct ice_crosststamp_cfg - Device cross timestamp configuration
+ * @lock_reg: The hardware semaphore lock to use
+ * @lock_busy: Bit in the semaphore lock indicating the lock is busy
+ * @ctl_reg: The hardware register to request cross timestamp
+ * @ctl_active: Bit in the control register to request cross timestamp
+ * @art_time_l: Lower 32-bits of ART system time
+ * @art_time_h: Upper 32-bits of ART system time
+ * @dev_time_l: Lower 32-bits of device time (per timer index)
+ * @dev_time_h: Upper 32-bits of device time (per timer index)
+ */
+struct ice_crosststamp_cfg {
+ /* HW semaphore lock register */
+ u32 lock_reg;
+ u32 lock_busy;
+
+ /* Capture control register */
+ u32 ctl_reg;
+ u32 ctl_active;
+
+ /* Time storage */
+ u32 art_time_l;
+ u32 art_time_h;
+ u32 dev_time_l[2];
+ u32 dev_time_h[2];
+};
+
+static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e82x = {
+ .lock_reg = PFHH_SEM,
+ .lock_busy = PFHH_SEM_BUSY_M,
+ .ctl_reg = GLHH_ART_CTL,
+ .ctl_active = GLHH_ART_CTL_ACTIVE_M,
+ .art_time_l = GLHH_ART_TIME_L,
+ .art_time_h = GLHH_ART_TIME_H,
+ .dev_time_l[0] = GLTSYN_HHTIME_L(0),
+ .dev_time_h[0] = GLTSYN_HHTIME_H(0),
+ .dev_time_l[1] = GLTSYN_HHTIME_L(1),
+ .dev_time_h[1] = GLTSYN_HHTIME_H(1),
+};
+
#ifdef CONFIG_ICE_HWTS
+static const struct ice_crosststamp_cfg ice_crosststamp_cfg_e830 = {
+ .lock_reg = E830_PFPTM_SEM,
+ .lock_busy = E830_PFPTM_SEM_BUSY_M,
+ .ctl_reg = E830_GLPTM_ART_CTL,
+ .ctl_active = E830_GLPTM_ART_CTL_ACTIVE_M,
+ .art_time_l = E830_GLPTM_ART_TIME_L,
+ .art_time_h = E830_GLPTM_ART_TIME_H,
+ .dev_time_l[0] = E830_GLTSYN_PTMTIME_L(0),
+ .dev_time_h[0] = E830_GLTSYN_PTMTIME_H(0),
+ .dev_time_l[1] = E830_GLTSYN_PTMTIME_L(1),
+ .dev_time_h[1] = E830_GLTSYN_PTMTIME_H(1),
+};
+
+#endif /* CONFIG_ICE_HWTS */
+/**
+ * struct ice_crosststamp_ctx - Device cross timestamp context
+ * @snapshot: snapshot of system clocks for historic interpolation
+ * @pf: pointer to the PF private structure
+ * @cfg: pointer to hardware configuration for cross timestamp
+ */
+struct ice_crosststamp_ctx {
+ struct system_time_snapshot snapshot;
+ struct ice_pf *pf;
+ const struct ice_crosststamp_cfg *cfg;
+};
+
/**
- * ice_ptp_get_syncdevicetime - Get the cross time stamp info
+ * ice_capture_crosststamp - Capture a device/system cross timestamp
* @device: Current device time
* @system: System counter value read synchronously with device time
- * @ctx: Context provided by timekeeping code
+ * @__ctx: Context passed from ice_ptp_getcrosststamp
*
* Read device and system (ART) clock simultaneously and return the corrected
* clock values in ns.
+ *
+ * Return: zero on success, or a negative error code on failure.
*/
-static int
-ice_ptp_get_syncdevicetime(ktime_t *device,
- struct system_counterval_t *system,
- void *ctx)
+static int ice_capture_crosststamp(ktime_t *device,
+ struct system_counterval_t *system,
+ void *__ctx)
{
- struct ice_pf *pf = (struct ice_pf *)ctx;
- struct ice_hw *hw = &pf->hw;
- u32 hh_lock, hh_art_ctl;
- int i;
+ struct ice_crosststamp_ctx *ctx = __ctx;
+ const struct ice_crosststamp_cfg *cfg;
+ u32 lock, ctl, ts_lo, ts_hi, tmr_idx;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int err;
+ u64 ts;
-#define MAX_HH_HW_LOCK_TRIES 5
-#define MAX_HH_CTL_LOCK_TRIES 100
+ cfg = ctx->cfg;
+ pf = ctx->pf;
+ hw = &pf->hw;
- for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) {
- /* Get the HW lock */
- hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
- if (hh_lock & PFHH_SEM_BUSY_M) {
- usleep_range(10000, 15000);
- continue;
- }
- break;
- }
- if (hh_lock & PFHH_SEM_BUSY_M) {
- dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
+ if (tmr_idx > 1)
+ return -EINVAL;
+
+ /* Poll until we obtain the cross-timestamp hardware semaphore */
+ err = rd32_poll_timeout(hw, cfg->lock_reg, lock,
+ !(lock & cfg->lock_busy),
+ 10 * USEC_PER_MSEC, 50 * USEC_PER_MSEC);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "PTP failed to get cross timestamp lock\n");
return -EBUSY;
}
+ /* Snapshot system time for historic interpolation */
+ ktime_get_snapshot(&ctx->snapshot);
+
/* Program cmd to master timer */
ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
/* Start the ART and device clock sync sequence */
- hh_art_ctl = rd32(hw, GLHH_ART_CTL);
- hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
- wr32(hw, GLHH_ART_CTL, hh_art_ctl);
-
- for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) {
- /* Wait for sync to complete */
- hh_art_ctl = rd32(hw, GLHH_ART_CTL);
- if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
- udelay(1);
- continue;
- } else {
- u32 hh_ts_lo, hh_ts_hi, tmr_idx;
- u64 hh_ts;
-
- tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
- /* Read ART time */
- hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
- hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
- hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
- system->cycles = hh_ts;
- system->cs_id = CSID_X86_ART;
- /* Read Device source clock time */
- hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
- hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
- hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
- *device = ns_to_ktime(hh_ts);
- break;
- }
- }
+ ctl = rd32(hw, cfg->ctl_reg);
+ ctl |= cfg->ctl_active;
+ wr32(hw, cfg->ctl_reg, ctl);
+ /* Poll until hardware completes the capture */
+ err = rd32_poll_timeout(hw, cfg->ctl_reg, ctl, !(ctl & cfg->ctl_active),
+ 5, 20 * USEC_PER_MSEC);
+ if (err)
+ goto err_timeout;
+
+ /* Read ART system time */
+ ts_lo = rd32(hw, cfg->art_time_l);
+ ts_hi = rd32(hw, cfg->art_time_h);
+ ts = ((u64)ts_hi << 32) | ts_lo;
+ system->cycles = ts;
+ system->cs_id = CSID_X86_ART;
+ system->use_nsecs = true;
+
+ /* Read Device source clock time */
+ ts_lo = rd32(hw, cfg->dev_time_l[tmr_idx]);
+ ts_hi = rd32(hw, cfg->dev_time_h[tmr_idx]);
+ ts = ((u64)ts_hi << 32) | ts_lo;
+ *device = ns_to_ktime(ts);
+
+err_timeout:
/* Clear the master timer */
ice_ptp_src_cmd(hw, ICE_PTP_NOP);
/* Release HW lock */
- hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
- hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
- wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);
-
- if (i == MAX_HH_CTL_LOCK_TRIES)
- return -ETIMEDOUT;
+ lock = rd32(hw, cfg->lock_reg);
+ lock &= ~cfg->lock_busy;
+ wr32(hw, cfg->lock_reg, lock);
- return 0;
+ return err;
}
/**
- * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp
+ * ice_ptp_getcrosststamp - Capture a device cross timestamp
* @info: the driver's PTP info structure
* @cts: The memory to fill the cross timestamp info
*
@@ -2244,41 +2174,55 @@ ice_ptp_get_syncdevicetime(ktime_t *device,
* clock. Fill the cross timestamp information and report it back to the
* caller.
*
- * This is only valid for E822 and E823 devices which have support for
- * generating the cross timestamp via PCIe PTM.
- *
* In order to correctly correlate the ART timestamp back to the TSC time, the
* CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
+ *
+ * Return: zero on success, or a negative error code on failure.
*/
-static int
-ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info,
- struct system_device_crosststamp *cts)
+static int ice_ptp_getcrosststamp(struct ptp_clock_info *info,
+ struct system_device_crosststamp *cts)
{
struct ice_pf *pf = ptp_info_to_pf(info);
+ struct ice_crosststamp_ctx ctx = {
+ .pf = pf,
+ };
+
+ switch (pf->hw.mac_type) {
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
+ ctx.cfg = &ice_crosststamp_cfg_e82x;
+ break;
+#ifdef CONFIG_ICE_HWTS
+ case ICE_MAC_E830:
+ ctx.cfg = &ice_crosststamp_cfg_e830;
+ break;
+#endif /* CONFIG_ICE_HWTS */
+ default:
+ return -EOPNOTSUPP;
+ }
- return get_device_system_crosststamp(ice_ptp_get_syncdevicetime,
- pf, NULL, cts);
+ return get_device_system_crosststamp(ice_capture_crosststamp, &ctx,
+ &ctx.snapshot, cts);
}
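/* Userspace sketch (not part of the patch): the getcrosststamp callback above
 * is ultimately what services the PTP_SYS_OFFSET_PRECISE ioctl on the PHC
 * character device.  "/dev/ptp0" is an assumption; use the instance that
 * belongs to the ice PF.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_sys_offset_precise xts = { 0 };
	int fd = open("/dev/ptp0", O_RDWR);

	if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_PRECISE, &xts)) {
		perror("PTP_SYS_OFFSET_PRECISE");
		return 1;
	}

	/* Device (PHC) time and CLOCK_REALTIME captured at the same instant */
	printf("phc %lld.%09u  sys %lld.%09u\n",
	       (long long)xts.device.sec, xts.device.nsec,
	       (long long)xts.sys_realtime.sec, xts.sys_realtime.nsec);
	return 0;
}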
-#endif /* CONFIG_ICE_HWTS */
/**
- * ice_ptp_get_ts_config - ioctl interface to read the timestamping config
- * @pf: Board private structure
- * @ifr: ioctl data
+ * ice_ptp_hwtstamp_get - interface to read the timestamping config
+ * @netdev: Pointer to network interface device structure
+ * @config: Timestamping configuration structure
*
* Copy the timestamping config to user buffer
*/
-int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+int ice_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
- struct hwtstamp_config *config;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
if (pf->ptp.state != ICE_PTP_READY)
return -EIO;
- config = &pf->ptp.tstamp_config;
+ *config = pf->ptp.tstamp_config;
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
+ return 0;
}
/**
@@ -2286,8 +2230,8 @@ int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
* @pf: Board private structure
* @config: hwtstamp settings requested or saved
*/
-static int
-ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
+static int ice_ptp_set_timestamp_mode(struct ice_pf *pf,
+ struct kernel_hwtstamp_config *config)
{
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
@@ -2331,32 +2275,31 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
}
/**
- * ice_ptp_set_ts_config - ioctl interface to control the timestamping
- * @pf: Board private structure
- * @ifr: ioctl data
+ * ice_ptp_hwtstamp_set - interface to control the timestamping
+ * @netdev: Pointer to network interface device structure
+ * @config: Timestamping configuration structure
+ * @extack: Netlink extended ack structure for error reporting
*
* Get the user config and store it
*/
-int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+int ice_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
- struct hwtstamp_config config;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
int err;
if (pf->ptp.state != ICE_PTP_READY)
return -EAGAIN;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- err = ice_ptp_set_timestamp_mode(pf, &config);
+ err = ice_ptp_set_timestamp_mode(pf, config);
if (err)
return err;
/* Return the actual configuration set */
- config = pf->ptp.tstamp_config;
+ *config = pf->ptp.tstamp_config;
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
+ return 0;
}
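/* Userspace sketch (not part of the patch): the ndo-style
 * ice_ptp_hwtstamp_set()/ice_ptp_hwtstamp_get() above still back the classic
 * SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctls; the networking core is assumed to
 * translate between struct hwtstamp_config and struct kernel_hwtstamp_config.
 * The interface name and rx_filter choice below are assumptions.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/net_tstamp.h>

static int enable_hw_timestamping(const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr = { 0 };
	int sock, err;

	sock = socket(AF_INET, SOCK_DGRAM, 0);
	if (sock < 0)
		return -1;

	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* On success the driver writes back the mode it actually programmed */
	err = ioctl(sock, SIOCSHWTSTAMP, &ifr);
	if (!err)
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);

	close(sock);
	return err;
}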
/**
@@ -2402,14 +2345,14 @@ static void ice_ptp_setup_pin_cfg(struct ice_pf *pf)
for (unsigned int i = 0; i < pf->ptp.info.n_pins; i++) {
const struct ice_ptp_pin_desc *desc = &pf->ptp.ice_pin_desc[i];
struct ptp_pin_desc *pin = &pf->ptp.pin_desc[i];
- const char *name = NULL;
+ const char *name;
if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
name = ice_pin_names[desc->name_idx];
- else if (desc->name_idx != GPIO_NA)
- name = ice_pin_names_nvm[desc->name_idx];
- if (name)
- strscpy(pin->name, name, sizeof(pin->name));
+ else
+ name = ice_pin_names_dpll[desc->name_idx];
+
+ strscpy(pin->name, name, sizeof(pin->name));
pin->index = i;
}
@@ -2421,8 +2364,8 @@ static void ice_ptp_setup_pin_cfg(struct ice_pf *pf)
* ice_ptp_disable_pins - Disable PTP pins
* @pf: pointer to the PF structure
*
- * Disable the OS access to the SMA pins. Called to clear out the OS
- * indications of pin support when we fail to setup the SMA control register.
+ * Disable the OS access to the pins. Called to clear out the OS
+ * indications of pin support when we fail to set up the pin array.
*/
static void ice_ptp_disable_pins(struct ice_pf *pf)
{
@@ -2463,40 +2406,30 @@ static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries,
for (i = 0; i < num_entries; i++) {
u16 entry = le16_to_cpu(entries[i]);
DECLARE_BITMAP(bitmap, GPIO_NA);
- unsigned int bitmap_idx;
+ unsigned int idx;
bool dir;
u16 gpio;
*bitmap = FIELD_GET(ICE_AQC_NVM_SDP_AC_PIN_M, entry);
+
+ /* Check if entry's pin bitmap is valid. */
+ if (bitmap_empty(bitmap, GPIO_NA))
+ continue;
+
dir = !!FIELD_GET(ICE_AQC_NVM_SDP_AC_DIR_M, entry);
gpio = FIELD_GET(ICE_AQC_NVM_SDP_AC_SDP_NUM_M, entry);
- for_each_set_bit(bitmap_idx, bitmap, GPIO_NA + 1) {
- unsigned int idx;
-
- /* Check if entry's pin bit is valid */
- if (bitmap_idx >= NUM_PTP_PINS_NVM &&
- bitmap_idx != GPIO_NA)
- continue;
- /* Check if pin already exists */
- for (idx = 0; idx < ICE_N_PINS_MAX; idx++)
- if (pins[idx].name_idx == bitmap_idx)
- break;
-
- if (idx == ICE_N_PINS_MAX) {
- /* Pin not found, setup its entry and name */
- idx = n_pins++;
- pins[idx].name_idx = bitmap_idx;
- if (bitmap_idx == GPIO_NA)
- strscpy(pf->ptp.pin_desc[idx].name,
- ice_pin_names[gpio],
- sizeof(pf->ptp.pin_desc[idx]
- .name));
- }
+ for (idx = 0; idx < ICE_N_PINS_MAX; idx++) {
+ if (pins[idx].name_idx == gpio)
+ break;
+ }
- /* Setup in/out GPIO number */
- pins[idx].gpio[dir] = gpio;
+ if (idx == ICE_N_PINS_MAX) {
+ /* Pin not found, setup its entry and name */
+ idx = n_pins++;
+ pins[idx].name_idx = gpio;
}
+ pins[idx].gpio[dir] = gpio;
}
for (i = 0; i < n_pins; i++) {
@@ -2520,18 +2453,14 @@ static int ice_ptp_parse_sdp_entries(struct ice_pf *pf, __le16 *entries,
*/
static void ice_ptp_set_funcs_e82x(struct ice_pf *pf)
{
-#ifdef CONFIG_ICE_HWTS
- if (boot_cpu_has(X86_FEATURE_ART) &&
- boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
- pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp_e82x;
+ pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
-#endif /* CONFIG_ICE_HWTS */
- if (ice_is_e825c(&pf->hw)) {
+ if (pf->hw.mac_type == ICE_MAC_GENERIC_3K_E825) {
pf->ptp.ice_pin_desc = ice_pin_desc_e825c;
- pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e825c);
+ pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e825c);
} else {
pf->ptp.ice_pin_desc = ice_pin_desc_e82x;
- pf->ptp.info.n_pins = ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e82x);
+ pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e82x);
}
ice_ptp_setup_pin_cfg(pf);
}
@@ -2557,15 +2486,13 @@ static void ice_ptp_set_funcs_e810(struct ice_pf *pf)
if (err) {
/* SDP section does not exist in NVM or is corrupted */
if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
- ptp->ice_pin_desc = ice_pin_desc_e810_sma;
- ptp->info.n_pins =
- ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810_sma);
+ ptp->ice_pin_desc = ice_pin_desc_dpll;
+ ptp->info.n_pins = ARRAY_SIZE(ice_pin_desc_dpll);
} else {
pf->ptp.ice_pin_desc = ice_pin_desc_e810;
- pf->ptp.info.n_pins =
- ICE_PIN_DESC_ARR_LEN(ice_pin_desc_e810);
- err = 0;
+ pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e810);
}
+ err = 0;
} else {
desc = devm_kcalloc(ice_pf_to_dev(pf), ICE_N_PINS_MAX,
sizeof(struct ice_ptp_pin_desc),
@@ -2583,8 +2510,6 @@ static void ice_ptp_set_funcs_e810(struct ice_pf *pf)
ptp->info.pin_config = ptp->pin_desc;
ice_ptp_setup_pin_cfg(pf);
- if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
- err = ice_ptp_set_sma_cfg(pf);
err:
if (err) {
devm_kfree(ice_pf_to_dev(pf), desc);
@@ -2593,6 +2518,28 @@ err:
}
/**
+ * ice_ptp_set_funcs_e830 - Set specialized functions for E830 support
+ * @pf: Board private structure
+ *
+ * Assign functions to the PTP capabilities structure for E830 devices.
+ * Functions which operate across all device families should be set directly
+ * in ice_ptp_set_caps. Only add functions here which are distinct for E830
+ * devices.
+ */
+static void ice_ptp_set_funcs_e830(struct ice_pf *pf)
+{
+#ifdef CONFIG_ICE_HWTS
+ if (pcie_ptm_enabled(pf->pdev) && boot_cpu_has(X86_FEATURE_ART))
+ pf->ptp.info.getcrosststamp = ice_ptp_getcrosststamp;
+
+#endif /* CONFIG_ICE_HWTS */
+ /* Rest of the config is the same as base E810 */
+ pf->ptp.ice_pin_desc = ice_pin_desc_e810;
+ pf->ptp.info.n_pins = ARRAY_SIZE(ice_pin_desc_e810);
+ ice_ptp_setup_pin_cfg(pf);
+}
+
+/**
* ice_ptp_set_caps - Set PTP capabilities
* @pf: Board private structure
*/
@@ -2614,10 +2561,25 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
info->enable = ice_ptp_gpio_enable;
info->verify = ice_verify_pin;
- if (ice_is_e810(&pf->hw))
+ info->supported_extts_flags = PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS;
+ info->supported_perout_flags = PTP_PEROUT_PHASE;
+
+ switch (pf->hw.mac_type) {
+ case ICE_MAC_E810:
ice_ptp_set_funcs_e810(pf);
- else
+ return;
+ case ICE_MAC_E830:
+ ice_ptp_set_funcs_e830(pf);
+ return;
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
ice_ptp_set_funcs_e82x(pf);
+ return;
+ default:
+ return;
+ }
}
/**
@@ -2728,6 +2690,68 @@ enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
}
/**
+ * ice_ptp_ts_irq - Process the PTP Tx timestamps in IRQ context
+ * @pf: Board private structure
+ *
+ * Return: IRQ_WAKE_THREAD if Tx timestamp read has to be handled in the bottom
+ * half of the interrupt and IRQ_HANDLED otherwise.
+ */
+irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ /* E810 capable of low latency timestamping with interrupt can
+ * request a single timestamp in the top half and wait for
+ * a second LL TS interrupt from the FW when it's ready.
+ */
+ if (hw->dev_caps.ts_dev_info.ts_ll_int_read) {
+ struct ice_ptp_tx *tx = &pf->ptp.port.tx;
+ u8 idx, last;
+
+ if (!ice_pf_state_is_nominal(pf))
+ return IRQ_HANDLED;
+
+ spin_lock(&tx->lock);
+ if (tx->init) {
+ last = tx->last_ll_ts_idx_read + 1;
+ idx = find_next_bit_wrap(tx->in_use, tx->len,
+ last);
+ if (idx != tx->len)
+ ice_ptp_req_tx_single_tstamp(tx, idx);
+ }
+ spin_unlock(&tx->lock);
+
+ return IRQ_HANDLED;
+ }
+ fallthrough; /* non-LL_TS E810 */
+ case ICE_MAC_GENERIC:
+ case ICE_MAC_GENERIC_3K_E825:
+ /* All other devices process timestamps in the bottom half due
+ * to sleeping or polling.
+ */
+ if (!ice_ptp_pf_handles_tx_interrupt(pf))
+ return IRQ_HANDLED;
+
+ set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
+ return IRQ_WAKE_THREAD;
+ case ICE_MAC_E830:
+ /* E830 can read timestamps in the top half using rd32() */
+ if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) {
+ /* Process outstanding Tx timestamps. If there
+ * is more work, re-arm the interrupt to trigger again.
+ */
+ wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
+ ice_flush(hw);
+ }
+ return IRQ_HANDLED;
+ default:
+ return IRQ_HANDLED;
+ }
+}
+
+/**
 * ice_ptp_maybe_trigger_tx_interrupt - Trigger Tx timestamp interrupt
* @pf: Board private structure
*
@@ -2747,7 +2771,7 @@ static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
bool trigger_oicr = false;
unsigned int i;
- if (ice_is_e810(hw))
+ if (!pf->ptp.port.tx.has_ready_bitmap)
return;
if (!ice_pf_src_tmr_owned(pf))
@@ -2794,6 +2818,32 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
}
/**
+ * ice_ptp_prepare_rebuild_sec - Prepare second NAC for PTP reset or rebuild
+ * @pf: Board private structure
+ * @rebuild: rebuild if true, prepare if false
+ * @reset_type: the reset type being performed
+ */
+static void ice_ptp_prepare_rebuild_sec(struct ice_pf *pf, bool rebuild,
+ enum ice_reset_req reset_type)
+{
+ struct list_head *entry;
+
+ list_for_each(entry, &pf->adapter->ports.ports) {
+ struct ice_ptp_port *port = list_entry(entry,
+ struct ice_ptp_port,
+ list_node);
+ struct ice_pf *peer_pf = ptp_port_to_pf(port);
+
+ if (!ice_is_primary(&peer_pf->hw)) {
+ if (rebuild)
+ ice_ptp_rebuild(peer_pf, reset_type);
+ else
+ ice_ptp_prepare_for_reset(peer_pf, reset_type);
+ }
+ }
+}
+
+/**
* ice_ptp_prepare_for_reset - Prepare PTP for reset
* @pf: Board private structure
* @reset_type: the reset type being performed
@@ -2801,6 +2851,7 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct ice_ptp *ptp = &pf->ptp;
+ struct ice_hw *hw = &pf->hw;
u8 src_tmr;
if (ptp->state != ICE_PTP_READY)
@@ -2816,6 +2867,9 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
if (reset_type == ICE_RESET_PFR)
return;
+ if (ice_pf_src_tmr_owned(pf) && hw->mac_type == ICE_MAC_GENERIC_3K_E825)
+ ice_ptp_prepare_rebuild_sec(pf, false, reset_type);
+
ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
/* Disable periodic outputs */
@@ -2849,6 +2903,10 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf)
if (err)
return err;
+ err = ice_tspll_init(hw);
+ if (err)
+ return err;
+
/* Acquire the global hardware lock */
if (!ice_ptp_lock(hw)) {
err = -EBUSY;
@@ -2882,14 +2940,12 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf)
*/
ice_ptp_flush_all_tx_tracker(pf);
- if (!ice_is_e810(hw)) {
- /* Enable quad interrupts */
- err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
- if (err)
- return err;
+ /* Enable quad interrupts */
+ err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
+ if (err)
+ return err;
- ice_ptp_restart_all_phy(pf);
- }
+ ice_ptp_restart_all_phy(pf);
/* Re-enable all periodic outputs and external timestamp events */
ice_ptp_enable_all_perout(pf);
@@ -2939,12 +2995,6 @@ err:
dev_err(ice_pf_to_dev(pf), "PTP reset failed %d\n", err);
}
-static bool ice_is_primary(struct ice_hw *hw)
-{
- return ice_is_e825c(hw) && ice_is_dual(hw) ?
- !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) : true;
-}
-
static int ice_ptp_setup_adapter(struct ice_pf *pf)
{
if (!ice_pf_src_tmr_owned(pf) || !ice_is_primary(&pf->hw))
@@ -2960,7 +3010,7 @@ static int ice_ptp_setup_pf(struct ice_pf *pf)
struct ice_ptp *ctrl_ptp = ice_get_ctrl_ptp(pf);
struct ice_ptp *ptp = &pf->ptp;
- if (WARN_ON(!ctrl_ptp) || ice_get_phy_model(&pf->hw) == ICE_PHY_UNSUP)
+ if (WARN_ON(!ctrl_ptp) || pf->hw.mac_type == ICE_MAC_UNKNOWN)
return -ENODEV;
INIT_LIST_HEAD(&ptp->port.list_node);
@@ -2977,7 +3027,7 @@ static void ice_ptp_cleanup_pf(struct ice_pf *pf)
{
struct ice_ptp *ptp = &pf->ptp;
- if (ice_get_phy_model(&pf->hw) != ICE_PHY_UNSUP) {
+ if (pf->hw.mac_type != ICE_MAC_UNKNOWN) {
mutex_lock(&pf->adapter->ports.lock);
list_del(&ptp->port.list_node);
mutex_unlock(&pf->adapter->ports.lock);
@@ -3024,6 +3074,13 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
return err;
}
+ err = ice_tspll_init(hw);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "Failed to initialize CGU, status %d\n",
+ err);
+ return err;
+ }
+
/* Acquire the global hardware lock */
if (!ice_ptp_lock(hw)) {
err = -EBUSY;
@@ -3080,7 +3137,7 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
/* Allocate a kworker for handling work required for the ports
* connected to the PTP hardware clock.
*/
- kworker = kthread_create_worker(0, "ice-ptp-%s",
+ kworker = kthread_run_worker(0, "ice-ptp-%s",
dev_name(ice_pf_to_dev(pf)));
if (IS_ERR(kworker))
return PTR_ERR(kworker);
@@ -3097,6 +3154,8 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
* ice_ptp_init_port - Initialize PTP port structure
* @pf: Board private structure
* @ptp_port: PTP port structure
+ *
+ * Return: 0 on success, -ENODEV on invalid MAC type, -ENOMEM on failed alloc.
*/
static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
{
@@ -3104,16 +3163,14 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
mutex_init(&ptp_port->ps_lock);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx,
- ptp_port->port_num);
- case ICE_PHY_E810:
- return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
- case ICE_PHY_E82X:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_ptp_init_tx(pf, &ptp_port->tx, ptp_port->port_num);
+ case ICE_MAC_GENERIC:
kthread_init_delayed_work(&ptp_port->ov_work,
ice_ptp_wait_for_offsets);
-
return ice_ptp_init_tx_e82x(pf, &ptp_port->tx,
ptp_port->port_num);
default:
@@ -3132,8 +3189,8 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
*/
static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
{
- switch (ice_get_phy_model(&pf->hw)) {
- case ICE_PHY_E82X:
+ switch (pf->hw.mac_type) {
+ case ICE_MAC_GENERIC:
/* E822 based PHY has the clock owner process the interrupt
* for all ports.
*/
@@ -3168,6 +3225,12 @@ void ice_ptp_init(struct ice_pf *pf)
ptp->state = ICE_PTP_INITIALIZING;
+ if (hw->lane_num < 0) {
+ err = hw->lane_num;
+ goto err_exit;
+ }
+ ptp->port.port_num = hw->lane_num;
+
ice_ptp_init_hw(hw);
ice_ptp_init_tx_interrupt_mode(pf);
@@ -3188,13 +3251,9 @@ void ice_ptp_init(struct ice_pf *pf)
if (err)
goto err_exit;
- ptp->port.port_num = hw->pf_id;
- if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
- ptp->port.port_num = hw->pf_id * 2;
-
err = ice_ptp_init_port(pf, &ptp->port);
if (err)
- goto err_exit;
+ goto err_clean_pf;
/* Start the PHY timestamping block */
ice_ptp_reset_phy_timestamping(pf);
@@ -3211,13 +3270,19 @@ void ice_ptp_init(struct ice_pf *pf)
dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
return;
+err_clean_pf:
+ mutex_destroy(&ptp->port.ps_lock);
+ ice_ptp_cleanup_pf(pf);
err_exit:
/* If we registered a PTP clock, release it */
if (pf->ptp.clock) {
ptp_clock_unregister(ptp->clock);
pf->ptp.clock = NULL;
}
- ptp->state = ICE_PTP_ERROR;
+ /* Keep the ICE_PTP_UNINIT state to avoid ambiguity at driver unload
+ * and to avoid releasing resources twice.
+ */
+ ptp->state = ICE_PTP_UNINIT;
dev_err(ice_pf_to_dev(pf), "PTP failed %d\n", err);
}
@@ -3230,9 +3295,19 @@ err_exit:
*/
void ice_ptp_release(struct ice_pf *pf)
{
- if (pf->ptp.state != ICE_PTP_READY)
+ if (pf->ptp.state == ICE_PTP_UNINIT)
return;
+ if (pf->ptp.state != ICE_PTP_READY) {
+ mutex_destroy(&pf->ptp.port.ps_lock);
+ ice_ptp_cleanup_pf(pf);
+ if (pf->ptp.clock) {
+ ptp_clock_unregister(pf->ptp.clock);
+ pf->ptp.clock = NULL;
+ }
+ return;
+ }
+
pf->ptp.state = ICE_PTP_UNINIT;
/* Disable timestamping for both Tx and Rx */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 824e73b677a4..27016aac4f1e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -128,8 +128,7 @@ struct ice_ptp_tx {
/* Quad and port information for initializing timestamp blocks */
#define INDEX_PER_QUAD 64
#define INDEX_PER_PORT_E82X 16
-#define INDEX_PER_PORT_E810 64
-#define INDEX_PER_PORT_ETH56G 64
+#define INDEX_PER_PORT 64
/**
* struct ice_ptp_port - data used to initialize an external port for PTP
@@ -203,14 +202,12 @@ enum ice_ptp_pin_nvm {
/* Pin definitions for PTP */
#define ICE_N_PINS_MAX 6
-#define ICE_SMA_PINS_NUM 4
-#define ICE_PIN_DESC_ARR_LEN(_arr) (sizeof(_arr) / \
- sizeof(struct ice_ptp_pin_desc))
/**
* struct ice_ptp_pin_desc - hardware pin description data
* @name_idx: index of the name of pin in ice_pin_names
* @gpio: the associated GPIO input and output pins
+ * @delay: input and output signal delays in nanoseconds
*
* Structure describing a PTP-capable GPIO pin that extends ptp_pin_desc array
* for the device. Device families have separate sets of available pins with
@@ -219,6 +216,7 @@ enum ice_ptp_pin_nvm {
struct ice_ptp_pin_desc {
int name_idx;
int gpio[2];
+ unsigned int delay[2];
};
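/* Hypothetical entry (values invented for illustration, not taken from the
 * driver tables): gpio[0] is the input GPIO used for external timestamps,
 * gpio[1] the output GPIO used for periodic output, and delay[0]/delay[1]
 * the corresponding input/output signal delays in nanoseconds that
 * ice_ptp_extts_event() and ice_ptp_cfg_perout() compensate for.
 */
static const struct ice_ptp_pin_desc example_pin_desc = {
	.name_idx = 0,		/* index into ice_pin_names[] */
	.gpio = { 1, 1 },	/* same physical GPIO for input and output */
	.delay = { 0, 11 },	/* 0 ns input delay, 11 ns output delay */
};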
/**
@@ -239,6 +237,7 @@ struct ice_ptp_pin_desc {
* @clock: pointer to registered PTP clock device
* @tstamp_config: hardware timestamping configuration
* @reset_time: kernel time after clock stop on reset
+ * @tx_hwtstamp_good: number of completed Tx timestamp requests
* @tx_hwtstamp_skipped: number of Tx time stamp requests skipped
* @tx_hwtstamp_timeouts: number of Tx skbs discarded with no time stamp
* @tx_hwtstamp_flushed: number of Tx skbs flushed due to interface closed
@@ -261,8 +260,9 @@ struct ice_ptp {
struct ptp_extts_request extts_rqs[GLTSYN_EVNT_H_IDX_MAX];
struct ptp_clock_info info;
struct ptp_clock *clock;
- struct hwtstamp_config tstamp_config;
+ struct kernel_hwtstamp_config tstamp_config;
u64 reset_time;
+ u64 tx_hwtstamp_good;
u32 tx_hwtstamp_skipped;
u32 tx_hwtstamp_timeouts;
u32 tx_hwtstamp_flushed;
@@ -293,8 +293,11 @@ struct ice_ptp {
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
int ice_ptp_clock_index(struct ice_pf *pf);
struct ice_pf;
-int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr);
-int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr);
+int ice_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config);
+int ice_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack);
void ice_ptp_restore_timestamp_mode(struct ice_pf *pf);
void ice_ptp_extts_event(struct ice_pf *pf);
@@ -302,6 +305,9 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx);
void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx);
enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf);
+irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf);
+u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
+ struct ptp_system_timestamp *sts);
u64 ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
const struct ice_pkt_ctx *pkt_ctx);
@@ -310,14 +316,18 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf,
enum ice_reset_req reset_type);
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
-void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup);
+void ice_ptp_link_change(struct ice_pf *pf, bool linkup);
#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
-static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+
+static inline int ice_ptp_hwtstamp_get(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config)
{
return -EOPNOTSUPP;
}
-static inline int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+static inline int ice_ptp_hwtstamp_set(struct net_device *netdev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
{
return -EOPNOTSUPP;
}
@@ -340,6 +350,17 @@ static inline bool ice_ptp_process_ts(struct ice_pf *pf)
return true;
}
+static inline irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf)
+{
+ return IRQ_HANDLED;
+}
+
+static inline u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf,
+ struct ptp_system_timestamp *sts)
+{
+ return 0;
+}
+
static inline u64
ice_ptp_get_rx_hwts(const union ice_32b_rx_flex_desc *rx_desc,
const struct ice_pkt_ctx *pkt_ctx)
@@ -358,7 +379,7 @@ static inline void ice_ptp_prepare_for_reset(struct ice_pf *pf,
}
static inline void ice_ptp_init(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { }
-static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
+static inline void ice_ptp_link_change(struct ice_pf *pf, bool linkup)
{
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
index 585ce200c60f..19dddd9b53dd 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -10,70 +10,25 @@
/* Constants defined for the PTP 1588 clock hardware. */
const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES] = {
- /* ETH56G_PHY_REG_PTP */
- {
- /* base_addr */
- {
- 0x092000,
- 0x126000,
- 0x1BA000,
- 0x24E000,
- 0x2E2000,
- },
- /* step */
- 0x98,
+ [ETH56G_PHY_REG_PTP] = {
+ .base_addr = 0x092000,
+ .step = 0x98,
},
- /* ETH56G_PHY_MEM_PTP */
- {
- /* base_addr */
- {
- 0x093000,
- 0x127000,
- 0x1BB000,
- 0x24F000,
- 0x2E3000,
- },
- /* step */
- 0x200,
+ [ETH56G_PHY_MEM_PTP] = {
+ .base_addr = 0x093000,
+ .step = 0x200,
},
- /* ETH56G_PHY_REG_XPCS */
- {
- /* base_addr */
- {
- 0x000000,
- 0x009400,
- 0x128000,
- 0x1BC000,
- 0x250000,
- },
- /* step */
- 0x21000,
+ [ETH56G_PHY_REG_XPCS] = {
+ .base_addr = 0x000000,
+ .step = 0x21000,
},
- /* ETH56G_PHY_REG_MAC */
- {
- /* base_addr */
- {
- 0x085000,
- 0x119000,
- 0x1AD000,
- 0x241000,
- 0x2D5000,
- },
- /* step */
- 0x1000,
+ [ETH56G_PHY_REG_MAC] = {
+ .base_addr = 0x085000,
+ .step = 0x1000,
},
- /* ETH56G_PHY_REG_GPCS */
- {
- /* base_addr */
- {
- 0x084000,
- 0x118000,
- 0x1AC000,
- 0x240000,
- 0x2D4000,
- },
- /* step */
- 0x400,
+ [ETH56G_PHY_REG_GPCS] = {
+ .base_addr = 0x084000,
+ .step = 0x400,
},
};
@@ -131,7 +86,7 @@ struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = {
.rx_offset = {
.serdes = 0xffffeb27, /* -10.42424 */
.no_fec = 0xffffcccd, /* -25.6 */
- .fc = 0xfffe0014, /* -255.96 */
+ .fc = 0xfffc557b, /* -469.26 */
.sfd = 0x4a4, /* 2.32 */
.bs_ds = 0x32 /* 0.0969697 */
}
@@ -326,7 +281,7 @@ struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = {
/* struct ice_time_ref_info_e82x
*
- * E822 hardware can use different sources as the reference for the PTP
+ * E82X hardware can use different sources as the reference for the PTP
* hardware clock. Each clock has different characteristics such as a slightly
* different frequency, etc.
*
@@ -334,226 +289,53 @@ struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = {
* reference. See the struct ice_time_ref_info_e82x for information about the
* meaning of each constant.
*/
-const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ] = {
- /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
+const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TSPLL_FREQ] = {
+ /* ICE_TSPLL_FREQ_25_000 -> 25 MHz */
{
/* pll_freq */
823437500, /* 823.4375 MHz PLL */
/* nominal_incval */
0x136e44fabULL,
- /* pps_delay */
- 11,
},
- /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
+ /* ICE_TSPLL_FREQ_122_880 -> 122.88 MHz */
{
/* pll_freq */
783360000, /* 783.36 MHz */
/* nominal_incval */
0x146cc2177ULL,
- /* pps_delay */
- 12,
},
- /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
+ /* ICE_TSPLL_FREQ_125_000 -> 125 MHz */
{
/* pll_freq */
796875000, /* 796.875 MHz */
/* nominal_incval */
0x141414141ULL,
- /* pps_delay */
- 12,
},
- /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
+ /* ICE_TSPLL_FREQ_153_600 -> 153.6 MHz */
{
/* pll_freq */
816000000, /* 816 MHz */
/* nominal_incval */
0x139b9b9baULL,
- /* pps_delay */
- 12,
},
- /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
+ /* ICE_TSPLL_FREQ_156_250 -> 156.25 MHz */
{
/* pll_freq */
830078125, /* 830.78125 MHz */
/* nominal_incval */
0x134679aceULL,
- /* pps_delay */
- 11,
},
- /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
+ /* ICE_TSPLL_FREQ_245_760 -> 245.76 MHz */
{
/* pll_freq */
783360000, /* 783.36 MHz */
/* nominal_incval */
0x146cc2177ULL,
- /* pps_delay */
- 12,
- },
-};
-
-const struct ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
- /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
- {
- /* refclk_pre_div */
- 1,
- /* feedback_div */
- 197,
- /* frac_n_div */
- 2621440,
- /* post_pll_div */
- 6,
- },
-
- /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
- {
- /* refclk_pre_div */
- 5,
- /* feedback_div */
- 223,
- /* frac_n_div */
- 524288,
- /* post_pll_div */
- 7,
- },
-
- /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
- {
- /* refclk_pre_div */
- 5,
- /* feedback_div */
- 223,
- /* frac_n_div */
- 524288,
- /* post_pll_div */
- 7,
- },
-
- /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
- {
- /* refclk_pre_div */
- 5,
- /* feedback_div */
- 159,
- /* frac_n_div */
- 1572864,
- /* post_pll_div */
- 6,
- },
-
- /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
- {
- /* refclk_pre_div */
- 5,
- /* feedback_div */
- 159,
- /* frac_n_div */
- 1572864,
- /* post_pll_div */
- 6,
- },
-
- /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
- {
- /* refclk_pre_div */
- 10,
- /* feedback_div */
- 223,
- /* frac_n_div */
- 524288,
- /* post_pll_div */
- 7,
- },
-};
-
-const
-struct ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
- /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
- {
- /* tspll_ck_refclkfreq */
- 0x19,
- /* tspll_ndivratio */
- 1,
- /* tspll_fbdiv_intgr */
- 320,
- /* tspll_fbdiv_frac */
- 0,
- /* ref1588_ck_div */
- 0,
- },
-
- /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
- {
- /* tspll_ck_refclkfreq */
- 0x29,
- /* tspll_ndivratio */
- 3,
- /* tspll_fbdiv_intgr */
- 195,
- /* tspll_fbdiv_frac */
- 1342177280UL,
- /* ref1588_ck_div */
- 0,
- },
-
- /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
- {
- /* tspll_ck_refclkfreq */
- 0x3E,
- /* tspll_ndivratio */
- 2,
- /* tspll_fbdiv_intgr */
- 128,
- /* tspll_fbdiv_frac */
- 0,
- /* ref1588_ck_div */
- 0,
- },
-
- /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
- {
- /* tspll_ck_refclkfreq */
- 0x33,
- /* tspll_ndivratio */
- 3,
- /* tspll_fbdiv_intgr */
- 156,
- /* tspll_fbdiv_frac */
- 1073741824UL,
- /* ref1588_ck_div */
- 0,
- },
-
- /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
- {
- /* tspll_ck_refclkfreq */
- 0x1F,
- /* tspll_ndivratio */
- 5,
- /* tspll_fbdiv_intgr */
- 256,
- /* tspll_fbdiv_frac */
- 0,
- /* ref1588_ck_div */
- 0,
- },
-
- /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
- {
- /* tspll_ck_refclkfreq */
- 0x52,
- /* tspll_ndivratio */
- 3,
- /* tspll_fbdiv_intgr */
- 97,
- /* tspll_fbdiv_frac */
- 2818572288UL,
- /* ref1588_ck_div */
- 0,
},
};
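/* Cross-check (illustrative, not part of the patch): nominal_incval appears
 * to be the PLL tick period expressed with 32 fractional bits of a
 * nanosecond, i.e. roughly period_ns * 2^32.  For the 25 MHz TIME_REF entry
 * above:
 *
 *   1e9 ns / 823437500 Hz ~= 1.2144213 ns per PLL tick
 *   1.2144213 ns * 2^32   ~= 0x136E44FAB
 *
 * which matches the nominal_incval in the table.
 */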
@@ -761,9 +543,9 @@ const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD] = {
/* rx_desk_rsgb_par */
644531250, /* 644.53125 MHz Reed Solomon gearbox */
/* tx_desk_rsgb_pcs */
- 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ 390625000, /* 390.625 MHz Reed Solomon gearbox */
/* rx_desk_rsgb_pcs */
- 644531250, /* 644.53125 MHz Reed Solomon gearbox */
+ 390625000, /* 390.625 MHz Reed Solomon gearbox */
/* tx_fixed_delay */
1620,
/* pmd_adj_divisor */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index dfd49732bd5b..35680dbe4a7f 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -6,7 +6,6 @@
#include "ice_common.h"
#include "ice_ptp_hw.h"
#include "ice_ptp_consts.h"
-#include "ice_cgu_regs.h"
static struct dpll_pin_frequency ice_cgu_pin_freq_common[] = {
DPLL_PIN_FREQUENCY_1PPS,
@@ -150,7 +149,7 @@ static const struct ice_cgu_pin_desc ice_e823_zl_cgu_outputs[] = {
* | 8 bit s | | 32 bits |
* +---------------+ +---------------+
*
- * The increment value is added to the GLSTYN_TIME_R and GLSTYN_TIME_L
+ * The increment value is added to the GLTSYN_TIME_R and GLTSYN_TIME_L
* registers every clock source tick. Depending on the specific device
* configuration, the clock source frequency could be one of a number of
* values.
@@ -226,547 +225,6 @@ static u64 ice_ptp_read_src_incval(struct ice_hw *hw)
}
/**
- * ice_read_cgu_reg_e82x - Read a CGU register
- * @hw: pointer to the HW struct
- * @addr: Register address to read
- * @val: storage for register value read
- *
- * Read the contents of a register of the Clock Generation Unit. Only
- * applicable to E822 devices.
- *
- * Return: 0 on success, other error codes when failed to read from CGU
- */
-static int ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val)
-{
- struct ice_sbq_msg_input cgu_msg = {
- .opcode = ice_sbq_msg_rd,
- .dest_dev = cgu,
- .msg_addr_low = addr
- };
- int err;
-
- err = ice_sbq_rw_reg(hw, &cgu_msg, ICE_AQ_FLAG_RD);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
- addr, err);
- return err;
- }
-
- *val = cgu_msg.data;
-
- return 0;
-}
-
-/**
- * ice_write_cgu_reg_e82x - Write a CGU register
- * @hw: pointer to the HW struct
- * @addr: Register address to write
- * @val: value to write into the register
- *
- * Write the specified value to a register of the Clock Generation Unit. Only
- * applicable to E822 devices.
- *
- * Return: 0 on success, other error codes when failed to write to CGU
- */
-static int ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val)
-{
- struct ice_sbq_msg_input cgu_msg = {
- .opcode = ice_sbq_msg_wr,
- .dest_dev = cgu,
- .msg_addr_low = addr,
- .data = val
- };
- int err;
-
- err = ice_sbq_rw_reg(hw, &cgu_msg, ICE_AQ_FLAG_RD);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
- addr, err);
- return err;
- }
-
- return err;
-}
-
-/**
- * ice_clk_freq_str - Convert time_ref_freq to string
- * @clk_freq: Clock frequency
- *
- * Return: specified TIME_REF clock frequency converted to a string
- */
-static const char *ice_clk_freq_str(enum ice_time_ref_freq clk_freq)
-{
- switch (clk_freq) {
- case ICE_TIME_REF_FREQ_25_000:
- return "25 MHz";
- case ICE_TIME_REF_FREQ_122_880:
- return "122.88 MHz";
- case ICE_TIME_REF_FREQ_125_000:
- return "125 MHz";
- case ICE_TIME_REF_FREQ_153_600:
- return "153.6 MHz";
- case ICE_TIME_REF_FREQ_156_250:
- return "156.25 MHz";
- case ICE_TIME_REF_FREQ_245_760:
- return "245.76 MHz";
- default:
- return "Unknown";
- }
-}
-
-/**
- * ice_clk_src_str - Convert time_ref_src to string
- * @clk_src: Clock source
- *
- * Return: specified clock source converted to its string name
- */
-static const char *ice_clk_src_str(enum ice_clk_src clk_src)
-{
- switch (clk_src) {
- case ICE_CLK_SRC_TCXO:
- return "TCXO";
- case ICE_CLK_SRC_TIME_REF:
- return "TIME_REF";
- default:
- return "Unknown";
- }
-}
-
-/**
- * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit
- * @hw: pointer to the HW struct
- * @clk_freq: Clock frequency to program
- * @clk_src: Clock source to select (TIME_REF, or TCXO)
- *
- * Configure the Clock Generation Unit with the desired clock frequency and
- * time reference, enabling the PLL which drives the PTP hardware clock.
- *
- * Return:
- * * %0 - success
- * * %-EINVAL - input parameters are incorrect
- * * %-EBUSY - failed to lock TS PLL
- * * %other - CGU read/write failure
- */
-static int ice_cfg_cgu_pll_e82x(struct ice_hw *hw,
- enum ice_time_ref_freq clk_freq,
- enum ice_clk_src clk_src)
-{
- union tspll_ro_bwm_lf bwm_lf;
- union nac_cgu_dword19 dw19;
- union nac_cgu_dword22 dw22;
- union nac_cgu_dword24 dw24;
- union nac_cgu_dword9 dw9;
- int err;
-
- if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
- dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
- clk_freq);
- return -EINVAL;
- }
-
- if (clk_src >= NUM_ICE_CLK_SRC) {
- dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
- clk_src);
- return -EINVAL;
- }
-
- if (clk_src == ICE_CLK_SRC_TCXO &&
- clk_freq != ICE_TIME_REF_FREQ_25_000) {
- dev_warn(ice_hw_to_dev(hw),
- "TCXO only supports 25 MHz frequency\n");
- return -EINVAL;
- }
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
- if (err)
- return err;
-
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.ts_pll_enable ? "enabled" : "disabled",
- ice_clk_src_str(dw24.time_ref_sel),
- ice_clk_freq_str(dw9.time_ref_freq_sel),
- bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked");
-
- /* Disable the PLL before changing the clock source or frequency */
- if (dw24.ts_pll_enable) {
- dw24.ts_pll_enable = 0;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
- }
-
- /* Set the frequency */
- dw9.time_ref_freq_sel = clk_freq;
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
- if (err)
- return err;
-
- /* Configure the TS PLL feedback divisor */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
- if (err)
- return err;
-
- dw19.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
- dw19.tspll_ndivratio = 1;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
- if (err)
- return err;
-
- /* Configure the TS PLL post divisor */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
- if (err)
- return err;
-
- dw22.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
- dw22.time1588clk_sel_div2 = 0;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
- if (err)
- return err;
-
- /* Configure the TS PLL pre divisor and clock source */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
- if (err)
- return err;
-
- dw24.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
- dw24.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
- dw24.time_ref_sel = clk_src;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
-
- /* Finally, enable the PLL */
- dw24.ts_pll_enable = 1;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
-
- /* Wait to verify if the PLL locks */
- usleep_range(1000, 5000);
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
- if (err)
- return err;
-
- if (!bwm_lf.plllock_true_lock_cri) {
- dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
- return -EBUSY;
- }
-
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.ts_pll_enable ? "enabled" : "disabled",
- ice_clk_src_str(dw24.time_ref_sel),
- ice_clk_freq_str(dw9.time_ref_freq_sel),
- bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked");
-
- return 0;
-}
-
-/**
- * ice_cfg_cgu_pll_e825c - Configure the Clock Generation Unit for E825-C
- * @hw: pointer to the HW struct
- * @clk_freq: Clock frequency to program
- * @clk_src: Clock source to select (TIME_REF, or TCXO)
- *
- * Configure the Clock Generation Unit with the desired clock frequency and
- * time reference, enabling the PLL which drives the PTP hardware clock.
- *
- * Return:
- * * %0 - success
- * * %-EINVAL - input parameters are incorrect
- * * %-EBUSY - failed to lock TS PLL
- * * %other - CGU read/write failure
- */
-static int ice_cfg_cgu_pll_e825c(struct ice_hw *hw,
- enum ice_time_ref_freq clk_freq,
- enum ice_clk_src clk_src)
-{
- union tspll_ro_lock_e825c ro_lock;
- union nac_cgu_dword16_e825c dw16;
- union nac_cgu_dword23_e825c dw23;
- union nac_cgu_dword19 dw19;
- union nac_cgu_dword22 dw22;
- union nac_cgu_dword24 dw24;
- union nac_cgu_dword9 dw9;
- int err;
-
- if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
- dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
- clk_freq);
- return -EINVAL;
- }
-
- if (clk_src >= NUM_ICE_CLK_SRC) {
- dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
- clk_src);
- return -EINVAL;
- }
-
- if (clk_src == ICE_CLK_SRC_TCXO &&
- clk_freq != ICE_TIME_REF_FREQ_156_250) {
- dev_warn(ice_hw_to_dev(hw),
- "TCXO only supports 156.25 MHz frequency\n");
- return -EINVAL;
- }
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD16_E825C, &dw16.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, &dw23.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_LOCK_E825C, &ro_lock.val);
- if (err)
- return err;
-
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.ts_pll_enable ? "enabled" : "disabled",
- ice_clk_src_str(dw23.time_ref_sel),
- ice_clk_freq_str(dw9.time_ref_freq_sel),
- ro_lock.plllock_true_lock_cri ? "locked" : "unlocked");
-
- /* Disable the PLL before changing the clock source or frequency */
- if (dw23.ts_pll_enable) {
- dw23.ts_pll_enable = 0;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C,
- dw23.val);
- if (err)
- return err;
- }
-
- /* Set the frequency */
- dw9.time_ref_freq_sel = clk_freq;
-
- /* Enable the correct receiver */
- if (clk_src == ICE_CLK_SRC_TCXO) {
- dw9.time_ref_en = 0;
- dw9.clk_eref0_en = 1;
- } else {
- dw9.time_ref_en = 1;
- dw9.clk_eref0_en = 0;
- }
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
- if (err)
- return err;
-
- /* Choose the referenced frequency */
- dw16.tspll_ck_refclkfreq =
- e825c_cgu_params[clk_freq].tspll_ck_refclkfreq;
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD16_E825C, dw16.val);
- if (err)
- return err;
-
- /* Configure the TS PLL feedback divisor */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
- if (err)
- return err;
-
- dw19.tspll_fbdiv_intgr =
- e825c_cgu_params[clk_freq].tspll_fbdiv_intgr;
- dw19.tspll_ndivratio =
- e825c_cgu_params[clk_freq].tspll_ndivratio;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
- if (err)
- return err;
-
- /* Configure the TS PLL post divisor */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
- if (err)
- return err;
-
- /* These two are constant for E825C */
- dw22.time1588clk_div = 5;
- dw22.time1588clk_sel_div2 = 0;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
- if (err)
- return err;
-
- /* Configure the TS PLL pre divisor and clock source */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, &dw23.val);
- if (err)
- return err;
-
- dw23.ref1588_ck_div =
- e825c_cgu_params[clk_freq].ref1588_ck_div;
- dw23.time_ref_sel = clk_src;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, dw23.val);
- if (err)
- return err;
-
- dw24.tspll_fbdiv_frac =
- e825c_cgu_params[clk_freq].tspll_fbdiv_frac;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
-
- /* Finally, enable the PLL */
- dw23.ts_pll_enable = 1;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, dw23.val);
- if (err)
- return err;
-
- /* Wait to verify if the PLL locks */
- usleep_range(1000, 5000);
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_LOCK_E825C, &ro_lock.val);
- if (err)
- return err;
-
- if (!ro_lock.plllock_true_lock_cri) {
- dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
- return -EBUSY;
- }
-
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.ts_pll_enable ? "enabled" : "disabled",
- ice_clk_src_str(dw23.time_ref_sel),
- ice_clk_freq_str(dw9.time_ref_freq_sel),
- ro_lock.plllock_true_lock_cri ? "locked" : "unlocked");
-
- return 0;
-}
-
-#define ICE_ONE_PPS_OUT_AMP_MAX 3
-
-/**
- * ice_cgu_cfg_pps_out - Configure 1PPS output from CGU
- * @hw: pointer to the HW struct
- * @enable: true to enable 1PPS output, false to disable it
- *
- * Return: 0 on success, other negative error code when CGU read/write failed
- */
-int ice_cgu_cfg_pps_out(struct ice_hw *hw, bool enable)
-{
- union nac_cgu_dword9 dw9;
- int err;
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
- if (err)
- return err;
-
- dw9.one_pps_out_en = enable;
- dw9.one_pps_out_amp = enable * ICE_ONE_PPS_OUT_AMP_MAX;
- return ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
-}
-
-/**
- * ice_cfg_cgu_pll_dis_sticky_bits_e82x - disable TS PLL sticky bits
- * @hw: pointer to the HW struct
- *
- * Configure the Clock Generation Unit TS PLL sticky bits so they don't latch on
- * losing TS PLL lock, but always show current state.
- *
- * Return: 0 on success, other error codes when failed to read/write CGU
- */
-static int ice_cfg_cgu_pll_dis_sticky_bits_e82x(struct ice_hw *hw)
-{
- union tspll_cntr_bist_settings cntr_bist;
- int err;
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
- &cntr_bist.val);
- if (err)
- return err;
-
- /* Disable sticky lock detection so lock err reported is accurate */
- cntr_bist.i_plllock_sel_0 = 0;
- cntr_bist.i_plllock_sel_1 = 0;
-
- return ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
- cntr_bist.val);
-}
-
-/**
- * ice_cfg_cgu_pll_dis_sticky_bits_e825c - disable TS PLL sticky bits for E825-C
- * @hw: pointer to the HW struct
- *
- * Configure the Clock Generation Unit TS PLL sticky bits so they don't latch on
- * losing TS PLL lock, but always show current state.
- *
- * Return: 0 on success, other error codes when failed to read/write CGU
- */
-static int ice_cfg_cgu_pll_dis_sticky_bits_e825c(struct ice_hw *hw)
-{
- union tspll_bw_tdc_e825c bw_tdc;
- int err;
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_BW_TDC_E825C, &bw_tdc.val);
- if (err)
- return err;
-
- bw_tdc.i_plllock_sel_1_0 = 0;
-
- return ice_write_cgu_reg_e82x(hw, TSPLL_BW_TDC_E825C, bw_tdc.val);
-}
-
-/**
- * ice_init_cgu_e82x - Initialize CGU with settings from firmware
- * @hw: pointer to the HW structure
- *
- * Initialize the Clock Generation Unit of the E822 device.
- *
- * Return: 0 on success, other error codes when failed to read/write/cfg CGU
- */
-static int ice_init_cgu_e82x(struct ice_hw *hw)
-{
- struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
- int err;
-
- /* Disable sticky lock detection so lock err reported is accurate */
- if (ice_is_e825c(hw))
- err = ice_cfg_cgu_pll_dis_sticky_bits_e825c(hw);
- else
- err = ice_cfg_cgu_pll_dis_sticky_bits_e82x(hw);
- if (err)
- return err;
-
- /* Configure the CGU PLL using the parameters from the function
- * capabilities.
- */
- if (ice_is_e825c(hw))
- err = ice_cfg_cgu_pll_e825c(hw, ts_info->time_ref,
- (enum ice_clk_src)ts_info->clk_src);
- else
- err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref,
- (enum ice_clk_src)ts_info->clk_src);
-
- return err;
-}
-
-/**
* ice_ptp_tmr_cmd_to_src_reg - Convert to source timer command value
* @hw: pointer to HW struct
* @cmd: Timer command
@@ -827,8 +285,9 @@ static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw,
/* Certain hardware families share the same register values for the
* port register and source timer register.
*/
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
return ice_ptp_tmr_cmd_to_src_reg(hw, cmd) & TS_CMD_MASK_E810;
default:
break;
@@ -873,8 +332,12 @@ static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw,
*/
void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
u32 cmd_val = ice_ptp_tmr_cmd_to_src_reg(hw, cmd);
+ if (!ice_is_primary(hw))
+ hw = ice_get_primary_hw(pf);
+
wr32(hw, GLTSYN_CMD, cmd_val);
}
@@ -890,41 +353,78 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
{
struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+ if (!ice_is_primary(hw))
+ hw = ice_get_primary_hw(pf);
+
guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
ice_flush(hw);
}
+/**
+ * ice_ptp_cfg_sync_delay - Configure PHC to PHY synchronization delay
+ * @hw: pointer to HW struct
+ * @delay: delay between PHC and PHY SYNC command execution in nanoseconds
+ */
+static void ice_ptp_cfg_sync_delay(const struct ice_hw *hw, u32 delay)
+{
+ wr32(hw, GLTSYN_SYNC_DLAY, delay);
+ ice_flush(hw);
+}
+
/* 56G PHY device functions
*
* The following functions operate on devices with the ETH 56G PHY.
*/
/**
+ * ice_ptp_get_dest_dev_e825 - get destination PHY for given port number
+ * @hw: pointer to the HW struct
+ * @port: destination port
+ *
+ * Return: destination sideband queue PHY device.
+ */
+static enum ice_sbq_dev_id ice_ptp_get_dest_dev_e825(struct ice_hw *hw,
+ u8 port)
+{
+ u8 curr_phy, tgt_phy;
+
+ tgt_phy = port >= hw->ptp.ports_per_phy;
+ curr_phy = hw->lane_num >= hw->ptp.ports_per_phy;
+ /* In the driver, lanes 4..7 are in fact 0..3 on a second PHY.
+ * On a single complex E825C, PHY 0 is always destination device phy_0
+ * and PHY 1 is phy_0_peer.
+ * On a dual complex E825C, device phy_0 points to the PHY on the current
+ * complex and phy_0_peer to the PHY on the other complex.
+ */
+ if ((!ice_is_dual(hw) && tgt_phy == 1) ||
+ (ice_is_dual(hw) && tgt_phy != curr_phy))
+ return ice_sbq_dev_phy_0_peer;
+ else
+ return ice_sbq_dev_phy_0;
+}
+
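A hedged, userspace-only sketch of the destination selection above, assuming ports_per_phy = 4. The enum values, the is_dual flag and the lane_num argument stand in for ice_is_dual(), hw->lane_num and the ice_sbq_dev_* identifiers; they are illustrative, not the driver's API.

#include <stdbool.h>
#include <stdio.h>

enum dest { PHY_0, PHY_0_PEER }; /* stand-ins for ice_sbq_dev_phy_0{,_peer} */

/* Mirror of the selection logic: which sideband device reaches @port? */
static enum dest dest_dev(unsigned int port, unsigned int lane_num,
			  unsigned int ports_per_phy, bool is_dual)
{
	bool tgt_phy = port >= ports_per_phy;      /* PHY holding the target port */
	bool curr_phy = lane_num >= ports_per_phy; /* PHY holding the current port */

	if ((!is_dual && tgt_phy) || (is_dual && tgt_phy != curr_phy))
		return PHY_0_PEER;
	return PHY_0;
}

int main(void)
{
	/* Single complex, current lane 0: ports 4..7 go through the peer PHY. */
	for (unsigned int port = 0; port < 8; port++)
		printf("port %u -> %s\n", port,
		       dest_dev(port, 0, 4, false) == PHY_0 ? "phy_0" : "phy_0_peer");
	return 0;
}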
+/**
* ice_write_phy_eth56g - Write a PHY port register
* @hw: pointer to the HW struct
- * @phy_idx: PHY index
+ * @port: destination port
* @addr: PHY register address
* @val: Value to write
*
* Return: 0 on success, other error codes when failed to write to PHY
*/
-static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
- u32 val)
+static int ice_write_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 val)
{
- struct ice_sbq_msg_input phy_msg;
+ struct ice_sbq_msg_input msg = {
+ .dest_dev = ice_ptp_get_dest_dev_e825(hw, port),
+ .opcode = ice_sbq_msg_wr,
+ .msg_addr_low = lower_16_bits(addr),
+ .msg_addr_high = upper_16_bits(addr),
+ .data = val
+ };
int err;
- phy_msg.opcode = ice_sbq_msg_wr;
-
- phy_msg.msg_addr_low = lower_16_bits(addr);
- phy_msg.msg_addr_high = upper_16_bits(addr);
-
- phy_msg.data = val;
- phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx];
-
- err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD);
-
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err)
ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
err);
@@ -935,41 +435,36 @@ static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
/**
* ice_read_phy_eth56g - Read a PHY port register
* @hw: pointer to the HW struct
- * @phy_idx: PHY index
+ * @port: destination port
* @addr: PHY register address
* @val: Value to read
*
* Return: 0 on success, other error codes when failed to read from PHY
*/
-static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
- u32 *val)
+static int ice_read_phy_eth56g(struct ice_hw *hw, u8 port, u32 addr, u32 *val)
{
- struct ice_sbq_msg_input phy_msg;
+ struct ice_sbq_msg_input msg = {
+ .dest_dev = ice_ptp_get_dest_dev_e825(hw, port),
+ .opcode = ice_sbq_msg_rd,
+ .msg_addr_low = lower_16_bits(addr),
+ .msg_addr_high = upper_16_bits(addr)
+ };
int err;
- phy_msg.opcode = ice_sbq_msg_rd;
-
- phy_msg.msg_addr_low = lower_16_bits(addr);
- phy_msg.msg_addr_high = upper_16_bits(addr);
-
- phy_msg.data = 0;
- phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx];
-
- err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD);
- if (err) {
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
+ if (err)
ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
err);
- return err;
- }
-
- *val = phy_msg.data;
+ else
+ *val = msg.data;
- return 0;
+ return err;
}
/**
* ice_phy_res_address_eth56g - Calculate a PHY port register address
- * @port: Port number to be written
+ * @hw: pointer to the HW struct
+ * @lane: Lane number to be written
* @res_type: resource type (register/memory)
* @offset: Offset from PHY port register base
* @addr: The result address
@@ -978,17 +473,19 @@ static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
* * %0 - success
* * %EINVAL - invalid port number or resource type
*/
-static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type,
- u32 offset, u32 *addr)
+static int ice_phy_res_address_eth56g(struct ice_hw *hw, u8 lane,
+ enum eth56g_res_type res_type,
+ u32 offset,
+ u32 *addr)
{
- u8 lane = port % ICE_PORTS_PER_QUAD;
- u8 phy = ICE_GET_QUAD_NUM(port);
-
if (res_type >= NUM_ETH56G_PHY_RES)
return -EINVAL;
- *addr = eth56g_phy_res[res_type].base[phy] +
+ /* Lanes 4..7 are in fact 0..3 on a second PHY */
+ lane %= hw->ptp.ports_per_phy;
+ *addr = eth56g_phy_res[res_type].base_addr +
lane * eth56g_phy_res[res_type].step + offset;
+
return 0;
}
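For illustration only, a standalone sketch of the address arithmetic above: lanes 4..7 fold back onto 0..3 and the register address is base_addr + lane * step + offset. The base, step and offset numbers below are invented placeholders, not values from eth56g_phy_res.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t base_addr = 0x1000;  /* placeholder, not the real PTP base */
	const uint32_t step = 0x100;        /* placeholder per-lane stride */
	const uint32_t offset = 0x20;       /* placeholder register offset */
	const unsigned int ports_per_phy = 4;

	for (unsigned int port = 0; port < 8; port++) {
		unsigned int lane = port % ports_per_phy; /* lanes 4..7 -> 0..3 */
		uint32_t addr = base_addr + lane * step + offset;

		printf("port %u -> lane %u, addr 0x%04x\n", port, lane, addr);
	}
	return 0;
}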
@@ -1008,19 +505,17 @@ static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type,
static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
u32 val, enum eth56g_res_type res_type)
{
- u8 phy_port = port % hw->ptp.ports_per_phy;
- u8 phy_idx = port / hw->ptp.ports_per_phy;
u32 addr;
int err;
if (port >= hw->ptp.num_lports)
return -EINVAL;
- err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr);
+ err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr);
if (err)
return err;
- return ice_write_phy_eth56g(hw, phy_idx, addr, val);
+ return ice_write_phy_eth56g(hw, port, addr, val);
}
/**
@@ -1039,19 +534,17 @@ static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
static int ice_read_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
u32 *val, enum eth56g_res_type res_type)
{
- u8 phy_port = port % hw->ptp.ports_per_phy;
- u8 phy_idx = port / hw->ptp.ports_per_phy;
u32 addr;
int err;
if (port >= hw->ptp.num_lports)
return -EINVAL;
- err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr);
+ err = ice_phy_res_address_eth56g(hw, port, res_type, offset, &addr);
if (err)
return err;
- return ice_read_phy_eth56g(hw, phy_idx, addr, val);
+ return ice_read_phy_eth56g(hw, port, addr, val);
}
/**
@@ -1201,6 +694,56 @@ static int ice_write_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset,
}
/**
+ * ice_write_quad_ptp_reg_eth56g - Write a PHY quad register
+ * @hw: pointer to the HW struct
+ * @offset: PHY register offset
+ * @port: Port number
+ * @val: Value to write
+ *
+ * Return:
+ * * %0 - success
+ * * %EIO - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u32 offset, u32 val)
+{
+ u32 addr;
+
+ if (port >= hw->ptp.num_lports)
+ return -EIO;
+
+ addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base_addr + offset;
+
+ return ice_write_phy_eth56g(hw, port, addr, val);
+}
+
+/**
+ * ice_read_quad_ptp_reg_eth56g - Read a PHY quad register
+ * @hw: pointer to the HW struct
+ * @offset: PHY register offset
+ * @port: Port number
+ * @val: Value to read
+ *
+ * Return:
+ * * %0 - success
+ * * %EIO - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_quad_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u32 offset, u32 *val)
+{
+ u32 addr;
+
+ if (port >= hw->ptp.num_lports)
+ return -EIO;
+
+ addr = eth56g_phy_res[ETH56G_PHY_REG_PTP].base_addr + offset;
+
+ return ice_read_phy_eth56g(hw, port, addr, val);
+}
+
+/**
* ice_is_64b_phy_reg_eth56g - Check if this is a 64bit PHY register
* @low_addr: the low address to check
* @high_addr: on return, contains the high address of the 64bit register
@@ -1518,8 +1061,8 @@ static int ice_read_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx,
* lower 8 bits in the low register, and the upper 32 bits in the high
* register.
*/
- *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
-
+ *tstamp = FIELD_PREP(PHY_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_40B_LOW_M, lo);
return 0;
}
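A small sketch, under the same layout the comment above describes (lower 8 bits of the 40-bit timestamp in the low register, upper 32 bits in the high register); the explicit shift and mask here only play the role of the driver's PHY_40B_HIGH_M/PHY_40B_LOW_M field macros.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hi = 0x12345678; /* upper 32 bits read from the high register */
	uint32_t lo = 0xAB;       /* lower 8 bits read from the low register */

	/* Reassemble the 40-bit timestamp: hi occupies bits 39:8, lo bits 7:0. */
	uint64_t tstamp = ((uint64_t)hi << 8) | (lo & 0xFFu);

	printf("40-bit timestamp: 0x%010llx\n", (unsigned long long)tstamp);
	return 0;
}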
@@ -1918,7 +1461,6 @@ ice_phy_get_speed_eth56g(struct ice_link_status *li)
*/
static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
{
- u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
u32 val;
int err;
@@ -1933,8 +1475,8 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) {
case ICE_ETH56G_LNK_SPD_1G:
case ICE_ETH56G_LNK_SPD_2_5G:
- err = ice_read_ptp_reg_eth56g(hw, port_blk,
- PHY_GPCS_CONFIG_REG0, &val);
+ err = ice_read_quad_ptp_reg_eth56g(hw, port,
+ PHY_GPCS_CONFIG_REG0, &val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to read PHY_GPCS_CONFIG_REG0, status: %d",
err);
@@ -1945,8 +1487,8 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
val |= FIELD_PREP(PHY_GPCS_CONFIG_REG0_TX_THR_M,
ICE_ETH56G_NOMINAL_TX_THRESH);
- err = ice_write_ptp_reg_eth56g(hw, port_blk,
- PHY_GPCS_CONFIG_REG0, val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port,
+ PHY_GPCS_CONFIG_REG0, val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_GPCS_CONFIG_REG0, status: %d",
err);
@@ -1987,50 +1529,47 @@ static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
*/
int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port)
{
- u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
- u8 blk_port = port & (ICE_PORTS_PER_QUAD - 1);
+ u8 quad_lane = port % ICE_PORTS_PER_QUAD;
+ u32 addr, val, peer_delay;
bool enable, sfd_ena;
- u32 val, peer_delay;
int err;
enable = hw->ptp.phy.eth56g.onestep_ena;
peer_delay = hw->ptp.phy.eth56g.peer_delay;
sfd_ena = hw->ptp.phy.eth56g.sfd_ena;
- /* PHY_PTP_1STEP_CONFIG */
- err = ice_read_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, &val);
+ addr = PHY_PTP_1STEP_CONFIG;
+ err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &val);
if (err)
return err;
if (enable)
- val |= blk_port;
+ val |= BIT(quad_lane);
else
- val &= ~blk_port;
+ val &= ~BIT(quad_lane);
val &= ~(PHY_PTP_1STEP_T1S_UP64_M | PHY_PTP_1STEP_T1S_DELTA_M);
- err = ice_write_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
if (err)
return err;
- /* PHY_PTP_1STEP_PEER_DELAY */
+ addr = PHY_PTP_1STEP_PEER_DELAY(quad_lane);
val = FIELD_PREP(PHY_PTP_1STEP_PD_DELAY_M, peer_delay);
if (peer_delay)
val |= PHY_PTP_1STEP_PD_ADD_PD_M;
val |= PHY_PTP_1STEP_PD_DLY_V_M;
- err = ice_write_ptp_reg_eth56g(hw, port_blk,
- PHY_PTP_1STEP_PEER_DELAY(blk_port), val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
if (err)
return err;
val &= ~PHY_PTP_1STEP_PD_DLY_V_M;
- err = ice_write_ptp_reg_eth56g(hw, port_blk,
- PHY_PTP_1STEP_PEER_DELAY(blk_port), val);
+ err = ice_write_quad_ptp_reg_eth56g(hw, port, addr, val);
if (err)
return err;
- /* PHY_MAC_XIF_MODE */
- err = ice_read_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, &val);
+ addr = PHY_MAC_XIF_MODE;
+ err = ice_read_mac_reg_eth56g(hw, port, addr, &val);
if (err)
return err;
@@ -2050,7 +1589,7 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port)
FIELD_PREP(PHY_MAC_XIF_TS_BIN_MODE_M, enable) |
FIELD_PREP(PHY_MAC_XIF_TS_SFD_ENA_M, sfd_ena);
- return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, val);
+ return ice_write_mac_reg_eth56g(hw, port, addr, val);
}
/**
@@ -2092,21 +1631,22 @@ static u32 ice_ptp_calc_bitslip_eth56g(struct ice_hw *hw, u8 port, u32 bs,
bool fc, bool rs,
enum ice_eth56g_link_spd spd)
{
- u8 port_offset = port & (ICE_PORTS_PER_QUAD - 1);
- u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
u32 bitslip;
int err;
if (!bs || rs)
return 0;
- if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G)
+ if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G) {
err = ice_read_gpcs_reg_eth56g(hw, port, PHY_GPCS_BITSLIP,
&bitslip);
- else
- err = ice_read_ptp_reg_eth56g(hw, port_blk,
- PHY_REG_SD_BIT_SLIP(port_offset),
- &bitslip);
+ } else {
+ u8 quad_lane = port % ICE_PORTS_PER_QUAD;
+ u32 addr;
+
+ addr = PHY_REG_SD_BIT_SLIP(quad_lane);
+ err = ice_read_quad_ptp_reg_eth56g(hw, port, addr, &bitslip);
+ }
if (err)
return 0;
@@ -2350,6 +1890,7 @@ int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold)
static int ice_read_phy_and_phc_time_eth56g(struct ice_hw *hw, u8 port,
u64 *phy_time, u64 *phc_time)
{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
u64 tx_time, rx_time;
u32 zo, lo;
u8 tmr_idx;
@@ -2369,8 +1910,13 @@ static int ice_read_phy_and_phc_time_eth56g(struct ice_hw *hw, u8 port,
ice_ptp_exec_tmr_cmd(hw);
/* Read the captured PHC time from the shadow time registers */
- zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
- lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
+ if (ice_is_primary(hw)) {
+ zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
+ lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
+ } else {
+ zo = rd32(ice_get_primary_hw(pf), GLTSYN_SHTIME_0(tmr_idx));
+ lo = rd32(ice_get_primary_hw(pf), GLTSYN_SHTIME_L(tmr_idx));
+ }
*phc_time = (u64)lo << 32 | zo;
/* Read the captured PHY time from the PHY shadow registers */
@@ -2507,6 +2053,7 @@ int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset)
*/
int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port)
{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
u32 lo, hi;
u64 incval;
u8 tmr_idx;
@@ -2532,8 +2079,13 @@ int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port)
if (err)
return err;
- lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
- hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
+ if (ice_is_primary(hw)) {
+ lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
+ hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
+ } else {
+ lo = rd32(ice_get_primary_hw(pf), GLTSYN_INCVAL_L(tmr_idx));
+ hi = rd32(ice_get_primary_hw(pf), GLTSYN_INCVAL_H(tmr_idx));
+ }
incval = (u64)hi << 32 | lo;
err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_REG_TIMETUS_L, incval);
@@ -2564,42 +2116,6 @@ int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port)
}
/**
- * ice_sb_access_ena_eth56g - Enable SB devices (PHY and others) access
- * @hw: pointer to HW struct
- * @enable: Enable or disable access
- *
- * Enable sideband devices (PHY and others) access.
- */
-static void ice_sb_access_ena_eth56g(struct ice_hw *hw, bool enable)
-{
- u32 val = rd32(hw, PF_SB_REM_DEV_CTL);
-
- if (enable)
- val |= BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1);
- else
- val &= ~(BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1));
-
- wr32(hw, PF_SB_REM_DEV_CTL, val);
-}
-
-/**
- * ice_ptp_init_phc_eth56g - Perform E82X specific PHC initialization
- * @hw: pointer to HW struct
- *
- * Perform PHC initialization steps specific to E82X devices.
- *
- * Return:
- * * %0 - success
- * * %other - failed to initialize CGU
- */
-static int ice_ptp_init_phc_eth56g(struct ice_hw *hw)
-{
- ice_sb_access_ena_eth56g(hw, true);
- /* Initialize the Clock Generation Unit */
- return ice_init_cgu_e82x(hw);
-}
-
-/**
* ice_ptp_read_tx_hwtstamp_status_eth56g - Get TX timestamp status
* @hw: pointer to the HW struct
* @ts_status: the timestamp mask pointer
@@ -2666,59 +2182,21 @@ static int ice_get_phy_tx_tstamp_ready_eth56g(struct ice_hw *hw, u8 port,
}
/**
- * ice_is_muxed_topo - detect breakout 2x50G topology for E825C
- * @hw: pointer to the HW struct
- *
- * Return: true if it's 2x50 breakout topology, false otherwise
- */
-static bool ice_is_muxed_topo(struct ice_hw *hw)
-{
- u8 link_topo;
- bool mux;
- u32 val;
-
- val = rd32(hw, GLGEN_SWITCH_MODE_CONFIG);
- mux = FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, val);
- val = rd32(hw, GLGEN_MAC_LINK_TOPO);
- link_topo = FIELD_GET(GLGEN_MAC_LINK_TOPO_LINK_TOPO_M, val);
-
- return (mux && link_topo == ICE_LINK_TOPO_UP_TO_2_LINKS);
-}
-
-/**
- * ice_ptp_init_phy_e825c - initialize PHY parameters
+ * ice_ptp_init_phy_e825 - initialize PHY parameters
* @hw: pointer to the HW struct
*/
-static void ice_ptp_init_phy_e825c(struct ice_hw *hw)
+static void ice_ptp_init_phy_e825(struct ice_hw *hw)
{
struct ice_ptp_hw *ptp = &hw->ptp;
struct ice_eth56g_params *params;
- u8 phy;
- ptp->phy_model = ICE_PHY_ETH56G;
params = &ptp->phy.eth56g;
params->onestep_ena = false;
params->peer_delay = 0;
params->sfd_ena = false;
- params->phy_addr[0] = eth56g_phy_0;
- params->phy_addr[1] = eth56g_phy_1;
params->num_phys = 2;
ptp->ports_per_phy = 4;
ptp->num_lports = params->num_phys * ptp->ports_per_phy;
-
- ice_sb_access_ena_eth56g(hw, true);
- for (phy = 0; phy < params->num_phys; phy++) {
- u32 phy_rev;
- int err;
-
- err = ice_read_phy_eth56g(hw, phy, PHY_REG_REVISION, &phy_rev);
- if (err || phy_rev != PHY_REVISION_ETH56G) {
- ptp->phy_model = ICE_PHY_UNSUP;
- return;
- }
- }
-
- ptp->is_2x50g_muxed_topo = ice_is_muxed_topo(hw);
}
/* E822 family functions
@@ -2737,10 +2215,9 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
struct ice_sbq_msg_input *msg, u8 port,
u16 offset)
{
- int phy_port, phy, quadtype;
+ int phy_port, quadtype;
phy_port = port % hw->ptp.ports_per_phy;
- phy = port / hw->ptp.ports_per_phy;
quadtype = ICE_GET_QUAD_NUM(port) %
ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy);
@@ -2752,12 +2229,7 @@ static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
msg->msg_addr_high = P_Q1_H(P_4_BASE + offset, phy_port);
}
- if (phy == 0)
- msg->dest_dev = rmn_0;
- else if (phy == 1)
- msg->dest_dev = rmn_1;
- else
- msg->dest_dev = rmn_2;
+ msg->dest_dev = ice_sbq_dev_phy_0;
}
/**
@@ -2876,7 +2348,7 @@ ice_read_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
ice_fill_phy_msg_e82x(hw, &msg, port, offset);
msg.opcode = ice_sbq_msg_rd;
- err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -2954,7 +2426,7 @@ ice_write_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 val)
msg.opcode = ice_sbq_msg_wr;
msg.data = val;
- err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -3080,7 +2552,7 @@ static int ice_fill_quad_msg_e82x(struct ice_hw *hw,
if (quad >= ICE_GET_QUAD_NUM(hw->ptp.num_lports))
return -EINVAL;
- msg->dest_dev = rmn_0;
+ msg->dest_dev = ice_sbq_dev_phy_0;
if (!(quad % ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy)))
addr = Q_0_BASE + offset;
@@ -3115,7 +2587,7 @@ ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
msg.opcode = ice_sbq_msg_rd;
- err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -3150,7 +2622,7 @@ ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
msg.opcode = ice_sbq_msg_wr;
msg.data = val;
- err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -3199,7 +2671,8 @@ ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
* lower 8 bits in the low register, and the upper 32 bits in the high
* register.
*/
- *tstamp = FIELD_PREP(TS_PHY_HIGH_M, hi) | FIELD_PREP(TS_PHY_LOW_M, lo);
+ *tstamp = FIELD_PREP(PHY_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_40B_LOW_M, lo);
return 0;
}
@@ -3301,7 +2774,6 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
*/
static int ice_ptp_init_phc_e82x(struct ice_hw *hw)
{
- int err;
u32 val;
/* Enable reading switch and PHY registers over the sideband queue */
@@ -3311,11 +2783,6 @@ static int ice_ptp_init_phc_e82x(struct ice_hw *hw)
val |= (PF_SB_REM_DEV_CTL_SWITCH_READ | PF_SB_REM_DEV_CTL_PHY0);
wr32(hw, PF_SB_REM_DEV_CTL, val);
- /* Initialize the Clock Generation Unit */
- err = ice_init_cgu_e82x(hw);
- if (err)
- return err;
-
/* Set window length for all the ports */
return ice_ptp_set_vernier_wl(hw);
}
@@ -4772,7 +4239,6 @@ int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold)
*/
static void ice_ptp_init_phy_e82x(struct ice_ptp_hw *ptp)
{
- ptp->phy_model = ICE_PHY_E82X;
ptp->num_lports = 8;
ptp->ports_per_phy = 8;
}
@@ -4799,9 +4265,9 @@ static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
msg.msg_addr_low = lower_16_bits(addr);
msg.msg_addr_high = upper_16_bits(addr);
msg.opcode = ice_sbq_msg_rd;
- msg.dest_dev = rmn_0;
+ msg.dest_dev = ice_sbq_dev_phy_0;
- err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -4829,10 +4295,10 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
msg.msg_addr_low = lower_16_bits(addr);
msg.msg_addr_high = upper_16_bits(addr);
msg.opcode = ice_sbq_msg_wr;
- msg.dest_dev = rmn_0;
+ msg.dest_dev = ice_sbq_dev_phy_0;
msg.data = val;
- err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
+ err = ice_sbq_rw_reg(hw, &msg, LIBIE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -4856,33 +4322,46 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
static int
ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo)
{
+ struct ice_e810_params *params = &hw->ptp.phy.e810;
+ unsigned long flags;
u32 val;
- u8 i;
+ int err;
+
+ spin_lock_irqsave(&params->atqbal_wq.lock, flags);
+
+ /* Wait for any pending in-progress low latency interrupt */
+ err = wait_event_interruptible_locked_irq(params->atqbal_wq,
+ !(params->atqbal_flags &
+ ATQBAL_FLAGS_INTR_IN_PROGRESS));
+ if (err) {
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
+ return err;
+ }
/* Write TS index to read to the PF register so the FW can read it */
- val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS;
- wr32(hw, PF_SB_ATQBAL, val);
+ val = FIELD_PREP(REG_LL_PROXY_H_TS_IDX, idx) | REG_LL_PROXY_H_EXEC;
+ wr32(hw, REG_LL_PROXY_H, val);
/* Read the register repeatedly until the FW provides us the TS */
- for (i = TS_LL_READ_RETRIES; i > 0; i--) {
- val = rd32(hw, PF_SB_ATQBAL);
+ err = read_poll_timeout_atomic(rd32, val,
+ !FIELD_GET(REG_LL_PROXY_H_EXEC, val), 10,
+ REG_LL_PROXY_H_TIMEOUT_US, false, hw,
+ REG_LL_PROXY_H);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
+ return err;
+ }
- /* When the bit is cleared, the TS is ready in the register */
- if (!(FIELD_GET(TS_LL_READ_TS, val))) {
- /* High 8 bit value of the TS is on the bits 16:23 */
- *hi = FIELD_GET(TS_LL_READ_TS_HIGH, val);
+ /* High 8 bit value of the TS is on the bits 16:23 */
+ *hi = FIELD_GET(REG_LL_PROXY_H_TS_HIGH, val);
- /* Read the low 32 bit value and set the TS valid bit */
- *lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID;
- return 0;
- }
+ /* Read the low 32 bit value and set the TS valid bit */
+ *lo = rd32(hw, REG_LL_PROXY_L) | TS_VALID;
- udelay(10);
- }
+ spin_unlock_irqrestore(&params->atqbal_wq.lock, flags);
- /* FW failed to provide the TS in time */
- ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
- return -EINVAL;
+ return 0;
}
/**
@@ -4953,7 +4432,8 @@ ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
/* For E810 devices, the timestamp is reported with the lower 32 bits
* in the low register, and the upper 8 bits in the high register.
*/
- *tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M);
+ *tstamp = FIELD_PREP(PHY_EXT_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_EXT_40B_LOW_M, lo);
return 0;
}
@@ -5016,8 +4496,7 @@ static int ice_ptp_init_phc_e810(struct ice_hw *hw)
u8 tmr_idx;
int err;
- /* Ensure synchronization delay is zero */
- wr32(hw, GLTSYN_SYNC_DLAY, 0);
+ ice_ptp_cfg_sync_delay(hw, ICE_E810_E830_SYNC_DELAY);
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
@@ -5065,6 +4544,55 @@ static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
}
/**
+ * ice_ptp_prep_phy_adj_ll_e810 - Prep PHY ports for a time adjustment
+ * @hw: pointer to HW struct
+ * @adj: adjustment value to program
+ *
+ * Use the low latency firmware interface to program PHY time adjustment to
+ * all PHY ports.
+ *
+ * Return: 0 on success, negative error code on timeout or interruption
+ */
+static int ice_ptp_prep_phy_adj_ll_e810(struct ice_hw *hw, s32 adj)
+{
+ const u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ struct ice_e810_params *params = &hw->ptp.phy.e810;
+ u32 val;
+ int err;
+
+ spin_lock_irq(&params->atqbal_wq.lock);
+
+ /* Wait for any pending in-progress low latency interrupt */
+ err = wait_event_interruptible_locked_irq(params->atqbal_wq,
+ !(params->atqbal_flags &
+ ATQBAL_FLAGS_INTR_IN_PROGRESS));
+ if (err) {
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ wr32(hw, REG_LL_PROXY_L, adj);
+ val = FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_CMD_M, REG_LL_PROXY_H_PHY_TMR_CMD_ADJ) |
+ FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_IDX_M, tmr_idx) | REG_LL_PROXY_H_EXEC;
+ wr32(hw, REG_LL_PROXY_H, val);
+
+ /* Read the register repeatedly until the FW indicates completion */
+ err = read_poll_timeout_atomic(rd32, val,
+ !FIELD_GET(REG_LL_PROXY_H_EXEC, val),
+ 10, REG_LL_PROXY_H_TIMEOUT_US, false, hw,
+ REG_LL_PROXY_H);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY timer adjustment using low latency interface\n");
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ spin_unlock_irq(&params->atqbal_wq.lock);
+
+ return 0;
+}
+
+/**
* ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment
* @hw: pointer to HW struct
* @adj: adjustment value to program
@@ -5082,6 +4610,9 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
u8 tmr_idx;
int err;
+ if (hw->dev_caps.ts_dev_info.ll_phy_tmr_update)
+ return ice_ptp_prep_phy_adj_ll_e810(hw, adj);
+
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Adjustments are represented as signed 2's complement values in
@@ -5105,6 +4636,56 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
}
/**
+ * ice_ptp_prep_phy_incval_ll_e810 - Prep PHY ports increment value change
+ * @hw: pointer to HW struct
+ * @incval: The new 40bit increment value to prepare
+ *
+ * Use the low latency firmware interface to program PHY time increment value
+ * for all PHY ports.
+ *
+ * Return: 0 on success, negative error code on timeout or interruption
+ */
+static int ice_ptp_prep_phy_incval_ll_e810(struct ice_hw *hw, u64 incval)
+{
+ const u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ struct ice_e810_params *params = &hw->ptp.phy.e810;
+ u32 val;
+ int err;
+
+ spin_lock_irq(&params->atqbal_wq.lock);
+
+ /* Wait for any pending in-progress low latency interrupt */
+ err = wait_event_interruptible_locked_irq(params->atqbal_wq,
+ !(params->atqbal_flags &
+ ATQBAL_FLAGS_INTR_IN_PROGRESS));
+ if (err) {
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ wr32(hw, REG_LL_PROXY_L, lower_32_bits(incval));
+ val = FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_CMD_M, REG_LL_PROXY_H_PHY_TMR_CMD_FREQ) |
+ FIELD_PREP(REG_LL_PROXY_H_TS_HIGH, (u8)upper_32_bits(incval)) |
+ FIELD_PREP(REG_LL_PROXY_H_PHY_TMR_IDX_M, tmr_idx) | REG_LL_PROXY_H_EXEC;
+ wr32(hw, REG_LL_PROXY_H, val);
+
+ /* Read the register repeatedly until the FW indicates completion */
+ err = read_poll_timeout_atomic(rd32, val,
+ !FIELD_GET(REG_LL_PROXY_H_EXEC, val),
+ 10, REG_LL_PROXY_H_TIMEOUT_US, false, hw,
+ REG_LL_PROXY_H);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY timer increment using low latency interface\n");
+ spin_unlock_irq(&params->atqbal_wq.lock);
+ return err;
+ }
+
+ spin_unlock_irq(&params->atqbal_wq.lock);
+
+ return 0;
+}
+
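A sketch of how a 40-bit increment value splits across the two proxy writes above: the lower 32 bits go out as one register write and the remaining upper 8 bits travel in the high word's TS_HIGH field. The sample value is the E810 nominal increment quoted later in this patch; the splitting itself is plain bit arithmetic, not the driver's API.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t incval = 0x13b13b13bULL;     /* ICE_PTP_NOMINAL_INCVAL_E810 */
	uint32_t lo = (uint32_t)incval;       /* lower 32 bits -> REG_LL_PROXY_L */
	uint8_t hi = (uint8_t)(incval >> 32); /* upper 8 bits -> TS_HIGH field */

	printf("lo=0x%08x hi=0x%02x\n", lo, hi);
	return 0;
}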
+/**
* ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change
* @hw: pointer to HW struct
* @incval: The new 40bit increment value to prepare
@@ -5119,6 +4700,9 @@ static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
u8 tmr_idx;
int err;
+ if (hw->dev_caps.ts_dev_info.ll_phy_tmr_update)
+ return ice_ptp_prep_phy_incval_ll_e810(hw, incval);
+
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
low = lower_32_bits(incval);
high = upper_32_bits(incval);
@@ -5178,68 +4762,6 @@ ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready)
*/
/**
- * ice_get_pca9575_handle
- * @hw: pointer to the hw struct
- * @pca9575_handle: GPIO controller's handle
- *
- * Find and return the GPIO controller's handle in the netlist.
- * When found - the value will be cached in the hw structure and following calls
- * will return cached value
- */
-static int
-ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
-{
- struct ice_aqc_get_link_topo *cmd;
- struct ice_aq_desc desc;
- int status;
- u8 idx;
-
- /* If handle was read previously return cached value */
- if (hw->io_expander_handle) {
- *pca9575_handle = hw->io_expander_handle;
- return 0;
- }
-
- /* If handle was not detected read it from the netlist */
- cmd = &desc.params.get_link_topo;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
-
- /* Set node type to GPIO controller */
- cmd->addr.topo_params.node_type_ctx =
- (ICE_AQC_LINK_TOPO_NODE_TYPE_M &
- ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
-
-#define SW_PCA9575_SFP_TOPO_IDX 2
-#define SW_PCA9575_QSFP_TOPO_IDX 1
-
- /* Check if the SW IO expander controlling SMA exists in the netlist. */
- if (hw->device_id == ICE_DEV_ID_E810C_SFP)
- idx = SW_PCA9575_SFP_TOPO_IDX;
- else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
- idx = SW_PCA9575_QSFP_TOPO_IDX;
- else
- return -EOPNOTSUPP;
-
- cmd->addr.topo_params.index = idx;
-
- status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
- if (status)
- return -EOPNOTSUPP;
-
- /* Verify if we found the right IO expander type */
- if (desc.params.get_link_topo.node_part_num !=
- ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
- return -EOPNOTSUPP;
-
- /* If present save the handle and return it */
- hw->io_expander_handle =
- le16_to_cpu(desc.params.get_link_topo.addr.handle);
- *pca9575_handle = hw->io_expander_handle;
-
- return 0;
-}
-
-/**
* ice_read_sma_ctrl
* @hw: pointer to the hw struct
* @data: pointer to data to be read from the GPIO controller
@@ -5304,37 +4826,6 @@ int ice_write_sma_ctrl(struct ice_hw *hw, u8 data)
}
/**
- * ice_read_pca9575_reg
- * @hw: pointer to the hw struct
- * @offset: GPIO controller register offset
- * @data: pointer to data to be read from the GPIO controller
- *
- * Read the register from the GPIO controller
- */
-int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data)
-{
- struct ice_aqc_link_topo_addr link_topo;
- __le16 addr;
- u16 handle;
- int err;
-
- memset(&link_topo, 0, sizeof(link_topo));
-
- err = ice_get_pca9575_handle(hw, &handle);
- if (err)
- return err;
-
- link_topo.handle = cpu_to_le16(handle);
- link_topo.topo_params.node_type_ctx =
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
- ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
-
- addr = cpu_to_le16((u16)offset);
-
- return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
-}
-
-/**
* ice_ptp_read_sdp_ac - read SDP available connections section from NVM
* @hw: pointer to the HW struct
* @entries: returns the SDP available connections section from NVM
@@ -5400,16 +4891,138 @@ exit:
*/
static void ice_ptp_init_phy_e810(struct ice_ptp_hw *ptp)
{
- ptp->phy_model = ICE_PHY_E810;
+ ptp->num_lports = 8;
+ ptp->ports_per_phy = 4;
+
+ init_waitqueue_head(&ptp->phy.e810.atqbal_wq);
+}
+
+/* E830 functions
+ *
+ * The following functions operate on the E830 series devices.
+ *
+ */
+
+/**
+ * ice_ptp_init_phc_e830 - Perform E830 specific PHC initialization
+ * @hw: pointer to HW struct
+ *
+ * Perform E830-specific PTP hardware clock initialization steps.
+ */
+static void ice_ptp_init_phc_e830(const struct ice_hw *hw)
+{
+ ice_ptp_cfg_sync_delay(hw, ICE_E810_E830_SYNC_DELAY);
+}
+
+/**
+ * ice_ptp_write_direct_incval_e830 - Prep PHY port increment value change
+ * @hw: pointer to HW struct
+ * @incval: The new 40bit increment value to prepare
+ *
+ * Prepare the PHY port for a new increment value by programming the PHC
+ * GLTSYN_INCVAL_L and GLTSYN_INCVAL_H registers. The actual change is
+ * completed by FW automatically.
+ */
+static void ice_ptp_write_direct_incval_e830(const struct ice_hw *hw,
+ u64 incval)
+{
+ u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ wr32(hw, GLTSYN_INCVAL_L(tmr_idx), lower_32_bits(incval));
+ wr32(hw, GLTSYN_INCVAL_H(tmr_idx), upper_32_bits(incval));
+}
+
+/**
+ * ice_ptp_write_direct_phc_time_e830 - Prepare PHY port with initial time
+ * @hw: Board private structure
+ * @time: Time to initialize the PHY port clock to
+ *
+ * Write the initial clock time directly to the PHC GLTSYN_TIME registers.
+ * Unlike devices which use shadow registers, E830 applies the time
+ * immediately, so no ICE_PTP_INIT_TIME command is needed afterwards.
+ *
+ * The time value is the full 64-bit timer value in units of nominal
+ * nanoseconds.
+ */
+static void ice_ptp_write_direct_phc_time_e830(const struct ice_hw *hw,
+ u64 time)
+{
+ u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ wr32(hw, GLTSYN_TIME_0(tmr_idx), 0);
+ wr32(hw, GLTSYN_TIME_L(tmr_idx), lower_32_bits(time));
+ wr32(hw, GLTSYN_TIME_H(tmr_idx), upper_32_bits(time));
+}
+
+/**
+ * ice_ptp_port_cmd_e830 - Prepare all external PHYs for a timer command
+ * @hw: pointer to HW struct
+ * @cmd: Command to be sent to the port
+ *
+ * Prepare the external PHYs connected to this device for a timer sync
+ * command.
+ *
+ * Return: 0 on success, negative error code when PHY write failed
+ */
+static int ice_ptp_port_cmd_e830(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd);
+
+ return ice_write_phy_reg_e810(hw, E830_ETH_GLTSYN_CMD, val);
+}
+
+/**
+ * ice_read_phy_tstamp_e830 - Read a PHY timestamp out of the external PHY
+ * @hw: pointer to the HW struct
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the timestamp block of the external PHY
+ * on the E830 device.
+ */
+static void ice_read_phy_tstamp_e830(const struct ice_hw *hw, u8 idx,
+ u64 *tstamp)
+{
+ u32 hi, lo;
+
+ hi = rd32(hw, E830_PRTTSYN_TXTIME_H(idx));
+ lo = rd32(hw, E830_PRTTSYN_TXTIME_L(idx));
+
+ /* For E830 devices, the timestamp is reported with the lower 32 bits
+ * in the low register, and the upper 8 bits in the high register.
+ */
+ *tstamp = FIELD_PREP(PHY_EXT_40B_HIGH_M, hi) |
+ FIELD_PREP(PHY_EXT_40B_LOW_M, lo);
+}
+
+/**
+ * ice_get_phy_tx_tstamp_ready_e830 - Read Tx memory status register
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to read
+ * @tstamp_ready: contents of the Tx memory status register
+ */
+static void ice_get_phy_tx_tstamp_ready_e830(const struct ice_hw *hw, u8 port,
+ u64 *tstamp_ready)
+{
+ *tstamp_ready = rd32(hw, E830_PRTMAC_TS_TX_MEM_VALID_H);
+ *tstamp_ready <<= 32;
+ *tstamp_ready |= rd32(hw, E830_PRTMAC_TS_TX_MEM_VALID_L);
+}
+
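Minimal sketch of how the two 32-bit status registers above form the 64-bit ready bitmap, with each set bit naming a Tx timestamp index that is ready to read; the register contents here are made-up examples, not real hardware values.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hi = 0x00000001; /* example ..._TS_TX_MEM_VALID_H contents */
	uint32_t lo = 0x80000004; /* example ..._TS_TX_MEM_VALID_L contents */
	uint64_t ready = ((uint64_t)hi << 32) | lo;

	for (unsigned int idx = 0; idx < 64; idx++)
		if (ready & (1ULL << idx))
			printf("Tx timestamp index %u is ready\n", idx);
	return 0;
}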
+/**
+ * ice_ptp_init_phy_e830 - initialize PHY parameters
+ * @ptp: pointer to the PTP HW struct
+ */
+static void ice_ptp_init_phy_e830(struct ice_ptp_hw *ptp)
+{
ptp->num_lports = 8;
ptp->ports_per_phy = 4;
}
/* Device agnostic functions
*
- * The following functions implement shared behavior common to both E822 and
- * E810 devices, possibly calling a device specific implementation where
- * necessary.
+ * The following functions implement shared behavior common to all devices,
+ * possibly calling a device specific implementation where necessary.
*/
/**
@@ -5472,14 +5085,22 @@ void ice_ptp_init_hw(struct ice_hw *hw)
{
struct ice_ptp_hw *ptp = &hw->ptp;
- if (ice_is_e822(hw) || ice_is_e823(hw))
- ice_ptp_init_phy_e82x(ptp);
- else if (ice_is_e810(hw))
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
ice_ptp_init_phy_e810(ptp);
- else if (ice_is_e825c(hw))
- ice_ptp_init_phy_e825c(hw);
- else
- ptp->phy_model = ICE_PHY_UNSUP;
+ break;
+ case ICE_MAC_E830:
+ ice_ptp_init_phy_e830(ptp);
+ break;
+ case ICE_MAC_GENERIC:
+ ice_ptp_init_phy_e82x(ptp);
+ break;
+ case ICE_MAC_GENERIC_3K_E825:
+ ice_ptp_init_phy_e825(hw);
+ break;
+ default:
+ return;
+ }
}
/**
@@ -5500,11 +5121,11 @@ void ice_ptp_init_hw(struct ice_hw *hw)
static int ice_ptp_write_port_cmd(struct ice_hw *hw, u8 port,
enum ice_ptp_tmr_cmd cmd)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_ptp_write_port_cmd_eth56g(hw, port, cmd);
- case ICE_PHY_E82X:
+ switch (hw->mac_type) {
+ case ICE_MAC_GENERIC:
return ice_ptp_write_port_cmd_e82x(hw, port, cmd);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_ptp_write_port_cmd_eth56g(hw, port, cmd);
default:
return -EOPNOTSUPP;
}
@@ -5565,9 +5186,11 @@ static int ice_ptp_port_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
u32 port;
/* PHY models which can program all ports simultaneously */
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_ptp_port_cmd_e810(hw, cmd);
+ case ICE_MAC_E830:
+ return ice_ptp_port_cmd_e830(hw, cmd);
default:
break;
}
@@ -5638,23 +5261,29 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Source timers */
+ /* For E830 we don't need to use shadow registers, it's automatic */
+ if (hw->mac_type == ICE_MAC_E830) {
+ ice_ptp_write_direct_phc_time_e830(hw, time);
+ return 0;
+ }
+
wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time));
wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time));
wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0);
/* PHY timers */
/* Fill Rx and Tx ports and send msg to PHY */
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_ptp_prep_phy_time_eth56g(hw,
- (u32)(time & 0xFFFFFFFF));
- break;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
err = ice_ptp_prep_phy_time_e82x(hw, time & 0xFFFFFFFF);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_ptp_prep_phy_time_eth56g(hw,
+ (u32)(time & 0xFFFFFFFF));
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -5686,20 +5315,26 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ /* For E830 we don't need to use shadow registers, it's automatic */
+ if (hw->mac_type == ICE_MAC_E830) {
+ ice_ptp_write_direct_incval_e830(hw, incval);
+ return 0;
+ }
+
/* Shadow Adjust */
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_ptp_prep_phy_incval_eth56g(hw, incval);
- break;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
err = ice_ptp_prep_phy_incval_e810(hw, incval);
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
err = ice_ptp_prep_phy_incval_e82x(hw, incval);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_ptp_prep_phy_incval_eth56g(hw, incval);
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -5759,16 +5394,19 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- err = ice_ptp_prep_phy_adj_eth56g(hw, adj);
- break;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
err = ice_ptp_prep_phy_adj_e810(hw, adj);
break;
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ /* E830 sync PHYs automatically after setting GLTSYN_SHADJ */
+ return 0;
+ case ICE_MAC_GENERIC:
err = ice_ptp_prep_phy_adj_e82x(hw, adj);
break;
+ case ICE_MAC_GENERIC_3K_E825:
+ err = ice_ptp_prep_phy_adj_eth56g(hw, adj);
+ break;
default:
err = -EOPNOTSUPP;
}
@@ -5792,13 +5430,16 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
*/
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ ice_read_phy_tstamp_e830(hw, idx, tstamp);
+ return 0;
+ case ICE_MAC_GENERIC:
return ice_read_phy_tstamp_e82x(hw, block, idx, tstamp);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp);
default:
return -EOPNOTSUPP;
}
@@ -5822,13 +5463,13 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
*/
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_clear_ptp_tstamp_eth56g(hw, block, idx);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_clear_phy_tstamp_e810(hw, block, idx);
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
return ice_clear_phy_tstamp_e82x(hw, block, idx);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_clear_ptp_tstamp_eth56g(hw, block, idx);
default:
return -EOPNOTSUPP;
}
@@ -5885,14 +5526,14 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx)
*/
void ice_ptp_reset_ts_memory(struct ice_hw *hw)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- ice_ptp_reset_ts_memory_eth56g(hw);
- break;
- case ICE_PHY_E82X:
+ switch (hw->mac_type) {
+ case ICE_MAC_GENERIC:
ice_ptp_reset_ts_memory_e82x(hw);
break;
- case ICE_PHY_E810:
+ case ICE_MAC_GENERIC_3K_E825:
+ ice_ptp_reset_ts_memory_eth56g(hw);
+ break;
+ case ICE_MAC_E810:
default:
return;
}
@@ -5914,13 +5555,16 @@ int ice_ptp_init_phc(struct ice_hw *hw)
/* Clear event err indications for auxiliary pins */
(void)rd32(hw, GLTSYN_STAT(src_idx));
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_ptp_init_phc_eth56g(hw);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_ptp_init_phc_e810(hw);
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ ice_ptp_init_phc_e830(hw);
+ return 0;
+ case ICE_MAC_GENERIC:
return ice_ptp_init_phc_e82x(hw);
+ case ICE_MAC_GENERIC_3K_E825:
+ return 0;
default:
return -EOPNOTSUPP;
}
@@ -5939,17 +5583,19 @@ int ice_ptp_init_phc(struct ice_hw *hw)
*/
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
{
- switch (ice_get_phy_model(hw)) {
- case ICE_PHY_ETH56G:
- return ice_get_phy_tx_tstamp_ready_eth56g(hw, block,
- tstamp_ready);
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
return ice_get_phy_tx_tstamp_ready_e810(hw, block,
tstamp_ready);
- case ICE_PHY_E82X:
+ case ICE_MAC_E830:
+ ice_get_phy_tx_tstamp_ready_e830(hw, block, tstamp_ready);
+ return 0;
+ case ICE_MAC_GENERIC:
return ice_get_phy_tx_tstamp_ready_e82x(hw, block,
tstamp_ready);
- break;
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_get_phy_tx_tstamp_ready_eth56g(hw, block,
+ tstamp_ready);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 47af7c5c79b8..5896b346e579 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -65,14 +65,14 @@ enum ice_eth56g_link_spd {
/**
* struct ice_phy_reg_info_eth56g - ETH56G PHY register parameters
- * @base: base address for each PHY block
+ * @base_addr: base address for each PHY block
* @step: step between PHY lanes
*
* Characteristic information for the various PHY register parameters in the
* ETH56G devices
*/
struct ice_phy_reg_info_eth56g {
- u32 base[NUM_ETH56G_PHY_RES];
+ u32 base_addr;
u32 step;
};
@@ -80,7 +80,6 @@ struct ice_phy_reg_info_eth56g {
* struct ice_time_ref_info_e82x
* @pll_freq: Frequency of PLL that drives timer ticks in Hz
* @nominal_incval: increment to generate nanoseconds in GLTSYN_TIME_L
- * @pps_delay: propagation delay of the PPS output signal
*
* Characteristic information for the various TIME_REF sources possible in the
* E822 devices
@@ -88,7 +87,6 @@ struct ice_phy_reg_info_eth56g {
struct ice_time_ref_info_e82x {
u64 pll_freq;
u64 nominal_incval;
- u8 pps_delay;
};
/**
@@ -196,23 +194,6 @@ struct ice_eth56g_mac_reg_cfg {
extern
const struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD];
-/**
- * struct ice_cgu_pll_params_e82x - E82X CGU parameters
- * @refclk_pre_div: Reference clock pre-divisor
- * @feedback_div: Feedback divisor
- * @frac_n_div: Fractional divisor
- * @post_pll_div: Post PLL divisor
- *
- * Clock Generation Unit parameters used to program the PLL based on the
- * selected TIME_REF frequency.
- */
-struct ice_cgu_pll_params_e82x {
- u32 refclk_pre_div;
- u32 feedback_div;
- u32 frac_n_div;
- u32 post_pll_div;
-};
-
#define E810C_QSFP_C827_0_HANDLE 2
#define E810C_QSFP_C827_1_HANDLE 3
enum ice_e810_c827_idx {
@@ -284,31 +265,6 @@ struct ice_cgu_pin_desc {
struct dpll_pin_frequency *freq_supp;
};
-extern const struct
-ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
-
-/**
- * struct ice_cgu_pll_params_e825c - E825C CGU parameters
- * @tspll_ck_refclkfreq: tspll_ck_refclkfreq selection
- * @tspll_ndivratio: ndiv ratio that goes directly to the pll
- * @tspll_fbdiv_intgr: TS PLL integer feedback divide
- * @tspll_fbdiv_frac: TS PLL fractional feedback divide
- * @ref1588_ck_div: clock divider for tspll ref
- *
- * Clock Generation Unit parameters used to program the PLL based on the
- * selected TIME_REF/TCXO frequency.
- */
-struct ice_cgu_pll_params_e825c {
- u32 tspll_ck_refclkfreq;
- u32 tspll_ndivratio;
- u32 tspll_fbdiv_intgr;
- u32 tspll_fbdiv_frac;
- u32 ref1588_ck_div;
-};
-
-extern const struct
-ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ];
-
#define E810C_QSFP_C827_0_HANDLE 2
#define E810C_QSFP_C827_1_HANDLE 3
@@ -316,7 +272,7 @@ ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ];
extern const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES];
/* Table of constants related to possible TIME_REF sources */
-extern const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TIME_REF_FREQ];
+extern const struct ice_time_ref_info_e82x e82x_time_ref[NUM_ICE_TSPLL_FREQ];
/* Table of constants for Vernier calibration on E822 */
extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
@@ -326,12 +282,10 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
*/
#define ICE_E810_PLL_FREQ 812500000
#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL
-#define ICE_E810_OUT_PROP_DELAY_NS 1
-#define ICE_E825C_OUT_PROP_DELAY_NS 11
+#define ICE_E810_E830_SYNC_DELAY 0
/* Device agnostic functions */
u8 ice_get_ptp_src_clock_index(struct ice_hw *hw);
-int ice_cgu_cfg_pps_out(struct ice_hw *hw, bool enable);
bool ice_ptp_lock(struct ice_hw *hw);
void ice_ptp_unlock(struct ice_hw *hw);
void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd);
@@ -360,7 +314,8 @@ void ice_ptp_reset_ts_memory_quad_e82x(struct ice_hw *hw, u8 quad);
*
* Returns the current TIME_REF from the capabilities structure.
*/
-static inline enum ice_time_ref_freq ice_e82x_time_ref(const struct ice_hw *hw)
+static inline enum ice_tspll_freq ice_e82x_time_ref(const struct ice_hw *hw)
{
return hw->func_caps.ts_func_info.time_ref;
}
@@ -374,26 +329,21 @@ static inline enum ice_time_ref_freq ice_e82x_time_ref(const struct ice_hw *hw)
* change, such as an update to the CGU registers.
*/
static inline void
-ice_set_e82x_time_ref(struct ice_hw *hw, enum ice_time_ref_freq time_ref)
+ice_set_e82x_time_ref(struct ice_hw *hw, enum ice_tspll_freq time_ref)
{
hw->func_caps.ts_func_info.time_ref = time_ref;
}
-static inline u64 ice_e82x_pll_freq(enum ice_time_ref_freq time_ref)
+static inline u64 ice_e82x_pll_freq(enum ice_tspll_freq time_ref)
{
return e82x_time_ref[time_ref].pll_freq;
}
-static inline u64 ice_e82x_nominal_incval(enum ice_time_ref_freq time_ref)
+static inline u64 ice_e82x_nominal_incval(enum ice_tspll_freq time_ref)
{
return e82x_time_ref[time_ref].nominal_incval;
}
-static inline u64 ice_e82x_pps_delay(enum ice_time_ref_freq time_ref)
-{
- return e82x_time_ref[time_ref].pps_delay;
-}
-
/* E822 Vernier calibration functions */
int ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset);
int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port);
@@ -404,7 +354,6 @@ int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold);
/* E810 family functions */
int ice_read_sma_ctrl(struct ice_hw *hw, u8 *data);
int ice_write_sma_ctrl(struct ice_hw *hw, u8 data);
-int ice_read_pca9575_reg(struct ice_hw *hw, u8 offset, u8 *data);
int ice_ptp_read_sdp_ac(struct ice_hw *hw, __le16 *entries, uint *num_entries);
int ice_cgu_get_num_pins(struct ice_hw *hw, bool input);
enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input);
@@ -432,20 +381,6 @@ int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port);
#define ICE_ETH56G_NOMINAL_THRESH4 0x7777
#define ICE_ETH56G_NOMINAL_TX_THRESH 0x6
-static inline u64 ice_prop_delay(const struct ice_hw *hw)
-{
- switch (hw->ptp.phy_model) {
- case ICE_PHY_ETH56G:
- return ICE_E825C_OUT_PROP_DELAY_NS;
- case ICE_PHY_E810:
- return ICE_E810_OUT_PROP_DELAY_NS;
- case ICE_PHY_E82X:
- return ice_e82x_pps_delay(ice_e82x_time_ref(hw));
- default:
- return 0;
- }
-}
-
/**
* ice_get_base_incval - Get base clock increment value
* @hw: pointer to the HW struct
@@ -454,23 +389,19 @@ static inline u64 ice_prop_delay(const struct ice_hw *hw)
*/
static inline u64 ice_get_base_incval(struct ice_hw *hw)
{
- switch (hw->ptp.phy_model) {
- case ICE_PHY_ETH56G:
- return ICE_ETH56G_NOMINAL_INCVAL;
- case ICE_PHY_E810:
+ switch (hw->mac_type) {
+ case ICE_MAC_E810:
+ case ICE_MAC_E830:
return ICE_PTP_NOMINAL_INCVAL_E810;
- case ICE_PHY_E82X:
+ case ICE_MAC_GENERIC:
return ice_e82x_nominal_incval(ice_e82x_time_ref(hw));
+ case ICE_MAC_GENERIC_3K_E825:
+ return ICE_ETH56G_NOMINAL_INCVAL;
default:
return 0;
}
}
-static inline bool ice_is_dual(struct ice_hw *hw)
-{
- return !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M);
-}
-
#define PFTSYN_SEM_BYTES 4
#define ICE_PTP_CLOCK_INDEX_0 0x00
@@ -673,28 +604,41 @@ static inline bool ice_is_dual(struct ice_hw *hw)
/* E810 timer command register */
#define E810_ETH_GLTSYN_CMD 0x03000344
+/* E830 timer command register */
+#define E830_ETH_GLTSYN_CMD 0x00088814
+
+/* E830 PHC time register */
+#define E830_GLTSYN_TIME_L(_tmr_idx) (0x0008A000 + 0x1000 * (_tmr_idx))
+
/* Source timer incval macros */
#define INCVAL_HIGH_M 0xFF
-/* Timestamp block macros */
+/* PHY 40b registers macros */
+#define PHY_EXT_40B_LOW_M GENMASK(31, 0)
+#define PHY_EXT_40B_HIGH_M GENMASK_ULL(39, 32)
+#define PHY_40B_LOW_M GENMASK(7, 0)
+#define PHY_40B_HIGH_M GENMASK_ULL(39, 8)
#define TS_VALID BIT(0)
#define TS_LOW_M 0xFFFFFFFF
#define TS_HIGH_M 0xFF
#define TS_HIGH_S 32
-#define TS_PHY_LOW_M 0xFF
-#define TS_PHY_HIGH_M 0xFFFFFFFF
-#define TS_PHY_HIGH_S 8
-
#define BYTES_PER_IDX_ADDR_L_U 8
#define BYTES_PER_IDX_ADDR_L 4
/* Tx timestamp low latency read definitions */
-#define TS_LL_READ_RETRIES 200
-#define TS_LL_READ_TS_HIGH GENMASK(23, 16)
-#define TS_LL_READ_TS_IDX GENMASK(29, 24)
-#define TS_LL_READ_TS_INTR BIT(30)
-#define TS_LL_READ_TS BIT(31)
+#define REG_LL_PROXY_H_TIMEOUT_US 2000
+#define REG_LL_PROXY_H_PHY_TMR_CMD_M GENMASK(7, 6)
+#define REG_LL_PROXY_H_PHY_TMR_CMD_ADJ 0x1
+#define REG_LL_PROXY_H_PHY_TMR_CMD_FREQ 0x2
+#define REG_LL_PROXY_H_TS_HIGH GENMASK(23, 16)
+#define REG_LL_PROXY_H_PHY_TMR_IDX_M BIT(24)
+#define REG_LL_PROXY_H_TS_IDX GENMASK(29, 24)
+#define REG_LL_PROXY_H_TS_INTR_ENA BIT(30)
+#define REG_LL_PROXY_H_EXEC BIT(31)
+
+#define REG_LL_PROXY_L PF_SB_ATQBAH
+#define REG_LL_PROXY_H PF_SB_ATQBAL
/* Internal PHY timestamp address */
#define TS_L(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U))
@@ -718,6 +662,7 @@ static inline bool ice_is_dual(struct ice_hw *hw)
#define ICE_SMA1_MASK (ICE_SMA1_DIR_EN | ICE_SMA1_TX_EN)
#define ICE_SMA2_MASK (ICE_SMA2_UFL2_RX_DIS | ICE_SMA2_DIR_EN | \
ICE_SMA2_TX_EN)
+#define ICE_SMA2_INACTIVE_MASK (ICE_SMA2_DIR_EN | ICE_SMA2_TX_EN)
#define ICE_ALL_SMA_MASK (ICE_SMA1_MASK | ICE_SMA2_MASK)
#define ICE_SMA_MIN_BIT 3
@@ -789,36 +734,19 @@ static inline bool ice_is_dual(struct ice_hw *hw)
#define PHY_MAC_XIF_TS_SFD_ENA_M ICE_M(0x1, 20)
#define PHY_MAC_XIF_GMII_TS_SEL_M ICE_M(0x1, 21)
-/* GPCS config register */
-#define PHY_GPCS_CONFIG_REG0 0x268
-#define PHY_GPCS_CONFIG_REG0_TX_THR_M ICE_M(0xF, 24)
-#define PHY_GPCS_BITSLIP 0x5C
-
#define PHY_TS_INT_CONFIG_THRESHOLD_M ICE_M(0x3F, 0)
#define PHY_TS_INT_CONFIG_ENA_M BIT(6)
-/* 1-step PTP config */
-#define PHY_PTP_1STEP_CONFIG 0x270
-#define PHY_PTP_1STEP_T1S_UP64_M ICE_M(0xF, 4)
-#define PHY_PTP_1STEP_T1S_DELTA_M ICE_M(0xF, 8)
-#define PHY_PTP_1STEP_PEER_DELAY(_port) (0x274 + 4 * (_port))
-#define PHY_PTP_1STEP_PD_ADD_PD_M ICE_M(0x1, 0)
-#define PHY_PTP_1STEP_PD_DELAY_M ICE_M(0x3fffffff, 1)
-#define PHY_PTP_1STEP_PD_DLY_V_M ICE_M(0x1, 31)
-
/* Macros to derive offsets for TimeStampLow and TimeStampHigh */
#define PHY_TSTAMP_L(x) (((x) * 8) + 0)
#define PHY_TSTAMP_U(x) (((x) * 8) + 4)
-#define PHY_REG_REVISION 0x85000
-
#define PHY_REG_DESKEW_0 0x94
#define PHY_REG_DESKEW_0_RLEVEL GENMASK(6, 0)
#define PHY_REG_DESKEW_0_RLEVEL_FRAC GENMASK(9, 7)
#define PHY_REG_DESKEW_0_RLEVEL_FRAC_W 3
#define PHY_REG_DESKEW_0_VALID GENMASK(10, 10)
-#define PHY_REG_GPCS_BITSLIP 0x5C
#define PHY_REG_SD_BIT_SLIP(_port_offset) (0x29C + 4 * (_port_offset))
#define PHY_REVISION_ETH56G 0x10200
#define PHY_VENDOR_TXLANE_THRESH 0x2000C
@@ -838,7 +766,21 @@ static inline bool ice_is_dual(struct ice_hw *hw)
#define PHY_MAC_BLOCKTIME 0x50
#define PHY_MAC_MARKERTIME 0x54
#define PHY_MAC_TX_OFFSET 0x58
+#define PHY_GPCS_BITSLIP 0x5C
#define PHY_PTP_INT_STATUS 0x7FD140
+/* ETH56G registers shared per quad */
+/* GPCS config register */
+#define PHY_GPCS_CONFIG_REG0 0x268
+#define PHY_GPCS_CONFIG_REG0_TX_THR_M GENMASK(27, 24)
+/* 1-step PTP config */
+#define PHY_PTP_1STEP_CONFIG 0x270
+#define PHY_PTP_1STEP_T1S_UP64_M GENMASK(7, 4)
+#define PHY_PTP_1STEP_T1S_DELTA_M GENMASK(11, 8)
+#define PHY_PTP_1STEP_PEER_DELAY(_quad_lane) (0x274 + 4 * (_quad_lane))
+#define PHY_PTP_1STEP_PD_ADD_PD_M BIT(0)
+#define PHY_PTP_1STEP_PD_DELAY_M GENMASK(30, 1)
+#define PHY_PTP_1STEP_PD_DLY_V_M BIT(31)
+
#endif /* _ICE_PTP_HW_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index 970a99a52bf1..cb08746556a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -4,7 +4,7 @@
#include "ice.h"
#include "ice_eswitch.h"
#include "devlink/devlink.h"
-#include "devlink/devlink_port.h"
+#include "devlink/port.h"
#include "ice_sriov.h"
#include "ice_tc_lib.h"
#include "ice_dcb_lib.h"
@@ -219,7 +219,8 @@ ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
{
switch (flower->command) {
case FLOW_CLS_REPLACE:
- return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower);
+ return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower,
+ true);
case FLOW_CLS_DESTROY:
return ice_del_cls_flower(repr->src_vsi, flower);
default:
@@ -336,6 +337,7 @@ void ice_repr_destroy(struct ice_repr *repr)
static void ice_repr_rem_vf(struct ice_repr *repr)
{
ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac);
+ ice_pass_vf_tx_lldp(repr->src_vsi, true);
unregister_netdev(repr->netdev);
ice_devlink_destroy_vf_port(repr->vf);
ice_virtchnl_set_dflt_ops(repr->vf);
@@ -417,6 +419,10 @@ static int ice_repr_add_vf(struct ice_repr *repr)
if (err)
goto err_netdev;
+ err = ice_drop_vf_tx_lldp(repr->src_vsi, true);
+ if (err)
+ goto err_drop_lldp;
+
err = ice_eswitch_cfg_vsi(repr->src_vsi, repr->parent_mac);
if (err)
goto err_cfg_vsi;
@@ -429,6 +435,8 @@ static int ice_repr_add_vf(struct ice_repr *repr)
return 0;
err_cfg_vsi:
+ ice_pass_vf_tx_lldp(repr->src_vsi, true);
+err_drop_lldp:
unregister_netdev(repr->netdev);
err_netdev:
ice_devlink_destroy_vf_port(vf);
diff --git a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
index 3b0054faf70c..21bb861febbf 100644
--- a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
@@ -46,13 +46,11 @@ struct ice_sbq_evt_desc {
u8 data[24];
};
-enum ice_sbq_msg_dev {
- eth56g_phy_0 = 0x02,
- rmn_0 = 0x02,
- rmn_1 = 0x03,
- rmn_2 = 0x04,
- cgu = 0x06,
- eth56g_phy_1 = 0x0D,
+enum ice_sbq_dev_id {
+ ice_sbq_dev_phy_0 = 0x02,
+ ice_sbq_dev_cgu = 0x06,
+ ice_sbq_dev_phy_0_peer = 0x0D,
+ ice_sbq_dev_cgu_peer = 0x0F,
};
enum ice_sbq_msg_opcode {
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 6ca13c5dcb14..fff0c1afdb41 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -85,6 +85,27 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
}
/**
+ * ice_sched_find_next_vsi_node - find the next node for a given VSI
+ * @vsi_node: VSI support node to start search with
+ *
+ * Return: Next VSI support node, or NULL.
+ *
+ * The function returns a pointer to the next node from the VSI layer
+ * assigned to the given VSI, or NULL if there is no such node.
+ */
+static struct ice_sched_node *
+ice_sched_find_next_vsi_node(struct ice_sched_node *vsi_node)
+{
+ unsigned int vsi_handle = vsi_node->vsi_handle;
+
+ while ((vsi_node = vsi_node->sibling) != NULL)
+ if (vsi_node->vsi_handle == vsi_handle)
+ break;
+
+ return vsi_node;
+}
+
+/**
* ice_aqc_send_sched_elem_cmd - send scheduling elements cmd
* @hw: pointer to the HW struct
* @cmd_opc: cmd opcode
@@ -102,13 +123,13 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
u16 *elems_resp, struct ice_sq_cd *cd)
{
struct ice_aqc_sched_elem_cmd *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.sched_elem_cmd;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
cmd->num_elem_req = cpu_to_le16(elems_req);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
if (!status && elems_resp)
*elems_resp = le16_to_cpu(cmd->num_elem_resp);
@@ -371,10 +392,10 @@ ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
u8 *num_branches, struct ice_sq_cd *cd)
{
struct ice_aqc_get_topo *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.get_topo;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
cmd->port_num = lport;
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
@@ -497,7 +518,7 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
struct ice_aqc_query_txsched_res_resp *buf,
struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_query_sched_res);
return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
@@ -662,13 +683,13 @@ ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
{
struct ice_aqc_rl_profile *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.rl_profile;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, opcode);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
cmd->num_profiles = cpu_to_le16(num_profiles);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
if (!status && num_processed)
@@ -1084,8 +1105,10 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
if (parent->num_children < max_child_nodes) {
new_num_nodes = max_child_nodes - parent->num_children;
} else {
- /* This parent is full, try the next sibling */
- parent = parent->sibling;
+ /* This parent is full,
+ * try the next available sibling.
+ */
+ parent = ice_sched_find_next_vsi_node(parent);
/* Don't modify the first node TEID memory if the
* first node was added already in the above call.
* Instead send some temp memory for all other
@@ -1528,12 +1551,23 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
/* get the first queue group node from VSI sub-tree */
qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
while (qgrp_node) {
+ struct ice_sched_node *next_vsi_node;
+
/* make sure the qgroup node is part of the VSI subtree */
if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
if (qgrp_node->num_children < max_children &&
qgrp_node->owner == owner)
break;
qgrp_node = qgrp_node->sibling;
+ if (qgrp_node)
+ continue;
+
+ next_vsi_node = ice_sched_find_next_vsi_node(vsi_node);
+ if (!next_vsi_node)
+ break;
+
+ vsi_node = next_vsi_node;
+ qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
}
/* Select the best queue group */
@@ -1604,16 +1638,16 @@ ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
/**
* ice_sched_calc_vsi_child_nodes - calculate number of VSI child nodes
* @hw: pointer to the HW struct
- * @num_qs: number of queues
+ * @num_new_qs: number of new queues that will be added to the tree
* @num_nodes: num nodes array
*
* This function calculates the number of VSI child nodes based on the
* number of queues.
*/
static void
-ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
+ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_new_qs, u16 *num_nodes)
{
- u16 num = num_qs;
+ u16 num = num_new_qs;
u8 i, qgl, vsil;
qgl = ice_sched_get_qgrp_layer(hw);
@@ -1779,7 +1813,11 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
if (!parent)
return -EIO;
- if (i == vsil)
+ /* Do not modify the VSI handle of already existing VSI nodes
+ * (i.e. when no new VSI node was added to the tree).
+ * Assign the VSI handle only to newly added VSI nodes.
+ */
+ if (i == vsil && num_added)
parent->vsi_handle = vsi_handle;
}
@@ -1813,6 +1851,41 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
}
/**
+ * ice_sched_recalc_vsi_support_nodes - recalculate VSI support nodes count
+ * @hw: pointer to the HW struct
+ * @vsi_node: pointer to the leftmost VSI node that needs to be extended
+ * @new_numqs: new number of queues that has to be handled by the VSI
+ * @new_num_nodes: pointer to nodes count table to modify the VSI layer entry
+ *
+ * This function recalculates the number of VSI support nodes that need to
+ * be added after adding more Tx queues for a given VSI.
+ * The number of new VSI support nodes that shall be added will be saved
+ * to the @new_num_nodes table for the VSI layer.
+ */
+static void
+ice_sched_recalc_vsi_support_nodes(struct ice_hw *hw,
+ struct ice_sched_node *vsi_node,
+ unsigned int new_numqs, u16 *new_num_nodes)
+{
+ u32 vsi_nodes_cnt = 1;
+ u32 max_queue_cnt = 1;
+ u32 qgl, vsil;
+
+ qgl = ice_sched_get_qgrp_layer(hw);
+ vsil = ice_sched_get_vsi_layer(hw);
+
+ for (u32 i = vsil; i <= qgl; i++)
+ max_queue_cnt *= hw->max_children[i];
+
+ while ((vsi_node = ice_sched_find_next_vsi_node(vsi_node)) != NULL)
+ vsi_nodes_cnt++;
+
+ if (new_numqs > (max_queue_cnt * vsi_nodes_cnt))
+ new_num_nodes[vsil] = DIV_ROUND_UP(new_numqs, max_queue_cnt) -
+ vsi_nodes_cnt;
+}
+
+/**
* ice_sched_update_vsi_child_nodes - update VSI child nodes
* @pi: port information structure
* @vsi_handle: software VSI handle
@@ -1863,15 +1936,25 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
return status;
}
- if (new_numqs)
- ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
- /* Keep the max number of queue configuration all the time. Update the
- * tree only if number of queues > previous number of queues. This may
+ ice_sched_recalc_vsi_support_nodes(hw, vsi_node,
+ new_numqs, new_num_nodes);
+ ice_sched_calc_vsi_child_nodes(hw, new_numqs - prev_numqs,
+ new_num_nodes);
+
+ /* Never decrease the number of queues in the tree. Update the tree
+ * only if number of queues > previous number of queues. This may
* leave some extra nodes in the tree if number of queues < previous
* number but that wouldn't harm anything. Removing those extra nodes
* may complicate the code if those nodes are part of SRL or
* individually rate limited.
+ * Also, add the required VSI support nodes if the existing ones cannot
+ * handle the requested new number of queues.
*/
+ status = ice_sched_add_vsi_support_nodes(pi, vsi_handle, tc_node,
+ new_num_nodes);
+ if (status)
+ return status;
+
status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
new_num_nodes, owner);
if (status)
@@ -2013,6 +2096,58 @@ static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
}
/**
+ * ice_sched_rm_vsi_subtree - remove all nodes assigned to a given VSI
+ * @pi: port information structure
+ * @vsi_node: pointer to the leftmost node of the VSI to be removed
+ * @owner: LAN or RDMA
+ * @tc: TC number
+ *
+ * Return: Zero in case of success, or -EBUSY if the VSI has leaf nodes in TC.
+ *
+ * This function removes all the VSI support nodes associated with a given VSI
+ * and its LAN or RDMA child nodes from the scheduler tree.
+ */
+static int
+ice_sched_rm_vsi_subtree(struct ice_port_info *pi,
+ struct ice_sched_node *vsi_node, u8 owner, u8 tc)
+{
+ u16 vsi_handle = vsi_node->vsi_handle;
+ bool all_vsi_nodes_removed = true;
+ int j = 0;
+
+ while (vsi_node) {
+ struct ice_sched_node *next_vsi_node;
+
+ if (ice_sched_is_leaf_node_present(vsi_node)) {
+ ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", tc);
+ return -EBUSY;
+ }
+ while (j < vsi_node->num_children) {
+ if (vsi_node->children[j]->owner == owner)
+ ice_free_sched_node(pi, vsi_node->children[j]);
+ else
+ j++;
+ }
+
+ next_vsi_node = ice_sched_find_next_vsi_node(vsi_node);
+
+ /* remove the VSI if it has no children */
+ if (!vsi_node->num_children)
+ ice_free_sched_node(pi, vsi_node);
+ else
+ all_vsi_nodes_removed = false;
+
+ vsi_node = next_vsi_node;
+ }
+
+ /* clean up aggregator related VSI info if any */
+ if (all_vsi_nodes_removed)
+ ice_sched_rm_agg_vsi_info(pi, vsi_handle);
+
+ return 0;
+}
+
+/**
* ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
* @pi: port information structure
* @vsi_handle: software VSI handle
@@ -2038,7 +2173,6 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
ice_for_each_traffic_class(i) {
struct ice_sched_node *vsi_node, *tc_node;
- u8 j = 0;
tc_node = ice_sched_get_tc_node(pi, i);
if (!tc_node)
@@ -2048,31 +2182,12 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
if (!vsi_node)
continue;
- if (ice_sched_is_leaf_node_present(vsi_node)) {
- ice_debug(pi->hw, ICE_DBG_SCHED, "VSI has leaf nodes in TC %d\n", i);
- status = -EBUSY;
+ status = ice_sched_rm_vsi_subtree(pi, vsi_node, owner, i);
+ if (status)
goto exit_sched_rm_vsi_cfg;
- }
- while (j < vsi_node->num_children) {
- if (vsi_node->children[j]->owner == owner) {
- ice_free_sched_node(pi, vsi_node->children[j]);
- /* reset the counter again since the num
- * children will be updated after node removal
- */
- j = 0;
- } else {
- j++;
- }
- }
- /* remove the VSI if it has no children */
- if (!vsi_node->num_children) {
- ice_free_sched_node(pi, vsi_node);
- vsi_ctx->sched.vsi_node[i] = NULL;
+ vsi_ctx->sched.vsi_node[i] = NULL;
- /* clean up aggregator related VSI info if any */
- ice_sched_rm_agg_vsi_info(pi, vsi_handle);
- }
if (owner == ICE_SCHED_NODE_OWNER_LAN)
vsi_ctx->sched.max_lanq[i] = 0;
else
diff --git a/drivers/net/ethernet/intel/ice/ice_sf_eth.c b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
index 75d7147e1c01..1a2c94375ca7 100644
--- a/drivers/net/ethernet/intel/ice/ice_sf_eth.c
+++ b/drivers/net/ethernet/intel/ice/ice_sf_eth.c
@@ -5,8 +5,8 @@
#include "ice_txrx.h"
#include "ice_fltr.h"
#include "ice_sf_eth.h"
-#include "devlink/devlink_port.h"
#include "devlink/devlink.h"
+#include "devlink/port.h"
static const struct net_device_ops ice_sf_netdev_ops = {
.ndo_open = ice_open,
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index b83f99c01d91..6b1126ddb561 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -9,7 +9,7 @@
#include "ice_dcb_lib.h"
#include "ice_flow.h"
#include "ice_eswitch.h"
-#include "ice_virtchnl_allowlist.h"
+#include "virt/allowlist.h"
#include "ice_flex_pipe.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"
@@ -36,6 +36,7 @@ static void ice_free_vf_entries(struct ice_pf *pf)
hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
hash_del_rcu(&vf->entry);
+ ice_deinitialize_vf_entry(vf);
ice_put_vf(vf);
}
}
@@ -62,6 +63,7 @@ static void ice_free_vf_res(struct ice_vf *vf)
if (vf->lan_vsi_idx != ICE_NO_VSI) {
ice_vf_vsi_release(vf);
vf->num_mac = 0;
+ vf->num_mac_lldp = 0;
}
last_vector_idx = vf->first_vector_idx + vf->num_msix - 1;
@@ -123,27 +125,6 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
}
/**
- * ice_sriov_free_msix_res - Reset/free any used MSIX resources
- * @pf: pointer to the PF structure
- *
- * Since no MSIX entries are taken from the pf->irq_tracker then just clear
- * the pf->sriov_base_vector.
- *
- * Returns 0 on success, and -EINVAL on error.
- */
-static int ice_sriov_free_msix_res(struct ice_pf *pf)
-{
- if (!pf)
- return -EINVAL;
-
- bitmap_free(pf->sriov_irq_bm);
- pf->sriov_irq_size = 0;
- pf->sriov_base_vector = 0;
-
- return 0;
-}
-
-/**
* ice_free_vfs - Free all VFs
* @pf: pointer to the PF structure
*/
@@ -177,6 +158,7 @@ void ice_free_vfs(struct ice_pf *pf)
ice_eswitch_detach_vf(pf, vf);
ice_dis_vf_qs(vf);
+ ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix);
if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
/* disable VF qp mappings and set VF disable state */
@@ -193,16 +175,9 @@ void ice_free_vfs(struct ice_pf *pf)
wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
}
- /* clear malicious info since the VF is getting released */
- if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
- list_del(&vf->mbx_info.list_entry);
-
mutex_unlock(&vf->cfg_lock);
}
- if (ice_sriov_free_msix_res(pf))
- dev_err(dev, "Failed to free MSIX resources used by SR-IOV\n");
-
vfs->num_qps_per = 0;
ice_free_vf_entries(pf);
@@ -372,40 +347,6 @@ void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
}
/**
- * ice_sriov_set_msix_res - Set any used MSIX resources
- * @pf: pointer to PF structure
- * @num_msix_needed: number of MSIX vectors needed for all SR-IOV VFs
- *
- * This function allows SR-IOV resources to be taken from the end of the PF's
- * allowed HW MSIX vectors so that the irq_tracker will not be affected. We
- * just set the pf->sriov_base_vector and return success.
- *
- * If there are not enough resources available, return an error. This should
- * always be caught by ice_set_per_vf_res().
- *
- * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
- * in the PF's space available for SR-IOV.
- */
-static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
-{
- u16 total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
- int vectors_used = ice_get_max_used_msix_vector(pf);
- int sriov_base_vector;
-
- sriov_base_vector = total_vectors - num_msix_needed;
-
- /* make sure we only grab irq_tracker entries from the list end and
- * that we have enough available MSIX vectors
- */
- if (sriov_base_vector < vectors_used)
- return -EINVAL;
-
- pf->sriov_base_vector = sriov_base_vector;
-
- return 0;
-}
-
-/**
* ice_set_per_vf_res - check if vectors and queues are available
* @pf: pointer to the PF structure
* @num_vfs: the number of SR-IOV VFs being configured
@@ -429,11 +370,9 @@ static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
*/
static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
{
- int vectors_used = ice_get_max_used_msix_vector(pf);
u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
int msix_avail_per_vf, msix_avail_for_sriov;
struct device *dev = ice_pf_to_dev(pf);
- int err;
lockdep_assert_held(&pf->vfs.table_lock);
@@ -441,8 +380,7 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
return -EINVAL;
/* determine MSI-X resources per VF */
- msix_avail_for_sriov = pf->hw.func_caps.common_cap.num_msix_vectors -
- vectors_used;
+ msix_avail_for_sriov = pf->virt_irq_tracker.num_entries;
msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
@@ -481,13 +419,6 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
return -ENOSPC;
}
- err = ice_sriov_set_msix_res(pf, num_msix_per_vf * num_vfs);
- if (err) {
- dev_err(dev, "Unable to set MSI-X resources for %d VFs, err %d\n",
- num_vfs, err);
- return err;
- }
-
/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
pf->vfs.num_msix_per = num_msix_per_vf;
@@ -498,52 +429,6 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
}
/**
- * ice_sriov_get_irqs - get irqs for SR-IOV usacase
- * @pf: pointer to PF structure
- * @needed: number of irqs to get
- *
- * This returns the first MSI-X vector index in PF space that is used by this
- * VF. This index is used when accessing PF relative registers such as
- * GLINT_VECT2FUNC and GLINT_DYN_CTL.
- * This will always be the OICR index in the AVF driver so any functionality
- * using vf->first_vector_idx for queue configuration_id: id of VF which will
- * use this irqs
- *
- * Only SRIOV specific vectors are tracked in sriov_irq_bm. SRIOV vectors are
- * allocated from the end of global irq index. First bit in sriov_irq_bm means
- * last irq index etc. It simplifies extension of SRIOV vectors.
- * They will be always located from sriov_base_vector to the last irq
- * index. While increasing/decreasing sriov_base_vector can be moved.
- */
-static int ice_sriov_get_irqs(struct ice_pf *pf, u16 needed)
-{
- int res = bitmap_find_next_zero_area(pf->sriov_irq_bm,
- pf->sriov_irq_size, 0, needed, 0);
- /* conversion from number in bitmap to global irq index */
- int index = pf->sriov_irq_size - res - needed;
-
- if (res >= pf->sriov_irq_size || index < pf->sriov_base_vector)
- return -ENOENT;
-
- bitmap_set(pf->sriov_irq_bm, res, needed);
- return index;
-}
-
-/**
- * ice_sriov_free_irqs - free irqs used by the VF
- * @pf: pointer to PF structure
- * @vf: pointer to VF structure
- */
-static void ice_sriov_free_irqs(struct ice_pf *pf, struct ice_vf *vf)
-{
- /* Move back from first vector index to first index in bitmap */
- int bm_i = pf->sriov_irq_size - vf->first_vector_idx - vf->num_msix;
-
- bitmap_clear(pf->sriov_irq_bm, bm_i, vf->num_msix);
- vf->first_vector_idx = 0;
-}
-
-/**
* ice_init_vf_vsi_res - initialize/setup VF VSI resources
* @vf: VF to initialize/setup the VSI for
*
@@ -556,7 +441,7 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf)
struct ice_vsi *vsi;
int err;
- vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+ vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
if (vf->first_vector_idx < 0)
return -ENOMEM;
@@ -856,16 +741,10 @@ err_free_entries:
*/
static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
- int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
int ret;
- pf->sriov_irq_bm = bitmap_zalloc(total_vectors, GFP_KERNEL);
- if (!pf->sriov_irq_bm)
- return -ENOMEM;
- pf->sriov_irq_size = total_vectors;
-
/* Disable global interrupt 0 so we don't try to handle the VFLR. */
wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
@@ -918,7 +797,6 @@ err_unroll_intr:
/* rearm interrupts here */
ice_irq_dynamic_ena(hw, NULL, NULL);
clear_bit(ICE_OICR_INTR_DIS, pf->state);
- bitmap_free(pf->sriov_irq_bm);
return ret;
}
@@ -992,16 +870,7 @@ u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
- return pf->sriov_irq_size - ice_get_max_used_msix_vector(pf);
-}
-
-static int ice_sriov_move_base_vector(struct ice_pf *pf, int move)
-{
- if (pf->sriov_base_vector - move < ice_get_max_used_msix_vector(pf))
- return -ENOMEM;
-
- pf->sriov_base_vector -= move;
- return 0;
+ return pf->virt_irq_tracker.num_entries;
}
static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
@@ -1020,7 +889,8 @@ static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
continue;
ice_dis_vf_mappings(tmp_vf);
- ice_sriov_free_irqs(pf, tmp_vf);
+ ice_virt_free_irqs(pf, tmp_vf->first_vector_idx,
+ tmp_vf->num_msix);
vf_ids[to_remap] = tmp_vf->vf_id;
to_remap += 1;
@@ -1032,7 +902,7 @@ static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
continue;
tmp_vf->first_vector_idx =
- ice_sriov_get_irqs(pf, tmp_vf->num_msix);
+ ice_virt_get_irqs(pf, tmp_vf->num_msix);
/* there is no need to rebuild VSI as we are only changing the
* vector indexes not amount of MSI-X or queues
*/
@@ -1063,7 +933,6 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
bool needs_rebuild = false;
struct ice_vsi *vsi;
struct ice_vf *vf;
- int id;
if (!ice_get_num_vfs(pf))
return -ENOENT;
@@ -1082,17 +951,7 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
if (msix_vec_count < ICE_MIN_INTR_PER_VF)
return -EINVAL;
- /* Transition of PCI VF function number to function_id */
- for (id = 0; id < pci_num_vf(pdev); id++) {
- if (vf_dev->devfn == pci_iov_virtfn_devfn(pdev, id))
- break;
- }
-
- if (id == pci_num_vf(pdev))
- return -ENOENT;
-
- vf = ice_get_vf_by_id(pf, id);
-
+ vf = ice_get_vf_by_dev(pf, vf_dev);
if (!vf)
return -ENOENT;
@@ -1102,23 +961,24 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
return -ENOENT;
}
- prev_msix = vf->num_msix;
- prev_queues = vf->num_vf_qs;
-
- if (ice_sriov_move_base_vector(pf, msix_vec_count - prev_msix)) {
+ /* No need to rebuild if we're setting to the same value */
+ if (msix_vec_count == vf->num_msix) {
ice_put_vf(vf);
- return -ENOSPC;
+ return 0;
}
+ prev_msix = vf->num_msix;
+ prev_queues = vf->num_vf_qs;
+
ice_dis_vf_mappings(vf);
- ice_sriov_free_irqs(pf, vf);
+ ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix);
/* Remap all VFs beside the one is now configured */
ice_sriov_remap_vectors(pf, vf->vf_id);
vf->num_msix = msix_vec_count;
vf->num_vf_qs = queues;
- vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+ vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
if (vf->first_vector_idx < 0)
goto unroll;
@@ -1147,7 +1007,8 @@ unroll:
vf->num_msix = prev_msix;
vf->num_vf_qs = prev_queues;
- vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+
+ vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
if (vf->first_vector_idx < 0) {
ice_put_vf(vf);
return -EINVAL;
@@ -1300,10 +1161,12 @@ static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
+ struct ice_aqc_event_lan_overflow *cmd;
u32 gldcb_rtctq, queue;
struct ice_vf *vf;
- gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
+ cmd = libie_aq_raw(&event->desc);
+ gldcb_rtctq = le32_to_cpu(cmd->prtdcb_ruptq);
dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);
/* event returns device global Rx queue number */
@@ -1327,8 +1190,7 @@ ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
*/
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_pf *pf = np->vsi->back;
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_vsi *vf_vsi;
struct device *dev;
struct ice_vf *vf;
@@ -1537,6 +1399,9 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
mutex_lock(&vf->cfg_lock);
+ while (!trusted && vf->num_mac_lldp)
+ ice_vf_update_mac_lldp_num(vf, ice_get_vf_vsi(vf), false);
+
vf->trusted = trusted;
ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
index 96549ca5c52c..6c4fad09a527 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.h
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -3,9 +3,9 @@
#ifndef _ICE_SRIOV_H_
#define _ICE_SRIOV_H_
-#include "ice_virtchnl_fdir.h"
+#include "virt/fdir.h"
#include "ice_vf_lib.h"
-#include "ice_virtchnl.h"
+#include "virt/virtchnl.h"
/* Static VF transaction/status register def */
#define VF_DEVICE_STATUS 0xAA
@@ -64,6 +64,7 @@ bool
ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto);
u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev);
int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count);
+int ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id);
#else /* CONFIG_PCI_IOV */
static inline void ice_process_vflr_event(struct ice_pf *pf) { }
static inline void ice_free_vfs(struct ice_pf *pf) { }
@@ -164,5 +165,11 @@ ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
{
return -EOPNOTSUPP;
}
+
+static inline int ice_vf_vsi_dis_single_txq(struct ice_vf *vf,
+ struct ice_vsi *vsi, u16 q_id)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_SRIOV_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 0e740342e294..84848f0123e7 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -1511,11 +1511,11 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
struct ice_sq_cd *cd)
{
struct ice_aqc_get_sw_cfg *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
- cmd = &desc.params.get_sw_conf;
+ cmd = libie_aq_raw(&desc);
cmd->element = cpu_to_le16(*req_desc);
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
@@ -1541,11 +1541,11 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
{
struct ice_aqc_add_update_free_vsi_resp *res;
struct ice_aqc_add_get_update_free_vsi *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.vsi_cmd;
- res = &desc.params.add_update_free_vsi_res;
+ cmd = libie_aq_raw(&desc);
+ res = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
@@ -1556,7 +1556,7 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), cd);
@@ -1585,11 +1585,11 @@ ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
{
struct ice_aqc_add_update_free_vsi_resp *resp;
struct ice_aqc_add_get_update_free_vsi *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.vsi_cmd;
- resp = &desc.params.add_update_free_vsi_res;
+ cmd = libie_aq_raw(&desc);
+ resp = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
@@ -1620,17 +1620,17 @@ ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
{
struct ice_aqc_add_update_free_vsi_resp *resp;
struct ice_aqc_add_get_update_free_vsi *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.vsi_cmd;
- resp = &desc.params.add_update_free_vsi_res;
+ cmd = libie_aq_raw(&desc);
+ resp = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
cmd->vsi_num = cpu_to_le16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), cd);
@@ -1944,7 +1944,8 @@ int
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
- struct ice_aq_desc desc;
+ struct ice_aqc_sw_rules *cmd;
+ struct libie_aq_desc desc;
int status;
if (opc != ice_aqc_opc_add_sw_rules &&
@@ -1953,13 +1954,13 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
+ cmd = libie_aq_raw(&desc);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
- desc.params.sw_rules.num_rules_fltr_entry_index =
- cpu_to_le16(num_rules);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
+ cmd->num_rules_fltr_entry_index = cpu_to_le16(num_rules);
status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
if (opc != ice_aqc_opc_add_sw_rules &&
- hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
+ hw->adminq.sq_last_status == LIBIE_AQ_RC_ENOENT)
status = -ENOENT;
if (!status) {
@@ -1989,14 +1990,14 @@ ice_aq_add_recipe(struct ice_hw *hw,
u16 num_recipes, struct ice_sq_cd *cd)
{
struct ice_aqc_add_get_recipe *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 buf_size;
- cmd = &desc.params.add_get_recipe;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
cmd->num_sub_recipes = cpu_to_le16(num_recipes);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
buf_size = num_recipes * sizeof(*s_recipe_list);
@@ -2026,14 +2027,14 @@ ice_aq_get_recipe(struct ice_hw *hw,
u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
{
struct ice_aqc_add_get_recipe *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
u16 buf_size;
int status;
if (*num_recipes != ICE_MAX_NUM_RECIPES)
return -EINVAL;
- cmd = &desc.params.add_get_recipe;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
cmd->return_index = cpu_to_le16(recipe_root);
@@ -2118,9 +2119,9 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc,
struct ice_sq_cd *cd)
{
struct ice_aqc_recipe_to_profile *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
- cmd = &desc.params.recipe_to_profile;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
cmd->profile_id = cpu_to_le16(profile_id);
/* Set the recipe ID bit in the bitmask to let the device know which
@@ -2144,10 +2145,10 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,
struct ice_sq_cd *cd)
{
struct ice_aqc_recipe_to_profile *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
int status;
- cmd = &desc.params.recipe_to_profile;
+ cmd = libie_aq_raw(&desc);
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
cmd->profile_id = cpu_to_le16(profile_id);
@@ -3146,7 +3147,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
u16 vsi_handle_arr[2];
/* A rule already exists with the new VSI being added */
- if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
+ if (cur_fltr->vsi_handle == new_fltr->vsi_handle)
return -EEXIST;
vsi_handle_arr[0] = cur_fltr->vsi_handle;
@@ -4784,7 +4785,8 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
*/
if (found && recp[i].tun_type == rinfo->tun_type &&
recp[i].need_pass_l2 == rinfo->need_pass_l2 &&
- recp[i].allow_pass_l2 == rinfo->allow_pass_l2)
+ recp[i].allow_pass_l2 == rinfo->allow_pass_l2 &&
+ recp[i].priority == rinfo->priority)
return i; /* Return the recipe ID */
}
}
@@ -5977,7 +5979,7 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
/* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
- return 0;
+ return -EEXIST;
/* Update the previously created VSI list set with
* the new VSI ID passed in
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index ea39b999a0d0..fb9ea7f8ef44 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -12,14 +12,11 @@
/**
* ice_tc_count_lkups - determine lookup count for switch filter
* @flags: TC-flower flags
- * @headers: Pointer to TC flower filter header structure
* @fltr: Pointer to outer TC filter structure
*
- * Determine lookup count based on TC flower input for switch filter.
+ * Return: lookup count based on TC flower input for a switch filter.
*/
-static int
-ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
- struct ice_tc_flower_fltr *fltr)
+static int ice_tc_count_lkups(u32 flags, struct ice_tc_flower_fltr *fltr)
{
int lkups_cnt = 1; /* 0th lookup is metadata */
@@ -684,26 +681,26 @@ static int ice_tc_setup_action(struct net_device *filter_dev,
fltr->action.fltr_act = action;
if (ice_is_port_repr_netdev(filter_dev) &&
- ice_is_port_repr_netdev(target_dev)) {
+ ice_is_port_repr_netdev(target_dev) &&
+ fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
repr = ice_netdev_to_repr(target_dev);
fltr->dest_vsi = repr->src_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
} else if (ice_is_port_repr_netdev(filter_dev) &&
- ice_tc_is_dev_uplink(target_dev)) {
+ ice_tc_is_dev_uplink(target_dev) &&
+ fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
repr = ice_netdev_to_repr(filter_dev);
fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
} else if (ice_tc_is_dev_uplink(filter_dev) &&
- ice_is_port_repr_netdev(target_dev)) {
+ ice_is_port_repr_netdev(target_dev) &&
+ fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
repr = ice_netdev_to_repr(target_dev);
fltr->dest_vsi = repr->src_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
} else {
NL_SET_ERR_MSG_MOD(fltr->extack,
- "Unsupported netdevice in switchdev mode");
+ "The action is not supported for this netdevice");
return -EINVAL;
}
@@ -716,13 +713,11 @@ ice_tc_setup_drop_action(struct net_device *filter_dev,
{
fltr->action.fltr_act = ICE_DROP_PACKET;
- if (ice_is_port_repr_netdev(filter_dev)) {
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
- } else if (ice_tc_is_dev_uplink(filter_dev)) {
- fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
- } else {
+ if (!ice_tc_is_dev_uplink(filter_dev) &&
+ !(ice_is_port_repr_netdev(filter_dev) &&
+ fltr->direction == ICE_ESWITCH_FLTR_INGRESS)) {
NL_SET_ERR_MSG_MOD(fltr->extack,
- "Unsupported netdevice in switchdev mode");
+ "The action is not supported for this netdevice");
return -EINVAL;
}
@@ -767,10 +762,157 @@ static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
return 0;
}
+static bool ice_is_fltr_lldp(struct ice_tc_flower_fltr *fltr)
+{
+ return fltr->outer_headers.l2_key.n_proto == htons(ETH_P_LLDP);
+}
+
+static bool ice_is_fltr_pf_tx_lldp(struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_vsi *vsi = fltr->src_vsi, *uplink;
+
+ if (!ice_is_switchdev_running(vsi->back))
+ return false;
+
+ uplink = vsi->back->eswitch.uplink_vsi;
+ return vsi == uplink && fltr->action.fltr_act == ICE_DROP_PACKET &&
+ ice_is_fltr_lldp(fltr) &&
+ fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+ fltr->flags == ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
+}
+
+static bool ice_is_fltr_vf_tx_lldp(struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_vsi *vsi = fltr->src_vsi, *uplink;
+
+ uplink = vsi->back->eswitch.uplink_vsi;
+ return fltr->src_vsi->type == ICE_VSI_VF && ice_is_fltr_lldp(fltr) &&
+ fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+ fltr->dest_vsi == uplink;
+}
+
+static struct ice_tc_flower_fltr *
+ice_find_pf_tx_lldp_fltr(struct ice_pf *pf)
+{
+ struct ice_tc_flower_fltr *fltr;
+
+ hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
+ if (ice_is_fltr_pf_tx_lldp(fltr))
+ return fltr;
+
+ return NULL;
+}
+
+static bool ice_any_vf_lldp_tx_ena(struct ice_pf *pf)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ ice_for_each_vf(pf, bkt, vf)
+ if (vf->lldp_tx_ena)
+ return true;
+
+ return false;
+}
+
+int ice_pass_vf_tx_lldp(struct ice_vsi *vsi, bool deinit)
+{
+ struct ice_rule_query_data remove_entry = {
+ .rid = vsi->vf->lldp_recipe_id,
+ .rule_id = vsi->vf->lldp_rule_id,
+ .vsi_handle = vsi->idx,
+ };
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ if (vsi->vf->lldp_tx_ena)
+ return 0;
+
+ if (!deinit && !ice_find_pf_tx_lldp_fltr(vsi->back))
+ return -EINVAL;
+
+ if (!deinit && ice_any_vf_lldp_tx_ena(pf))
+ return -EINVAL;
+
+ err = ice_rem_adv_rule_by_id(&pf->hw, &remove_entry);
+ if (!err)
+ vsi->vf->lldp_tx_ena = true;
+
+ return err;
+}
+
+int ice_drop_vf_tx_lldp(struct ice_vsi *vsi, bool init)
+{
+ struct ice_rule_query_data rule_added;
+ struct ice_adv_rule_info rinfo = {
+ .priority = 7,
+ .src_vsi = vsi->idx,
+ .sw_act = {
+ .src = vsi->idx,
+ .flag = ICE_FLTR_TX,
+ .fltr_act = ICE_DROP_PACKET,
+ .vsi_handle = vsi->idx,
+ },
+ .flags_info.act_valid = true,
+ };
+ struct ice_adv_lkup_elem list[3];
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ if (!init && !vsi->vf->lldp_tx_ena)
+ return 0;
+
+ memset(list, 0, sizeof(list));
+ ice_rule_add_direction_metadata(&list[0]);
+ ice_rule_add_src_vsi_metadata(&list[1]);
+ list[2].type = ICE_ETYPE_OL;
+ list[2].h_u.ethertype.ethtype_id = htons(ETH_P_LLDP);
+ list[2].m_u.ethertype.ethtype_id = htons(0xFFFF);
+
+ err = ice_add_adv_rule(&pf->hw, list, ARRAY_SIZE(list), &rinfo,
+ &rule_added);
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "Failed to add an LLDP rule to VSI 0x%X: %d\n",
+ vsi->idx, err);
+ } else {
+ vsi->vf->lldp_recipe_id = rule_added.rid;
+ vsi->vf->lldp_rule_id = rule_added.rule_id;
+ vsi->vf->lldp_tx_ena = false;
+ }
+
+ return err;
+}
+
+static void ice_handle_add_pf_lldp_drop_rule(struct ice_vsi *vsi)
+{
+ struct ice_tc_flower_fltr *fltr;
+ struct ice_pf *pf = vsi->back;
+
+ hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node) {
+ if (!ice_is_fltr_vf_tx_lldp(fltr))
+ continue;
+ ice_pass_vf_tx_lldp(fltr->src_vsi, true);
+ break;
+ }
+}
+
+static void ice_handle_del_pf_lldp_drop_rule(struct ice_pf *pf)
+{
+ int i;
+
+ /* Make the VF LLDP fwd to uplink rule dormant */
+ ice_for_each_vsi(pf, i) {
+ struct ice_vsi *vf_vsi = pf->vsi[i];
+
+ if (vf_vsi && vf_vsi->type == ICE_VSI_VF)
+ ice_drop_vf_tx_lldp(vf_vsi, false);
+ }
+}
+
static int
ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
{
- struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
struct ice_adv_rule_info rule_info = { 0 };
struct ice_rule_query_data rule_added;
struct ice_hw *hw = &vsi->back->hw;
@@ -785,7 +927,10 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
return -EOPNOTSUPP;
}
- lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);
+ if (ice_is_fltr_vf_tx_lldp(fltr))
+ return ice_pass_vf_tx_lldp(vsi, false);
+
+ lkups_cnt = ice_tc_count_lkups(flags, fltr);
list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
if (!list)
return -ENOMEM;
@@ -814,6 +959,11 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
rule_info.sw_act.src = hw->pf_id;
rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
} else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+ !fltr->dest_vsi && vsi == vsi->back->eswitch.uplink_vsi) {
+ /* PF to Uplink */
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.src = vsi->idx;
+ } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
fltr->dest_vsi == vsi->back->eswitch.uplink_vsi) {
/* VF to Uplink */
rule_info.sw_act.flag |= ICE_FLTR_TX;
@@ -846,11 +996,17 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exist");
ret = -EINVAL;
goto exit;
+ } else if (ret == -ENOSPC) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter: insufficient space available.");
+ goto exit;
} else if (ret) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
goto exit;
}
+ if (ice_is_fltr_pf_tx_lldp(fltr))
+ ice_handle_add_pf_lldp_drop_rule(vsi);
+
/* store the output params, which are needed later for removing
* advanced switch filter
*/
@@ -985,7 +1141,6 @@ static int
ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
struct ice_tc_flower_fltr *tc_fltr)
{
- struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
struct ice_adv_rule_info rule_info = {0};
struct ice_rule_query_data rule_added;
struct ice_adv_lkup_elem *list;
@@ -1021,7 +1176,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
return PTR_ERR(dest_vsi);
}
- lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
+ lkups_cnt = ice_tc_count_lkups(flags, tc_fltr);
list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
if (!list)
return -ENOMEM;
@@ -1056,8 +1211,13 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
tc_fltr->action.fwd.q.hw_queue, lkups_cnt);
break;
case ICE_DROP_PACKET:
- rule_info.sw_act.flag |= ICE_FLTR_RX;
- rule_info.sw_act.src = hw->pf_id;
+ if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.src = vsi->idx;
+ } else {
+ rule_info.sw_act.flag |= ICE_FLTR_RX;
+ rule_info.sw_act.src = hw->pf_id;
+ }
rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
break;
default:
@@ -1071,6 +1231,10 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
"Unable to add filter because it already exist");
ret = -EINVAL;
goto exit;
+ } else if (ret == -ENOSPC) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack,
+ "Unable to add filter: insufficient space available.");
+ goto exit;
} else if (ret) {
NL_SET_ERR_MSG_MOD(tc_fltr->extack,
"Unable to add filter due to error");
@@ -1463,11 +1627,16 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
* @filter_dev: Pointer to device on which filter is being added
* @f: Pointer to struct flow_cls_offload
* @fltr: Pointer to filter structure
+ * @ingress: if the rule is added to an ingress block
+ *
+ * Return: 0 if the flower was parsed successfully, -EINVAL if the flower
+ * cannot be parsed, -EOPNOTSUPP if such a filter cannot be configured
+ * for the given VSI.
*/
static int
ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
struct flow_cls_offload *f,
- struct ice_tc_flower_fltr *fltr)
+ struct ice_tc_flower_fltr *fltr, bool ingress)
{
struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
@@ -1551,6 +1720,20 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
}
+ if (!ingress) {
+ bool switchdev =
+ ice_is_eswitch_mode_switchdev(vsi->back);
+
+ if (switchdev != (n_proto_key == ETH_P_LLDP)) {
+ NL_SET_ERR_MSG_FMT_MOD(fltr->extack,
+ "%sLLDP filtering is not supported on egress in %s mode",
+ switchdev ? "Non-" : "",
+ switchdev ? "switchdev" :
+ "legacy");
+ return -EOPNOTSUPP;
+ }
+ }
+
headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask);
headers->l3_key.ip_proto = match.key->ip_proto;
@@ -1726,6 +1909,14 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
return -EINVAL;
}
}
+
+ /* Ingress filter on representor results in an egress filter in HW
+ * and vice versa
+ */
+ ingress = ice_is_port_repr_netdev(filter_dev) ? !ingress : ingress;
+ fltr->direction = ingress ? ICE_ESWITCH_FLTR_INGRESS :
+ ICE_ESWITCH_FLTR_EGRESS;
+
return 0;
}
@@ -1939,6 +2130,12 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
struct ice_pf *pf = vsi->back;
int err;
+ if (ice_is_fltr_pf_tx_lldp(fltr))
+ ice_handle_del_pf_lldp_drop_rule(pf);
+
+ if (ice_is_fltr_vf_tx_lldp(fltr))
+ return ice_drop_vf_tx_lldp(vsi, false);
+
rule_rem.rid = fltr->rid;
rule_rem.rule_id = fltr->rule_id;
rule_rem.vsi_handle = fltr->dest_vsi_handle;
@@ -1975,14 +2172,18 @@ static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
* @vsi: Pointer to VSI
* @f: Pointer to flower offload structure
* @__fltr: Pointer to struct ice_tc_flower_fltr
+ * @ingress: if the rule is added to an ingress block
*
* This function parses TC-flower input fields, parses action,
* and adds a filter.
+ *
+ * Return: 0 if the filter was successfully added,
+ * negative error code otherwise.
*/
static int
ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
struct flow_cls_offload *f,
- struct ice_tc_flower_fltr **__fltr)
+ struct ice_tc_flower_fltr **__fltr, bool ingress)
{
struct ice_tc_flower_fltr *fltr;
int err;
@@ -1999,7 +2200,7 @@ ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
fltr->src_vsi = vsi;
INIT_HLIST_NODE(&fltr->tc_flower_node);
- err = ice_parse_cls_flower(netdev, vsi, f, fltr);
+ err = ice_parse_cls_flower(netdev, vsi, f, fltr, ingress);
if (err < 0)
goto err;
@@ -2042,10 +2243,13 @@ ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie)
* @netdev: Pointer to filter device
* @vsi: Pointer to VSI
* @cls_flower: Pointer to flower offload structure
+ * @ingress: if the rule is added to an ingress block
+ *
+ * Return: 0 if the flower was successfully added,
+ * negative error code otherwise.
*/
-int
-ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
- struct flow_cls_offload *cls_flower)
+int ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower, bool ingress)
{
struct netlink_ext_ack *extack = cls_flower->common.extack;
struct net_device *vsi_netdev = vsi->netdev;
@@ -2080,7 +2284,7 @@ ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
}
/* prep and add TC-flower filter in HW */
- err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr);
+ err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr, ingress);
if (err)
return err;
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index d84f153517ec..8a3ab2f22af9 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -211,13 +211,14 @@ static inline int ice_chnl_dmac_fltr_cnt(struct ice_pf *pf)
}
struct ice_vsi *ice_locate_vsi_using_queue(struct ice_vsi *vsi, int queue);
-int
-ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
- struct flow_cls_offload *cls_flower);
-int
-ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower);
+int ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower, bool ingress);
+int ice_del_cls_flower(struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower);
void ice_replay_tc_fltrs(struct ice_pf *pf);
bool ice_is_tunnel_supported(struct net_device *dev);
+int ice_drop_vf_tx_lldp(struct ice_vsi *vsi, bool init);
+int ice_pass_vf_tx_lldp(struct ice_vsi *vsi, bool deinit);
static inline bool ice_is_forward_action(enum ice_sw_fwd_act_type fltr_act)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h
index 07aab6e130cd..4f35ef8d6b29 100644
--- a/drivers/net/ethernet/intel/ice/ice_trace.h
+++ b/drivers/net/ethernet/intel/ice/ice_trace.h
@@ -130,7 +130,7 @@ DECLARE_EVENT_CLASS(ice_tx_template,
__entry->buf = buf;
__assign_str(devname);),
- TP_printk("netdev: %s ring: %pK desc: %pK buf %pK", __get_str(devname),
+ TP_printk("netdev: %s ring: %p desc: %p buf %p", __get_str(devname),
__entry->ring, __entry->desc, __entry->buf)
);
@@ -158,7 +158,7 @@ DECLARE_EVENT_CLASS(ice_rx_template,
__entry->desc = desc;
__assign_str(devname);),
- TP_printk("netdev: %s ring: %pK desc: %pK", __get_str(devname),
+ TP_printk("netdev: %s ring: %p desc: %p", __get_str(devname),
__entry->ring, __entry->desc)
);
DEFINE_EVENT(ice_rx_template, ice_clean_rx_irq,
@@ -182,7 +182,7 @@ DECLARE_EVENT_CLASS(ice_rx_indicate_template,
__entry->skb = skb;
__assign_str(devname);),
- TP_printk("netdev: %s ring: %pK desc: %pK skb %pK", __get_str(devname),
+ TP_printk("netdev: %s ring: %p desc: %p skb %p", __get_str(devname),
__entry->ring, __entry->desc, __entry->skb)
);
@@ -205,7 +205,7 @@ DECLARE_EVENT_CLASS(ice_xmit_template,
__entry->skb = skb;
__assign_str(devname);),
- TP_printk("netdev: %s skb: %pK ring: %pK", __get_str(devname),
+ TP_printk("netdev: %s skb: %p ring: %p", __get_str(devname),
__entry->skb, __entry->ring)
);
@@ -228,7 +228,7 @@ DECLARE_EVENT_CLASS(ice_tx_tstamp_template,
TP_fast_assign(__entry->skb = skb;
__entry->idx = idx;),
- TP_printk("skb %pK idx %d",
+ TP_printk("skb %p idx %d",
__entry->skb, __entry->idx)
);
#define DEFINE_TX_TSTAMP_OP_EVENT(name) \
diff --git a/drivers/net/ethernet/intel/ice/ice_tspll.c b/drivers/net/ethernet/intel/ice/ice_tspll.c
new file mode 100644
index 000000000000..66320a4ab86f
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_tspll.c
@@ -0,0 +1,626 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_ptp_hw.h"
+
+static const struct
+ice_tspll_params_e82x e82x_tspll_params[NUM_ICE_TSPLL_FREQ] = {
+ [ICE_TSPLL_FREQ_25_000] = {
+ .refclk_pre_div = 1,
+ .post_pll_div = 6,
+ .feedback_div = 197,
+ .frac_n_div = 2621440,
+ },
+ [ICE_TSPLL_FREQ_122_880] = {
+ .refclk_pre_div = 5,
+ .post_pll_div = 7,
+ .feedback_div = 223,
+ .frac_n_div = 524288
+ },
+ [ICE_TSPLL_FREQ_125_000] = {
+ .refclk_pre_div = 5,
+ .post_pll_div = 7,
+ .feedback_div = 223,
+ .frac_n_div = 524288
+ },
+ [ICE_TSPLL_FREQ_153_600] = {
+ .refclk_pre_div = 5,
+ .post_pll_div = 6,
+ .feedback_div = 159,
+ .frac_n_div = 1572864
+ },
+ [ICE_TSPLL_FREQ_156_250] = {
+ .refclk_pre_div = 5,
+ .post_pll_div = 6,
+ .feedback_div = 159,
+ .frac_n_div = 1572864
+ },
+ [ICE_TSPLL_FREQ_245_760] = {
+ .refclk_pre_div = 10,
+ .post_pll_div = 7,
+ .feedback_div = 223,
+ .frac_n_div = 524288
+ },
+};
+
+/**
+ * ice_tspll_clk_freq_str - Convert time_ref_freq to string
+ * @clk_freq: Clock frequency
+ *
+ * Return: specified TIME_REF clock frequency converted to a string.
+ */
+static const char *ice_tspll_clk_freq_str(enum ice_tspll_freq clk_freq)
+{
+ switch (clk_freq) {
+ case ICE_TSPLL_FREQ_25_000:
+ return "25 MHz";
+ case ICE_TSPLL_FREQ_122_880:
+ return "122.88 MHz";
+ case ICE_TSPLL_FREQ_125_000:
+ return "125 MHz";
+ case ICE_TSPLL_FREQ_153_600:
+ return "153.6 MHz";
+ case ICE_TSPLL_FREQ_156_250:
+ return "156.25 MHz";
+ case ICE_TSPLL_FREQ_245_760:
+ return "245.76 MHz";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_tspll_default_freq - Return default frequency for a MAC type
+ * @mac_type: MAC type
+ *
+ * Return: default TSPLL frequency for a supported MAC type, -ERANGE otherwise.
+ */
+static enum ice_tspll_freq ice_tspll_default_freq(enum ice_mac_type mac_type)
+{
+ switch (mac_type) {
+ case ICE_MAC_GENERIC:
+ return ICE_TSPLL_FREQ_25_000;
+ case ICE_MAC_GENERIC_3K_E825:
+ return ICE_TSPLL_FREQ_156_250;
+ default:
+ return -ERANGE;
+ }
+}
+
+/**
+ * ice_tspll_check_params - Check if TSPLL params are correct
+ * @hw: Pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF or TCXO)
+ *
+ * Return: true if TSPLL params are correct, false otherwise.
+ */
+static bool ice_tspll_check_params(struct ice_hw *hw,
+ enum ice_tspll_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ if (clk_freq >= NUM_ICE_TSPLL_FREQ) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid TSPLL frequency %u\n",
+ clk_freq);
+ return false;
+ }
+
+ if (clk_src >= NUM_ICE_CLK_SRC) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
+ clk_src);
+ return false;
+ }
+
+ if ((hw->mac_type == ICE_MAC_GENERIC_3K_E825 ||
+ clk_src == ICE_CLK_SRC_TCXO) &&
+ clk_freq != ice_tspll_default_freq(hw->mac_type)) {
+ dev_warn(ice_hw_to_dev(hw), "Unsupported frequency for this clock source\n");
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * ice_tspll_clk_src_str - Convert time_ref_src to string
+ * @clk_src: Clock source
+ *
+ * Return: specified clock source converted to its string name.
+ */
+static const char *ice_tspll_clk_src_str(enum ice_clk_src clk_src)
+{
+ switch (clk_src) {
+ case ICE_CLK_SRC_TCXO:
+ return "TCXO";
+ case ICE_CLK_SRC_TIME_REF:
+ return "TIME_REF";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_tspll_log_cfg - Log current/new TSPLL configuration
+ * @hw: Pointer to the HW struct
+ * @enable: CGU enabled/disabled
+ * @clk_src: Current clock source
+ * @tspll_freq: Current clock frequency
+ * @lock: CGU lock status
+ * @new_cfg: true if this is a new config
+ */
+static void ice_tspll_log_cfg(struct ice_hw *hw, bool enable, u8 clk_src,
+ u8 tspll_freq, bool lock, bool new_cfg)
+{
+ dev_dbg(ice_hw_to_dev(hw),
+ "%s TSPLL configuration -- %s, src %s, freq %s, PLL %s\n",
+ new_cfg ? "New" : "Current", str_enabled_disabled(enable),
+ ice_tspll_clk_src_str((enum ice_clk_src)clk_src),
+ ice_tspll_clk_freq_str((enum ice_tspll_freq)tspll_freq),
+ lock ? "locked" : "unlocked");
+}
+
+/**
+ * ice_tspll_cfg_e82x - Configure the Clock Generation Unit TSPLL
+ * @hw: Pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCXO)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the PLL which drives the PTP hardware clock.
+ *
+ * Return:
+ * * %0 - success
+ * * %-EINVAL - input parameters are incorrect
+ * * %-EBUSY - failed to lock TSPLL
+ * * %other - CGU read/write failure
+ */
+static int ice_tspll_cfg_e82x(struct ice_hw *hw, enum ice_tspll_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ u32 val, r9, r24;
+ int err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R9, &r9);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R24, &r24);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_RO_BWM_LF, &val);
+ if (err)
+ return err;
+
+ ice_tspll_log_cfg(hw, !!FIELD_GET(ICE_CGU_R23_R24_TSPLL_ENABLE, r24),
+ FIELD_GET(ICE_CGU_R23_R24_TIME_REF_SEL, r24),
+ FIELD_GET(ICE_CGU_R9_TIME_REF_FREQ_SEL, r9),
+ !!FIELD_GET(ICE_CGU_RO_BWM_LF_TRUE_LOCK, val),
+ false);
+
+ /* Disable the PLL before changing the clock source or frequency */
+ if (FIELD_GET(ICE_CGU_R23_R24_TSPLL_ENABLE, r24)) {
+ r24 &= ~ICE_CGU_R23_R24_TSPLL_ENABLE;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R24, r24);
+ if (err)
+ return err;
+ }
+
+ /* Set the frequency */
+ r9 &= ~ICE_CGU_R9_TIME_REF_FREQ_SEL;
+ r9 |= FIELD_PREP(ICE_CGU_R9_TIME_REF_FREQ_SEL, clk_freq);
+ err = ice_write_cgu_reg(hw, ICE_CGU_R9, r9);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL feedback divisor */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R19, &val);
+ if (err)
+ return err;
+
+ val &= ~(ICE_CGU_R19_TSPLL_FBDIV_INTGR_E82X | ICE_CGU_R19_TSPLL_NDIVRATIO);
+ val |= FIELD_PREP(ICE_CGU_R19_TSPLL_FBDIV_INTGR_E82X,
+ e82x_tspll_params[clk_freq].feedback_div);
+ val |= FIELD_PREP(ICE_CGU_R19_TSPLL_NDIVRATIO, 1);
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R19, val);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL post divisor */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R22, &val);
+ if (err)
+ return err;
+
+ val &= ~(ICE_CGU_R22_TIME1588CLK_DIV |
+ ICE_CGU_R22_TIME1588CLK_DIV2);
+ val |= FIELD_PREP(ICE_CGU_R22_TIME1588CLK_DIV,
+ e82x_tspll_params[clk_freq].post_pll_div);
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R22, val);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL pre divisor and clock source */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R24, &r24);
+ if (err)
+ return err;
+
+ r24 &= ~(ICE_CGU_R23_R24_REF1588_CK_DIV | ICE_CGU_R24_FBDIV_FRAC |
+ ICE_CGU_R23_R24_TIME_REF_SEL);
+ r24 |= FIELD_PREP(ICE_CGU_R23_R24_REF1588_CK_DIV,
+ e82x_tspll_params[clk_freq].refclk_pre_div);
+ r24 |= FIELD_PREP(ICE_CGU_R24_FBDIV_FRAC,
+ e82x_tspll_params[clk_freq].frac_n_div);
+ r24 |= FIELD_PREP(ICE_CGU_R23_R24_TIME_REF_SEL, clk_src);
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R24, r24);
+ if (err)
+ return err;
+
+ /* Wait to ensure everything is stable */
+ usleep_range(10, 20);
+
+ /* Finally, enable the PLL */
+ r24 |= ICE_CGU_R23_R24_TSPLL_ENABLE;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R24, r24);
+ if (err)
+ return err;
+
+ /* Wait at least 1 ms to verify if the PLL locks */
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_RO_BWM_LF, &val);
+ if (err)
+ return err;
+
+ if (!(val & ICE_CGU_RO_BWM_LF_TRUE_LOCK)) {
+ dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
+ return -EBUSY;
+ }
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R9, &r9);
+ if (err)
+ return err;
+ err = ice_read_cgu_reg(hw, ICE_CGU_R24, &r24);
+ if (err)
+ return err;
+
+ ice_tspll_log_cfg(hw, !!FIELD_GET(ICE_CGU_R23_R24_TSPLL_ENABLE, r24),
+ FIELD_GET(ICE_CGU_R23_R24_TIME_REF_SEL, r24),
+ FIELD_GET(ICE_CGU_R9_TIME_REF_FREQ_SEL, r9),
+ true, true);
+
+ return 0;
+}
+
+/**
+ * ice_tspll_dis_sticky_bits_e82x - disable TSPLL sticky bits
+ * @hw: Pointer to the HW struct
+ *
+ * Configure the Clock Generation Unit TSPLL sticky bits so they don't latch on
+ * losing TSPLL lock, but always show current state.
+ *
+ * Return: 0 on success, or an error code if the CGU read/write fails.
+ */
+static int ice_tspll_dis_sticky_bits_e82x(struct ice_hw *hw)
+{
+ u32 val;
+ int err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_CNTR_BIST, &val);
+ if (err)
+ return err;
+
+ val &= ~(ICE_CGU_CNTR_BIST_PLLLOCK_SEL_0 |
+ ICE_CGU_CNTR_BIST_PLLLOCK_SEL_1);
+
+ return ice_write_cgu_reg(hw, ICE_CGU_CNTR_BIST, val);
+}
+
+/**
+ * ice_tspll_cfg_e825c - Configure the TSPLL for E825-C
+ * @hw: Pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCXO)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the PLL which drives the PTP hardware clock.
+ *
+ * Return:
+ * * %0 - success
+ * * %-EINVAL - input parameters are incorrect
+ * * %-EBUSY - failed to lock TSPLL
+ * * %other - CGU read/write failure
+ */
+static int ice_tspll_cfg_e825c(struct ice_hw *hw, enum ice_tspll_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ u32 val, r9, r23;
+ int err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R9, &r9);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R23, &r23);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_RO_LOCK, &val);
+ if (err)
+ return err;
+
+ ice_tspll_log_cfg(hw, !!FIELD_GET(ICE_CGU_R23_R24_TSPLL_ENABLE, r23),
+ FIELD_GET(ICE_CGU_R23_R24_TIME_REF_SEL, r23),
+ FIELD_GET(ICE_CGU_R9_TIME_REF_FREQ_SEL, r9),
+ !!FIELD_GET(ICE_CGU_RO_LOCK_TRUE_LOCK, val),
+ false);
+
+ /* Disable the PLL before changing the clock source or frequency */
+ if (FIELD_GET(ICE_CGU_R23_R24_TSPLL_ENABLE, r23)) {
+ r23 &= ~ICE_CGU_R23_R24_TSPLL_ENABLE;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R23, r23);
+ if (err)
+ return err;
+ }
+
+ if (FIELD_GET(ICE_CGU_R9_TIME_SYNC_EN, r9)) {
+ r9 &= ~ICE_CGU_R9_TIME_SYNC_EN;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R9, r9);
+ if (err)
+ return err;
+ }
+
+ /* Set the frequency and enable the correct receiver */
+ r9 &= ~(ICE_CGU_R9_TIME_REF_FREQ_SEL | ICE_CGU_R9_CLK_EREF0_EN |
+ ICE_CGU_R9_TIME_REF_EN);
+ r9 |= FIELD_PREP(ICE_CGU_R9_TIME_REF_FREQ_SEL, clk_freq);
+ if (clk_src == ICE_CLK_SRC_TCXO)
+ r9 |= ICE_CGU_R9_CLK_EREF0_EN;
+ else
+ r9 |= ICE_CGU_R9_TIME_REF_EN;
+ r9 |= ICE_CGU_R9_TIME_SYNC_EN;
+ err = ice_write_cgu_reg(hw, ICE_CGU_R9, r9);
+ if (err)
+ return err;
+
+ /* Choose the reference frequency */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R16, &val);
+ if (err)
+ return err;
+ val &= ~ICE_CGU_R16_TSPLL_CK_REFCLKFREQ;
+ val |= FIELD_PREP(ICE_CGU_R16_TSPLL_CK_REFCLKFREQ,
+ ICE_TSPLL_CK_REFCLKFREQ_E825);
+ err = ice_write_cgu_reg(hw, ICE_CGU_R16, val);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL feedback divisor */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R19, &val);
+ if (err)
+ return err;
+
+ val &= ~(ICE_CGU_R19_TSPLL_FBDIV_INTGR_E825 |
+ ICE_CGU_R19_TSPLL_NDIVRATIO);
+ val |= FIELD_PREP(ICE_CGU_R19_TSPLL_FBDIV_INTGR_E825,
+ ICE_TSPLL_FBDIV_INTGR_E825);
+ val |= FIELD_PREP(ICE_CGU_R19_TSPLL_NDIVRATIO,
+ ICE_TSPLL_NDIVRATIO_E825);
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R19, val);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL post divisors; both fields are constant */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R22, &val);
+ if (err)
+ return err;
+
+ val &= ~(ICE_CGU_R22_TIME1588CLK_DIV |
+ ICE_CGU_R22_TIME1588CLK_DIV2);
+ val |= FIELD_PREP(ICE_CGU_R22_TIME1588CLK_DIV, 5);
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R22, val);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL pre divisor (constant) and clock source */
+ err = ice_read_cgu_reg(hw, ICE_CGU_R23, &r23);
+ if (err)
+ return err;
+
+ r23 &= ~(ICE_CGU_R23_R24_REF1588_CK_DIV | ICE_CGU_R23_R24_TIME_REF_SEL);
+ r23 |= FIELD_PREP(ICE_CGU_R23_R24_TIME_REF_SEL, clk_src);
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R23, r23);
+ if (err)
+ return err;
+
+ /* Clear the R24 register. */
+ err = ice_write_cgu_reg(hw, ICE_CGU_R24, 0);
+ if (err)
+ return err;
+
+ /* Wait to ensure everything is stable */
+ usleep_range(10, 20);
+
+ /* Finally, enable the PLL */
+ r23 |= ICE_CGU_R23_R24_TSPLL_ENABLE;
+
+ err = ice_write_cgu_reg(hw, ICE_CGU_R23, r23);
+ if (err)
+ return err;
+
+ /* Wait at least 1 ms to verify if the PLL locks */
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_RO_LOCK, &val);
+ if (err)
+ return err;
+
+ if (!(val & ICE_CGU_RO_LOCK_TRUE_LOCK)) {
+ dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
+ return -EBUSY;
+ }
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R9, &r9);
+ if (err)
+ return err;
+ err = ice_read_cgu_reg(hw, ICE_CGU_R23, &r23);
+ if (err)
+ return err;
+
+ ice_tspll_log_cfg(hw, !!FIELD_GET(ICE_CGU_R23_R24_TSPLL_ENABLE, r23),
+ FIELD_GET(ICE_CGU_R23_R24_TIME_REF_SEL, r23),
+ FIELD_GET(ICE_CGU_R9_TIME_REF_FREQ_SEL, r9),
+ true, true);
+
+ return 0;
+}
+
+/**
+ * ice_tspll_dis_sticky_bits_e825c - disable TSPLL sticky bits for E825-C
+ * @hw: Pointer to the HW struct
+ *
+ * Configure the Clock Generation Unit TSPLL sticky bits so they don't latch on
+ * losing TSPLL lock, but always show current state.
+ *
+ * Return: 0 on success, or an error code if the CGU read/write fails.
+ */
+static int ice_tspll_dis_sticky_bits_e825c(struct ice_hw *hw)
+{
+ u32 val;
+ int err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_BW_TDC, &val);
+ if (err)
+ return err;
+
+ val &= ~ICE_CGU_BW_TDC_PLLLOCK_SEL;
+
+ return ice_write_cgu_reg(hw, ICE_CGU_BW_TDC, val);
+}
+
+/**
+ * ice_tspll_cfg_pps_out_e825c - Enable/disable 1PPS output and set amplitude
+ * @hw: pointer to the HW struct
+ * @enable: true to enable 1PPS output, false to disable it
+ *
+ * Return: 0 on success, or a negative error code if the CGU read/write fails.
+ */
+int ice_tspll_cfg_pps_out_e825c(struct ice_hw *hw, bool enable)
+{
+ u32 val;
+ int err;
+
+ err = ice_read_cgu_reg(hw, ICE_CGU_R9, &val);
+ if (err)
+ return err;
+
+ val &= ~(ICE_CGU_R9_ONE_PPS_OUT_EN | ICE_CGU_R9_ONE_PPS_OUT_AMP);
+ val |= FIELD_PREP(ICE_CGU_R9_ONE_PPS_OUT_EN, enable) |
+ ICE_CGU_R9_ONE_PPS_OUT_AMP;
+
+ return ice_write_cgu_reg(hw, ICE_CGU_R9, val);
+}
+
+/**
+ * ice_tspll_cfg - Configure the Clock Generation Unit TSPLL
+ * @hw: Pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCXO)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the TSPLL which drives the PTP hardware clock.
+ *
+ * Return: 0 on success, -ERANGE on unsupported MAC type, or another negative
+ * error code if configuring the CGU fails.
+ */
+static int ice_tspll_cfg(struct ice_hw *hw, enum ice_tspll_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ switch (hw->mac_type) {
+ case ICE_MAC_GENERIC:
+ return ice_tspll_cfg_e82x(hw, clk_freq, clk_src);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_tspll_cfg_e825c(hw, clk_freq, clk_src);
+ default:
+ return -ERANGE;
+ }
+}
+
+/**
+ * ice_tspll_dis_sticky_bits - disable TSPLL sticky bits
+ * @hw: Pointer to the HW struct
+ *
+ * Configure the Clock Generation Unit TSPLL sticky bits so they don't latch on
+ * losing TSPLL lock, but always show current state.
+ *
+ * Return: 0 on success, -ERANGE on unsupported MAC type.
+ */
+static int ice_tspll_dis_sticky_bits(struct ice_hw *hw)
+{
+ switch (hw->mac_type) {
+ case ICE_MAC_GENERIC:
+ return ice_tspll_dis_sticky_bits_e82x(hw);
+ case ICE_MAC_GENERIC_3K_E825:
+ return ice_tspll_dis_sticky_bits_e825c(hw);
+ default:
+ return -ERANGE;
+ }
+}
+
+/**
+ * ice_tspll_init - Initialize TSPLL with settings from firmware
+ * @hw: Pointer to the HW structure
+ *
+ * Initialize the Clock Generation Unit of the E82X/E825 device.
+ *
+ * Return: 0 on success, or an error code if CGU read/write/config fails.
+ */
+int ice_tspll_init(struct ice_hw *hw)
+{
+ struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
+ enum ice_tspll_freq tspll_freq;
+ enum ice_clk_src clk_src;
+ int err;
+
+ /* Only E822, E823 and E825 products support TSPLL */
+ if (hw->mac_type != ICE_MAC_GENERIC &&
+ hw->mac_type != ICE_MAC_GENERIC_3K_E825)
+ return 0;
+
+ tspll_freq = (enum ice_tspll_freq)ts_info->time_ref;
+ clk_src = (enum ice_clk_src)ts_info->clk_src;
+ if (!ice_tspll_check_params(hw, tspll_freq, clk_src))
+ return -EINVAL;
+
+ /* Disable sticky lock detection so the reported lock status is accurate */
+ err = ice_tspll_dis_sticky_bits(hw);
+ if (err)
+ return err;
+
+ /* Configure the TSPLL using the parameters from the function
+ * capabilities.
+ */
+ err = ice_tspll_cfg(hw, tspll_freq, clk_src);
+ if (err) {
+ dev_warn(ice_hw_to_dev(hw), "Failed to lock TSPLL to predefined frequency. Retrying with fallback frequency.\n");
+
+ /* Try to lock to internal TCXO as a fallback. */
+ tspll_freq = ice_tspll_default_freq(hw->mac_type);
+ clk_src = ICE_CLK_SRC_TCXO;
+ err = ice_tspll_cfg(hw, tspll_freq, clk_src);
+ if (err)
+ dev_warn(ice_hw_to_dev(hw), "Failed to lock TSPLL to fallback frequency.\n");
+ }
+
+ return err;
+}
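For orientation, a minimal sketch of how a caller could consume ice_tspll_init() during PTP clock bring-up; the function name and surrounding error handling below are hypothetical and not part of this patch:

static int ice_example_ptp_clock_bringup(struct ice_hw *hw)
{
	int err;

	/* Validates the firmware-provided TIME_REF/TCXO settings and
	 * internally retries with the default TCXO frequency if the
	 * TSPLL fails to lock.
	 */
	err = ice_tspll_init(hw);
	if (err) {
		dev_err(ice_hw_to_dev(hw), "TSPLL init failed: %d\n", err);
		return err;
	}

	/* ... continue with PHC and timestamping setup ... */
	return 0;
}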
diff --git a/drivers/net/ethernet/intel/ice/ice_tspll.h b/drivers/net/ethernet/intel/ice/ice_tspll.h
new file mode 100644
index 000000000000..c0b1232cc07c
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_tspll.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2025, Intel Corporation. */
+
+#ifndef _ICE_TSPLL_H_
+#define _ICE_TSPLL_H_
+
+/**
+ * struct ice_tspll_params_e82x - E82X TSPLL parameters
+ * @refclk_pre_div: Reference clock pre-divisor
+ * @post_pll_div: Post PLL divisor
+ * @feedback_div: Feedback divisor
+ * @frac_n_div: Fractional divisor
+ *
+ * Clock Generation Unit parameters used to program the PLL based on the
+ * selected TIME_REF/TCXO frequency.
+ */
+struct ice_tspll_params_e82x {
+ u8 refclk_pre_div;
+ u8 post_pll_div;
+ u8 feedback_div;
+ u32 frac_n_div;
+};
+
+#define ICE_TSPLL_CK_REFCLKFREQ_E825 0x1F
+#define ICE_TSPLL_NDIVRATIO_E825 5
+#define ICE_TSPLL_FBDIV_INTGR_E825 256
+
+int ice_tspll_cfg_pps_out_e825c(struct ice_hw *hw, bool enable);
+int ice_tspll_init(struct ice_hw *hw);
+
+#endif /* _ICE_TSPLL_H_ */
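In the same spirit, a hedged sketch of the other exported helper, ice_tspll_cfg_pps_out_e825c(); the wrapper function below is hypothetical and only illustrates the enable/disable semantics:

static int ice_example_set_pps_out(struct ice_hw *hw, bool on)
{
	int err;

	/* Sets ICE_CGU_R9_ONE_PPS_OUT_EN per 'on' and always programs the
	 * output amplitude bit, as implemented in ice_tspll.c above.
	 */
	err = ice_tspll_cfg_pps_out_e825c(hw, on);
	if (err)
		dev_warn(ice_hw_to_dev(hw), "1PPS output %s failed: %d\n",
			 on ? "enable" : "disable", err);
	return err;
}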
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 5d2d7736fd5f..ad76768a4232 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -7,6 +7,8 @@
#include <linux/netdevice.h>
#include <linux/prefetch.h>
#include <linux/bpf_trace.h>
+#include <linux/net/intel/libie/rx.h>
+#include <net/libeth/xdp.h>
#include <net/dsfield.h>
#include <net/mpls.h>
#include <net/xdp.h>
@@ -20,7 +22,6 @@
#define ICE_RX_HDR_SIZE 256
-#define FDIR_DESC_RXDID 0x40
#define ICE_FDIR_CLEAN_DELAY 10
/**
@@ -112,7 +113,7 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
static void
ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
{
- if (dma_unmap_len(tx_buf, len))
+ if (tx_buf->type != ICE_TX_BUF_XDP_TX && dma_unmap_len(tx_buf, len))
dma_unmap_page(ring->dev,
dma_unmap_addr(tx_buf, dma),
dma_unmap_len(tx_buf, len),
@@ -126,7 +127,7 @@ ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
dev_kfree_skb_any(tx_buf->skb);
break;
case ICE_TX_BUF_XDP_TX:
- page_frag_free(tx_buf->raw_buf);
+ libeth_xdp_return_va(tx_buf->raw_buf, false);
break;
case ICE_TX_BUF_XDP_XMIT:
xdp_return_frame(tx_buf->xdpf);
@@ -145,6 +146,56 @@ static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
}
/**
+ * ice_clean_tstamp_ring - clean time stamp ring
+ * @tx_ring: Tx ring to clean the Time Stamp ring for
+ */
+static void ice_clean_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ u32 size;
+
+ if (!tstamp_ring->desc)
+ return;
+
+ size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc),
+ PAGE_SIZE);
+ memset(tstamp_ring->desc, 0, size);
+ tstamp_ring->next_to_use = 0;
+}
+
+/**
+ * ice_free_tstamp_ring - free time stamp resources per queue
+ * @tx_ring: Tx ring to free the Time Stamp ring for
+ */
+void ice_free_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ u32 size;
+
+ if (!tstamp_ring->desc)
+ return;
+
+ ice_clean_tstamp_ring(tx_ring);
+ size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc),
+ PAGE_SIZE);
+ dmam_free_coherent(tx_ring->dev, size, tstamp_ring->desc,
+ tstamp_ring->dma);
+ tstamp_ring->desc = NULL;
+}
+
+/**
+ * ice_free_tx_tstamp_ring - free time stamp resources per Tx ring
+ * @tx_ring: Tx ring to free the Time Stamp ring for
+ */
+void ice_free_tx_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ ice_free_tstamp_ring(tx_ring);
+ kfree_rcu(tx_ring->tstamp_ring, rcu);
+ tx_ring->tstamp_ring = NULL;
+ tx_ring->flags &= ~ICE_TX_FLAGS_TXTIME;
+}
+
+/**
* ice_clean_tx_ring - Free any empty Tx buffers
* @tx_ring: ring to be cleaned
*/
@@ -182,6 +233,9 @@ tx_skip_free:
/* cleanup Tx queue statistics */
netdev_tx_reset_queue(txring_txq(tx_ring));
+
+ if (ice_is_txtime_cfg(tx_ring))
+ ice_free_tx_tstamp_ring(tx_ring);
}
/**
@@ -333,6 +387,84 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
}
/**
+ * ice_alloc_tstamp_ring - allocate the Time Stamp ring
+ * @tx_ring: Tx ring to allocate the Time Stamp ring for
+ *
+ * Return: 0 on success, negative on error
+ */
+static int ice_alloc_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct ice_tstamp_ring *tstamp_ring;
+
+ /* allocate with kzalloc(), free with kfree_rcu() */
+ tstamp_ring = kzalloc(sizeof(*tstamp_ring), GFP_KERNEL);
+ if (!tstamp_ring)
+ return -ENOMEM;
+
+ tstamp_ring->tx_ring = tx_ring;
+ tx_ring->tstamp_ring = tstamp_ring;
+ tstamp_ring->desc = NULL;
+ tstamp_ring->count = ice_calc_ts_ring_count(tx_ring);
+ tx_ring->flags |= ICE_TX_FLAGS_TXTIME;
+ return 0;
+}
+
+/**
+ * ice_setup_tstamp_ring - set up the Time Stamp ring
+ * @tx_ring: Tx ring to set up the Time Stamp ring for
+ *
+ * Return: 0 on success, negative on error
+ */
+static int ice_setup_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ struct device *dev = tx_ring->dev;
+ u32 size;
+
+ /* round up to nearest page */
+ size = ALIGN(tstamp_ring->count * sizeof(struct ice_ts_desc),
+ PAGE_SIZE);
+ tstamp_ring->desc = dmam_alloc_coherent(dev, size, &tstamp_ring->dma,
+ GFP_KERNEL);
+ if (!tstamp_ring->desc) {
+ dev_err(dev, "Unable to allocate memory for Time stamp Ring, size=%d\n",
+ size);
+ return -ENOMEM;
+ }
+
+ tstamp_ring->next_to_use = 0;
+ return 0;
+}
+
+/**
+ * ice_alloc_setup_tstamp_ring - Allocate and set up the Time Stamp ring
+ * @tx_ring: Tx ring to allocate and set up the Time Stamp ring for
+ *
+ * Return: 0 on success, negative on error
+ */
+int ice_alloc_setup_tstamp_ring(struct ice_tx_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int err;
+
+ err = ice_alloc_tstamp_ring(tx_ring);
+ if (err) {
+ dev_err(dev, "Unable to allocate Time stamp ring for Tx ring %d\n",
+ tx_ring->q_index);
+ return err;
+ }
+
+ err = ice_setup_tstamp_ring(tx_ring);
+ if (err) {
+ dev_err(dev, "Unable to setup Time stamp ring for Tx ring %d\n",
+ tx_ring->q_index);
+ ice_free_tx_tstamp_ring(tx_ring);
+ return err;
+ }
+ return 0;
+}
+
+/**
* ice_setup_tx_ring - Allocate the Tx descriptors
* @tx_ring: the Tx ring to set up
*
@@ -376,61 +508,67 @@ err:
return -ENOMEM;
}
+void ice_rxq_pp_destroy(struct ice_rx_ring *rq)
+{
+ struct libeth_fq fq = {
+ .fqes = rq->rx_fqes,
+ .pp = rq->pp,
+ };
+
+ libeth_rx_fq_destroy(&fq);
+ rq->rx_fqes = NULL;
+ rq->pp = NULL;
+
+ if (!rq->hdr_pp)
+ return;
+
+ fq.fqes = rq->hdr_fqes;
+ fq.pp = rq->hdr_pp;
+
+ libeth_rx_fq_destroy(&fq);
+ rq->hdr_fqes = NULL;
+ rq->hdr_pp = NULL;
+}
+
/**
* ice_clean_rx_ring - Free Rx buffers
* @rx_ring: ring to be cleaned
*/
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
- struct xdp_buff *xdp = &rx_ring->xdp;
- struct device *dev = rx_ring->dev;
u32 size;
- u16 i;
-
- /* ring already cleared, nothing to do */
- if (!rx_ring->rx_buf)
- return;
if (rx_ring->xsk_pool) {
ice_xsk_clean_rx_ring(rx_ring);
goto rx_skip_free;
}
- if (xdp->data) {
- xdp_return_buff(xdp);
- xdp->data = NULL;
- }
+ /* ring already cleared, nothing to do */
+ if (!rx_ring->rx_fqes)
+ return;
+
+ libeth_xdp_return_stash(&rx_ring->xdp);
/* Free all the Rx ring sk_buffs */
- for (i = 0; i < rx_ring->count; i++) {
- struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
+ for (u32 i = rx_ring->next_to_clean; i != rx_ring->next_to_use; ) {
+ libeth_rx_recycle_slow(rx_ring->rx_fqes[i].netmem);
- if (!rx_buf->page)
- continue;
+ if (rx_ring->hdr_pp)
+ libeth_rx_recycle_slow(rx_ring->hdr_fqes[i].netmem);
- /* Invalidate cache lines that may have been written to by
- * device so that we avoid corrupting memory.
- */
- dma_sync_single_range_for_cpu(dev, rx_buf->dma,
- rx_buf->page_offset,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
-
- /* free resources associated with mapping */
- dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
- __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
-
- rx_buf->page = NULL;
- rx_buf->page_offset = 0;
+ if (unlikely(++i == rx_ring->count))
+ i = 0;
}
-rx_skip_free:
- if (rx_ring->xsk_pool)
- memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf)));
- else
- memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf)));
+ if (rx_ring->vsi->type == ICE_VSI_PF &&
+ xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) {
+ xdp_rxq_info_detach_mem_model(&rx_ring->xdp_rxq);
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ }
+ ice_rxq_pp_destroy(rx_ring);
+
+rx_skip_free:
/* Zero out the descriptor ring */
size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
PAGE_SIZE);
@@ -438,7 +576,6 @@ rx_skip_free:
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
- rx_ring->first_desc = 0;
rx_ring->next_to_use = 0;
}
@@ -450,26 +587,20 @@ rx_skip_free:
*/
void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
{
+ struct device *dev = ice_pf_to_dev(rx_ring->vsi->back);
u32 size;
ice_clean_rx_ring(rx_ring);
- if (rx_ring->vsi->type == ICE_VSI_PF)
- if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
WRITE_ONCE(rx_ring->xdp_prog, NULL);
if (rx_ring->xsk_pool) {
kfree(rx_ring->xdp_buf);
rx_ring->xdp_buf = NULL;
- } else {
- kfree(rx_ring->rx_buf);
- rx_ring->rx_buf = NULL;
}
if (rx_ring->desc) {
size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
PAGE_SIZE);
- dmam_free_coherent(rx_ring->dev, size,
- rx_ring->desc, rx_ring->dma);
+ dmam_free_coherent(dev, size, rx_ring->desc, rx_ring->dma);
rx_ring->desc = NULL;
}
}
@@ -482,19 +613,9 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
*/
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
{
- struct device *dev = rx_ring->dev;
+ struct device *dev = ice_pf_to_dev(rx_ring->vsi->back);
u32 size;
- if (!dev)
- return -ENOMEM;
-
- /* warn if we are about to overwrite the pointer */
- WARN_ON(rx_ring->rx_buf);
- rx_ring->rx_buf =
- kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
- if (!rx_ring->rx_buf)
- return -ENOMEM;
-
/* round up to nearest page */
size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
PAGE_SIZE);
@@ -503,22 +624,16 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
if (!rx_ring->desc) {
dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
size);
- goto err;
+ return -ENOMEM;
}
rx_ring->next_to_use = 0;
rx_ring->next_to_clean = 0;
- rx_ring->first_desc = 0;
if (ice_is_xdp_ena_vsi(rx_ring->vsi))
WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
return 0;
-
-err:
- kfree(rx_ring->rx_buf);
- rx_ring->rx_buf = NULL;
- return -ENOMEM;
}
/**
@@ -527,15 +642,14 @@ err:
* @xdp: xdp_buff used as input to the XDP program
* @xdp_prog: XDP program to run
* @xdp_ring: ring to be used for XDP_TX action
- * @rx_buf: Rx buffer to store the XDP action
* @eop_desc: Last descriptor in packet to read metadata from
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
-static void
-ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+static u32
+ice_run_xdp(struct ice_rx_ring *rx_ring, struct libeth_xdp_buff *xdp,
struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
- struct ice_rx_buf *rx_buf, union ice_32b_rx_flex_desc *eop_desc)
+ union ice_32b_rx_flex_desc *eop_desc)
{
unsigned int ret = ICE_XDP_PASS;
u32 act;
@@ -543,23 +657,23 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
if (!xdp_prog)
goto exit;
- ice_xdp_meta_set_desc(xdp, eop_desc);
+ xdp->desc = eop_desc;
- act = bpf_prog_run_xdp(xdp_prog, xdp);
+ act = bpf_prog_run_xdp(xdp_prog, &xdp->base);
switch (act) {
case XDP_PASS:
break;
case XDP_TX:
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_lock(&xdp_ring->tx_lock);
- ret = __ice_xmit_xdp_ring(xdp, xdp_ring, false);
+ ret = __ice_xmit_xdp_ring(&xdp->base, xdp_ring, false);
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_unlock(&xdp_ring->tx_lock);
if (ret == ICE_XDP_CONSUMED)
goto out_failure;
break;
case XDP_REDIRECT:
- if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))
+ if (xdp_do_redirect(rx_ring->netdev, &xdp->base, xdp_prog))
goto out_failure;
ret = ICE_XDP_REDIR;
break;
@@ -571,10 +685,12 @@ out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_DROP:
+ libeth_xdp_return_buff(xdp);
ret = ICE_XDP_CONSUMED;
}
+
exit:
- ice_set_rx_bufs_act(xdp, rx_ring, ret);
+ return ret;
}
/**
@@ -661,50 +777,34 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
}
/**
- * ice_alloc_mapped_page - recycle or make a new page
- * @rx_ring: ring to use
- * @bi: rx_buf struct to modify
- *
- * Returns true if the page was successfully allocated or
- * reused.
+ * ice_init_ctrl_rx_descs - Initialize Rx descriptors for the control VSI
+ * @rx_ring: ring to init descriptors on
+ * @count: number of descriptors to initialize
*/
-static bool
-ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
+void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 count)
{
- struct page *page = bi->page;
- dma_addr_t dma;
-
- /* since we are recycling buffers we should seldom need to alloc */
- if (likely(page))
- return true;
+ union ice_32b_rx_flex_desc *rx_desc;
+ u32 ntu = rx_ring->next_to_use;
- /* alloc new page for storage */
- page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
- if (unlikely(!page)) {
- rx_ring->ring_stats->rx_stats.alloc_page_failed++;
- return false;
- }
+ if (!count)
+ return;
- /* map page for use */
- dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
+ rx_desc = ICE_RX_DESC(rx_ring, ntu);
- /* if mapping failed free memory back to system since
- * there isn't much point in holding memory we can't use
- */
- if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_pages(page, ice_rx_pg_order(rx_ring));
- rx_ring->ring_stats->rx_stats.alloc_page_failed++;
- return false;
- }
+ do {
+ rx_desc++;
+ ntu++;
+ if (unlikely(ntu == rx_ring->count)) {
+ rx_desc = ICE_RX_DESC(rx_ring, 0);
+ ntu = 0;
+ }
- bi->dma = dma;
- bi->page = page;
- bi->page_offset = rx_ring->rx_offset;
- page_ref_add(page, USHRT_MAX - 1);
- bi->pagecnt_bias = USHRT_MAX;
+ rx_desc->wb.status_error0 = 0;
+ count--;
+ } while (count);
- return true;
+ if (rx_ring->next_to_use != ntu)
+ ice_release_rx_desc(rx_ring, ntu);
}
/**
@@ -722,41 +822,60 @@ ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
*/
bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count)
{
+ const struct libeth_fq_fp hdr_fq = {
+ .pp = rx_ring->hdr_pp,
+ .fqes = rx_ring->hdr_fqes,
+ .truesize = rx_ring->hdr_truesize,
+ .count = rx_ring->count,
+ };
+ const struct libeth_fq_fp fq = {
+ .pp = rx_ring->pp,
+ .fqes = rx_ring->rx_fqes,
+ .truesize = rx_ring->truesize,
+ .count = rx_ring->count,
+ };
union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use;
- struct ice_rx_buf *bi;
/* do nothing if no valid netdev defined */
- if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
- !cleaned_count)
+ if (!rx_ring->netdev || !cleaned_count)
return false;
/* get the Rx descriptor and buffer based on next_to_use */
rx_desc = ICE_RX_DESC(rx_ring, ntu);
- bi = &rx_ring->rx_buf[ntu];
do {
- /* if we fail here, we have work remaining */
- if (!ice_alloc_mapped_page(rx_ring, bi))
- break;
+ dma_addr_t addr;
- /* sync the buffer for use by the device */
- dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
- bi->page_offset,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
+ addr = libeth_rx_alloc(&fq, ntu);
+ if (addr == DMA_MAPPING_ERROR) {
+ rx_ring->ring_stats->rx_stats.alloc_page_failed++;
+ break;
+ }
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+ rx_desc->read.pkt_addr = cpu_to_le64(addr);
+
+ if (!hdr_fq.pp)
+ goto next;
+
+ addr = libeth_rx_alloc(&hdr_fq, ntu);
+ if (addr == DMA_MAPPING_ERROR) {
+ rx_ring->ring_stats->rx_stats.alloc_page_failed++;
+
+ libeth_rx_recycle_slow(fq.fqes[ntu].netmem);
+ break;
+ }
+
+ rx_desc->read.hdr_addr = cpu_to_le64(addr);
+next:
rx_desc++;
- bi++;
ntu++;
if (unlikely(ntu == rx_ring->count)) {
rx_desc = ICE_RX_DESC(rx_ring, 0);
- bi = rx_ring->rx_buf;
ntu = 0;
}
@@ -773,334 +892,41 @@ bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count)
}
/**
- * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
- * @rx_buf: Rx buffer to adjust
- * @size: Size of adjustment
- *
- * Update the offset within page so that Rx buf will be ready to be reused.
- * For systems with PAGE_SIZE < 8192 this function will flip the page offset
- * so the second half of page assigned to Rx buffer will be used, otherwise
- * the offset is moved by "size" bytes
- */
-static void
-ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
-{
-#if (PAGE_SIZE < 8192)
- /* flip page offset to other buffer */
- rx_buf->page_offset ^= size;
-#else
- /* move offset up to the next cache line */
- rx_buf->page_offset += size;
-#endif
-}
-
-/**
- * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
- * @rx_buf: buffer containing the page
- *
- * If page is reusable, we have a green light for calling ice_reuse_rx_page,
- * which will assign the current buffer to the buffer that next_to_alloc is
- * pointing to; otherwise, the DMA mapping needs to be destroyed and
- * page freed
- */
-static bool
-ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
-{
- unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
- struct page *page = rx_buf->page;
-
- /* avoid re-using remote and pfmemalloc pages */
- if (!dev_page_is_reusable(page))
- return false;
-
- /* if we are only owner of page we can reuse it */
- if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))
- return false;
-#if (PAGE_SIZE >= 8192)
-#define ICE_LAST_OFFSET \
- (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_3072)
- if (rx_buf->page_offset > ICE_LAST_OFFSET)
- return false;
-#endif /* PAGE_SIZE >= 8192) */
-
- /* If we have drained the page fragment pool we need to update
- * the pagecnt_bias and page count so that we fully restock the
- * number of references the driver holds.
- */
- if (unlikely(pagecnt_bias == 1)) {
- page_ref_add(page, USHRT_MAX - 1);
- rx_buf->pagecnt_bias = USHRT_MAX;
- }
-
- return true;
-}
-
-/**
- * ice_add_xdp_frag - Add contents of Rx buffer to xdp buf as a frag
- * @rx_ring: Rx descriptor ring to transact packets on
- * @xdp: xdp buff to place the data into
- * @rx_buf: buffer containing page to add
- * @size: packet length from rx_desc
- *
- * This function will add the data contained in rx_buf->page to the xdp buf.
- * It will just attach the page as a frag.
- */
-static int
-ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
- struct ice_rx_buf *rx_buf, const unsigned int size)
-{
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
-
- if (!size)
- return 0;
-
- if (!xdp_buff_has_frags(xdp)) {
- sinfo->nr_frags = 0;
- sinfo->xdp_frags_size = 0;
- xdp_buff_set_frags_flag(xdp);
- }
-
- if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
- ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
- return -ENOMEM;
- }
-
- __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
- rx_buf->page_offset, size);
- sinfo->xdp_frags_size += size;
- /* remember frag count before XDP prog execution; bpf_xdp_adjust_tail()
- * can pop off frags but driver has to handle it on its own
- */
- rx_ring->nr_frags = sinfo->nr_frags;
-
- if (page_is_pfmemalloc(rx_buf->page))
- xdp_buff_set_frag_pfmemalloc(xdp);
-
- return 0;
-}
-
-/**
- * ice_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: Rx descriptor ring to store buffers on
- * @old_buf: donor buffer to have page reused
- *
- * Synchronizes page for reuse by the adapter
- */
-static void
-ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
-{
- u16 nta = rx_ring->next_to_alloc;
- struct ice_rx_buf *new_buf;
-
- new_buf = &rx_ring->rx_buf[nta];
-
- /* update, and store next to alloc */
- nta++;
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- /* Transfer page from old buffer to new buffer.
- * Move each member individually to avoid possible store
- * forwarding stalls and unnecessary copy of skb.
- */
- new_buf->dma = old_buf->dma;
- new_buf->page = old_buf->page;
- new_buf->page_offset = old_buf->page_offset;
- new_buf->pagecnt_bias = old_buf->pagecnt_bias;
-}
-
-/**
- * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
- * @rx_ring: Rx descriptor ring to transact packets on
- * @size: size of buffer to add to skb
- * @ntc: index of next to clean element
- *
- * This function will pull an Rx buffer from the ring and synchronize it
- * for use by the CPU.
- */
-static struct ice_rx_buf *
-ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
- const unsigned int ntc)
-{
- struct ice_rx_buf *rx_buf;
-
- rx_buf = &rx_ring->rx_buf[ntc];
- rx_buf->pgcnt = page_count(rx_buf->page);
- prefetchw(rx_buf->page);
-
- if (!size)
- return rx_buf;
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
- rx_buf->page_offset, size,
- DMA_FROM_DEVICE);
-
- /* We have pulled a buffer for use, so decrement pagecnt_bias */
- rx_buf->pagecnt_bias--;
-
- return rx_buf;
-}
-
-/**
- * ice_build_skb - Build skb around an existing buffer
- * @rx_ring: Rx descriptor ring to transact packets on
- * @xdp: xdp_buff pointing to the data
- *
- * This function builds an skb around an existing XDP buffer, taking care
- * to set up the skb correctly and avoid any memcpy overhead. Driver has
- * already combined frags (if any) to skb_shared_info.
- */
-static struct sk_buff *
-ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
-{
- u8 metasize = xdp->data - xdp->data_meta;
- struct skb_shared_info *sinfo = NULL;
- unsigned int nr_frags;
- struct sk_buff *skb;
-
- if (unlikely(xdp_buff_has_frags(xdp))) {
- sinfo = xdp_get_shared_info_from_buff(xdp);
- nr_frags = sinfo->nr_frags;
- }
-
- /* Prefetch first cache line of first page. If xdp->data_meta
- * is unused, this points exactly as xdp->data, otherwise we
- * likely have a consumer accessing first few bytes of meta
- * data, and then actual data.
- */
- net_prefetch(xdp->data_meta);
- /* build an skb around the page buffer */
- skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
- if (unlikely(!skb))
- return NULL;
-
- /* must to record Rx queue, otherwise OS features such as
- * symmetric queue won't work
- */
- skb_record_rx_queue(skb, rx_ring->q_index);
-
- /* update pointers within the skb to store the data */
- skb_reserve(skb, xdp->data - xdp->data_hard_start);
- __skb_put(skb, xdp->data_end - xdp->data);
- if (metasize)
- skb_metadata_set(skb, metasize);
-
- if (unlikely(xdp_buff_has_frags(xdp)))
- xdp_update_skb_shared_info(skb, nr_frags,
- sinfo->xdp_frags_size,
- nr_frags * xdp->frame_sz,
- xdp_buff_is_frag_pfmemalloc(xdp));
-
- return skb;
-}
-
-/**
- * ice_construct_skb - Allocate skb and populate it
- * @rx_ring: Rx descriptor ring to transact packets on
- * @xdp: xdp_buff pointing to the data
+ * ice_clean_ctrl_rx_irq - Clean descriptors from flow director Rx ring
+ * @rx_ring: Rx descriptor ring for ctrl_vsi to transact packets on
*
- * This function allocates an skb. It then populates it with the page
- * data from the current receive descriptor, taking care to set up the
- * skb correctly.
+ * This function cleans Rx descriptors from the ctrl_vsi Rx ring used
+ * to set flow director rules on VFs.
*/
-static struct sk_buff *
-ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
+void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring)
{
- unsigned int size = xdp->data_end - xdp->data;
- struct skb_shared_info *sinfo = NULL;
- struct ice_rx_buf *rx_buf;
- unsigned int nr_frags = 0;
- unsigned int headlen;
- struct sk_buff *skb;
-
- /* prefetch first cache line of first page */
- net_prefetch(xdp->data);
-
- if (unlikely(xdp_buff_has_frags(xdp))) {
- sinfo = xdp_get_shared_info_from_buff(xdp);
- nr_frags = sinfo->nr_frags;
- }
-
- /* allocate a skb to store the frags */
- skb = napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE);
- if (unlikely(!skb))
- return NULL;
-
- rx_buf = &rx_ring->rx_buf[rx_ring->first_desc];
- skb_record_rx_queue(skb, rx_ring->q_index);
- /* Determine available headroom for copy */
- headlen = size;
- if (headlen > ICE_RX_HDR_SIZE)
- headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
- sizeof(long)));
-
- /* if we exhaust the linear part then add what is left as a frag */
- size -= headlen;
- if (size) {
- /* besides adding here a partial frag, we are going to add
- * frags from xdp_buff, make sure there is enough space for
- * them
- */
- if (unlikely(nr_frags >= MAX_SKB_FRAGS - 1)) {
- dev_kfree_skb(skb);
- return NULL;
- }
- skb_add_rx_frag(skb, 0, rx_buf->page,
- rx_buf->page_offset + headlen, size,
- xdp->frame_sz);
- } else {
- /* buffer is unused, change the act that should be taken later
- * on; data was copied onto skb's linear part so there's no
- * need for adjusting page offset and we can reuse this buffer
- * as-is
- */
- rx_buf->act = ICE_SKB_CONSUMED;
- }
+ u32 ntc = rx_ring->next_to_clean;
+ unsigned int total_rx_pkts = 0;
+ u32 cnt = rx_ring->count;
- if (unlikely(xdp_buff_has_frags(xdp))) {
- struct skb_shared_info *skinfo = skb_shinfo(skb);
+ while (likely(total_rx_pkts < ICE_DFLT_IRQ_WORK)) {
+ struct ice_vsi *ctrl_vsi = rx_ring->vsi;
+ union ice_32b_rx_flex_desc *rx_desc;
+ u16 stat_err_bits;
- memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
- sizeof(skb_frag_t) * nr_frags);
+ rx_desc = ICE_RX_DESC(rx_ring, ntc);
- xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags,
- sinfo->xdp_frags_size,
- nr_frags * xdp->frame_sz,
- xdp_buff_is_frag_pfmemalloc(xdp));
- }
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
+ if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
+ break;
- return skb;
-}
+ dma_rmb();
-/**
- * ice_put_rx_buf - Clean up used buffer and either recycle or free
- * @rx_ring: Rx descriptor ring to transact packets on
- * @rx_buf: Rx buffer to pull data from
- *
- * This function will clean up the contents of the rx_buf. It will either
- * recycle the buffer or unmap it and free the associated resources.
- */
-static void
-ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
-{
- if (!rx_buf)
- return;
+ if (ctrl_vsi->vf)
+ ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
- if (ice_can_reuse_rx_page(rx_buf)) {
- /* hand second half of page back to the ring */
- ice_reuse_rx_page(rx_ring, rx_buf);
- } else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
- ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
- ICE_RX_DMA_ATTR);
- __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
+ if (++ntc == cnt)
+ ntc = 0;
+ total_rx_pkts++;
}
- /* clear contents of buffer_info */
- rx_buf->page = NULL;
+ rx_ring->next_to_clean = ntc;
+ ice_init_ctrl_rx_descs(rx_ring, ICE_DESC_UNUSED(rx_ring));
}
/**
@@ -1115,20 +941,19 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
*
* Returns amount of work completed
*/
-int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
- unsigned int offset = rx_ring->rx_offset;
- struct xdp_buff *xdp = &rx_ring->xdp;
- u32 cached_ntc = rx_ring->first_desc;
struct ice_tx_ring *xdp_ring = NULL;
struct bpf_prog *xdp_prog = NULL;
u32 ntc = rx_ring->next_to_clean;
+ LIBETH_XDP_ONSTACK_BUFF(xdp);
+ u32 cached_ntu, xdp_verdict;
u32 cnt = rx_ring->count;
u32 xdp_xmit = 0;
- u32 cached_ntu;
bool failure;
- u32 first;
+
+ libeth_xdp_init_buff(xdp, &rx_ring->xdp, &rx_ring->xdp_rxq);
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
if (xdp_prog) {
@@ -1139,19 +964,21 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
- struct ice_rx_buf *rx_buf;
+ struct libeth_fqe *rx_buf;
struct sk_buff *skb;
unsigned int size;
u16 stat_err_bits;
u16 vlan_tci;
+ bool rxe;
/* get the Rx desc from Rx ring based on 'next_to_clean' */
rx_desc = ICE_RX_DESC(rx_ring, ntc);
- /* status_error_len will always be zero for unused descriptors
- * because it's cleared in cleanup, and overlaps with hdr_addr
- * which is always zero because packet split isn't used, if the
- * hardware wrote DD then it will be non-zero
+ /*
+ * The DD bit will always be zero for unused descriptors
+ * because it's cleared in cleanup or when setting the DMA
+ * address of the header buffer, which never uses the DD bit.
+ * If the hardware wrote the descriptor back, the DD bit will be set.
*/
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
@@ -1164,85 +991,66 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
dma_rmb();
ice_trace(clean_rx_irq, rx_ring, rx_desc);
- if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
- struct ice_vsi *ctrl_vsi = rx_ring->vsi;
-
- if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
- ctrl_vsi->vf)
- ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
- if (++ntc == cnt)
- ntc = 0;
- rx_ring->first_desc = ntc;
- continue;
- }
+ stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_HBO_S) |
+ BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
+ rxe = ice_test_staterr(rx_desc->wb.status_error0,
+ stat_err_bits);
+
+ if (!rx_ring->hdr_pp)
+ goto payload;
+
+ size = le16_get_bits(rx_desc->wb.hdr_len_sph_flex_flags1,
+ ICE_RX_FLEX_DESC_HDR_LEN_M);
+ if (unlikely(rxe))
+ size = 0;
+
+ rx_buf = &rx_ring->hdr_fqes[ntc];
+ libeth_xdp_process_buff(xdp, rx_buf, size);
+ rx_buf->netmem = 0;
+
+payload:
size = le16_to_cpu(rx_desc->wb.pkt_len) &
ICE_RX_FLX_DESC_PKT_LEN_M;
+ if (unlikely(rxe))
+ size = 0;
/* retrieve a buffer from the ring */
- rx_buf = ice_get_rx_buf(rx_ring, size, ntc);
-
- if (!xdp->data) {
- void *hard_start;
+ rx_buf = &rx_ring->rx_fqes[ntc];
+ libeth_xdp_process_buff(xdp, rx_buf, size);
- hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
- offset;
- xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
- xdp_buff_clear_frags_flag(xdp);
- } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
- break;
- }
if (++ntc == cnt)
ntc = 0;
/* skip if it is NOP desc */
- if (ice_is_non_eop(rx_ring, rx_desc))
+ if (ice_is_non_eop(rx_ring, rx_desc) || unlikely(!xdp->data))
continue;
- ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf, rx_desc);
- if (rx_buf->act == ICE_XDP_PASS)
+ xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc);
+ if (xdp_verdict == ICE_XDP_PASS)
goto construct_skb;
- total_rx_bytes += xdp_get_buff_len(xdp);
+
+ if (xdp_verdict & (ICE_XDP_TX | ICE_XDP_REDIR))
+ xdp_xmit |= xdp_verdict;
+ total_rx_bytes += xdp_get_buff_len(&xdp->base);
total_rx_pkts++;
xdp->data = NULL;
- rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
continue;
+
construct_skb:
- if (likely(ice_ring_uses_build_skb(rx_ring)))
- skb = ice_build_skb(rx_ring, xdp);
- else
- skb = ice_construct_skb(rx_ring, xdp);
- /* exit if we failed to retrieve a buffer */
- if (!skb) {
- rx_ring->ring_stats->rx_stats.alloc_page_failed++;
- rx_buf->act = ICE_XDP_CONSUMED;
- if (unlikely(xdp_buff_has_frags(xdp)))
- ice_set_rx_bufs_act(xdp, rx_ring,
- ICE_XDP_CONSUMED);
- xdp->data = NULL;
- rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
- break;
- }
+ skb = xdp_build_skb_from_buff(&xdp->base);
xdp->data = NULL;
- rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
- stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
- if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
- stat_err_bits))) {
- dev_kfree_skb_any(skb);
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ libeth_xdp_return_buff_slow(xdp);
+ rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
continue;
}
vlan_tci = ice_get_vlan_tci(rx_desc);
- /* pad the skb if needed, to make a valid ethernet frame */
- if (eth_skb_pad(skb))
- continue;
-
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
@@ -1257,30 +1065,15 @@ construct_skb:
total_rx_pkts++;
}
- first = rx_ring->first_desc;
- while (cached_ntc != first) {
- struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc];
-
- if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) {
- ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
- xdp_xmit |= buf->act;
- } else if (buf->act & ICE_XDP_CONSUMED) {
- buf->pagecnt_bias++;
- } else if (buf->act == ICE_XDP_PASS) {
- ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
- }
-
- ice_put_rx_buf(rx_ring, buf);
- if (++cached_ntc >= cnt)
- cached_ntc = 0;
- }
rx_ring->next_to_clean = ntc;
/* return up to cleaned_count buffers to hardware */
- failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring));
+ failure = ice_alloc_rx_bufs(rx_ring, ICE_DESC_UNUSED(rx_ring));
if (xdp_xmit)
ice_finalize_xdp_rx(xdp_ring, xdp_xmit, cached_ntu);
+ libeth_xdp_save_buff(&rx_ring->xdp, xdp);
+
if (rx_ring->ring_stats)
ice_update_rx_ring_stats(rx_ring, total_rx_pkts,
total_rx_bytes);
@@ -1722,10 +1515,46 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
/* notify HW of packet */
kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,
netdev_xmit_more());
- if (kick)
- /* notify HW of packet */
- writel(i, tx_ring->tail);
+ if (!kick)
+ return;
+ if (ice_is_txtime_cfg(tx_ring)) {
+ struct ice_tstamp_ring *tstamp_ring = tx_ring->tstamp_ring;
+ u32 tstamp_count = tstamp_ring->count;
+ u32 j = tstamp_ring->next_to_use;
+ struct ice_ts_desc *ts_desc;
+ struct timespec64 ts;
+ u32 tstamp;
+
+ ts = ktime_to_timespec64(first->skb->tstamp);
+ tstamp = ts.tv_nsec >> ICE_TXTIME_CTX_RESOLUTION_128NS;
+
+ ts_desc = ICE_TS_DESC(tstamp_ring, j);
+ ts_desc->tx_desc_idx_tstamp = ice_build_tstamp_desc(i, tstamp);
+
+ j++;
+ if (j == tstamp_count) {
+ u32 fetch = tstamp_count - tx_ring->count;
+
+ j = 0;
+
+ /* To prevent an MDD, when wrapping the tstamp ring
+ * create additional TS descriptors equal in number to the
+ * fetch value (tstamp_count - tx_ring->count). HW will merge
+ * the TS descriptors with the same timestamp value into a
+ * single descriptor.
+ */
+ for (; j < fetch; j++) {
+ ts_desc = ICE_TS_DESC(tstamp_ring, j);
+ ts_desc->tx_desc_idx_tstamp =
+ ice_build_tstamp_desc(i, tstamp);
+ }
+ }
+ tstamp_ring->next_to_use = j;
+ writel_relaxed(j, tstamp_ring->tail);
+ } else {
+ writel_relaxed(i, tx_ring->tail);
+ }
return;
dma_error:
@@ -1753,6 +1582,7 @@ dma_error:
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
+ const struct ice_tx_ring *tx_ring = off->tx_ring;
u32 l4_len = 0, l3_len = 0, l2_len = 0;
struct sk_buff *skb = first->skb;
union {
@@ -1902,6 +1732,30 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
l3_len = l4.hdr - ip.hdr;
offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;
+ if ((tx_ring->netdev->features & NETIF_F_HW_CSUM) &&
+ !(first->tx_flags & ICE_TX_FLAGS_TSO) &&
+ !skb_csum_is_sctp(skb)) {
+ /* Set GCS */
+ u16 csum_start = (skb->csum_start - skb->mac_header) / 2;
+ u16 csum_offset = skb->csum_offset / 2;
+ u16 gcs_params;
+
+ gcs_params = FIELD_PREP(ICE_TX_GCS_DESC_START_M, csum_start) |
+ FIELD_PREP(ICE_TX_GCS_DESC_OFFSET_M, csum_offset) |
+ FIELD_PREP(ICE_TX_GCS_DESC_TYPE_M,
+ ICE_TX_GCS_DESC_CSUM_PSH);
+
+ /* Unlike legacy HW checksums, GCS requires a context
+ * descriptor.
+ */
+ off->cd_qw1 |= ICE_TX_DESC_DTYPE_CTX;
+ off->cd_gcs_params = gcs_params;
+ /* Fill out CSO info in data descriptors */
+ off->td_offset |= offset;
+ off->td_cmd |= cmd;
+ return 1;
+ }
+
/* Enable L4 checksum offloads */
switch (l4_proto) {
case IPPROTO_TCP:
@@ -2359,17 +2213,20 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
eth = (struct ethhdr *)skb_mac_header(skb);
- if (unlikely((skb->priority == TC_PRIO_CONTROL ||
- eth->h_proto == htons(ETH_P_LLDP)) &&
- vsi->type == ICE_VSI_PF &&
- vsi->port_info->qos_cfg.is_sw_lldp))
+
+ if ((ice_is_switchdev_running(vsi->back) ||
+ ice_lag_is_switchdev_running(vsi->back)) &&
+ vsi->type != ICE_VSI_SF)
+ ice_eswitch_set_target_vsi(skb, &offload);
+ else if (unlikely((skb->priority == TC_PRIO_CONTROL ||
+ eth->h_proto == htons(ETH_P_LLDP)) &&
+ vsi->type == ICE_VSI_PF &&
+ vsi->port_info->qos_cfg.is_sw_lldp))
offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
ICE_TX_CTX_DESC_SWTCH_UPLINK <<
ICE_TXD_CTX_QW1_CMD_S);
ice_tstamp(tx_ring, skb, first, &offload);
- if (ice_is_switchdev_running(vsi->back) && vsi->type != ICE_VSI_SF)
- ice_eswitch_set_target_vsi(skb, &offload);
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
struct ice_tx_ctx_desc *cdesc;
@@ -2383,7 +2240,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
/* setup context descriptor */
cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
- cdesc->rsvd = cpu_to_le16(0);
+ cdesc->gcs = cpu_to_le16(offload.cd_gcs_params);
cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index cb347c852ba9..e440c55d9e9f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -4,6 +4,8 @@
#ifndef _ICE_TXRX_H_
#define _ICE_TXRX_H_
+#include <net/libeth/types.h>
+
#include "ice_type.h"
#define ICE_DFLT_IRQ_WORK 256
@@ -27,72 +29,6 @@
#define ICE_MAX_TXQ_PER_TXQG 128
-/* Attempt to maximize the headroom available for incoming frames. We use a 2K
- * buffer for MTUs <= 1500 and need 1536/1534 to store the data for the frame.
- * This leaves us with 512 bytes of room. From that we need to deduct the
- * space needed for the shared info and the padding needed to IP align the
- * frame.
- *
- * Note: For cache line sizes 256 or larger this value is going to end
- * up negative. In these cases we should fall back to the legacy
- * receive path.
- */
-#if (PAGE_SIZE < 8192)
-#define ICE_2K_TOO_SMALL_WITH_PADDING \
- ((unsigned int)(NET_SKB_PAD + ICE_RXBUF_1536) > \
- SKB_WITH_OVERHEAD(ICE_RXBUF_2048))
-
-/**
- * ice_compute_pad - compute the padding
- * @rx_buf_len: buffer length
- *
- * Figure out the size of half page based on given buffer length and
- * then subtract the skb_shared_info followed by subtraction of the
- * actual buffer length; this in turn results in the actual space that
- * is left for padding usage
- */
-static inline int ice_compute_pad(int rx_buf_len)
-{
- int half_page_size;
-
- half_page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
- return SKB_WITH_OVERHEAD(half_page_size) - rx_buf_len;
-}
-
-/**
- * ice_skb_pad - determine the padding that we can supply
- *
- * Figure out the right Rx buffer size and based on that calculate the
- * padding
- */
-static inline int ice_skb_pad(void)
-{
- int rx_buf_len;
-
- /* If a 2K buffer cannot handle a standard Ethernet frame then
- * optimize padding for a 3K buffer instead of a 1.5K buffer.
- *
- * For a 3K buffer we need to add enough padding to allow for
- * tailroom due to NET_IP_ALIGN possibly shifting us out of
- * cache-line alignment.
- */
- if (ICE_2K_TOO_SMALL_WITH_PADDING)
- rx_buf_len = ICE_RXBUF_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
- else
- rx_buf_len = ICE_RXBUF_1536;
-
- /* if needed make room for NET_IP_ALIGN */
- rx_buf_len -= NET_IP_ALIGN;
-
- return ice_compute_pad(rx_buf_len);
-}
-
-#define ICE_SKB_PAD ice_skb_pad()
-#else
-#define ICE_2K_TOO_SMALL_WITH_PADDING false
-#define ICE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
-#endif
-
/* We are assuming that the cache line is always 64 Bytes here for ice.
* In order to make sure that is a correct assumption there is a check in probe
* to print a warning if the read from GLPCI_CNF2 tells us that the cache line
@@ -112,10 +48,6 @@ static inline int ice_skb_pad(void)
(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
-#define ICE_RX_DESC_UNUSED(R) \
- ((((R)->first_desc > (R)->next_to_use) ? 0 : (R)->count) + \
- (R)->first_desc - (R)->next_to_use - 1)
-
#define ICE_RING_QUARTER(R) ((R)->count >> 2)
#define ICE_TX_FLAGS_TSO BIT(0)
@@ -193,18 +125,10 @@ struct ice_tx_offload_params {
u32 td_l2tag1;
u32 cd_tunnel_params;
u16 cd_l2tag2;
+ u16 cd_gcs_params;
u8 header_len;
};
-struct ice_rx_buf {
- dma_addr_t dma;
- struct page *page;
- unsigned int page_offset;
- unsigned int pgcnt;
- unsigned int act;
- unsigned int pagecnt_bias;
-};
-
struct ice_q_stats {
u64 pkts;
u64 bytes;
@@ -262,15 +186,6 @@ struct ice_pkt_ctx {
__be16 vlan_proto;
};
-struct ice_xdp_buff {
- struct xdp_buff xdp_buff;
- const union ice_32b_rx_flex_desc *eop_desc;
- const struct ice_pkt_ctx *pkt_ctx;
-};
-
-/* Required for compatibility with xdp_buffs from xsk_pool */
-static_assert(offsetof(struct ice_xdp_buff, xdp_buff) == 0);
-
/* indices into GLINT_ITR registers */
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1
@@ -310,10 +225,20 @@ enum ice_dynamic_itr {
#define ICE_TX_LEGACY 1
/* descriptor ring, associated with a VSI */
+struct ice_tstamp_ring {
+ struct ice_tx_ring *tx_ring; /* Backreference to associated Tx ring */
+ dma_addr_t dma; /* physical address of ring */
+ struct rcu_head rcu; /* to avoid race on free */
+ u8 __iomem *tail;
+ void *desc;
+ u16 next_to_use;
+ u16 count;
+} ____cacheline_internodealigned_in_smp;
+
struct ice_rx_ring {
/* CL1 - 1st cacheline starts here */
void *desc; /* Descriptor ring memory */
- struct device *dev; /* Used for DMA mapping */
+ struct page_pool *pp;
struct net_device *netdev; /* netdev ring maps to */
struct ice_vsi *vsi; /* Backreference to associated VSI */
struct ice_q_vector *q_vector; /* Backreference to associated vector */
@@ -325,14 +250,19 @@ struct ice_rx_ring {
u16 next_to_alloc;
union {
- struct ice_rx_buf *rx_buf;
+ struct libeth_fqe *rx_fqes;
struct xdp_buff **xdp_buf;
};
+
/* CL2 - 2nd cacheline starts here */
+ struct libeth_fqe *hdr_fqes;
+ struct page_pool *hdr_pp;
+
union {
- struct ice_xdp_buff xdp_ext;
- struct xdp_buff xdp;
+ struct libeth_xdp_buff_stash xdp;
+ struct libeth_xdp_buff *xsk;
};
+
/* CL3 - 3rd cacheline starts here */
union {
struct ice_pkt_ctx pkt_ctx;
@@ -342,12 +272,13 @@ struct ice_rx_ring {
};
};
struct bpf_prog *xdp_prog;
- u16 rx_offset;
/* used in interrupt processing */
u16 next_to_use;
u16 next_to_clean;
- u16 first_desc;
+
+ u32 hdr_truesize;
+ u32 truesize;
/* stats structs */
struct ice_ring_stats *ring_stats;
@@ -358,15 +289,14 @@ struct ice_rx_ring {
struct ice_tx_ring *xdp_ring;
struct ice_rx_ring *next; /* pointer to next ring in q_vector */
struct xsk_buff_pool *xsk_pool;
- u32 nr_frags;
- u16 max_frame;
+ u16 rx_hdr_len;
u16 rx_buf_len;
dma_addr_t dma; /* physical address of ring */
u8 dcb_tc; /* Traffic class of ring */
u8 ptp_rx;
-#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
#define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2)
#define ICE_RX_FLAGS_MULTIDEV BIT(3)
+#define ICE_RX_FLAGS_RING_GCS BIT(4)
u8 flags;
/* CL5 - 5th cacheline starts here */
struct xdp_rxq_info xdp_rxq;
@@ -402,29 +332,16 @@ struct ice_tx_ring {
spinlock_t tx_lock;
u32 txq_teid; /* Added Tx queue TEID */
/* CL4 - 4th cacheline starts here */
+ struct ice_tstamp_ring *tstamp_ring;
#define ICE_TX_FLAGS_RING_XDP BIT(0)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG1 BIT(1)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2)
+#define ICE_TX_FLAGS_TXTIME BIT(3)
u8 flags;
u8 dcb_tc; /* Traffic class of ring */
u16 quanta_prof_id;
} ____cacheline_internodealigned_in_smp;
-static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
-{
- return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
-}
-
-static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring)
-{
- ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
-}
-
-static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring)
-{
- ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
-}
-
static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring)
{
return !!ring->ch;
@@ -479,17 +396,13 @@ struct ice_coalesce_stored {
static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
{
-#if (PAGE_SIZE < 8192)
- if (ring->rx_buf_len > (PAGE_SIZE / 2))
- return 1;
-#endif
return 0;
}
-#define ice_rx_pg_size(_ring) (PAGE_SIZE << ice_rx_pg_order(_ring))
-
union ice_32b_rx_flex_desc;
+void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 num_descs);
+void ice_rxq_pp_destroy(struct ice_rx_ring *rq);
bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, unsigned int cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
u16
@@ -499,12 +412,15 @@ void ice_clean_tx_ring(struct ice_tx_ring *tx_ring);
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring);
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring);
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring);
+int ice_alloc_setup_tstamp_ring(struct ice_tx_ring *tx_ring);
void ice_free_tx_ring(struct ice_tx_ring *tx_ring);
void ice_free_rx_ring(struct ice_rx_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
u8 *raw_packet);
-int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget);
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring);
+void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring);
+void ice_free_tx_tstamp_ring(struct ice_tx_ring *tx_ring);
+void ice_free_tstamp_ring(struct ice_tx_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 2719f0e20933..956da38d63b0 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -3,6 +3,7 @@
#include <linux/filter.h>
#include <linux/net/intel/libie/rx.h>
+#include <net/libeth/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_eswitch.h"
@@ -81,6 +82,23 @@ ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring,
}
/**
+ * ice_rx_gcs - Set generic checksum in skb
+ * @skb: skb currently being received and modified
+ * @rx_desc: receive descriptor
+ */
+static void ice_rx_gcs(struct sk_buff *skb,
+ const union ice_32b_rx_flex_desc *rx_desc)
+{
+ const struct ice_32b_rx_flex_desc_nic *desc;
+ u16 csum;
+
+ desc = (struct ice_32b_rx_flex_desc_nic *)rx_desc;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ csum = (__force u16)desc->raw_csum;
+ skb->csum = csum_unfold((__force __sum16)swab16(csum));
+}
+
+/**
* ice_rx_csum - Indicate in skb if checksum is good
* @ring: the ring we care about
* @skb: skb currently being received and modified
@@ -107,6 +125,15 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);
+ if ((ring->flags & ICE_RX_FLAGS_RING_GCS) &&
+ rx_desc->wb.rxdid == ICE_RXDID_FLEX_NIC &&
+ (decoded.inner_prot == LIBETH_RX_PT_INNER_TCP ||
+ decoded.inner_prot == LIBETH_RX_PT_INNER_UDP ||
+ decoded.inner_prot == LIBETH_RX_PT_INNER_ICMP)) {
+ ice_rx_gcs(skb, rx_desc);
+ return;
+ }
+
/* check if HW has decoded the packet and checksum */
if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
return;
@@ -204,9 +231,12 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring,
if (ice_is_port_repr_netdev(netdev))
ice_repr_inc_rx_stats(netdev, skb->len);
+
+ /* __skb_push() is needed because xdp_build_skb_from_buff()
+ * calls eth_type_trans()
+ */
+ __skb_push(skb, ETH_HLEN);
skb->protocol = eth_type_trans(skb, netdev);
- } else {
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
ice_rx_csum(rx_ring, skb, rx_desc, ptype);
@@ -244,19 +274,18 @@ static void
ice_clean_xdp_tx_buf(struct device *dev, struct ice_tx_buf *tx_buf,
struct xdp_frame_bulk *bq)
{
- dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
-
switch (tx_buf->type) {
case ICE_TX_BUF_XDP_TX:
- page_frag_free(tx_buf->raw_buf);
+ libeth_xdp_return_va(tx_buf->raw_buf, true);
break;
case ICE_TX_BUF_XDP_XMIT:
+ dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
xdp_return_frame_bulk(tx_buf->xdpf, bq);
break;
}
+ dma_unmap_len_set(tx_buf, len, 0);
tx_buf->type = ICE_TX_BUF_EMPTY;
}
@@ -351,9 +380,11 @@ int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
struct ice_tx_buf *tx_buf;
u32 cnt = xdp_ring->count;
void *data = xdp->data;
+ struct page *page;
u32 nr_frags = 0;
u32 free_space;
u32 frag = 0;
+ u32 offset;
free_space = ICE_DESC_UNUSED(xdp_ring);
if (free_space < ICE_RING_QUARTER(xdp_ring))
@@ -373,24 +404,28 @@ int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
tx_head = &xdp_ring->tx_buf[ntu];
tx_buf = tx_head;
+ page = virt_to_page(data);
+ offset = offset_in_page(xdp->data);
+
for (;;) {
dma_addr_t dma;
- dma = dma_map_single(dev, data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma))
- goto dma_unmap;
-
- /* record length, and DMA address */
- dma_unmap_len_set(tx_buf, len, size);
- dma_unmap_addr_set(tx_buf, dma, dma);
-
if (frame) {
+ dma = dma_map_single(dev, data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto dma_unmap;
tx_buf->type = ICE_TX_BUF_FRAG;
} else {
+ dma = page_pool_get_dma_addr(page) + offset;
+ dma_sync_single_for_device(dev, dma, size, DMA_BIDIRECTIONAL);
tx_buf->type = ICE_TX_BUF_XDP_TX;
tx_buf->raw_buf = data;
}
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buf, len, size);
+ dma_unmap_addr_set(tx_buf, dma, dma);
+
tx_desc->buf_addr = cpu_to_le64(dma);
tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0);
@@ -404,6 +439,8 @@ int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
tx_desc = ICE_TX_DESC(xdp_ring, ntu);
tx_buf = &xdp_ring->tx_buf[ntu];
+ page = skb_frag_page(&sinfo->frags[frag]);
+ offset = skb_frag_off(&sinfo->frags[frag]);
data = skb_frag_address(&sinfo->frags[frag]);
size = skb_frag_size(&sinfo->frags[frag]);
frag++;
@@ -488,10 +525,13 @@ void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res,
*/
static int ice_xdp_rx_hw_ts(const struct xdp_md *ctx, u64 *ts_ns)
{
- const struct ice_xdp_buff *xdp_ext = (void *)ctx;
+ const struct libeth_xdp_buff *xdp_ext = (void *)ctx;
+ struct ice_rx_ring *rx_ring;
+
+ rx_ring = libeth_xdp_buff_to_rq(xdp_ext, typeof(*rx_ring), xdp_rxq);
- *ts_ns = ice_ptp_get_rx_hwts(xdp_ext->eop_desc,
- xdp_ext->pkt_ctx);
+ *ts_ns = ice_ptp_get_rx_hwts(xdp_ext->desc,
+ &rx_ring->pkt_ctx);
if (!*ts_ns)
return -ENODATA;
@@ -519,10 +559,10 @@ ice_xdp_rx_hash_type(const union ice_32b_rx_flex_desc *eop_desc)
static int ice_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
enum xdp_rss_hash_type *rss_type)
{
- const struct ice_xdp_buff *xdp_ext = (void *)ctx;
+ const struct libeth_xdp_buff *xdp_ext = (void *)ctx;
- *hash = ice_get_rx_hash(xdp_ext->eop_desc);
- *rss_type = ice_xdp_rx_hash_type(xdp_ext->eop_desc);
+ *hash = ice_get_rx_hash(xdp_ext->desc);
+ *rss_type = ice_xdp_rx_hash_type(xdp_ext->desc);
if (!likely(*hash))
return -ENODATA;
@@ -541,13 +581,16 @@ static int ice_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
static int ice_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto,
u16 *vlan_tci)
{
- const struct ice_xdp_buff *xdp_ext = (void *)ctx;
+ const struct libeth_xdp_buff *xdp_ext = (void *)ctx;
+ struct ice_rx_ring *rx_ring;
+
+ rx_ring = libeth_xdp_buff_to_rq(xdp_ext, typeof(*rx_ring), xdp_rxq);
- *vlan_proto = xdp_ext->pkt_ctx->vlan_proto;
+ *vlan_proto = rx_ring->pkt_ctx.vlan_proto;
if (!*vlan_proto)
return -ENODATA;
- *vlan_tci = ice_get_vlan_tci(xdp_ext->eop_desc);
+ *vlan_tci = ice_get_vlan_tci(xdp_ext->desc);
if (!*vlan_tci)
return -ENODATA;
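With the private ice_xdp_buff gone, the metadata kfuncs above recover per-ring state (pkt_ctx) by mapping the generic libeth_xdp_buff back to its ice_rx_ring via libeth_xdp_buff_to_rq(..., xdp_rxq), presumably a container_of()-style walk from the ring's embedded xdp_rxq_info to the ring that contains it. A simplified sketch of that pattern, with stand-in types:

#include <stddef.h>
#include <stdio.h>

struct xdp_rxq_info { int queue_index; };

struct rx_ring {
	void *desc;
	int count;
	struct xdp_rxq_info xdp_rxq;	/* embedded, as in struct ice_rx_ring */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct rx_ring ring = { .count = 512, .xdp_rxq = { .queue_index = 3 } };
	struct xdp_rxq_info *rxq = &ring.xdp_rxq;

	/* Stand-in for libeth_xdp_buff_to_rq(xdp, struct ice_rx_ring, xdp_rxq) */
	struct rx_ring *back = container_of(rxq, struct rx_ring, xdp_rxq);

	printf("ring count=%d queue=%d\n", back->count, back->xdp_rxq.queue_index);
	return 0;
}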
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index 79f960c6680d..6a3f10f7a53f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -6,49 +6,6 @@
#include "ice.h"
/**
- * ice_set_rx_bufs_act - propagate Rx buffer action to frags
- * @xdp: XDP buffer representing frame (linear and frags part)
- * @rx_ring: Rx ring struct
- * act: action to store onto Rx buffers related to XDP buffer parts
- *
- * Set action that should be taken before putting Rx buffer from first frag
- * to the last.
- */
-static inline void
-ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
- const unsigned int act)
-{
- u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
- u32 nr_frags = rx_ring->nr_frags + 1;
- u32 idx = rx_ring->first_desc;
- u32 cnt = rx_ring->count;
- struct ice_rx_buf *buf;
-
- for (int i = 0; i < nr_frags; i++) {
- buf = &rx_ring->rx_buf[idx];
- buf->act = act;
-
- if (++idx == cnt)
- idx = 0;
- }
-
- /* adjust pagecnt_bias on frags freed by XDP prog */
- if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) {
- u32 delta = rx_ring->nr_frags - sinfo_frags;
-
- while (delta) {
- if (idx == 0)
- idx = cnt - 1;
- else
- idx--;
- buf = &rx_ring->rx_buf[idx];
- buf->pagecnt_bias--;
- delta--;
- }
- }
-}
-
-/**
* ice_test_staterr - tests bits in Rx descriptor status and error fields
* @status_err_n: Rx descriptor status_error0 or status_error1 bits
* @stat_err_bits: value to mask
@@ -97,6 +54,20 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
}
/**
+ * ice_build_tstamp_desc - build Tx time stamp descriptor
+ * @tx_desc: Tx LAN descriptor index
+ * @tstamp: time stamp
+ *
+ * Return: Tx time stamp descriptor
+ */
+static inline __le32
+ice_build_tstamp_desc(u16 tx_desc, u32 tstamp)
+{
+ return cpu_to_le32(FIELD_PREP(ICE_TXTIME_TX_DESC_IDX_M, tx_desc) |
+ FIELD_PREP(ICE_TXTIME_STAMP_M, tstamp));
+}
+
+/**
* ice_get_vlan_tci - get VLAN TCI from Rx flex descriptor
* @rx_desc: Rx 32b flex descriptor with RXDID=2
*
@@ -164,13 +135,4 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring,
void
ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tci);
-static inline void
-ice_xdp_meta_set_desc(struct xdp_buff *xdp,
- union ice_32b_rx_flex_desc *eop_desc)
-{
- struct ice_xdp_buff *xdp_ext = container_of(xdp, struct ice_xdp_buff,
- xdp_buff);
-
- xdp_ext->eop_desc = eop_desc;
-}
#endif /* !_ICE_TXRX_LIB_H_ */
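ice_build_tstamp_desc() above packs a Tx descriptor index and a 128 ns-granularity timestamp into a single tstamp-ring entry. The wrap handling that consumes it in ice_tx_map() (see the earlier ice_txrx.c hunk) duplicates the final entry into the first 'fetch' slots of the ring so the merged descriptors avoid an MDD. A small standalone sketch of that flow, assuming an illustrative bit layout rather than the real ICE_TXTIME_* masks:

#include <stdint.h>
#include <stdio.h>

#define TS_IDX_BITS	13	/* assumed width of the Tx descriptor index field */

static uint32_t build_tstamp_desc(uint16_t tx_desc_idx, uint32_t tstamp)
{
	return ((uint32_t)tx_desc_idx & ((1u << TS_IDX_BITS) - 1)) |
	       (tstamp << TS_IDX_BITS);
}

int main(void)
{
	uint32_t ring[40] = { 0 };	/* tstamp ring */
	uint16_t tstamp_count = 40;	/* tstamp ring is larger ... */
	uint16_t tx_count = 32;		/* ... than the Tx ring it shadows */
	uint16_t ntu = 39;		/* next_to_use, about to wrap */
	uint16_t tx_idx = 31;		/* Tx descriptor being timestamped */
	uint32_t tstamp = 0x1234;

	ring[ntu++] = build_tstamp_desc(tx_idx, tstamp);
	if (ntu == tstamp_count) {
		uint16_t fetch = tstamp_count - tx_count;

		/* Duplicate the descriptor into the first 'fetch' slots;
		 * per the driver comment, HW merges TS descriptors with the
		 * same timestamp, so these extras collapse into one entry.
		 */
		for (ntu = 0; ntu < fetch; ntu++)
			ring[ntu] = build_tstamp_desc(tx_idx, tstamp);
	}
	printf("next_to_use after wrap: %u\n", (unsigned int)ntu);
	return 0;
}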
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index adb168860711..6a2ec8389a8f 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -17,7 +17,9 @@
#include "ice_protocol_type.h"
#include "ice_sbq_cmd.h"
#include "ice_vlan_mode.h"
-#include "ice_fwlog.h"
+#include <linux/net/intel/libie/fwlog.h>
+#include <linux/wait.h>
+#include <net/dscp.h>
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
@@ -291,8 +293,10 @@ struct ice_hw_common_caps {
u8 dcb;
u8 ieee_1588;
u8 rdma;
- u8 roce_lag;
- u8 sriov_lag;
+
+ bool roce_lag;
+ bool sriov_lag;
+ bool sriov_aa_lag;
bool nvm_update_pending_nvm;
bool nvm_update_pending_orom;
@@ -324,17 +328,17 @@ struct ice_hw_common_caps {
#define ICE_TS_TMR_IDX_ASSOC_M BIT(24)
/* TIME_REF clock rate specification */
-enum ice_time_ref_freq {
- ICE_TIME_REF_FREQ_25_000 = 0,
- ICE_TIME_REF_FREQ_122_880 = 1,
- ICE_TIME_REF_FREQ_125_000 = 2,
- ICE_TIME_REF_FREQ_153_600 = 3,
- ICE_TIME_REF_FREQ_156_250 = 4,
- ICE_TIME_REF_FREQ_245_760 = 5,
+enum ice_tspll_freq {
+ ICE_TSPLL_FREQ_25_000 = 0,
+ ICE_TSPLL_FREQ_122_880 = 1,
+ ICE_TSPLL_FREQ_125_000 = 2,
+ ICE_TSPLL_FREQ_153_600 = 3,
+ ICE_TSPLL_FREQ_156_250 = 4,
+ ICE_TSPLL_FREQ_245_760 = 5,
- NUM_ICE_TIME_REF_FREQ,
+ NUM_ICE_TSPLL_FREQ,
- ICE_TIME_REF_FREQ_INVALID = -1,
+ ICE_TSPLL_FREQ_INVALID = -1,
};
/* Clock source specification */
@@ -347,7 +351,7 @@ enum ice_clk_src {
struct ice_ts_func_info {
/* Function specific info */
- enum ice_time_ref_freq time_ref;
+ enum ice_tspll_freq time_ref;
u8 clk_freq;
u8 clk_src;
u8 tmr_index_assoc;
@@ -368,6 +372,7 @@ struct ice_ts_func_info {
#define ICE_TS_TMR1_ENA_M BIT(26)
#define ICE_TS_LL_TX_TS_READ_M BIT(28)
#define ICE_TS_LL_TX_TS_INT_READ_M BIT(29)
+#define ICE_TS_LL_PHY_TMR_UPDATE_M BIT(30)
struct ice_ts_dev_info {
/* Device specific info */
@@ -382,6 +387,7 @@ struct ice_ts_dev_info {
u8 tmr1_ena;
u8 ts_ll_read;
u8 ts_ll_int_read;
+ u8 ll_phy_tmr_update;
};
#define ICE_NAC_TOPO_PRIMARY_M BIT(0)
@@ -692,7 +698,6 @@ struct ice_dcb_app_priority_table {
#define ICE_MAX_USER_PRIORITY 8
#define ICE_DCBX_MAX_APPS 64
-#define ICE_DSCP_NUM_VAL 64
#define ICE_LLDPDU_SIZE 1500
#define ICE_TLV_STATUS_OPER 0x1
#define ICE_TLV_STATUS_SYNC 0x2
@@ -715,9 +720,9 @@ struct ice_dcbx_cfg {
u8 pfc_mode;
struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS];
/* when DSCP mapping defined by user set its bit to 1 */
- DECLARE_BITMAP(dscp_mapped, ICE_DSCP_NUM_VAL);
+ DECLARE_BITMAP(dscp_mapped, DSCP_MAX);
/* array holding DSCP -> UP/TC values for DSCP L3 QoS mode */
- u8 dscp_map[ICE_DSCP_NUM_VAL];
+ u8 dscp_map[DSCP_MAX];
u8 dcbx_mode;
#define ICE_DCBX_MODE_CEE 0x1
#define ICE_DCBX_MODE_IEEE 0x2
@@ -848,26 +853,26 @@ struct ice_mbx_data {
#define ICE_PORTS_PER_QUAD 4
#define ICE_GET_QUAD_NUM(port) ((port) / ICE_PORTS_PER_QUAD)
+#define ATQBAL_FLAGS_INTR_IN_PROGRESS BIT(0)
+
+struct ice_e810_params {
+ /* The wait queue lock also protects the low latency interface */
+ wait_queue_head_t atqbal_wq;
+ unsigned int atqbal_flags;
+};
+
struct ice_eth56g_params {
u8 num_phys;
- u8 phy_addr[2];
bool onestep_ena;
bool sfd_ena;
u32 peer_delay;
};
union ice_phy_params {
+ struct ice_e810_params e810;
struct ice_eth56g_params eth56g;
};
-/* PHY model */
-enum ice_phy_model {
- ICE_PHY_UNSUP = -1,
- ICE_PHY_E810 = 1,
- ICE_PHY_E82X,
- ICE_PHY_ETH56G,
-};
-
/* Global Link Topology */
enum ice_global_link_topo {
ICE_LINK_TOPO_UP_TO_2_LINKS,
@@ -877,11 +882,9 @@ enum ice_global_link_topo {
};
struct ice_ptp_hw {
- enum ice_phy_model phy_model;
union ice_phy_params phy;
u8 num_lports;
u8 ports_per_phy;
- bool is_2x50g_muxed_topo;
};
/* Port hardware description */
@@ -945,9 +948,7 @@ struct ice_hw {
u8 fw_patch; /* firmware patch version */
u32 fw_build; /* firmware build number */
- struct ice_fwlog_cfg fwlog_cfg;
- bool fwlog_supported; /* does hardware support FW logging? */
- struct ice_fwlog_ring fwlog_ring;
+ struct libie_fwlog fwlog;
/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL
* register. Used for determining the ITR/INTRL granularity during
@@ -969,6 +970,7 @@ struct ice_hw {
u8 intrl_gran;
struct ice_ptp_hw ptp;
+ s8 lane_num;
/* Active package version (currently active) */
struct ice_pkg_ver active_pkg_ver;
@@ -1061,6 +1063,7 @@ struct ice_hw_port_stats {
u64 error_bytes; /* errbc */
u64 mac_local_faults; /* mlfc */
u64 mac_remote_faults; /* mrfc */
+ u64 rx_len_errors; /* rlec */
u64 link_xon_rx; /* lxonrxc */
u64 link_xoff_rx; /* lxoffrxc */
u64 link_xon_tx; /* lxontxc */
@@ -1216,4 +1219,9 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_FW_API_REPORT_DFLT_CFG_MIN 7
#define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3
+/* AQ API version for Health Status support */
+#define ICE_FW_API_HEALTH_REPORT_MAJ 1
+#define ICE_FW_API_HEALTH_REPORT_MIN 7
+#define ICE_FW_API_HEALTH_REPORT_PATCH 6
+
#endif /* _ICE_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index c7c0c2f50c26..de9e81ccee66 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -5,7 +5,7 @@
#include "ice.h"
#include "ice_lib.h"
#include "ice_fltr.h"
-#include "ice_virtchnl_allowlist.h"
+#include "virt/allowlist.h"
/* Public functions which may be accessed by all driver files */
@@ -226,6 +226,7 @@ static void ice_vf_clear_counters(struct ice_vf *vf)
vsi->num_vlan = 0;
vf->num_mac = 0;
+ vf->num_mac_lldp = 0;
memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));
}
@@ -858,16 +859,13 @@ static void ice_notify_vf_reset(struct ice_vf *vf)
int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
struct ice_pf *pf = vf->pf;
- struct ice_lag *lag;
struct ice_vsi *vsi;
- u8 act_prt, pri_prt;
struct device *dev;
int err = 0;
+ u8 act_prt;
bool rsd;
dev = ice_pf_to_dev(pf);
- act_prt = ICE_LAG_INVALID_PORT;
- pri_prt = pf->hw.port_info->lport;
if (flags & ICE_VF_RESET_NOTIFY)
ice_notify_vf_reset(vf);
@@ -883,16 +881,8 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
else
lockdep_assert_held(&vf->cfg_lock);
- lag = pf->lag;
mutex_lock(&pf->lag_mutex);
- if (lag && lag->bonded && lag->primary) {
- act_prt = lag->active_port;
- if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
- lag->upper_netdev)
- ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
- else
- act_prt = ICE_LAG_INVALID_PORT;
- }
+ act_prt = ice_lag_prepare_vf_reset(pf->lag);
if (ice_is_vf_disabled(vf)) {
vsi = ice_get_vf_vsi(vf);
@@ -978,9 +968,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
ice_reset_vf_mbx_cnt(vf);
out_unlock:
- if (lag && lag->bonded && lag->primary &&
- act_prt != ICE_LAG_INVALID_PORT)
- ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ ice_lag_complete_vf_reset(pf->lag, act_prt);
mutex_unlock(&pf->lag_mutex);
if (flags & ICE_VF_RESET_LOCK)
@@ -1021,6 +1009,9 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
vf->num_msix = vfs->num_msix_per;
vf->num_vf_qs = vfs->num_qps_per;
+ /* set default RSS hash configuration */
+ vf->rss_hashcfg = ICE_DEFAULT_RSS_HASHCFG;
+
/* ctrl_vsi_idx will be set to a valid value only when iAVF
* creates its first fdir rule.
*/
@@ -1036,6 +1027,14 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
mutex_init(&vf->cfg_lock);
}
+void ice_deinitialize_vf_entry(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
+ list_del(&vf->mbx_info.list_entry);
+}
+
/**
* ice_dis_vf_qs - Disable the VF queues
* @vf: pointer to the VF structure
@@ -1393,3 +1392,28 @@ struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi)
rcu_read_unlock();
return ctrl_vsi;
}
+
+/**
+ * ice_vf_update_mac_lldp_num - update the VF's number of LLDP addresses
+ * @vf: the VF whose LLDP MAC count is updated
+ * @vsi: the corresponding VSI
+ * @incr: true if the rule was added, false if it was removed
+ */
+void ice_vf_update_mac_lldp_num(struct ice_vf *vf, struct ice_vsi *vsi,
+ bool incr)
+{
+ bool lldp_by_fw = test_bit(ICE_FLAG_FW_LLDP_AGENT, vsi->back->flags);
+ bool was_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw;
+ bool is_ena;
+
+ if (WARN_ON(!vsi)) {
+ vf->num_mac_lldp = 0;
+ return;
+ }
+
+ vf->num_mac_lldp += incr ? 1 : -1;
+ is_ena = ice_vf_is_lldp_ena(vf) && !lldp_by_fw;
+
+ if (was_ena != is_ena)
+ ice_vsi_cfg_sw_lldp(vsi, false, is_ena);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 4261fe1c2bcd..7a9c75d1d07c 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -13,7 +13,7 @@
#include <linux/avf/virtchnl.h>
#include "ice_type.h"
#include "ice_flow.h"
-#include "ice_virtchnl_fdir.h"
+#include "virt/fdir.h"
#include "ice_vsi_vlan_ops.h"
#define ICE_MAX_SRIOV_VFS 256
@@ -53,6 +53,46 @@ struct ice_mdd_vf_events {
u16 last_printed;
};
+enum ice_hash_ip_ctx_type {
+ ICE_HASH_IP_CTX_IP = 0,
+ ICE_HASH_IP_CTX_IP_ESP,
+ ICE_HASH_IP_CTX_IP_UDP_ESP,
+ ICE_HASH_IP_CTX_IP_AH,
+ ICE_HASH_IP_CTX_IP_PFCP,
+ ICE_HASH_IP_CTX_IP_UDP,
+ ICE_HASH_IP_CTX_IP_TCP,
+ ICE_HASH_IP_CTX_IP_SCTP,
+ ICE_HASH_IP_CTX_MAX,
+};
+
+struct ice_vf_hash_ip_ctx {
+ struct ice_rss_hash_cfg ctx[ICE_HASH_IP_CTX_MAX];
+};
+
+enum ice_hash_gtpu_ctx_type {
+ ICE_HASH_GTPU_CTX_EH_IP = 0,
+ ICE_HASH_GTPU_CTX_EH_IP_UDP,
+ ICE_HASH_GTPU_CTX_EH_IP_TCP,
+ ICE_HASH_GTPU_CTX_UP_IP,
+ ICE_HASH_GTPU_CTX_UP_IP_UDP,
+ ICE_HASH_GTPU_CTX_UP_IP_TCP,
+ ICE_HASH_GTPU_CTX_DW_IP,
+ ICE_HASH_GTPU_CTX_DW_IP_UDP,
+ ICE_HASH_GTPU_CTX_DW_IP_TCP,
+ ICE_HASH_GTPU_CTX_MAX,
+};
+
+struct ice_vf_hash_gtpu_ctx {
+ struct ice_rss_hash_cfg ctx[ICE_HASH_GTPU_CTX_MAX];
+};
+
+struct ice_vf_hash_ctx {
+ struct ice_vf_hash_ip_ctx v4;
+ struct ice_vf_hash_ip_ctx v6;
+ struct ice_vf_hash_gtpu_ctx ipv4;
+ struct ice_vf_hash_gtpu_ctx ipv6;
+};
+
/* Structure to store fdir fv entry */
struct ice_fdir_prof_info {
struct ice_parser_profile prof;
@@ -66,6 +106,12 @@ struct ice_vf_qs_bw {
u8 tc;
};
+/* Structure to store RSS field vector entry */
+struct ice_rss_prof_info {
+ struct ice_parser_profile prof;
+ bool symm;
+};
+
/* VF operations */
struct ice_vf_ops {
enum ice_disq_rst_src reset_type;
@@ -106,8 +152,9 @@ struct ice_vf {
u16 ctrl_vsi_idx;
struct ice_vf_fdir fdir;
struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
- /* first vector index of this VF in the PF space */
- int first_vector_idx;
+ struct ice_rss_prof_info rss_prof_info[ICE_MAX_PTGS];
+ struct ice_vf_hash_ctx hash_ctx;
+ u64 rss_hashcfg; /* RSS hash configuration */
struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
struct virtchnl_version_info vf_ver;
u32 driver_caps; /* reported by VF driver */
@@ -124,13 +171,22 @@ struct ice_vf {
u8 spoofchk:1;
u8 link_forced:1;
u8 link_up:1; /* only valid if VF link is forced */
+ u8 lldp_tx_ena:1;
+
+ u16 num_msix; /* num of MSI-X configured on this VF */
+
+ u32 ptp_caps;
+
unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
+ /* first vector index of this VF in the PF space */
+ int first_vector_idx;
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
unsigned long vf_caps; /* VF's adv. capabilities */
u8 num_req_qs; /* num of queue pairs requested by VF */
u16 num_mac;
+ u16 num_mac_lldp;
u16 num_vf_qs; /* num of queue configured per VF */
u8 vlan_strip_ena; /* Outer and Inner VLAN strip enable */
#define ICE_INNER_VLAN_STRIP_ENA BIT(0)
@@ -146,7 +202,9 @@ struct ice_vf {
/* devlink port data */
struct devlink_port devlink_port;
- u16 num_msix; /* num of MSI-X configured on this VF */
+ u16 lldp_recipe_id;
+ u16 lldp_rule_id;
+
struct ice_vf_qs_bw qs_bw[ICE_MAX_RSS_QS_PER_VF];
};
@@ -177,6 +235,11 @@ static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf)
return vf->port_vlan_info.tpid;
}
+static inline bool ice_vf_is_lldp_ena(struct ice_vf *vf)
+{
+ return vf->num_mac_lldp && vf->trusted;
+}
+
/* VF Hash Table access functions
*
* These functions provide abstraction for interacting with the VF hash table.
@@ -224,6 +287,18 @@ static inline u16 ice_vf_get_port_vlan_tpid(struct ice_vf *vf)
#ifdef CONFIG_PCI_IOV
struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id);
+
+static inline struct ice_vf *ice_get_vf_by_dev(struct ice_pf *pf,
+ struct pci_dev *vf_dev)
+{
+ int vf_id = pci_iov_vf_id(vf_dev);
+
+ if (vf_id < 0)
+ return NULL;
+
+ return ice_get_vf_by_id(pf, pci_iov_vf_id(vf_dev));
+}
+
void ice_put_vf(struct ice_vf *vf);
bool ice_has_vfs(struct ice_pf *pf);
u16 ice_get_num_vfs(struct ice_pf *pf);
@@ -242,12 +317,20 @@ ice_vf_clear_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m);
int ice_reset_vf(struct ice_vf *vf, u32 flags);
void ice_reset_all_vfs(struct ice_pf *pf);
struct ice_vsi *ice_get_vf_ctrl_vsi(struct ice_pf *pf, struct ice_vsi *vsi);
+void ice_vf_update_mac_lldp_num(struct ice_vf *vf, struct ice_vsi *vsi,
+ bool incr);
#else /* CONFIG_PCI_IOV */
static inline struct ice_vf *ice_get_vf_by_id(struct ice_pf *pf, u16 vf_id)
{
return NULL;
}
+static inline struct ice_vf *ice_get_vf_by_dev(struct ice_pf *pf,
+ struct pci_dev *vf_dev)
+{
+ return NULL;
+}
+
static inline void ice_put_vf(struct ice_vf *vf)
{
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
index 0c7e77c0a09f..5392b0404986 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
@@ -24,6 +24,7 @@
#endif
void ice_initialize_vf_entry(struct ice_vf *vf);
+void ice_deinitialize_vf_entry(struct ice_vf *vf);
void ice_dis_vf_qs(struct ice_vf *vf);
int ice_check_vf_init(struct ice_vf *vf);
enum virtchnl_status_code ice_err_to_virt_err(int err);
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
index 75c8113e58ee..7798a5d4bc9d 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
@@ -23,18 +23,18 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
u8 *msg, u16 msglen, struct ice_sq_cd *cd)
{
struct ice_aqc_pf_vf_msg *cmd;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
- cmd = &desc.params.virt;
+ cmd = libie_aq_raw(&desc);
cmd->id = cpu_to_le32(vfid);
desc.cookie_high = cpu_to_le32(v_opcode);
desc.cookie_low = cpu_to_le32(v_retval);
if (msglen)
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vlan_mode.c b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
index 1279c1ffe31c..fb526cb84776 100644
--- a/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
+++ b/drivers/net/ethernet/intel/ice/ice_vlan_mode.c
@@ -63,7 +63,7 @@ static int
ice_aq_get_vlan_mode(struct ice_hw *hw,
struct ice_aqc_get_vlan_mode *get_params)
{
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
if (!get_params)
return -EINVAL;
@@ -275,7 +275,7 @@ ice_aq_set_vlan_mode(struct ice_hw *hw,
struct ice_aqc_set_vlan_mode *set_params)
{
u8 rdma_packet, mng_vlan_prot_id;
- struct ice_aq_desc desc;
+ struct libie_aq_desc desc;
if (!set_params)
return -EINVAL;
@@ -295,7 +295,7 @@ ice_aq_set_vlan_mode(struct ice_hw *hw,
ice_fill_dflt_direct_cmd_desc(&desc,
ice_aqc_opc_set_vlan_mode_parameters);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags |= cpu_to_le16(LIBIE_AQ_FLAG_RD);
return ice_aq_send_cmd(hw, &desc, set_params, sizeof(*set_params),
NULL);
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
index 5291f2888ef8..ada78f83b3ac 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
@@ -113,7 +113,7 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err) {
dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
goto out;
}
@@ -169,7 +169,7 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err) {
dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %s\n",
- ena, err, ice_aq_str(hw->adminq.sq_last_status));
+ ena, err, libie_aq_str(hw->adminq.sq_last_status));
goto out;
}
@@ -258,7 +258,7 @@ static int __ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, u16 pvid_info)
ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (ret) {
dev_info(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
+ ret, libie_aq_str(hw->adminq.sq_last_status));
goto out;
}
@@ -306,7 +306,7 @@ int ice_vsi_clear_inner_port_vlan(struct ice_vsi *vsi)
ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (ret)
dev_err(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n",
- ret, ice_aq_str(hw->adminq.sq_last_status));
+ ret, libie_aq_str(hw->adminq.sq_last_status));
kfree(ctxt);
return ret;
@@ -353,7 +353,7 @@ static int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
if (status) {
netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %s\n",
ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
- ice_aq_str(pf->hw.adminq.sq_last_status));
+ libie_aq_str(pf->hw.adminq.sq_last_status));
goto err_out;
}
@@ -497,7 +497,7 @@ int ice_vsi_ena_outer_stripping(struct ice_vsi *vsi, u16 tpid)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
dev_err(ice_pf_to_dev(vsi->back), "update VSI for enabling outer VLAN stripping failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
else
vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
@@ -544,7 +544,7 @@ int ice_vsi_dis_outer_stripping(struct ice_vsi *vsi)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
dev_err(ice_pf_to_dev(vsi->back), "update VSI for disabling outer VLAN stripping failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
else
vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
@@ -604,7 +604,7 @@ int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
dev_err(ice_pf_to_dev(vsi->back), "update VSI for enabling outer VLAN insertion failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
else
vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
@@ -654,7 +654,7 @@ int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
dev_err(ice_pf_to_dev(vsi->back), "update VSI for disabling outer VLAN insertion failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
else
vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
@@ -720,7 +720,7 @@ __ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, u16 vlan_info, u16 tpid)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err) {
dev_err(ice_pf_to_dev(vsi->back), "update VSI for setting outer port based VLAN failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
} else {
vsi->info.port_based_outer_vlan = ctxt->info.port_based_outer_vlan;
vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
@@ -782,7 +782,7 @@ int ice_vsi_clear_outer_port_vlan(struct ice_vsi *vsi)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err)
dev_err(ice_pf_to_dev(vsi->back), "update VSI for clearing outer port based VLAN failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
kfree(ctxt);
return err;
@@ -830,7 +830,7 @@ int ice_vsi_clear_port_vlan(struct ice_vsi *vsi)
err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (err) {
dev_err(ice_pf_to_dev(vsi->back), "update VSI for clearing port based VLAN failed, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
+ err, libie_aq_str(hw->adminq.sq_last_status));
} else {
vsi->info.port_based_outer_vlan =
ctxt->info.port_based_outer_vlan;
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 334ae945d640..989ff1fd9110 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -2,6 +2,8 @@
/* Copyright (c) 2019, Intel Corporation. */
#include <linux/bpf_trace.h>
+#include <linux/unroll.h>
+#include <net/libeth/xdp.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>
#include "ice.h"
@@ -18,52 +20,12 @@ static struct xdp_buff **ice_xdp_buf(struct ice_rx_ring *rx_ring, u32 idx)
}
/**
- * ice_qp_reset_stats - Resets all stats for rings of given index
- * @vsi: VSI that contains rings of interest
- * @q_idx: ring index in array
- */
-static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
-{
- struct ice_vsi_stats *vsi_stat;
- struct ice_pf *pf;
-
- pf = vsi->back;
- if (!pf->vsi_stats)
- return;
-
- vsi_stat = pf->vsi_stats[vsi->idx];
- if (!vsi_stat)
- return;
-
- memset(&vsi_stat->rx_ring_stats[q_idx]->rx_stats, 0,
- sizeof(vsi_stat->rx_ring_stats[q_idx]->rx_stats));
- memset(&vsi_stat->tx_ring_stats[q_idx]->stats, 0,
- sizeof(vsi_stat->tx_ring_stats[q_idx]->stats));
- if (vsi->xdp_rings)
- memset(&vsi->xdp_rings[q_idx]->ring_stats->stats, 0,
- sizeof(vsi->xdp_rings[q_idx]->ring_stats->stats));
-}
-
-/**
- * ice_qp_clean_rings - Cleans all the rings of a given index
- * @vsi: VSI that contains rings of interest
- * @q_idx: ring index in array
- */
-static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
-{
- ice_clean_tx_ring(vsi->tx_rings[q_idx]);
- if (vsi->xdp_rings)
- ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
- ice_clean_rx_ring(vsi->rx_rings[q_idx]);
-}
-
-/**
* ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
* @vsi: VSI that has netdev
* @q_vector: q_vector that has NAPI context
* @enable: true for enable, false for disable
*/
-static void
+void
ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
bool enable)
{
@@ -82,7 +44,7 @@ ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
* @rx_ring: Rx ring that will have its IRQ disabled
* @q_vector: queue vector
*/
-static void
+void
ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
struct ice_q_vector *q_vector)
{
@@ -112,7 +74,7 @@ ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
* @q_vector: queue vector
* @qid: queue index
*/
-static void
+void
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid)
{
u16 reg_idx = q_vector->reg_idx;
@@ -142,7 +104,7 @@ ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid)
* @vsi: the VSI that contains queue vector
* @q_vector: queue vector
*/
-static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
@@ -153,111 +115,6 @@ static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
}
/**
- * ice_qp_dis - Disables a queue pair
- * @vsi: VSI of interest
- * @q_idx: ring index in array
- *
- * Returns 0 on success, negative on failure.
- */
-static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
-{
- struct ice_txq_meta txq_meta = { };
- struct ice_q_vector *q_vector;
- struct ice_tx_ring *tx_ring;
- struct ice_rx_ring *rx_ring;
- int fail = 0;
- int err;
-
- if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
- return -EINVAL;
-
- tx_ring = vsi->tx_rings[q_idx];
- rx_ring = vsi->rx_rings[q_idx];
- q_vector = rx_ring->q_vector;
-
- synchronize_net();
- netif_carrier_off(vsi->netdev);
- netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
-
- ice_qvec_dis_irq(vsi, rx_ring, q_vector);
- ice_qvec_toggle_napi(vsi, q_vector, false);
-
- ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
- err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
- if (!fail)
- fail = err;
- if (vsi->xdp_rings) {
- struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
-
- memset(&txq_meta, 0, sizeof(txq_meta));
- ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
- err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
- &txq_meta);
- if (!fail)
- fail = err;
- }
-
- ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false);
- ice_qp_clean_rings(vsi, q_idx);
- ice_qp_reset_stats(vsi, q_idx);
-
- return fail;
-}
-
-/**
- * ice_qp_ena - Enables a queue pair
- * @vsi: VSI of interest
- * @q_idx: ring index in array
- *
- * Returns 0 on success, negative on failure.
- */
-static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
-{
- struct ice_q_vector *q_vector;
- int fail = 0;
- bool link_up;
- int err;
-
- err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
- if (!fail)
- fail = err;
-
- if (ice_is_xdp_ena_vsi(vsi)) {
- struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
-
- err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
- if (!fail)
- fail = err;
- ice_set_ring_xdp(xdp_ring);
- ice_tx_xsk_pool(vsi, q_idx);
- }
-
- err = ice_vsi_cfg_single_rxq(vsi, q_idx);
- if (!fail)
- fail = err;
-
- q_vector = vsi->rx_rings[q_idx]->q_vector;
- ice_qvec_cfg_msix(vsi, q_vector, q_idx);
-
- err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
- if (!fail)
- fail = err;
-
- ice_qvec_toggle_napi(vsi, q_vector, true);
- ice_qvec_ena_irq(vsi, q_vector);
-
- /* make sure NAPI sees updated ice_{t,x}_ring::xsk_pool */
- synchronize_net();
- ice_get_link_status(vsi->port_info, &link_up);
- if (link_up) {
- netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
- netif_carrier_on(vsi->netdev);
- }
-
- return fail;
-}
-
-/**
* ice_xsk_pool_disable - disable a buffer pool region
* @vsi: Current VSI
* @qid: queue ID
@@ -313,50 +170,18 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
* If allocation was successful, substitute buffer with allocated one.
* Returns 0 on success, negative on failure
*/
-static int
+int
ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
{
- size_t elem_size = pool_present ? sizeof(*rx_ring->xdp_buf) :
- sizeof(*rx_ring->rx_buf);
- void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
-
- if (!sw_ring)
- return -ENOMEM;
-
if (pool_present) {
- kfree(rx_ring->rx_buf);
- rx_ring->rx_buf = NULL;
- rx_ring->xdp_buf = sw_ring;
+ rx_ring->xdp_buf = kcalloc(rx_ring->count,
+ sizeof(*rx_ring->xdp_buf),
+ GFP_KERNEL);
+ if (!rx_ring->xdp_buf)
+ return -ENOMEM;
} else {
kfree(rx_ring->xdp_buf);
rx_ring->xdp_buf = NULL;
- rx_ring->rx_buf = sw_ring;
- }
-
- return 0;
-}
-
-/**
- * ice_realloc_zc_buf - reallocate XDP ZC queue pairs
- * @vsi: Current VSI
- * @zc: is zero copy set
- *
- * Reallocate buffer for rx_rings that might be used by XSK.
- * XDP requires more memory, than rx_buf provides.
- * Returns 0 on success, negative on failure
- */
-int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
-{
- struct ice_rx_ring *rx_ring;
- uint i;
-
- ice_for_each_rxq(vsi, i) {
- rx_ring = vsi->rx_rings[i];
- if (!rx_ring->xsk_pool)
- continue;
-
- if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
- return -ENOMEM;
}
return 0;
@@ -372,6 +197,7 @@ int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
*/
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];
bool if_running, pool_present = !!pool;
int ret = 0, pool_failure = 0;
@@ -385,8 +211,6 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
ice_is_xdp_ena_vsi(vsi);
if (if_running) {
- struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];
-
ret = ice_qp_dis(vsi, qid);
if (ret) {
netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
@@ -447,11 +271,6 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
rx_desc->read.pkt_addr = cpu_to_le64(dma);
rx_desc->wb.status_error0 = 0;
- /* Put private info that changes on a per-packet basis
- * into xdp_buff_xsk->cb.
- */
- ice_xdp_meta_set_desc(*xdp, rx_desc);
-
rx_desc++;
xdp++;
}
@@ -537,69 +356,6 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
}
/**
- * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
- * @rx_ring: Rx ring
- * @xdp: Pointer to XDP buffer
- *
- * This function allocates a new skb from a zero-copy Rx buffer.
- *
- * Returns the skb on success, NULL on failure.
- */
-static struct sk_buff *
-ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
-{
- unsigned int totalsize = xdp->data_end - xdp->data_meta;
- unsigned int metasize = xdp->data - xdp->data_meta;
- struct skb_shared_info *sinfo = NULL;
- struct sk_buff *skb;
- u32 nr_frags = 0;
-
- if (unlikely(xdp_buff_has_frags(xdp))) {
- sinfo = xdp_get_shared_info_from_buff(xdp);
- nr_frags = sinfo->nr_frags;
- }
- net_prefetch(xdp->data_meta);
-
- skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
- if (unlikely(!skb))
- return NULL;
-
- memcpy(__skb_put(skb, totalsize), xdp->data_meta,
- ALIGN(totalsize, sizeof(long)));
-
- if (metasize) {
- skb_metadata_set(skb, metasize);
- __skb_pull(skb, metasize);
- }
-
- if (likely(!xdp_buff_has_frags(xdp)))
- goto out;
-
- for (int i = 0; i < nr_frags; i++) {
- struct skb_shared_info *skinfo = skb_shinfo(skb);
- skb_frag_t *frag = &sinfo->frags[i];
- struct page *page;
- void *addr;
-
- page = dev_alloc_page();
- if (!page) {
- dev_kfree_skb(skb);
- return NULL;
- }
- addr = page_to_virt(page);
-
- memcpy(addr, skb_frag_page(frag), skb_frag_size(frag));
-
- __skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++,
- addr, 0, skb_frag_size(frag));
- }
-
-out:
- xsk_buff_free(xdp);
- return skb;
-}
-
-/**
* ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
* @xdp_ring: XDP Tx ring
* @xsk_pool: AF_XDP buffer pool pointer
@@ -801,35 +557,6 @@ out_failure:
return result;
}
-static int
-ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
- struct xdp_buff *xdp, const unsigned int size)
-{
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
-
- if (!size)
- return 0;
-
- if (!xdp_buff_has_frags(first)) {
- sinfo->nr_frags = 0;
- sinfo->xdp_frags_size = 0;
- xdp_buff_set_frags_flag(first);
- }
-
- if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
- xsk_buff_free(first);
- return -ENOMEM;
- }
-
- __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
- virt_to_page(xdp->data_hard_start),
- XDP_PACKET_HEADROOM, size);
- sinfo->xdp_frags_size += size;
- xsk_buff_add_frag(xdp);
-
- return 0;
-}
-
/**
* ice_clean_rx_irq_zc - consumes packets from the hardware ring
* @rx_ring: AF_XDP Rx ring
@@ -842,10 +569,10 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
struct xsk_buff_pool *xsk_pool,
int budget)
{
+ struct xdp_buff *first = (struct xdp_buff *)rx_ring->xsk;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u32 ntc = rx_ring->next_to_clean;
u32 ntu = rx_ring->next_to_use;
- struct xdp_buff *first = NULL;
struct ice_tx_ring *xdp_ring;
unsigned int xdp_xmit = 0;
struct bpf_prog *xdp_prog;
@@ -859,9 +586,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
xdp_ring = rx_ring->xdp_ring;
- if (ntc != rx_ring->first_desc)
- first = *ice_xdp_buf(rx_ring, rx_ring->first_desc);
-
while (likely(total_rx_packets < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
unsigned int size, xdp_res = 0;
@@ -895,16 +619,19 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
if (!first) {
first = xdp;
- } else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
- break;
+ } else if (likely(size) && !xsk_buff_add_frag(first, xdp)) {
+ xsk_buff_free(first);
+ first = NULL;
}
if (++ntc == cnt)
ntc = 0;
- if (ice_is_non_eop(rx_ring, rx_desc))
+ if (ice_is_non_eop(rx_ring, rx_desc) || unlikely(!first))
continue;
+ ((struct libeth_xdp_buff *)first)->desc = rx_desc;
+
xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring,
xsk_pool);
if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
@@ -912,7 +639,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
} else if (xdp_res == ICE_XDP_EXIT) {
failure = true;
first = NULL;
- rx_ring->first_desc = ntc;
break;
} else if (xdp_res == ICE_XDP_CONSUMED) {
xsk_buff_free(first);
@@ -924,24 +650,20 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
total_rx_packets++;
first = NULL;
- rx_ring->first_desc = ntc;
continue;
construct_skb:
/* XDP_PASS path */
- skb = ice_construct_skb_zc(rx_ring, first);
+ skb = xdp_build_skb_from_zc(first);
if (!skb) {
+ xsk_buff_free(first);
+ first = NULL;
+
rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
- break;
+ continue;
}
first = NULL;
- rx_ring->first_desc = ntc;
-
- if (eth_skb_pad(skb)) {
- skb = NULL;
- continue;
- }
total_rx_bytes += skb->len;
total_rx_packets++;
@@ -953,7 +675,9 @@ construct_skb:
}
rx_ring->next_to_clean = ntc;
- entries_to_alloc = ICE_RX_DESC_UNUSED(rx_ring);
+ rx_ring->xsk = (struct libeth_xdp_buff *)first;
+
+ entries_to_alloc = ICE_DESC_UNUSED(rx_ring);
if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
failure |= !ice_alloc_rx_bufs_zc(rx_ring, xsk_pool,
entries_to_alloc);
@@ -1017,7 +741,8 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring,
struct ice_tx_desc *tx_desc;
u32 i;
- loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
+ unrolled_count(PKTS_PER_BATCH)
+ for (i = 0; i < PKTS_PER_BATCH; i++) {
dma_addr_t dma;
dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr);
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 45adeb513253..5275fcedc9e1 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -7,14 +7,6 @@
#define PKTS_PER_BATCH 8
-#ifdef __clang__
-#define loop_unrolled_for _Pragma("clang loop unroll_count(8)") for
-#elif __GNUC__ >= 8
-#define loop_unrolled_for _Pragma("GCC unroll 8") for
-#else
-#define loop_unrolled_for for
-#endif
-
struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS
@@ -30,7 +22,14 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool);
-int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
+int ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present);
+void ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+ u16 qid);
+void ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+ bool enable);
+void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector);
+void ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
+ struct ice_q_vector *q_vector);
#else
static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
struct xsk_buff_pool __always_unused *xsk_pool)
@@ -78,10 +77,25 @@ static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { }
static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { }
static inline int
-ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi,
- bool __always_unused zc)
+ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring,
+ bool __always_unused pool_present)
{
return 0;
}
+
+static inline void
+ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+ u16 qid) { }
+
+static inline void
+ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
+ bool enable) { }
+
+static inline void
+ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector) { }
+
+static inline void
+ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
+ struct ice_q_vector *q_vector) { }
#endif /* CONFIG_XDP_SOCKETS */
#endif /* !_ICE_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/virt/allowlist.c
index c105a82ee136..a07efec19c45 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c
+++ b/drivers/net/ethernet/intel/ice/virt/allowlist.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */
-#include "ice_virtchnl_allowlist.h"
+#include "allowlist.h"
/* Purpose of this file is to share functionality to allowlist or denylist
* opcodes used in PF <-> VF communication. Group of opcodes:
@@ -65,7 +65,7 @@ static const u32 vlan_v2_allowlist_opcodes[] = {
/* VIRTCHNL_VF_OFFLOAD_RSS_PF */
static const u32 rss_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_CONFIG_RSS_KEY, VIRTCHNL_OP_CONFIG_RSS_LUT,
- VIRTCHNL_OP_GET_RSS_HENA_CAPS, VIRTCHNL_OP_SET_RSS_HENA,
+ VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, VIRTCHNL_OP_SET_RSS_HASHCFG,
VIRTCHNL_OP_CONFIG_RSS_HFUNC,
};
@@ -84,6 +84,12 @@ static const u32 fdir_pf_allowlist_opcodes[] = {
VIRTCHNL_OP_ADD_FDIR_FILTER, VIRTCHNL_OP_DEL_FDIR_FILTER,
};
+/* VIRTCHNL_VF_CAP_PTP */
+static const u32 ptp_allowlist_opcodes[] = {
+ VIRTCHNL_OP_1588_PTP_GET_CAPS,
+ VIRTCHNL_OP_1588_PTP_GET_TIME,
+};
+
static const u32 tc_allowlist_opcodes[] = {
VIRTCHNL_OP_GET_QOS_CAPS, VIRTCHNL_OP_CONFIG_QUEUE_BW,
VIRTCHNL_OP_CONFIG_QUANTA,
@@ -110,6 +116,7 @@ static const struct allowlist_opcode_info allowlist_opcodes[] = {
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_FDIR_PF, fdir_pf_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_VLAN_V2, vlan_v2_allowlist_opcodes),
ALLOW_ITEM(VIRTCHNL_VF_OFFLOAD_QOS, tc_allowlist_opcodes),
+ ALLOW_ITEM(VIRTCHNL_VF_CAP_PTP, ptp_allowlist_opcodes),
};
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h b/drivers/net/ethernet/intel/ice/virt/allowlist.h
index d3ae86ded219..d3ae86ded219 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.h
+++ b/drivers/net/ethernet/intel/ice/virt/allowlist.h
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/virt/fdir.c
index 14e3f0f89c78..ae83c3914e29 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/virt/fdir.c
@@ -832,21 +832,27 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
struct virtchnl_proto_hdrs *proto,
struct virtchnl_fdir_fltr_conf *conf)
{
- u8 *pkt_buf, *msk_buf __free(kfree);
+ u8 *pkt_buf, *msk_buf __free(kfree) = NULL;
struct ice_parser_result rslt;
struct ice_pf *pf = vf->pf;
+ u16 pkt_len, udp_port = 0;
struct ice_parser *psr;
int status = -ENOMEM;
struct ice_hw *hw;
- u16 udp_port = 0;
- pkt_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
- msk_buf = kzalloc(proto->raw.pkt_len, GFP_KERNEL);
+ pkt_len = proto->raw.pkt_len;
+
+ if (!pkt_len || pkt_len > VIRTCHNL_MAX_SIZE_RAW_PACKET)
+ return -EINVAL;
+
+ pkt_buf = kzalloc(pkt_len, GFP_KERNEL);
+ msk_buf = kzalloc(pkt_len, GFP_KERNEL);
+
if (!pkt_buf || !msk_buf)
goto err_mem_alloc;
- memcpy(pkt_buf, proto->raw.spec, proto->raw.pkt_len);
- memcpy(msk_buf, proto->raw.mask, proto->raw.pkt_len);
+ memcpy(pkt_buf, proto->raw.spec, pkt_len);
+ memcpy(msk_buf, proto->raw.mask, pkt_len);
hw = &pf->hw;
@@ -862,7 +868,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
if (ice_get_open_tunnel_port(hw, &udp_port, TNL_VXLAN))
ice_parser_vxlan_tunnel_set(psr, udp_port, true);
- status = ice_parser_run(psr, pkt_buf, proto->raw.pkt_len, &rslt);
+ status = ice_parser_run(psr, pkt_buf, pkt_len, &rslt);
if (status)
goto err_parser_destroy;
@@ -876,7 +882,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
}
status = ice_parser_profile_init(&rslt, pkt_buf, msk_buf,
- proto->raw.pkt_len, ICE_BLK_FD,
+ pkt_len, ICE_BLK_FD,
conf->prof);
if (status)
goto err_parser_profile_init;
@@ -885,7 +891,7 @@ ice_vc_fdir_parse_raw(struct ice_vf *vf,
ice_parser_profile_dump(hw, conf->prof);
/* Store raw flow info into @conf */
- conf->pkt_len = proto->raw.pkt_len;
+ conf->pkt_len = pkt_len;
conf->pkt_buf = pkt_buf;
conf->parser_ena = true;
@@ -1444,7 +1450,8 @@ err_free_pkt:
*/
static void ice_vf_fdir_timer(struct timer_list *t)
{
- struct ice_vf_fdir_ctx *ctx_irq = from_timer(ctx_irq, t, rx_tmr);
+ struct ice_vf_fdir_ctx *ctx_irq = timer_container_of(ctx_irq, t,
+ rx_tmr);
struct ice_vf_fdir_ctx *ctx_done;
struct ice_vf_fdir *fdir;
unsigned long flags;
@@ -1515,7 +1522,7 @@ ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
memcpy(&ctx_done->rx_desc, rx_desc, sizeof(*rx_desc));
spin_unlock_irqrestore(&fdir->ctx_lock, flags);
- ret = del_timer(&ctx_irq->rx_tmr);
+ ret = timer_delete(&ctx_irq->rx_tmr);
if (!ret)
dev_err(dev, "VF %d: Unexpected inactive timer!\n", vf->vf_id);
@@ -1910,7 +1917,7 @@ static void ice_vc_fdir_clear_irq_ctx(struct ice_vf *vf)
struct ice_vf_fdir_ctx *ctx = &vf->fdir.ctx_irq;
unsigned long flags;
- del_timer(&ctx->rx_tmr);
+ timer_delete(&ctx->rx_tmr);
spin_lock_irqsave(&vf->fdir.ctx_lock, flags);
ctx->flags &= ~ICE_VF_FDIR_CTX_VALID;
spin_unlock_irqrestore(&vf->fdir.ctx_lock, flags);
@@ -2091,6 +2098,11 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
pf = vf->pf;
dev = ice_pf_to_dev(pf);
vf_vsi = ice_get_vf_vsi(vf);
+ if (!vf_vsi) {
+ dev_err(dev, "Can not get FDIR vf_vsi for VF %u\n", vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err_exit;
+ }
#define ICE_VF_MAX_FDIR_FILTERS 128
if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h b/drivers/net/ethernet/intel/ice/virt/fdir.h
index ac6dcab454b4..ac6dcab454b4 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
+++ b/drivers/net/ethernet/intel/ice/virt/fdir.h
diff --git a/drivers/net/ethernet/intel/ice/virt/queues.c b/drivers/net/ethernet/intel/ice/virt/queues.c
new file mode 100644
index 000000000000..f73d5a3e83d4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/virt/queues.c
@@ -0,0 +1,975 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022, Intel Corporation. */
+
+#include "virtchnl.h"
+#include "queues.h"
+#include "ice_vf_lib_private.h"
+#include "ice.h"
+#include "ice_base.h"
+#include "ice_lib.h"
+
+/**
+ * ice_vc_get_max_frame_size - get max frame size allowed for VF
+ * @vf: VF used to determine max frame size
+ *
+ * Max frame size is determined based on the current port's max frame size and
+ * whether a port VLAN is configured on this VF. The VF is not aware whether
+ * it's in a port VLAN, so the PF needs to account for this in max frame size
+ * checks and when sending the max frame size to the VF.
+ */
+u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
+{
+ struct ice_port_info *pi = ice_vf_get_port_info(vf);
+ u16 max_frame_size;
+
+ max_frame_size = pi->phy.link_info.max_frame_size;
+
+ if (ice_vf_is_port_vlan_ena(vf))
+ max_frame_size -= VLAN_HLEN;
+
+ return max_frame_size;
+}
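
A minimal, self-contained sketch of the adjustment made by ice_vc_get_max_frame_size() above: when a port VLAN is enabled on the VF, the PF shrinks the advertised max frame size by the 4-byte VLAN tag so the VF's MTU math still fits on the wire. The constant VLAN_HDR_LEN and the sample sizes are illustrative assumptions, not values from the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VLAN_HDR_LEN 4u /* assumed 802.1Q tag length, mirrors VLAN_HLEN */

/* Return the frame size a VF may use, given the port limit and whether the
 * PF inserts a port VLAN tag on the VF's behalf.
 */
static uint16_t vf_max_frame(uint16_t port_max_frame, bool port_vlan_ena)
{
	return port_vlan_ena ? port_max_frame - VLAN_HDR_LEN : port_max_frame;
}

int main(void)
{
	printf("%u\n", vf_max_frame(9728, true));  /* 9724 */
	printf("%u\n", vf_max_frame(1522, false)); /* 1522 */
	return 0;
}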
+
+/**
+ * ice_vc_isvalid_q_id
+ * @vsi: VSI to check queue ID against
+ * @qid: VSI relative queue ID
+ *
+ * check for the valid queue ID
+ */
+static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u16 qid)
+{
+ /* allocated Tx and Rx queues should always be equal for a VF VSI */
+ return qid < vsi->alloc_txq;
+}
+
+/**
+ * ice_vc_isvalid_ring_len
+ * @ring_len: length of ring
+ *
+ * check that the ring length is zero or a multiple of ICE_REQ_DESC_MULTIPLE
+ * within the supported descriptor range
+ */
+static bool ice_vc_isvalid_ring_len(u16 ring_len)
+{
+ return ring_len == 0 ||
+ (ring_len >= ICE_MIN_NUM_DESC &&
+ ring_len <= ICE_MAX_NUM_DESC_E810 &&
+ !(ring_len % ICE_REQ_DESC_MULTIPLE));
+}
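
A self-contained sketch of the ring-length rule enforced by ice_vc_isvalid_ring_len() above: a VF may pass 0 (leave the ring unconfigured) or a descriptor count that is a multiple of the required step within the supported range. The constant values below are assumptions for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MIN_NUM_DESC      64u   /* assumed minimum ring size */
#define MAX_NUM_DESC      8160u /* assumed maximum ring size */
#define REQ_DESC_MULTIPLE 32u   /* assumed required granularity */

static bool ring_len_is_valid(uint16_t ring_len)
{
	return ring_len == 0 ||
	       (ring_len >= MIN_NUM_DESC &&
		ring_len <= MAX_NUM_DESC &&
		!(ring_len % REQ_DESC_MULTIPLE));
}

int main(void)
{
	uint16_t samples[] = { 0, 63, 64, 100, 1024, 8160, 8192 };

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%u -> %s\n", samples[i],
		       ring_len_is_valid(samples[i]) ? "ok" : "invalid");
	return 0;
}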
+
+/**
+ * ice_vf_cfg_qs_bw - Configure per queue bandwidth
+ * @vf: pointer to the VF info
+ * @num_queues: number of queues to be configured
+ *
+ * Configure per queue bandwidth.
+ *
+ * Return: 0 on success or negative error value.
+ */
+static int ice_vf_cfg_qs_bw(struct ice_vf *vf, u16 num_queues)
+{
+ struct ice_hw *hw = &vf->pf->hw;
+ struct ice_vsi *vsi;
+ int ret;
+ u16 i;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ return -EINVAL;
+
+ for (i = 0; i < num_queues; i++) {
+ u32 p_rate, min_rate;
+ u8 tc;
+
+ p_rate = vf->qs_bw[i].peak;
+ min_rate = vf->qs_bw[i].committed;
+ tc = vf->qs_bw[i].tc;
+ if (p_rate)
+ ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
+ vf->qs_bw[i].queue_id,
+ ICE_MAX_BW, p_rate);
+ else
+ ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
+ vf->qs_bw[i].queue_id,
+ ICE_MAX_BW);
+ if (ret)
+ return ret;
+
+ if (min_rate)
+ ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
+ vf->qs_bw[i].queue_id,
+ ICE_MIN_BW, min_rate);
+ else
+ ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
+ vf->qs_bw[i].queue_id,
+ ICE_MIN_BW);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vf_cfg_q_quanta_profile - Configure quanta profile
+ * @vf: pointer to the VF info
+ * @quanta_prof_idx: pointer to the quanta profile index
+ * @quanta_size: quanta size to be set
+ *
+ * This function chooses an available quanta profile and configures the
+ * register. The quanta profile space is divided evenly among the device's
+ * functions and then made available to the specific PF and its VFs. The
+ * first profile for each PF is a reserved default profile; only the quanta
+ * size of the remaining unused profiles can be modified.
+ *
+ * Return: 0 on success or negative error value.
+ */
+static int ice_vf_cfg_q_quanta_profile(struct ice_vf *vf, u16 quanta_size,
+ u16 *quanta_prof_idx)
+{
+ const u16 n_desc = calc_quanta_desc(quanta_size);
+ struct ice_hw *hw = &vf->pf->hw;
+ const u16 n_cmd = 2 * n_desc;
+ struct ice_pf *pf = vf->pf;
+ u16 per_pf, begin_id;
+ u8 n_used;
+ u32 reg;
+
+ begin_id = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) / hw->dev_caps.num_funcs *
+ hw->logical_pf_id;
+
+ if (quanta_size == ICE_DFLT_QUANTA) {
+ *quanta_prof_idx = begin_id;
+ } else {
+ per_pf = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) /
+ hw->dev_caps.num_funcs;
+ n_used = pf->num_quanta_prof_used;
+ if (n_used < per_pf) {
+ *quanta_prof_idx = begin_id + 1 + n_used;
+ pf->num_quanta_prof_used++;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ reg = FIELD_PREP(GLCOMM_QUANTA_PROF_QUANTA_SIZE_M, quanta_size) |
+ FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_CMD_M, n_cmd) |
+ FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_DESC_M, n_desc);
+ wr32(hw, GLCOMM_QUANTA_PROF(*quanta_prof_idx), reg);
+
+ return 0;
+}
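
A rough, userspace sketch of how the quanta profile index space is carved up in ice_vf_cfg_q_quanta_profile() above: each function owns an equal slice of the global profile table, the first index in the slice is the reserved default, and non-default quanta sizes consume the remaining slots. The constants and the helper name pick_profile() are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>

#define QUANTA_PROF_MAX_INDEX 63u   /* assumed last valid global index */
#define DFLT_QUANTA           1024u /* assumed default quanta size */

static int pick_profile(uint16_t quanta_size, uint16_t num_funcs,
			uint16_t pf_id, uint16_t *used, uint16_t *prof_idx)
{
	uint16_t per_pf = (QUANTA_PROF_MAX_INDEX + 1) / num_funcs;
	uint16_t begin = per_pf * pf_id;

	if (quanta_size == DFLT_QUANTA) {
		*prof_idx = begin;           /* reserved default slot */
		return 0;
	}
	if (*used >= per_pf - 1)
		return -1;                   /* this PF's slice is exhausted */
	*prof_idx = begin + 1 + (*used)++;   /* next free non-default slot */
	return 0;
}

int main(void)
{
	uint16_t used = 0, idx;

	for (int i = 0; i < 4; i++)
		if (!pick_profile(512, 8, 2, &used, &idx))
			printf("profile %u\n", idx); /* 17, 18, 19, 20 */
	return 0;
}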
+
+/**
+ * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
+ * @vqs: virtchnl_queue_select structure containing bitmaps to validate
+ *
+ * Return true on successful validation, else false
+ */
+static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
+{
+ if ((!vqs->rx_queues && !vqs->tx_queues) ||
+ vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
+ vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
+ return false;
+
+ return true;
+}
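
A small sketch of the bitmap rule checked by ice_vc_validate_vqs_bitmaps() above: at least one queue must be selected, and neither bitmap may set a bit at or above the per-VF queue limit. MAX_QS_PER_VF is an assumed stand-in for ICE_MAX_RSS_QS_PER_VF.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_QS_PER_VF 16u /* assumed per-VF queue limit */

/* A request is valid only if at least one queue is selected and neither
 * bitmap sets a bit at or above the per-VF queue limit.
 */
static bool vqs_bitmaps_valid(uint32_t rx_queues, uint32_t tx_queues)
{
	uint32_t limit = 1u << MAX_QS_PER_VF;

	if (!rx_queues && !tx_queues)
		return false;
	return rx_queues < limit && tx_queues < limit;
}

int main(void)
{
	printf("%d\n", vqs_bitmaps_valid(0x0003, 0x0003)); /* 1 */
	printf("%d\n", vqs_bitmaps_valid(0, 0));           /* 0 */
	printf("%d\n", vqs_bitmaps_valid(1u << 16, 0x1));  /* 0 */
	return 0;
}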
+
+/**
+ * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
+ * @vsi: VSI of the VF to configure
+ * @q_idx: VF queue index used to determine the queue in the PF's space
+ */
+void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 pfq = vsi->txq_map[q_idx];
+ u32 reg;
+
+ reg = rd32(hw, QINT_TQCTL(pfq));
+
+ /* MSI-X index 0 in the VF's space is always for the OICR, which means
+ * this is most likely a poll mode VF driver, so don't enable an
+ * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
+ */
+ if (!(reg & QINT_TQCTL_MSIX_INDX_M))
+ return;
+
+ wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
+ * @vsi: VSI of the VF to configure
+ * @q_idx: VF queue index used to determine the queue in the PF's space
+ */
+void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ u32 pfq = vsi->rxq_map[q_idx];
+ u32 reg;
+
+ reg = rd32(hw, QINT_RQCTL(pfq));
+
+ /* MSI-X index 0 in the VF's space is always for the OICR, which means
+ * this is most likely a poll mode VF driver, so don't enable an
+ * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
+ */
+ if (!(reg & QINT_RQCTL_MSIX_INDX_M))
+ return;
+
+ wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
+}
+
+/**
+ * ice_vc_ena_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to enable all or specific queue(s)
+ */
+int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ struct ice_vsi *vsi;
+ unsigned long q_map;
+ u16 vf_q_id;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_validate_vqs_bitmaps(vqs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Enable only Rx rings, Tx rings were enabled by the FW when the
+ * Tx queue group list was configured and the context bits were
+ * programmed using ice_vsi_cfg_txqs
+ */
+ q_map = vqs->rx_queues;
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if already enabled */
+ if (test_bit(vf_q_id, vf->rxq_ena))
+ continue;
+
+ if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
+ vf_q_id, vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
+ set_bit(vf_q_id, vf->rxq_ena);
+ }
+
+ q_map = vqs->tx_queues;
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if already enabled */
+ if (test_bit(vf_q_id, vf->txq_ena))
+ continue;
+
+ ice_vf_ena_txq_interrupt(vsi, vf_q_id);
+ set_bit(vf_q_id, vf->txq_ena);
+ }
+
+ /* Set flag to indicate that queues are enabled */
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS)
+ set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vf_vsi_dis_single_txq - disable a single Tx queue
+ * @vf: VF to disable queue for
+ * @vsi: VSI for the VF
+ * @q_id: VF relative (0-based) queue ID
+ *
+ * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
+ * disabled then clear q_id bit in the enabled queues bitmap and return
+ * success. Otherwise return error.
+ */
+int ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
+{
+ struct ice_txq_meta txq_meta = { 0 };
+ struct ice_tx_ring *ring;
+ int err;
+
+ if (!test_bit(q_id, vf->txq_ena))
+ dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
+ q_id, vsi->vsi_num);
+
+ ring = vsi->tx_rings[q_id];
+ if (!ring)
+ return -EINVAL;
+
+ ice_fill_txq_meta(vsi, ring, &txq_meta);
+
+ err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
+ if (err) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
+ q_id, vsi->vsi_num);
+ return err;
+ }
+
+ /* Clear enabled queues flag */
+ clear_bit(q_id, vf->txq_ena);
+
+ return 0;
+}
+
+/**
+ * ice_vc_dis_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to disable all or specific queue(s)
+ */
+int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_queue_select *vqs =
+ (struct virtchnl_queue_select *)msg;
+ struct ice_vsi *vsi;
+ unsigned long q_map;
+ u16 vf_q_id;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
+ !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_validate_vqs_bitmaps(vqs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vqs->tx_queues) {
+ q_map = vqs->tx_queues;
+
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+ }
+ }
+
+ q_map = vqs->rx_queues;
+ /* speed up Rx queue disable by batching them if possible */
+ if (q_map &&
+ bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
+ if (ice_vsi_stop_all_rx_rings(vsi)) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
+ vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+ } else if (q_map) {
+ for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
+ if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Skip queue if not enabled */
+ if (!test_bit(vf_q_id, vf->rxq_ena))
+ continue;
+
+ if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
+ true)) {
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
+ vf_q_id, vsi->vsi_num);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* Clear enabled queues flag */
+ clear_bit(vf_q_id, vf->rxq_ena);
+ }
+ }
+
+ /* Clear enabled queues flag */
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
+ clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_cfg_interrupt
+ * @vf: pointer to the VF info
+ * @vsi: the VSI being configured
+ * @map: vector map for mapping vectors to queues
+ * @q_vector: structure for interrupt vector
+ * configure the IRQ to queue map
+ */
+static enum virtchnl_status_code
+ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct virtchnl_vector_map *map,
+ struct ice_q_vector *q_vector)
+{
+ u16 vsi_q_id, vsi_q_id_idx;
+ unsigned long qmap;
+
+ q_vector->num_ring_rx = 0;
+ q_vector->num_ring_tx = 0;
+
+ qmap = map->rxq_map;
+ for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
+ vsi_q_id = vsi_q_id_idx;
+
+ if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
+ return VIRTCHNL_STATUS_ERR_PARAM;
+
+ q_vector->num_ring_rx++;
+ q_vector->rx.itr_idx = map->rxitr_idx;
+ vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
+ ice_cfg_rxq_interrupt(vsi, vsi_q_id,
+ q_vector->vf_reg_idx,
+ q_vector->rx.itr_idx);
+ }
+
+ qmap = map->txq_map;
+ for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
+ vsi_q_id = vsi_q_id_idx;
+
+ if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
+ return VIRTCHNL_STATUS_ERR_PARAM;
+
+ q_vector->num_ring_tx++;
+ q_vector->tx.itr_idx = map->txitr_idx;
+ vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
+ ice_cfg_txq_interrupt(vsi, vsi_q_id,
+ q_vector->vf_reg_idx,
+ q_vector->tx.itr_idx);
+ }
+
+ return VIRTCHNL_STATUS_SUCCESS;
+}
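
A self-contained sketch of the per-vector mapping loop in ice_cfg_interrupt() above: walk the set bits of the Rx and Tx queue maps the VF supplied, reject any queue index outside the VSI's range, and count how many rings the vector will serve. The helper name and the queue limit are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>

#define MAX_QS_PER_VF 16u /* assumed per-VF queue limit */

/* Count how many Rx and Tx rings a single vector serves, walking the set
 * bits of the queue maps the VF supplied, and reject out-of-range queues.
 */
static int map_vector(uint32_t rxq_map, uint32_t txq_map, uint16_t num_queues,
		      uint16_t *num_rx, uint16_t *num_tx)
{
	*num_rx = 0;
	*num_tx = 0;

	for (uint16_t q = 0; q < MAX_QS_PER_VF; q++) {
		if (rxq_map & (1u << q)) {
			if (q >= num_queues)
				return -1; /* invalid queue in the map */
			(*num_rx)++;
		}
		if (txq_map & (1u << q)) {
			if (q >= num_queues)
				return -1;
			(*num_tx)++;
		}
	}
	return 0;
}

int main(void)
{
	uint16_t rx, tx;

	if (!map_vector(0x5, 0x3, 4, &rx, &tx))
		printf("rx=%u tx=%u\n", rx, tx); /* rx=2 tx=2 */
	return 0;
}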
+
+/**
+ * ice_vc_cfg_irq_map_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the IRQ to queue map
+ */
+int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ u16 num_q_vectors_mapped, vsi_id, vector_id;
+ struct virtchnl_irq_map_info *irqmap_info;
+ struct virtchnl_vector_map *map;
+ struct ice_vsi *vsi;
+ int i;
+
+ irqmap_info = (struct virtchnl_irq_map_info *)msg;
+ num_q_vectors_mapped = irqmap_info->num_vectors;
+
+ /* Check to make sure number of VF vectors mapped is not greater than
+ * number of VF vectors originally allocated, and check that
+ * there is actually at least a single VF queue vector mapped
+ */
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ vf->num_msix < num_q_vectors_mapped ||
+ !num_q_vectors_mapped) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ for (i = 0; i < num_q_vectors_mapped; i++) {
+ struct ice_q_vector *q_vector;
+
+ map = &irqmap_info->vecmap[i];
+
+ vector_id = map->vector_id;
+ vsi_id = map->vsi_id;
+ /* vector_id is always 0-based for each VF, and can never be
+ * larger than or equal to the max allowed interrupts per VF
+ */
+ if (!(vector_id < vf->num_msix) ||
+ !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
+ (!vector_id && (map->rxq_map || map->txq_map))) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* No need to map VF miscellaneous or rogue vector */
+ if (!vector_id)
+ continue;
+
+ /* Subtract the number of non-queue vectors from the vector_id
+ * passed by the VF to get the actual VSI queue vector array index
+ */
+ q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
+ if (!q_vector) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ /* look out for an invalid queue index */
+ v_ret = ice_cfg_interrupt(vf, vsi, map, q_vector);
+ if (v_ret)
+ goto error_param;
+ }
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_cfg_q_bw - Configure per queue bandwidth
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer which holds the command descriptor
+ *
+ * Configure VF queues bandwidth.
+ *
+ * Return: 0 on success or negative error value.
+ */
+int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_queues_bw_cfg *qbw =
+ (struct virtchnl_queues_bw_cfg *)msg;
+ struct ice_vsi *vsi;
+ u16 i;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ !ice_vc_isvalid_vsi_id(vf, qbw->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (qbw->num_queues > ICE_MAX_RSS_QS_PER_VF ||
+ qbw->num_queues > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
+ dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
+ vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ for (i = 0; i < qbw->num_queues; i++) {
+ if (qbw->cfg[i].shaper.peak != 0 && vf->max_tx_rate != 0 &&
+ qbw->cfg[i].shaper.peak > vf->max_tx_rate) {
+ dev_warn(ice_pf_to_dev(vf->pf), "The maximum queue %d rate limit configuration may not take effect because the maximum TX rate for VF-%d is %d\n",
+ qbw->cfg[i].queue_id, vf->vf_id,
+ vf->max_tx_rate);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ if (qbw->cfg[i].shaper.committed != 0 && vf->min_tx_rate != 0 &&
+ qbw->cfg[i].shaper.committed < vf->min_tx_rate) {
+ dev_warn(ice_pf_to_dev(vf->pf), "The minimum queue %d rate limit configuration may not take effect because the minimum TX rate for VF-%d is %d\n",
+ qbw->cfg[i].queue_id, vf->vf_id,
+ vf->min_tx_rate);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ if (qbw->cfg[i].queue_id > vf->num_vf_qs) {
+ dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure invalid queue_id\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ if (qbw->cfg[i].tc >= ICE_MAX_TRAFFIC_CLASS) {
+ dev_warn(ice_pf_to_dev(vf->pf), "VF-%d trying to configure a traffic class higher than allowed\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+ }
+
+ for (i = 0; i < qbw->num_queues; i++) {
+ vf->qs_bw[i].queue_id = qbw->cfg[i].queue_id;
+ vf->qs_bw[i].peak = qbw->cfg[i].shaper.peak;
+ vf->qs_bw[i].committed = qbw->cfg[i].shaper.committed;
+ vf->qs_bw[i].tc = qbw->cfg[i].tc;
+ }
+
+ if (ice_vf_cfg_qs_bw(vf, qbw->num_queues))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+
+err:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUEUE_BW,
+ v_ret, NULL, 0);
+}
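
A compact sketch of the per-queue shaper bounds enforced in ice_vc_cfg_q_bw() above: a requested peak rate must not exceed the VF-level cap and a requested committed rate must not fall below the VF-level floor, with 0 meaning "no limit" on either side. The helper below is illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* A per-queue shaper request is acceptable only if its peak rate does not
 * exceed the VF-level cap and its committed rate is not below the VF-level
 * floor; a value of 0 means "not limited" on either side.
 */
static bool queue_bw_ok(uint32_t peak, uint32_t committed,
			uint32_t vf_max_rate, uint32_t vf_min_rate)
{
	if (peak && vf_max_rate && peak > vf_max_rate)
		return false;
	if (committed && vf_min_rate && committed < vf_min_rate)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", queue_bw_ok(500, 100, 1000, 50));  /* 1 */
	printf("%d\n", queue_bw_ok(2000, 100, 1000, 50)); /* 0: peak too high */
	printf("%d\n", queue_bw_ok(500, 10, 1000, 50));   /* 0: committed too low */
	return 0;
}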
+
+/**
+ * ice_vc_cfg_q_quanta - Configure per queue quanta
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer which holds the command descriptor
+ *
+ * Configure VF queues quanta.
+ *
+ * Return: 0 on success or negative error value.
+ */
+int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
+{
+ u16 quanta_prof_id, quanta_size, start_qid, num_queues, end_qid, i;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_quanta_cfg *qquanta =
+ (struct virtchnl_quanta_cfg *)msg;
+ struct ice_vsi *vsi;
+ int ret;
+
+ start_qid = qquanta->queue_select.start_queue_id;
+ num_queues = qquanta->queue_select.num_queues;
+
+ if (check_add_overflow(start_qid, num_queues, &end_qid)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (end_qid > ICE_MAX_RSS_QS_PER_VF ||
+ end_qid > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
+ dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
+ vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ quanta_size = qquanta->quanta_size;
+ if (quanta_size > ICE_MAX_QUANTA_SIZE ||
+ quanta_size < ICE_MIN_QUANTA_SIZE) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (quanta_size % 64) {
+ dev_err(ice_pf_to_dev(vf->pf), "quanta size should be the product of 64\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ ret = ice_vf_cfg_q_quanta_profile(vf, quanta_size,
+ &quanta_prof_id);
+ if (ret) {
+ v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ goto err;
+ }
+
+ for (i = start_qid; i < end_qid; i++)
+ vsi->tx_rings[i]->quanta_prof_id = quanta_prof_id;
+
+err:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUANTA,
+ v_ret, NULL, 0);
+}
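
A self-contained sketch of the validation performed by ice_vc_cfg_q_quanta() above: the requested queue range must not wrap around or run past the VF's allocated queues, and the quanta size must be a multiple of 64 within the supported bounds. The MIN/MAX constants are assumed values, not the driver's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MIN_QUANTA_SIZE 256u  /* assumed lower bound */
#define MAX_QUANTA_SIZE 4096u /* assumed upper bound */
#define MAX_QS_PER_VF   16u   /* assumed per-VF queue limit */

/* Validate a quanta request: the queue range must not wrap around or run
 * past the VF's queues, and the size must be a multiple of 64 within range.
 */
static bool quanta_req_ok(uint16_t start_qid, uint16_t num_queues,
			  uint16_t quanta_size, uint16_t alloc_queues)
{
	uint16_t end_qid;

	if (__builtin_add_overflow(start_qid, num_queues, &end_qid))
		return false;
	if (end_qid > MAX_QS_PER_VF || end_qid > alloc_queues)
		return false;
	if (quanta_size < MIN_QUANTA_SIZE || quanta_size > MAX_QUANTA_SIZE)
		return false;
	return !(quanta_size % 64);
}

int main(void)
{
	printf("%d\n", quanta_req_ok(0, 4, 1024, 8));     /* 1 */
	printf("%d\n", quanta_req_ok(0, 4, 1000, 8));     /* 0: not a multiple of 64 */
	printf("%d\n", quanta_req_ok(65535, 4, 1024, 8)); /* 0: range overflows */
	return 0;
}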
+
+/**
+ * ice_vc_cfg_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * called from the VF to configure the Rx/Tx queues
+ */
+int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_vsi_queue_config_info *qci =
+ (struct virtchnl_vsi_queue_config_info *)msg;
+ struct virtchnl_queue_pair_info *qpi;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ int i = -1, q_idx;
+ bool ena_ts;
+ u8 act_prt;
+
+ mutex_lock(&pf->lag_mutex);
+ act_prt = ice_lag_prepare_vf_reset(pf->lag);
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ goto error_param;
+
+ if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id))
+ goto error_param;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ goto error_param;
+
+ if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
+ qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
+ dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
+ vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
+ goto error_param;
+ }
+
+ for (i = 0; i < qci->num_queue_pairs; i++) {
+ if (!qci->qpair[i].rxq.crc_disable)
+ continue;
+
+ if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) ||
+ vf->vlan_strip_ena)
+ goto error_param;
+ }
+
+ for (i = 0; i < qci->num_queue_pairs; i++) {
+ qpi = &qci->qpair[i];
+ if (qpi->txq.vsi_id != qci->vsi_id ||
+ qpi->rxq.vsi_id != qci->vsi_id ||
+ qpi->rxq.queue_id != qpi->txq.queue_id ||
+ qpi->txq.headwb_enabled ||
+ !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
+ !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
+ !ice_vc_isvalid_q_id(vsi, qpi->txq.queue_id)) {
+ goto error_param;
+ }
+
+ q_idx = qpi->rxq.queue_id;
+
+ /* make sure selected "q_idx" is in valid range of queues
+ * for selected "vsi"
+ */
+ if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
+ goto error_param;
+ }
+
+ /* copy Tx queue info from VF into VSI */
+ if (qpi->txq.ring_len > 0) {
+ vsi->tx_rings[q_idx]->dma = qpi->txq.dma_ring_addr;
+ vsi->tx_rings[q_idx]->count = qpi->txq.ring_len;
+
+ /* Disable any existing queue first */
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
+ goto error_param;
+
+ /* Configure a queue with the requested settings */
+ if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
+ dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
+ vf->vf_id, q_idx);
+ goto error_param;
+ }
+ }
+
+ /* copy Rx queue info from VF into VSI */
+ if (qpi->rxq.ring_len > 0) {
+ u16 max_frame_size = ice_vc_get_max_frame_size(vf);
+ struct ice_rx_ring *ring = vsi->rx_rings[q_idx];
+ u32 rxdid;
+
+ ring->dma = qpi->rxq.dma_ring_addr;
+ ring->count = qpi->rxq.ring_len;
+
+ if (qpi->rxq.crc_disable)
+ ring->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
+ else
+ ring->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
+
+ if (qpi->rxq.databuffer_size != 0 &&
+ (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
+ qpi->rxq.databuffer_size < 1024))
+ goto error_param;
+
+ ring->rx_buf_len = qpi->rxq.databuffer_size;
+
+ if (qpi->rxq.max_pkt_size > max_frame_size ||
+ qpi->rxq.max_pkt_size < 64)
+ goto error_param;
+
+ vsi->max_frame = qpi->rxq.max_pkt_size;
+ /* add space for the port VLAN since the VF driver is
+ * not expected to account for it in the MTU
+ * calculation
+ */
+ if (ice_vf_is_port_vlan_ena(vf))
+ vsi->max_frame += VLAN_HLEN;
+
+ if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
+ dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
+ vf->vf_id, q_idx);
+ goto error_param;
+ }
+
+ /* If Rx flex desc is supported, select RXDID for Rx
+ * queues. Otherwise, use the legacy 32-byte descriptor
+ * format. The legacy 16-byte descriptor is not supported;
+ * if that RXDID is selected, return an error.
+ */
+ if (vf->driver_caps &
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
+ rxdid = qpi->rxq.rxdid;
+ if (!(BIT(rxdid) & pf->supported_rxdids))
+ goto error_param;
+ } else {
+ rxdid = ICE_RXDID_LEGACY_1;
+ }
+
+ ena_ts = ((vf->driver_caps &
+ VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) &&
+ (vf->driver_caps & VIRTCHNL_VF_CAP_PTP) &&
+ (qpi->rxq.flags & VIRTCHNL_PTP_RX_TSTAMP));
+
+ ice_write_qrxflxp_cntxt(&vsi->back->hw,
+ vsi->rxq_map[q_idx], rxdid,
+ ICE_RXDID_PRIO, ena_ts);
+ }
+ }
+
+ ice_lag_complete_vf_reset(pf->lag, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+error_param:
+ /* disable whatever we can */
+ for (; i >= 0; i--) {
+ if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
+ dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
+ vf->vf_id, i);
+ if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
+ dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
+ vf->vf_id, i);
+ }
+
+ ice_lag_complete_vf_reset(pf->lag, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
+}
+
+/**
+ * ice_vc_request_qs_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * VFs get a default number of queues but can use this message to request a
+ * different number. If the request is successful, PF will reset the VF and
+ * return 0. If unsuccessful, PF will send message informing VF of number of
+ * available queue pairs via virtchnl message response to VF.
+ */
+int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_vf_res_request *vfres =
+ (struct virtchnl_vf_res_request *)msg;
+ u16 req_queues = vfres->num_queue_pairs;
+ struct ice_pf *pf = vf->pf;
+ u16 max_allowed_vf_queues;
+ u16 tx_rx_queue_left;
+ struct device *dev;
+ u16 cur_queues;
+
+ dev = ice_pf_to_dev(pf);
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ cur_queues = vf->num_vf_qs;
+ tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
+ ice_get_avail_rxq_count(pf));
+ max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
+ if (!req_queues) {
+ dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
+ vf->vf_id);
+ } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
+ dev_err(dev, "VF %d tried to request more than %d queues.\n",
+ vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
+ vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
+ } else if (req_queues > cur_queues &&
+ req_queues - cur_queues > tx_rx_queue_left) {
+ dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
+ vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
+ vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
+ ICE_MAX_RSS_QS_PER_VF);
+ } else {
+ /* request is successful, then reset VF */
+ vf->num_req_qs = req_queues;
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
+ dev_info(dev, "VF %d granted request of %u queues.\n",
+ vf->vf_id, req_queues);
+ return 0;
+ }
+
+error_param:
+ /* send the response to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
+ v_ret, (u8 *)vfres, sizeof(*vfres));
+}
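
A small sketch of the grant/clamp decision in ice_vc_request_qs_msg() above: a request for 0 is ignored, a request above the per-VF maximum is clamped, and a request that cannot be covered by the free queue pool is reduced to what can actually be granted. The constants and the helper name are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>

#define MAX_QS_PER_VF 16u /* assumed per-VF queue limit */

/* Decide how many queue pairs to offer a VF that asked for @req: clamp to
 * the per-VF maximum, and if the PF cannot cover the increase from the pool
 * of free queues, offer what can actually be granted instead.
 */
static uint16_t queues_to_offer(uint16_t req, uint16_t cur, uint16_t avail)
{
	if (!req)
		return cur;                       /* ignore a request for 0 */
	if (req > MAX_QS_PER_VF)
		return MAX_QS_PER_VF;
	if (req > cur && req - cur > avail) {
		uint16_t offer = cur + avail;

		return offer > MAX_QS_PER_VF ? MAX_QS_PER_VF : offer;
	}
	return req;                               /* request can be granted */
}

int main(void)
{
	printf("%u\n", queues_to_offer(8, 4, 16));  /* 8: granted */
	printf("%u\n", queues_to_offer(12, 4, 2));  /* 6: limited by free pool */
	printf("%u\n", queues_to_offer(64, 4, 64)); /* 16: clamped to per-VF max */
	return 0;
}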
+
diff --git a/drivers/net/ethernet/intel/ice/virt/queues.h b/drivers/net/ethernet/intel/ice/virt/queues.h
new file mode 100644
index 000000000000..c4a792cecea1
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/virt/queues.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022, Intel Corporation. */
+
+#ifndef _ICE_VIRT_QUEUES_H_
+#define _ICE_VIRT_QUEUES_H_
+
+#include <linux/types.h>
+
+struct ice_vf;
+
+u16 ice_vc_get_max_frame_size(struct ice_vf *vf);
+int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg);
+int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg);
+int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg);
+int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg);
+int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg);
+int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg);
+int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg);
+
+#endif /* _ICE_VIRT_QUEUES_H_ */
diff --git a/drivers/net/ethernet/intel/ice/virt/rss.c b/drivers/net/ethernet/intel/ice/virt/rss.c
new file mode 100644
index 000000000000..085e69ec0cfc
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/virt/rss.c
@@ -0,0 +1,1922 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022, Intel Corporation. */
+
+#include "rss.h"
+#include "ice_vf_lib_private.h"
+#include "ice.h"
+
+#define FIELD_SELECTOR(proto_hdr_field) \
+ BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
+
+struct ice_vc_hdr_match_type {
+ u32 vc_hdr; /* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
+ u32 ice_hdr; /* ice headers (ICE_FLOW_SEG_HDR_XXX) */
+};
+
+static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
+ {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
+ {VIRTCHNL_PROTO_HDR_ETH, ICE_FLOW_SEG_HDR_ETH},
+ {VIRTCHNL_PROTO_HDR_S_VLAN, ICE_FLOW_SEG_HDR_VLAN},
+ {VIRTCHNL_PROTO_HDR_C_VLAN, ICE_FLOW_SEG_HDR_VLAN},
+ {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER},
+ {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 |
+ ICE_FLOW_SEG_HDR_IPV_OTHER},
+ {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP},
+ {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP},
+ {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP},
+ {VIRTCHNL_PROTO_HDR_PPPOE, ICE_FLOW_SEG_HDR_PPPOE},
+ {VIRTCHNL_PROTO_HDR_GTPU_IP, ICE_FLOW_SEG_HDR_GTPU_IP},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH, ICE_FLOW_SEG_HDR_GTPU_EH},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
+ ICE_FLOW_SEG_HDR_GTPU_DWN},
+ {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
+ ICE_FLOW_SEG_HDR_GTPU_UP},
+ {VIRTCHNL_PROTO_HDR_L2TPV3, ICE_FLOW_SEG_HDR_L2TPV3},
+ {VIRTCHNL_PROTO_HDR_ESP, ICE_FLOW_SEG_HDR_ESP},
+ {VIRTCHNL_PROTO_HDR_AH, ICE_FLOW_SEG_HDR_AH},
+ {VIRTCHNL_PROTO_HDR_PFCP, ICE_FLOW_SEG_HDR_PFCP_SESSION},
+ {VIRTCHNL_PROTO_HDR_GTPC, ICE_FLOW_SEG_HDR_GTPC},
+ {VIRTCHNL_PROTO_HDR_L2TPV2, ICE_FLOW_SEG_HDR_L2TPV2},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG, ICE_FLOW_SEG_HDR_IPV_FRAG},
+ {VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG, ICE_FLOW_SEG_HDR_IPV_FRAG},
+ {VIRTCHNL_PROTO_HDR_GRE, ICE_FLOW_SEG_HDR_GRE},
+};
+
+struct ice_vc_hash_field_match_type {
+ u32 vc_hdr; /* virtchnl headers
+ * (VIRTCHNL_PROTO_HDR_XXX)
+ */
+ u32 vc_hash_field; /* virtchnl hash fields selector
+ * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
+ */
+ u64 ice_hash_field; /* ice hash fields
+ * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
+ */
+};
+
+static const struct
+ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
+ {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
+ {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
+ {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
+ ICE_FLOW_HASH_ETH},
+ {VIRTCHNL_PROTO_HDR_ETH,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
+ {VIRTCHNL_PROTO_HDR_S_VLAN,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
+ {VIRTCHNL_PROTO_HDR_C_VLAN,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+ ICE_FLOW_HASH_IPV4},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID)},
+ {VIRTCHNL_PROTO_HDR_IPV4,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
+ ICE_FLOW_HASH_IPV4},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV4_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
+ ICE_FLOW_HASH_IPV6},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID)},
+ {VIRTCHNL_PROTO_HDR_IPV6,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST),
+ ICE_FLOW_HASH_IPV6_PRE64},
+ {VIRTCHNL_PROTO_HDR_IPV6,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA)},
+ {VIRTCHNL_PROTO_HDR_IPV6,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA)},
+ {VIRTCHNL_PROTO_HDR_IPV6,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ ICE_FLOW_HASH_IPV6_PRE64 |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_SRC) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_SA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_IPV6,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PREFIX64_DST) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PRE64_DA) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
+ ICE_FLOW_HASH_TCP_PORT},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_TCP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_CHKSUM),
+ ICE_FLOW_HASH_TCP_PORT |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
+ ICE_FLOW_HASH_UDP_PORT},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_UDP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_CHKSUM),
+ ICE_FLOW_HASH_UDP_PORT |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
+ ICE_FLOW_HASH_SCTP_PORT},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT) |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_SCTP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT) |
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_CHKSUM),
+ ICE_FLOW_HASH_SCTP_PORT |
+ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_CHKSUM)},
+ {VIRTCHNL_PROTO_HDR_PPPOE,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
+ {VIRTCHNL_PROTO_HDR_GTPU_IP,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
+ {VIRTCHNL_PROTO_HDR_L2TPV3,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
+ {VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
+ {VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
+ {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
+ {VIRTCHNL_PROTO_HDR_GTPC,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPC_TEID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)},
+ {VIRTCHNL_PROTO_HDR_L2TPV2,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV2_SESS_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_SESS_ID)},
+ {VIRTCHNL_PROTO_HDR_L2TPV2,
+ FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV2_LEN_SESS_ID),
+ BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV2_LEN_SESS_ID)},
+};
+
+static int
+ice_vc_rss_hash_update(struct ice_hw *hw, struct ice_vsi *vsi, u8 hash_type)
+{
+ struct ice_vsi_ctx *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ /* clear previous hash_type */
+ ctx->info.q_opt_rss = vsi->info.q_opt_rss &
+ ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M;
+ /* hash_type is passed in as ICE_AQ_VSI_Q_OPT_RSS_<XOR|TPLZ|SYM_TPLZ> */
+ ctx->info.q_opt_rss |= FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M,
+ hash_type);
+
+ /* Preserve existing queueing option setting */
+ ctx->info.q_opt_tc = vsi->info.q_opt_tc;
+ ctx->info.q_opt_flags = vsi->info.q_opt_flags;
+
+ ctx->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
+
+ ret = ice_update_vsi(hw, vsi->idx, ctx, NULL);
+ if (ret) {
+ dev_err(ice_hw_to_dev(hw), "update VSI for RSS failed, err %d aq_err %s\n",
+ ret, libie_aq_str(hw->adminq.sq_last_status));
+ } else {
+ vsi->info.q_opt_rss = ctx->info.q_opt_rss;
+ }
+
+ kfree(ctx);
+
+ return ret;
+}
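
A rough read-modify-write sketch of the q_opt_rss update done in ice_vc_rss_hash_update() above: clear the old hash-scheme bits and OR in the new scheme while preserving the rest of the byte. The field position and mask below are illustrative assumptions; the driver itself uses FIELD_PREP() with the real register layout and already-defined hash-type values.

#include <stdint.h>
#include <stdio.h>

#define RSS_HASH_SHIFT 2u                       /* assumed field position */
#define RSS_HASH_MASK  (0x3u << RSS_HASH_SHIFT) /* assumed field mask */

/* Clear the old hash-scheme bits, then OR in the new scheme while leaving
 * the other queueing options in the byte untouched.
 */
static uint8_t set_rss_hash_type(uint8_t q_opt_rss, uint8_t hash_type)
{
	q_opt_rss &= (uint8_t)~RSS_HASH_MASK;
	q_opt_rss |= (uint8_t)((hash_type << RSS_HASH_SHIFT) & RSS_HASH_MASK);
	return q_opt_rss;
}

int main(void)
{
	uint8_t cur = 0xA5;

	printf("0x%02x\n", set_rss_hash_type(cur, 0x2)); /* 0xa9 */
	return 0;
}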
+
+/**
+ * ice_vc_validate_pattern
+ * @vf: pointer to the VF info
+ * @proto: virtchnl protocol headers
+ *
+ * Validate whether the pattern is supported.
+ *
+ * Return: true on success, false on error.
+ */
+bool
+ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
+{
+ bool is_ipv4 = false;
+ bool is_ipv6 = false;
+ bool is_udp = false;
+ u16 ptype = -1;
+ int i = 0;
+
+ while (i < proto->count &&
+ proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
+ switch (proto->proto_hdr[i].type) {
+ case VIRTCHNL_PROTO_HDR_ETH:
+ ptype = ICE_PTYPE_MAC_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_IPV4:
+ ptype = ICE_PTYPE_IPV4_PAY;
+ is_ipv4 = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_IPV6:
+ ptype = ICE_PTYPE_IPV6_PAY;
+ is_ipv6 = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_UDP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_UDP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_UDP_PAY;
+ is_udp = true;
+ break;
+ case VIRTCHNL_PROTO_HDR_TCP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_TCP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_TCP_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_SCTP:
+ if (is_ipv4)
+ ptype = ICE_PTYPE_IPV4_SCTP_PAY;
+ else if (is_ipv6)
+ ptype = ICE_PTYPE_IPV6_SCTP_PAY;
+ break;
+ case VIRTCHNL_PROTO_HDR_GTPU_IP:
+ case VIRTCHNL_PROTO_HDR_GTPU_EH:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_GTPU;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_GTPU;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_L2TPV3:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_L2TPV3;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_L2TPV3;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_ESP:
+ if (is_ipv4)
+ ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
+ ICE_MAC_IPV4_ESP;
+ else if (is_ipv6)
+ ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
+ ICE_MAC_IPV6_ESP;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_AH:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_AH;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_AH;
+ goto out;
+ case VIRTCHNL_PROTO_HDR_PFCP:
+ if (is_ipv4)
+ ptype = ICE_MAC_IPV4_PFCP_SESSION;
+ else if (is_ipv6)
+ ptype = ICE_MAC_IPV6_PFCP_SESSION;
+ goto out;
+ default:
+ break;
+ }
+ i++;
+ }
+
+out:
+ return ice_hw_ptype_ena(&vf->pf->hw, ptype);
+}
+
+/**
+ * ice_vc_parse_rss_cfg - parses hash fields and headers from
+ * a specific virtchnl RSS cfg
+ * @hw: pointer to the hardware
+ * @rss_cfg: pointer to the virtchnl RSS cfg
+ * @hash_cfg: pointer to the HW hash configuration
+ *
+ * Return true if all the protocol header and hash fields in the RSS cfg could
+ * be parsed, else return false
+ *
+ * This function parses the virtchnl RSS cfg into the intended hash
+ * fields and the intended headers for the RSS configuration
+ */
+static bool ice_vc_parse_rss_cfg(struct ice_hw *hw,
+ struct virtchnl_rss_cfg *rss_cfg,
+ struct ice_rss_hash_cfg *hash_cfg)
+{
+ const struct ice_vc_hash_field_match_type *hf_list;
+ const struct ice_vc_hdr_match_type *hdr_list;
+ int i, hf_list_len, hdr_list_len;
+ bool outer_ipv4 = false;
+ bool outer_ipv6 = false;
+ bool inner_hdr = false;
+ bool has_gre = false;
+
+ u32 *addl_hdrs = &hash_cfg->addl_hdrs;
+ u64 *hash_flds = &hash_cfg->hash_flds;
+
+ /* set outer layer RSS as default */
+ hash_cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
+
+ if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
+ hash_cfg->symm = true;
+ else
+ hash_cfg->symm = false;
+
+ hf_list = ice_vc_hash_field_list;
+ hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
+ hdr_list = ice_vc_hdr_list;
+ hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);
+
+ for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
+ struct virtchnl_proto_hdr *proto_hdr =
+ &rss_cfg->proto_hdrs.proto_hdr[i];
+ u32 hdr_found = 0;
+ int j;
+
+ /* Find matched ice headers according to virtchnl headers.
+ * Also figure out the outer type of GTPU headers.
+ */
+ for (j = 0; j < hdr_list_len; j++) {
+ struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
+
+ if (proto_hdr->type == hdr_map.vc_hdr)
+ hdr_found = hdr_map.ice_hdr;
+ }
+
+ if (!hdr_found)
+ return false;
+
+ /* Find matched ice hash fields according to
+ * virtchnl hash fields.
+ */
+ for (j = 0; j < hf_list_len; j++) {
+ struct ice_vc_hash_field_match_type hf_map = hf_list[j];
+
+ if (proto_hdr->type == hf_map.vc_hdr &&
+ proto_hdr->field_selector == hf_map.vc_hash_field) {
+ *hash_flds |= hf_map.ice_hash_field;
+ break;
+ }
+ }
+
+ if (proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV4 && !inner_hdr)
+ outer_ipv4 = true;
+ else if (proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV6 &&
+ !inner_hdr)
+ outer_ipv6 = true;
+ /* for GRE and L2TPv2, take the inner header as the input set
+ * if no field is selected from the outer headers.
+ * for GTPU, take the inner header and the GTPU TEID as the
+ * input set.
+ */
+ else if ((proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_IP ||
+ proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_EH ||
+ proto_hdr->type == VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN ||
+ proto_hdr->type ==
+ VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP) ||
+ ((proto_hdr->type == VIRTCHNL_PROTO_HDR_L2TPV2 ||
+ proto_hdr->type == VIRTCHNL_PROTO_HDR_GRE) &&
+ *hash_flds == 0)) {
+ /* set inner_hdr flag, and clean up outer header */
+ inner_hdr = true;
+
+ /* clear outer headers */
+ *addl_hdrs = 0;
+
+ if (outer_ipv4 && outer_ipv6)
+ return false;
+
+ if (outer_ipv4)
+ hash_cfg->hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV4;
+ else if (outer_ipv6)
+ hash_cfg->hdr_type = ICE_RSS_INNER_HEADERS_W_OUTER_IPV6;
+ else
+ hash_cfg->hdr_type = ICE_RSS_INNER_HEADERS;
+
+ if (has_gre && outer_ipv4)
+ hash_cfg->hdr_type =
+ ICE_RSS_INNER_HEADERS_W_OUTER_IPV4_GRE;
+ if (has_gre && outer_ipv6)
+ hash_cfg->hdr_type =
+ ICE_RSS_INNER_HEADERS_W_OUTER_IPV6_GRE;
+
+ if (proto_hdr->type == VIRTCHNL_PROTO_HDR_GRE)
+ has_gre = true;
+ }
+
+ *addl_hdrs |= hdr_found;
+
+ /* refine hash hdrs and fields for IP fragment */
+ if (VIRTCHNL_TEST_PROTO_HDR_FIELD(proto_hdr,
+ VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID) &&
+ proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV4_FRAG) {
+ *addl_hdrs |= ICE_FLOW_SEG_HDR_IPV_FRAG;
+ *addl_hdrs &= ~(ICE_FLOW_SEG_HDR_IPV_OTHER);
+ *hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_ID);
+ VIRTCHNL_DEL_PROTO_HDR_FIELD(proto_hdr,
+ VIRTCHNL_PROTO_HDR_IPV4_FRAG_PKID);
+ }
+ if (VIRTCHNL_TEST_PROTO_HDR_FIELD(proto_hdr,
+ VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID) &&
+ proto_hdr->type == VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG) {
+ *addl_hdrs |= ICE_FLOW_SEG_HDR_IPV_FRAG;
+ *addl_hdrs &= ~(ICE_FLOW_SEG_HDR_IPV_OTHER);
+ *hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_ID);
+ VIRTCHNL_DEL_PROTO_HDR_FIELD(proto_hdr,
+ VIRTCHNL_PROTO_HDR_IPV6_EH_FRAG_PKID);
+ }
+ }
+
+ /* refine gtpu header if we take outer as input set for a no inner
+ * ip gtpu flow.
+ */
+ if (hash_cfg->hdr_type == ICE_RSS_OUTER_HEADERS &&
+ *addl_hdrs & ICE_FLOW_SEG_HDR_GTPU_IP) {
+ *addl_hdrs &= ~(ICE_FLOW_SEG_HDR_GTPU_IP);
+ *addl_hdrs |= ICE_FLOW_SEG_HDR_GTPU_NON_IP;
+ }
+
+ /* refine hash field for esp and nat-t-esp. */
+ if ((*addl_hdrs & ICE_FLOW_SEG_HDR_UDP) &&
+ (*addl_hdrs & ICE_FLOW_SEG_HDR_ESP)) {
+ *addl_hdrs &= ~(ICE_FLOW_SEG_HDR_ESP | ICE_FLOW_SEG_HDR_UDP);
+ *addl_hdrs |= ICE_FLOW_SEG_HDR_NAT_T_ESP;
+ *hash_flds &= ~(BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI));
+ *hash_flds |= BIT_ULL(ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI);
+ }
+
+ /* refine hash hdrs for L4 udp/tcp/sctp. */
+ if (*addl_hdrs & (ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_UDP |
+ ICE_FLOW_SEG_HDR_SCTP) &&
+ *addl_hdrs & ICE_FLOW_SEG_HDR_IPV_OTHER)
+ *addl_hdrs &= ~ICE_FLOW_SEG_HDR_IPV_OTHER;
+
+ return true;
+}
+
+/**
+ * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
+ * RSS offloads
+ * @caps: VF driver negotiated capabilities
+ *
+ * Return: true if the VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
+ * false otherwise.
+ */
+static bool ice_vf_adv_rss_offload_ena(u32 caps)
+{
+ return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
+}
+
+/**
+ * ice_is_hash_cfg_valid - Check whether an RSS hash context is valid
+ * @cfg: RSS hash configuration to test
+ *
+ * Return: true if both @cfg->hash_flds and @cfg->addl_hdrs are non-zero; false otherwise.
+ */
+static bool ice_is_hash_cfg_valid(struct ice_rss_hash_cfg *cfg)
+{
+ return cfg->hash_flds && cfg->addl_hdrs;
+}
+
+/**
+ * ice_hash_cfg_reset - Reset an RSS hash context
+ * @cfg: RSS hash configuration to reset
+ *
+ * Reset fields of @cfg that store the active rule information.
+ */
+static void ice_hash_cfg_reset(struct ice_rss_hash_cfg *cfg)
+{
+ cfg->hash_flds = 0;
+ cfg->addl_hdrs = 0;
+ cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
+ cfg->symm = 0;
+}
+
+/**
+ * ice_hash_cfg_record - Record an RSS hash context
+ * @ctx: destination (global) RSS hash configuration
+ * @cfg: source RSS hash configuration to record
+ *
+ * Copy the active rule information from @cfg into @ctx.
+ */
+static void ice_hash_cfg_record(struct ice_rss_hash_cfg *ctx,
+ struct ice_rss_hash_cfg *cfg)
+{
+ ctx->hash_flds = cfg->hash_flds;
+ ctx->addl_hdrs = cfg->addl_hdrs;
+ ctx->hdr_type = cfg->hdr_type;
+ ctx->symm = cfg->symm;
+}
+
+/**
+ * ice_hash_moveout - Delete an RSS configuration (keep context)
+ * @vf: VF pointer
+ * @cfg: RSS hash configuration
+ *
+ * Return: 0 on success (including when already absent); -ENOENT if @cfg is
+ * invalid or VSI is missing; -EBUSY on hardware removal failure.
+ */
+static int
+ice_hash_moveout(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_hw *hw = &vf->pf->hw;
+ int ret;
+
+ if (!ice_is_hash_cfg_valid(cfg) || !vsi)
+ return -ENOENT;
+
+ ret = ice_rem_rss_cfg(hw, vsi->idx, cfg);
+ if (ret && ret != -ENOENT) {
+ dev_err(dev, "ice_rem_rss_cfg failed for VF %d, VSI %d, error:%d\n",
+ vf->vf_id, vf->lan_vsi_idx, ret);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_hash_moveback - Add an RSS hash configuration for a VF
+ * @vf: VF pointer
+ * @cfg: RSS hash configuration to apply
+ *
+ * Add @cfg to @vf if the context is valid and VSI exists; programs HW.
+ *
+ * Return:
+ * * 0 on success
+ * * -ENOENT if @cfg is invalid or VSI is missing
+ * * -EBUSY if hardware programming fails
+ */
+static int
+ice_hash_moveback(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_hw *hw = &vf->pf->hw;
+ int ret;
+
+ if (!ice_is_hash_cfg_valid(cfg) || !vsi)
+ return -ENOENT;
+
+ ret = ice_add_rss_cfg(hw, vsi, cfg);
+ if (ret) {
+ dev_err(dev, "ice_add_rss_cfg failed for VF %d, VSI %d, error:%d\n",
+ vf->vf_id, vf->lan_vsi_idx, ret);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_hash_remove - remove an RSS configuration
+ * @vf: pointer to the VF info
+ * @cfg: pointer to the RSS hash configuration
+ *
+ * Delete an RSS hash configuration and also clear the hash context that
+ * stores the rule info.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int
+ice_hash_remove(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+ int ret;
+
+ ret = ice_hash_moveout(vf, cfg);
+ if (ret && ret != -ENOENT)
+ return ret;
+
+ ice_hash_cfg_reset(cfg);
+
+ return 0;
+}
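
Taken together, moveout/moveback/remove form a small lifecycle around a recorded hash context. A minimal illustrative sketch (not part of the patch; the helper name is hypothetical) of how a caller can park a context in software and later re-program it using only the helpers above:

static int example_repark_ctx(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
{
	int ret;

	/* drop the HW rule but keep the recorded context */
	ret = ice_hash_moveout(vf, cfg);
	if (ret && ret != -ENOENT)
		return ret;

	/* ... program a conflicting rule here ... */

	/* re-program the parked context from the recorded info */
	return ice_hash_moveback(vf, cfg);
}
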
+
+struct ice_gtpu_ctx_action {
+ u32 ctx_idx;
+ const u32 *remove_list;
+ int remove_count;
+ const u32 *moveout_list;
+ int moveout_count;
+};
+
+/**
+ * ice_add_rss_cfg_pre_gtpu - Pre-process the GTPU RSS configuration
+ * @vf: pointer to the VF info
+ * @ctx: pointer to the context of the GTPU hash
+ * @ctx_idx: index of the hash context
+ *
+ * Pre-processes the GTPU hash configuration before adding a new
+ * hash context. It removes or reorders existing hash configurations that may
+ * conflict with the new one. For example, if a GTPU_UP or GTPU_DWN rule is
+ * configured after a GTPU_EH rule, the GTPU_EH hash will be matched first due
+ * to TCAM write and match order (top-down). In such cases, the GTPU_EH rule
+ * must be moved after the GTPU_UP/DWN rule. Conversely, if a GTPU_EH rule is
+ * configured after a GTPU_UP/DWN rule, the UP/DWN rules should be removed to
+ * avoid conflict.
+ *
+ * Return: 0 on success or a negative error code on failure
+ */
+static int ice_add_rss_cfg_pre_gtpu(struct ice_vf *vf,
+ struct ice_vf_hash_gtpu_ctx *ctx,
+ u32 ctx_idx)
+{
+ int ret, i;
+
+ static const u32 remove_eh_ip[] = {
+ ICE_HASH_GTPU_CTX_EH_IP_UDP, ICE_HASH_GTPU_CTX_EH_IP_TCP,
+ ICE_HASH_GTPU_CTX_UP_IP, ICE_HASH_GTPU_CTX_UP_IP_UDP,
+ ICE_HASH_GTPU_CTX_UP_IP_TCP, ICE_HASH_GTPU_CTX_DW_IP,
+ ICE_HASH_GTPU_CTX_DW_IP_UDP, ICE_HASH_GTPU_CTX_DW_IP_TCP,
+ };
+
+ static const u32 remove_eh_ip_udp[] = {
+ ICE_HASH_GTPU_CTX_UP_IP_UDP,
+ ICE_HASH_GTPU_CTX_DW_IP_UDP,
+ };
+ static const u32 moveout_eh_ip_udp[] = {
+ ICE_HASH_GTPU_CTX_UP_IP,
+ ICE_HASH_GTPU_CTX_UP_IP_TCP,
+ ICE_HASH_GTPU_CTX_DW_IP,
+ ICE_HASH_GTPU_CTX_DW_IP_TCP,
+ };
+
+ static const u32 remove_eh_ip_tcp[] = {
+ ICE_HASH_GTPU_CTX_UP_IP_TCP,
+ ICE_HASH_GTPU_CTX_DW_IP_TCP,
+ };
+ static const u32 moveout_eh_ip_tcp[] = {
+ ICE_HASH_GTPU_CTX_UP_IP,
+ ICE_HASH_GTPU_CTX_UP_IP_UDP,
+ ICE_HASH_GTPU_CTX_DW_IP,
+ ICE_HASH_GTPU_CTX_DW_IP_UDP,
+ };
+
+ static const u32 remove_up_ip[] = {
+ ICE_HASH_GTPU_CTX_UP_IP_UDP,
+ ICE_HASH_GTPU_CTX_UP_IP_TCP,
+ };
+ static const u32 moveout_up_ip[] = {
+ ICE_HASH_GTPU_CTX_EH_IP,
+ ICE_HASH_GTPU_CTX_EH_IP_UDP,
+ ICE_HASH_GTPU_CTX_EH_IP_TCP,
+ };
+
+ static const u32 moveout_up_ip_udp_tcp[] = {
+ ICE_HASH_GTPU_CTX_EH_IP,
+ ICE_HASH_GTPU_CTX_EH_IP_UDP,
+ ICE_HASH_GTPU_CTX_EH_IP_TCP,
+ };
+
+ static const u32 remove_dw_ip[] = {
+ ICE_HASH_GTPU_CTX_DW_IP_UDP,
+ ICE_HASH_GTPU_CTX_DW_IP_TCP,
+ };
+ static const u32 moveout_dw_ip[] = {
+ ICE_HASH_GTPU_CTX_EH_IP,
+ ICE_HASH_GTPU_CTX_EH_IP_UDP,
+ ICE_HASH_GTPU_CTX_EH_IP_TCP,
+ };
+
+ static const struct ice_gtpu_ctx_action actions[] = {
+ { ICE_HASH_GTPU_CTX_EH_IP, remove_eh_ip,
+ ARRAY_SIZE(remove_eh_ip), NULL, 0 },
+ { ICE_HASH_GTPU_CTX_EH_IP_UDP, remove_eh_ip_udp,
+ ARRAY_SIZE(remove_eh_ip_udp), moveout_eh_ip_udp,
+ ARRAY_SIZE(moveout_eh_ip_udp) },
+ { ICE_HASH_GTPU_CTX_EH_IP_TCP, remove_eh_ip_tcp,
+ ARRAY_SIZE(remove_eh_ip_tcp), moveout_eh_ip_tcp,
+ ARRAY_SIZE(moveout_eh_ip_tcp) },
+ { ICE_HASH_GTPU_CTX_UP_IP, remove_up_ip,
+ ARRAY_SIZE(remove_up_ip), moveout_up_ip,
+ ARRAY_SIZE(moveout_up_ip) },
+ { ICE_HASH_GTPU_CTX_UP_IP_UDP, NULL, 0, moveout_up_ip_udp_tcp,
+ ARRAY_SIZE(moveout_up_ip_udp_tcp) },
+ { ICE_HASH_GTPU_CTX_UP_IP_TCP, NULL, 0, moveout_up_ip_udp_tcp,
+ ARRAY_SIZE(moveout_up_ip_udp_tcp) },
+ { ICE_HASH_GTPU_CTX_DW_IP, remove_dw_ip,
+ ARRAY_SIZE(remove_dw_ip), moveout_dw_ip,
+ ARRAY_SIZE(moveout_dw_ip) },
+ { ICE_HASH_GTPU_CTX_DW_IP_UDP, NULL, 0, moveout_dw_ip,
+ ARRAY_SIZE(moveout_dw_ip) },
+ { ICE_HASH_GTPU_CTX_DW_IP_TCP, NULL, 0, moveout_dw_ip,
+ ARRAY_SIZE(moveout_dw_ip) },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(actions); i++) {
+ if (actions[i].ctx_idx != ctx_idx)
+ continue;
+
+ if (actions[i].remove_list) {
+ for (int j = 0; j < actions[i].remove_count; j++) {
+ u16 rm = actions[i].remove_list[j];
+
+ ret = ice_hash_remove(vf, &ctx->ctx[rm]);
+ if (ret && ret != -ENOENT)
+ return ret;
+ }
+ }
+
+ if (actions[i].moveout_list) {
+ for (int j = 0; j < actions[i].moveout_count; j++) {
+ u16 mv = actions[i].moveout_list[j];
+
+ ret = ice_hash_moveout(vf, &ctx->ctx[mv]);
+ if (ret && ret != -ENOENT)
+ return ret;
+ }
+ }
+ break;
+ }
+
+ return 0;
+}
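
The table-driven flow is easiest to see with one concrete entry; the sketch below only restates the ICE_HASH_GTPU_CTX_EH_IP_UDP case from the arrays above, it adds no new behavior.

/*
 * Adding a rule with ctx_idx == ICE_HASH_GTPU_CTX_EH_IP_UDP selects:
 *
 *   remove:  UP_IP_UDP, DW_IP_UDP      - fully shadowed by the new EH rule
 *   moveout: UP_IP, UP_IP_TCP,
 *            DW_IP, DW_IP_TCP          - kept in SW and re-added in the
 *                                        post step so they are not matched
 *                                        ahead of the EH rule in the TCAM
 */
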
+
+/**
+ * ice_add_rss_cfg_pre_ip - Pre-process IP-layer RSS configuration
+ * @vf: VF pointer
+ * @ctx: IP L4 hash context (ESP/UDP-ESP/AH/PFCP and UDP/TCP/SCTP)
+ *
+ * Remove covered/recorded IP RSS configurations prior to adding a new one.
+ *
+ * Return: 0 on success; negative error code on failure.
+ */
+static int
+ice_add_rss_cfg_pre_ip(struct ice_vf *vf, struct ice_vf_hash_ip_ctx *ctx)
+{
+ int i, ret;
+
+ for (i = 1; i < ICE_HASH_IP_CTX_MAX; i++)
+ if (ice_is_hash_cfg_valid(&ctx->ctx[i])) {
+ ret = ice_hash_remove(vf, &ctx->ctx[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_calc_gtpu_ctx_idx - Calculate GTPU hash context index
+ * @hdrs: Bitmask of protocol headers prefixed with ICE_FLOW_SEG_HDR_*
+ *
+ * Determine the GTPU hash context index based on the combination of
+ * encapsulation headers (GTPU_EH, GTPU_UP, GTPU_DWN) and transport
+ * protocols (UDP, TCP) within IPv4 or IPv6 flows.
+ *
+ * Return: A valid context index (0-8) if the header combination is supported,
+ * or ICE_HASH_GTPU_CTX_MAX if the combination is invalid.
+ */
+static enum ice_hash_gtpu_ctx_type ice_calc_gtpu_ctx_idx(u32 hdrs)
+{
+ u32 eh_idx, ip_idx;
+
+ if (hdrs & ICE_FLOW_SEG_HDR_GTPU_EH)
+ eh_idx = 0;
+ else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_UP)
+ eh_idx = 1;
+ else if (hdrs & ICE_FLOW_SEG_HDR_GTPU_DWN)
+ eh_idx = 2;
+ else
+ return ICE_HASH_GTPU_CTX_MAX;
+
+ ip_idx = 0;
+ if (hdrs & ICE_FLOW_SEG_HDR_UDP)
+ ip_idx = 1;
+ else if (hdrs & ICE_FLOW_SEG_HDR_TCP)
+ ip_idx = 2;
+
+ if (hdrs & (ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV6))
+ return eh_idx * 3 + ip_idx;
+ else
+ return ICE_HASH_GTPU_CTX_MAX;
+}
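
A short worked example of the index arithmetic, assuming the EH/UP/DWN x IP/UDP/TCP enum layout implied by the eh_idx * 3 + ip_idx computation (values shown for illustration only):

/*
 *   hdrs = GTPU_UP | IPV4 | TCP (no EH bit) -> eh_idx = 1, ip_idx = 2
 *                                           -> ctx = 1 * 3 + 2 = 5
 *                                              (ICE_HASH_GTPU_CTX_UP_IP_TCP)
 *   hdrs = GTPU_IP | IPV4 (no EH/UP/DWN)    -> ICE_HASH_GTPU_CTX_MAX
 */
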
+
+/**
+ * ice_map_ip_ctx_idx - map the index of the IP L4 hash context
+ * @hdrs: protocol headers prefixed with ICE_FLOW_SEG_HDR_XXX
+ *
+ * The IP L4 hash context uses the index to classify IPv4/IPv6 flows with
+ * ESP/UDP_ESP/AH/PFCP and non-tunnel UDP/TCP/SCTP. This function maps the
+ * index based on the protocol headers.
+ *
+ * Return: The mapped IP context index on success, or ICE_HASH_IP_CTX_MAX
+ * if no matching context is found.
+ */
+static u8 ice_map_ip_ctx_idx(u32 hdrs)
+{
+ u8 i;
+
+ static struct {
+ u32 hdrs;
+ u8 ctx_idx;
+ } ip_ctx_idx_map[] = {
+ { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER |
+ ICE_FLOW_SEG_HDR_ESP,
+ ICE_HASH_IP_CTX_IP_ESP },
+ { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER |
+ ICE_FLOW_SEG_HDR_NAT_T_ESP,
+ ICE_HASH_IP_CTX_IP_UDP_ESP },
+ { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER |
+ ICE_FLOW_SEG_HDR_AH,
+ ICE_HASH_IP_CTX_IP_AH },
+ { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER |
+ ICE_FLOW_SEG_HDR_PFCP_SESSION,
+ ICE_HASH_IP_CTX_IP_PFCP },
+ { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
+ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP,
+ ICE_HASH_IP_CTX_IP_UDP },
+ { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
+ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
+ ICE_HASH_IP_CTX_IP_TCP },
+ { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
+ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_SCTP,
+ ICE_HASH_IP_CTX_IP_SCTP },
+ { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
+ ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER,
+ ICE_HASH_IP_CTX_IP },
+ { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER |
+ ICE_FLOW_SEG_HDR_ESP,
+ ICE_HASH_IP_CTX_IP_ESP },
+ { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER |
+ ICE_FLOW_SEG_HDR_NAT_T_ESP,
+ ICE_HASH_IP_CTX_IP_UDP_ESP },
+ { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER |
+ ICE_FLOW_SEG_HDR_AH,
+ ICE_HASH_IP_CTX_IP_AH },
+ { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER |
+ ICE_FLOW_SEG_HDR_PFCP_SESSION,
+ ICE_HASH_IP_CTX_IP_PFCP },
+ { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
+ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_UDP,
+ ICE_HASH_IP_CTX_IP_UDP },
+ { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
+ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_TCP,
+ ICE_HASH_IP_CTX_IP_TCP },
+ { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
+ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_SCTP,
+ ICE_HASH_IP_CTX_IP_SCTP },
+ { ICE_FLOW_SEG_HDR_ETH | ICE_FLOW_SEG_HDR_VLAN |
+ ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER,
+ ICE_HASH_IP_CTX_IP },
+ /* the remaining mappings are used for default RSS */
+ { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_UDP,
+ ICE_HASH_IP_CTX_IP_UDP },
+ { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
+ ICE_HASH_IP_CTX_IP_TCP },
+ { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_SCTP,
+ ICE_HASH_IP_CTX_IP_SCTP },
+ { ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_IPV_OTHER,
+ ICE_HASH_IP_CTX_IP },
+ { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_UDP,
+ ICE_HASH_IP_CTX_IP_UDP },
+ { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_TCP,
+ ICE_HASH_IP_CTX_IP_TCP },
+ { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_SCTP,
+ ICE_HASH_IP_CTX_IP_SCTP },
+ { ICE_FLOW_SEG_HDR_IPV6 | ICE_FLOW_SEG_HDR_IPV_OTHER,
+ ICE_HASH_IP_CTX_IP },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(ip_ctx_idx_map); i++) {
+ if (hdrs == ip_ctx_idx_map[i].hdrs)
+ return ip_ctx_idx_map[i].ctx_idx;
+ }
+
+ return ICE_HASH_IP_CTX_MAX;
+}
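
Because the lookup requires an exact bitmask match, extra header bits fall through to ICE_HASH_IP_CTX_MAX; a few illustrative lookups against the table above:

/*
 *   IPV4 | UDP                     -> ICE_HASH_IP_CTX_IP_UDP (default RSS)
 *   ETH | VLAN | IPV6 | IPV_OTHER  -> ICE_HASH_IP_CTX_IP
 *   IPV4 | UDP | GTPU_IP           -> ICE_HASH_IP_CTX_MAX (no exact match)
 */
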
+
+/**
+ * ice_add_rss_cfg_pre - Prepare RSS configuration context for a VF
+ * @vf: pointer to the VF structure
+ * @cfg: pointer to the RSS hash configuration
+ *
+ * Prepare the RSS hash context for a given VF based on the additional
+ * protocol headers specified in @cfg. This includes pre-configuration
+ * for IP and GTPU-based flows.
+ *
+ * If the configuration matches a known IP context, the function sets up
+ * the appropriate IP hash context. If the configuration includes GTPU
+ * headers, it prepares the GTPU-specific context accordingly.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int
+ice_add_rss_cfg_pre(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+ u32 ice_gtpu_ctx_idx = ice_calc_gtpu_ctx_idx(cfg->addl_hdrs);
+ u8 ip_ctx_idx = ice_map_ip_ctx_idx(cfg->addl_hdrs);
+
+ if (ip_ctx_idx == ICE_HASH_IP_CTX_IP) {
+ int ret = 0;
+
+ if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
+ ret = ice_add_rss_cfg_pre_ip(vf, &vf->hash_ctx.v4);
+ else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
+ ret = ice_add_rss_cfg_pre_ip(vf, &vf->hash_ctx.v6);
+
+ if (ret)
+ return ret;
+ }
+
+ if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) {
+ return ice_add_rss_cfg_pre_gtpu(vf, &vf->hash_ctx.ipv4,
+ ice_gtpu_ctx_idx);
+ } else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) {
+ return ice_add_rss_cfg_pre_gtpu(vf, &vf->hash_ctx.ipv6,
+ ice_gtpu_ctx_idx);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_add_rss_cfg_post_gtpu - Post-process GTPU RSS configuration
+ * @vf: pointer to the VF info
+ * @ctx: pointer to the context of the GTPU hash
+ * @cfg: pointer to the RSS hash configuration
+ * @ctx_idx: index of the hash context
+ *
+ * Post-processes the GTPU hash configuration after a new hash
+ * context has been successfully added. It updates the context with the new
+ * configuration and restores any previously removed hash contexts that need
+ * to be re-applied. This ensures proper TCAM rule ordering and avoids
+ * conflicts between overlapping GTPU rules.
+ *
+ * Return: 0 on success or a negative error code on failure
+ */
+static int ice_add_rss_cfg_post_gtpu(struct ice_vf *vf,
+ struct ice_vf_hash_gtpu_ctx *ctx,
+ struct ice_rss_hash_cfg *cfg, u32 ctx_idx)
+{
+ /* GTPU hash moveback lookup table indexed by context ID.
+ * Each entry is a bitmap indicating which contexts need moveback
+ * operations when the corresponding context index is processed.
+ */
+ static const unsigned long
+ ice_gtpu_moveback_tbl[ICE_HASH_GTPU_CTX_MAX] = {
+ [ICE_HASH_GTPU_CTX_EH_IP] = 0,
+ [ICE_HASH_GTPU_CTX_EH_IP_UDP] =
+ BIT(ICE_HASH_GTPU_CTX_UP_IP) |
+ BIT(ICE_HASH_GTPU_CTX_UP_IP_TCP) |
+ BIT(ICE_HASH_GTPU_CTX_DW_IP) |
+ BIT(ICE_HASH_GTPU_CTX_DW_IP_TCP),
+ [ICE_HASH_GTPU_CTX_EH_IP_TCP] =
+ BIT(ICE_HASH_GTPU_CTX_UP_IP) |
+ BIT(ICE_HASH_GTPU_CTX_UP_IP_UDP) |
+ BIT(ICE_HASH_GTPU_CTX_DW_IP) |
+ BIT(ICE_HASH_GTPU_CTX_DW_IP_UDP),
+ [ICE_HASH_GTPU_CTX_UP_IP] =
+ BIT(ICE_HASH_GTPU_CTX_EH_IP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
+ [ICE_HASH_GTPU_CTX_UP_IP_UDP] =
+ BIT(ICE_HASH_GTPU_CTX_EH_IP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
+ [ICE_HASH_GTPU_CTX_UP_IP_TCP] =
+ BIT(ICE_HASH_GTPU_CTX_EH_IP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
+ [ICE_HASH_GTPU_CTX_DW_IP] =
+ BIT(ICE_HASH_GTPU_CTX_EH_IP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
+ [ICE_HASH_GTPU_CTX_DW_IP_UDP] =
+ BIT(ICE_HASH_GTPU_CTX_EH_IP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
+ [ICE_HASH_GTPU_CTX_DW_IP_TCP] =
+ BIT(ICE_HASH_GTPU_CTX_EH_IP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_UDP) |
+ BIT(ICE_HASH_GTPU_CTX_EH_IP_TCP),
+ };
+ unsigned long moveback_mask;
+ int ret;
+ int i;
+
+ if (unlikely(ctx_idx >= ICE_HASH_GTPU_CTX_MAX))
+ return 0;
+
+ ctx->ctx[ctx_idx].addl_hdrs = cfg->addl_hdrs;
+ ctx->ctx[ctx_idx].hash_flds = cfg->hash_flds;
+ ctx->ctx[ctx_idx].hdr_type = cfg->hdr_type;
+ ctx->ctx[ctx_idx].symm = cfg->symm;
+
+ moveback_mask = ice_gtpu_moveback_tbl[ctx_idx];
+ for_each_set_bit(i, &moveback_mask, ICE_HASH_GTPU_CTX_MAX) {
+ ret = ice_hash_moveback(vf, &ctx->ctx[i]);
+ if (ret && ret != -ENOENT)
+ return ret;
+ }
+
+ return 0;
+}
+
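+/**
+ * ice_add_rss_cfg_post - Post-process an RSS configuration for a VF
+ * @vf: pointer to the VF info
+ * @cfg: pointer to the RSS hash configuration that was just added
+ *
+ * Record the new configuration in the matching IP hash context and run the
+ * GTPU post-processing so that contexts moved out in the pre step are
+ * re-applied.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */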
+static int
+ice_add_rss_cfg_post(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+ u32 ice_gtpu_ctx_idx = ice_calc_gtpu_ctx_idx(cfg->addl_hdrs);
+ u8 ip_ctx_idx = ice_map_ip_ctx_idx(cfg->addl_hdrs);
+
+ if (ip_ctx_idx && ip_ctx_idx < ICE_HASH_IP_CTX_MAX) {
+ if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
+ ice_hash_cfg_record(&vf->hash_ctx.v4.ctx[ip_ctx_idx], cfg);
+ else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
+ ice_hash_cfg_record(&vf->hash_ctx.v6.ctx[ip_ctx_idx], cfg);
+ }
+
+ if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4) {
+ return ice_add_rss_cfg_post_gtpu(vf, &vf->hash_ctx.ipv4,
+ cfg, ice_gtpu_ctx_idx);
+ } else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6) {
+ return ice_add_rss_cfg_post_gtpu(vf, &vf->hash_ctx.ipv6,
+ cfg, ice_gtpu_ctx_idx);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_rem_rss_cfg_post - post-process the RSS configuration
+ * @vf: pointer to the VF info
+ * @cfg: pointer to the RSS hash configuration
+ *
+ * Post-process the RSS hash configuration after deleting a hash config,
+ * e.g. reset the hash context for the GTPU hash.
+ */
+static void
+ice_rem_rss_cfg_post(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+ u32 ice_gtpu_ctx_idx = ice_calc_gtpu_ctx_idx(cfg->addl_hdrs);
+ u8 ip_ctx_idx = ice_map_ip_ctx_idx(cfg->addl_hdrs);
+
+ if (ip_ctx_idx && ip_ctx_idx < ICE_HASH_IP_CTX_MAX) {
+ if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
+ ice_hash_cfg_reset(&vf->hash_ctx.v4.ctx[ip_ctx_idx]);
+ else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
+ ice_hash_cfg_reset(&vf->hash_ctx.v6.ctx[ip_ctx_idx]);
+ }
+
+ if (ice_gtpu_ctx_idx >= ICE_HASH_GTPU_CTX_MAX)
+ return;
+
+ if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV4)
+ ice_hash_cfg_reset(&vf->hash_ctx.ipv4.ctx[ice_gtpu_ctx_idx]);
+ else if (cfg->addl_hdrs & ICE_FLOW_SEG_HDR_IPV6)
+ ice_hash_cfg_reset(&vf->hash_ctx.ipv6.ctx[ice_gtpu_ctx_idx]);
+}
+
+/**
+ * ice_rem_rss_cfg_wrap - Wrapper for deleting an RSS configuration
+ * @vf: pointer to the VF info
+ * @cfg: pointer to the RSS hash configuration
+ *
+ * Wrapper function to delete a flow profile based on an RSS configuration,
+ * and also post-process the hash context based on the rollback mechanism
+ * that ice_add_rss_cfg_wrap uses to handle rule conflicts.
+ *
+ * Return: 0 on success; negative error code on failure.
+ */
+static int
+ice_rem_rss_cfg_wrap(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_hw *hw = &vf->pf->hw;
+ int ret;
+
+ ret = ice_rem_rss_cfg(hw, vsi->idx, cfg);
+ /* We just ignore -ENOENT, because if two configurations share the same
+ * profile, removing one of them actually removes both, since the
+ * profile is deleted.
+ */
+ if (ret && ret != -ENOENT) {
+ dev_err(dev, "ice_rem_rss_cfg failed for VF %d, VSI %d, error:%d\n",
+ vf->vf_id, vf->lan_vsi_idx, ret);
+ return ret;
+ }
+
+ ice_rem_rss_cfg_post(vf, cfg);
+
+ return 0;
+}
+
+/**
+ * ice_add_rss_cfg_wrap - Wrapper for adding an RSS configuration
+ * @vf: pointer to the VF info
+ * @cfg: pointer to the RSS hash configuration
+ *
+ * Add a flow profile based on an RSS configuration. Use a rollback
+ * mechanism to handle rule conflicts caused by the top-to-bottom TCAM
+ * write and match order.
+ *
+ * Return: 0 on success; negative error code on failure.
+ */
+static int
+ice_add_rss_cfg_wrap(struct ice_vf *vf, struct ice_rss_hash_cfg *cfg)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ struct ice_hw *hw = &vf->pf->hw;
+ int ret;
+
+ if (ice_add_rss_cfg_pre(vf, cfg))
+ return -EINVAL;
+
+ ret = ice_add_rss_cfg(hw, vsi, cfg);
+ if (ret) {
+ dev_err(dev, "ice_add_rss_cfg failed for VF %d, VSI %d, error:%d\n",
+ vf->vf_id, vf->lan_vsi_idx, ret);
+ return ret;
+ }
+
+ if (ice_add_rss_cfg_post(vf, cfg))
+ ret = -EINVAL;
+
+ return ret;
+}
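
For orientation, the add path composes three steps; this summary is derived from the code above and adds no new behavior:

/*
 *   ice_add_rss_cfg_pre()  - resolve conflicts with already-recorded
 *                            contexts (remove / moveout)
 *   ice_add_rss_cfg()      - program the new rule in HW
 *   ice_add_rss_cfg_post() - record the rule in the per-VF context and
 *                            move parked contexts back in
 */
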
+
+/**
+ * ice_parse_raw_rss_pattern - Parse raw pattern spec and mask for RSS
+ * @vf: pointer to the VF info
+ * @proto: pointer to the virtchnl protocol header
+ * @raw_cfg: pointer to the RSS raw pattern configuration
+ *
+ * Parse the spec and mask from the virtchnl message to derive the
+ * corresponding parser profile and offsets. The profile is then used to
+ * add the RSS configuration.
+ *
+ * Return: 0 on success; negative error code on failure.
+ */
+static int
+ice_parse_raw_rss_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto,
+ struct ice_rss_raw_cfg *raw_cfg)
+{
+ struct ice_parser_result pkt_parsed;
+ struct ice_hw *hw = &vf->pf->hw;
+ struct ice_parser_profile prof;
+ struct ice_parser *psr;
+ u8 *pkt_buf, *msk_buf;
+ u16 pkt_len;
+ int ret = 0;
+
+ pkt_len = proto->raw.pkt_len;
+ if (!pkt_len)
+ return -EINVAL;
+ if (pkt_len > VIRTCHNL_MAX_SIZE_RAW_PACKET)
+ pkt_len = VIRTCHNL_MAX_SIZE_RAW_PACKET;
+
+ pkt_buf = kzalloc(pkt_len, GFP_KERNEL);
+ msk_buf = kzalloc(pkt_len, GFP_KERNEL);
+ if (!pkt_buf || !msk_buf) {
+ ret = -ENOMEM;
+ goto free_alloc;
+ }
+
+ memcpy(pkt_buf, proto->raw.spec, pkt_len);
+ memcpy(msk_buf, proto->raw.mask, pkt_len);
+
+ psr = ice_parser_create(hw);
+ if (IS_ERR(psr)) {
+ ret = PTR_ERR(psr);
+ goto free_alloc;
+ }
+
+ ret = ice_parser_run(psr, pkt_buf, pkt_len, &pkt_parsed);
+ if (ret)
+ goto parser_destroy;
+
+ ret = ice_parser_profile_init(&pkt_parsed, pkt_buf, msk_buf,
+ pkt_len, ICE_BLK_RSS, &prof);
+ if (ret)
+ goto parser_destroy;
+
+ memcpy(&raw_cfg->prof, &prof, sizeof(prof));
+
+parser_destroy:
+ ice_parser_destroy(psr);
+free_alloc:
+ kfree(pkt_buf);
+ kfree(msk_buf);
+ return ret;
+}
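
A minimal VF-side sketch (illustrative only, not part of the patch; template_pkt and template_msk are hypothetical buffers) of the raw-pattern request this parser expects: count and tunnel_level are zero, and the packet template plus mask travel in proto_hdrs.raw.

struct virtchnl_rss_cfg cfg = { 0 };

cfg.rss_algorithm = VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
cfg.proto_hdrs.tunnel_level = 0;	/* selects the raw-pattern path */
cfg.proto_hdrs.count = 0;
cfg.proto_hdrs.raw.pkt_len = sizeof(template_pkt);
memcpy(cfg.proto_hdrs.raw.spec, template_pkt, sizeof(template_pkt));
memcpy(cfg.proto_hdrs.raw.mask, template_msk, sizeof(template_pkt));
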
+
+/**
+ * ice_add_raw_rss_cfg - add RSS configuration for raw pattern
+ * @vf: pointer to the VF info
+ * @cfg: pointer to the RSS raw pattern configuration
+ *
+ * Add the RSS configuration for a raw pattern. Check whether the current
+ * profile matches; if not, remove the old one and add the new profile to
+ * HW directly. Update the symmetric hash configuration as well.
+ *
+ * Return: 0 on success; negative error code on failure.
+ */
+static int
+ice_add_raw_rss_cfg(struct ice_vf *vf, struct ice_rss_raw_cfg *cfg)
+{
+ struct ice_parser_profile *prof = &cfg->prof;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_rss_prof_info *rss_prof;
+ struct ice_hw *hw = &vf->pf->hw;
+ int i, ptg, ret = 0;
+ u16 vsi_handle;
+ u64 id;
+
+ vsi_handle = vf->lan_vsi_idx;
+ id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX);
+
+ ptg = hw->blk[ICE_BLK_RSS].xlt1.t[id];
+ rss_prof = &vf->rss_prof_info[ptg];
+
+ /* check if ptg already has a profile */
+ if (rss_prof->prof.fv_num) {
+ for (i = 0; i < ICE_MAX_FV_WORDS; i++) {
+ if (rss_prof->prof.fv[i].proto_id !=
+ prof->fv[i].proto_id ||
+ rss_prof->prof.fv[i].offset !=
+ prof->fv[i].offset)
+ break;
+ }
+
+ /* current profile is matched, check symmetric hash */
+ if (i == ICE_MAX_FV_WORDS) {
+ if (rss_prof->symm != cfg->symm)
+ goto update_symm;
+ return ret;
+ }
+
+ /* the current profile does not match, remove it */
+ ret = ice_rem_prof_id_flow(hw, ICE_BLK_RSS,
+ ice_get_hw_vsi_num(hw, vsi_handle), id);
+ if (ret) {
+ dev_err(dev, "remove RSS flow failed\n");
+ return ret;
+ }
+
+ ret = ice_rem_prof(hw, ICE_BLK_RSS, id);
+ if (ret) {
+ dev_err(dev, "remove RSS profile failed\n");
+ return ret;
+ }
+ }
+
+ /* add new profile */
+ ret = ice_flow_set_parser_prof(hw, vsi_handle, 0, prof, ICE_BLK_RSS);
+ if (ret) {
+ dev_err(dev, "HW profile add failed\n");
+ return ret;
+ }
+
+ memcpy(&rss_prof->prof, prof, sizeof(struct ice_parser_profile));
+
+update_symm:
+ rss_prof->symm = cfg->symm;
+ ice_rss_update_raw_symm(hw, cfg, id);
+ return ret;
+}
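
The decision flow above, restated for readability (derived from the code, no new behavior):

/*
 *   PTG already has a recorded profile?
 *     yes, field vectors identical  -> reuse it; update symm only if changed
 *     yes, field vectors differ     -> remove old flow + profile from HW
 *     no (or just removed)          -> install the new parser profile,
 *                                      record it, update symm
 */
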
+
+/**
+ * ice_rem_raw_rss_cfg - remove RSS configuration for raw pattern
+ * @vf: pointer to the VF info
+ * @cfg: pointer to the RSS raw pattern configuration
+ *
+ * Remove the RSS configuration for a raw pattern. Check whether the VSI
+ * group has already been removed; if not, remove the profile.
+ *
+ * Return: 0 on success; negative error code on failure.
+ */
+static int
+ice_rem_raw_rss_cfg(struct ice_vf *vf, struct ice_rss_raw_cfg *cfg)
+{
+ struct ice_parser_profile *prof = &cfg->prof;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_hw *hw = &vf->pf->hw;
+ int ptg, ret = 0;
+ u16 vsig, vsi;
+ u64 id;
+
+ id = find_first_bit(prof->ptypes, ICE_FLOW_PTYPE_MAX);
+
+ ptg = hw->blk[ICE_BLK_RSS].xlt1.t[id];
+
+ memset(&vf->rss_prof_info[ptg], 0,
+ sizeof(struct ice_rss_prof_info));
+
+ /* check if vsig is already removed */
+ vsi = ice_get_hw_vsi_num(hw, vf->lan_vsi_idx);
+ if (vsi >= ICE_MAX_VSI) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ vsig = hw->blk[ICE_BLK_RSS].xlt2.vsis[vsi].vsig;
+ if (vsig) {
+ ret = ice_rem_prof_id_flow(hw, ICE_BLK_RSS, vsi, id);
+ if (ret)
+ goto err;
+
+ ret = ice_rem_prof(hw, ICE_BLK_RSS, id);
+ if (ret)
+ goto err;
+ }
+
+ return ret;
+
+err:
+ dev_err(dev, "HW profile remove failed\n");
+ return ret;
+}
+
+/**
+ * ice_vc_handle_rss_cfg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the message buffer
+ * @add: add an RSS config if true, otherwise delete an RSS config
+ *
+ * This function adds or deletes an RSS config.
+ */
+int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
+{
+ u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
+ struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_hw *hw = &vf->pf->hw;
+ struct ice_vsi *vsi;
+ u8 hash_type;
+ bool symm;
+ int ret;
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ goto error_param;
+ }
+
+ if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
+ dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
+ rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
+ rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
+ dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
+ vf->vf_id);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
+ hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR :
+ ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+
+ ret = ice_vc_rss_hash_update(hw, vsi, hash_type);
+ if (ret)
+ v_ret = ice_err_to_virt_err(ret);
+ goto error_param;
+ }
+
+ hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ :
+ ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+ ret = ice_vc_rss_hash_update(hw, vsi, hash_type);
+ if (ret) {
+ v_ret = ice_err_to_virt_err(ret);
+ goto error_param;
+ }
+
+ symm = rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC;
+ /* Configure RSS hash for raw pattern */
+ if (rss_cfg->proto_hdrs.tunnel_level == 0 &&
+ rss_cfg->proto_hdrs.count == 0) {
+ struct ice_rss_raw_cfg raw_cfg;
+
+ if (ice_parse_raw_rss_pattern(vf, &rss_cfg->proto_hdrs,
+ &raw_cfg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (add) {
+ raw_cfg.symm = symm;
+ if (ice_add_raw_rss_cfg(vf, &raw_cfg))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ } else {
+ if (ice_rem_raw_rss_cfg(vf, &raw_cfg))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ }
+ } else {
+ struct ice_rss_hash_cfg cfg;
+
+ /* Only validate the pattern for the non-raw case */
+ if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+ cfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
+ cfg.hash_flds = ICE_HASH_INVALID;
+ cfg.hdr_type = ICE_RSS_ANY_HEADERS;
+
+ if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &cfg)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (add) {
+ cfg.symm = symm;
+ if (ice_add_rss_cfg_wrap(vf, &cfg))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ } else {
+ if (ice_rem_rss_cfg_wrap(vf, &cfg))
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ }
+ }
+
+error_param:
+ return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
+}
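
A compact summary of how the handler above dispatches on the requested algorithm and pattern (derived from the code):

/*
 *   VIRTCHNL_RSS_ALG_R_ASYMMETRIC  -> only switch the VSI hash scheme
 *                                     (XOR on add, Toeplitz on delete)
 *   any other valid algorithm      -> switch the VSI hash scheme, then:
 *     proto_hdrs.count == 0 &&
 *     tunnel_level == 0            -> raw-pattern path (parser profile)
 *     otherwise                    -> parsed path via
 *                                     ice_add/rem_rss_cfg_wrap(), with
 *                                     symm set for TOEPLITZ_SYMMETRIC
 */
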
+
+/**
+ * ice_vc_config_rss_key
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS key
+ */
+int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_rss_key *vrk =
+ (struct virtchnl_rss_key *)msg;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_set_rss_key(vsi, vrk->key))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_config_rss_lut
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS LUT
+ */
+int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrl->lut_entries != ICE_LUT_VSI_SIZE) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (ice_set_rss_lut(vsi, vrl->lut, ICE_LUT_VSI_SIZE))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_config_rss_hfunc
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * Configure the VF's RSS Hash function
+ */
+int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_hfunc *vrh = (struct virtchnl_rss_hfunc *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
+ struct ice_vsi *vsi;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!ice_vc_isvalid_vsi_id(vf, vrh->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (vrh->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
+ hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
+
+ if (ice_set_rss_hfunc(vsi, hfunc))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+error_param:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_HFUNC, v_ret,
+ NULL, 0);
+}
+
+/**
+ * ice_vc_get_rss_hashcfg - return the RSS Hash configuration
+ * @vf: pointer to the VF info
+ */
+int ice_vc_get_rss_hashcfg(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_rss_hashcfg *vrh = NULL;
+ int len = 0, ret;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
+ dev_err(ice_pf_to_dev(vf->pf), "RSS not supported by PF\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ len = sizeof(struct virtchnl_rss_hashcfg);
+ vrh = kzalloc(len, GFP_KERNEL);
+ if (!vrh) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+
+ vrh->hashcfg = ICE_DEFAULT_RSS_HASHCFG;
+err:
+ /* send the response back to the VF */
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS, v_ret,
+ (u8 *)vrh, len);
+ kfree(vrh);
+ return ret;
+}
+
+/**
+ * ice_vc_set_rss_hashcfg - set RSS Hash configuration bits for the VF
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ */
+int ice_vc_set_rss_hashcfg(struct ice_vf *vf, u8 *msg)
+{
+ struct virtchnl_rss_hashcfg *vrh = (struct virtchnl_rss_hashcfg *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ int status;
+
+ dev = ice_pf_to_dev(pf);
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ dev_err(dev, "RSS not supported by PF\n");
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
+ /* clear all previously programmed RSS configuration to allow VF drivers
+ * to customize the RSS configuration and/or completely disable RSS
+ */
+ status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
+ if (status && !vrh->hashcfg) {
+ /* only report failure to clear the current RSS configuration if
+ * that was clearly the VF's intention (i.e. vrh->hashcfg = 0)
+ */
+ v_ret = ice_err_to_virt_err(status);
+ goto err;
+ } else if (status) {
+ /* allow the VF to update the RSS configuration even on failure
+ * to clear the current RSS configuration in an attempt to keep
+ * RSS in a working state
+ */
+ dev_warn(dev, "Failed to clear the RSS configuration for VF %u\n",
+ vf->vf_id);
+ }
+
+ if (vrh->hashcfg) {
+ status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hashcfg);
+ v_ret = ice_err_to_virt_err(status);
+ }
+
+ /* save the requested VF configuration */
+ if (!v_ret)
+ vf->rss_hashcfg = vrh->hashcfg;
+
+ /* send the response to the VF */
+err:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HASHCFG, v_ret,
+ NULL, 0);
+}
+
diff --git a/drivers/net/ethernet/intel/ice/virt/rss.h b/drivers/net/ethernet/intel/ice/virt/rss.h
new file mode 100644
index 000000000000..784d4c43ce8b
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/virt/rss.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022, Intel Corporation. */
+
+#ifndef _ICE_VIRT_RSS_H_
+#define _ICE_VIRT_RSS_H_
+
+#include <linux/types.h>
+
+struct ice_vf;
+
+int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add);
+int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg);
+int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg);
+int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg);
+int ice_vc_get_rss_hashcfg(struct ice_vf *vf);
+int ice_vc_set_rss_hashcfg(struct ice_vf *vf, u8 *msg);
+
+#endif /* _ICE_VIRT_RSS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/virt/virtchnl.c
index f445e33b2028..f3f921134379 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/virt/virtchnl.c
@@ -1,170 +1,20 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2022, Intel Corporation. */
-#include "ice_virtchnl.h"
+#include "virtchnl.h"
+#include "queues.h"
+#include "rss.h"
#include "ice_vf_lib_private.h"
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
-#include "ice_virtchnl_allowlist.h"
+#include "allowlist.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"
#include "ice_flex_pipe.h"
#include "ice_dcb_lib.h"
-#define FIELD_SELECTOR(proto_hdr_field) \
- BIT((proto_hdr_field) & PROTO_HDR_FIELD_MASK)
-
-struct ice_vc_hdr_match_type {
- u32 vc_hdr; /* virtchnl headers (VIRTCHNL_PROTO_HDR_XXX) */
- u32 ice_hdr; /* ice headers (ICE_FLOW_SEG_HDR_XXX) */
-};
-
-static const struct ice_vc_hdr_match_type ice_vc_hdr_list[] = {
- {VIRTCHNL_PROTO_HDR_NONE, ICE_FLOW_SEG_HDR_NONE},
- {VIRTCHNL_PROTO_HDR_ETH, ICE_FLOW_SEG_HDR_ETH},
- {VIRTCHNL_PROTO_HDR_S_VLAN, ICE_FLOW_SEG_HDR_VLAN},
- {VIRTCHNL_PROTO_HDR_C_VLAN, ICE_FLOW_SEG_HDR_VLAN},
- {VIRTCHNL_PROTO_HDR_IPV4, ICE_FLOW_SEG_HDR_IPV4 |
- ICE_FLOW_SEG_HDR_IPV_OTHER},
- {VIRTCHNL_PROTO_HDR_IPV6, ICE_FLOW_SEG_HDR_IPV6 |
- ICE_FLOW_SEG_HDR_IPV_OTHER},
- {VIRTCHNL_PROTO_HDR_TCP, ICE_FLOW_SEG_HDR_TCP},
- {VIRTCHNL_PROTO_HDR_UDP, ICE_FLOW_SEG_HDR_UDP},
- {VIRTCHNL_PROTO_HDR_SCTP, ICE_FLOW_SEG_HDR_SCTP},
- {VIRTCHNL_PROTO_HDR_PPPOE, ICE_FLOW_SEG_HDR_PPPOE},
- {VIRTCHNL_PROTO_HDR_GTPU_IP, ICE_FLOW_SEG_HDR_GTPU_IP},
- {VIRTCHNL_PROTO_HDR_GTPU_EH, ICE_FLOW_SEG_HDR_GTPU_EH},
- {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_DWN,
- ICE_FLOW_SEG_HDR_GTPU_DWN},
- {VIRTCHNL_PROTO_HDR_GTPU_EH_PDU_UP,
- ICE_FLOW_SEG_HDR_GTPU_UP},
- {VIRTCHNL_PROTO_HDR_L2TPV3, ICE_FLOW_SEG_HDR_L2TPV3},
- {VIRTCHNL_PROTO_HDR_ESP, ICE_FLOW_SEG_HDR_ESP},
- {VIRTCHNL_PROTO_HDR_AH, ICE_FLOW_SEG_HDR_AH},
- {VIRTCHNL_PROTO_HDR_PFCP, ICE_FLOW_SEG_HDR_PFCP_SESSION},
-};
-
-struct ice_vc_hash_field_match_type {
- u32 vc_hdr; /* virtchnl headers
- * (VIRTCHNL_PROTO_HDR_XXX)
- */
- u32 vc_hash_field; /* virtchnl hash fields selector
- * FIELD_SELECTOR((VIRTCHNL_PROTO_HDR_ETH_XXX))
- */
- u64 ice_hash_field; /* ice hash fields
- * (BIT_ULL(ICE_FLOW_FIELD_IDX_XXX))
- */
-};
-
-static const struct
-ice_vc_hash_field_match_type ice_vc_hash_field_list[] = {
- {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)},
- {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA)},
- {VIRTCHNL_PROTO_HDR_ETH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_DST),
- ICE_FLOW_HASH_ETH},
- {VIRTCHNL_PROTO_HDR_ETH,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ETH_ETHERTYPE),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_TYPE)},
- {VIRTCHNL_PROTO_HDR_S_VLAN,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_S_VLAN_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_S_VLAN)},
- {VIRTCHNL_PROTO_HDR_C_VLAN,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_C_VLAN_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_C_VLAN)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
- ICE_FLOW_HASH_IPV4},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- ICE_FLOW_HASH_IPV4 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV4, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
- ICE_FLOW_HASH_IPV6},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) |
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- ICE_FLOW_HASH_IPV6 | BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_IPV6, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_PROT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_TCP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_TCP_DST_PORT),
- ICE_FLOW_HASH_TCP_PORT},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_UDP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_DST_PORT),
- ICE_FLOW_HASH_UDP_PORT},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
- BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)},
- {VIRTCHNL_PROTO_HDR_SCTP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_SRC_PORT) |
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_SCTP_DST_PORT),
- ICE_FLOW_HASH_SCTP_PORT},
- {VIRTCHNL_PROTO_HDR_PPPOE,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PPPOE_SESS_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)},
- {VIRTCHNL_PROTO_HDR_GTPU_IP,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_GTPU_IP_TEID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)},
- {VIRTCHNL_PROTO_HDR_L2TPV3,
- FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_L2TPV3_SESS_ID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)},
- {VIRTCHNL_PROTO_HDR_ESP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_ESP_SPI),
- BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)},
- {VIRTCHNL_PROTO_HDR_AH, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_AH_SPI),
- BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)},
- {VIRTCHNL_PROTO_HDR_PFCP, FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_PFCP_SEID),
- BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)},
-};
-
/**
* ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
* @pf: pointer to the PF structure
@@ -304,10 +154,10 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
msg, msglen, NULL);
- if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
+ if (aq_ret && pf->hw.mailboxq.sq_last_status != LIBIE_AQ_RC_ENOSYS) {
dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %s\n",
vf->vf_id, aq_ret,
- ice_aq_str(pf->hw.mailboxq.sq_last_status));
+ libie_aq_str(pf->hw.mailboxq.sq_last_status));
return -EIO;
}
@@ -338,28 +188,6 @@ static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
}
/**
- * ice_vc_get_max_frame_size - get max frame size allowed for VF
- * @vf: VF used to determine max frame size
- *
- * Max frame size is determined based on the current port's max frame size and
- * whether a port VLAN is configured on this VF. The VF is not aware whether
- * it's in a port VLAN so the PF needs to account for this in max frame size
- * checks and sending the max frame size to the VF.
- */
-static u16 ice_vc_get_max_frame_size(struct ice_vf *vf)
-{
- struct ice_port_info *pi = ice_vf_get_port_info(vf);
- u16 max_frame_size;
-
- max_frame_size = pi->phy.link_info.max_frame_size;
-
- if (ice_vf_is_port_vlan_ena(vf))
- max_frame_size -= VLAN_HLEN;
-
- return max_frame_size;
-}
-
-/**
* ice_vc_get_vlan_caps
* @hw: pointer to the hw
* @vf: pointer to the VF info
@@ -498,6 +326,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_QOS)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_QOS;
+ if (vf->driver_caps & VIRTCHNL_VF_CAP_PTP)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_PTP;
+
vfres->num_vsis = 1;
/* Tx and Rx queue are equal for VF */
vfres->num_queue_pairs = vsi->num_txq;
@@ -556,488 +387,6 @@ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id)
}
/**
- * ice_vc_isvalid_q_id
- * @vsi: VSI to check queue ID against
- * @qid: VSI relative queue ID
- *
- * check for the valid queue ID
- */
-static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u8 qid)
-{
- /* allocated Tx and Rx queues should be always equal for VF VSI */
- return qid < vsi->alloc_txq;
-}
-
-/**
- * ice_vc_isvalid_ring_len
- * @ring_len: length of ring
- *
- * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE
- * or zero
- */
-static bool ice_vc_isvalid_ring_len(u16 ring_len)
-{
- return ring_len == 0 ||
- (ring_len >= ICE_MIN_NUM_DESC &&
- ring_len <= ICE_MAX_NUM_DESC &&
- !(ring_len % ICE_REQ_DESC_MULTIPLE));
-}
-
-/**
- * ice_vc_validate_pattern
- * @vf: pointer to the VF info
- * @proto: virtchnl protocol headers
- *
- * validate the pattern is supported or not.
- *
- * Return: true on success, false on error.
- */
-bool
-ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto)
-{
- bool is_ipv4 = false;
- bool is_ipv6 = false;
- bool is_udp = false;
- u16 ptype = -1;
- int i = 0;
-
- while (i < proto->count &&
- proto->proto_hdr[i].type != VIRTCHNL_PROTO_HDR_NONE) {
- switch (proto->proto_hdr[i].type) {
- case VIRTCHNL_PROTO_HDR_ETH:
- ptype = ICE_PTYPE_MAC_PAY;
- break;
- case VIRTCHNL_PROTO_HDR_IPV4:
- ptype = ICE_PTYPE_IPV4_PAY;
- is_ipv4 = true;
- break;
- case VIRTCHNL_PROTO_HDR_IPV6:
- ptype = ICE_PTYPE_IPV6_PAY;
- is_ipv6 = true;
- break;
- case VIRTCHNL_PROTO_HDR_UDP:
- if (is_ipv4)
- ptype = ICE_PTYPE_IPV4_UDP_PAY;
- else if (is_ipv6)
- ptype = ICE_PTYPE_IPV6_UDP_PAY;
- is_udp = true;
- break;
- case VIRTCHNL_PROTO_HDR_TCP:
- if (is_ipv4)
- ptype = ICE_PTYPE_IPV4_TCP_PAY;
- else if (is_ipv6)
- ptype = ICE_PTYPE_IPV6_TCP_PAY;
- break;
- case VIRTCHNL_PROTO_HDR_SCTP:
- if (is_ipv4)
- ptype = ICE_PTYPE_IPV4_SCTP_PAY;
- else if (is_ipv6)
- ptype = ICE_PTYPE_IPV6_SCTP_PAY;
- break;
- case VIRTCHNL_PROTO_HDR_GTPU_IP:
- case VIRTCHNL_PROTO_HDR_GTPU_EH:
- if (is_ipv4)
- ptype = ICE_MAC_IPV4_GTPU;
- else if (is_ipv6)
- ptype = ICE_MAC_IPV6_GTPU;
- goto out;
- case VIRTCHNL_PROTO_HDR_L2TPV3:
- if (is_ipv4)
- ptype = ICE_MAC_IPV4_L2TPV3;
- else if (is_ipv6)
- ptype = ICE_MAC_IPV6_L2TPV3;
- goto out;
- case VIRTCHNL_PROTO_HDR_ESP:
- if (is_ipv4)
- ptype = is_udp ? ICE_MAC_IPV4_NAT_T_ESP :
- ICE_MAC_IPV4_ESP;
- else if (is_ipv6)
- ptype = is_udp ? ICE_MAC_IPV6_NAT_T_ESP :
- ICE_MAC_IPV6_ESP;
- goto out;
- case VIRTCHNL_PROTO_HDR_AH:
- if (is_ipv4)
- ptype = ICE_MAC_IPV4_AH;
- else if (is_ipv6)
- ptype = ICE_MAC_IPV6_AH;
- goto out;
- case VIRTCHNL_PROTO_HDR_PFCP:
- if (is_ipv4)
- ptype = ICE_MAC_IPV4_PFCP_SESSION;
- else if (is_ipv6)
- ptype = ICE_MAC_IPV6_PFCP_SESSION;
- goto out;
- default:
- break;
- }
- i++;
- }
-
-out:
- return ice_hw_ptype_ena(&vf->pf->hw, ptype);
-}
-
-/**
- * ice_vc_parse_rss_cfg - parses hash fields and headers from
- * a specific virtchnl RSS cfg
- * @hw: pointer to the hardware
- * @rss_cfg: pointer to the virtchnl RSS cfg
- * @hash_cfg: pointer to the HW hash configuration
- *
- * Return true if all the protocol header and hash fields in the RSS cfg could
- * be parsed, else return false
- *
- * This function parses the virtchnl RSS cfg to be the intended
- * hash fields and the intended header for RSS configuration
- */
-static bool ice_vc_parse_rss_cfg(struct ice_hw *hw,
- struct virtchnl_rss_cfg *rss_cfg,
- struct ice_rss_hash_cfg *hash_cfg)
-{
- const struct ice_vc_hash_field_match_type *hf_list;
- const struct ice_vc_hdr_match_type *hdr_list;
- int i, hf_list_len, hdr_list_len;
- u32 *addl_hdrs = &hash_cfg->addl_hdrs;
- u64 *hash_flds = &hash_cfg->hash_flds;
-
- /* set outer layer RSS as default */
- hash_cfg->hdr_type = ICE_RSS_OUTER_HEADERS;
-
- if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
- hash_cfg->symm = true;
- else
- hash_cfg->symm = false;
-
- hf_list = ice_vc_hash_field_list;
- hf_list_len = ARRAY_SIZE(ice_vc_hash_field_list);
- hdr_list = ice_vc_hdr_list;
- hdr_list_len = ARRAY_SIZE(ice_vc_hdr_list);
-
- for (i = 0; i < rss_cfg->proto_hdrs.count; i++) {
- struct virtchnl_proto_hdr *proto_hdr =
- &rss_cfg->proto_hdrs.proto_hdr[i];
- bool hdr_found = false;
- int j;
-
- /* Find matched ice headers according to virtchnl headers. */
- for (j = 0; j < hdr_list_len; j++) {
- struct ice_vc_hdr_match_type hdr_map = hdr_list[j];
-
- if (proto_hdr->type == hdr_map.vc_hdr) {
- *addl_hdrs |= hdr_map.ice_hdr;
- hdr_found = true;
- }
- }
-
- if (!hdr_found)
- return false;
-
- /* Find matched ice hash fields according to
- * virtchnl hash fields.
- */
- for (j = 0; j < hf_list_len; j++) {
- struct ice_vc_hash_field_match_type hf_map = hf_list[j];
-
- if (proto_hdr->type == hf_map.vc_hdr &&
- proto_hdr->field_selector == hf_map.vc_hash_field) {
- *hash_flds |= hf_map.ice_hash_field;
- break;
- }
- }
- }
-
- return true;
-}
-
-/**
- * ice_vf_adv_rss_offload_ena - determine if capabilities support advanced
- * RSS offloads
- * @caps: VF driver negotiated capabilities
- *
- * Return true if VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF capability is set,
- * else return false
- */
-static bool ice_vf_adv_rss_offload_ena(u32 caps)
-{
- return !!(caps & VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF);
-}
-
-/**
- * ice_vc_handle_rss_cfg
- * @vf: pointer to the VF info
- * @msg: pointer to the message buffer
- * @add: add a RSS config if true, otherwise delete a RSS config
- *
- * This function adds/deletes a RSS config
- */
-static int ice_vc_handle_rss_cfg(struct ice_vf *vf, u8 *msg, bool add)
-{
- u32 v_opcode = add ? VIRTCHNL_OP_ADD_RSS_CFG : VIRTCHNL_OP_DEL_RSS_CFG;
- struct virtchnl_rss_cfg *rss_cfg = (struct virtchnl_rss_cfg *)msg;
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_hw *hw = &vf->pf->hw;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- dev_dbg(dev, "VF %d attempting to configure RSS, but RSS is not supported by the PF\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
- goto error_param;
- }
-
- if (!ice_vf_adv_rss_offload_ena(vf->driver_caps)) {
- dev_dbg(dev, "VF %d attempting to configure RSS, but Advanced RSS offload is not supported\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (rss_cfg->proto_hdrs.count > VIRTCHNL_MAX_NUM_PROTO_HDRS ||
- rss_cfg->rss_algorithm < VIRTCHNL_RSS_ALG_TOEPLITZ_ASYMMETRIC ||
- rss_cfg->rss_algorithm > VIRTCHNL_RSS_ALG_XOR_SYMMETRIC) {
- dev_dbg(dev, "VF %d attempting to configure RSS, but RSS configuration is not valid\n",
- vf->vf_id);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (rss_cfg->rss_algorithm == VIRTCHNL_RSS_ALG_R_ASYMMETRIC) {
- struct ice_vsi_ctx *ctx;
- u8 lut_type, hash_type;
- int status;
-
- lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
- hash_type = add ? ICE_AQ_VSI_Q_OPT_RSS_HASH_XOR :
- ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
-
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx) {
- v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
- goto error_param;
- }
-
- ctx->info.q_opt_rss =
- FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
- FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);
-
- /* Preserve existing queueing option setting */
- ctx->info.q_opt_rss |= (vsi->info.q_opt_rss &
- ICE_AQ_VSI_Q_OPT_RSS_GBL_LUT_M);
- ctx->info.q_opt_tc = vsi->info.q_opt_tc;
- ctx->info.q_opt_flags = vsi->info.q_opt_rss;
-
- ctx->info.valid_sections =
- cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
-
- status = ice_update_vsi(hw, vsi->idx, ctx, NULL);
- if (status) {
- dev_err(dev, "update VSI for RSS failed, err %d aq_err %s\n",
- status, ice_aq_str(hw->adminq.sq_last_status));
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- } else {
- vsi->info.q_opt_rss = ctx->info.q_opt_rss;
- }
-
- kfree(ctx);
- } else {
- struct ice_rss_hash_cfg cfg;
-
- /* Only check for none raw pattern case */
- if (!ice_vc_validate_pattern(vf, &rss_cfg->proto_hdrs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
- cfg.addl_hdrs = ICE_FLOW_SEG_HDR_NONE;
- cfg.hash_flds = ICE_HASH_INVALID;
- cfg.hdr_type = ICE_RSS_ANY_HEADERS;
-
- if (!ice_vc_parse_rss_cfg(hw, rss_cfg, &cfg)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (add) {
- if (ice_add_rss_cfg(hw, vsi, &cfg)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev, "ice_add_rss_cfg failed for vsi = %d, v_ret = %d\n",
- vsi->vsi_num, v_ret);
- }
- } else {
- int status;
-
- status = ice_rem_rss_cfg(hw, vsi->idx, &cfg);
- /* We just ignore -ENOENT, because if two configurations
- * share the same profile remove one of them actually
- * removes both, since the profile is deleted.
- */
- if (status && status != -ENOENT) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev, "ice_rem_rss_cfg failed for VF ID:%d, error:%d\n",
- vf->vf_id, status);
- }
- }
- }
-
-error_param:
- return ice_vc_send_msg_to_vf(vf, v_opcode, v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_config_rss_key
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * Configure the VF's RSS key
- */
-static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_rss_key *vrk =
- (struct virtchnl_rss_key *)msg;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (ice_set_rss_key(vsi, vrk->key))
- v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
-error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vc_config_rss_lut
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * Configure the VF's RSS LUT
- */
-static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vrl->lut_entries != ICE_LUT_VSI_SIZE) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (ice_set_rss_lut(vsi, vrl->lut, ICE_LUT_VSI_SIZE))
- v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
-error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vc_config_rss_hfunc
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * Configure the VF's RSS Hash function
- */
-static int ice_vc_config_rss_hfunc(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_rss_hfunc *vrh = (struct virtchnl_rss_hfunc *)msg;
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
- struct ice_vsi *vsi;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vrh->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vrh->rss_algorithm == VIRTCHNL_RSS_ALG_TOEPLITZ_SYMMETRIC)
- hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
-
- if (ice_set_rss_hfunc(vsi, hfunc))
- v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
-error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_HFUNC, v_ret,
- NULL, 0);
-}
-
-/**
* ice_vc_get_qos_caps - Get current QoS caps from PF
* @vf: pointer to the VF info
*
@@ -1119,110 +468,6 @@ err:
}
/**
- * ice_vf_cfg_qs_bw - Configure per queue bandwidth
- * @vf: pointer to the VF info
- * @num_queues: number of queues to be configured
- *
- * Configure per queue bandwidth.
- *
- * Return: 0 on success or negative error value.
- */
-static int ice_vf_cfg_qs_bw(struct ice_vf *vf, u16 num_queues)
-{
- struct ice_hw *hw = &vf->pf->hw;
- struct ice_vsi *vsi;
- int ret;
- u16 i;
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- return -EINVAL;
-
- for (i = 0; i < num_queues; i++) {
- u32 p_rate, min_rate;
- u8 tc;
-
- p_rate = vf->qs_bw[i].peak;
- min_rate = vf->qs_bw[i].committed;
- tc = vf->qs_bw[i].tc;
- if (p_rate)
- ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
- vf->qs_bw[i].queue_id,
- ICE_MAX_BW, p_rate);
- else
- ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
- vf->qs_bw[i].queue_id,
- ICE_MAX_BW);
- if (ret)
- return ret;
-
- if (min_rate)
- ret = ice_cfg_q_bw_lmt(hw->port_info, vsi->idx, tc,
- vf->qs_bw[i].queue_id,
- ICE_MIN_BW, min_rate);
- else
- ret = ice_cfg_q_bw_dflt_lmt(hw->port_info, vsi->idx, tc,
- vf->qs_bw[i].queue_id,
- ICE_MIN_BW);
-
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-/**
- * ice_vf_cfg_q_quanta_profile - Configure quanta profile
- * @vf: pointer to the VF info
- * @quanta_prof_idx: pointer to the quanta profile index
- * @quanta_size: quanta size to be set
- *
- * This function chooses an available quanta profile and configures the register.
- * The quanta profiles are divided evenly among the device ports and are then
- * available to the specific PF and its VFs. The first profile for each PF is a
- * reserved default profile; only the quanta size of the remaining unused
- * profiles can be modified.
- *
- * Return: 0 on success or negative error value.
- */
-static int ice_vf_cfg_q_quanta_profile(struct ice_vf *vf, u16 quanta_size,
- u16 *quanta_prof_idx)
-{
- const u16 n_desc = calc_quanta_desc(quanta_size);
- struct ice_hw *hw = &vf->pf->hw;
- const u16 n_cmd = 2 * n_desc;
- struct ice_pf *pf = vf->pf;
- u16 per_pf, begin_id;
- u8 n_used;
- u32 reg;
-
- begin_id = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) / hw->dev_caps.num_funcs *
- hw->logical_pf_id;
-
- if (quanta_size == ICE_DFLT_QUANTA) {
- *quanta_prof_idx = begin_id;
- } else {
- per_pf = (GLCOMM_QUANTA_PROF_MAX_INDEX + 1) /
- hw->dev_caps.num_funcs;
- n_used = pf->num_quanta_prof_used;
- if (n_used < per_pf) {
- *quanta_prof_idx = begin_id + 1 + n_used;
- pf->num_quanta_prof_used++;
- } else {
- return -EINVAL;
- }
- }
-
- reg = FIELD_PREP(GLCOMM_QUANTA_PROF_QUANTA_SIZE_M, quanta_size) |
- FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_CMD_M, n_cmd) |
- FIELD_PREP(GLCOMM_QUANTA_PROF_MAX_DESC_M, n_desc);
- wr32(hw, GLCOMM_QUANTA_PROF(*quanta_prof_idx), reg);
-
- return 0;
-}
-
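Aside: the per-PF partitioning done by ice_vf_cfg_q_quanta_profile() above is plain integer division over the global profile table. The standalone C sketch below mirrors that index math with made-up values for the table size, the number of functions and the logical PF id; it is illustrative only, not kernel code.

#include <stdio.h>

#define QUANTA_PROF_MAX_INDEX 15 /* assumed size of the profile table */

int main(void)
{
	unsigned int num_funcs = 4;      /* assumed number of PFs on the device */
	unsigned int logical_pf_id = 1;  /* assumed PF running this code */
	unsigned int per_pf = (QUANTA_PROF_MAX_INDEX + 1) / num_funcs;
	unsigned int begin_id = per_pf * logical_pf_id;

	/* begin_id is the reserved default profile; the remaining per_pf - 1
	 * profiles in this PF's slice may have their quanta size changed.
	 */
	printf("PF %u owns profiles %u..%u, default profile is %u\n",
	       logical_pf_id, begin_id, begin_id + per_pf - 1, begin_id);
	return 0;
}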
-/**
* ice_vc_cfg_promiscuous_mode_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1404,744 +649,6 @@ error_param:
}
/**
- * ice_vc_validate_vqs_bitmaps - validate Rx/Tx queue bitmaps from VIRTCHNL
- * @vqs: virtchnl_queue_select structure containing bitmaps to validate
- *
- * Return true on successful validation, else false
- */
-static bool ice_vc_validate_vqs_bitmaps(struct virtchnl_queue_select *vqs)
-{
- if ((!vqs->rx_queues && !vqs->tx_queues) ||
- vqs->rx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF) ||
- vqs->tx_queues >= BIT(ICE_MAX_RSS_QS_PER_VF))
- return false;
-
- return true;
-}
-
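Aside: ice_vc_validate_vqs_bitmaps() above only checks that at least one queue bit is set and that no bit beyond ICE_MAX_RSS_QS_PER_VF is used. A minimal userspace sketch of the same predicate follows; the per-VF limit of 16 queues is an assumption for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_QS_PER_VF 16 /* assumed value of ICE_MAX_RSS_QS_PER_VF */

static bool validate_vqs_bitmaps(uint32_t rx_queues, uint32_t tx_queues)
{
	/* reject an empty selection or any bit beyond the per-VF queue limit */
	if ((!rx_queues && !tx_queues) ||
	    rx_queues >= (1u << MAX_QS_PER_VF) ||
	    tx_queues >= (1u << MAX_QS_PER_VF))
		return false;

	return true;
}

int main(void)
{
	printf("%d\n", validate_vqs_bitmaps(0x3, 0x1));      /* 1: queues in range */
	printf("%d\n", validate_vqs_bitmaps(1u << 16, 0x0)); /* 0: bit 16 out of range */
	return 0;
}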
-/**
- * ice_vf_ena_txq_interrupt - enable Tx queue interrupt via QINT_TQCTL
- * @vsi: VSI of the VF to configure
- * @q_idx: VF queue index used to determine the queue in the PF's space
- */
-static void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx)
-{
- struct ice_hw *hw = &vsi->back->hw;
- u32 pfq = vsi->txq_map[q_idx];
- u32 reg;
-
- reg = rd32(hw, QINT_TQCTL(pfq));
-
- /* MSI-X index 0 in the VF's space is always for the OICR, which means
- * this is most likely a poll mode VF driver, so don't enable an
- * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
- */
- if (!(reg & QINT_TQCTL_MSIX_INDX_M))
- return;
-
- wr32(hw, QINT_TQCTL(pfq), reg | QINT_TQCTL_CAUSE_ENA_M);
-}
-
-/**
- * ice_vf_ena_rxq_interrupt - enable Rx queue interrupt via QINT_RQCTL
- * @vsi: VSI of the VF to configure
- * @q_idx: VF queue index used to determine the queue in the PF's space
- */
-static void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx)
-{
- struct ice_hw *hw = &vsi->back->hw;
- u32 pfq = vsi->rxq_map[q_idx];
- u32 reg;
-
- reg = rd32(hw, QINT_RQCTL(pfq));
-
- /* MSI-X index 0 in the VF's space is always for the OICR, which means
- * this is most likely a poll mode VF driver, so don't enable an
- * interrupt that was never configured via VIRTCHNL_OP_CONFIG_IRQ_MAP
- */
- if (!(reg & QINT_RQCTL_MSIX_INDX_M))
- return;
-
- wr32(hw, QINT_RQCTL(pfq), reg | QINT_RQCTL_CAUSE_ENA_M);
-}
-
-/**
- * ice_vc_ena_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to enable all or specific queue(s)
- */
-static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_queue_select *vqs =
- (struct virtchnl_queue_select *)msg;
- struct ice_vsi *vsi;
- unsigned long q_map;
- u16 vf_q_id;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_validate_vqs_bitmaps(vqs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Enable only Rx rings, Tx rings were enabled by the FW when the
- * Tx queue group list was configured and the context bits were
- * programmed using ice_vsi_cfg_txqs
- */
- q_map = vqs->rx_queues;
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Skip queue if enabled */
- if (test_bit(vf_q_id, vf->rxq_ena))
- continue;
-
- if (ice_vsi_ctrl_one_rx_ring(vsi, true, vf_q_id, true)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
- vf_q_id, vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- ice_vf_ena_rxq_interrupt(vsi, vf_q_id);
- set_bit(vf_q_id, vf->rxq_ena);
- }
-
- q_map = vqs->tx_queues;
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Skip queue if enabled */
- if (test_bit(vf_q_id, vf->txq_ena))
- continue;
-
- ice_vf_ena_txq_interrupt(vsi, vf_q_id);
- set_bit(vf_q_id, vf->txq_ena);
- }
-
- /* Set flag to indicate that queues are enabled */
- if (v_ret == VIRTCHNL_STATUS_SUCCESS)
- set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vf_vsi_dis_single_txq - disable a single Tx queue
- * @vf: VF to disable queue for
- * @vsi: VSI for the VF
- * @q_id: VF relative (0-based) queue ID
- *
- * Attempt to disable the Tx queue passed in. If the Tx queue was successfully
- * disabled then clear q_id bit in the enabled queues bitmap and return
- * success. Otherwise return error.
- */
-static int
-ice_vf_vsi_dis_single_txq(struct ice_vf *vf, struct ice_vsi *vsi, u16 q_id)
-{
- struct ice_txq_meta txq_meta = { 0 };
- struct ice_tx_ring *ring;
- int err;
-
- if (!test_bit(q_id, vf->txq_ena))
- dev_dbg(ice_pf_to_dev(vsi->back), "Queue %u on VSI %u is not enabled, but stopping it anyway\n",
- q_id, vsi->vsi_num);
-
- ring = vsi->tx_rings[q_id];
- if (!ring)
- return -EINVAL;
-
- ice_fill_txq_meta(vsi, ring, &txq_meta);
-
- err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id, ring, &txq_meta);
- if (err) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
- q_id, vsi->vsi_num);
- return err;
- }
-
- /* Clear enabled queues flag */
- clear_bit(q_id, vf->txq_ena);
-
- return 0;
-}
-
-/**
- * ice_vc_dis_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to disable all or specific queue(s)
- */
-static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_queue_select *vqs =
- (struct virtchnl_queue_select *)msg;
- struct ice_vsi *vsi;
- unsigned long q_map;
- u16 vf_q_id;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
- !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (!ice_vc_validate_vqs_bitmaps(vqs)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (vqs->tx_queues) {
- q_map = vqs->tx_queues;
-
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- if (ice_vf_vsi_dis_single_txq(vf, vsi, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
- }
- }
-
- q_map = vqs->rx_queues;
- /* speed up Rx queue disable by batching them if possible */
- if (q_map &&
- bitmap_equal(&q_map, vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF)) {
- if (ice_vsi_stop_all_rx_rings(vsi)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to stop all Rx rings on VSI %d\n",
- vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
- } else if (q_map) {
- for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Skip queue if not enabled */
- if (!test_bit(vf_q_id, vf->rxq_ena))
- continue;
-
- if (ice_vsi_ctrl_one_rx_ring(vsi, false, vf_q_id,
- true)) {
- dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
- vf_q_id, vsi->vsi_num);
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* Clear enabled queues flag */
- clear_bit(vf_q_id, vf->rxq_ena);
- }
- }
-
- /* Clear enabled queues flag */
- if (v_ret == VIRTCHNL_STATUS_SUCCESS && ice_vf_has_no_qs_ena(vf))
- clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_cfg_interrupt
- * @vf: pointer to the VF info
- * @vsi: the VSI being configured
- * @map: vector map for mapping vectors to queues
- * @q_vector: structure for interrupt vector
- *
- * Configure the IRQ to queue map.
- */
-static enum virtchnl_status_code
-ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi,
- struct virtchnl_vector_map *map,
- struct ice_q_vector *q_vector)
-{
- u16 vsi_q_id, vsi_q_id_idx;
- unsigned long qmap;
-
- q_vector->num_ring_rx = 0;
- q_vector->num_ring_tx = 0;
-
- qmap = map->rxq_map;
- for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
- vsi_q_id = vsi_q_id_idx;
-
- if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
- return VIRTCHNL_STATUS_ERR_PARAM;
-
- q_vector->num_ring_rx++;
- q_vector->rx.itr_idx = map->rxitr_idx;
- vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_rxq_interrupt(vsi, vsi_q_id,
- q_vector->vf_reg_idx,
- q_vector->rx.itr_idx);
- }
-
- qmap = map->txq_map;
- for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) {
- vsi_q_id = vsi_q_id_idx;
-
- if (!ice_vc_isvalid_q_id(vsi, vsi_q_id))
- return VIRTCHNL_STATUS_ERR_PARAM;
-
- q_vector->num_ring_tx++;
- q_vector->tx.itr_idx = map->txitr_idx;
- vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_txq_interrupt(vsi, vsi_q_id,
- q_vector->vf_reg_idx,
- q_vector->tx.itr_idx);
- }
-
- return VIRTCHNL_STATUS_SUCCESS;
-}
-
-/**
- * ice_vc_cfg_irq_map_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to configure the IRQ to queue map
- */
-static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- u16 num_q_vectors_mapped, vsi_id, vector_id;
- struct virtchnl_irq_map_info *irqmap_info;
- struct virtchnl_vector_map *map;
- struct ice_vsi *vsi;
- int i;
-
- irqmap_info = (struct virtchnl_irq_map_info *)msg;
- num_q_vectors_mapped = irqmap_info->num_vectors;
-
- /* Check to make sure number of VF vectors mapped is not greater than
- * number of VF vectors originally allocated, and check that
- * there is actually at least a single VF queue vector mapped
- */
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- vf->num_msix < num_q_vectors_mapped ||
- !num_q_vectors_mapped) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- for (i = 0; i < num_q_vectors_mapped; i++) {
- struct ice_q_vector *q_vector;
-
- map = &irqmap_info->vecmap[i];
-
- vector_id = map->vector_id;
- vsi_id = map->vsi_id;
- /* vector_id is always 0-based for each VF, and can never be
- * larger than or equal to the max allowed interrupts per VF
- */
- if (!(vector_id < vf->num_msix) ||
- !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
- (!vector_id && (map->rxq_map || map->txq_map))) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* No need to map VF miscellaneous or rogue vector */
- if (!vector_id)
- continue;
-
- /* Subtract the non-queue vector from the vector_id passed by the VF
- * to get the actual VSI queue vector array index
- */
- q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
- if (!q_vector) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- /* look out for an invalid queue index */
- v_ret = ice_cfg_interrupt(vf, vsi, map, q_vector);
- if (v_ret)
- goto error_param;
- }
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
- NULL, 0);
-}
-
-/**
- * ice_vc_cfg_q_bw - Configure per queue bandwidth
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer which holds the command descriptor
- *
- * Configure VF queues bandwidth.
- *
- * Return: 0 on success or negative error value.
- */
-static int ice_vc_cfg_q_bw(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_queues_bw_cfg *qbw =
- (struct virtchnl_queues_bw_cfg *)msg;
- struct ice_vsi *vsi;
- u16 i;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- !ice_vc_isvalid_vsi_id(vf, qbw->vsi_id)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- if (qbw->num_queues > ICE_MAX_RSS_QS_PER_VF ||
- qbw->num_queues > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
- dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
- vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- for (i = 0; i < qbw->num_queues; i++) {
- if (qbw->cfg[i].shaper.peak != 0 && vf->max_tx_rate != 0 &&
- qbw->cfg[i].shaper.peak > vf->max_tx_rate)
- dev_warn(ice_pf_to_dev(vf->pf), "The maximum queue %d rate limit configuration may not take effect because the maximum TX rate for VF-%d is %d\n",
- qbw->cfg[i].queue_id, vf->vf_id,
- vf->max_tx_rate);
- if (qbw->cfg[i].shaper.committed != 0 && vf->min_tx_rate != 0 &&
- qbw->cfg[i].shaper.committed < vf->min_tx_rate)
- dev_warn(ice_pf_to_dev(vf->pf), "The minimum queue %d rate limit configuration may not take effect because the minimum TX rate for VF-%d is %d\n",
- qbw->cfg[i].queue_id, vf->vf_id,
- vf->min_tx_rate);
- }
-
- for (i = 0; i < qbw->num_queues; i++) {
- vf->qs_bw[i].queue_id = qbw->cfg[i].queue_id;
- vf->qs_bw[i].peak = qbw->cfg[i].shaper.peak;
- vf->qs_bw[i].committed = qbw->cfg[i].shaper.committed;
- vf->qs_bw[i].tc = qbw->cfg[i].tc;
- }
-
- if (ice_vf_cfg_qs_bw(vf, qbw->num_queues))
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-
-err:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUEUE_BW,
- v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_cfg_q_quanta - Configure per queue quanta
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer which holds the command descriptor
- *
- * Configure VF queues quanta.
- *
- * Return: 0 on success or negative error value.
- */
-static int ice_vc_cfg_q_quanta(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- u16 quanta_prof_id, quanta_size, start_qid, end_qid, i;
- struct virtchnl_quanta_cfg *qquanta =
- (struct virtchnl_quanta_cfg *)msg;
- struct ice_vsi *vsi;
- int ret;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- end_qid = qquanta->queue_select.start_queue_id +
- qquanta->queue_select.num_queues;
- if (end_qid > ICE_MAX_RSS_QS_PER_VF ||
- end_qid > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
- dev_err(ice_pf_to_dev(vf->pf), "VF-%d trying to configure more than allocated number of queues: %d\n",
- vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- quanta_size = qquanta->quanta_size;
- if (quanta_size > ICE_MAX_QUANTA_SIZE ||
- quanta_size < ICE_MIN_QUANTA_SIZE) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- if (quanta_size % 64) {
- dev_err(ice_pf_to_dev(vf->pf), "quanta size must be a multiple of 64\n");
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- ret = ice_vf_cfg_q_quanta_profile(vf, quanta_size,
- &quanta_prof_id);
- if (ret) {
- v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
- goto err;
- }
-
- start_qid = qquanta->queue_select.start_queue_id;
- for (i = start_qid; i < end_qid; i++)
- vsi->tx_rings[i]->quanta_prof_id = quanta_prof_id;
-
-err:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_QUANTA,
- v_ret, NULL, 0);
-}
-
-/**
- * ice_vc_cfg_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * called from the VF to configure the Rx/Tx queues
- */
-static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_vsi_queue_config_info *qci =
- (struct virtchnl_vsi_queue_config_info *)msg;
- struct virtchnl_queue_pair_info *qpi;
- struct ice_pf *pf = vf->pf;
- struct ice_lag *lag;
- struct ice_vsi *vsi;
- u8 act_prt, pri_prt;
- int i = -1, q_idx;
-
- lag = pf->lag;
- mutex_lock(&pf->lag_mutex);
- act_prt = ICE_LAG_INVALID_PORT;
- pri_prt = pf->hw.port_info->lport;
- if (lag && lag->bonded && lag->primary) {
- act_prt = lag->active_port;
- if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
- lag->upper_netdev)
- ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
- else
- act_prt = ICE_LAG_INVALID_PORT;
- }
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
- goto error_param;
-
- if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id))
- goto error_param;
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- goto error_param;
-
- if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF ||
- qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
- dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
- vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
- goto error_param;
- }
-
- for (i = 0; i < qci->num_queue_pairs; i++) {
- if (!qci->qpair[i].rxq.crc_disable)
- continue;
-
- if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) ||
- vf->vlan_strip_ena)
- goto error_param;
- }
-
- for (i = 0; i < qci->num_queue_pairs; i++) {
- qpi = &qci->qpair[i];
- if (qpi->txq.vsi_id != qci->vsi_id ||
- qpi->rxq.vsi_id != qci->vsi_id ||
- qpi->rxq.queue_id != qpi->txq.queue_id ||
- qpi->txq.headwb_enabled ||
- !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
- !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
- !ice_vc_isvalid_q_id(vsi, qpi->txq.queue_id)) {
- goto error_param;
- }
-
- q_idx = qpi->rxq.queue_id;
-
- /* make sure selected "q_idx" is in valid range of queues
- * for selected "vsi"
- */
- if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
- goto error_param;
- }
-
- /* copy Tx queue info from VF into VSI */
- if (qpi->txq.ring_len > 0) {
- vsi->tx_rings[q_idx]->dma = qpi->txq.dma_ring_addr;
- vsi->tx_rings[q_idx]->count = qpi->txq.ring_len;
-
- /* Disable any existing queue first */
- if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx))
- goto error_param;
-
- /* Configure a queue with the requested settings */
- if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
- dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n",
- vf->vf_id, q_idx);
- goto error_param;
- }
- }
-
- /* copy Rx queue info from VF into VSI */
- if (qpi->rxq.ring_len > 0) {
- u16 max_frame_size = ice_vc_get_max_frame_size(vf);
- struct ice_rx_ring *ring = vsi->rx_rings[q_idx];
- u32 rxdid;
-
- ring->dma = qpi->rxq.dma_ring_addr;
- ring->count = qpi->rxq.ring_len;
-
- if (qpi->rxq.crc_disable)
- ring->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
- else
- ring->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
-
- if (qpi->rxq.databuffer_size != 0 &&
- (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
- qpi->rxq.databuffer_size < 1024))
- goto error_param;
- ring->rx_buf_len = qpi->rxq.databuffer_size;
- if (qpi->rxq.max_pkt_size > max_frame_size ||
- qpi->rxq.max_pkt_size < 64)
- goto error_param;
-
- ring->max_frame = qpi->rxq.max_pkt_size;
- /* add space for the port VLAN since the VF driver is
- * not expected to account for it in the MTU
- * calculation
- */
- if (ice_vf_is_port_vlan_ena(vf))
- ring->max_frame += VLAN_HLEN;
-
- if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
- dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
- vf->vf_id, q_idx);
- goto error_param;
- }
-
- /* If Rx flex descriptors are supported, select the RXDID for Rx
- * queues. Otherwise, use the legacy 32-byte descriptor
- * format. The legacy 16-byte descriptor is not supported;
- * if that RXDID is selected, return an error.
- */
- if (vf->driver_caps &
- VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
- rxdid = qpi->rxq.rxdid;
- if (!(BIT(rxdid) & pf->supported_rxdids))
- goto error_param;
- } else {
- rxdid = ICE_RXDID_LEGACY_1;
- }
-
- ice_write_qrxflxp_cntxt(&vsi->back->hw,
- vsi->rxq_map[q_idx],
- rxdid, 0x03, false);
- }
- }
-
- if (lag && lag->bonded && lag->primary &&
- act_prt != ICE_LAG_INVALID_PORT)
- ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
- mutex_unlock(&pf->lag_mutex);
-
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- VIRTCHNL_STATUS_SUCCESS, NULL, 0);
-error_param:
- /* disable whatever we can */
- for (; i >= 0; i--) {
- if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true))
- dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n",
- vf->vf_id, i);
- if (ice_vf_vsi_dis_single_txq(vf, vsi, i))
- dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n",
- vf->vf_id, i);
- }
-
- if (lag && lag->bonded && lag->primary &&
- act_prt != ICE_LAG_INVALID_PORT)
- ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
- mutex_unlock(&pf->lag_mutex);
-
- ice_lag_move_new_vf_nodes(vf);
-
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
-}
-
-/**
* ice_can_vf_change_mac
* @vf: pointer to the VF info
*
@@ -2234,6 +741,51 @@ ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
}
/**
+ * ice_is_mc_lldp_eth_addr - check if the given MAC is a multicast LLDP address
+ * @mac: address to check
+ *
+ * Return: true if the address is one of the three possible LLDP multicast
+ * addresses, false otherwise.
+ */
+static bool ice_is_mc_lldp_eth_addr(const u8 *mac)
+{
+ const u8 lldp_mac_base[] = {0x01, 0x80, 0xc2, 0x00, 0x00};
+
+ if (memcmp(mac, lldp_mac_base, sizeof(lldp_mac_base)))
+ return false;
+
+ return (mac[5] == 0x0e || mac[5] == 0x03 || mac[5] == 0x00);
+}
+
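Aside: the new helper above matches the fixed 01:80:c2:00:00 prefix and then one of the three reserved last octets. A self-contained C version of the same check, shown for illustration only, is below; the sample address is the nearest-bridge LLDP group.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static bool is_mc_lldp_eth_addr(const uint8_t *mac)
{
	const uint8_t lldp_mac_base[] = {0x01, 0x80, 0xc2, 0x00, 0x00};

	if (memcmp(mac, lldp_mac_base, sizeof(lldp_mac_base)))
		return false;

	/* 01:80:c2:00:00:0e, :03 and :00 are the LLDP multicast groups */
	return mac[5] == 0x0e || mac[5] == 0x03 || mac[5] == 0x00;
}

int main(void)
{
	const uint8_t nearest_bridge[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e };

	printf("%d\n", is_mc_lldp_eth_addr(nearest_bridge)); /* prints 1 */
	return 0;
}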
+/**
+ * ice_vc_can_add_mac - check if the VF is allowed to add a given MAC
+ * @vf: a VF to add the address to
+ * @mac: address to check
+ *
+ * Return: true if the VF is allowed to add such MAC address, false otherwise.
+ */
+static bool ice_vc_can_add_mac(const struct ice_vf *vf, const u8 *mac)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+
+ if (is_unicast_ether_addr(mac) &&
+ !ice_can_vf_change_mac((struct ice_vf *)vf)) {
+ dev_err(dev,
+ "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+ return false;
+ }
+
+ if (!vf->trusted && ice_is_mc_lldp_eth_addr(mac)) {
+ dev_warn(dev,
+ "An untrusted VF %u is attempting to configure an LLDP multicast address\n",
+ vf->vf_id);
+ return false;
+ }
+
+ return true;
+}
+
+/**
* ice_vc_add_mac_addr - attempt to add the MAC address passed in
* @vf: pointer to the VF info
* @vsi: pointer to the VF's VSI
@@ -2251,10 +803,8 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
if (ether_addr_equal(mac_addr, vf->dev_lan_addr))
return 0;
- if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
- dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
+ if (!ice_vc_can_add_mac(vf, mac_addr))
return -EPERM;
- }
ret = ice_fltr_add_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
if (ret == -EEXIST) {
@@ -2269,6 +819,8 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
return ret;
} else {
vf->num_mac++;
+ if (ice_is_mc_lldp_eth_addr(mac_addr))
+ ice_vf_update_mac_lldp_num(vf, vsi, true);
}
ice_vfhw_mac_add(vf, vc_ether_addr);
@@ -2363,6 +915,8 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
ice_vfhw_mac_del(vf, vc_ether_addr);
vf->num_mac--;
+ if (ice_is_mc_lldp_eth_addr(mac_addr))
+ ice_vf_update_mac_lldp_num(vf, vsi, false);
return 0;
}
@@ -2468,66 +1022,6 @@ static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
}
/**
- * ice_vc_request_qs_msg
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- *
- * VFs get a default number of queues but can use this message to request a
- * different number. If the request is successful, the PF will reset the VF and
- * return 0. If unsuccessful, the PF will send a message informing the VF of the
- * number of available queue pairs via the virtchnl message response.
- */
-static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_vf_res_request *vfres =
- (struct virtchnl_vf_res_request *)msg;
- u16 req_queues = vfres->num_queue_pairs;
- struct ice_pf *pf = vf->pf;
- u16 max_allowed_vf_queues;
- u16 tx_rx_queue_left;
- struct device *dev;
- u16 cur_queues;
-
- dev = ice_pf_to_dev(pf);
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto error_param;
- }
-
- cur_queues = vf->num_vf_qs;
- tx_rx_queue_left = min_t(u16, ice_get_avail_txq_count(pf),
- ice_get_avail_rxq_count(pf));
- max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
- if (!req_queues) {
- dev_err(dev, "VF %d tried to request 0 queues. Ignoring.\n",
- vf->vf_id);
- } else if (req_queues > ICE_MAX_RSS_QS_PER_VF) {
- dev_err(dev, "VF %d tried to request more than %d queues.\n",
- vf->vf_id, ICE_MAX_RSS_QS_PER_VF);
- vfres->num_queue_pairs = ICE_MAX_RSS_QS_PER_VF;
- } else if (req_queues > cur_queues &&
- req_queues - cur_queues > tx_rx_queue_left) {
- dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
- vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
- vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
- ICE_MAX_RSS_QS_PER_VF);
- } else {
- /* request is successful, then reset VF */
- vf->num_req_qs = req_queues;
- ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
- dev_info(dev, "VF %d granted request of %u queues.\n",
- vf->vf_id, req_queues);
- return 0;
- }
-
-error_param:
- /* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
- v_ret, (u8 *)vfres, sizeof(*vfres));
-}
-
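Aside: the arbitration removed above caps a VF's request at ICE_MAX_RSS_QS_PER_VF and at what the PF still has free. The sketch below reproduces that decision with hypothetical queue counts; the limit of 16 and the counts are assumptions, not values taken from the driver.

#include <stdio.h>

#define MAX_QS_PER_VF 16 /* assumed ICE_MAX_RSS_QS_PER_VF */

int main(void)
{
	unsigned int cur_queues = 4;       /* queues the VF already owns */
	unsigned int queues_left = 8;      /* free Tx/Rx queues on the PF */
	unsigned int req_queues = 20;      /* what the VF asked for */
	unsigned int max_allowed = cur_queues + queues_left;
	unsigned int offer;

	if (req_queues > MAX_QS_PER_VF)
		offer = MAX_QS_PER_VF;                   /* cap at the per-VF limit */
	else if (req_queues > cur_queues &&
		 req_queues - cur_queues > queues_left)
		offer = max_allowed < MAX_QS_PER_VF ?
			max_allowed : MAX_QS_PER_VF;     /* cap at what is available */
	else
		offer = req_queues;                      /* grant and reset the VF */

	printf("VF requested %u queue pairs, PF offers %u\n", req_queues, offer);
	return 0;
}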
-/**
* ice_vf_vlan_offload_ena - determine if capabilities support VLAN offloads
* @caps: VF driver negotiated capabilities
*
@@ -2542,7 +1036,7 @@ static bool ice_vf_vlan_offload_ena(u32 caps)
* ice_is_vlan_promisc_allowed - check if VLAN promiscuous config is allowed
* @vf: VF used to determine if VLAN promiscuous config is allowed
*/
-static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
+bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
{
if ((test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) &&
@@ -2561,8 +1055,8 @@ static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
* This function should only be called if VLAN promiscuous mode is allowed,
* which can be determined via ice_is_vlan_promisc_allowed().
*/
-static int ice_vf_ena_vlan_promisc(struct ice_vf *vf, struct ice_vsi *vsi,
- struct ice_vlan *vlan)
+int ice_vf_ena_vlan_promisc(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct ice_vlan *vlan)
{
u8 promisc_m = 0;
int status;
@@ -2920,108 +1414,6 @@ error_param:
}
/**
- * ice_vc_get_rss_hena - return the RSS HENA bits allowed by the hardware
- * @vf: pointer to the VF info
- */
-static int ice_vc_get_rss_hena(struct ice_vf *vf)
-{
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_rss_hena *vrh = NULL;
- int len = 0, ret;
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- dev_err(ice_pf_to_dev(vf->pf), "RSS not supported by PF\n");
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- len = sizeof(struct virtchnl_rss_hena);
- vrh = kzalloc(len, GFP_KERNEL);
- if (!vrh) {
- v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
- len = 0;
- goto err;
- }
-
- vrh->hena = ICE_DEFAULT_RSS_HENA;
-err:
- /* send the response back to the VF */
- ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_RSS_HENA_CAPS, v_ret,
- (u8 *)vrh, len);
- kfree(vrh);
- return ret;
-}
-
-/**
- * ice_vc_set_rss_hena - set RSS HENA bits for the VF
- * @vf: pointer to the VF info
- * @msg: pointer to the msg buffer
- */
-static int ice_vc_set_rss_hena(struct ice_vf *vf, u8 *msg)
-{
- struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
- enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct ice_pf *pf = vf->pf;
- struct ice_vsi *vsi;
- struct device *dev;
- int status;
-
- dev = ice_pf_to_dev(pf);
-
- if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
- dev_err(dev, "RSS not supported by PF\n");
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi) {
- v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- goto err;
- }
-
- /* clear all previously programmed RSS configuration to allow VF drivers
- * the ability to customize the RSS configuration and/or completely
- * disable RSS
- */
- status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
- if (status && !vrh->hena) {
- /* only report failure to clear the current RSS configuration if
- * that was clearly the VF's intention (i.e. vrh->hena = 0)
- */
- v_ret = ice_err_to_virt_err(status);
- goto err;
- } else if (status) {
- /* allow the VF to update the RSS configuration even on failure
- * to clear the current RSS configuration in an attempt to keep
- * RSS in a working state
- */
- dev_warn(dev, "Failed to clear the RSS configuration for VF %u\n",
- vf->vf_id);
- }
-
- if (vrh->hena) {
- status = ice_add_avf_rss_cfg(&pf->hw, vsi, vrh->hena);
- v_ret = ice_err_to_virt_err(status);
- }
-
- /* send the response to the VF */
-err:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_SET_RSS_HENA, v_ret,
- NULL, 0);
-}
-
-/**
* ice_vc_query_rxdid - query RXDID supported by DDP package
* @vf: pointer to VF info
*
@@ -3031,8 +1423,8 @@ err:
static int ice_vc_query_rxdid(struct ice_vf *vf)
{
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
- struct virtchnl_supported_rxdids rxdid = {};
struct ice_pf *pf = vf->pf;
+ u64 rxdid;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -3044,7 +1436,7 @@ static int ice_vc_query_rxdid(struct ice_vf *vf)
goto err;
}
- rxdid.supported_rxdids = pf->supported_rxdids;
+ rxdid = pf->supported_rxdids;
err:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
@@ -3777,48 +2169,6 @@ ice_vc_ena_vlan_offload(struct ice_vsi *vsi,
return 0;
}
-#define ICE_L2TSEL_QRX_CONTEXT_REG_IDX 3
-#define ICE_L2TSEL_BIT_OFFSET 23
-enum ice_l2tsel {
- ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND,
- ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG1,
-};
-
-/**
- * ice_vsi_update_l2tsel - update l2tsel field for all Rx rings on this VSI
- * @vsi: VSI used to update l2tsel on
- * @l2tsel: l2tsel setting requested
- *
- * Use the l2tsel setting to update all of the Rx queue context bits for l2tsel.
- * This will modify which descriptor field the first offloaded VLAN will be
- * stripped into.
- */
-static void ice_vsi_update_l2tsel(struct ice_vsi *vsi, enum ice_l2tsel l2tsel)
-{
- struct ice_hw *hw = &vsi->back->hw;
- u32 l2tsel_bit;
- int i;
-
- if (l2tsel == ICE_L2TSEL_EXTRACT_FIRST_TAG_L2TAG2_2ND)
- l2tsel_bit = 0;
- else
- l2tsel_bit = BIT(ICE_L2TSEL_BIT_OFFSET);
-
- for (i = 0; i < vsi->alloc_rxq; i++) {
- u16 pfq = vsi->rxq_map[i];
- u32 qrx_context_offset;
- u32 regval;
-
- qrx_context_offset =
- QRX_CONTEXT(ICE_L2TSEL_QRX_CONTEXT_REG_IDX, pfq);
-
- regval = rd32(hw, qrx_context_offset);
- regval &= ~BIT(ICE_L2TSEL_BIT_OFFSET);
- regval |= l2tsel_bit;
- wr32(hw, qrx_context_offset, regval);
- }
-}
-
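Aside: the removed ice_vsi_update_l2tsel() only toggles bit 23 of the third Rx queue context word for each queue. The standalone snippet below shows that read-modify-write on an example register value; the value itself is made up.

#include <stdint.h>
#include <stdio.h>

#define L2TSEL_BIT_OFFSET 23 /* same bit as ICE_L2TSEL_BIT_OFFSET above */

int main(void)
{
	uint32_t regval = 0x00400000;   /* assumed current QRX_CONTEXT(3, q) value */
	int use_l2tag1 = 1;             /* extract the first tag into L2TAG1 */
	uint32_t l2tsel_bit = use_l2tag1 ? (1u << L2TSEL_BIT_OFFSET) : 0;

	regval &= ~(1u << L2TSEL_BIT_OFFSET); /* clear the old selection */
	regval |= l2tsel_bit;                 /* apply the requested one */

	printf("new register value: 0x%08x\n", regval);
	return 0;
}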
/**
* ice_vc_ena_vlan_stripping_v2_msg
* @vf: VF the message was received from
@@ -4092,6 +2442,59 @@ out:
v_ret, NULL, 0);
}
+static int ice_vc_get_ptp_cap(struct ice_vf *vf,
+ const struct virtchnl_ptp_caps *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ u32 caps = VIRTCHNL_1588_PTP_CAP_RX_TSTAMP |
+ VIRTCHNL_1588_PTP_CAP_READ_PHC;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ goto err;
+
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+
+ if (msg->caps & caps)
+ vf->ptp_caps = caps;
+
+err:
+ /* send the response back to the VF */
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_CAPS, v_ret,
+ (u8 *)&vf->ptp_caps,
+ sizeof(struct virtchnl_ptp_caps));
+}
+
+static int ice_vc_get_phc_time(struct ice_vf *vf)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ struct virtchnl_phc_time *phc_time = NULL;
+ struct ice_pf *pf = vf->pf;
+ u32 len = 0;
+ int ret;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+ goto err;
+
+ v_ret = VIRTCHNL_STATUS_SUCCESS;
+
+ phc_time = kzalloc(sizeof(*phc_time), GFP_KERNEL);
+ if (!phc_time) {
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ goto err;
+ }
+
+ len = sizeof(*phc_time);
+
+ phc_time->time = ice_ptp_read_src_clk_reg(pf, NULL);
+
+err:
+ /* send the response back to the VF */
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_1588_PTP_GET_TIME, v_ret,
+ (u8 *)phc_time, len);
+ kfree(phc_time);
+ return ret;
+}
+
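Aside: VIRTCHNL_OP_1588_PTP_GET_TIME returns a single 64-bit value in virtchnl_phc_time, filled here from ice_ptp_read_src_clk_reg(). A VF-side consumer would typically split the nanosecond count into seconds and nanoseconds; the sketch below does that with an invented sample reading.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t phc_time = 1700000000123456789ULL; /* assumed PHC reading in ns */
	uint64_t sec = phc_time / 1000000000ULL;
	uint32_t nsec = (uint32_t)(phc_time % 1000000000ULL);

	printf("PHC time: %llu.%09u s\n", (unsigned long long)sec, nsec);
	return 0;
}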
static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.get_ver_msg = ice_vc_get_ver_msg,
.get_vf_res_msg = ice_vc_get_vf_res_msg,
@@ -4111,8 +2514,8 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.add_vlan_msg = ice_vc_add_vlan_msg,
.remove_vlan_msg = ice_vc_remove_vlan_msg,
.query_rxdid = ice_vc_query_rxdid,
- .get_rss_hena = ice_vc_get_rss_hena,
- .set_rss_hena_msg = ice_vc_set_rss_hena,
+ .get_rss_hashcfg = ice_vc_get_rss_hashcfg,
+ .set_rss_hashcfg = ice_vc_set_rss_hashcfg,
.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
@@ -4128,6 +2531,11 @@ static const struct ice_virtchnl_ops ice_virtchnl_dflt_ops = {
.get_qos_caps = ice_vc_get_qos_caps,
.cfg_q_bw = ice_vc_cfg_q_bw,
.cfg_q_quanta = ice_vc_cfg_q_quanta,
+ .get_ptp_cap = ice_vc_get_ptp_cap,
+ .get_phc_time = ice_vc_get_phc_time,
+ /* If you add a new op here please make sure to add it to
+ * ice_virtchnl_repr_ops as well.
+ */
};
/**
@@ -4185,7 +2593,6 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
}
ice_vfhw_mac_add(vf, &al->list[i]);
- vf->num_mac++;
break;
}
@@ -4244,8 +2651,8 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
.add_vlan_msg = ice_vc_add_vlan_msg,
.remove_vlan_msg = ice_vc_remove_vlan_msg,
.query_rxdid = ice_vc_query_rxdid,
- .get_rss_hena = ice_vc_get_rss_hena,
- .set_rss_hena_msg = ice_vc_set_rss_hena,
+ .get_rss_hashcfg = ice_vc_get_rss_hashcfg,
+ .set_rss_hashcfg = ice_vc_set_rss_hashcfg,
.ena_vlan_stripping = ice_vc_ena_vlan_stripping,
.dis_vlan_stripping = ice_vc_dis_vlan_stripping,
.handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
@@ -4258,6 +2665,11 @@ static const struct ice_virtchnl_ops ice_virtchnl_repr_ops = {
.dis_vlan_stripping_v2_msg = ice_vc_dis_vlan_stripping_v2_msg,
.ena_vlan_insertion_v2_msg = ice_vc_ena_vlan_insertion_v2_msg,
.dis_vlan_insertion_v2_msg = ice_vc_dis_vlan_insertion_v2_msg,
+ .get_qos_caps = ice_vc_get_qos_caps,
+ .cfg_q_bw = ice_vc_cfg_q_bw,
+ .cfg_q_quanta = ice_vc_cfg_q_quanta,
+ .get_ptp_cap = ice_vc_get_ptp_cap,
+ .get_phc_time = ice_vc_get_phc_time,
};
/**
@@ -4441,11 +2853,11 @@ error_handler:
case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
err = ops->query_rxdid(vf);
break;
- case VIRTCHNL_OP_GET_RSS_HENA_CAPS:
- err = ops->get_rss_hena(vf);
+ case VIRTCHNL_OP_GET_RSS_HASHCFG_CAPS:
+ err = ops->get_rss_hashcfg(vf);
break;
- case VIRTCHNL_OP_SET_RSS_HENA:
- err = ops->set_rss_hena_msg(vf, msg);
+ case VIRTCHNL_OP_SET_RSS_HASHCFG:
+ err = ops->set_rss_hashcfg(vf, msg);
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
err = ops->ena_vlan_stripping(vf);
@@ -4495,6 +2907,12 @@ error_handler:
case VIRTCHNL_OP_CONFIG_QUANTA:
err = ops->cfg_q_quanta(vf, msg);
break;
+ case VIRTCHNL_OP_1588_PTP_GET_CAPS:
+ err = ops->get_ptp_cap(vf, (const void *)msg);
+ break;
+ case VIRTCHNL_OP_1588_PTP_GET_TIME:
+ err = ops->get_phc_time(vf);
+ break;
case VIRTCHNL_OP_UNKNOWN:
default:
dev_err(dev, "Unsupported opcode %d from VF %d\n", v_opcode,
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/virt/virtchnl.h
index 0c629aef9baf..71bb456e2d71 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h
+++ b/drivers/net/ethernet/intel/ice/virt/virtchnl.h
@@ -26,6 +26,9 @@
#define ICE_MAX_MACADDR_PER_VF 18
#define ICE_FLEX_DESC_RXDID_MAX_NUM 64
+/* Priority to be compared against previous priority from the pipe */
+#define ICE_RXDID_PRIO 0x03
+
/* VFs only get a single VSI. For ice hardware, the VF does not need to know
* its VSI index. However, the virtchnl interface requires a VSI number,
* mainly due to legacy hardware.
@@ -54,8 +57,8 @@ struct ice_virtchnl_ops {
int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg);
int (*query_rxdid)(struct ice_vf *vf);
- int (*get_rss_hena)(struct ice_vf *vf);
- int (*set_rss_hena_msg)(struct ice_vf *vf, u8 *msg);
+ int (*get_rss_hashcfg)(struct ice_vf *vf);
+ int (*set_rss_hashcfg)(struct ice_vf *vf, u8 *msg);
int (*ena_vlan_stripping)(struct ice_vf *vf);
int (*dis_vlan_stripping)(struct ice_vf *vf);
int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add);
@@ -72,6 +75,9 @@ struct ice_virtchnl_ops {
int (*cfg_q_tc_map)(struct ice_vf *vf, u8 *msg);
int (*cfg_q_bw)(struct ice_vf *vf, u8 *msg);
int (*cfg_q_quanta)(struct ice_vf *vf, u8 *msg);
+ int (*get_ptp_cap)(struct ice_vf *vf,
+ const struct virtchnl_ptp_caps *msg);
+ int (*get_phc_time)(struct ice_vf *vf);
};
#ifdef CONFIG_PCI_IOV
@@ -86,12 +92,31 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id);
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event,
struct ice_mbx_data *mbxdata);
+void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx);
+void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx);
+int ice_vf_ena_vlan_promisc(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct ice_vlan *vlan);
+bool ice_is_vlan_promisc_allowed(struct ice_vf *vf);
#else /* CONFIG_PCI_IOV */
static inline void ice_virtchnl_set_dflt_ops(struct ice_vf *vf) { }
static inline void ice_virtchnl_set_repr_ops(struct ice_vf *vf) { }
static inline void ice_vc_notify_vf_link_state(struct ice_vf *vf) { }
static inline void ice_vc_notify_link_state(struct ice_pf *pf) { }
static inline void ice_vc_notify_reset(struct ice_pf *pf) { }
+static inline void ice_vf_ena_rxq_interrupt(struct ice_vsi *vsi, u32 q_idx) { }
+static inline void ice_vf_ena_txq_interrupt(struct ice_vsi *vsi, u32 q_idx) { }
+
+static inline int ice_vf_ena_vlan_promisc(struct ice_vf *vf,
+ struct ice_vsi *vsi,
+ struct ice_vlan *vlan)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
+{
+ return false;
+}
static inline int
ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,