Diffstat (limited to 'drivers/net/ethernet/intel/ice')
-rw-r--r--   drivers/net/ethernet/intel/ice/ice.h             |   6
-rw-r--r--   drivers/net/ethernet/intel/ice/ice_adminq_cmd.h  |  17
-rw-r--r--   drivers/net/ethernet/intel/ice/ice_base.c        |   3
-rw-r--r--   drivers/net/ethernet/intel/ice/ice_ddp.c         | 302
-rw-r--r--   drivers/net/ethernet/intel/ice/ice_ddp.h         |   5
-rw-r--r--   drivers/net/ethernet/intel/ice/ice_ethtool.c     | 110
-rw-r--r--   drivers/net/ethernet/intel/ice/ice_ethtool.h     |  39
-rw-r--r--   drivers/net/ethernet/intel/ice/ice_lib.c         |   6
-rw-r--r--   drivers/net/ethernet/intel/ice/ice_main.c        |  33
-rw-r--r--   drivers/net/ethernet/intel/ice/ice_virtchnl.c    |  59
10 files changed, 317 insertions, 263 deletions
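The largest change below reworks the DDP download path in ice_ddp.c around a small sending context, so the "last buffer" admin-queue flag is set when the stream is flushed rather than by peeking at the next buffer. A minimal standalone sketch of that deferred-last pattern, with hypothetical names and a stubbed hardware call (illustration only, not the driver's code):

/* Each hunk is buffered and only transmitted once the next one arrives,
 * so a final flush with NULL can mark the buffered hunk as "last".
 * Errors are sticky, mirroring ice_ddp_send_hunk() in the patch below.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct send_ctx {
	const char *pending;	/* hunk waiting to be sent */
	int err;		/* sticky error, checked on every call */
};

static int hw_send(const char *hunk, bool last)
{
	printf("send %s%s\n", hunk, last ? " (last)" : "");
	return 0;		/* pretend the firmware call succeeded */
}

static int ctx_send_hunk(struct send_ctx *ctx, const char *hunk)
{
	const char *prev = ctx->pending;
	bool prev_was_last = !hunk;

	if (ctx->err)
		return ctx->err;

	ctx->pending = hunk;
	if (!prev)
		return 0;	/* nothing buffered yet */

	ctx->err = hw_send(prev, prev_was_last);
	return ctx->err;
}

int main(void)
{
	struct send_ctx ctx = { 0 };
	const char *hunks[] = { "hunk0", "hunk1", "hunk2" };

	for (size_t i = 0; i < sizeof(hunks) / sizeof(hunks[0]); i++)
		ctx_send_hunk(&ctx, hunks[i]);
	ctx_send_hunk(&ctx, NULL);	/* flush: "hunk2" goes out with last set */
	return ctx.err;
}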
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 680a81961ba1..2f5d6f974185 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -181,11 +181,9 @@ #define ice_for_each_chnl_tc(i) \
 	for ((i) = ICE_CHNL_START_TC; (i) < ICE_CHNL_MAX_TC; (i)++)
 
-#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_UCAST_RX)
+#define ICE_UCAST_PROMISC_BITS ICE_PROMISC_UCAST_RX
 
-#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_TX | \
-				     ICE_PROMISC_UCAST_RX | \
-				     ICE_PROMISC_VLAN_TX | \
+#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_RX | \
 				     ICE_PROMISC_VLAN_RX)
 
 #define ICE_MCAST_PROMISC_BITS (ICE_PROMISC_MCAST_TX | ICE_PROMISC_MCAST_RX)
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 1f01f3501d6b..1489a8ceec51 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1492,6 +1492,23 @@ struct ice_aqc_dnl_equa_param {
 #define ICE_AQC_RX_EQU_BFLF (0x13 << ICE_AQC_RX_EQU_SHIFT)
 #define ICE_AQC_RX_EQU_BFHF (0x14 << ICE_AQC_RX_EQU_SHIFT)
 #define ICE_AQC_RX_EQU_DRATE (0x15 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_CTLE_GAINHF (0x20 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_CTLE_GAINLF (0x21 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_CTLE_GAINDC (0x22 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_CTLE_BW (0x23 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_GAIN (0x30 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_GAIN2 (0x31 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_2 (0x32 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_3 (0x33 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_4 (0x34 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_5 (0x35 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_6 (0x36 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_7 (0x37 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_8 (0x38 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_9 (0x39 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_10 (0x3A << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_11 (0x3B << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DFE_12 (0x3C << ICE_AQC_RX_EQU_SHIFT)
 #define ICE_AQC_TX_EQU_PRE1 0x0
 #define ICE_AQC_TX_EQU_PRE3 0x3
 #define ICE_AQC_TX_EQU_ATTEN 0x4
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 3a8e156d7d86..82a9cd4ec7ae 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -156,7 +156,8 @@ skip_alloc:
 	 * handler here (i.e. resume, reset/rebuild, etc.)
 	 */
 	if (vsi->netdev)
-		netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll);
+		netif_napi_add_config(vsi->netdev, &q_vector->napi,
+				      ice_napi_poll, v_idx);
 
 out:
 	/* tie q_vector and VSI together */
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index 272fd823a825..03988be03729 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -1211,6 +1211,131 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
 }
 
 /**
+ * ice_is_buffer_metadata - determine if package buffer is a metadata buffer
+ * @buf: pointer to buffer header
+ * Return: whether given @buf is a metadata one.
+ */
+static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf)
+{
+	return le32_to_cpu(buf->section_entry[0].type) & ICE_METADATA_BUF;
+}
+
+/**
+ * struct ice_ddp_send_ctx - sending context of current DDP segment
+ * @hw: pointer to the hardware struct
+ *
+ * Keeps current sending state (header, error) for the purpose of proper "last"
+ * bit setting in ice_aq_download_pkg(). Use via calls to ice_ddp_send_hunk().
+ */
+struct ice_ddp_send_ctx {
+	struct ice_hw *hw;
+/* private: only for ice_ddp_send_hunk() */
+	struct ice_buf_hdr *hdr;
+	int err;
+};
+
+static void ice_ddp_send_ctx_set_err(struct ice_ddp_send_ctx *ctx, int err)
+{
+	ctx->err = err;
+}
+
+/**
+ * ice_ddp_send_hunk - send one hunk of data to FW
+ * @ctx: current segment sending context
+ * @hunk: next hunk to send, size is always ICE_PKG_BUF_SIZE
+ *
+ * Send the next hunk of data to FW, retrying if needed.
+ *
+ * Notice: must be called once more with a NULL @hunk to finish up; such call
+ * will set up the "last" bit of an AQ request. After such call @ctx.hdr is
+ * cleared, @hw is still valid.
+ *
+ * Return: %ICE_DDP_PKG_SUCCESS if there were no problems; a sticky @err
+ * otherwise.
+ */
+static enum ice_ddp_state ice_ddp_send_hunk(struct ice_ddp_send_ctx *ctx,
+					    struct ice_buf_hdr *hunk)
+{
+	struct ice_buf_hdr *prev_hunk = ctx->hdr;
+	struct ice_hw *hw = ctx->hw;
+	bool prev_was_last = !hunk;
+	enum ice_aq_err aq_err;
+	u32 offset, info;
+	int attempt, err;
+
+	if (ctx->err)
+		return ctx->err;
+
+	ctx->hdr = hunk;
+	if (!prev_hunk)
+		return ICE_DDP_PKG_SUCCESS; /* no problem so far */
+
+	for (attempt = 0; attempt < 5; attempt++) {
+		if (attempt)
+			msleep(20);
+
+		err = ice_aq_download_pkg(hw, prev_hunk, ICE_PKG_BUF_SIZE,
+					  prev_was_last, &offset, &info, NULL);
+
+		aq_err = hw->adminq.sq_last_status;
+		if (aq_err != ICE_AQ_RC_ENOSEC && aq_err != ICE_AQ_RC_EBADSIG)
+			break;
+	}
+
+	if (err) {
+		ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
+			  err, offset, info);
+		ctx->err = ice_map_aq_err_to_ddp_state(aq_err);
+	} else if (attempt) {
+		dev_dbg(ice_hw_to_dev(hw),
+			"ice_aq_download_pkg number of retries: %d\n", attempt);
+	}
+
+	return ctx->err;
+}
+
+/**
+ * ice_dwnld_cfg_bufs_no_lock
+ * @ctx: context of the current buffers section to send
+ * @bufs: pointer to an array of buffers
+ * @start: buffer index of first buffer to download
+ * @count: the number of buffers to download
+ *
+ * Downloads package configuration buffers to the firmware. Metadata buffers
+ * are skipped, and the first metadata buffer found indicates that the rest
+ * of the buffers are all metadata buffers.
+ */
+static enum ice_ddp_state
+ice_dwnld_cfg_bufs_no_lock(struct ice_ddp_send_ctx *ctx, struct ice_buf *bufs,
+			   u32 start, u32 count)
+{
+	struct ice_buf_hdr *bh;
+	enum ice_ddp_state err;
+
+	if (!bufs || !count) {
+		ice_ddp_send_ctx_set_err(ctx, ICE_DDP_PKG_ERR);
+		return ICE_DDP_PKG_ERR;
+	}
+
+	bufs += start;
+
+	for (int i = 0; i < count; i++, bufs++) {
+		bh = (struct ice_buf_hdr *)bufs;
+		/* Metadata buffers should not be sent to FW,
+		 * their presence means "we are done here".
+		 */
+		if (ice_is_buffer_metadata(bh))
+			break;
+
+		err = ice_ddp_send_hunk(ctx, bh);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+/**
  * ice_get_pkg_seg_by_idx
  * @pkg_hdr: pointer to the package header to be searched
  * @idx: index of segment
@@ -1270,136 +1395,20 @@ ice_is_signing_seg_type_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx,
 }
 
 /**
- * ice_is_buffer_metadata - determine if package buffer is a metadata buffer
- * @buf: pointer to buffer header
- */
-static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf)
-{
-	if (le32_to_cpu(buf->section_entry[0].type) & ICE_METADATA_BUF)
-		return true;
-
-	return false;
-}
-
-/**
- * ice_is_last_download_buffer
- * @buf: pointer to current buffer header
- * @idx: index of the buffer in the current sequence
- * @count: the buffer count in the current sequence
- *
- * Note: this routine should only be called if the buffer is not the last buffer
- */
-static bool
-ice_is_last_download_buffer(struct ice_buf_hdr *buf, u32 idx, u32 count)
-{
-	struct ice_buf *next_buf;
-
-	if ((idx + 1) == count)
-		return true;
-
-	/* A set metadata flag in the next buffer will signal that the current
-	 * buffer will be the last buffer downloaded
-	 */
-	next_buf = ((struct ice_buf *)buf) + 1;
-
-	return ice_is_buffer_metadata((struct ice_buf_hdr *)next_buf);
-}
-
-/**
- * ice_dwnld_cfg_bufs_no_lock
- * @hw: pointer to the hardware structure
- * @bufs: pointer to an array of buffers
- * @start: buffer index of first buffer to download
- * @count: the number of buffers to download
- * @indicate_last: if true, then set last buffer flag on last buffer download
- *
- * Downloads package configuration buffers to the firmware. Metadata buffers
- * are skipped, and the first metadata buffer found indicates that the rest
- * of the buffers are all metadata buffers.
- */
-static enum ice_ddp_state
-ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
-			   u32 count, bool indicate_last)
-{
-	enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
-	struct ice_buf_hdr *bh;
-	enum ice_aq_err err;
-	u32 offset, info, i;
-
-	if (!bufs || !count)
-		return ICE_DDP_PKG_ERR;
-
-	/* If the first buffer's first section has its metadata bit set
-	 * then there are no buffers to be downloaded, and the operation is
-	 * considered a success.
-	 */
-	bh = (struct ice_buf_hdr *)(bufs + start);
-	if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
-		return ICE_DDP_PKG_SUCCESS;
-
-	for (i = 0; i < count; i++) {
-		bool last = false;
-		int try_cnt = 0;
-		int status;
-
-		bh = (struct ice_buf_hdr *)(bufs + start + i);
-
-		if (indicate_last)
-			last = ice_is_last_download_buffer(bh, i, count);
-
-		while (1) {
-			status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE,
-						     last, &offset, &info,
-						     NULL);
-			if (hw->adminq.sq_last_status != ICE_AQ_RC_ENOSEC &&
-			    hw->adminq.sq_last_status != ICE_AQ_RC_EBADSIG)
-				break;
-
-			try_cnt++;
-
-			if (try_cnt == 5)
-				break;
-
-			msleep(20);
-		}
-
-		if (try_cnt)
-			dev_dbg(ice_hw_to_dev(hw),
-				"ice_aq_download_pkg number of retries: %d\n",
-				try_cnt);
-
-		/* Save AQ status from download package */
-		if (status) {
-			ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
-				  status, offset, info);
-			err = hw->adminq.sq_last_status;
-			state = ice_map_aq_err_to_ddp_state(err);
-			break;
-		}
-
-		if (last)
-			break;
-	}
-
-	return state;
-}
-
-/**
  * ice_download_pkg_sig_seg - download a signature segment
- * @hw: pointer to the hardware structure
+ * @ctx: context of the current buffers section to send
  * @seg: pointer to signature segment
  */
 static enum ice_ddp_state
-ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg)
+ice_download_pkg_sig_seg(struct ice_ddp_send_ctx *ctx, struct ice_sign_seg *seg)
 {
-	return ice_dwnld_cfg_bufs_no_lock(hw, seg->buf_tbl.buf_array, 0,
-					  le32_to_cpu(seg->buf_tbl.buf_count),
-					  false);
+	return ice_dwnld_cfg_bufs_no_lock(ctx, seg->buf_tbl.buf_array, 0,
					  le32_to_cpu(seg->buf_tbl.buf_count));
 }
 
 /**
  * ice_download_pkg_config_seg - download a config segment
- * @hw: pointer to the hardware structure
+ * @ctx: context of the current buffers section to send
  * @pkg_hdr: pointer to package header
  * @idx: segment index
  * @start: starting buffer
@@ -1408,8 +1417,9 @@ ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg)
  * Note: idx must reference a ICE segment
  */
 static enum ice_ddp_state
-ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
-			    u32 idx, u32 start, u32 count)
+ice_download_pkg_config_seg(struct ice_ddp_send_ctx *ctx,
+			    struct ice_pkg_hdr *pkg_hdr, u32 idx, u32 start,
+			    u32 count)
 {
 	struct ice_buf_table *bufs;
 	struct ice_seg *seg;
@@ -1425,46 +1435,56 @@ ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
 	if (start >= buf_count || start + count > buf_count)
 		return ICE_DDP_PKG_ERR;
 
-	return ice_dwnld_cfg_bufs_no_lock(hw, bufs->buf_array, start, count,
-					  true);
+	return ice_dwnld_cfg_bufs_no_lock(ctx, bufs->buf_array, start, count);
+}
+
+static bool ice_is_last_sign_seg(u32 flags)
+{
+	return !(flags & ICE_SIGN_SEG_FLAGS_VALID) || /* behavior prior to valid */
+	       (flags & ICE_SIGN_SEG_FLAGS_LAST);
 }
 
 /**
  * ice_dwnld_sign_and_cfg_segs - download a signing segment and config segment
- * @hw: pointer to the hardware structure
+ * @ctx: context of the current buffers section to send
  * @pkg_hdr: pointer to package header
  * @idx: segment index (must be a signature segment)
 *
  * Note: idx must reference a signature segment
  */
 static enum ice_ddp_state
-ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
-			    u32 idx)
+ice_dwnld_sign_and_cfg_segs(struct ice_ddp_send_ctx *ctx,
+			    struct ice_pkg_hdr *pkg_hdr, u32 idx)
 {
+	u32 conf_idx, start, count, flags;
 	enum ice_ddp_state state;
 	struct ice_sign_seg *seg;
-	u32 conf_idx;
-	u32 start;
-	u32 count;
 
 	seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
 	if (!seg) {
 		state = ICE_DDP_PKG_ERR;
-		goto exit;
+		ice_ddp_send_ctx_set_err(ctx, state);
+		return state;
 	}
 
 	count = le32_to_cpu(seg->signed_buf_count);
-	state = ice_download_pkg_sig_seg(hw, seg);
+	state = ice_download_pkg_sig_seg(ctx, seg);
 	if (state || !count)
-		goto exit;
+		return state;
 
 	conf_idx = le32_to_cpu(seg->signed_seg_idx);
 	start = le32_to_cpu(seg->signed_buf_start);
 
-	state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start,
+	state = ice_download_pkg_config_seg(ctx, pkg_hdr, conf_idx, start,
 					    count);
-exit:
+
+	/* finish up by sending last hunk with "last" flag set if requested by
+	 * DDP content
+	 */
+	flags = le32_to_cpu(seg->flags);
+	if (ice_is_last_sign_seg(flags))
+		state = ice_ddp_send_hunk(ctx, NULL);
+
 	return state;
 }
 
@@ -1519,6 +1539,7 @@ ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
 {
 	enum ice_aq_err aq_err = hw->adminq.sq_last_status;
 	enum ice_ddp_state state = ICE_DDP_PKG_ERR;
+	struct ice_ddp_send_ctx ctx = { .hw = hw };
 	int status;
 	u32 i;
 
@@ -1539,7 +1560,7 @@ ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
 					  hw->pkg_sign_type))
 			continue;
 
-		state = ice_dwnld_sign_and_cfg_segs(hw, pkg_hdr, i);
+		state = ice_dwnld_sign_and_cfg_segs(&ctx, pkg_hdr, i);
 		if (state)
 			break;
 	}
@@ -1564,6 +1585,7 @@ ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
 static enum ice_ddp_state
 ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
 {
+	struct ice_ddp_send_ctx ctx = { .hw = hw };
 	enum ice_ddp_state state;
 	struct ice_buf_hdr *bh;
 	int status;
@@ -1576,7 +1598,7 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
 	 * considered a success.
 	 */
 	bh = (struct ice_buf_hdr *)bufs;
-	if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
+	if (ice_is_buffer_metadata(bh))
 		return ICE_DDP_PKG_SUCCESS;
 
 	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
@@ -1586,7 +1608,9 @@ ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
 		return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
 	}
 
-	state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true);
+	ice_dwnld_cfg_bufs_no_lock(&ctx, bufs, 0, count);
+	/* finish up by sending last hunk with "last" flag set */
+	state = ice_ddp_send_hunk(&ctx, NULL);
 	if (!state)
 		state = ice_post_dwnld_pkg_actions(hw);
 
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h
index 79551da2a4b0..8a2d57fc5dae 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.h
@@ -181,7 +181,10 @@ struct ice_sign_seg {
 	__le32 signed_seg_idx;
 	__le32 signed_buf_start;
 	__le32 signed_buf_count;
-#define ICE_SIGN_SEG_RESERVED_COUNT	44
+#define ICE_SIGN_SEG_FLAGS_VALID	0x80000000
+#define ICE_SIGN_SEG_FLAGS_LAST		0x00000001
+	__le32 flags;
+#define ICE_SIGN_SEG_RESERVED_COUNT	40
 	u8 reserved[ICE_SIGN_SEG_RESERVED_COUNT];
 	struct ice_buf_table buf_tbl;
 };
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 2924ac61300d..3072634bf049 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -693,75 +693,53 @@ static int ice_get_port_topology(struct ice_hw *hw, u8 lport,
 static int ice_get_tx_rx_equa(struct ice_hw *hw, u8 serdes_num,
 			      struct ice_serdes_equalization_to_ethtool *ptr)
 {
+	static const int tx = ICE_AQC_OP_CODE_TX_EQU;
+	static const int rx = ICE_AQC_OP_CODE_RX_EQU;
+	struct {
+		int data_in;
+		int opcode;
+		int *out;
+	} aq_params[] = {
+		{ ICE_AQC_TX_EQU_PRE1, tx, &ptr->tx_equ_pre1 },
+		{ ICE_AQC_TX_EQU_PRE3, tx, &ptr->tx_equ_pre3 },
+		{ ICE_AQC_TX_EQU_ATTEN, tx, &ptr->tx_equ_atten },
+		{ ICE_AQC_TX_EQU_POST1, tx, &ptr->tx_equ_post1 },
+		{ ICE_AQC_TX_EQU_PRE2, tx, &ptr->tx_equ_pre2 },
+		{ ICE_AQC_RX_EQU_PRE2, rx, &ptr->rx_equ_pre2 },
+		{ ICE_AQC_RX_EQU_PRE1, rx, &ptr->rx_equ_pre1 },
+		{ ICE_AQC_RX_EQU_POST1, rx, &ptr->rx_equ_post1 },
+		{ ICE_AQC_RX_EQU_BFLF, rx, &ptr->rx_equ_bflf },
+		{ ICE_AQC_RX_EQU_BFHF, rx, &ptr->rx_equ_bfhf },
+		{ ICE_AQC_RX_EQU_DRATE, rx, &ptr->rx_equ_drate },
+		{ ICE_AQC_RX_EQU_CTLE_GAINHF, rx, &ptr->rx_equ_ctle_gainhf },
+		{ ICE_AQC_RX_EQU_CTLE_GAINLF, rx, &ptr->rx_equ_ctle_gainlf },
+		{ ICE_AQC_RX_EQU_CTLE_GAINDC, rx, &ptr->rx_equ_ctle_gaindc },
+		{ ICE_AQC_RX_EQU_CTLE_BW, rx, &ptr->rx_equ_ctle_bw },
+		{ ICE_AQC_RX_EQU_DFE_GAIN, rx, &ptr->rx_equ_dfe_gain },
+		{ ICE_AQC_RX_EQU_DFE_GAIN2, rx, &ptr->rx_equ_dfe_gain_2 },
+		{ ICE_AQC_RX_EQU_DFE_2, rx, &ptr->rx_equ_dfe_2 },
+		{ ICE_AQC_RX_EQU_DFE_3, rx, &ptr->rx_equ_dfe_3 },
+		{ ICE_AQC_RX_EQU_DFE_4, rx, &ptr->rx_equ_dfe_4 },
+		{ ICE_AQC_RX_EQU_DFE_5, rx, &ptr->rx_equ_dfe_5 },
+		{ ICE_AQC_RX_EQU_DFE_6, rx, &ptr->rx_equ_dfe_6 },
+		{ ICE_AQC_RX_EQU_DFE_7, rx, &ptr->rx_equ_dfe_7 },
+		{ ICE_AQC_RX_EQU_DFE_8, rx, &ptr->rx_equ_dfe_8 },
+		{ ICE_AQC_RX_EQU_DFE_9, rx, &ptr->rx_equ_dfe_9 },
+		{ ICE_AQC_RX_EQU_DFE_10, rx, &ptr->rx_equ_dfe_10 },
+		{ ICE_AQC_RX_EQU_DFE_11, rx, &ptr->rx_equ_dfe_11 },
+		{ ICE_AQC_RX_EQU_DFE_12, rx, &ptr->rx_equ_dfe_12 },
+	};
 	int err;
 
-	err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE1,
-					  ICE_AQC_OP_CODE_TX_EQU, serdes_num,
-					  &ptr->tx_equalization_pre1);
-	if (err)
-		return err;
-
-	err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE3,
-					  ICE_AQC_OP_CODE_TX_EQU, serdes_num,
-					  &ptr->tx_equalization_pre3);
-	if (err)
-		return err;
-
-	err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_ATTEN,
-					  ICE_AQC_OP_CODE_TX_EQU, serdes_num,
-					  &ptr->tx_equalization_atten);
-	if (err)
-		return err;
-
-	err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_POST1,
-					  ICE_AQC_OP_CODE_TX_EQU, serdes_num,
-					  &ptr->tx_equalization_post1);
-	if (err)
-		return err;
-
-	err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE2,
-					  ICE_AQC_OP_CODE_TX_EQU, serdes_num,
-					  &ptr->tx_equalization_pre2);
-	if (err)
-		return err;
-
-	err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_PRE2,
-					  ICE_AQC_OP_CODE_RX_EQU, serdes_num,
-					  &ptr->rx_equalization_pre2);
-	if (err)
-		return err;
-
-	err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_PRE1,
-					  ICE_AQC_OP_CODE_RX_EQU, serdes_num,
-					  &ptr->rx_equalization_pre1);
-	if (err)
-		return err;
-
-	err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_POST1,
-					  ICE_AQC_OP_CODE_RX_EQU, serdes_num,
-					  &ptr->rx_equalization_post1);
-	if (err)
-		return err;
-
-	err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_BFLF,
-					  ICE_AQC_OP_CODE_RX_EQU, serdes_num,
-					  &ptr->rx_equalization_bflf);
-	if (err)
-		return err;
-
-	err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_BFHF,
-					  ICE_AQC_OP_CODE_RX_EQU, serdes_num,
-					  &ptr->rx_equalization_bfhf);
-	if (err)
-		return err;
-
-	err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_DRATE,
-					  ICE_AQC_OP_CODE_RX_EQU, serdes_num,
-					  &ptr->rx_equalization_drate);
-	if (err)
-		return err;
+	for (int i = 0; i < ARRAY_SIZE(aq_params); i++) {
+		err = ice_aq_get_phy_equalization(hw, aq_params[i].data_in,
+						  aq_params[i].opcode,
+						  serdes_num, aq_params[i].out);
+		if (err)
+			break;
+	}
 
-	return 0;
+	return err;
 }
 
 /**
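The ice_ethtool.c hunk above replaces a long chain of near-identical ice_aq_get_phy_equalization() calls with one table-driven loop. A small standalone sketch of the same table-driven query pattern, using an invented read_reg() helper and made-up parameter IDs (illustration only, not the driver's API):

/* Walk a table of { parameter id, destination } pairs instead of
 * spelling out one call per field, as aq_params[] does above.
 */
#include <stddef.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct equ_report {
	int pre1;
	int atten;
	int post1;
};

/* Stand-in for a firmware/AQ query; always succeeds here. */
static int read_reg(int param_id, int *out)
{
	*out = param_id * 2;
	return 0;
}

static int fill_report(struct equ_report *rep)
{
	const struct {
		int param_id;
		int *out;
	} params[] = {
		{ 0x0, &rep->pre1 },
		{ 0x4, &rep->atten },
		{ 0x8, &rep->post1 },
	};
	int err = 0;

	for (size_t i = 0; i < ARRAY_SIZE(params); i++) {
		err = read_reg(params[i].param_id, params[i].out);
		if (err)
			break;
	}
	return err;
}

int main(void)
{
	struct equ_report rep;

	if (!fill_report(&rep))
		printf("pre1=%d atten=%d post1=%d\n", rep.pre1, rep.atten, rep.post1);
	return 0;
}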
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.h b/drivers/net/ethernet/intel/ice/ice_ethtool.h
index 9acccae38625..8f2ad1c172c0 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.h
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.h
@@ -10,17 +10,34 @@ struct ice_phy_type_to_ethtool {
 };
 
 struct ice_serdes_equalization_to_ethtool {
-	int rx_equalization_pre2;
-	int rx_equalization_pre1;
-	int rx_equalization_post1;
-	int rx_equalization_bflf;
-	int rx_equalization_bfhf;
-	int rx_equalization_drate;
-	int tx_equalization_pre1;
-	int tx_equalization_pre3;
-	int tx_equalization_atten;
-	int tx_equalization_post1;
-	int tx_equalization_pre2;
+	int rx_equ_pre2;
+	int rx_equ_pre1;
+	int rx_equ_post1;
+	int rx_equ_bflf;
+	int rx_equ_bfhf;
+	int rx_equ_drate;
+	int rx_equ_ctle_gainhf;
+	int rx_equ_ctle_gainlf;
+	int rx_equ_ctle_gaindc;
+	int rx_equ_ctle_bw;
+	int rx_equ_dfe_gain;
+	int rx_equ_dfe_gain_2;
+	int rx_equ_dfe_2;
+	int rx_equ_dfe_3;
+	int rx_equ_dfe_4;
+	int rx_equ_dfe_5;
+	int rx_equ_dfe_6;
+	int rx_equ_dfe_7;
+	int rx_equ_dfe_8;
+	int rx_equ_dfe_9;
+	int rx_equ_dfe_10;
+	int rx_equ_dfe_11;
+	int rx_equ_dfe_12;
+	int tx_equ_pre1;
+	int tx_equ_pre3;
+	int tx_equ_atten;
+	int tx_equ_post1;
+	int tx_equ_pre2;
 };
 
 struct ice_regdump_to_ethtool {
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index d4e74f96a8ad..a7d45a8ce7ac 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2777,8 +2777,10 @@ void ice_napi_add(struct ice_vsi *vsi)
 		return;
 
 	ice_for_each_q_vector(vsi, v_idx)
-		netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
-			       ice_napi_poll);
+		netif_napi_add_config(vsi->netdev,
+				      &vsi->q_vectors[v_idx]->napi,
+				      ice_napi_poll,
+				      v_idx);
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index b79848fe2a9e..1eaa4428fd24 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -4555,6 +4555,34 @@ ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware)
 }
 
 /**
+ * ice_init_supported_rxdids - Initialize supported Rx descriptor IDs
+ * @hw: pointer to the hardware structure
+ * @pf: pointer to pf structure
+ *
+ * The pf->supported_rxdids bitmap is used to indicate to VFs which descriptor
+ * formats the PF hardware supports. The exact list of supported RXDIDs
+ * depends on the loaded DDP package. The IDs can be determined by reading the
+ * GLFLXP_RXDID_FLAGS register after the DDP package is loaded.
+ *
+ * Note that the legacy 32-byte RXDID 0 is always supported but is not listed
+ * in the DDP package. The 16-byte legacy descriptor is never supported by
+ * VFs.
+ */
+static void ice_init_supported_rxdids(struct ice_hw *hw, struct ice_pf *pf)
+{
+	pf->supported_rxdids = BIT(ICE_RXDID_LEGACY_1);
+
+	for (int i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
+		u32 regval;
+
+		regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0));
+		if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
+			& GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
+			pf->supported_rxdids |= BIT(i);
+	}
+}
+
+/**
  * ice_init_ddp_config - DDP related configuration
  * @hw: pointer to the hardware structure
  * @pf: pointer to pf structure
@@ -4588,6 +4616,9 @@ static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf)
 	ice_load_pkg(firmware, pf);
 	release_firmware(firmware);
 
+	/* Initialize the supported Rx descriptor IDs after loading DDP */
+	ice_init_supported_rxdids(hw, pf);
+
 	return 0;
 }
 
@@ -5900,7 +5931,7 @@ static int __init ice_module_init(void)
 
 	ice_adv_lnk_speed_maps_init();
 
-	ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
+	ice_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, KBUILD_MODNAME);
 	if (!ice_wq) {
 		pr_err("Failed to create workqueue\n");
 		return status;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index aa2080747714..f445e33b2028 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -2554,17 +2554,27 @@ static bool ice_is_vlan_promisc_allowed(struct ice_vf *vf)
 
 /**
  * ice_vf_ena_vlan_promisc - Enable Tx/Rx VLAN promiscuous for the VLAN
+ * @vf: VF to enable VLAN promisc on
  * @vsi: VF's VSI used to enable VLAN promiscuous mode
  * @vlan: VLAN used to enable VLAN promiscuous
 *
  * This function should only be called if VLAN promiscuous mode is allowed,
  * which can be determined via ice_is_vlan_promisc_allowed().
  */
-static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
+static int ice_vf_ena_vlan_promisc(struct ice_vf *vf, struct ice_vsi *vsi,
+				   struct ice_vlan *vlan)
 {
-	u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
+	u8 promisc_m = 0;
 	int status;
 
+	if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states))
+		promisc_m |= ICE_UCAST_VLAN_PROMISC_BITS;
+	if (test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+		promisc_m |= ICE_MCAST_VLAN_PROMISC_BITS;
+
+	if (!promisc_m)
+		return 0;
+
 	status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
 					  vlan->vid);
 	if (status && status != -EEXIST)
@@ -2583,7 +2593,7 @@ static int ice_vf_ena_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
  */
 static int ice_vf_dis_vlan_promisc(struct ice_vsi *vsi, struct ice_vlan *vlan)
 {
-	u8 promisc_m = ICE_PROMISC_VLAN_TX | ICE_PROMISC_VLAN_RX;
+	u8 promisc_m = ICE_UCAST_VLAN_PROMISC_BITS | ICE_MCAST_VLAN_PROMISC_BITS;
 	int status;
 
 	status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, promisc_m,
@@ -2738,7 +2748,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
 					goto error_param;
 				}
 			} else if (vlan_promisc) {
-				status = ice_vf_ena_vlan_promisc(vsi, &vlan);
+				status = ice_vf_ena_vlan_promisc(vf, vsi, &vlan);
 				if (status) {
 					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 					dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
@@ -3021,12 +3031,8 @@ err:
 static int ice_vc_query_rxdid(struct ice_vf *vf)
 {
 	enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
-	struct virtchnl_supported_rxdids *rxdid = NULL;
-	struct ice_hw *hw = &vf->pf->hw;
+	struct virtchnl_supported_rxdids rxdid = {};
 	struct ice_pf *pf = vf->pf;
-	int len = 0;
-	int ret, i;
-	u32 regval;
 
 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -3038,35 +3044,11 @@ static int ice_vc_query_rxdid(struct ice_vf *vf)
 		goto err;
 	}
 
-	len = sizeof(struct virtchnl_supported_rxdids);
-	rxdid = kzalloc(len, GFP_KERNEL);
-	if (!rxdid) {
-		v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
-		len = 0;
-		goto err;
-	}
-
-	/* RXDIDs supported by DDP package can be read from the register
-	 * to get the supported RXDID bitmap. But the legacy 32byte RXDID
-	 * is not listed in DDP package, add it in the bitmap manually.
-	 * Legacy 16byte descriptor is not supported.
-	 */
-	rxdid->supported_rxdids |= BIT(ICE_RXDID_LEGACY_1);
-
-	for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
-		regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0));
-		if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
-			& GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
-			rxdid->supported_rxdids |= BIT(i);
-	}
-
-	pf->supported_rxdids = rxdid->supported_rxdids;
+	rxdid.supported_rxdids = pf->supported_rxdids;
 
 err:
-	ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
-				    v_ret, (u8 *)rxdid, len);
-	kfree(rxdid);
-	return ret;
+	return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_SUPPORTED_RXDIDS,
+				     v_ret, (u8 *)&rxdid, sizeof(rxdid));
 }
 
 /**
@@ -3575,7 +3557,7 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
 			return err;
 
 		if (vlan_promisc) {
-			err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+			err = ice_vf_ena_vlan_promisc(vf, vsi, &vlan);
 			if (err)
 				return err;
 		}
@@ -3603,7 +3585,8 @@ ice_vc_add_vlans(struct ice_vf *vf, struct ice_vsi *vsi,
 		 */
 		if (!ice_is_dvm_ena(&vsi->back->hw)) {
 			if (vlan_promisc) {
-				err = ice_vf_ena_vlan_promisc(vsi, &vlan);
+				err = ice_vf_ena_vlan_promisc(vf, vsi,
+							      &vlan);
 				if (err)
 					return err;
 			}
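The ice_main.c and ice_virtchnl.c hunks above move RXDID discovery to DDP load time and let ice_vc_query_rxdid() answer from the cached pf->supported_rxdids bitmap. A standalone sketch of that compute-once, serve-from-cache pattern, with a simulated register read and made-up flag layout (illustration only, not the driver's code):

/* Build a capability bitmap once at init time, then answer later queries
 * from the cached value instead of touching the hardware again.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_RXDID	8
#define LEGACY_RXDID	1

/* Simulated register read: pretend odd descriptor IDs advertise support. */
static uint32_t read_rxdid_flags(int idx)
{
	return (idx % 2) ? 0x4 : 0;
}

static uint64_t init_supported_rxdids(void)
{
	uint64_t bitmap = 1ULL << LEGACY_RXDID;	/* legacy ID always supported */

	for (int i = 2; i < NUM_RXDID; i++)
		if (read_rxdid_flags(i) & 0x4)
			bitmap |= 1ULL << i;
	return bitmap;
}

int main(void)
{
	/* "probe time": compute and cache */
	uint64_t supported = init_supported_rxdids();

	/* "query time": reply straight from the cache */
	printf("supported rxdids: 0x%llx\n", (unsigned long long)supported);
	return 0;
}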