Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/arm64/iort.c | 35
-rw-r--r--  drivers/block/Kconfig | 2
-rw-r--r--  drivers/block/nbd.c | 6
-rw-r--r--  drivers/clk/clk-bulk.c | 1
-rw-r--r--  drivers/clk/rockchip/clk-rk3128.c | 12
-rw-r--r--  drivers/clk/samsung/clk-exynos4.c | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_dpio_phy.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 3
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c | 19
-rw-r--r--  drivers/hwmon/xgene-hwmon.c | 19
-rw-r--r--  drivers/i2c/busses/Kconfig | 1
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-sprd.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-stm32f7.c | 17
-rw-r--r--  drivers/ide/ide-probe.c | 1
-rw-r--r--  drivers/ide/ide-scan-pci.c | 13
-rw-r--r--  drivers/ide/setup-pci.c | 63
-rw-r--r--  drivers/infiniband/core/iwpm_msg.c | 8
-rw-r--r--  drivers/infiniband/core/iwpm_util.c | 5
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 2
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_p.h | 2
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_puda.c | 11
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 4
-rw-r--r--  drivers/infiniband/hw/qedr/qedr.h | 2
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_cm.c | 12
-rw-r--r--  drivers/md/bcache/closure.c | 4
-rw-r--r--  drivers/misc/cxl/cxllib.c | 13
-rw-r--r--  drivers/mmc/core/block.c | 3
-rw-r--r--  drivers/mmc/core/mmc.c | 36
-rw-r--r--  drivers/mmc/core/queue.c | 125
-rw-r--r--  drivers/mmc/core/queue.h | 6
-rw-r--r--  drivers/mmc/host/cavium.c | 2
-rw-r--r--  drivers/mmc/host/meson-gx-mmc.c | 26
-rw-r--r--  drivers/mmc/host/pxamci.c | 6
-rw-r--r--  drivers/mmc/host/sdhci-xenon.c | 24
-rw-r--r--  drivers/mmc/host/sdhci-xenon.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 7
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/Makefile | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 5
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 136
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/Kconfig | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 25
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 131
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 544
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 26
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c | 18
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 67
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c | 137
-rw-r--r--  drivers/net/ethernet/intel/e1000e/defines.h | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.c | 11
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 75
-rw-r--r--  drivers/net/ethernet/intel/e1000e/param.c | 16
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 107
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 42
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 20
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 134
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 20
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_register.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 49
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 11
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 27
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 3
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 13
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 11
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h | 3
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf.h | 32
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c | 37
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 9
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 22
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 27
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 70
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 11
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 239
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 19
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_main.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_resources.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/qp.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 853
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 101
-rw-r--r--  drivers/net/ethernet/netronome/nfp/Makefile | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/jit.c | 403
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/main.c | 8
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/main.h | 49
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 245
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 43
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c | 39
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 20
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_app.h | 8
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_asm.c | 254
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_asm.h | 293
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 690
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.h | 33
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c | 102
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.h | 10
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c | 14
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 11
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 96
-rw-r--r--  drivers/net/phy/Kconfig | 5
-rw-r--r--  drivers/net/phy/Makefile | 1
-rw-r--r--  drivers/net/phy/uPD60620.c | 109
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 20
-rw-r--r--  drivers/net/usb/cdc_ether.c | 13
-rw-r--r--  drivers/nvme/host/core.c | 2
-rw-r--r--  drivers/nvme/host/pci.c | 14
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 2
-rw-r--r--  drivers/scsi/libiscsi.c | 8
-rw-r--r--  drivers/scsi/scsi_scan.c | 3
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 16
-rw-r--r--  drivers/scsi/sd.c | 35
-rw-r--r--  drivers/thunderbolt/nhi.c | 2
-rw-r--r--  drivers/thunderbolt/xdomain.c | 3
-rw-r--r--  drivers/vhost/net.c | 12
145 files changed, 4440 insertions, 1915 deletions
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 9565d572f8dd..de56394dd161 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1178,12 +1178,44 @@ dev_put:
return ret;
}
+static bool __init iort_enable_acs(struct acpi_iort_node *iort_node)
+{
+ if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
+ struct acpi_iort_node *parent;
+ struct acpi_iort_id_mapping *map;
+ int i;
+
+ map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
+ iort_node->mapping_offset);
+
+ for (i = 0; i < iort_node->mapping_count; i++, map++) {
+ if (!map->output_reference)
+ continue;
+
+ parent = ACPI_ADD_PTR(struct acpi_iort_node,
+ iort_table, map->output_reference);
+ /*
+ * If we detect a RC->SMMU mapping, make sure
+ * we enable ACS on the system.
+ */
+ if ((parent->type == ACPI_IORT_NODE_SMMU) ||
+ (parent->type == ACPI_IORT_NODE_SMMU_V3)) {
+ pci_request_acs();
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
static void __init iort_init_platform_devices(void)
{
struct acpi_iort_node *iort_node, *iort_end;
struct acpi_table_iort *iort;
struct fwnode_handle *fwnode;
int i, ret;
+ bool acs_enabled = false;
/*
* iort_table and iort both point to the start of IORT table, but
@@ -1203,6 +1235,9 @@ static void __init iort_init_platform_devices(void)
return;
}
+ if (!acs_enabled)
+ acs_enabled = iort_enable_acs(iort_node);
+
if ((iort_node->type == ACPI_IORT_NODE_SMMU) ||
(iort_node->type == ACPI_IORT_NODE_SMMU_V3)) {
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 4a438b8abe27..2dfe99b328f8 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -17,7 +17,7 @@ if BLK_DEV
config BLK_DEV_NULL_BLK
tristate "Null test block driver"
- depends on CONFIGFS_FS
+ select CONFIGFS_FS
config BLK_DEV_FD
tristate "Normal floppy disk support"
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 3684e21d543f..883dfebd3014 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -820,9 +820,13 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
* appropriate.
*/
ret = nbd_handle_cmd(cmd, hctx->queue_num);
+ if (ret < 0)
+ ret = BLK_STS_IOERR;
+ else if (!ret)
+ ret = BLK_STS_OK;
complete(&cmd->send_complete);
- return ret < 0 ? BLK_STS_IOERR : BLK_STS_OK;
+ return ret;
}
static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
diff --git a/drivers/clk/clk-bulk.c b/drivers/clk/clk-bulk.c
index c834f5abfc49..4c10456f8a32 100644
--- a/drivers/clk/clk-bulk.c
+++ b/drivers/clk/clk-bulk.c
@@ -105,6 +105,7 @@ err:
return ret;
}
+EXPORT_SYMBOL_GPL(clk_bulk_prepare);
#endif /* CONFIG_HAVE_CLK_PREPARE */
diff --git a/drivers/clk/rockchip/clk-rk3128.c b/drivers/clk/rockchip/clk-rk3128.c
index 62d7854e4b87..5970a50671b9 100644
--- a/drivers/clk/rockchip/clk-rk3128.c
+++ b/drivers/clk/rockchip/clk-rk3128.c
@@ -315,13 +315,13 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
RK2928_CLKGATE_CON(10), 8, GFLAGS),
GATE(SCLK_PVTM_CORE, "clk_pvtm_core", "xin24m", 0,
- RK2928_CLKGATE_CON(10), 8, GFLAGS),
+ RK2928_CLKGATE_CON(10), 0, GFLAGS),
GATE(SCLK_PVTM_GPU, "clk_pvtm_gpu", "xin24m", 0,
- RK2928_CLKGATE_CON(10), 8, GFLAGS),
+ RK2928_CLKGATE_CON(10), 1, GFLAGS),
GATE(SCLK_PVTM_FUNC, "clk_pvtm_func", "xin24m", 0,
- RK2928_CLKGATE_CON(10), 8, GFLAGS),
+ RK2928_CLKGATE_CON(10), 2, GFLAGS),
GATE(SCLK_MIPI_24M, "clk_mipi_24m", "xin24m", CLK_IGNORE_UNUSED,
- RK2928_CLKGATE_CON(10), 8, GFLAGS),
+ RK2928_CLKGATE_CON(2), 15, GFLAGS),
COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0,
RK2928_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 6, DFLAGS,
@@ -541,7 +541,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
GATE(0, "pclk_grf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 4, GFLAGS),
GATE(0, "pclk_mipiphy", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 0, GFLAGS),
- GATE(0, "pclk_pmu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 2, GFLAGS),
+ GATE(0, "pclk_pmu", "pclk_pmu_pre", 0, RK2928_CLKGATE_CON(9), 2, GFLAGS),
GATE(0, "pclk_pmu_niu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 3, GFLAGS),
/* PD_MMC */
@@ -577,6 +577,8 @@ static const char *const rk3128_critical_clocks[] __initconst = {
"aclk_peri",
"hclk_peri",
"pclk_peri",
+ "pclk_pmu",
+ "sclk_timer5",
};
static struct rockchip_clk_provider *__init rk3128_common_clk_init(struct device_node *np)
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index e40b77583c47..d8d3cb67b402 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -294,6 +294,18 @@ static const struct samsung_clk_reg_dump src_mask_suspend_e4210[] = {
#define PLL_ENABLED (1 << 31)
#define PLL_LOCKED (1 << 29)
+static void exynos4_clk_enable_pll(u32 reg)
+{
+ u32 pll_con = readl(reg_base + reg);
+ pll_con |= PLL_ENABLED;
+ writel(pll_con, reg_base + reg);
+
+ while (!(pll_con & PLL_LOCKED)) {
+ cpu_relax();
+ pll_con = readl(reg_base + reg);
+ }
+}
+
static void exynos4_clk_wait_for_pll(u32 reg)
{
u32 pll_con;
@@ -315,6 +327,9 @@ static int exynos4_clk_suspend(void)
samsung_clk_save(reg_base, exynos4_save_pll,
ARRAY_SIZE(exynos4_clk_pll_regs));
+ exynos4_clk_enable_pll(EPLL_CON0);
+ exynos4_clk_enable_pll(VPLL_CON0);
+
if (exynos4_soc == EXYNOS4210) {
samsung_clk_save(reg_base, exynos4_save_soc,
ARRAY_SIZE(exynos4210_clk_save));
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index d805b6e6fe71..27743be5b768 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -606,11 +606,6 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
connector->encoder->base.id,
connector->encoder->name);
- /* ELD Conn_Type */
- connector->eld[5] &= ~(3 << 2);
- if (intel_crtc_has_dp_encoder(crtc_state))
- connector->eld[5] |= (1 << 2);
-
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
if (dev_priv->display.audio_codec_enable)
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 183e87e8ea31..00c6aee0a9a1 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1163,6 +1163,13 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
+ if (port == PORT_A && is_dvi) {
+ DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
+ is_hdmi ? "/HDMI" : "");
+ is_dvi = false;
+ is_hdmi = false;
+ }
+
info->supports_dvi = is_dvi;
info->supports_hdmi = is_hdmi;
info->supports_dp = is_dp;
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 965988f79a55..92c1f8e166dc 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -216,7 +216,7 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
mask |= DC_STATE_DEBUG_MASK_CORES;
/* The below bit doesn't need to be cleared ever afterwards */
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 4b4fd1f8110b..476681d5940c 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1655,7 +1655,8 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
out:
if (ret && IS_GEN9_LP(dev_priv)) {
tmp = I915_READ(BXT_PHY_CTL(port));
- if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK |
+ if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
+ BXT_PHY_LANE_POWERDOWN_ACK |
BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
DRM_ERROR("Port %c enabled but PHY powered down? "
"(PHY_CTL %08x)\n", port_name(port), tmp);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 00cd17c76fdc..64f7b51ed97c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12359,7 +12359,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_crtc *crtc;
struct intel_crtc_state *intel_cstate;
- bool hw_check = intel_state->modeset;
u64 put_domains[I915_MAX_PIPES] = {};
unsigned crtc_vblank_mask = 0;
int i;
@@ -12376,7 +12375,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (needs_modeset(new_crtc_state) ||
to_intel_crtc_state(new_crtc_state)->update_pipe) {
- hw_check = true;
put_domains[to_intel_crtc(crtc)->pipe] =
modeset_get_crtc_power_domains(crtc,
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 09b670929786..de38d014ed39 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -208,12 +208,6 @@ static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
},
};
-static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info)
-{
- return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) |
- BIT(phy_info->channel[DPIO_CH0].port);
-}
-
static const struct bxt_ddi_phy_info *
bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
{
@@ -313,7 +307,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
const struct bxt_ddi_phy_info *phy_info;
- enum port port;
phy_info = bxt_get_phy_info(dev_priv, phy);
@@ -335,19 +328,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
return false;
}
- for_each_port_masked(port, bxt_phy_port_mask(phy_info)) {
- u32 tmp = I915_READ(BXT_PHY_CTL(port));
-
- if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
- DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
- "for port %c powered down "
- "(PHY_CTL %08x)\n",
- phy, port_name(port), tmp);
-
- return false;
- }
- }
-
return true;
}
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 951e834dd274..28a778b785ac 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -30,6 +30,21 @@
#include "intel_drv.h"
#include "i915_drv.h"
+static void intel_connector_update_eld_conn_type(struct drm_connector *connector)
+{
+ u8 conn_type;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ conn_type = DRM_ELD_CONN_TYPE_DP;
+ } else {
+ conn_type = DRM_ELD_CONN_TYPE_HDMI;
+ }
+
+ connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] &= ~DRM_ELD_CONN_TYPE_MASK;
+ connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= conn_type;
+}
+
/**
* intel_connector_update_modes - update connector from edid
* @connector: DRM connector device to use
@@ -44,6 +59,8 @@ int intel_connector_update_modes(struct drm_connector *connector,
ret = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
+ intel_connector_update_eld_conn_type(connector);
+
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index b66d8e136aa3..b3a087cb0860 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -2782,6 +2782,9 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
/* 6. Enable DBUF */
gen9_dbuf_enable(dev_priv);
+
+ if (resume && dev_priv->csr.dmc_payload)
+ intel_csr_load_program(dev_priv);
}
#undef CNL_PROCMON_IDX
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 9ea6cd5a1370..3cf1a6932fac 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -302,26 +302,29 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
hdmi->mod_clk = devm_clk_get(dev, "mod");
if (IS_ERR(hdmi->mod_clk)) {
dev_err(dev, "Couldn't get the HDMI mod clock\n");
- return PTR_ERR(hdmi->mod_clk);
+ ret = PTR_ERR(hdmi->mod_clk);
+ goto err_disable_bus_clk;
}
clk_prepare_enable(hdmi->mod_clk);
hdmi->pll0_clk = devm_clk_get(dev, "pll-0");
if (IS_ERR(hdmi->pll0_clk)) {
dev_err(dev, "Couldn't get the HDMI PLL 0 clock\n");
- return PTR_ERR(hdmi->pll0_clk);
+ ret = PTR_ERR(hdmi->pll0_clk);
+ goto err_disable_mod_clk;
}
hdmi->pll1_clk = devm_clk_get(dev, "pll-1");
if (IS_ERR(hdmi->pll1_clk)) {
dev_err(dev, "Couldn't get the HDMI PLL 1 clock\n");
- return PTR_ERR(hdmi->pll1_clk);
+ ret = PTR_ERR(hdmi->pll1_clk);
+ goto err_disable_mod_clk;
}
ret = sun4i_tmds_create(hdmi);
if (ret) {
dev_err(dev, "Couldn't create the TMDS clock\n");
- return ret;
+ goto err_disable_mod_clk;
}
writel(SUN4I_HDMI_CTRL_ENABLE, hdmi->base + SUN4I_HDMI_CTRL_REG);
@@ -362,7 +365,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
ret = sun4i_hdmi_i2c_create(dev, hdmi);
if (ret) {
dev_err(dev, "Couldn't create the HDMI I2C adapter\n");
- return ret;
+ goto err_disable_mod_clk;
}
drm_encoder_helper_add(&hdmi->encoder,
@@ -422,6 +425,10 @@ err_cleanup_connector:
drm_encoder_cleanup(&hdmi->encoder);
err_del_i2c_adapter:
i2c_del_adapter(hdmi->i2c);
+err_disable_mod_clk:
+ clk_disable_unprepare(hdmi->mod_clk);
+err_disable_bus_clk:
+ clk_disable_unprepare(hdmi->bus_clk);
return ret;
}
@@ -434,6 +441,8 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master,
drm_connector_cleanup(&hdmi->connector);
drm_encoder_cleanup(&hdmi->encoder);
i2c_del_adapter(hdmi->i2c);
+ clk_disable_unprepare(hdmi->mod_clk);
+ clk_disable_unprepare(hdmi->bus_clk);
}
static const struct component_ops sun4i_hdmi_ops = {
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index 9c0dbb8191ad..e1be61095532 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -630,7 +630,7 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
sizeof(struct slimpro_resp_msg) * ASYNC_MSG_FIFO_SIZE,
GFP_KERNEL);
if (rc)
- goto out_mbox_free;
+ return -ENOMEM;
INIT_WORK(&ctx->workq, xgene_hwmon_evt_work);
@@ -646,7 +646,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
if (IS_ERR(ctx->mbox_chan)) {
dev_err(&pdev->dev,
"SLIMpro mailbox channel request failed\n");
- return -ENODEV;
+ rc = -ENODEV;
+ goto out_mbox_free;
}
} else {
struct acpi_pcct_hw_reduced *cppc_ss;
@@ -654,7 +655,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
if (device_property_read_u32(&pdev->dev, "pcc-channel",
&ctx->mbox_idx)) {
dev_err(&pdev->dev, "no pcc-channel property\n");
- return -ENODEV;
+ rc = -ENODEV;
+ goto out_mbox_free;
}
cl->rx_callback = xgene_hwmon_pcc_rx_cb;
@@ -662,7 +664,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
if (IS_ERR(ctx->mbox_chan)) {
dev_err(&pdev->dev,
"PPC channel request failed\n");
- return -ENODEV;
+ rc = -ENODEV;
+ goto out_mbox_free;
}
/*
@@ -675,13 +678,13 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
if (!cppc_ss) {
dev_err(&pdev->dev, "PPC subspace not found\n");
rc = -ENODEV;
- goto out_mbox_free;
+ goto out;
}
if (!ctx->mbox_chan->mbox->txdone_irq) {
dev_err(&pdev->dev, "PCC IRQ not supported\n");
rc = -ENODEV;
- goto out_mbox_free;
+ goto out;
}
/*
@@ -696,14 +699,14 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
} else {
dev_err(&pdev->dev, "Failed to get PCC comm region\n");
rc = -ENODEV;
- goto out_mbox_free;
+ goto out;
}
if (!ctx->pcc_comm_addr) {
dev_err(&pdev->dev,
"Failed to ioremap PCC comm region\n");
rc = -ENOMEM;
- goto out_mbox_free;
+ goto out;
}
/*
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index c06dce2c1da7..45a3f3ca29b3 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -131,6 +131,7 @@ config I2C_I801
Gemini Lake (SOC)
Cannon Lake-H (PCH)
Cannon Lake-LP (PCH)
+ Cedar Fork (PCH)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index e114e4e00d29..9e12a53ef7b8 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -68,6 +68,7 @@
* Gemini Lake (SOC) 0x31d4 32 hard yes yes yes
* Cannon Lake-H (PCH) 0xa323 32 hard yes yes yes
* Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes
+ * Cedar Fork (PCH) 0x18df 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -204,6 +205,7 @@
/* Older devices have their ID defined in <linux/pci_ids.h> */
#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12
+#define PCI_DEVICE_ID_INTEL_CDF_SMBUS 0x18df
#define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df
#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22
@@ -1025,6 +1027,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CDF_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) },
@@ -1513,6 +1516,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
case PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS:
case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS:
case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
+ case PCI_DEVICE_ID_INTEL_CDF_SMBUS:
case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
priv->features |= FEATURE_I2C_BLOCK_READ;
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index 22e08ae1704f..25fcc3c1e32b 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -627,6 +627,7 @@ static const struct dev_pm_ops sprd_i2c_pm_ops = {
static const struct of_device_id sprd_i2c_of_match[] = {
{ .compatible = "sprd,sc9860-i2c", },
+ {},
};
static struct platform_driver sprd_i2c_driver = {
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 47c67b0ca896..d4a6e9c2e9aa 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -215,7 +215,7 @@ struct stm32f7_i2c_dev {
unsigned int msg_num;
unsigned int msg_id;
struct stm32f7_i2c_msg f7_msg;
- struct stm32f7_i2c_setup *setup;
+ struct stm32f7_i2c_setup setup;
struct stm32f7_i2c_timings timing;
};
@@ -265,7 +265,7 @@ static struct stm32f7_i2c_spec i2c_specs[] = {
},
};
-struct stm32f7_i2c_setup stm32f7_setup = {
+static const struct stm32f7_i2c_setup stm32f7_setup = {
.rise_time = STM32F7_I2C_RISE_TIME_DEFAULT,
.fall_time = STM32F7_I2C_FALL_TIME_DEFAULT,
.dnf = STM32F7_I2C_DNF_DEFAULT,
@@ -537,7 +537,7 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev)
writel_relaxed(timing, i2c_dev->base + STM32F7_I2C_TIMINGR);
/* Enable I2C */
- if (i2c_dev->setup->analog_filter)
+ if (i2c_dev->setup.analog_filter)
stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_ANFOFF);
else
@@ -887,22 +887,19 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
}
setup = of_device_get_match_data(&pdev->dev);
- i2c_dev->setup->rise_time = setup->rise_time;
- i2c_dev->setup->fall_time = setup->fall_time;
- i2c_dev->setup->dnf = setup->dnf;
- i2c_dev->setup->analog_filter = setup->analog_filter;
+ i2c_dev->setup = *setup;
ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns",
&rise_time);
if (!ret)
- i2c_dev->setup->rise_time = rise_time;
+ i2c_dev->setup.rise_time = rise_time;
ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-falling-time-ns",
&fall_time);
if (!ret)
- i2c_dev->setup->fall_time = fall_time;
+ i2c_dev->setup.fall_time = fall_time;
- ret = stm32f7_i2c_setup_timing(i2c_dev, i2c_dev->setup);
+ ret = stm32f7_i2c_setup_timing(i2c_dev, &i2c_dev->setup);
if (ret)
goto clk_free;
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 01b2adfd8226..eaf39e5db08b 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1451,6 +1451,7 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
if (hwif_init(hwif) == 0) {
printk(KERN_INFO "%s: failed to initialize IDE "
"interface\n", hwif->name);
+ device_unregister(hwif->portdev);
device_unregister(&hwif->gendev);
ide_disable_port(hwif);
continue;
diff --git a/drivers/ide/ide-scan-pci.c b/drivers/ide/ide-scan-pci.c
index 86aa88aeb3a6..acf874800ca4 100644
--- a/drivers/ide/ide-scan-pci.c
+++ b/drivers/ide/ide-scan-pci.c
@@ -56,6 +56,7 @@ static int __init ide_scan_pcidev(struct pci_dev *dev)
{
struct list_head *l;
struct pci_driver *d;
+ int ret;
list_for_each(l, &ide_pci_drivers) {
d = list_entry(l, struct pci_driver, node);
@@ -63,10 +64,14 @@ static int __init ide_scan_pcidev(struct pci_dev *dev)
const struct pci_device_id *id =
pci_match_id(d->id_table, dev);
- if (id != NULL && d->probe(dev, id) >= 0) {
- dev->driver = d;
- pci_dev_get(dev);
- return 1;
+ if (id != NULL) {
+ pci_assign_irq(dev);
+ ret = d->probe(dev, id);
+ if (ret >= 0) {
+ dev->driver = d;
+ pci_dev_get(dev);
+ return 1;
+ }
}
}
}
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index 112d2fe1bcdb..fdc8e813170c 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -179,6 +179,7 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
/**
* ide_pci_enable - do PCI enables
* @dev: PCI device
+ * @bars: PCI BARs mask
* @d: IDE port info
*
* Enable the IDE PCI device. We attempt to enable the device in full
@@ -189,9 +190,10 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
* Returns zero on success or an error code
*/
-static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
+static int ide_pci_enable(struct pci_dev *dev, int bars,
+ const struct ide_port_info *d)
{
- int ret, bars;
+ int ret;
if (pci_enable_device(dev)) {
ret = pci_enable_device_io(dev);
@@ -216,18 +218,6 @@ static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
goto out;
}
- if (d->host_flags & IDE_HFLAG_SINGLE)
- bars = (1 << 2) - 1;
- else
- bars = (1 << 4) - 1;
-
- if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
- if (d->host_flags & IDE_HFLAG_CS5520)
- bars |= (1 << 2);
- else
- bars |= (1 << 4);
- }
-
ret = pci_request_selected_regions(dev, bars, d->name);
if (ret < 0)
printk(KERN_ERR "%s %s: can't reserve resources\n",
@@ -403,6 +393,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
/**
* ide_setup_pci_controller - set up IDE PCI
* @dev: PCI device
+ * @bars: PCI BARs mask
* @d: IDE port info
* @noisy: verbose flag
*
@@ -411,7 +402,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
* and enables it if need be
*/
-static int ide_setup_pci_controller(struct pci_dev *dev,
+static int ide_setup_pci_controller(struct pci_dev *dev, int bars,
const struct ide_port_info *d, int noisy)
{
int ret;
@@ -420,7 +411,7 @@ static int ide_setup_pci_controller(struct pci_dev *dev,
if (noisy)
ide_setup_pci_noise(dev, d);
- ret = ide_pci_enable(dev, d);
+ ret = ide_pci_enable(dev, bars, d);
if (ret < 0)
goto out;
@@ -428,16 +419,20 @@ static int ide_setup_pci_controller(struct pci_dev *dev,
if (ret < 0) {
printk(KERN_ERR "%s %s: error accessing PCI regs\n",
d->name, pci_name(dev));
- goto out;
+ goto out_free_bars;
}
if (!(pcicmd & PCI_COMMAND_IO)) { /* is device disabled? */
ret = ide_pci_configure(dev, d);
if (ret < 0)
- goto out;
+ goto out_free_bars;
printk(KERN_INFO "%s %s: device enabled (Linux)\n",
d->name, pci_name(dev));
}
+ goto out;
+
+out_free_bars:
+ pci_release_selected_regions(dev, bars);
out:
return ret;
}
@@ -540,13 +535,28 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
{
struct pci_dev *pdev[] = { dev1, dev2 };
struct ide_host *host;
- int ret, i, n_ports = dev2 ? 4 : 2;
+ int ret, i, n_ports = dev2 ? 4 : 2, bars;
struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
+ if (d->host_flags & IDE_HFLAG_SINGLE)
+ bars = (1 << 2) - 1;
+ else
+ bars = (1 << 4) - 1;
+
+ if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
+ if (d->host_flags & IDE_HFLAG_CS5520)
+ bars |= (1 << 2);
+ else
+ bars |= (1 << 4);
+ }
+
for (i = 0; i < n_ports / 2; i++) {
- ret = ide_setup_pci_controller(pdev[i], d, !i);
- if (ret < 0)
+ ret = ide_setup_pci_controller(pdev[i], bars, d, !i);
+ if (ret < 0) {
+ if (i == 1)
+ pci_release_selected_regions(pdev[0], bars);
goto out;
+ }
ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]);
}
@@ -554,7 +564,7 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
host = ide_host_alloc(d, hws, n_ports);
if (host == NULL) {
ret = -ENOMEM;
- goto out;
+ goto out_free_bars;
}
host->dev[0] = &dev1->dev;
@@ -576,7 +586,7 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
* do_ide_setup_pci_device() on the first device!
*/
if (ret < 0)
- goto out;
+ goto out_free_bars;
/* fixup IRQ */
if (ide_pci_is_in_compatibility_mode(pdev[i])) {
@@ -589,6 +599,13 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
ret = ide_host_register(host, d, hws);
if (ret)
ide_host_free(host);
+ else
+ goto out;
+
+out_free_bars:
+ i = n_ports / 2;
+ while (i--)
+ pci_release_selected_regions(pdev[i], bars);
out:
return ret;
}
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 30825bb9b8e9..8861c052155a 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -100,6 +100,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
if (ret)
goto pid_query_error;
+ nlmsg_end(skb, nlh);
+
pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n",
__func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name);
@@ -170,6 +172,8 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
&pm_msg->loc_addr, IWPM_NLA_MANAGE_ADDR);
if (ret)
goto add_mapping_error;
+
+ nlmsg_end(skb, nlh);
nlmsg_request->req_buffer = pm_msg;
ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
@@ -246,6 +250,8 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
&pm_msg->rem_addr, IWPM_NLA_QUERY_REMOTE_ADDR);
if (ret)
goto query_mapping_error;
+
+ nlmsg_end(skb, nlh);
nlmsg_request->req_buffer = pm_msg;
ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
@@ -308,6 +314,8 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
if (ret)
goto remove_mapping_error;
+ nlmsg_end(skb, nlh);
+
ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
if (ret) {
skb = NULL; /* skb is freed in the netlink send-op handling */
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index c81c55942626..3c4faadb8cdd 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -597,6 +597,9 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
&mapping_num, IWPM_NLA_MAPINFO_SEND_NUM);
if (ret)
goto mapinfo_num_error;
+
+ nlmsg_end(skb, nlh);
+
ret = rdma_nl_unicast(skb, iwpm_pid);
if (ret) {
skb = NULL;
@@ -678,6 +681,8 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
if (ret)
goto send_mapping_info_unlock;
+ nlmsg_end(skb, nlh);
+
iwpm_print_sockaddr(&map_info->local_sockaddr,
"send_mapping_info: Local sockaddr:");
iwpm_print_sockaddr(&map_info->mapped_sockaddr,
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index d1f5345f04f0..42ca5346777d 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -48,7 +48,7 @@
* @wqe: cqp wqe for header
* @header: header for the cqp wqe
*/
-static inline void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
+void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
{
wmb(); /* make sure WQE is populated before polarity is set */
set_64bit_val(wqe, 24, header);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h
index e217a1259f57..5498ad01c280 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_p.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_p.h
@@ -59,6 +59,8 @@ enum i40iw_status_code i40iw_sc_mr_fast_register(struct i40iw_sc_qp *qp,
struct i40iw_fast_reg_stag_info *info,
bool post_sq);
+void i40iw_insert_wqe_hdr(u64 *wqe, u64 header);
+
/* HMC/FPM functions */
enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev,
u8 hmc_fn_id);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index c2cab20c4bc5..59f70676f0e0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -123,12 +123,11 @@ static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
get_64bit_val(wqe, 24, &offset24);
offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
- set_64bit_val(wqe, 24, offset24);
set_64bit_val(wqe, 0, buf->mem.pa);
set_64bit_val(wqe, 8,
LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));
- set_64bit_val(wqe, 24, offset24);
+ i40iw_insert_wqe_hdr(wqe, offset24);
}
/**
@@ -409,9 +408,7 @@ enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
set_64bit_val(wqe, 16, header[0]);
- /* Ensure all data is written before writing valid bit */
- wmb();
- set_64bit_val(wqe, 24, header[1]);
+ i40iw_insert_wqe_hdr(wqe, header[1]);
i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
i40iw_qp_post_wr(&qp->qp_uk);
@@ -539,7 +536,7 @@ static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct
LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) |
LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
- set_64bit_val(wqe, 24, header);
+ i40iw_insert_wqe_hdr(wqe, header);
i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32);
i40iw_sc_cqp_post_sq(cqp);
@@ -655,7 +652,7 @@ static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct
LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
- set_64bit_val(wqe, 24, header);
+ i40iw_insert_wqe_hdr(wqe, header);
i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
wqe, I40IW_CQP_WQE_SIZE * 8);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 28b3d02d511b..62be0a41ad0b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -826,12 +826,14 @@ static int i40iw_query_qp(struct ib_qp *ibqp,
attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
+ attr->port_num = 1;
init_attr->event_handler = iwqp->ibqp.event_handler;
init_attr->qp_context = iwqp->ibqp.qp_context;
init_attr->send_cq = iwqp->ibqp.send_cq;
init_attr->recv_cq = iwqp->ibqp.recv_cq;
init_attr->srq = iwqp->ibqp.srq;
init_attr->cap = attr->cap;
+ init_attr->port_num = 1;
return 0;
}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d6fbad8f34aa..552f7bd4ecc3 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -4174,9 +4174,9 @@ err_bfreg:
err_uar_page:
mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
-err_cnt:
- mlx5_ib_cleanup_cong_debugfs(dev);
err_cong:
+ mlx5_ib_cleanup_cong_debugfs(dev);
+err_cnt:
if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
mlx5_ib_dealloc_counters(dev);
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index b2bb42e2805d..254083b524bd 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -387,7 +387,7 @@ struct qedr_qp {
u8 wqe_size;
u8 smac[ETH_ALEN];
- u16 vlan_id;
+ u16 vlan;
int rc;
} *rqe_wr_id;
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index 4689e802b332..ad8965397cf7 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -105,7 +105,7 @@ void qedr_ll2_complete_rx_packet(void *cxt,
qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
-EINVAL : 0;
- qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan;
+ qp->rqe_wr_id[qp->rq.gsi_cons].vlan = data->vlan;
/* note: length stands for data length i.e. GRH is excluded */
qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
data->length.data_length;
@@ -694,6 +694,7 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
struct qedr_cq *cq = get_qedr_cq(ibcq);
struct qedr_qp *qp = dev->gsi_qp;
unsigned long flags;
+ u16 vlan_id;
int i = 0;
spin_lock_irqsave(&cq->cq_lock, flags);
@@ -712,9 +713,14 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
wc[i].wc_flags |= IB_WC_WITH_SMAC;
- if (qp->rqe_wr_id[qp->rq.cons].vlan_id) {
+
+ vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan &
+ VLAN_VID_MASK;
+ if (vlan_id) {
wc[i].wc_flags |= IB_WC_WITH_VLAN;
- wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id;
+ wc[i].vlan_id = vlan_id;
+ wc[i].sl = (qp->rqe_wr_id[qp->rq.cons].vlan &
+ VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}
qedr_inc_sw_cons(&qp->rq);
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 7d5286b05036..1841d0359bac 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(closure_put);
void __closure_wake_up(struct closure_waitlist *wait_list)
{
struct llist_node *list;
- struct closure *cl;
+ struct closure *cl, *t;
struct llist_node *reverse = NULL;
list = llist_del_all(&wait_list->list);
@@ -73,7 +73,7 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
reverse = llist_reverse_order(list);
/* Then do the wakeups */
- llist_for_each_entry(cl, reverse, list) {
+ llist_for_each_entry_safe(cl, t, reverse, list) {
closure_set_waiting(cl, 0);
closure_sub(cl, CLOSURE_WAITING + 1);
}
diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c
index 5dba23ca2e5f..dc9bc1807fdf 100644
--- a/drivers/misc/cxl/cxllib.c
+++ b/drivers/misc/cxl/cxllib.c
@@ -219,8 +219,17 @@ int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)
down_read(&mm->mmap_sem);
- for (dar = addr; dar < addr + size; dar += page_size) {
- if (!vma || dar < vma->vm_start || dar > vma->vm_end) {
+ vma = find_vma(mm, addr);
+ if (!vma) {
+ pr_err("Can't find vma for addr %016llx\n", addr);
+ rc = -EFAULT;
+ goto out;
+ }
+ /* get the size of the pages allocated */
+ page_size = vma_kernel_pagesize(vma);
+
+ for (dar = (addr & ~(page_size - 1)); dar < (addr + size); dar += page_size) {
+ if (dar < vma->vm_start || dar >= vma->vm_end) {
vma = find_vma(mm, addr);
if (!vma) {
pr_err("Can't find vma for addr %016llx\n", addr);
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 29fc1e662891..2ad7b5c69156 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1634,8 +1634,6 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
}
mqrq->areq.mrq = &brq->mrq;
-
- mmc_queue_bounce_pre(mqrq);
}
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
@@ -1829,7 +1827,6 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
brq = &mq_rq->brq;
old_req = mmc_queue_req_to_req(mq_rq);
type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
- mmc_queue_bounce_post(mq_rq);
switch (status) {
case MMC_BLK_SUCCESS:
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index a7eb623f8daa..36217ad5e9b1 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1286,6 +1286,23 @@ out_err:
return err;
}
+static void mmc_select_driver_type(struct mmc_card *card)
+{
+ int card_drv_type, drive_strength, drv_type;
+
+ card_drv_type = card->ext_csd.raw_driver_strength |
+ mmc_driver_type_mask(0);
+
+ drive_strength = mmc_select_drive_strength(card,
+ card->ext_csd.hs200_max_dtr,
+ card_drv_type, &drv_type);
+
+ card->drive_strength = drive_strength;
+
+ if (drv_type)
+ mmc_set_driver_type(card->host, drv_type);
+}
+
static int mmc_select_hs400es(struct mmc_card *card)
{
struct mmc_host *host = card->host;
@@ -1341,6 +1358,8 @@ static int mmc_select_hs400es(struct mmc_card *card)
goto out_err;
}
+ mmc_select_driver_type(card);
+
/* Switch card to HS400 */
val = EXT_CSD_TIMING_HS400 |
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
@@ -1374,23 +1393,6 @@ out_err:
return err;
}
-static void mmc_select_driver_type(struct mmc_card *card)
-{
- int card_drv_type, drive_strength, drv_type;
-
- card_drv_type = card->ext_csd.raw_driver_strength |
- mmc_driver_type_mask(0);
-
- drive_strength = mmc_select_drive_strength(card,
- card->ext_csd.hs200_max_dtr,
- card_drv_type, &drv_type);
-
- card->drive_strength = drive_strength;
-
- if (drv_type)
- mmc_set_driver_type(card->host, drv_type);
-}
-
/*
* For device supporting HS200 mode, the following sequence
* should be done before executing the tuning process.
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 74c663b1c0a7..0a4e77a5ba33 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -23,8 +23,6 @@
#include "core.h"
#include "card.h"
-#define MMC_QUEUE_BOUNCESZ 65536
-
/*
* Prepare a MMC request. This just filters out odd stuff.
*/
@@ -150,26 +148,6 @@ static void mmc_queue_setup_discard(struct request_queue *q,
queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}
-static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
-{
- unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;
-
- if (host->max_segs != 1 || (host->caps & MMC_CAP_NO_BOUNCE_BUFF))
- return 0;
-
- if (bouncesz > host->max_req_size)
- bouncesz = host->max_req_size;
- if (bouncesz > host->max_seg_size)
- bouncesz = host->max_seg_size;
- if (bouncesz > host->max_blk_count * 512)
- bouncesz = host->max_blk_count * 512;
-
- if (bouncesz <= 512)
- return 0;
-
- return bouncesz;
-}
-
/**
* mmc_init_request() - initialize the MMC-specific per-request data
* @q: the request queue
@@ -184,26 +162,9 @@ static int mmc_init_request(struct request_queue *q, struct request *req,
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
- if (card->bouncesz) {
- mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp);
- if (!mq_rq->bounce_buf)
- return -ENOMEM;
- if (card->bouncesz > 512) {
- mq_rq->sg = mmc_alloc_sg(1, gfp);
- if (!mq_rq->sg)
- return -ENOMEM;
- mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512,
- gfp);
- if (!mq_rq->bounce_sg)
- return -ENOMEM;
- }
- } else {
- mq_rq->bounce_buf = NULL;
- mq_rq->bounce_sg = NULL;
- mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
- if (!mq_rq->sg)
- return -ENOMEM;
- }
+ mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
+ if (!mq_rq->sg)
+ return -ENOMEM;
return 0;
}
@@ -212,13 +173,6 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
{
struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
- /* It is OK to kfree(NULL) so this will be smooth */
- kfree(mq_rq->bounce_sg);
- mq_rq->bounce_sg = NULL;
-
- kfree(mq_rq->bounce_buf);
- mq_rq->bounce_buf = NULL;
-
kfree(mq_rq->sg);
mq_rq->sg = NULL;
}
@@ -242,12 +196,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
- /*
- * mmc_init_request() depends on card->bouncesz so it must be calculated
- * before blk_init_allocated_queue() starts allocating requests.
- */
- card->bouncesz = mmc_queue_calc_bouncesz(host);
-
mq->card = card;
mq->queue = blk_alloc_queue(GFP_KERNEL);
if (!mq->queue)
@@ -271,17 +219,11 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
if (mmc_can_erase(card))
mmc_queue_setup_discard(mq->queue, card);
- if (card->bouncesz) {
- blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
- blk_queue_max_segments(mq->queue, card->bouncesz / 512);
- blk_queue_max_segment_size(mq->queue, card->bouncesz);
- } else {
- blk_queue_bounce_limit(mq->queue, limit);
- blk_queue_max_hw_sectors(mq->queue,
- min(host->max_blk_count, host->max_req_size / 512));
- blk_queue_max_segments(mq->queue, host->max_segs);
- blk_queue_max_segment_size(mq->queue, host->max_seg_size);
- }
+ blk_queue_bounce_limit(mq->queue, limit);
+ blk_queue_max_hw_sectors(mq->queue,
+ min(host->max_blk_count, host->max_req_size / 512));
+ blk_queue_max_segments(mq->queue, host->max_segs);
+ blk_queue_max_segment_size(mq->queue, host->max_seg_size);
sema_init(&mq->thread_sem, 1);
@@ -370,56 +312,7 @@ void mmc_queue_resume(struct mmc_queue *mq)
*/
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
- unsigned int sg_len;
- size_t buflen;
- struct scatterlist *sg;
struct request *req = mmc_queue_req_to_req(mqrq);
- int i;
-
- if (!mqrq->bounce_buf)
- return blk_rq_map_sg(mq->queue, req, mqrq->sg);
-
- sg_len = blk_rq_map_sg(mq->queue, req, mqrq->bounce_sg);
-
- mqrq->bounce_sg_len = sg_len;
-
- buflen = 0;
- for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
- buflen += sg->length;
-
- sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
-
- return 1;
-}
-
-/*
- * If writing, bounce the data to the buffer before the request
- * is sent to the host driver
- */
-void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
-{
- if (!mqrq->bounce_buf)
- return;
-
- if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != WRITE)
- return;
-
- sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
- mqrq->bounce_buf, mqrq->sg[0].length);
-}
-
-/*
- * If reading, bounce the data from the buffer after the request
- * has been handled by the host driver
- */
-void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
-{
- if (!mqrq->bounce_buf)
- return;
-
- if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != READ)
- return;
- sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
- mqrq->bounce_buf, mqrq->sg[0].length);
+ return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 04fc89360a7a..f18d3f656baa 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -49,9 +49,6 @@ enum mmc_drv_op {
struct mmc_queue_req {
struct mmc_blk_request brq;
struct scatterlist *sg;
- char *bounce_buf;
- struct scatterlist *bounce_sg;
- unsigned int bounce_sg_len;
struct mmc_async_req areq;
enum mmc_drv_op drv_op;
int drv_op_result;
@@ -81,11 +78,8 @@ extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
extern void mmc_cleanup_queue(struct mmc_queue *);
extern void mmc_queue_suspend(struct mmc_queue *);
extern void mmc_queue_resume(struct mmc_queue *);
-
extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
struct mmc_queue_req *);
-extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
-extern void mmc_queue_bounce_post(struct mmc_queue_req *);
extern int mmc_access_rpmb(struct mmc_queue *);
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
index 27fb625cbcf3..fbd29f00fca0 100644
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -1038,7 +1038,7 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
*/
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD |
- MMC_CAP_3_3V_DDR | MMC_CAP_NO_BOUNCE_BUFF;
+ MMC_CAP_3_3V_DDR;
if (host->use_sg)
mmc->max_segs = 16;
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index c885c2d4b904..85745ef179e2 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -531,8 +531,7 @@ static int meson_mmc_clk_init(struct meson_host *host)
div->shift = __ffs(CLK_DIV_MASK);
div->width = __builtin_popcountl(CLK_DIV_MASK);
div->hw.init = &init;
- div->flags = (CLK_DIVIDER_ONE_BASED |
- CLK_DIVIDER_ROUND_CLOSEST);
+ div->flags = CLK_DIVIDER_ONE_BASED;
clk = devm_clk_register(host->dev, &div->hw);
if (WARN_ON(IS_ERR(clk)))
@@ -717,6 +716,22 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct meson_host *host = mmc_priv(mmc);
+ int ret;
+
+ /*
+ * If this is the initial tuning, try to get a sane Rx starting
+ * phase before doing the actual tuning.
+ */
+ if (!mmc->doing_retune) {
+ ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
+
+ if (ret)
+ return ret;
+ }
+
+ ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk);
+ if (ret)
+ return ret;
return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
}
@@ -746,6 +761,11 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_POWER_UP:
if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+
+ /* Reset phases */
+ clk_set_phase(host->rx_clk, 0);
+ clk_set_phase(host->tx_clk, 270);
+
break;
case MMC_POWER_ON:
@@ -759,8 +779,6 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->vqmmc_enabled = true;
}
- /* Reset rx phase */
- clk_set_phase(host->rx_clk, 0);
break;
}
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 59ab194cb009..c763b404510f 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -702,11 +702,7 @@ static int pxamci_probe(struct platform_device *pdev)
pxamci_init_ocr(host);
- /*
- * This architecture used to disable bounce buffers through its
- * defconfig, now it is done at runtime as a host property.
- */
- mmc->caps = MMC_CAP_NO_BOUNCE_BUFF;
+ mmc->caps = 0;
host->cmdat = 0;
if (!cpu_is_pxa25x()) {
mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index 2eec2e652c53..0842bbc2d7ad 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -466,6 +466,7 @@ static int xenon_probe(struct platform_device *pdev)
{
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_host *host;
+ struct xenon_priv *priv;
int err;
host = sdhci_pltfm_init(pdev, &sdhci_xenon_pdata,
@@ -474,6 +475,7 @@ static int xenon_probe(struct platform_device *pdev)
return PTR_ERR(host);
pltfm_host = sdhci_priv(host);
+ priv = sdhci_pltfm_priv(pltfm_host);
/*
* Link Xenon specific mmc_host_ops function,
@@ -491,9 +493,20 @@ static int xenon_probe(struct platform_device *pdev)
if (err)
goto free_pltfm;
+ priv->axi_clk = devm_clk_get(&pdev->dev, "axi");
+ if (IS_ERR(priv->axi_clk)) {
+ err = PTR_ERR(priv->axi_clk);
+ if (err == -EPROBE_DEFER)
+ goto err_clk;
+ } else {
+ err = clk_prepare_enable(priv->axi_clk);
+ if (err)
+ goto err_clk;
+ }
+
err = mmc_of_parse(host->mmc);
if (err)
- goto err_clk;
+ goto err_clk_axi;
sdhci_get_of_property(pdev);
@@ -502,11 +515,11 @@ static int xenon_probe(struct platform_device *pdev)
/* Xenon specific dt parse */
err = xenon_probe_dt(pdev);
if (err)
- goto err_clk;
+ goto err_clk_axi;
err = xenon_sdhc_prepare(host);
if (err)
- goto err_clk;
+ goto err_clk_axi;
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
@@ -527,6 +540,8 @@ remove_sdhc:
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
xenon_sdhc_unprepare(host);
+err_clk_axi:
+ clk_disable_unprepare(priv->axi_clk);
err_clk:
clk_disable_unprepare(pltfm_host->clk);
free_pltfm:
@@ -538,6 +553,7 @@ static int xenon_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
pm_runtime_get_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -546,7 +562,7 @@ static int xenon_remove(struct platform_device *pdev)
sdhci_remove_host(host, 0);
xenon_sdhc_unprepare(host);
-
+ clk_disable_unprepare(priv->axi_clk);
clk_disable_unprepare(pltfm_host->clk);
sdhci_pltfm_free(pdev);
diff --git a/drivers/mmc/host/sdhci-xenon.h b/drivers/mmc/host/sdhci-xenon.h
index 2bc0510c0769..9994995c7c56 100644
--- a/drivers/mmc/host/sdhci-xenon.h
+++ b/drivers/mmc/host/sdhci-xenon.h
@@ -83,6 +83,7 @@ struct xenon_priv {
unsigned char bus_width;
unsigned char timing;
unsigned int clock;
+ struct clk *axi_clk;
int phy_type;
/*
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 9ca994d0bab6..3591077a5f6b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1074,11 +1074,6 @@ static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
}
}
-static int bnx2x_ari_enabled(struct pci_dev *dev)
-{
- return dev->bus->self && dev->bus->self->ari_enabled;
-}
-
static int
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
@@ -1212,7 +1207,7 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
err = -EIO;
/* verify ari is enabled */
- if (!bnx2x_ari_enabled(bp->pdev)) {
+ if (!pci_ari_enabled(bp->pdev->bus)) {
BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
index 4f0cb8e1ffc0..457201f409a7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_BNXT) += bnxt_en.o
-bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o bnxt_tc.o
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o
+bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 7dd3d131043a..4730c048ed9b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -23,8 +23,6 @@
#include "bnxt_tc.h"
#include "bnxt_vfr.h"
-#ifdef CONFIG_BNXT_FLOWER_OFFLOAD
-
#define BNXT_FID_INVALID 0xffff
#define VLAN_TCI(vid, prio) ((vid) | ((prio) << VLAN_PRIO_SHIFT))
@@ -833,6 +831,3 @@ void bnxt_shutdown_tc(struct bnxt *bp)
rhashtable_destroy(&tc_info->flow_table);
rhashtable_destroy(&tc_info->l2_table);
}
-
-#else
-#endif
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index d68478afccbf..71989e180289 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -566,8 +566,10 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
return true;
default:
bpf_warn_invalid_xdp_action(action);
+ /* fall through */
case XDP_ABORTED:
trace_xdp_exception(nic->netdev, prog, action);
+ /* fall through */
case XDP_DROP:
/* Check if it's a recycled page, if not
* unmap the DMA mapping.
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index b65ce26ff72f..b3fd1f457639 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -8205,7 +8205,7 @@ struct flash_desc {
u32 size_mb;
};
-static int get_flash_params(struct adapter *adap)
+static int t4_get_flash_params(struct adapter *adap)
{
/* Table for non-Numonix supported flash parts. Numonix parts are left
* to the preexisting code. All flash parts have 64KB sectors.
@@ -8214,40 +8214,136 @@ static int get_flash_params(struct adapter *adap)
{ 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
};
+ unsigned int part, manufacturer;
+ unsigned int density, size;
+ u32 flashid = 0;
int ret;
- u32 info;
+
+ /* Issue a Read ID Command to the Flash part. We decode supported
+ * Flash parts and their sizes from this. There's a newer Query
+ * Command which can retrieve detailed geometry information but many
+ * Flash parts don't support it.
+ */
ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
if (!ret)
- ret = sf1_read(adap, 3, 0, 1, &info);
+ ret = sf1_read(adap, 3, 0, 1, &flashid);
t4_write_reg(adap, SF_OP_A, 0); /* unlock SF */
if (ret)
return ret;
- for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
- if (supported_flash[ret].vendor_and_model_id == info) {
- adap->params.sf_size = supported_flash[ret].size_mb;
+ /* Check to see if it's one of our non-standard supported Flash parts.
+ */
+ for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
+ if (supported_flash[part].vendor_and_model_id == flashid) {
+ adap->params.sf_size = supported_flash[part].size_mb;
adap->params.sf_nsec =
adap->params.sf_size / SF_SEC_SIZE;
- return 0;
+ goto found;
}
- if ((info & 0xff) != 0x20) /* not a Numonix flash */
+ /* Decode Flash part size. The code below looks repetitive with
+ * common encodings, but that's not guaranteed in the JEDEC
+ * specification for the Read JEDEC ID command. The only thing that
+ * we're guaranteed by the JEDEC specification is where the
+ * Manufacturer ID is in the returned result. After that each
+ * Manufacturer ~could~ encode things completely differently.
+ * Note, all Flash parts must have 64KB sectors.
+ */
+ manufacturer = flashid & 0xff;
+ switch (manufacturer) {
+ case 0x20: { /* Micron/Numonix */
+ /* This Density -> Size decoding table is taken from Micron
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x14: /* 1MB */
+ size = 1 << 20;
+ break;
+ case 0x15: /* 2MB */
+ size = 1 << 21;
+ break;
+ case 0x16: /* 4MB */
+ size = 1 << 22;
+ break;
+ case 0x17: /* 8MB */
+ size = 1 << 23;
+ break;
+ case 0x18: /* 16MB */
+ size = 1 << 24;
+ break;
+ case 0x19: /* 32MB */
+ size = 1 << 25;
+ break;
+ case 0x20: /* 64MB */
+ size = 1 << 26;
+ break;
+ case 0x21: /* 128MB */
+ size = 1 << 27;
+ break;
+ case 0x22: /* 256MB */
+ size = 1 << 28;
+ break;
+
+ default:
+ dev_err(adap->pdev_dev, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n",
+ flashid, density);
return -EINVAL;
- info >>= 16; /* log2 of size */
- if (info >= 0x14 && info < 0x18)
- adap->params.sf_nsec = 1 << (info - 16);
- else if (info == 0x18)
- adap->params.sf_nsec = 64;
- else
+ }
+ break;
+ }
+ case 0xc2: { /* Macronix */
+ /* This Density -> Size decoding table is taken from Macronix
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x17: /* 8MB */
+ size = 1 << 23;
+ break;
+ case 0x18: /* 16MB */
+ size = 1 << 24;
+ break;
+ default:
+ dev_err(adap->pdev_dev, "Macronix Flash Part has bad size, ID = %#x, Density code = %#x\n",
+ flashid, density);
+ return -EINVAL;
+ }
+ break;
+ }
+ case 0xef: { /* Winbond */
+ /* This Density -> Size decoding table is taken from Winbond
+ * Data Sheets.
+ */
+ density = (flashid >> 16) & 0xff;
+ switch (density) {
+ case 0x17: /* 8MB */
+ size = 1 << 23;
+ break;
+ case 0x18: /* 16MB */
+ size = 1 << 24;
+ break;
+ default:
+ dev_err(adap->pdev_dev, "Winbond Flash Part has bad size, ID = %#x, Density code = %#x\n",
+ flashid, density);
return -EINVAL;
- adap->params.sf_size = 1 << info;
- adap->params.sf_fw_start =
- t4_read_reg(adap, CIM_BOOT_CFG_A) & BOOTADDR_M;
+ }
+ break;
+ }
+ default:
+ dev_err(adap->pdev_dev, "Unsupported Flash Part, ID = %#x\n",
+ flashid);
+ return -EINVAL;
+ }
+
+ /* Store decoded Flash size and fall through into vetting code. */
+ adap->params.sf_size = size;
+ adap->params.sf_nsec = size / SF_SEC_SIZE;
+found:
if (adap->params.sf_size < FLASH_MIN_SIZE)
- dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
- adap->params.sf_size, FLASH_MIN_SIZE);
+ dev_warn(adap->pdev_dev, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
+ flashid, adap->params.sf_size, FLASH_MIN_SIZE);
return 0;
}
@@ -8285,7 +8381,7 @@ int t4_prep_adapter(struct adapter *adapter)
get_pci_mode(adapter, &adapter->params.pci);
pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));
- ret = get_flash_params(adapter);
+ ret = t4_get_flash_params(adapter);
if (ret < 0) {
dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
return ret;
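For every part in the tables above the decoded size is simply 1 << density
(e.g. Micron density 0x18 -> 1 << 24 = 16 MB, Winbond 0x17 -> 1 << 23 = 8 MB);
the driver keeps explicit per-vendor tables because, as the comment notes,
JEDEC only guarantees where the manufacturer byte sits in the Read ID
response. A compact illustration of that relation (not the driver's code):

/* Illustrative only: valid for the density codes listed in the tables above,
 * where the density byte is the log2 of the part's byte size.
 */
static inline u32 flash_size_from_density(u8 density)
{
	return 1u << density;	/* 0x17 -> 8 MB, 0x18 -> 16 MB, ... 0x22 -> 256 MB */
}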
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 633e9751a25e..8c22bb8c9fbf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -181,6 +181,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x50a7), /* Custom T580-CR */
CH_PCI_ID_TABLE_FENTRY(0x50a8), /* Custom T580-KR */
CH_PCI_ID_TABLE_FENTRY(0x50a9), /* Custom T580-KR */
+ CH_PCI_ID_TABLE_FENTRY(0x50aa), /* Custom T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */
/* T6 adapters:
*/
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 9d7cb0387bf7..30000b6aa7b8 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -78,7 +78,7 @@ config HNS_ENET
config HNS3
tristate "Hisilicon Network Subsystem Support HNS3 (Framework)"
- depends on PCI
+ depends on PCI
---help---
This selects the framework support for Hisilicon Network Subsystem 3.
This layer facilitates clients like ENET, RoCE and user-space ethernet
@@ -87,7 +87,7 @@ config HNS3
config HNS3_HCLGE
tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support"
- depends on PCI_MSI
+ depends on PCI_MSI
depends on HNS3
---help---
This selects the HNS3_HCLGE network acceleration engine & its hardware
@@ -96,7 +96,7 @@ config HNS3_HCLGE
config HNS3_ENET
tristate "Hisilicon HNS3 Ethernet Device Support"
- depends on 64BIT && PCI
+ depends on 64BIT && PCI
depends on HNS3 && HNS3_HCLGE
---help---
This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index c677530841cf..575f50df340c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -339,6 +339,10 @@ struct hnae3_ae_ops {
u8 *hfunc);
int (*set_rss)(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc);
+ int (*set_rss_tuple)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd);
+ int (*get_rss_tuple)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd);
int (*get_tc_size)(struct hnae3_handle *handle);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 8b511e6e0ce9..60960e588b5f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -85,6 +85,15 @@ static int hclge_init_cmd_queue(struct hclge_dev *hdev, int ring_type)
return 0;
}
+void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
+{
+ desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
+ if (is_read)
+ desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
+ else
+ desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
+}
+
void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
enum hclge_opcode_type opcode, bool is_read)
{
@@ -208,7 +217,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
* which will be used for hardware to write back
*/
ntc = hw->cmq.csq.next_to_use;
- opcode = desc[0].opcode;
+ opcode = le16_to_cpu(desc[0].opcode);
while (handle < num) {
desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
*desc_to_use = desc[handle];
@@ -225,7 +234,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
* If the command is sync, wait for the firmware to write back,
* if multi descriptors to be sent, use the first one to check
*/
- if (HCLGE_SEND_SYNC(desc->flag)) {
+ if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
do {
if (hclge_cmd_csq_done(hw))
break;
@@ -244,9 +253,9 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
pr_debug("Get cmd desc:\n");
if (likely(!hclge_is_special_opcode(opcode)))
- desc_ret = desc[handle].retval;
+ desc_ret = le16_to_cpu(desc[handle].retval);
else
- desc_ret = desc[0].retval;
+ desc_ret = le16_to_cpu(desc[0].retval);
if ((enum hclge_cmd_return_status)desc_ret ==
HCLGE_CMD_EXEC_SUCCESS)
@@ -276,15 +285,15 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
return retval;
}
-enum hclge_cmd_status hclge_cmd_query_firmware_version(struct hclge_hw *hw,
- u32 *version)
+static enum hclge_cmd_status hclge_cmd_query_firmware_version(
+ struct hclge_hw *hw, u32 *version)
{
- struct hclge_query_version *resp;
+ struct hclge_query_version_cmd *resp;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
- resp = (struct hclge_query_version *)desc.data;
+ resp = (struct hclge_query_version_cmd *)desc.data;
ret = hclge_cmd_send(hw, &desc, 1);
if (!ret)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 6b6d28eff664..b4373345c2b4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -221,12 +221,12 @@ enum hclge_opcode_type {
#define HCLGE_RCB_INIT_QUERY_TIMEOUT 10
#define HCLGE_RCB_INIT_FLAG_EN_B 0
#define HCLGE_RCB_INIT_FLAG_FINI_B 8
-struct hclge_config_rcb_init {
+struct hclge_config_rcb_init_cmd {
__le16 rcb_init_flag;
u8 rsv[22];
};
-struct hclge_tqp_map {
+struct hclge_tqp_map_cmd {
__le16 tqp_id; /* Absolute tqp id in this pf */
u8 tqp_vf; /* VF id */
#define HCLGE_TQP_MAP_TYPE_PF 0
@@ -246,15 +246,15 @@ enum hclge_int_type {
HCLGE_INT_EVENT,
};
-struct hclge_ctrl_vector_chain {
+struct hclge_ctrl_vector_chain_cmd {
u8 int_vector_id;
u8 int_cause_num;
#define HCLGE_INT_TYPE_S 0
-#define HCLGE_INT_TYPE_M 0x3
+#define HCLGE_INT_TYPE_M GENMASK(1, 0)
#define HCLGE_TQP_ID_S 2
-#define HCLGE_TQP_ID_M (0x7ff << HCLGE_TQP_ID_S)
+#define HCLGE_TQP_ID_M GENMASK(12, 2)
#define HCLGE_INT_GL_IDX_S 13
-#define HCLGE_INT_GL_IDX_M (0x3 << HCLGE_INT_GL_IDX_S)
+#define HCLGE_INT_GL_IDX_M GENMASK(14, 13)
__le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD];
u8 vfid;
u8 rsv;
@@ -263,18 +263,18 @@ struct hclge_ctrl_vector_chain {
#define HCLGE_TC_NUM 8
#define HCLGE_TC0_PRI_BUF_EN_B 15 /* Bit 15 indicate enable or not */
#define HCLGE_BUF_UNIT_S 7 /* Buf size is united by 128 bytes */
-struct hclge_tx_buff_alloc {
+struct hclge_tx_buff_alloc_cmd {
__le16 tx_pkt_buff[HCLGE_TC_NUM];
u8 tx_buff_rsv[8];
};
-struct hclge_rx_priv_buff {
+struct hclge_rx_priv_buff_cmd {
__le16 buf_num[HCLGE_TC_NUM];
__le16 shared_buf;
u8 rsv[6];
};
-struct hclge_query_version {
+struct hclge_query_version_cmd {
__le32 firmware;
__le32 firmware_rsv[5];
};
@@ -328,14 +328,14 @@ struct hclge_pkt_buf_alloc {
};
#define HCLGE_RX_COM_WL_EN_B 15
-struct hclge_rx_com_wl_buf {
+struct hclge_rx_com_wl_buf_cmd {
__le16 high_wl;
__le16 low_wl;
u8 rsv[20];
};
#define HCLGE_RX_PKT_EN_B 15
-struct hclge_rx_pkt_buf {
+struct hclge_rx_pkt_buf_cmd {
__le16 high_pkt;
__le16 low_pkt;
u8 rsv[20];
@@ -348,7 +348,7 @@ struct hclge_rx_pkt_buf {
#define HCLGE_PF_MAC_NUM_MASK 0x3
#define HCLGE_PF_STATE_MAIN BIT(HCLGE_PF_STATE_MAIN_B)
#define HCLGE_PF_STATE_DONE BIT(HCLGE_PF_STATE_DONE_B)
-struct hclge_func_status {
+struct hclge_func_status_cmd {
__le32 vf_rst_state[4];
u8 pf_state;
u8 mac_id;
@@ -359,7 +359,7 @@ struct hclge_func_status {
u8 rsv[2];
};
-struct hclge_pf_res {
+struct hclge_pf_res_cmd {
__le16 tqp_num;
__le16 buf_size;
__le16 msixcap_localid_ba_nic;
@@ -372,30 +372,30 @@ struct hclge_pf_res {
};
#define HCLGE_CFG_OFFSET_S 0
-#define HCLGE_CFG_OFFSET_M 0xfffff /* Byte (8-10.3) */
+#define HCLGE_CFG_OFFSET_M GENMASK(19, 0)
#define HCLGE_CFG_RD_LEN_S 24
-#define HCLGE_CFG_RD_LEN_M (0xf << HCLGE_CFG_RD_LEN_S)
+#define HCLGE_CFG_RD_LEN_M GENMASK(27, 24)
#define HCLGE_CFG_RD_LEN_BYTES 16
#define HCLGE_CFG_RD_LEN_UNIT 4
#define HCLGE_CFG_VMDQ_S 0
-#define HCLGE_CFG_VMDQ_M (0xff << HCLGE_CFG_VMDQ_S)
+#define HCLGE_CFG_VMDQ_M GENMASK(7, 0)
#define HCLGE_CFG_TC_NUM_S 8
-#define HCLGE_CFG_TC_NUM_M (0xff << HCLGE_CFG_TC_NUM_S)
+#define HCLGE_CFG_TC_NUM_M GENMASK(15, 8)
#define HCLGE_CFG_TQP_DESC_N_S 16
-#define HCLGE_CFG_TQP_DESC_N_M (0xffff << HCLGE_CFG_TQP_DESC_N_S)
+#define HCLGE_CFG_TQP_DESC_N_M GENMASK(31, 16)
#define HCLGE_CFG_PHY_ADDR_S 0
-#define HCLGE_CFG_PHY_ADDR_M (0x1f << HCLGE_CFG_PHY_ADDR_S)
+#define HCLGE_CFG_PHY_ADDR_M GENMASK(4, 0)
#define HCLGE_CFG_MEDIA_TP_S 8
-#define HCLGE_CFG_MEDIA_TP_M (0xff << HCLGE_CFG_MEDIA_TP_S)
+#define HCLGE_CFG_MEDIA_TP_M GENMASK(15, 8)
#define HCLGE_CFG_RX_BUF_LEN_S 16
-#define HCLGE_CFG_RX_BUF_LEN_M (0xffff << HCLGE_CFG_RX_BUF_LEN_S)
+#define HCLGE_CFG_RX_BUF_LEN_M GENMASK(31, 16)
#define HCLGE_CFG_MAC_ADDR_H_S 0
-#define HCLGE_CFG_MAC_ADDR_H_M (0xffff << HCLGE_CFG_MAC_ADDR_H_S)
+#define HCLGE_CFG_MAC_ADDR_H_M GENMASK(15, 0)
#define HCLGE_CFG_DEFAULT_SPEED_S 16
-#define HCLGE_CFG_DEFAULT_SPEED_M (0xff << HCLGE_CFG_DEFAULT_SPEED_S)
+#define HCLGE_CFG_DEFAULT_SPEED_M GENMASK(23, 16)
-struct hclge_cfg_param {
+struct hclge_cfg_param_cmd {
__le32 offset;
__le32 rsv;
__le32 param[4];
@@ -405,7 +405,7 @@ struct hclge_cfg_param {
#define HCLGE_DESC_NUM 0x40
#define HCLGE_ALLOC_VALID_B 0
-struct hclge_vf_num {
+struct hclge_vf_num_cmd {
u8 alloc_valid;
u8 rsv[23];
};
@@ -413,13 +413,13 @@ struct hclge_vf_num {
#define HCLGE_RSS_DEFAULT_OUTPORT_B 4
#define HCLGE_RSS_HASH_KEY_OFFSET_B 4
#define HCLGE_RSS_HASH_KEY_NUM 16
-struct hclge_rss_config {
+struct hclge_rss_config_cmd {
u8 hash_config;
u8 rsv[7];
u8 hash_key[HCLGE_RSS_HASH_KEY_NUM];
};
-struct hclge_rss_input_tuple {
+struct hclge_rss_input_tuple_cmd {
u8 ipv4_tcp_en;
u8 ipv4_udp_en;
u8 ipv4_sctp_en;
@@ -433,26 +433,26 @@ struct hclge_rss_input_tuple {
#define HCLGE_RSS_CFG_TBL_SIZE 16
-struct hclge_rss_indirection_table {
- u16 start_table_index;
- u16 rss_set_bitmap;
+struct hclge_rss_indirection_table_cmd {
+ __le16 start_table_index;
+ __le16 rss_set_bitmap;
u8 rsv[4];
u8 rss_result[HCLGE_RSS_CFG_TBL_SIZE];
};
#define HCLGE_RSS_TC_OFFSET_S 0
-#define HCLGE_RSS_TC_OFFSET_M (0x3ff << HCLGE_RSS_TC_OFFSET_S)
+#define HCLGE_RSS_TC_OFFSET_M GENMASK(9, 0)
#define HCLGE_RSS_TC_SIZE_S 12
-#define HCLGE_RSS_TC_SIZE_M (0x7 << HCLGE_RSS_TC_SIZE_S)
+#define HCLGE_RSS_TC_SIZE_M GENMASK(14, 12)
#define HCLGE_RSS_TC_VALID_B 15
-struct hclge_rss_tc_mode {
- u16 rss_tc_mode[HCLGE_MAX_TC_NUM];
+struct hclge_rss_tc_mode_cmd {
+ __le16 rss_tc_mode[HCLGE_MAX_TC_NUM];
u8 rsv[8];
};
#define HCLGE_LINK_STS_B 0
#define HCLGE_LINK_STATUS BIT(HCLGE_LINK_STS_B)
-struct hclge_link_status {
+struct hclge_link_status_cmd {
u8 status;
u8 rsv[23];
};
@@ -467,7 +467,7 @@ struct hclge_promisc_param {
#define HCLGE_PROMISC_EN_UC 0x1
#define HCLGE_PROMISC_EN_MC 0x2
#define HCLGE_PROMISC_EN_BC 0x4
-struct hclge_promisc_cfg {
+struct hclge_promisc_cfg_cmd {
u8 flag;
u8 vf_id;
__le16 rsv0;
@@ -495,18 +495,18 @@ enum hclge_promisc_type {
#define HCLGE_MAC_TX_UNDER_MIN_ERR_B 21
#define HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B 22
-struct hclge_config_mac_mode {
+struct hclge_config_mac_mode_cmd {
__le32 txrx_pad_fcs_loop_en;
u8 rsv[20];
};
#define HCLGE_CFG_SPEED_S 0
-#define HCLGE_CFG_SPEED_M (0x3f << HCLGE_CFG_SPEED_S)
+#define HCLGE_CFG_SPEED_M GENMASK(5, 0)
#define HCLGE_CFG_DUPLEX_B 7
#define HCLGE_CFG_DUPLEX_M BIT(HCLGE_CFG_DUPLEX_B)
-struct hclge_config_mac_speed_dup {
+struct hclge_config_mac_speed_dup_cmd {
u8 speed_dup;
#define HCLGE_CFG_MAC_SPEED_CHANGE_EN_B 0
@@ -518,17 +518,17 @@ struct hclge_config_mac_speed_dup {
#define HCLGE_QUERY_AN_B 0
#define HCLGE_QUERY_DUPLEX_B 2
-#define HCLGE_QUERY_SPEED_M (0x1f << HCLGE_QUERY_SPEED_S)
+#define HCLGE_QUERY_SPEED_M GENMASK(4, 0)
#define HCLGE_QUERY_AN_M BIT(HCLGE_QUERY_AN_B)
#define HCLGE_QUERY_DUPLEX_M BIT(HCLGE_QUERY_DUPLEX_B)
-struct hclge_query_an_speed_dup {
+struct hclge_query_an_speed_dup_cmd {
u8 an_syn_dup_speed;
u8 pause;
u8 rsv[23];
};
-#define HCLGE_RING_ID_MASK 0x3ff
+#define HCLGE_RING_ID_MASK GENMASK(9, 0)
#define HCLGE_TQP_ENABLE_B 0
#define HCLGE_MAC_CFG_AN_EN_B 0
@@ -539,7 +539,7 @@ struct hclge_query_an_speed_dup {
#define HCLGE_MAC_CFG_AN_EN BIT(HCLGE_MAC_CFG_AN_EN_B)
-struct hclge_config_auto_neg {
+struct hclge_config_auto_neg_cmd {
__le32 cfg_an_cmd_flag;
u8 rsv[20];
};
@@ -548,7 +548,7 @@ struct hclge_config_auto_neg {
#define HCLGE_MAC_MAX_MTU 9728
#define HCLGE_MAC_UPLINK_PORT 0x100
-struct hclge_config_max_frm_size {
+struct hclge_config_max_frm_size_cmd {
__le16 max_frm_size;
u8 rsv[22];
};
@@ -565,10 +565,10 @@ enum hclge_mac_vlan_tbl_opcode {
#define HCLGE_MAC_EPORT_SW_EN_B 0xc
#define HCLGE_MAC_EPORT_TYPE_B 0xb
#define HCLGE_MAC_EPORT_VFID_S 0x3
-#define HCLGE_MAC_EPORT_VFID_M (0xff << HCLGE_MAC_EPORT_VFID_S)
+#define HCLGE_MAC_EPORT_VFID_M GENMASK(10, 3)
#define HCLGE_MAC_EPORT_PFID_S 0x0
-#define HCLGE_MAC_EPORT_PFID_M (0x7 << HCLGE_MAC_EPORT_PFID_S)
-struct hclge_mac_vlan_tbl_entry {
+#define HCLGE_MAC_EPORT_PFID_M GENMASK(2, 0)
+struct hclge_mac_vlan_tbl_entry_cmd {
u8 flags;
u8 resp_code;
__le16 vlan_tag;
@@ -583,15 +583,15 @@ struct hclge_mac_vlan_tbl_entry {
};
#define HCLGE_CFG_MTA_MAC_SEL_S 0x0
-#define HCLGE_CFG_MTA_MAC_SEL_M (0x3 << HCLGE_CFG_MTA_MAC_SEL_S)
+#define HCLGE_CFG_MTA_MAC_SEL_M GENMASK(1, 0)
#define HCLGE_CFG_MTA_MAC_EN_B 0x7
-struct hclge_mta_filter_mode {
+struct hclge_mta_filter_mode_cmd {
u8 dmac_sel_en; /* Use lowest 2 bits as sel_mode, bit 7 as enable */
u8 rsv[23];
};
#define HCLGE_CFG_FUNC_MTA_ACCEPT_B 0x0
-struct hclge_cfg_func_mta_filter {
+struct hclge_cfg_func_mta_filter_cmd {
u8 accept; /* Only used lowest 1 bit */
u8 function_id;
u8 rsv[22];
@@ -599,14 +599,14 @@ struct hclge_cfg_func_mta_filter {
#define HCLGE_CFG_MTA_ITEM_ACCEPT_B 0x0
#define HCLGE_CFG_MTA_ITEM_IDX_S 0x0
-#define HCLGE_CFG_MTA_ITEM_IDX_M (0xfff << HCLGE_CFG_MTA_ITEM_IDX_S)
-struct hclge_cfg_func_mta_item {
- u16 item_idx; /* Only used lowest 12 bit */
+#define HCLGE_CFG_MTA_ITEM_IDX_M GENMASK(11, 0)
+struct hclge_cfg_func_mta_item_cmd {
+ __le16 item_idx; /* Only used lowest 12 bit */
u8 accept; /* Only used lowest 1 bit */
u8 rsv[21];
};
-struct hclge_mac_vlan_add {
+struct hclge_mac_vlan_add_cmd {
__le16 flags;
__le16 mac_addr_hi16;
__le32 mac_addr_lo32;
@@ -619,7 +619,7 @@ struct hclge_mac_vlan_add {
};
#define HNS3_MAC_VLAN_CFG_FLAG_BIT 0
-struct hclge_mac_vlan_remove {
+struct hclge_mac_vlan_remove_cmd {
__le16 flags;
__le16 mac_addr_hi16;
__le32 mac_addr_lo32;
@@ -631,21 +631,21 @@ struct hclge_mac_vlan_remove {
u8 rsv[4];
};
-struct hclge_vlan_filter_ctrl {
+struct hclge_vlan_filter_ctrl_cmd {
u8 vlan_type;
u8 vlan_fe;
u8 rsv[22];
};
-struct hclge_vlan_filter_pf_cfg {
+struct hclge_vlan_filter_pf_cfg_cmd {
u8 vlan_offset;
u8 vlan_cfg;
u8 rsv[2];
u8 vlan_offset_bitmap[20];
};
-struct hclge_vlan_filter_vf_cfg {
- u16 vlan_id;
+struct hclge_vlan_filter_vf_cfg_cmd {
+ __le16 vlan_id;
u8 resp_code;
u8 rsv;
u8 vlan_cfg;
@@ -653,14 +653,14 @@ struct hclge_vlan_filter_vf_cfg {
u8 vf_bitmap[16];
};
-struct hclge_cfg_com_tqp_queue {
+struct hclge_cfg_com_tqp_queue_cmd {
__le16 tqp_id;
__le16 stream_id;
u8 enable;
u8 rsv[19];
};
-struct hclge_cfg_tx_queue_pointer {
+struct hclge_cfg_tx_queue_pointer_cmd {
__le16 tqp_id;
__le16 tx_tail;
__le16 tx_head;
@@ -670,12 +670,12 @@ struct hclge_cfg_tx_queue_pointer {
};
#define HCLGE_TSO_MSS_MIN_S 0
-#define HCLGE_TSO_MSS_MIN_M (0x3FFF << HCLGE_TSO_MSS_MIN_S)
+#define HCLGE_TSO_MSS_MIN_M GENMASK(13, 0)
#define HCLGE_TSO_MSS_MAX_S 16
-#define HCLGE_TSO_MSS_MAX_M (0x3FFF << HCLGE_TSO_MSS_MAX_S)
+#define HCLGE_TSO_MSS_MAX_M GENMASK(29, 16)
-struct hclge_cfg_tso_status {
+struct hclge_cfg_tso_status_cmd {
__le16 tso_mss_min;
__le16 tso_mss_max;
u8 rsv[20];
@@ -685,7 +685,7 @@ struct hclge_cfg_tso_status {
#define HCLGE_TSO_MSS_MAX 9668
#define HCLGE_TQP_RESET_B 0
-struct hclge_reset_tqp_queue {
+struct hclge_reset_tqp_queue_cmd {
__le16 tqp_id;
u8 reset_req;
u8 ready_to_reset;
@@ -739,6 +739,7 @@ struct hclge_hw;
int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num);
void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
enum hclge_opcode_type opcode, bool is_read);
+void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read);
int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
struct hclge_promisc_param *param);
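The mask rewrites in this header follow one rule: a field of width w at shift
s, previously spelled (((1 << w) - 1) << s), becomes GENMASK(s + w - 1, s), so
the values are unchanged. A quick compile-time check of a few of them
(illustrative only, not part of the patch):

#include <linux/bits.h>
#include <linux/build_bug.h>

static inline void hclge_mask_conversion_examples(void)
{
	BUILD_BUG_ON((0x7ff << 2)   != GENMASK(12, 2));		/* HCLGE_TQP_ID_M */
	BUILD_BUG_ON((0x3 << 13)    != GENMASK(14, 13));	/* HCLGE_INT_GL_IDX_M */
	BUILD_BUG_ON((0x3FFF << 16) != GENMASK(29, 16));	/* HCLGE_TSO_MSS_MAX_M */
}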
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 1a13614af3de..c322b4534148 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -362,7 +362,7 @@ static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
#define HCLGE_64_BIT_RTN_DATANUM 4
u64 *data = (u64 *)(&hdev->hw_stats.all_64_bit_stats);
struct hclge_desc desc[HCLGE_64_BIT_CMD_NUM];
- u64 *desc_data;
+ __le64 *desc_data;
int i, k, n;
int ret;
@@ -376,14 +376,14 @@ static int hclge_64_bit_update_stats(struct hclge_dev *hdev)
for (i = 0; i < HCLGE_64_BIT_CMD_NUM; i++) {
if (unlikely(i == 0)) {
- desc_data = (u64 *)(&desc[i].data[0]);
+ desc_data = (__le64 *)(&desc[i].data[0]);
n = HCLGE_64_BIT_RTN_DATANUM - 1;
} else {
- desc_data = (u64 *)(&desc[i]);
+ desc_data = (__le64 *)(&desc[i]);
n = HCLGE_64_BIT_RTN_DATANUM;
}
for (k = 0; k < n; k++) {
- *data++ += cpu_to_le64(*desc_data);
+ *data++ += le64_to_cpu(*desc_data);
desc_data++;
}
}
@@ -411,7 +411,7 @@ static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
struct hclge_desc desc[HCLGE_32_BIT_CMD_NUM];
struct hclge_32_bit_stats *all_32_bit_stats;
- u32 *desc_data;
+ __le32 *desc_data;
int i, k, n;
u64 *data;
int ret;
@@ -431,21 +431,27 @@ static int hclge_32_bit_update_stats(struct hclge_dev *hdev)
hclge_reset_partial_32bit_counter(all_32_bit_stats);
for (i = 0; i < HCLGE_32_BIT_CMD_NUM; i++) {
if (unlikely(i == 0)) {
+ __le16 *desc_data_16bit;
+
all_32_bit_stats->igu_rx_err_pkt +=
- cpu_to_le32(desc[i].data[0]);
+ le32_to_cpu(desc[i].data[0]);
+
+ desc_data_16bit = (__le16 *)&desc[i].data[1];
all_32_bit_stats->igu_rx_no_eof_pkt +=
- cpu_to_le32(desc[i].data[1] & 0xffff);
+ le16_to_cpu(*desc_data_16bit);
+
+ desc_data_16bit++;
all_32_bit_stats->igu_rx_no_sof_pkt +=
- cpu_to_le32((desc[i].data[1] >> 16) & 0xffff);
+ le16_to_cpu(*desc_data_16bit);
- desc_data = (u32 *)(&desc[i].data[2]);
+ desc_data = &desc[i].data[2];
n = HCLGE_32_BIT_RTN_DATANUM - 4;
} else {
- desc_data = (u32 *)(&desc[i]);
+ desc_data = (__le32 *)&desc[i];
n = HCLGE_32_BIT_RTN_DATANUM;
}
for (k = 0; k < n; k++) {
- *data++ += cpu_to_le32(*desc_data);
+ *data++ += le32_to_cpu(*desc_data);
desc_data++;
}
}
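Several hunks above replace cpu_to_leNN() with leNN_to_cpu(): the command
descriptors live in memory in little-endian form, so fields the firmware has
written must be converted into CPU order when read, while CPU values must be
converted to little-endian before being stored in a descriptor. A minimal
sketch of the convention, assuming the generic byteorder helpers:

	__le32 raw = desc->data[0];		/* as written by the device */
	u32 val = le32_to_cpu(raw);		/* device -> CPU when reading */

	desc->data[0] = cpu_to_le32(val);	/* CPU -> device when writing */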
@@ -460,7 +466,7 @@ static int hclge_mac_update_stats(struct hclge_dev *hdev)
u64 *data = (u64 *)(&hdev->hw_stats.mac_stats);
struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
- u64 *desc_data;
+ __le64 *desc_data;
int i, k, n;
int ret;
@@ -475,14 +481,14 @@ static int hclge_mac_update_stats(struct hclge_dev *hdev)
for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
if (unlikely(i == 0)) {
- desc_data = (u64 *)(&desc[i].data[0]);
+ desc_data = (__le64 *)(&desc[i].data[0]);
n = HCLGE_RTN_DATA_NUM - 2;
} else {
- desc_data = (u64 *)(&desc[i]);
+ desc_data = (__le64 *)(&desc[i]);
n = HCLGE_RTN_DATA_NUM;
}
for (k = 0; k < n; k++) {
- *data++ += cpu_to_le64(*desc_data);
+ *data++ += le64_to_cpu(*desc_data);
desc_data++;
}
}
@@ -508,7 +514,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
HCLGE_OPC_QUERY_RX_STATUS,
true);
- desc[0].data[0] = (tqp->index & 0x1ff);
+ desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
ret = hclge_cmd_send(&hdev->hw, desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -517,7 +523,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
return ret;
}
tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
- cpu_to_le32(desc[0].data[4]);
+ le32_to_cpu(desc[0].data[4]);
}
for (i = 0; i < kinfo->num_tqps; i++) {
@@ -528,7 +534,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
HCLGE_OPC_QUERY_TX_STATUS,
true);
- desc[0].data[0] = (tqp->index & 0x1ff);
+ desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
ret = hclge_cmd_send(&hdev->hw, desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -537,7 +543,7 @@ static int hclge_tqps_update_stats(struct hnae3_handle *handle)
return ret;
}
tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
- cpu_to_le32(desc[0].data[4]);
+ le32_to_cpu(desc[0].data[4]);
}
return 0;
@@ -552,12 +558,12 @@ static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
for (i = 0; i < kinfo->num_tqps; i++) {
tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
- *buff++ = cpu_to_le64(tqp->tqp_stats.rcb_tx_ring_pktnum_rcd);
+ *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
}
for (i = 0; i < kinfo->num_tqps; i++) {
tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
- *buff++ = cpu_to_le64(tqp->tqp_stats.rcb_rx_ring_pktnum_rcd);
+ *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
}
return buff;
@@ -820,7 +826,7 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
}
static int hclge_parse_func_status(struct hclge_dev *hdev,
- struct hclge_func_status *status)
+ struct hclge_func_status_cmd *status)
{
if (!(status->pf_state & HCLGE_PF_STATE_DONE))
return -EINVAL;
@@ -837,13 +843,13 @@ static int hclge_parse_func_status(struct hclge_dev *hdev,
static int hclge_query_function_status(struct hclge_dev *hdev)
{
- struct hclge_func_status *req;
+ struct hclge_func_status_cmd *req;
struct hclge_desc desc;
int timeout = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
- req = (struct hclge_func_status *)desc.data;
+ req = (struct hclge_func_status_cmd *)desc.data;
do {
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -868,7 +874,7 @@ static int hclge_query_function_status(struct hclge_dev *hdev)
static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
- struct hclge_pf_res *req;
+ struct hclge_pf_res_cmd *req;
struct hclge_desc desc;
int ret;
@@ -880,7 +886,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
return ret;
}
- req = (struct hclge_pf_res *)desc.data;
+ req = (struct hclge_pf_res_cmd *)desc.data;
hdev->num_tqps = __le16_to_cpu(req->tqp_num);
hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
@@ -938,12 +944,12 @@ static int hclge_parse_speed(int speed_cmd, int *speed)
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
- struct hclge_cfg_param *req;
+ struct hclge_cfg_param_cmd *req;
u64 mac_addr_tmp_high;
u64 mac_addr_tmp;
int i;
- req = (struct hclge_cfg_param *)desc[0].data;
+ req = (struct hclge_cfg_param_cmd *)desc[0].data;
/* get the configuration */
cfg->vmdq_vport_num = hnae_get_field(__le32_to_cpu(req->param[0]),
@@ -978,7 +984,7 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
for (i = 0; i < ETH_ALEN; i++)
cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
- req = (struct hclge_cfg_param *)desc[1].data;
+ req = (struct hclge_cfg_param_cmd *)desc[1].data;
cfg->numa_node_map = __le32_to_cpu(req->param[0]);
}
@@ -989,20 +995,21 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
- struct hclge_cfg_param *req;
+ struct hclge_cfg_param_cmd *req;
int i, ret;
for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
- req = (struct hclge_cfg_param *)desc[i].data;
+ u32 offset = 0;
+
+ req = (struct hclge_cfg_param_cmd *)desc[i].data;
hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
true);
- hnae_set_field(req->offset, HCLGE_CFG_OFFSET_M,
+ hnae_set_field(offset, HCLGE_CFG_OFFSET_M,
HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
/* Len should be in units of 4 bytes when sent to hardware */
- hnae_set_field(req->offset, HCLGE_CFG_RD_LEN_M,
- HCLGE_CFG_RD_LEN_S,
+ hnae_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
- req->offset = cpu_to_le32(req->offset);
+ req->offset = cpu_to_le32(offset);
}
ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
@@ -1099,16 +1106,23 @@ static int hclge_configure(struct hclge_dev *hdev)
static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
int tso_mss_max)
{
- struct hclge_cfg_tso_status *req;
+ struct hclge_cfg_tso_status_cmd *req;
struct hclge_desc desc;
+ u16 tso_mss;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
- req = (struct hclge_cfg_tso_status *)desc.data;
- hnae_set_field(req->tso_mss_min, HCLGE_TSO_MSS_MIN_M,
+ req = (struct hclge_cfg_tso_status_cmd *)desc.data;
+
+ tso_mss = 0;
+ hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
HCLGE_TSO_MSS_MIN_S, tso_mss_min);
- hnae_set_field(req->tso_mss_max, HCLGE_TSO_MSS_MIN_M,
+ req->tso_mss_min = cpu_to_le16(tso_mss);
+
+ tso_mss = 0;
+ hnae_set_field(tso_mss, HCLGE_TSO_MSS_MIN_M,
HCLGE_TSO_MSS_MIN_S, tso_mss_max);
+ req->tso_mss_max = cpu_to_le16(tso_mss);
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
@@ -1144,15 +1158,15 @@ static int hclge_alloc_tqps(struct hclge_dev *hdev)
static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
- struct hclge_tqp_map *req;
+ struct hclge_tqp_map_cmd *req;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
- req = (struct hclge_tqp_map *)desc.data;
+ req = (struct hclge_tqp_map_cmd *)desc.data;
req->tqp_id = cpu_to_le16(tqp_pid);
- req->tqp_vf = cpu_to_le16(func_id);
+ req->tqp_vf = func_id;
req->tqp_flag = !is_pf << HCLGE_TQP_MAP_TYPE_B |
1 << HCLGE_TQP_MAP_EN_B;
req->tqp_vid = cpu_to_le16(tqp_vid);
@@ -1340,12 +1354,12 @@ static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
/* TX buffer size is in units of 128 bytes */
#define HCLGE_BUF_SIZE_UNIT_SHIFT 7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
- struct hclge_tx_buff_alloc *req;
+ struct hclge_tx_buff_alloc_cmd *req;
struct hclge_desc desc;
int ret;
u8 i;
- req = (struct hclge_tx_buff_alloc *)desc.data;
+ req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
for (i = 0; i < HCLGE_TC_NUM; i++) {
@@ -1536,8 +1550,8 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
* @buf_alloc: pointer to buffer calculation data
* @return: 0: calculation successful, negative: fail
*/
-int hclge_rx_buffer_calc(struct hclge_dev *hdev,
- struct hclge_pkt_buf_alloc *buf_alloc)
+static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
{
u32 rx_all = hdev->pkt_buf_size;
int no_pfc_priv_num, pfc_priv_num;
@@ -1672,13 +1686,13 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev,
static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
struct hclge_pkt_buf_alloc *buf_alloc)
{
- struct hclge_rx_priv_buff *req;
+ struct hclge_rx_priv_buff_cmd *req;
struct hclge_desc desc;
int ret;
int i;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
- req = (struct hclge_rx_priv_buff *)desc.data;
+ req = (struct hclge_rx_priv_buff_cmd *)desc.data;
/* Alloc private buffer TCs */
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
@@ -1687,7 +1701,7 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
req->buf_num[i] =
cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
req->buf_num[i] |=
- cpu_to_le16(true << HCLGE_TC0_PRI_BUF_EN_B);
+ cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
}
req->shared_buf =
@@ -2000,11 +2014,11 @@ static void hclge_check_speed_dup(struct hclge_dev *hdev, int duplex, int speed)
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
{
- struct hclge_config_mac_speed_dup *req;
+ struct hclge_config_mac_speed_dup_cmd *req;
struct hclge_desc desc;
int ret;
- req = (struct hclge_config_mac_speed_dup *)desc.data;
+ req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
@@ -2075,12 +2089,12 @@ static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
u8 *duplex)
{
- struct hclge_query_an_speed_dup *req;
+ struct hclge_query_an_speed_dup_cmd *req;
struct hclge_desc desc;
int speed_tmp;
int ret;
- req = (struct hclge_query_an_speed_dup *)desc.data;
+ req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2108,11 +2122,11 @@ static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed,
static int hclge_query_autoneg_result(struct hclge_dev *hdev)
{
struct hclge_mac *mac = &hdev->hw.mac;
- struct hclge_query_an_speed_dup *req;
+ struct hclge_query_an_speed_dup_cmd *req;
struct hclge_desc desc;
int ret;
- req = (struct hclge_query_an_speed_dup *)desc.data;
+ req = (struct hclge_query_an_speed_dup_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2129,14 +2143,16 @@ static int hclge_query_autoneg_result(struct hclge_dev *hdev)
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
{
- struct hclge_config_auto_neg *req;
+ struct hclge_config_auto_neg_cmd *req;
struct hclge_desc desc;
+ u32 flag = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
- req = (struct hclge_config_auto_neg *)desc.data;
- hnae_set_bit(req->cfg_an_cmd_flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
+ req = (struct hclge_config_auto_neg_cmd *)desc.data;
+ hnae_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, !!enable);
+ req->cfg_an_cmd_flag = cpu_to_le32(flag);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -2214,7 +2230,7 @@ static void hclge_task_schedule(struct hclge_dev *hdev)
static int hclge_get_mac_link_status(struct hclge_dev *hdev)
{
- struct hclge_link_status *req;
+ struct hclge_link_status_cmd *req;
struct hclge_desc desc;
int link_status;
int ret;
@@ -2227,7 +2243,7 @@ static int hclge_get_mac_link_status(struct hclge_dev *hdev)
return ret;
}
- req = (struct hclge_link_status *)desc.data;
+ req = (struct hclge_link_status_cmd *)desc.data;
link_status = req->status & HCLGE_LINK_STATUS;
return !!link_status;
@@ -2451,7 +2467,7 @@ static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
static int hclge_get_rss_algo(struct hclge_dev *hdev)
{
- struct hclge_rss_config *req;
+ struct hclge_rss_config_cmd *req;
struct hclge_desc desc;
int rss_hash_algo;
int ret;
@@ -2465,7 +2481,7 @@ static int hclge_get_rss_algo(struct hclge_dev *hdev)
return ret;
}
- req = (struct hclge_rss_config *)desc.data;
+ req = (struct hclge_rss_config_cmd *)desc.data;
rss_hash_algo = (req->hash_config & HCLGE_RSS_HASH_ALGO_MASK);
if (rss_hash_algo == HCLGE_RSS_HASH_ALGO_TOEPLITZ)
@@ -2477,13 +2493,13 @@ static int hclge_get_rss_algo(struct hclge_dev *hdev)
static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
const u8 hfunc, const u8 *key)
{
- struct hclge_rss_config *req;
+ struct hclge_rss_config_cmd *req;
struct hclge_desc desc;
int key_offset;
int key_size;
int ret;
- req = (struct hclge_rss_config *)desc.data;
+ req = (struct hclge_rss_config_cmd *)desc.data;
for (key_offset = 0; key_offset < 3; key_offset++) {
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
@@ -2514,19 +2530,20 @@ static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir)
{
- struct hclge_rss_indirection_table *req;
+ struct hclge_rss_indirection_table_cmd *req;
struct hclge_desc desc;
int i, j;
int ret;
- req = (struct hclge_rss_indirection_table *)desc.data;
+ req = (struct hclge_rss_indirection_table_cmd *)desc.data;
for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
hclge_cmd_setup_basic_desc
(&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
- req->start_table_index = i * HCLGE_RSS_CFG_TBL_SIZE;
- req->rss_set_bitmap = HCLGE_RSS_SET_BITMAP_MSK;
+ req->start_table_index =
+ cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
+ req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
req->rss_result[j] =
@@ -2546,21 +2563,24 @@ static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u32 *indir)
static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
u16 *tc_size, u16 *tc_offset)
{
- struct hclge_rss_tc_mode *req;
+ struct hclge_rss_tc_mode_cmd *req;
struct hclge_desc desc;
int ret;
int i;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
- req = (struct hclge_rss_tc_mode *)desc.data;
+ req = (struct hclge_rss_tc_mode_cmd *)desc.data;
for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
- hnae_set_bit(req->rss_tc_mode[i], HCLGE_RSS_TC_VALID_B,
- (tc_valid[i] & 0x1));
- hnae_set_field(req->rss_tc_mode[i], HCLGE_RSS_TC_SIZE_M,
+ u16 mode = 0;
+
+ hnae_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
+ hnae_set_field(mode, HCLGE_RSS_TC_SIZE_M,
HCLGE_RSS_TC_SIZE_S, tc_size[i]);
- hnae_set_field(req->rss_tc_mode[i], HCLGE_RSS_TC_OFFSET_M,
+ hnae_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
+
+ req->rss_tc_mode[i] = cpu_to_le16(mode);
}
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -2575,15 +2595,13 @@ static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
{
-#define HCLGE_RSS_INPUT_TUPLE_OTHER 0xf
-#define HCLGE_RSS_INPUT_TUPLE_SCTP 0x1f
- struct hclge_rss_input_tuple *req;
+ struct hclge_rss_input_tuple_cmd *req;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
- req = (struct hclge_rss_input_tuple *)desc.data;
+ req = (struct hclge_rss_input_tuple_cmd *)desc.data;
req->ipv4_tcp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
req->ipv4_udp_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
req->ipv4_sctp_en = HCLGE_RSS_INPUT_TUPLE_SCTP;
@@ -2657,6 +2675,161 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
return ret;
}
+static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
+{
+ u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
+
+ if (nfc->data & RXH_L4_B_2_3)
+ hash_sets |= HCLGE_D_PORT_BIT;
+ else
+ hash_sets &= ~HCLGE_D_PORT_BIT;
+
+ if (nfc->data & RXH_IP_SRC)
+ hash_sets |= HCLGE_S_IP_BIT;
+ else
+ hash_sets &= ~HCLGE_S_IP_BIT;
+
+ if (nfc->data & RXH_IP_DST)
+ hash_sets |= HCLGE_D_IP_BIT;
+ else
+ hash_sets &= ~HCLGE_D_IP_BIT;
+
+ if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
+ hash_sets |= HCLGE_V_TAG_BIT;
+
+ return hash_sets;
+}
+
+static int hclge_set_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_rss_input_tuple_cmd *req;
+ struct hclge_desc desc;
+ u8 tuple_sets;
+ int ret;
+
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ req = (struct hclge_rss_input_tuple_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Read rss tuple fail, status = %d\n", ret);
+ return ret;
+ }
+
+ hclge_cmd_reuse_desc(&desc, false);
+
+ tuple_sets = hclge_get_rss_hash_bits(nfc);
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ req->ipv4_tcp_en = tuple_sets;
+ break;
+ case TCP_V6_FLOW:
+ req->ipv6_tcp_en = tuple_sets;
+ break;
+ case UDP_V4_FLOW:
+ req->ipv4_udp_en = tuple_sets;
+ break;
+ case UDP_V6_FLOW:
+ req->ipv6_udp_en = tuple_sets;
+ break;
+ case SCTP_V4_FLOW:
+ req->ipv4_sctp_en = tuple_sets;
+ break;
+ case SCTP_V6_FLOW:
+ if ((nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+
+ req->ipv6_sctp_en = tuple_sets;
+ break;
+ case IPV4_FLOW:
+ req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
+ break;
+ case IPV6_FLOW:
+ req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Set rss tuple fail, status = %d\n", ret);
+
+ return ret;
+}
+
+static int hclge_get_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_rss_input_tuple_cmd *req;
+ struct hclge_desc desc;
+ u8 tuple_sets;
+ int ret;
+
+ nfc->data = 0;
+
+ req = (struct hclge_rss_input_tuple_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Read rss tuple fail, status = %d\n", ret);
+ return ret;
+ }
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ tuple_sets = req->ipv4_tcp_en;
+ break;
+ case UDP_V4_FLOW:
+ tuple_sets = req->ipv4_udp_en;
+ break;
+ case TCP_V6_FLOW:
+ tuple_sets = req->ipv6_tcp_en;
+ break;
+ case UDP_V6_FLOW:
+ tuple_sets = req->ipv6_udp_en;
+ break;
+ case SCTP_V4_FLOW:
+ tuple_sets = req->ipv4_sctp_en;
+ break;
+ case SCTP_V6_FLOW:
+ tuple_sets = req->ipv6_sctp_en;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!tuple_sets)
+ return 0;
+
+ if (tuple_sets & HCLGE_D_PORT_BIT)
+ nfc->data |= RXH_L4_B_2_3;
+ if (tuple_sets & HCLGE_S_PORT_BIT)
+ nfc->data |= RXH_L4_B_0_1;
+ if (tuple_sets & HCLGE_D_IP_BIT)
+ nfc->data |= RXH_IP_DST;
+ if (tuple_sets & HCLGE_S_IP_BIT)
+ nfc->data |= RXH_IP_SRC;
+
+ return 0;
+}
+
static int hclge_get_tc_size(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -2750,7 +2923,7 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_dev *hdev = vport->back;
- struct hclge_ctrl_vector_chain *req;
+ struct hclge_ctrl_vector_chain_cmd *req;
struct hnae3_ring_chain_node *node;
struct hclge_desc desc;
int ret;
@@ -2758,20 +2931,21 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ADD_RING_TO_VECTOR, false);
- req = (struct hclge_ctrl_vector_chain *)desc.data;
+ req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
req->int_vector_id = vector_id;
i = 0;
for (node = ring_chain; node; node = node->next) {
- hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_TYPE_M,
- HCLGE_INT_TYPE_S,
+ u16 type_and_id = 0;
+
+ hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
- HCLGE_TQP_ID_S, node->tqp_index);
- hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M,
+ hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
+ node->tqp_index);
+ hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
HCLGE_INT_GL_IDX_S,
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
+ req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
req->vfid = vport->vport_id;
if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
@@ -2807,9 +2981,9 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
return 0;
}
-int hclge_map_handle_ring_to_vector(struct hnae3_handle *handle,
- int vector,
- struct hnae3_ring_chain_node *ring_chain)
+static int hclge_map_handle_ring_to_vector(
+ struct hnae3_handle *handle, int vector,
+ struct hnae3_ring_chain_node *ring_chain)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -2831,7 +3005,7 @@ static int hclge_unmap_ring_from_vector(
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct hclge_ctrl_vector_chain *req;
+ struct hclge_ctrl_vector_chain_cmd *req;
struct hnae3_ring_chain_node *node;
struct hclge_desc desc;
int i, vector_id;
@@ -2846,21 +3020,22 @@ static int hclge_unmap_ring_from_vector(
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_DEL_RING_TO_VECTOR, false);
- req = (struct hclge_ctrl_vector_chain *)desc.data;
+ req = (struct hclge_ctrl_vector_chain_cmd *)desc.data;
req->int_vector_id = vector_id;
i = 0;
for (node = ring_chain; node; node = node->next) {
- hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_TYPE_M,
- HCLGE_INT_TYPE_S,
+ u16 type_and_id = 0;
+
+ hnae_set_field(type_and_id, HCLGE_INT_TYPE_M, HCLGE_INT_TYPE_S,
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
- HCLGE_TQP_ID_S, node->tqp_index);
- hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M,
+ hnae_set_field(type_and_id, HCLGE_TQP_ID_M, HCLGE_TQP_ID_S,
+ node->tqp_index);
+ hnae_set_field(type_and_id, HCLGE_INT_GL_IDX_M,
HCLGE_INT_GL_IDX_S,
hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
- req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
+ req->tqp_type_and_id[i] = cpu_to_le16(type_and_id);
req->vfid = vport->vport_id;
if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
@@ -2898,13 +3073,13 @@ static int hclge_unmap_ring_from_vector(
int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
struct hclge_promisc_param *param)
{
- struct hclge_promisc_cfg *req;
+ struct hclge_promisc_cfg_cmd *req;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
- req = (struct hclge_promisc_cfg *)desc.data;
+ req = (struct hclge_promisc_cfg_cmd *)desc.data;
req->vf_id = param->vf_id;
req->flag = (param->enable << HCLGE_PROMISC_EN_B);
@@ -2946,29 +3121,27 @@ static void hclge_set_promisc_mode(struct hnae3_handle *handle, u32 en)
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
struct hclge_desc desc;
- struct hclge_config_mac_mode *req =
- (struct hclge_config_mac_mode *)desc.data;
+ struct hclge_config_mac_mode_cmd *req =
+ (struct hclge_config_mac_mode_cmd *)desc.data;
+ u32 loop_en = 0;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
- hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_TX_EN_B, enable);
- hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_RX_EN_B, enable);
- hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_PAD_TX_B, enable);
- hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_PAD_RX_B, enable);
- hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_1588_TX_B, 0);
- hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_1588_RX_B, 0);
- hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_APP_LP_B, 0);
- hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_LINE_LP_B, 0);
- hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_FCS_TX_B, enable);
- hnae_set_bit(req->txrx_pad_fcs_loop_en, HCLGE_MAC_RX_FCS_B, enable);
- hnae_set_bit(req->txrx_pad_fcs_loop_en,
- HCLGE_MAC_RX_FCS_STRIP_B, enable);
- hnae_set_bit(req->txrx_pad_fcs_loop_en,
- HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
- hnae_set_bit(req->txrx_pad_fcs_loop_en,
- HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
- hnae_set_bit(req->txrx_pad_fcs_loop_en,
- HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
+ hnae_set_bit(loop_en, HCLGE_MAC_TX_EN_B, enable);
+ hnae_set_bit(loop_en, HCLGE_MAC_RX_EN_B, enable);
+ hnae_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, enable);
+ hnae_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, enable);
+ hnae_set_bit(loop_en, HCLGE_MAC_1588_TX_B, 0);
+ hnae_set_bit(loop_en, HCLGE_MAC_1588_RX_B, 0);
+ hnae_set_bit(loop_en, HCLGE_MAC_APP_LP_B, 0);
+ hnae_set_bit(loop_en, HCLGE_MAC_LINE_LP_B, 0);
+ hnae_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, enable);
+ hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, enable);
+ hnae_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, enable);
+ hnae_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, enable);
+ hnae_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, enable);
+ hnae_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, enable);
+ req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
@@ -2980,8 +3153,8 @@ static int hclge_tqp_enable(struct hclge_dev *hdev, int tqp_id,
int stream_id, bool enable)
{
struct hclge_desc desc;
- struct hclge_cfg_com_tqp_queue *req =
- (struct hclge_cfg_com_tqp_queue *)desc.data;
+ struct hclge_cfg_com_tqp_queue_cmd *req =
+ (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
@@ -3145,16 +3318,16 @@ static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
word_num = vfid / 32;
bit_num = vfid % 32;
if (clr)
- desc[1].data[word_num] &= ~(1 << bit_num);
+ desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
else
- desc[1].data[word_num] |= (1 << bit_num);
+ desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
} else {
word_num = (vfid - 192) / 32;
bit_num = vfid % 32;
if (clr)
- desc[2].data[word_num] &= ~(1 << bit_num);
+ desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
else
- desc[2].data[word_num] |= (1 << bit_num);
+ desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
}
return 0;
@@ -3174,7 +3347,7 @@ static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
return true;
}
-static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry *new_req,
+static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
const u8 *addr)
{
const unsigned char *mac_addr = addr;
@@ -3186,8 +3359,8 @@ static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry *new_req,
new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
}
-u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
- const u8 *addr)
+static u16 hclge_get_mac_addr_to_mta_index(struct hclge_vport *vport,
+ const u8 *addr)
{
u16 high_val = addr[1] | (addr[0] << 8);
struct hclge_dev *hdev = vport->back;
@@ -3201,11 +3374,11 @@ static int hclge_set_mta_filter_mode(struct hclge_dev *hdev,
enum hclge_mta_dmac_sel_type mta_mac_sel,
bool enable)
{
- struct hclge_mta_filter_mode *req;
+ struct hclge_mta_filter_mode_cmd *req;
struct hclge_desc desc;
int ret;
- req = (struct hclge_mta_filter_mode *)desc.data;
+ req = (struct hclge_mta_filter_mode_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_MODE_CFG, false);
hnae_set_bit(req->dmac_sel_en, HCLGE_CFG_MTA_MAC_EN_B,
@@ -3228,11 +3401,11 @@ int hclge_cfg_func_mta_filter(struct hclge_dev *hdev,
u8 func_id,
bool enable)
{
- struct hclge_cfg_func_mta_filter *req;
+ struct hclge_cfg_func_mta_filter_cmd *req;
struct hclge_desc desc;
int ret;
- req = (struct hclge_cfg_func_mta_filter *)desc.data;
+ req = (struct hclge_cfg_func_mta_filter_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_MAC_FUNC_CFG, false);
hnae_set_bit(req->accept, HCLGE_CFG_FUNC_MTA_ACCEPT_B,
@@ -3255,17 +3428,18 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport,
bool enable)
{
struct hclge_dev *hdev = vport->back;
- struct hclge_cfg_func_mta_item *req;
+ struct hclge_cfg_func_mta_item_cmd *req;
struct hclge_desc desc;
+ u16 item_idx = 0;
int ret;
- req = (struct hclge_cfg_func_mta_item *)desc.data;
+ req = (struct hclge_cfg_func_mta_item_cmd *)desc.data;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MTA_TBL_ITEM_CFG, false);
hnae_set_bit(req->accept, HCLGE_CFG_MTA_ITEM_ACCEPT_B, enable);
- hnae_set_field(req->item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
+ hnae_set_field(item_idx, HCLGE_CFG_MTA_ITEM_IDX_M,
HCLGE_CFG_MTA_ITEM_IDX_S, idx);
- req->item_idx = cpu_to_le16(req->item_idx);
+ req->item_idx = cpu_to_le16(item_idx);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -3279,16 +3453,17 @@ static int hclge_set_mta_table_item(struct hclge_vport *vport,
}
static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
- struct hclge_mac_vlan_tbl_entry *req)
+ struct hclge_mac_vlan_tbl_entry_cmd *req)
{
struct hclge_dev *hdev = vport->back;
struct hclge_desc desc;
u8 resp_code;
+ u16 retval;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
- memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry));
+ memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -3297,19 +3472,21 @@ static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
ret);
return ret;
}
- resp_code = (desc.data[0] >> 8) & 0xff;
+ resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
+ retval = le16_to_cpu(desc.retval);
- return hclge_get_mac_vlan_cmd_status(vport, desc.retval, resp_code,
+ return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
HCLGE_MAC_VLAN_REMOVE);
}
static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
- struct hclge_mac_vlan_tbl_entry *req,
+ struct hclge_mac_vlan_tbl_entry_cmd *req,
struct hclge_desc *desc,
bool is_mc)
{
struct hclge_dev *hdev = vport->back;
u8 resp_code;
+ u16 retval;
int ret;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
@@ -3317,7 +3494,7 @@ static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
memcpy(desc[0].data,
req,
- sizeof(struct hclge_mac_vlan_tbl_entry));
+ sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
hclge_cmd_setup_basic_desc(&desc[1],
HCLGE_OPC_MAC_VLAN_ADD,
true);
@@ -3329,7 +3506,7 @@ static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
} else {
memcpy(desc[0].data,
req,
- sizeof(struct hclge_mac_vlan_tbl_entry));
+ sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
ret = hclge_cmd_send(&hdev->hw, desc, 1);
}
if (ret) {
@@ -3338,19 +3515,21 @@ static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
ret);
return ret;
}
- resp_code = (desc[0].data[0] >> 8) & 0xff;
+ resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
+ retval = le16_to_cpu(desc[0].retval);
- return hclge_get_mac_vlan_cmd_status(vport, desc[0].retval, resp_code,
+ return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
HCLGE_MAC_VLAN_LKUP);
}
static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
- struct hclge_mac_vlan_tbl_entry *req,
+ struct hclge_mac_vlan_tbl_entry_cmd *req,
struct hclge_desc *mc_desc)
{
struct hclge_dev *hdev = vport->back;
int cfg_status;
u8 resp_code;
+ u16 retval;
int ret;
if (!mc_desc) {
@@ -3359,10 +3538,13 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
hclge_cmd_setup_basic_desc(&desc,
HCLGE_OPC_MAC_VLAN_ADD,
false);
- memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry));
+ memcpy(desc.data, req,
+ sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- resp_code = (desc.data[0] >> 8) & 0xff;
- cfg_status = hclge_get_mac_vlan_cmd_status(vport, desc.retval,
+ resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
+ retval = le16_to_cpu(desc.retval);
+
+ cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
resp_code,
HCLGE_MAC_VLAN_ADD);
} else {
@@ -3373,11 +3555,12 @@ static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
memcpy(mc_desc[0].data, req,
- sizeof(struct hclge_mac_vlan_tbl_entry));
+ sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
- resp_code = (mc_desc[0].data[0] >> 8) & 0xff;
- cfg_status = hclge_get_mac_vlan_cmd_status(vport,
- mc_desc[0].retval,
+ resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
+ retval = le16_to_cpu(mc_desc[0].retval);
+
+ cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
resp_code,
HCLGE_MAC_VLAN_ADD);
}
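The mac/vlan table hunks above share one pattern: the command descriptor fields are little-endian on the wire, so the response byte and return value are first pulled into CPU-order locals and only then handed to the status helper. A minimal sketch of that pattern, with a hypothetical helper name and a simplified success check:

/* Illustrative sketch only, not part of the patch. */
static int example_parse_mac_vlan_status(struct hclge_desc *desc)
{
	u8 resp_code = (le32_to_cpu(desc->data[0]) >> 8) & 0xff;
	u16 retval = le16_to_cpu(desc->retval);

	/* assumed convention: zero retval and resp_code means success */
	return (!retval && !resp_code) ? 0 : -EIO;
}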
@@ -3404,8 +3587,9 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
struct hclge_dev *hdev = vport->back;
- struct hclge_mac_vlan_tbl_entry req;
+ struct hclge_mac_vlan_tbl_entry_cmd req;
enum hclge_cmd_status status;
+ u16 egress_port = 0;
/* mac addr check */
if (is_zero_ether_addr(addr) ||
@@ -3425,15 +3609,15 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
hnae_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 0);
hnae_set_bit(req.mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
- hnae_set_bit(req.egress_port,
- HCLGE_MAC_EPORT_SW_EN_B, 0);
- hnae_set_bit(req.egress_port,
- HCLGE_MAC_EPORT_TYPE_B, 0);
- hnae_set_field(req.egress_port, HCLGE_MAC_EPORT_VFID_M,
+
+ hnae_set_bit(egress_port, HCLGE_MAC_EPORT_SW_EN_B, 0);
+ hnae_set_bit(egress_port, HCLGE_MAC_EPORT_TYPE_B, 0);
+ hnae_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
- hnae_set_field(req.egress_port, HCLGE_MAC_EPORT_PFID_M,
+ hnae_set_field(egress_port, HCLGE_MAC_EPORT_PFID_M,
HCLGE_MAC_EPORT_PFID_S, 0);
- req.egress_port = cpu_to_le16(req.egress_port);
+
+ req.egress_port = cpu_to_le16(egress_port);
hclge_prepare_mac_addr(&req, addr);
@@ -3454,7 +3638,7 @@ int hclge_rm_uc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
struct hclge_dev *hdev = vport->back;
- struct hclge_mac_vlan_tbl_entry req;
+ struct hclge_mac_vlan_tbl_entry_cmd req;
enum hclge_cmd_status status;
/* mac addr check */
@@ -3488,7 +3672,7 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
struct hclge_dev *hdev = vport->back;
- struct hclge_mac_vlan_tbl_entry req;
+ struct hclge_mac_vlan_tbl_entry_cmd req;
struct hclge_desc desc[3];
u16 tbl_idx;
int status;
@@ -3539,7 +3723,7 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
const unsigned char *addr)
{
struct hclge_dev *hdev = vport->back;
- struct hclge_mac_vlan_tbl_entry req;
+ struct hclge_mac_vlan_tbl_entry_cmd req;
enum hclge_cmd_status status;
struct hclge_desc desc[3];
u16 tbl_idx;
@@ -3622,13 +3806,13 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p)
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
bool filter_en)
{
- struct hclge_vlan_filter_ctrl *req;
+ struct hclge_vlan_filter_ctrl_cmd *req;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
- req = (struct hclge_vlan_filter_ctrl *)desc.data;
+ req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
req->vlan_type = vlan_type;
req->vlan_fe = filter_en;
@@ -3646,8 +3830,8 @@ int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
bool is_kill, u16 vlan, u8 qos, __be16 proto)
{
#define HCLGE_MAX_VF_BYTES 16
- struct hclge_vlan_filter_vf_cfg *req0;
- struct hclge_vlan_filter_vf_cfg *req1;
+ struct hclge_vlan_filter_vf_cfg_cmd *req0;
+ struct hclge_vlan_filter_vf_cfg_cmd *req1;
struct hclge_desc desc[2];
u8 vf_byte_val;
u8 vf_byte_off;
@@ -3663,10 +3847,10 @@ int hclge_set_vf_vlan_common(struct hclge_dev *hdev, int vfid,
vf_byte_off = vfid / 8;
vf_byte_val = 1 << (vfid % 8);
- req0 = (struct hclge_vlan_filter_vf_cfg *)desc[0].data;
- req1 = (struct hclge_vlan_filter_vf_cfg *)desc[1].data;
+ req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
+ req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
- req0->vlan_id = vlan;
+ req0->vlan_id = cpu_to_le16(vlan);
req0->vlan_cfg = is_kill;
if (vf_byte_off < HCLGE_MAX_VF_BYTES)
@@ -3707,7 +3891,7 @@ static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct hclge_vlan_filter_pf_cfg *req;
+ struct hclge_vlan_filter_pf_cfg_cmd *req;
struct hclge_desc desc;
u8 vlan_offset_byte_val;
u8 vlan_offset_byte;
@@ -3720,7 +3904,7 @@ static int hclge_set_port_vlan_filter(struct hnae3_handle *handle,
vlan_offset_byte = (vlan_id % 160) / 8;
vlan_offset_byte_val = 1 << (vlan_id % 8);
- req = (struct hclge_vlan_filter_pf_cfg *)desc.data;
+ req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
req->vlan_offset = vlan_offset_160;
req->vlan_cfg = is_kill;
req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
@@ -3782,7 +3966,7 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_config_max_frm_size *req;
+ struct hclge_config_max_frm_size_cmd *req;
struct hclge_dev *hdev = vport->back;
struct hclge_desc desc;
int ret;
@@ -3793,7 +3977,7 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
hdev->mps = new_mtu;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
- req = (struct hclge_config_max_frm_size *)desc.data;
+ req = (struct hclge_config_max_frm_size_cmd *)desc.data;
req->max_frm_size = cpu_to_le16(new_mtu);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -3808,13 +3992,13 @@ static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
bool enable)
{
- struct hclge_reset_tqp_queue *req;
+ struct hclge_reset_tqp_queue_cmd *req;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
- req = (struct hclge_reset_tqp_queue *)desc.data;
+ req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
hnae_set_bit(req->reset_req, HCLGE_TQP_RESET_B, enable);
@@ -3830,13 +4014,13 @@ static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
{
- struct hclge_reset_tqp_queue *req;
+ struct hclge_reset_tqp_queue_cmd *req;
struct hclge_desc desc;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
- req = (struct hclge_reset_tqp_queue *)desc.data;
+ req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
@@ -4313,6 +4497,8 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_rss_indir_size = hclge_get_rss_indir_size,
.get_rss = hclge_get_rss,
.set_rss = hclge_set_rss,
+ .set_rss_tuple = hclge_set_rss_tuple,
+ .get_rss_tuple = hclge_get_rss_tuple,
.get_tc_size = hclge_get_tc_size,
.get_mac_addr = hclge_get_mac_addr,
.set_mac_addr = hclge_set_mac_addr,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 7c66c00e8a3e..a7c018c7b0ec 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -32,7 +32,7 @@
#define HCLGE_VECTOR_VF_OFFSET 0x100000
#define HCLGE_RSS_IND_TBL_SIZE 512
-#define HCLGE_RSS_SET_BITMAP_MSK 0xffff
+#define HCLGE_RSS_SET_BITMAP_MSK GENMASK(15, 0)
#define HCLGE_RSS_KEY_SIZE 40
#define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0
#define HCLGE_RSS_HASH_ALGO_SIMPLE 1
@@ -41,6 +41,14 @@
#define HCLGE_RSS_CFG_TBL_NUM \
(HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)
+#define HCLGE_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
+#define HCLGE_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
+#define HCLGE_D_PORT_BIT BIT(0)
+#define HCLGE_S_PORT_BIT BIT(1)
+#define HCLGE_D_IP_BIT BIT(2)
+#define HCLGE_S_IP_BIT BIT(3)
+#define HCLGE_V_TAG_BIT BIT(4)
+
#define HCLGE_RSS_TC_SIZE_0 1
#define HCLGE_RSS_TC_SIZE_1 2
#define HCLGE_RSS_TC_SIZE_2 4
@@ -65,7 +73,7 @@
#define HCLGE_PHY_CSS_REG 17
#define HCLGE_PHY_MDIX_CTRL_S (5)
-#define HCLGE_PHY_MDIX_CTRL_M (3 << HCLGE_PHY_MDIX_CTRL_S)
+#define HCLGE_PHY_MDIX_CTRL_M GENMASK(6, 5)
#define HCLGE_PHY_MDIX_STATUS_B (6)
#define HCLGE_PHY_SPEED_DUP_RESOLVE_B (11)
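The header cleanup above replaces open-coded masks with GENMASK()/BIT() from <linux/bits.h>; the numeric values are unchanged. A small illustration of the equivalence (macro names below are invented):

#include <linux/bits.h>

#define EXAMPLE_MDIX_CTRL_M_OLD		(3 << 5)	/* 0x60 */
#define EXAMPLE_MDIX_CTRL_M_NEW		GENMASK(6, 5)	/* also 0x60: bits 6..5 */
#define EXAMPLE_BITMAP_MSK		GENMASK(15, 0)	/* 0xffff */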
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 359ee670d1e1..1ae6eae82eb3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -283,6 +283,7 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
struct hclge_pg_shapping_cmd *shap_cfg_cmd;
enum hclge_opcode_type opcode;
struct hclge_desc desc;
+ u32 shapping_para = 0;
opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
HCLGE_OPC_TM_PG_C_SHAPPING;
@@ -292,11 +293,13 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
shap_cfg_cmd->pg_id = pg_id;
- hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b);
- hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u);
- hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s);
- hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b);
- hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s);
+ hclge_tm_set_field(shapping_para, IR_B, ir_b);
+ hclge_tm_set_field(shapping_para, IR_U, ir_u);
+ hclge_tm_set_field(shapping_para, IR_S, ir_s);
+ hclge_tm_set_field(shapping_para, BS_B, bs_b);
+ hclge_tm_set_field(shapping_para, BS_S, bs_s);
+
+ shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
@@ -337,6 +340,7 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
struct hclge_pri_shapping_cmd *shap_cfg_cmd;
enum hclge_opcode_type opcode;
struct hclge_desc desc;
+ u32 shapping_para = 0;
opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
HCLGE_OPC_TM_PRI_C_SHAPPING;
@@ -347,11 +351,13 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
shap_cfg_cmd->pri_id = pri_id;
- hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b);
- hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u);
- hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s);
- hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b);
- hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s);
+ hclge_tm_set_field(shapping_para, IR_B, ir_b);
+ hclge_tm_set_field(shapping_para, IR_U, ir_u);
+ hclge_tm_set_field(shapping_para, IR_S, ir_s);
+ hclge_tm_set_field(shapping_para, BS_B, bs_b);
+ hclge_tm_set_field(shapping_para, BS_S, bs_s);
+
+ shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
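Both shaping hunks switch from updating the little-endian command field in place to composing the bitfields in a host-order scratch word and storing the result once. A generic sketch of that idiom (the field layout here is invented for illustration):

/* Illustrative only: compose in CPU order, convert once on store. */
static void example_write_shaping_word(__le32 *dst, u32 ir_b, u32 bs_b)
{
	u32 tmp = 0;

	tmp |= ir_b & 0xff;		/* hypothetical 8-bit field at bit 0 */
	tmp |= (bs_b & 0x1f) << 8;	/* hypothetical 5-bit field at bit 8 */

	*dst = cpu_to_le32(tmp);
}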
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c
index 9832172bfb08..925619a7c50a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_dcbnl.c
@@ -13,8 +13,7 @@
static
int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
{
- struct hns3_nic_priv *priv = netdev_priv(ndev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(ndev);
if (h->kinfo.dcb_ops->ieee_getets)
return h->kinfo.dcb_ops->ieee_getets(h, ets);
@@ -25,8 +24,7 @@ int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
static
int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
{
- struct hns3_nic_priv *priv = netdev_priv(ndev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(ndev);
if (h->kinfo.dcb_ops->ieee_setets)
return h->kinfo.dcb_ops->ieee_setets(h, ets);
@@ -37,8 +35,7 @@ int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
static
int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{
- struct hns3_nic_priv *priv = netdev_priv(ndev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(ndev);
if (h->kinfo.dcb_ops->ieee_getpfc)
return h->kinfo.dcb_ops->ieee_getpfc(h, pfc);
@@ -49,8 +46,7 @@ int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
static
int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{
- struct hns3_nic_priv *priv = netdev_priv(ndev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(ndev);
if (h->kinfo.dcb_ops->ieee_setpfc)
return h->kinfo.dcb_ops->ieee_setpfc(h, pfc);
@@ -61,8 +57,7 @@ int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
/* DCBX configuration */
static u8 hns3_dcbnl_getdcbx(struct net_device *ndev)
{
- struct hns3_nic_priv *priv = netdev_priv(ndev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(ndev);
if (h->kinfo.dcb_ops->getdcbx)
return h->kinfo.dcb_ops->getdcbx(h);
@@ -73,8 +68,7 @@ static u8 hns3_dcbnl_getdcbx(struct net_device *ndev)
/* return 0 if successful, otherwise fail */
static u8 hns3_dcbnl_setdcbx(struct net_device *ndev, u8 mode)
{
- struct hns3_nic_priv *priv = netdev_priv(ndev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(ndev);
if (h->kinfo.dcb_ops->setdcbx)
return h->kinfo.dcb_ops->setdcbx(h, mode);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
index c31506514e5d..ba550c1b5b01 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
@@ -24,7 +24,7 @@
#include "hnae3.h"
#include "hns3_enet.h"
-const char hns3_driver_name[] = "hns3";
+static const char hns3_driver_name[] = "hns3";
const char hns3_driver_version[] = VERMAGIC_STRING;
static const char hns3_driver_string[] =
"Hisilicon Ethernet Network Driver for Hip08 Family";
@@ -198,8 +198,7 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
static int hns3_nic_set_real_num_queue(struct net_device *netdev)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
struct hnae3_knic_private_info *kinfo = &h->kinfo;
unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
int ret;
@@ -305,24 +304,10 @@ static int hns3_nic_net_stop(struct net_device *netdev)
return 0;
}
-void hns3_set_multicast_list(struct net_device *netdev)
-{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
- struct netdev_hw_addr *ha = NULL;
-
- if (h->ae_algo->ops->set_mc_addr) {
- netdev_for_each_mc_addr(ha, netdev)
- if (h->ae_algo->ops->set_mc_addr(h, ha->addr))
- netdev_err(netdev, "set multicast fail\n");
- }
-}
-
static int hns3_nic_uc_sync(struct net_device *netdev,
const unsigned char *addr)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
if (h->ae_algo->ops->add_uc_addr)
return h->ae_algo->ops->add_uc_addr(h, addr);
@@ -333,8 +318,7 @@ static int hns3_nic_uc_sync(struct net_device *netdev,
static int hns3_nic_uc_unsync(struct net_device *netdev,
const unsigned char *addr)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
if (h->ae_algo->ops->rm_uc_addr)
return h->ae_algo->ops->rm_uc_addr(h, addr);
@@ -345,8 +329,7 @@ static int hns3_nic_uc_unsync(struct net_device *netdev,
static int hns3_nic_mc_sync(struct net_device *netdev,
const unsigned char *addr)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
if (h->ae_algo->ops->add_mc_addr)
return h->ae_algo->ops->add_mc_addr(h, addr);
@@ -357,8 +340,7 @@ static int hns3_nic_mc_sync(struct net_device *netdev,
static int hns3_nic_mc_unsync(struct net_device *netdev,
const unsigned char *addr)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
if (h->ae_algo->ops->rm_mc_addr)
return h->ae_algo->ops->rm_mc_addr(h, addr);
@@ -366,10 +348,9 @@ static int hns3_nic_mc_unsync(struct net_device *netdev,
return 0;
}
-void hns3_nic_set_rx_mode(struct net_device *netdev)
+static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
if (h->ae_algo->ops->set_promisc_mode) {
if (netdev->flags & IFF_PROMISC)
@@ -768,7 +749,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
if (type == DESC_TYPE_SKB) {
skb = (struct sk_buff *)priv;
- paylen = cpu_to_le16(skb->len);
+ paylen = skb->len;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
skb_reset_mac_len(skb);
@@ -802,7 +783,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
cpu_to_le32(ol_type_vlan_len_msec);
desc->tx.type_cs_vlan_tso_len =
cpu_to_le32(type_cs_vlan_tso);
- desc->tx.paylen = cpu_to_le16(paylen);
+ desc->tx.paylen = cpu_to_le32(paylen);
desc->tx.mss = cpu_to_le16(mss);
}
@@ -1025,8 +1006,7 @@ out_net_tx_busy:
static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
struct sockaddr *mac_addr = p;
int ret;
@@ -1208,8 +1188,7 @@ static void hns3_nic_udp_tunnel_del(struct net_device *netdev,
static int hns3_setup_tc(struct net_device *netdev, u8 tc)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
struct hnae3_knic_private_info *kinfo = &h->kinfo;
unsigned int i;
int ret;
@@ -1259,8 +1238,7 @@ static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
static int hns3_vlan_rx_add_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
int ret = -EIO;
if (h->ae_algo->ops->set_vlan_filter)
@@ -1272,8 +1250,7 @@ static int hns3_vlan_rx_add_vid(struct net_device *netdev,
static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
int ret = -EIO;
if (h->ae_algo->ops->set_vlan_filter)
@@ -1285,8 +1262,7 @@ static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
u8 qos, __be16 vlan_proto)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
int ret = -EIO;
if (h->ae_algo->ops->set_vf_vlan_filter)
@@ -1298,8 +1274,7 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
bool if_running = netif_running(netdev);
int ret;
@@ -2609,7 +2584,7 @@ static void hns3_fini_ring(struct hns3_enet_ring *ring)
ring->next_to_use = 0;
}
-int hns3_buf_size2type(u32 buf_size)
+static int hns3_buf_size2type(u32 buf_size)
{
int bd_size_type;
@@ -2662,7 +2637,7 @@ static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
}
}
-static int hns3_init_all_ring(struct hns3_nic_priv *priv)
+int hns3_init_all_ring(struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
int ring_num = h->kinfo.num_tqps * 2;
@@ -2686,12 +2661,12 @@ static int hns3_init_all_ring(struct hns3_nic_priv *priv)
out_when_alloc_ring_memory:
for (j = i - 1; j >= 0; j--)
- hns3_fini_ring(priv->ring_data[i].ring);
+ hns3_fini_ring(priv->ring_data[j].ring);
return -ENOMEM;
}
-static int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
+int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
int i;
@@ -2921,7 +2896,7 @@ err_out:
return ret;
}
-const struct hnae3_client_ops client_ops = {
+static const struct hnae3_client_ops client_ops = {
.init_instance = hns3_client_init,
.uninit_instance = hns3_client_uninit,
.link_status_change = hns3_link_status_change,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h
index 481eada73e2d..66599890b4d4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h
@@ -76,6 +76,8 @@ enum hns3_nic_state {
#define HNS3_RING_NAME_LEN 16
#define HNS3_BUFFER_SIZE_2048 2048
#define HNS3_RING_MAX_PENDING 32768
+#define HNS3_RING_MIN_PENDING 8
+#define HNS3_RING_BD_MULTIPLE 8
#define HNS3_MAX_MTU 9728
#define HNS3_BD_SIZE_512_TYPE 0
@@ -587,9 +589,14 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
#define hns3_for_each_ring(pos, head) \
for (pos = (head).ring; pos; pos = pos->next)
+#define hns3_get_handle(ndev) \
+ (((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
+
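The hns3_get_handle() helper added above collapses the repeated two-line netdev_priv() lookup used throughout hns3 into a single expression. A usage sketch (the accessor function itself is made up for illustration):

static int example_get_num_tqps(struct net_device *ndev)
{
	struct hnae3_handle *h = hns3_get_handle(ndev);

	return h->kinfo.num_tqps;
}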
void hns3_ethtool_set_ops(struct net_device *netdev);
int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
+int hns3_init_all_ring(struct hns3_nic_priv *priv);
+int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
#ifdef CONFIG_HNS3_DCB
void hns3_dcbnl_setup(struct hnae3_handle *handle);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c
index d636399232fb..9b36ce081f62 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c
@@ -102,8 +102,7 @@ static void hns3_driv_to_eth_caps(u32 caps, struct ethtool_link_ksettings *cmd,
static int hns3_get_sset_count(struct net_device *netdev, int stringset)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops = h->ae_algo->ops;
if (!ops->get_sset_count)
@@ -164,8 +163,7 @@ static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data)
static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops = h->ae_algo->ops;
char *buff = (char *)data;
@@ -217,11 +215,10 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
* @stats: statistics info.
* @data: statistics data.
*/
-void hns3_get_stats(struct net_device *netdev, struct ethtool_stats *stats,
- u64 *data)
+static void hns3_get_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
u64 *p = data;
if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) {
@@ -262,10 +259,7 @@ static void hns3_get_drvinfo(struct net_device *netdev,
static u32 hns3_get_link(struct net_device *netdev)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h;
-
- h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_status)
return h->ae_algo->ops->get_status(h);
@@ -277,7 +271,8 @@ static void hns3_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *param)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
- int queue_num = priv->ae_handle->kinfo.num_tqps;
+ struct hnae3_handle *h = priv->ae_handle;
+ int queue_num = h->kinfo.num_tqps;
param->tx_max_pending = HNS3_RING_MAX_PENDING;
param->rx_max_pending = HNS3_RING_MAX_PENDING;
@@ -289,8 +284,7 @@ static void hns3_get_ringparam(struct net_device *netdev,
static void hns3_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *param)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
if (h->ae_algo && h->ae_algo->ops && h->ae_algo->ops->get_pauseparam)
h->ae_algo->ops->get_pauseparam(h, &param->autoneg,
@@ -300,8 +294,7 @@ static void hns3_get_pauseparam(struct net_device *netdev,
static int hns3_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
u32 supported_caps;
u32 advertised_caps;
u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
@@ -392,8 +385,7 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
static u32 hns3_get_rss_key_size(struct net_device *netdev)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo || !h->ae_algo->ops ||
!h->ae_algo->ops->get_rss_key_size)
@@ -404,8 +396,7 @@ static u32 hns3_get_rss_key_size(struct net_device *netdev)
static u32 hns3_get_rss_indir_size(struct net_device *netdev)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo || !h->ae_algo->ops ||
!h->ae_algo->ops->get_rss_indir_size)
@@ -417,8 +408,7 @@ static u32 hns3_get_rss_indir_size(struct net_device *netdev)
static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_rss)
return -EOPNOTSUPP;
@@ -429,8 +419,7 @@ static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key,
static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss)
return -EOPNOTSUPP;
@@ -454,16 +443,17 @@ static int hns3_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_handle *h = hns3_get_handle(netdev);
if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->get_tc_size)
return -EOPNOTSUPP;
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
- cmd->data = h->ae_algo->ops->get_tc_size(h);
+ cmd->data = h->kinfo.num_tc * h->kinfo.rss_size;
break;
+ case ETHTOOL_GRXFH:
+ return h->ae_algo->ops->get_rss_tuple(h, cmd);
default:
return -EOPNOTSUPP;
}
@@ -471,15 +461,106 @@ static int hns3_get_rxnfc(struct net_device *netdev,
return 0;
}
+int hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, u32 new_desc_num)
+{
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ h->kinfo.num_desc = new_desc_num;
+
+ for (i = 0; i < h->kinfo.num_tqps * 2; i++)
+ priv->ring_data[i].ring->desc_num = new_desc_num;
+
+ return hns3_init_all_ring(priv);
+}
+
+int hns3_set_ringparam(struct net_device *ndev, struct ethtool_ringparam *param)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ bool if_running = netif_running(ndev);
+ u32 old_desc_num, new_desc_num;
+ int ret;
+
+ if (param->rx_mini_pending || param->rx_jumbo_pending)
+ return -EINVAL;
+
+ if (param->tx_pending != param->rx_pending) {
+ netdev_err(ndev,
+ "Descriptors of tx and rx must be equal");
+ return -EINVAL;
+ }
+
+ if (param->tx_pending > HNS3_RING_MAX_PENDING ||
+ param->tx_pending < HNS3_RING_MIN_PENDING) {
+ netdev_err(ndev,
+ "Descriptors requested (Tx/Rx: %d) out of range [%d-%d]\n",
+ param->tx_pending, HNS3_RING_MIN_PENDING,
+ HNS3_RING_MAX_PENDING);
+ return -EINVAL;
+ }
+
+ new_desc_num = param->tx_pending;
+
+ /* Hardware requires the descriptor count to be a multiple of eight */
+ new_desc_num = ALIGN(new_desc_num, HNS3_RING_BD_MULTIPLE);
+ old_desc_num = h->kinfo.num_desc;
+ if (old_desc_num == new_desc_num)
+ return 0;
+
+ netdev_info(ndev,
+ "Changing descriptor count from %d to %d.\n",
+ old_desc_num, new_desc_num);
+
+ if (if_running)
+ dev_close(ndev);
+
+ ret = hns3_uninit_all_ring(priv);
+ if (ret)
+ return ret;
+
+ ret = hns3_change_all_ring_bd_num(priv, new_desc_num);
+ if (ret) {
+ ret = hns3_change_all_ring_bd_num(priv, old_desc_num);
+ if (ret) {
+ netdev_err(ndev,
+ "Revert to old bd num fail, ret=%d.\n", ret);
+ return ret;
+ }
+ }
+
+ if (if_running)
+ ret = dev_open(ndev);
+
+ return ret;
+}
+
+static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo || !h->ae_algo->ops || !h->ae_algo->ops->set_rss_tuple)
+ return -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ return h->ae_algo->ops->set_rss_tuple(h, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct ethtool_ops hns3_ethtool_ops = {
.get_drvinfo = hns3_get_drvinfo,
.get_link = hns3_get_link,
.get_ringparam = hns3_get_ringparam,
+ .set_ringparam = hns3_set_ringparam,
.get_pauseparam = hns3_get_pauseparam,
.get_strings = hns3_get_strings,
.get_ethtool_stats = hns3_get_stats,
.get_sset_count = hns3_get_sset_count,
.get_rxnfc = hns3_get_rxnfc,
+ .set_rxnfc = hns3_set_rxnfc,
.get_rxfh_key_size = hns3_get_rss_key_size,
.get_rxfh_indir_size = hns3_get_rss_indir_size,
.get_rxfh = hns3_get_rss,
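With .set_ringparam and .set_rxnfc wired into hns3_ethtool_ops above, ring sizes and RSS hash fields become configurable from userspace through the standard ethtool ring (-G) and flow-hash (-N ... rx-flow-hash) commands. The requested count is rounded with ALIGN() so the hardware's multiple-of-eight BD constraint always holds; a worked example of that rounding:

	u32 req = 1021;

	/* HNS3_RING_BD_MULTIPLE is 8, so 1021 is rounded up to 1024 */
	req = ALIGN(req, HNS3_RING_BD_MULTIPLE);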
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 0641c0098738..afb7ebe20b24 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -398,6 +398,7 @@
#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
+#define E1000_ICR_RXO 0x00000040 /* Receiver Overrun */
#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
/* If this bit asserted, the driver should claim the interrupt */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 98e68888abb1..2311b31bdcac 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -94,10 +94,6 @@ struct e1000_info;
*/
#define E1000_CHECK_RESET_COUNT 25
-#define DEFAULT_RDTR 0
-#define DEFAULT_RADV 8
-#define BURST_RDTR 0x20
-#define BURST_RADV 0x20
#define PCICFG_DESC_RING_STATUS 0xe4
#define FLUSH_DESC_REQUIRED 0x100
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index b322011ec282..f457c5703d0c 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -410,6 +410,9 @@ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
* Checks to see if the link status of the hardware has changed. If a
* change in link status has been detected, then we read the PHY registers
* to get the current speed/duplex if link exists.
+ *
+ * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
+ * up).
**/
s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
{
@@ -423,7 +426,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
* Change or Rx Sequence Error interrupt.
*/
if (!mac->get_link_status)
- return 0;
+ return 1;
/* First we want to see if the MII Status Register reports
* link. If so, then we want to get the current speed/duplex
@@ -461,10 +464,12 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
* different link partner.
*/
ret_val = e1000e_config_fc_after_link_up(hw);
- if (ret_val)
+ if (ret_val) {
e_dbg("Error configuring flow control\n");
+ return ret_val;
+ }
- return ret_val;
+ return 1;
}
/**
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 8436c5f2c3e8..bf8f38f76953 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1071,7 +1071,8 @@ next_desc:
}
static void e1000_put_txbuf(struct e1000_ring *tx_ring,
- struct e1000_buffer *buffer_info)
+ struct e1000_buffer *buffer_info,
+ bool drop)
{
struct e1000_adapter *adapter = tx_ring->adapter;
@@ -1085,7 +1086,10 @@ static void e1000_put_txbuf(struct e1000_ring *tx_ring,
buffer_info->dma = 0;
}
if (buffer_info->skb) {
- dev_kfree_skb_any(buffer_info->skb);
+ if (drop)
+ dev_kfree_skb_any(buffer_info->skb);
+ else
+ dev_consume_skb_any(buffer_info->skb);
buffer_info->skb = NULL;
}
buffer_info->time_stamp = 0;
@@ -1199,7 +1203,7 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
wmb(); /* force write prior to skb_tstamp_tx */
skb_tstamp_tx(skb, &shhwtstamps);
- dev_kfree_skb_any(skb);
+ dev_consume_skb_any(skb);
} else if (time_after(jiffies, adapter->tx_hwtstamp_start
+ adapter->tx_timeout_factor * HZ)) {
dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
@@ -1254,7 +1258,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
}
}
- e1000_put_txbuf(tx_ring, buffer_info);
+ e1000_put_txbuf(tx_ring, buffer_info, false);
tx_desc->upper.data = 0;
i++;
@@ -1910,14 +1914,30 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ u32 icr;
+ bool enable = true;
+
+ icr = er32(ICR);
+ if (icr & E1000_ICR_RXO) {
+ ew32(ICR, E1000_ICR_RXO);
+ enable = false;
+ /* napi poll will re-enable Other, make sure it runs */
+ if (napi_schedule_prep(&adapter->napi)) {
+ adapter->total_rx_bytes = 0;
+ adapter->total_rx_packets = 0;
+ __napi_schedule(&adapter->napi);
+ }
+ }
+ if (icr & E1000_ICR_LSC) {
+ ew32(ICR, E1000_ICR_LSC);
+ hw->mac.get_link_status = true;
+ /* guard against interrupt when we're going down */
+ if (!test_bit(__E1000_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ }
- hw->mac.get_link_status = true;
-
- /* guard against interrupt when we're going down */
- if (!test_bit(__E1000_DOWN, &adapter->state)) {
- mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ if (enable && !test_bit(__E1000_DOWN, &adapter->state))
ew32(IMS, E1000_IMS_OTHER);
- }
return IRQ_HANDLED;
}
@@ -2421,7 +2441,7 @@ static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
for (i = 0; i < tx_ring->count; i++) {
buffer_info = &tx_ring->buffer_info[i];
- e1000_put_txbuf(tx_ring, buffer_info);
+ e1000_put_txbuf(tx_ring, buffer_info, false);
}
netdev_reset_queue(adapter->netdev);
@@ -2687,7 +2707,8 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
napi_complete_done(napi, work_done);
if (!test_bit(__E1000_DOWN, &adapter->state)) {
if (adapter->msix_entries)
- ew32(IMS, adapter->rx_ring->ims_val);
+ ew32(IMS, adapter->rx_ring->ims_val |
+ E1000_IMS_OTHER);
else
e1000_irq_enable(adapter);
}
@@ -3004,8 +3025,8 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
hw->mac.ops.config_collision_dist(hw);
- /* SPT and CNP Si errata workaround to avoid data corruption */
- if (hw->mac.type >= e1000_pch_spt) {
+ /* SPT and KBL Si errata workaround to avoid data corruption */
+ if (hw->mac.type == e1000_pch_spt) {
u32 reg_val;
reg_val = er32(IOSFPC);
@@ -3013,7 +3034,9 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
ew32(IOSFPC, reg_val);
reg_val = er32(TARC(0));
- reg_val |= E1000_TARC0_CB_MULTIQ_3_REQ;
+ /* SPT and KBL Si errata workaround to avoid Tx hang */
+ reg_val &= ~BIT(28);
+ reg_val |= BIT(29);
ew32(TARC(0), reg_val);
}
}
@@ -3223,14 +3246,6 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
*/
ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
-
- /* override the delay timers for enabling bursting, only if
- * the value was not set by the user via module options
- */
- if (adapter->rx_int_delay == DEFAULT_RDTR)
- adapter->rx_int_delay = BURST_RDTR;
- if (adapter->rx_abs_int_delay == DEFAULT_RADV)
- adapter->rx_abs_int_delay = BURST_RADV;
}
/* set the Receive Delay Timer Register */
@@ -4204,7 +4219,7 @@ static void e1000e_trigger_lsc(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
if (adapter->msix_entries)
- ew32(ICS, E1000_ICS_OTHER);
+ ew32(ICS, E1000_ICS_LSC | E1000_ICS_OTHER);
else
ew32(ICS, E1000_ICS_LSC);
}
@@ -5074,14 +5089,14 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
/* get_link_status is set on LSC (link status) interrupt or
* Rx sequence error interrupt. get_link_status will stay
- * false until the check_for_link establishes link
+ * true until the check_for_link establishes link
* for copper adapters ONLY
*/
switch (hw->phy.media_type) {
case e1000_media_type_copper:
if (hw->mac.get_link_status) {
ret_val = hw->mac.ops.check_for_link(hw);
- link_active = !hw->mac.get_link_status;
+ link_active = ret_val > 0;
} else {
link_active = true;
}
@@ -5092,14 +5107,14 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
break;
case e1000_media_type_internal_serdes:
ret_val = hw->mac.ops.check_for_link(hw);
- link_active = adapter->hw.mac.serdes_has_link;
+ link_active = hw->mac.serdes_has_link;
break;
default:
case e1000_media_type_unknown:
break;
}
- if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
+ if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
(er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
/* See e1000_kmrn_lock_loss_workaround_ich8lan() */
e_info("Gigabit has been disabled, downgrading speed\n");
@@ -5614,7 +5629,7 @@ dma_error:
i += tx_ring->count;
i--;
buffer_info = &tx_ring->buffer_info[i];
- e1000_put_txbuf(tx_ring, buffer_info);
+ e1000_put_txbuf(tx_ring, buffer_info, true);
}
return 0;
@@ -7408,7 +7423,7 @@ static void e1000_remove(struct pci_dev *pdev)
if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
cancel_work_sync(&adapter->tx_hwtstamp_work);
if (adapter->tx_hwtstamp_skb) {
- dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
+ dev_consume_skb_any(adapter->tx_hwtstamp_skb);
adapter->tx_hwtstamp_skb = NULL;
}
}
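Several of the netdev.c hunks above depend on the new check_for_copper_link() convention documented in mac.c: negative values are errors, 0 means link down, 1 means link up, and an already-cleared get_link_status now also returns 1. A caller sketch under that assumption:

/* Illustrative sketch, not driver code. */
static bool example_copper_link_active(struct e1000_hw *hw)
{
	s32 ret = hw->mac.ops.check_for_link(hw);

	return ret > 0;		/* link is up only on a strictly positive return */
}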
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 6d8c39abee16..47da51864543 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -73,17 +73,25 @@ E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
/* Receive Interrupt Delay in units of 1.024 microseconds
* hardware will likely hang if you set this to anything but zero.
*
+ * Burst variant is used as default if device has FLAG2_DMA_BURST.
+ *
* Valid Range: 0-65535
*/
E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
+#define DEFAULT_RDTR 0
+#define BURST_RDTR 0x20
#define MAX_RXDELAY 0xFFFF
#define MIN_RXDELAY 0
/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
*
+ * Burst variant is used as default if device has FLAG2_DMA_BURST.
+ *
* Valid Range: 0-65535
*/
E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
+#define DEFAULT_RADV 8
+#define BURST_RADV 0x20
#define MAX_RXABSDELAY 0xFFFF
#define MIN_RXABSDELAY 0
@@ -297,6 +305,9 @@ void e1000e_check_options(struct e1000_adapter *adapter)
.max = MAX_RXDELAY } }
};
+ if (adapter->flags2 & FLAG2_DMA_BURST)
+ opt.def = BURST_RDTR;
+
if (num_RxIntDelay > bd) {
adapter->rx_int_delay = RxIntDelay[bd];
e1000_validate_option(&adapter->rx_int_delay, &opt,
@@ -307,7 +318,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
}
/* Receive Absolute Interrupt Delay */
{
- static const struct e1000_option opt = {
+ static struct e1000_option opt = {
.type = range_option,
.name = "Receive Absolute Interrupt Delay",
.err = "using default of "
@@ -317,6 +328,9 @@ void e1000e_check_options(struct e1000_adapter *adapter)
.max = MAX_RXABSDELAY } }
};
+ if (adapter->flags2 & FLAG2_DMA_BURST)
+ opt.def = BURST_RADV;
+
if (num_RxAbsIntDelay > bd) {
adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index d78d47b41a71..86ff0969efb6 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1744,6 +1744,7 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
s32 ret_val = 0;
u16 i, phy_status;
+ *success = false;
for (i = 0; i < iterations; i++) {
/* Some PHYs require the MII_BMSR register to be read
* twice due to the link bit being sticky. No harm doing
@@ -1763,16 +1764,16 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
if (ret_val)
break;
- if (phy_status & BMSR_LSTATUS)
+ if (phy_status & BMSR_LSTATUS) {
+ *success = true;
break;
+ }
if (usec_interval >= 1000)
msleep(usec_interval / 1000);
else
udelay(usec_interval);
}
- *success = (i < iterations);
-
return ret_val;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 439c63cb2a0c..8139b4ee1dc3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -350,7 +350,7 @@ struct i40e_pf {
u16 num_vmdq_vsis; /* num vmdq vsis this PF has set up */
u16 num_vmdq_qps; /* num queue pairs per vmdq pool */
u16 num_vmdq_msix; /* num queue vectors per vmdq pool */
- u16 num_req_vfs; /* num VFs requested for this VF */
+ u16 num_req_vfs; /* num VFs requested for this PF */
u16 num_vf_qps; /* num queue pairs per VF */
u16 num_lan_qps; /* num lan queues this PF has set up */
u16 num_lan_msix; /* num queue vectors for the base PF vsi */
@@ -403,55 +403,57 @@ struct i40e_pf {
struct timer_list service_timer;
struct work_struct service_task;
- u64 hw_features;
-#define I40E_HW_RSS_AQ_CAPABLE BIT_ULL(0)
-#define I40E_HW_128_QP_RSS_CAPABLE BIT_ULL(1)
-#define I40E_HW_ATR_EVICT_CAPABLE BIT_ULL(2)
-#define I40E_HW_WB_ON_ITR_CAPABLE BIT_ULL(3)
-#define I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT_ULL(4)
-#define I40E_HW_NO_PCI_LINK_CHECK BIT_ULL(5)
-#define I40E_HW_100M_SGMII_CAPABLE BIT_ULL(6)
-#define I40E_HW_NO_DCB_SUPPORT BIT_ULL(7)
-#define I40E_HW_USE_SET_LLDP_MIB BIT_ULL(8)
-#define I40E_HW_GENEVE_OFFLOAD_CAPABLE BIT_ULL(9)
-#define I40E_HW_PTP_L4_CAPABLE BIT_ULL(10)
-#define I40E_HW_WOL_MC_MAGIC_PKT_WAKE BIT_ULL(11)
-#define I40E_HW_MPLS_HDR_OFFLOAD_CAPABLE BIT_ULL(12)
-#define I40E_HW_HAVE_CRT_RETIMER BIT_ULL(13)
-#define I40E_HW_OUTER_UDP_CSUM_CAPABLE BIT_ULL(14)
-#define I40E_HW_PHY_CONTROLS_LEDS BIT_ULL(15)
-#define I40E_HW_STOP_FW_LLDP BIT_ULL(16)
-#define I40E_HW_PORT_ID_VALID BIT_ULL(17)
-#define I40E_HW_RESTART_AUTONEG BIT_ULL(18)
-
- u64 flags;
-#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1)
-#define I40E_FLAG_MSI_ENABLED BIT_ULL(2)
-#define I40E_FLAG_MSIX_ENABLED BIT_ULL(3)
-#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT_ULL(4)
-#define I40E_FLAG_RSS_ENABLED BIT_ULL(6)
-#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7)
-#define I40E_FLAG_IWARP_ENABLED BIT_ULL(10)
-#define I40E_FLAG_FILTER_SYNC BIT_ULL(15)
-#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(16)
-#define I40E_FLAG_SRIOV_ENABLED BIT_ULL(19)
-#define I40E_FLAG_DCB_ENABLED BIT_ULL(20)
-#define I40E_FLAG_FD_SB_ENABLED BIT_ULL(21)
-#define I40E_FLAG_FD_ATR_ENABLED BIT_ULL(22)
-#define I40E_FLAG_FD_SB_AUTO_DISABLED BIT_ULL(23)
-#define I40E_FLAG_FD_ATR_AUTO_DISABLED BIT_ULL(24)
-#define I40E_FLAG_PTP BIT_ULL(25)
-#define I40E_FLAG_MFP_ENABLED BIT_ULL(26)
-#define I40E_FLAG_UDP_FILTER_SYNC BIT_ULL(27)
-#define I40E_FLAG_DCB_CAPABLE BIT_ULL(29)
-#define I40E_FLAG_VEB_STATS_ENABLED BIT_ULL(37)
-#define I40E_FLAG_LINK_POLLING_ENABLED BIT_ULL(39)
-#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40)
-#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(51)
-#define I40E_FLAG_CLIENT_RESET BIT_ULL(54)
-#define I40E_FLAG_TEMP_LINK_POLLING BIT_ULL(55)
-#define I40E_FLAG_CLIENT_L2_CHANGE BIT_ULL(56)
-#define I40E_FLAG_LEGACY_RX BIT_ULL(58)
+ u32 hw_features;
+#define I40E_HW_RSS_AQ_CAPABLE BIT(0)
+#define I40E_HW_128_QP_RSS_CAPABLE BIT(1)
+#define I40E_HW_ATR_EVICT_CAPABLE BIT(2)
+#define I40E_HW_WB_ON_ITR_CAPABLE BIT(3)
+#define I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT(4)
+#define I40E_HW_NO_PCI_LINK_CHECK BIT(5)
+#define I40E_HW_100M_SGMII_CAPABLE BIT(6)
+#define I40E_HW_NO_DCB_SUPPORT BIT(7)
+#define I40E_HW_USE_SET_LLDP_MIB BIT(8)
+#define I40E_HW_GENEVE_OFFLOAD_CAPABLE BIT(9)
+#define I40E_HW_PTP_L4_CAPABLE BIT(10)
+#define I40E_HW_WOL_MC_MAGIC_PKT_WAKE BIT(11)
+#define I40E_HW_MPLS_HDR_OFFLOAD_CAPABLE BIT(12)
+#define I40E_HW_HAVE_CRT_RETIMER BIT(13)
+#define I40E_HW_OUTER_UDP_CSUM_CAPABLE BIT(14)
+#define I40E_HW_PHY_CONTROLS_LEDS BIT(15)
+#define I40E_HW_STOP_FW_LLDP BIT(16)
+#define I40E_HW_PORT_ID_VALID BIT(17)
+#define I40E_HW_RESTART_AUTONEG BIT(18)
+
+ u32 flags;
+#define I40E_FLAG_RX_CSUM_ENABLED BIT(0)
+#define I40E_FLAG_MSI_ENABLED BIT(1)
+#define I40E_FLAG_MSIX_ENABLED BIT(2)
+#define I40E_FLAG_RSS_ENABLED BIT(3)
+#define I40E_FLAG_VMDQ_ENABLED BIT(4)
+#define I40E_FLAG_FILTER_SYNC BIT(5)
+#define I40E_FLAG_SRIOV_ENABLED BIT(6)
+#define I40E_FLAG_DCB_CAPABLE BIT(7)
+#define I40E_FLAG_DCB_ENABLED BIT(8)
+#define I40E_FLAG_FD_SB_ENABLED BIT(9)
+#define I40E_FLAG_FD_ATR_ENABLED BIT(10)
+#define I40E_FLAG_FD_SB_AUTO_DISABLED BIT(11)
+#define I40E_FLAG_FD_ATR_AUTO_DISABLED BIT(12)
+#define I40E_FLAG_MFP_ENABLED BIT(13)
+#define I40E_FLAG_UDP_FILTER_SYNC BIT(14)
+#define I40E_FLAG_HW_ATR_EVICT_ENABLED BIT(15)
+#define I40E_FLAG_VEB_MODE_ENABLED BIT(16)
+#define I40E_FLAG_VEB_STATS_ENABLED BIT(17)
+#define I40E_FLAG_LINK_POLLING_ENABLED BIT(18)
+#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT(19)
+#define I40E_FLAG_TEMP_LINK_POLLING BIT(20)
+#define I40E_FLAG_LEGACY_RX BIT(21)
+#define I40E_FLAG_PTP BIT(22)
+#define I40E_FLAG_IWARP_ENABLED BIT(23)
+#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT(24)
+#define I40E_FLAG_CLIENT_L2_CHANGE BIT(25)
+#define I40E_FLAG_CLIENT_RESET BIT(26)
+#define I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED BIT(27)
+#define I40E_FLAG_SOURCE_PRUNING_DISABLED BIT(28)
struct i40e_client_instance *cinst;
bool stat_offsets_loaded;
@@ -947,9 +949,6 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
struct i40e_hw *hw = &pf->hw;
u32 val;
- /* definitely clear the PBA here, as this function is meant to
- * clean out all previous interrupts AND enable the interrupt
- */
val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
(I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
@@ -958,7 +957,7 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
}
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
-void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba);
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
int i40e_open(struct net_device *netdev);
int i40e_close(struct net_device *netdev);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 4c85ea9cd89a..a8f65aed5421 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1771,9 +1771,10 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_25GBASE_CR = 0x20,
I40E_PHY_TYPE_25GBASE_SR = 0x21,
I40E_PHY_TYPE_25GBASE_LR = 0x22,
+ I40E_PHY_TYPE_MAX,
+ I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
I40E_PHY_TYPE_EMPTY = 0xFE,
I40E_PHY_TYPE_DEFAULT = 0xFF,
- I40E_PHY_TYPE_MAX
};
#define I40E_LINK_SPEED_100MB_SHIFT 0x1
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 60542beda7ad..53aad378d49c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1567,30 +1567,46 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
struct i40e_aq_desc desc;
i40e_status status;
u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
+ u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
if (!abilities)
return I40E_ERR_PARAM;
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_get_phy_abilities);
+ do {
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_phy_abilities);
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- if (abilities_size > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+ if (abilities_size > I40E_AQ_LARGE_BUF)
+ desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
- if (qualified_modules)
- desc.params.external.param0 |=
+ if (qualified_modules)
+ desc.params.external.param0 |=
cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
- if (report_init)
- desc.params.external.param0 |=
+ if (report_init)
+ desc.params.external.param0 |=
cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
- status = i40e_asq_send_command(hw, &desc, abilities, abilities_size,
- cmd_details);
+ status = i40e_asq_send_command(hw, &desc, abilities,
+ abilities_size, cmd_details);
- if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
- status = I40E_ERR_UNKNOWN_PHY;
+ if (status)
+ break;
+
+ if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) {
+ status = I40E_ERR_UNKNOWN_PHY;
+ break;
+ } else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) {
+ usleep_range(1000, 2000);
+ total_delay++;
+ status = I40E_ERR_TIMEOUT;
+ }
+ } while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) &&
+ (total_delay < max_delay));
+
+ if (status)
+ return status;
if (report_init) {
if (hw->mac.type == I40E_MAC_XL710 &&
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 8f326f87a815..6f2725fc50a1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -278,8 +278,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
rx_ring->netdev,
rx_ring->rx_bi);
dev_info(&pf->pdev->dev,
- " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
- i, rx_ring->state,
+ " rx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
+ i, *rx_ring->state,
rx_ring->queue_index,
rx_ring->reg_idx);
dev_info(&pf->pdev->dev,
@@ -334,8 +334,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
tx_ring->netdev,
tx_ring->tx_bi);
dev_info(&pf->pdev->dev,
- " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
- i, tx_ring->state,
+ " tx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
+ i, *tx_ring->state,
tx_ring->queue_index,
tx_ring->reg_idx);
dev_info(&pf->pdev->dev,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 1136d02e2e95..afd3ca8d9851 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -227,6 +227,8 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0),
I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0),
I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
+ I40E_PRIV_FLAG("disable-source-pruning",
+ I40E_FLAG_SOURCE_PRUNING_DISABLED, 0),
};
#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_gstrings_priv_flags)
@@ -2008,7 +2010,9 @@ static int i40e_set_phys_id(struct net_device *netdev,
if (!(pf->hw_features & I40E_HW_PHY_CONTROLS_LEDS)) {
pf->led_status = i40e_led_get(hw);
} else {
- i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL, NULL);
+ if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE))
+ i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL,
+ NULL);
ret = i40e_led_get_phy(hw, &temp_status,
&pf->phy_led_val);
pf->led_status = temp_status;
@@ -2033,7 +2037,8 @@ static int i40e_set_phys_id(struct net_device *netdev,
ret = i40e_led_set_phy(hw, false, pf->led_status,
(pf->phy_led_val |
I40E_PHY_LED_MODE_ORIG));
- i40e_aq_set_phy_debug(hw, 0, NULL);
+ if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE))
+ i40e_aq_set_phy_debug(hw, 0, NULL);
}
break;
default:
@@ -4090,7 +4095,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- u64 orig_flags, new_flags, changed_flags;
+ u32 orig_flags, new_flags, changed_flags;
u32 i, j;
orig_flags = READ_ONCE(pf->flags);
@@ -4142,12 +4147,12 @@ flags_complete:
return -EOPNOTSUPP;
/* Compare and exchange the new flags into place. If we failed, that
- * is if cmpxchg64 returns anything but the old value, this means that
+ * is if cmpxchg returns anything but the old value, this means that
* something else has modified the flags variable since we copied it
* originally. We'll just punt with an error and log something in the
* message buffer.
*/
- if (cmpxchg64(&pf->flags, orig_flags, new_flags) != orig_flags) {
+ if (cmpxchg(&pf->flags, orig_flags, new_flags) != orig_flags) {
dev_warn(&pf->pdev->dev,
"Unable to update pf->flags as it was modified by another thread...\n");
return -EAGAIN;
@@ -4189,8 +4194,9 @@ flags_complete:
/* Issue reset to cause things to take effect, as additional bits
* are added we will need to create a mask of bits requiring reset
*/
- if ((changed_flags & I40E_FLAG_VEB_STATS_ENABLED) ||
- ((changed_flags & I40E_FLAG_LEGACY_RX) && netif_running(dev)))
+ if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
+ I40E_FLAG_LEGACY_RX |
+ I40E_FLAG_SOURCE_PRUNING_DISABLED))
i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
return 0;
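Narrowing pf->flags to u32 (see the i40e.h hunk above) is what allows this path to drop cmpxchg64() in favour of a plain cmpxchg() on a 32-bit word. A sketch of the lock-free update pattern, with a hypothetical flag name:

	u32 orig_flags = READ_ONCE(pf->flags);
	u32 new_flags = orig_flags | EXAMPLE_FLAG;	/* EXAMPLE_FLAG is illustrative */

	if (cmpxchg(&pf->flags, orig_flags, new_flags) != orig_flags)
		return -EAGAIN;	/* flags changed under us; let the caller retry */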
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 3f9e89b054ec..4de52001a2b9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1776,11 +1776,6 @@ static void i40e_set_rx_mode(struct net_device *netdev)
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
}
-
- /* schedule our worker thread which will take care of
- * applying the new filter changes
- */
- i40e_service_event_schedule(vsi->back);
}
/**
@@ -2884,22 +2879,18 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
**/
static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
{
- struct i40e_vsi *vsi = ring->vsi;
+ int cpu;
if (!ring->q_vector || !ring->netdev)
return;
- if ((vsi->tc_config.numtc <= 1) &&
- !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) {
- netif_set_xps_queue(ring->netdev,
- get_cpu_mask(ring->q_vector->v_idx),
- ring->queue_index);
- }
+ /* We only initialize XPS once, so as not to overwrite user settings */
+ if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
+ return;
- /* schedule our worker thread which will take care of
- * applying the new filter changes
- */
- i40e_service_event_schedule(vsi->back);
+ cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
+ netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
+ ring->queue_index);
}
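The once-only XPS guard above relies on ring->state now being a bitmap rather than a plain integer (see the bitmap_zero() call in the next hunk), so the initialisation is expressed with test_and_set_bit(). A standalone sketch of the same idiom, with invented names:

#include <linux/bitops.h>

struct example_ring {
	DECLARE_BITMAP(state, 2);	/* hypothetical two-flag state */
};

static bool example_init_once(struct example_ring *ring)
{
	/* true only for the first caller; later calls see the bit already set */
	return !test_and_set_bit(0, ring->state);
}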
/**
@@ -3009,7 +3000,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
struct i40e_hmc_obj_rxq rx_ctx;
i40e_status err = 0;
- ring->state = 0;
+ bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
/* clear the context structure first */
memset(&rx_ctx, 0, sizeof(rx_ctx));
@@ -3034,7 +3025,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
if (hw->revision_id == 0)
rx_ctx.lrxqthresh = 0;
else
- rx_ctx.lrxqthresh = 2;
+ rx_ctx.lrxqthresh = 1;
rx_ctx.crcstrip = 1;
rx_ctx.l2tsel = 1;
/* this controls whether VLAN is stripped from inner headers */
@@ -3407,15 +3398,14 @@ void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
/**
* i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
* @pf: board private structure
- * @clearpba: true when all pending interrupt events should be cleared
**/
-void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba)
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
u32 val;
val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
- (clearpba ? I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
(I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
wr32(hw, I40E_PFINT_DYN_CTL0, val);
@@ -3482,6 +3472,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
int tx_int_idx = 0;
int vector, err;
int irq_num;
+ int cpu;
for (vector = 0; vector < q_vectors; vector++) {
struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
@@ -3517,10 +3508,14 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
q_vector->affinity_notify.release = i40e_irq_affinity_release;
irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
- /* get_cpu_mask returns a static constant mask with
- * a permanent lifetime so it's ok to use here.
+ /* Spread affinity hints out across online CPUs.
+ *
+ * get_cpu_mask returns a static constant mask with
+ * a permanent lifetime so it's ok to pass to
+ * irq_set_affinity_hint without making a copy.
*/
- irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx));
+ cpu = cpumask_local_spread(q_vector->v_idx, -1);
+ irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
}
vsi->irqs_ready = true;
@@ -3596,7 +3591,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_q_vectors; i++)
i40e_irq_dynamic_enable(vsi, i);
} else {
- i40e_irq_dynamic_enable_icr0(pf, true);
+ i40e_irq_dynamic_enable_icr0(pf);
}
i40e_flush(&pf->hw);
@@ -3745,7 +3740,7 @@ enable_intr:
wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
if (!test_bit(__I40E_DOWN, pf->state)) {
i40e_service_event_schedule(pf);
- i40e_irq_dynamic_enable_icr0(pf, false);
+ i40e_irq_dynamic_enable_icr0(pf);
}
return ret;
@@ -6231,6 +6226,7 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
hlist_del(&filter->fdir_node);
kfree(filter);
pf->fdir_pf_active_filters--;
+ pf->fd_inv = 0;
}
}
}
@@ -6557,12 +6553,26 @@ static void i40e_handle_link_event(struct i40e_pf *pf,
*/
i40e_link_event(pf);
- /* check for unqualified module, if link is down */
- if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
- (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
- (!(status->link_info & I40E_AQ_LINK_UP)))
+ /* Check if module meets thermal requirements */
+ if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
+ dev_err(&pf->pdev->dev,
+ "Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
dev_err(&pf->pdev->dev,
- "The driver failed to link because an unqualified module was detected.\n");
+ "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
+ } else {
+		/* check for an unqualified module if the link is down; suppress
+		 * the message if the link was forced down.
+		 */
+ if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+ (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
+ (!(status->link_info & I40E_AQ_LINK_UP)) &&
+ (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
+ dev_err(&pf->pdev->dev,
+ "Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
+ dev_err(&pf->pdev->dev,
+ "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
+ }
+ }
}
/**
@@ -7678,7 +7688,7 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
/**
* i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
- * @type: VSI pointer
+ * @vsi: VSI pointer
* @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
*
* On error: returns error code (negative)
@@ -8439,7 +8449,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
i40e_flush(hw);
- i40e_irq_dynamic_enable_icr0(pf, true);
+ i40e_irq_dynamic_enable_icr0(pf);
return err;
}
@@ -8967,8 +8977,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
I40E_FLAG_MSIX_ENABLED;
/* Set default ITR */
- pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
- pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
+ pf->rx_itr_default = I40E_ITR_RX_DEF;
+ pf->tx_itr_default = I40E_ITR_TX_DEF;
/* Depending on PF configurations, it is possible that the RSS
* maximum might end up larger than the available queues
@@ -9068,6 +9078,11 @@ static int i40e_sw_init(struct i40e_pf *pf)
(pf->hw.aq.fw_maj_ver >= 5)))
pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;
+ /* Enable PTP L4 if FW > v6.0 */
+ if (pf->hw.mac.type == I40E_MAC_XL710 &&
+ pf->hw.aq.fw_maj_ver >= 6)
+ pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;
+
if (pf->hw.func_caps.vmdq) {
pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
pf->flags |= I40E_FLAG_VMDQ_ENABLED;
@@ -9903,6 +9918,31 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
enabled_tc = i40e_pf_get_tc_map(pf);
+ /* Source pruning is enabled by default, so the flag is
+ * negative logic - if it's set, we need to fiddle with
+ * the VSI to disable source pruning.
+ */
+ if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.vf_num = 0;
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id =
+ cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "update vsi failed, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ ret = -ENOENT;
+ goto err;
+ }
+ }
+
/* MFP mode setup queue map and update VSI */
if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
!(pf->hw.func_caps.iscsi)) { /* NIC type PF */
@@ -12000,6 +12040,28 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
}
/**
+ * i40e_pci_error_reset_prepare - prepare device driver for pci reset
+ * @pdev: PCI device information struct
+ */
+static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+ i40e_prep_for_reset(pf, false);
+}
+
+/**
+ * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
+ * @pdev: PCI device information struct
+ */
+static void i40e_pci_error_reset_done(struct pci_dev *pdev)
+{
+ struct i40e_pf *pf = pci_get_drvdata(pdev);
+
+ i40e_reset_and_rebuild(pf, false, false);
+}
+
+/**
* i40e_pci_error_resume - restart operations after PCI error recovery
* @pdev: PCI device information struct
*
@@ -12189,6 +12251,8 @@ static int i40e_resume(struct device *dev)
static const struct pci_error_handlers i40e_err_handler = {
.error_detected = i40e_pci_error_detected,
.slot_reset = i40e_pci_error_slot_reset,
+ .reset_prepare = i40e_pci_error_reset_prepare,
+ .reset_done = i40e_pci_error_reset_done,
.resume = i40e_pci_error_resume,
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 57505b1df98d..151d9cfb6ea4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -311,13 +311,10 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
u16 offset, u16 *data)
{
- i40e_status ret_code = 0;
-
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
- ret_code = i40e_read_nvm_word_aq(hw, offset, data);
- else
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
- return ret_code;
+ return i40e_read_nvm_word_aq(hw, offset, data);
+
+ return i40e_read_nvm_word_srctl(hw, offset, data);
}
/**
@@ -331,7 +328,7 @@ static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data)
{
- i40e_status ret_code = 0;
+ i40e_status ret_code;
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (ret_code)
@@ -446,13 +443,10 @@ static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
u16 offset, u16 *words,
u16 *data)
{
- i40e_status ret_code = 0;
-
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
- ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
- else
- ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
- return ret_code;
+ return i40e_read_nvm_buffer_aq(hw, offset, words, data);
+
+ return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 86ca27f72f02..c234758dad15 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -2794,7 +2794,7 @@
#define I40E_GLV_RUPP_MAX_INDEX 383
#define I40E_GLV_RUPP_RUPP_SHIFT 0
#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
-#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
+#define I40E_GLV_TEPC(_i) (0x00344000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
#define I40E_GLV_TEPC_MAX_INDEX 383
#define I40E_GLV_TEPC_TEPC_SHIFT 0
#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index d9fdf69bbc6e..a23306f04e00 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1372,6 +1372,15 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
union i40e_rx_desc *rx_desc;
struct i40e_rx_buffer *bi;
+ /* Hardware only fetches new descriptors in cache lines of 8,
+ * essentially ignoring the lower 3 bits of the tail register. We want
+ * to ensure our tail writes are aligned to avoid unnecessary work. We
+ * can't simply round down the cleaned count, since we might fail to
+ * allocate some buffers. What we really want is to ensure that
+	 * next_to_use + cleaned_count produces an aligned value.
+ */
+ cleaned_count -= (ntu + cleaned_count) & 0x7;
+
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
return false;
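
As a quick check of the alignment math in the new comment, this standalone sketch reproduces the "(ntu + cleaned_count) & 0x7" adjustment for a couple of sample values; the 8-descriptor granularity is the one described above, the numbers are made up.

#include <stdio.h>

/* Trim cleaned_count so that next_to_use + cleaned_count lands on an
 * 8-descriptor boundary, matching the hardware's cache-line fetches. */
static unsigned int align_cleaned_count(unsigned int ntu, unsigned int cleaned)
{
        return cleaned - ((ntu + cleaned) & 0x7);
}

int main(void)
{
        /* ntu = 5, cleaned = 30: 35 & 7 = 3, so refill only 27 buffers and
         * the resulting tail position (5 + 27 = 32) stays aligned. */
        printf("%u\n", align_cleaned_count(5, 30));  /* 27 */
        printf("%u\n", align_cleaned_count(0, 64));  /* 64, already aligned */
        return 0;
}
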
@@ -2202,9 +2211,7 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr)
u32 val;
val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- /* Don't clear PBA because that can cause lost interrupts that
- * came in while we were cleaning/polling
- */
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
(type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
(itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
@@ -2241,7 +2248,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
/* If we don't have MSIX, then we only need to re-enable icr0 */
if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
- i40e_irq_dynamic_enable_icr0(vsi->back, false);
+ i40e_irq_dynamic_enable_icr0(vsi->back);
return;
}
@@ -3167,38 +3174,12 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
/* write last descriptor with EOP bit */
td_cmd |= I40E_TX_DESC_CMD_EOP;
- /* We can OR these values together as they both are checked against
- * 4 below and at this point desc_count will be used as a boolean value
- * after this if/else block.
+ /* We OR these values together to check both against 4 (WB_STRIDE)
+ * below. This is safe since we don't re-use desc_count afterwards.
*/
desc_count |= ++tx_ring->packet_stride;
- /* Algorithm to optimize tail and RS bit setting:
- * if queue is stopped
- * mark RS bit
- * reset packet counter
- * else if xmit_more is supported and is true
- * advance packet counter to 4
- * reset desc_count to 0
- *
- * if desc_count >= 4
- * mark RS bit
- * reset packet counter
- * if desc_count > 0
- * update tail
- *
- * Note: If there are less than 4 descriptors
- * pending and interrupts were disabled the service task will
- * trigger a force WB.
- */
- if (netif_xmit_stopped(txring_txq(tx_ring))) {
- goto do_rs;
- } else if (skb->xmit_more) {
- /* set stride to arm on next packet and reset desc_count */
- tx_ring->packet_stride = WB_STRIDE;
- desc_count = 0;
- } else if (desc_count >= WB_STRIDE) {
-do_rs:
+ if (desc_count >= WB_STRIDE) {
/* write last descriptor with RS bit set */
td_cmd |= I40E_TX_DESC_CMD_RS;
tx_ring->packet_stride = 0;
@@ -3219,7 +3200,7 @@ do_rs:
first->next_to_watch = tx_desc;
/* notify HW of packet */
- if (desc_count) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
writel(i, tx_ring->tail);
/* we need this if more than one processor can write to our tail
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 2f848bc5e391..ff57ae451524 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -38,8 +38,10 @@
#define I40E_ITR_8K 0x003E
#define I40E_ITR_4K 0x007A
#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
-#define I40E_ITR_RX_DEF I40E_ITR_20K
-#define I40E_ITR_TX_DEF I40E_ITR_20K
+#define I40E_ITR_RX_DEF (ITR_REG_TO_USEC(I40E_ITR_20K) | \
+ I40E_ITR_DYNAMIC)
+#define I40E_ITR_TX_DEF (ITR_REG_TO_USEC(I40E_ITR_20K) | \
+ I40E_ITR_DYNAMIC)
#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
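
A small sketch of what the new I40E_ITR_*_DEF encoding expands to, assuming the usual i40e convention that the ITR register counts 2-usec ticks (ITR_REG_TO_USEC shifting left by one) and that I40E_ITR_20K is 0x0019; the macros are redefined locally only for the demonstration.

#include <stdio.h>

/* Local stand-ins for the i40e_txrx.h macros (assumed values). */
#define ITR_REG_TO_USEC(reg)    ((reg) << 1)
#define I40E_ITR_20K            0x0019
#define I40E_ITR_DYNAMIC        0x8000

int main(void)
{
        unsigned int def = ITR_REG_TO_USEC(I40E_ITR_20K) | I40E_ITR_DYNAMIC;

        /* 0x19 ticks -> 50 usec (~20K ints/sec) with the dynamic bit set,
         * so the default now carries both pieces of information itself. */
        printf("default = 0x%04x (%u usec)\n", def, def & ~I40E_ITR_DYNAMIC);
        return 0;
}
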
@@ -206,7 +208,7 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
}
/* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define I40E_RX_BUFFER_WRITE 32 /* Must be power of 2 */
#define I40E_RX_INCREMENT(r, i) \
do { \
(i)++; \
@@ -342,6 +344,7 @@ struct i40e_rx_queue_stats {
enum i40e_ring_state_t {
__I40E_TX_FDIR_INIT_DONE,
__I40E_TX_XPS_INIT_DONE,
+ __I40E_RING_STATE_NBITS /* must be last */
};
/* some useful defines for virtchannel interface, which
@@ -366,7 +369,7 @@ struct i40e_ring {
struct i40e_tx_buffer *tx_bi;
struct i40e_rx_buffer *rx_bi;
};
- unsigned long state;
+ DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
u16 queue_index; /* Queue number of ring */
u8 dcb_tc; /* Traffic class of ring */
u8 __iomem *tail;
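
A minimal sketch of what the DECLARE_BITMAP conversion changes for callers, assuming only <linux/bitmap.h> and <linux/bitops.h> plus an illustrative local enum: the state member is now an array, so the bit helpers take ring->state directly instead of &ring->state.

#include <linux/bitmap.h>
#include <linux/bitops.h>

enum demo_ring_state {
        DEMO_TX_XPS_INIT_DONE,
        DEMO_RING_STATE_NBITS,  /* must be last */
};

struct demo_ring {
        DECLARE_BITMAP(state, DEMO_RING_STATE_NBITS);
};

static void demo_init(struct demo_ring *ring)
{
        bitmap_zero(ring->state, DEMO_RING_STATE_NBITS);

        /* First caller wins; later calls see the bit already set. */
        if (test_and_set_bit(DEMO_TX_XPS_INIT_DONE, ring->state))
                return;
}
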
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 4b32b1d38a66..0410fcbdbb94 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -46,6 +46,9 @@
/* Max default timeout in ms, */
#define I40E_MAX_NVM_TIMEOUT 18000
+/* Max timeout in ms for the phy to respond */
+#define I40E_MAX_PHY_TIMEOUT 500
+
/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
#define I40E_MS_TO_GTIME(time) ((time) * 1000)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 04568137e029..0c4fa225c7be 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -273,7 +273,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
struct i40e_hw *hw = &pf->hw;
u16 vsi_queue_id, pf_queue_id;
enum i40e_queue_type qtype;
- u16 next_q, vector_id;
+ u16 next_q, vector_id, size;
u32 reg, reg_idx;
u16 itr_idx = 0;
@@ -303,9 +303,11 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
vsi_queue_id + 1));
}
- next_q = find_first_bit(&linklistmap,
- (I40E_MAX_VSI_QP *
- I40E_VIRTCHNL_SUPPORTED_QTYPES));
+ size = I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES;
+ next_q = find_first_bit(&linklistmap, size);
+ if (unlikely(next_q == size))
+ goto irq_list_done;
+
vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
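
The new guard relies on find_first_bit() returning the full bitmap size when no bit is set. A hedged sketch of the idiom in isolation (kernel bitops assumed, names illustrative):

#include <linux/bitops.h>
#include <linux/errno.h>

/* Return the first mapped queue, or -ENOENT if the map is empty; using the
 * unchecked result as an index would walk past the end of the bitmap. */
static int demo_first_queue(const unsigned long *linklistmap, unsigned int size)
{
        unsigned int next_q = find_first_bit(linklistmap, size);

        if (next_q == size)
                return -ENOENT;

        return next_q;
}
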
@@ -313,7 +315,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
wr32(hw, reg_idx, reg);
- while (next_q < (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+ while (next_q < size) {
switch (qtype) {
case I40E_QUEUE_TYPE_RX:
reg_idx = I40E_QINT_RQCTL(pf_queue_id);
@@ -327,12 +329,8 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
break;
}
- next_q = find_next_bit(&linklistmap,
- (I40E_MAX_VSI_QP *
- I40E_VIRTCHNL_SUPPORTED_QTYPES),
- next_q + 1);
- if (next_q <
- (I40E_MAX_VSI_QP * I40E_VIRTCHNL_SUPPORTED_QTYPES)) {
+ next_q = find_next_bit(&linklistmap, size, next_q + 1);
+ if (next_q < size) {
vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id,
@@ -639,7 +637,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
rx_ctx.dsize = 1;
/* default values */
- rx_ctx.lrxqthresh = 2;
+ rx_ctx.lrxqthresh = 1;
rx_ctx.crcstrip = 1;
rx_ctx.prefena = 1;
rx_ctx.l2tsel = 1;
@@ -1358,7 +1356,7 @@ err_alloc:
i40e_free_vfs(pf);
err_iov:
/* Re-enable interrupt 0. */
- i40e_irq_dynamic_enable_icr0(pf, false);
+ i40e_irq_dynamic_enable_icr0(pf);
return ret;
}
@@ -2883,6 +2881,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
struct i40e_mac_filter *f;
struct i40e_vf *vf;
int ret = 0;
+ struct hlist_node *h;
int bkt;
/* validate the request */
@@ -2921,7 +2920,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
/* Delete all the filters for this VSI - we're going to kill it
* anyway.
*/
- hash_for_each(vsi->mac_filter_hash, bkt, f, hlist)
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
__i40e_del_filter(vsi, f);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
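
The switch to hash_for_each_safe() matters because the loop body unlinks entries as it walks. A sketch of the idiom with an illustrative structure (kernel hashtable API assumed):

#include <linux/hashtable.h>
#include <linux/slab.h>

struct demo_filter {
        struct hlist_node hlist;
        int id;
};

static DEFINE_HASHTABLE(demo_filters, 4);

/* The _safe variant caches the next node in @tmp, so the current entry can
 * be deleted and freed without breaking the iteration. */
static void demo_flush_all(void)
{
        struct demo_filter *f;
        struct hlist_node *tmp;
        int bkt;

        hash_for_each_safe(demo_filters, bkt, tmp, f, hlist) {
                hash_del(&f->hlist);
                kfree(f);
        }
}
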
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index ed5602f4bbcd..60c892f559b9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1767,9 +1767,10 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_25GBASE_CR = 0x20,
I40E_PHY_TYPE_25GBASE_SR = 0x21,
I40E_PHY_TYPE_25GBASE_LR = 0x22,
+ I40E_PHY_TYPE_MAX,
+ I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD,
I40E_PHY_TYPE_EMPTY = 0xFE,
I40E_PHY_TYPE_DEFAULT = 0xFF,
- I40E_PHY_TYPE_MAX
};
#define I40E_LINK_SPEED_100MB_SHIFT 0x1
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 37e1de886d48..6806ada11490 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -711,6 +711,15 @@ bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
union i40e_rx_desc *rx_desc;
struct i40e_rx_buffer *bi;
+ /* Hardware only fetches new descriptors in cache lines of 8,
+ * essentially ignoring the lower 3 bits of the tail register. We want
+ * to ensure our tail writes are aligned to avoid unnecessary work. We
+ * can't simply round down the cleaned count, since we might fail to
+ * allocate some buffers. What we really want is to ensure that
+	 * next_to_use + cleaned_count produces an aligned value.
+ */
+ cleaned_count -= (ntu + cleaned_count) & 0x7;
+
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
return false;
@@ -1409,9 +1418,7 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr)
u32 val;
val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- /* Don't clear PBA because that can cause lost interrupts that
- * came in while we were cleaning/polling
- */
+ I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
(type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
(itr << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 0d9f98bc07bd..8d26c85d12e1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -38,8 +38,10 @@
#define I40E_ITR_8K 0x003E
#define I40E_ITR_4K 0x007A
#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
-#define I40E_ITR_RX_DEF I40E_ITR_20K
-#define I40E_ITR_TX_DEF I40E_ITR_20K
+#define I40E_ITR_RX_DEF (ITR_REG_TO_USEC(I40E_ITR_20K) | \
+ I40E_ITR_DYNAMIC)
+#define I40E_ITR_TX_DEF (ITR_REG_TO_USEC(I40E_ITR_20K) | \
+ I40E_ITR_DYNAMIC)
#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
@@ -189,7 +191,7 @@ static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
}
/* How many Rx Buffers do we bundle into one write to the hardware ? */
-#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+#define I40E_RX_BUFFER_WRITE 32 /* Must be power of 2 */
#define I40E_RX_INCREMENT(r, i) \
do { \
(i)++; \
@@ -325,6 +327,7 @@ struct i40e_rx_queue_stats {
enum i40e_ring_state_t {
__I40E_TX_FDIR_INIT_DONE,
__I40E_TX_XPS_INIT_DONE,
+ __I40E_RING_STATE_NBITS /* must be last */
};
/* some useful defines for virtchannel interface, which
@@ -348,7 +351,7 @@ struct i40e_ring {
struct i40e_tx_buffer *tx_bi;
struct i40e_rx_buffer *rx_bi;
};
- unsigned long state;
+ DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
u16 queue_index; /* Queue number of ring */
u8 dcb_tc; /* Traffic class of ring */
u8 __iomem *tail;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 9364b67fff9c..213b773dfad6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -46,6 +46,9 @@
/* Max default timeout in ms, */
#define I40E_MAX_NVM_TIMEOUT 18000
+/* Max timeout in ms for the phy to respond */
+#define I40E_MAX_PHY_TIMEOUT 500
+
/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
#define I40E_MS_TO_GTIME(time) ((time) * 1000)
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 5982362c5643..de0af521d602 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -222,22 +222,22 @@ struct i40evf_adapter {
u32 flags;
#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0)
-#define I40EVF_FLAG_IMIR_ENABLED BIT(5)
-#define I40EVF_FLAG_MQ_CAPABLE BIT(6)
-#define I40EVF_FLAG_PF_COMMS_FAILED BIT(8)
-#define I40EVF_FLAG_RESET_PENDING BIT(9)
-#define I40EVF_FLAG_RESET_NEEDED BIT(10)
-#define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(11)
-#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12)
-#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13)
-#define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(14)
-#define I40EVF_FLAG_CLIENT_NEEDS_OPEN BIT(15)
-#define I40EVF_FLAG_CLIENT_NEEDS_CLOSE BIT(16)
-#define I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(17)
-#define I40EVF_FLAG_PROMISC_ON BIT(18)
-#define I40EVF_FLAG_ALLMULTI_ON BIT(19)
-#define I40EVF_FLAG_LEGACY_RX BIT(20)
-#define I40EVF_FLAG_REINIT_ITR_NEEDED BIT(21)
+#define I40EVF_FLAG_IMIR_ENABLED BIT(1)
+#define I40EVF_FLAG_MQ_CAPABLE BIT(2)
+#define I40EVF_FLAG_PF_COMMS_FAILED BIT(3)
+#define I40EVF_FLAG_RESET_PENDING BIT(4)
+#define I40EVF_FLAG_RESET_NEEDED BIT(5)
+#define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(6)
+#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(7)
+#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(8)
+#define I40EVF_FLAG_SERVICE_CLIENT_REQUESTED BIT(9)
+#define I40EVF_FLAG_CLIENT_NEEDS_OPEN BIT(10)
+#define I40EVF_FLAG_CLIENT_NEEDS_CLOSE BIT(11)
+#define I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS BIT(12)
+#define I40EVF_FLAG_PROMISC_ON BIT(13)
+#define I40EVF_FLAG_ALLMULTI_ON BIT(14)
+#define I40EVF_FLAG_LEGACY_RX BIT(15)
+#define I40EVF_FLAG_REINIT_ITR_NEEDED BIT(16)
/* duplicates for common code */
#define I40E_FLAG_DCB_ENABLED 0
#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index f2f1e754c2ce..5bcbd46e2f6c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -515,6 +515,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
unsigned int vector, q_vectors;
unsigned int rx_int_idx = 0, tx_int_idx = 0;
int irq_num, err;
+ int cpu;
i40evf_irq_disable(adapter);
/* Decrement for Other and TCP Timer vectors */
@@ -553,10 +554,12 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
q_vector->affinity_notify.release =
i40evf_irq_affinity_release;
irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
- /* get_cpu_mask returns a static constant mask with
- * a permanent lifetime so it's ok to use here.
+ /* Spread the IRQ affinity hints across online CPUs. Note that
+ * get_cpu_mask returns a mask with a permanent lifetime so
+ * it's safe to use as a hint for irq_set_affinity_hint.
*/
- irq_set_affinity_hint(irq_num, get_cpu_mask(q_vector->v_idx));
+ cpu = cpumask_local_spread(q_vector->v_idx, -1);
+ irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
}
return 0;
@@ -877,6 +880,8 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
list_add_tail(&f->list, &adapter->mac_filter_list);
f->add = true;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+ } else {
+ f->remove = false;
}
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
@@ -1218,7 +1223,7 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
tx_ring->netdev = adapter->netdev;
tx_ring->dev = &adapter->pdev->dev;
tx_ring->count = adapter->tx_desc_count;
- tx_ring->tx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF);
+ tx_ring->tx_itr_setting = I40E_ITR_TX_DEF;
if (adapter->flags & I40EVF_FLAG_WB_ON_ITR_CAPABLE)
tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;
@@ -1227,7 +1232,7 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
rx_ring->netdev = adapter->netdev;
rx_ring->dev = &adapter->pdev->dev;
rx_ring->count = adapter->rx_desc_count;
- rx_ring->rx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF);
+ rx_ring->rx_itr_setting = I40E_ITR_RX_DEF;
}
adapter->num_active_queues = num_active_queues;
@@ -2420,10 +2425,6 @@ out_err:
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
-#define I40EVF_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_TX |\
- NETIF_F_HW_VLAN_CTAG_RX |\
- NETIF_F_HW_VLAN_CTAG_FILTER)
-
/**
* i40evf_fix_features - fix up the netdev feature bits
* @netdev: our net device
@@ -2436,9 +2437,11 @@ static netdev_features_t i40evf_fix_features(struct net_device *netdev,
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- features &= ~I40EVF_VLAN_FEATURES;
- if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
- features |= I40EVF_VLAN_FEATURES;
+ if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
+ features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER);
+
return features;
}
@@ -2569,9 +2572,17 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
*/
hw_features = hw_enc_features;
+ /* Enable VLAN features if supported */
+ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
+ hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX);
+
netdev->hw_features |= hw_features;
- netdev->features |= hw_features | I40EVF_VLAN_FEATURES;
+ netdev->features |= hw_features;
+
+ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
adapter->vsi.id = adapter->vsi_res->vsi_id;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index fd4a46b03cc8..837d9b46a390 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3162,6 +3162,8 @@ static int igb_sw_init(struct igb_adapter *adapter)
/* Setup and initialize a copy of the hw vlan table array */
adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
GFP_ATOMIC);
+ if (!adapter->shadow_vfta)
+ return -ENOMEM;
/* This call may decrease the number of queues */
if (igb_init_interrupt_scheme(adapter, true)) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index dd5578756ae0..468c3555a629 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -275,6 +275,7 @@ struct ixgbe_rx_queue_stats {
u64 rsc_count;
u64 rsc_flush;
u64 non_eop_descs;
+ u64 alloc_rx_page;
u64 alloc_rx_page_failed;
u64 alloc_rx_buff_failed;
u64 csum_err;
@@ -434,8 +435,15 @@ static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
}
#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
+#define IXGBE_ITR_ADAPTIVE_MIN_INC 2
+#define IXGBE_ITR_ADAPTIVE_MIN_USECS 10
+#define IXGBE_ITR_ADAPTIVE_MAX_USECS 126
+#define IXGBE_ITR_ADAPTIVE_LATENCY 0x80
+#define IXGBE_ITR_ADAPTIVE_BULK 0x00
+
struct ixgbe_ring_container {
struct ixgbe_ring *ring; /* pointer to linked list of rings */
+ unsigned long next_update; /* jiffies value of last update */
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_packets; /* total packets processed this int */
u16 work_limit; /* total work allowed per interrupt */
@@ -655,6 +663,7 @@ struct ixgbe_adapter {
u64 rsc_total_count;
u64 rsc_total_flush;
u64 non_eop_descs;
+ u32 alloc_rx_page;
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 523f9d05a810..8a32eb7d47b9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -175,31 +175,9 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
**/
static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
-#ifndef CONFIG_SPARC
- u32 regval;
- u32 i;
-#endif
s32 ret_val;
ret_val = ixgbe_start_hw_generic(hw);
-
-#ifndef CONFIG_SPARC
- /* Disable relaxed ordering */
- for (i = 0; ((i < hw->mac.max_tx_queues) &&
- (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
- regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
- regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
- }
-
- for (i = 0; ((i < hw->mac.max_rx_queues) &&
- (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
- regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
- regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
- IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
- IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
- }
-#endif
if (ret_val)
return ret_val;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 2c19070d2a0b..9bef255f6a18 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -366,25 +366,6 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
}
IXGBE_WRITE_FLUSH(hw);
-#ifndef CONFIG_ARCH_WANT_RELAX_ORDER
- /* Disable relaxed ordering */
- for (i = 0; i < hw->mac.max_tx_queues; i++) {
- u32 regval;
-
- regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
- regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
- }
-
- for (i = 0; i < hw->mac.max_rx_queues; i++) {
- u32 regval;
-
- regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
- regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
- IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
- IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
- }
-#endif
return 0;
}
@@ -3800,10 +3781,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
fw_cmd.ver_build = build;
fw_cmd.ver_sub = sub;
fw_cmd.hdr.checksum = 0;
- fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
- (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
fw_cmd.pad = 0;
fw_cmd.pad2 = 0;
+ fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+ (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
ret_val = ixgbe_host_interface_command(hw, &fw_cmd,
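
The hunk above only reorders statements so the checksum is computed after the pad fields are cleared. The userspace sketch below shows why the order matters for a sum-to-zero checksum over a command structure; the struct layout and helper are illustrative, not the ixgbe definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_cmd {
        uint8_t opcode;
        uint8_t checksum;
        uint8_t pad;
        uint8_t pad2;
};

/* Simple sum-to-zero checksum, similar in spirit to the driver helper. */
static uint8_t demo_checksum(const void *buf, size_t len)
{
        const uint8_t *p = buf;
        uint8_t sum = 0;

        while (len--)
                sum += *p++;
        return (uint8_t)(0 - sum);
}

int main(void)
{
        struct demo_cmd cmd;

        memset(&cmd, 0xa5, sizeof(cmd));        /* stale garbage */
        cmd.opcode = 0x0f;

        /* Wrong order: pads still hold garbage when the checksum is taken. */
        cmd.checksum = 0;
        cmd.checksum = demo_checksum(&cmd, sizeof(cmd));
        cmd.pad = 0;
        cmd.pad2 = 0;
        printf("checksum before pads: verifies=%d\n",
               demo_checksum(&cmd, sizeof(cmd)) == 0);

        /* Right order: clear every field first, then checksum last. */
        cmd.checksum = 0;
        cmd.checksum = demo_checksum(&cmd, sizeof(cmd));
        printf("checksum after pads:  verifies=%d\n",
               demo_checksum(&cmd, sizeof(cmd)) == 0);
        return 0;
}
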
@@ -4100,8 +4081,8 @@ bool ixgbe_mng_present(struct ixgbe_hw *hw)
return false;
fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw));
- fwsm &= IXGBE_FWSM_MODE_MASK;
- return fwsm == IXGBE_FWSM_FW_MODE_PT;
+
+ return !!(fwsm & IXGBE_FWSM_FW_MODE_PT);
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 72c565712a5f..0aad1c2a3667 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -104,6 +104,7 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
+ {"alloc_rx_page", IXGBE_STAT(alloc_rx_page)},
{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
@@ -1048,7 +1049,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *temp_ring;
- int i, err = 0;
+ int i, j, err = 0;
u32 new_rx_count, new_tx_count;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
@@ -1085,8 +1086,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
}
/* allocate temporary buffer to store rings in */
- i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
- i = max_t(int, i, adapter->num_xdp_queues);
+ i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
+ adapter->num_rx_queues);
temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
if (!temp_ring) {
@@ -1118,8 +1119,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
}
}
- for (i = 0; i < adapter->num_xdp_queues; i++) {
- memcpy(&temp_ring[i], adapter->xdp_ring[i],
+ for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
+ memcpy(&temp_ring[i], adapter->xdp_ring[j],
sizeof(struct ixgbe_ring));
temp_ring[i].count = new_tx_count;
@@ -1139,10 +1140,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
memcpy(adapter->tx_ring[i], &temp_ring[i],
sizeof(struct ixgbe_ring));
}
- for (i = 0; i < adapter->num_xdp_queues; i++) {
- ixgbe_free_tx_resources(adapter->xdp_ring[i]);
+ for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
+ ixgbe_free_tx_resources(adapter->xdp_ring[j]);
- memcpy(adapter->xdp_ring[i], &temp_ring[i],
+ memcpy(adapter->xdp_ring[j], &temp_ring[i],
sizeof(struct ixgbe_ring));
}
@@ -1916,8 +1917,6 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
unsigned int size)
{
union ixgbe_adv_rx_desc *rx_desc;
- struct ixgbe_rx_buffer *rx_buffer;
- struct ixgbe_tx_buffer *tx_buffer;
u16 rx_ntc, tx_ntc, count = 0;
/* initialize next to clean and descriptor values */
@@ -1925,7 +1924,38 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
tx_ntc = tx_ring->next_to_clean;
rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
+ while (tx_ntc != tx_ring->next_to_use) {
+ union ixgbe_adv_tx_desc *tx_desc;
+ struct ixgbe_tx_buffer *tx_buffer;
+
+ tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc);
+
+ /* if DD is not set transmit has not completed */
+ if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
+ return count;
+
+ /* unmap buffer on Tx side */
+ tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
+
+ /* Free all the Tx ring sk_buffs */
+ dev_kfree_skb_any(tx_buffer->skb);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ /* increment Tx next to clean counter */
+ tx_ntc++;
+ if (tx_ntc == tx_ring->count)
+ tx_ntc = 0;
+ }
+
while (rx_desc->wb.upper.length) {
+ struct ixgbe_rx_buffer *rx_buffer;
+
/* check Rx buffer */
rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
@@ -1938,6 +1968,8 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
/* verify contents of skb */
if (ixgbe_check_lbtest_frame(rx_buffer, size))
count++;
+ else
+ break;
/* sync Rx buffer for device write */
dma_sync_single_for_device(rx_ring->dev,
@@ -1945,26 +1977,10 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
ixgbe_rx_bufsz(rx_ring),
DMA_FROM_DEVICE);
- /* unmap buffer on Tx side */
- tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
-
- /* Free all the Tx ring sk_buffs */
- dev_kfree_skb_any(tx_buffer->skb);
-
- /* unmap skb header data */
- dma_unmap_single(tx_ring->dev,
- dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buffer, len, 0);
-
- /* increment Rx/Tx next to clean counters */
+ /* increment Rx next to clean counter */
rx_ntc++;
if (rx_ntc == rx_ring->count)
rx_ntc = 0;
- tx_ntc++;
- if (tx_ntc == tx_ring->count)
- tx_ntc = 0;
/* fetch next descriptor */
rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index f1bfae0c41d0..8e2a957aca18 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -806,6 +806,7 @@ static void ixgbe_add_ring(struct ixgbe_ring *ring,
ring->next = head->ring;
head->ring = ring;
head->count++;
+ head->next_update = jiffies + 1;
}
/**
@@ -879,8 +880,11 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
/* initialize work limits */
q_vector->tx.work_limit = adapter->tx_work_limit;
- /* initialize pointer to rings */
- ring = q_vector->ring;
+ /* Initialize setting for adaptive ITR */
+ q_vector->tx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
+ IXGBE_ITR_ADAPTIVE_LATENCY;
+ q_vector->rx.itr = IXGBE_ITR_ADAPTIVE_MAX_USECS |
+ IXGBE_ITR_ADAPTIVE_LATENCY;
/* initialize ITR */
if (txr_count && !rxr_count) {
@@ -897,6 +901,9 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
q_vector->itr = adapter->rx_itr_setting;
}
+ /* initialize pointer to rings */
+ ring = q_vector->ring;
+
while (txr_count) {
/* assign generic ring traits */
ring->dev = &adapter->pdev->dev;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 3942c6208745..7683c14024aa 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1620,6 +1620,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
bi->page = page;
bi->page_offset = ixgbe_rx_offset(rx_ring);
bi->pagecnt_bias = 1;
+ rx_ring->rx_stats.alloc_rx_page++;
return true;
}
@@ -2539,50 +2540,174 @@ enum latency_range {
static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring_container *ring_container)
{
- int bytes = ring_container->total_bytes;
- int packets = ring_container->total_packets;
- u32 timepassed_us;
- u64 bytes_perint;
- u8 itr_setting = ring_container->itr;
+ unsigned int itr = IXGBE_ITR_ADAPTIVE_MIN_USECS |
+ IXGBE_ITR_ADAPTIVE_LATENCY;
+ unsigned int avg_wire_size, packets, bytes;
+ unsigned long next_update = jiffies;
- if (packets == 0)
+ /* If we don't have any rings just leave ourselves set for maximum
+ * possible latency so we take ourselves out of the equation.
+ */
+ if (!ring_container->ring)
return;
- /* simple throttlerate management
- * 0-10MB/s lowest (100000 ints/s)
- * 10-20MB/s low (20000 ints/s)
- * 20-1249MB/s bulk (12000 ints/s)
+ /* If we didn't update within up to 1 - 2 jiffies we can assume
+	 * that either packets are coming in so slowly there hasn't been
+ * any work, or that there is so much work that NAPI is dealing
+ * with interrupt moderation and we don't need to do anything.
*/
- /* what was last interrupt timeslice? */
- timepassed_us = q_vector->itr >> 2;
- if (timepassed_us == 0)
- return;
+ if (time_after(next_update, ring_container->next_update))
+ goto clear_counts;
- bytes_perint = bytes / timepassed_us; /* bytes/usec */
+ packets = ring_container->total_packets;
- switch (itr_setting) {
- case lowest_latency:
- if (bytes_perint > 10)
- itr_setting = low_latency;
- break;
- case low_latency:
- if (bytes_perint > 20)
- itr_setting = bulk_latency;
- else if (bytes_perint <= 10)
- itr_setting = lowest_latency;
+ /* We have no packets to actually measure against. This means
+ * either one of the other queues on this vector is active or
+ * we are a Tx queue doing TSO with too high of an interrupt rate.
+ *
+ * When this occurs just tick up our delay by the minimum value
+ * and hope that this extra delay will prevent us from being called
+ * without any work on our queue.
+ */
+ if (!packets) {
+ itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
+ if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
+ itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
+ itr += ring_container->itr & IXGBE_ITR_ADAPTIVE_LATENCY;
+ goto clear_counts;
+ }
+
+ bytes = ring_container->total_bytes;
+
+ /* If packets are less than 4 or bytes are less than 9000 assume
+ * insufficient data to use bulk rate limiting approach. We are
+ * likely latency driven.
+ */
+ if (packets < 4 && bytes < 9000) {
+ itr = IXGBE_ITR_ADAPTIVE_LATENCY;
+ goto adjust_by_size;
+ }
+
+ /* Between 4 and 48 we can assume that our current interrupt delay
+ * is only slightly too low. As such we should increase it by a small
+ * fixed amount.
+ */
+ if (packets < 48) {
+ itr = (q_vector->itr >> 2) + IXGBE_ITR_ADAPTIVE_MIN_INC;
+ if (itr > IXGBE_ITR_ADAPTIVE_MAX_USECS)
+ itr = IXGBE_ITR_ADAPTIVE_MAX_USECS;
+ goto clear_counts;
+ }
+
+ /* Between 48 and 96 is our "goldilocks" zone where we are working
+ * out "just right". Just report that our current ITR is good for us.
+ */
+ if (packets < 96) {
+ itr = q_vector->itr >> 2;
+ goto clear_counts;
+ }
+
+ /* If packet count is 96 or greater we are likely looking at a slight
+ * overrun of the delay we want. Try halving our delay to see if that
+ * will cut the number of packets in half per interrupt.
+ */
+ if (packets < 256) {
+ itr = q_vector->itr >> 3;
+ if (itr < IXGBE_ITR_ADAPTIVE_MIN_USECS)
+ itr = IXGBE_ITR_ADAPTIVE_MIN_USECS;
+ goto clear_counts;
+ }
+
+ /* The paths below assume we are dealing with a bulk ITR since number
+ * of packets is 256 or greater. We are just going to have to compute
+ * a value and try to bring the count under control, though for smaller
+ * packet sizes there isn't much we can do as NAPI polling will likely
+ * be kicking in sooner rather than later.
+ */
+ itr = IXGBE_ITR_ADAPTIVE_BULK;
+
+adjust_by_size:
+ /* If packet counts are 256 or greater we can assume we have a gross
+ * overestimation of what the rate should be. Instead of trying to fine
+	 * tune it, just use the formula below to try and dial in an exact value
+	 * given the current packet size of the frame.
+ */
+ avg_wire_size = bytes / packets;
+
+ /* The following is a crude approximation of:
+ * wmem_default / (size + overhead) = desired_pkts_per_int
+ * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
+	 * (desired_pkts_per_int / pkt_rate) * usecs_per_sec = ITR value
+ *
+ * Assuming wmem_default is 212992 and overhead is 640 bytes per
+ * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
+ * formula down to
+ *
+ * (170 * (size + 24)) / (size + 640) = ITR
+ *
+ * We first do some math on the packet size and then finally bitshift
+ * by 8 after rounding up. We also have to account for PCIe link speed
+ * difference as ITR scales based on this.
+ */
+ if (avg_wire_size <= 60) {
+ /* Start at 50k ints/sec */
+ avg_wire_size = 5120;
+ } else if (avg_wire_size <= 316) {
+ /* 50K ints/sec to 16K ints/sec */
+ avg_wire_size *= 40;
+ avg_wire_size += 2720;
+ } else if (avg_wire_size <= 1084) {
+ /* 16K ints/sec to 9.2K ints/sec */
+ avg_wire_size *= 15;
+ avg_wire_size += 11452;
+ } else if (avg_wire_size <= 1980) {
+ /* 9.2K ints/sec to 8K ints/sec */
+ avg_wire_size *= 5;
+ avg_wire_size += 22420;
+ } else {
+ /* plateau at a limit of 8K ints/sec */
+ avg_wire_size = 32256;
+ }
+
+	/* If we are in low latency mode, halve our delay, which doubles the rate
+	 * to somewhere between 100K and 16K ints/sec
+ */
+ if (itr & IXGBE_ITR_ADAPTIVE_LATENCY)
+ avg_wire_size >>= 1;
+
+ /* Resultant value is 256 times larger than it needs to be. This
+ * gives us room to adjust the value as needed to either increase
+ * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
+ *
+ * Use addition as we have already recorded the new latency flag
+ * for the ITR value.
+ */
+ switch (q_vector->adapter->link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ case IXGBE_LINK_SPEED_100_FULL:
+ default:
+ itr += DIV_ROUND_UP(avg_wire_size,
+ IXGBE_ITR_ADAPTIVE_MIN_INC * 256) *
+ IXGBE_ITR_ADAPTIVE_MIN_INC;
break;
- case bulk_latency:
- if (bytes_perint <= 20)
- itr_setting = low_latency;
+ case IXGBE_LINK_SPEED_2_5GB_FULL:
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ case IXGBE_LINK_SPEED_10_FULL:
+ itr += DIV_ROUND_UP(avg_wire_size,
+ IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
+ IXGBE_ITR_ADAPTIVE_MIN_INC;
break;
}
- /* clear work counters since we have the values we need */
+clear_counts:
+ /* write back value */
+ ring_container->itr = itr;
+
+ /* next update should occur within next jiffy */
+ ring_container->next_update = next_update + 1;
+
ring_container->total_bytes = 0;
ring_container->total_packets = 0;
-
- /* write updated itr to ring container */
- ring_container->itr = itr_setting;
}
/**
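
To make the new heuristic concrete, the standalone sketch below walks the bulk path for a full-size frame and the latency path for a tiny one, using the constants added in ixgbe.h and a local DIV_ROUND_UP; it mirrors the driver arithmetic but is only a model of ixgbe_update_itr(), not the function itself.

#include <stdio.h>

/* Constants as added in ixgbe.h by this series. */
#define IXGBE_ITR_ADAPTIVE_MIN_INC      2
#define IXGBE_ITR_ADAPTIVE_LATENCY      0x80
#define IXGBE_ITR_ADAPTIVE_BULK         0x00

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

/* Map an average wire size to an ITR value in usecs for a 10G link,
 * following the adjust_by_size path of the new algorithm. */
static unsigned int itr_from_avg_wire_size(unsigned int avg_wire_size,
                                           unsigned int itr_flags)
{
        unsigned int itr = itr_flags;

        if (avg_wire_size <= 60)
                avg_wire_size = 5120;           /* ~50K ints/sec floor */
        else if (avg_wire_size <= 316)
                avg_wire_size = avg_wire_size * 40 + 2720;
        else if (avg_wire_size <= 1084)
                avg_wire_size = avg_wire_size * 15 + 11452;
        else if (avg_wire_size <= 1980)
                avg_wire_size = avg_wire_size * 5 + 22420;
        else
                avg_wire_size = 32256;          /* ~8K ints/sec plateau */

        if (itr & IXGBE_ITR_ADAPTIVE_LATENCY)
                avg_wire_size >>= 1;            /* low latency doubles the rate */

        /* 10G scaling: the intermediate value is 256 times too large. */
        itr += DIV_ROUND_UP(avg_wire_size, IXGBE_ITR_ADAPTIVE_MIN_INC * 256) *
               IXGBE_ITR_ADAPTIVE_MIN_INC;

        return itr & ~IXGBE_ITR_ADAPTIVE_LATENCY;       /* usecs */
}

int main(void)
{
        /* Full-size frames in bulk mode: 5 * 1514 + 22420 = 29990,
         * DIV_ROUND_UP(29990, 512) * 2 = 118 usec, i.e. ~8.4K ints/sec. */
        printf("1514B bulk -> %u usec\n",
               itr_from_avg_wire_size(1514, IXGBE_ITR_ADAPTIVE_BULK));
        /* Small frames in latency mode land near the 100K ints/sec end. */
        printf("  60B lat  -> %u usec\n",
               itr_from_avg_wire_size(60, IXGBE_ITR_ADAPTIVE_LATENCY));
        return 0;
}
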
@@ -2624,34 +2749,19 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
{
- u32 new_itr = q_vector->itr;
- u8 current_itr;
+ u32 new_itr;
ixgbe_update_itr(q_vector, &q_vector->tx);
ixgbe_update_itr(q_vector, &q_vector->rx);
- current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
+ /* use the smallest value of new ITR delay calculations */
+ new_itr = min(q_vector->rx.itr, q_vector->tx.itr);
- switch (current_itr) {
- /* counts and packets in update_itr are dependent on these numbers */
- case lowest_latency:
- new_itr = IXGBE_100K_ITR;
- break;
- case low_latency:
- new_itr = IXGBE_20K_ITR;
- break;
- case bulk_latency:
- new_itr = IXGBE_12K_ITR;
- break;
- default:
- break;
- }
+ /* Clear latency flag if set, shift into correct position */
+ new_itr &= ~IXGBE_ITR_ADAPTIVE_LATENCY;
+ new_itr <<= 2;
if (new_itr != q_vector->itr) {
- /* do an exponential smoothing */
- new_itr = (10 * new_itr * q_vector->itr) /
- ((9 * new_itr) + q_vector->itr);
-
/* save the algorithm value here */
q_vector->itr = new_itr;
@@ -4904,7 +5014,7 @@ static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask)
IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)))
return;
- vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) && ~mask;
+ vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask;
IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl);
if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK)
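
The one-character fix above replaces a logical AND with a bitwise AND. The tiny sketch below shows the difference when clearing the UDP-port bits out of a register image; the register value is made up.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t reg = 0x1234d431;      /* illustrative register contents */
        uint32_t mask = 0x0000ffff;     /* port bits to clear */

        /* Logical AND collapses both operands to 0/1, so the "cleared"
         * value becomes 1 and every other bit in the register is lost. */
        printf("reg && ~mask = 0x%08x\n", (uint32_t)(reg && ~mask));

        /* Bitwise AND keeps the untouched bits and clears only the mask. */
        printf("reg &  ~mask = 0x%08x\n", reg & ~mask);
        return 0;
}
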
@@ -6794,6 +6904,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
+ u64 alloc_rx_page = 0;
u64 bytes = 0, packets = 0, hw_csum_rx_error = 0;
if (test_bit(__IXGBE_DOWN, &adapter->state) ||
@@ -6814,6 +6925,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
non_eop_descs += rx_ring->rx_stats.non_eop_descs;
+ alloc_rx_page += rx_ring->rx_stats.alloc_rx_page;
alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
hw_csum_rx_error += rx_ring->rx_stats.csum_err;
@@ -6821,6 +6933,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
packets += rx_ring->stats.packets;
}
adapter->non_eop_descs = non_eop_descs;
+ adapter->alloc_rx_page = alloc_rx_page;
adapter->alloc_rx_page_failed = alloc_rx_page_failed;
adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
adapter->hw_csum_rx_error = hw_csum_rx_error;
@@ -8552,6 +8665,10 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
return ixgbe_ptp_set_ts_config(adapter, req);
case SIOCGHWTSTAMP:
return ixgbe_ptp_get_ts_config(adapter, req);
+ case SIOCGMIIPHY:
+ if (!adapter->hw.phy.ops.read_reg)
+ return -EOPNOTSUPP;
+ /* fall through */
default:
return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
}
@@ -9758,6 +9875,17 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
limit = find_last_bit(&adapter->fwd_bitmask, 32);
adapter->ring_feature[RING_F_VMDQ].limit = limit + 1;
ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter);
+
+ /* go back to full RSS if we're done with our VMQs */
+ if (adapter->ring_feature[RING_F_VMDQ].limit == 1) {
+ int rss = min_t(int, ixgbe_max_rss_indices(adapter),
+ num_online_cpus());
+
+ adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
+ adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+ adapter->ring_feature[RING_F_RSS].limit = rss;
+ }
+
ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev));
netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n",
fwd_adapter->pool, adapter->num_rx_pools,
@@ -10737,6 +10865,9 @@ skip_bad_vf_detection:
if (!test_bit(__IXGBE_SERVICE_INITED, &adapter->state))
return PCI_ERS_RESULT_DISCONNECT;
+ if (!netif_device_present(netdev))
+ return PCI_ERS_RESULT_DISCONNECT;
+
rtnl_lock();
netif_device_detach(netdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 6ea0d6a5fb90..b8c5fd2a2115 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -619,12 +619,6 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
usleep_range(5000, 10000);
}
- /* Failed to get SW only semaphore */
- if (swmask == IXGBE_GSSR_SW_MNG_SM) {
- hw_dbg(hw, "Failed to get SW only semaphore\n");
- return IXGBE_ERR_SWFW_SYNC;
- }
-
/* If the resource is not released by the FW/HW the SW can assume that
* the FW/HW malfunctions. In that case the SW should set the SW bit(s)
* of the requested resource(s) while ignoring the corresponding FW/HW
@@ -647,7 +641,8 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask)
*/
if (swfw_sync & swmask) {
u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
- IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM;
+ IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM |
+ IXGBE_GSSR_SW_MNG_SM;
if (swi2c_mask)
rmask |= IXGBE_GSSR_I2C_MASK;
@@ -763,6 +758,8 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
**/
void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
{
+ u32 rmask;
+
/* First try to grab the semaphore but we don't need to bother
* looking to see whether we got the lock or not since we do
* the same thing regardless of whether we got the lock or not.
@@ -771,6 +768,14 @@ void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
*/
ixgbe_get_swfw_sync_semaphore(hw);
ixgbe_release_swfw_sync_semaphore(hw);
+
+ /* Acquire and release all software resources. */
+ rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM |
+ IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM |
+ IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_I2C_MASK;
+
+ ixgbe_acquire_swfw_sync_X540(hw, rmask);
+ ixgbe_release_swfw_sync_X540(hw, rmask);
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 19fbb2f28ea4..cb7da5f9c4da 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -900,6 +900,8 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
/* convert offset from words to bytes */
buffer.address = cpu_to_be32((offset + current_word) * 2);
buffer.length = cpu_to_be16(words_to_read * 2);
+ buffer.pad2 = 0;
+ buffer.pad3 = 0;
status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer),
IXGBE_HI_COMMAND_TIMEOUT);
@@ -3192,6 +3194,9 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
/* Identify the PHY or SFP module */
ret_val = phy->ops.identify(hw);
+ if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED ||
+ ret_val == IXGBE_ERR_PHY_ADDR_INVALID)
+ return ret_val;
/* Setup function pointers based on detected hardware */
ixgbe_init_mac_link_ops_X550em(hw);
@@ -3394,9 +3399,10 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
ixgbe_clear_tx_pending(hw);
/* PHY ops must be identified and initialized prior to reset */
-
- /* Identify PHY and related function pointers */
status = hw->phy.ops.init(hw);
+ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
+ status == IXGBE_ERR_PHY_ADDR_INVALID)
+ return status;
/* start the external PHY */
if (hw->phy.type == ixgbe_phy_x550em_ext_t) {
@@ -3884,7 +3890,7 @@ static const struct ixgbe_mac_operations mac_ops_X550EM_x_fw = {
.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550,
};
-static struct ixgbe_mac_operations mac_ops_x550em_a = {
+static const struct ixgbe_mac_operations mac_ops_x550em_a = {
X550_COMMON_MAC
.led_on = ixgbe_led_on_t_x550em,
.led_off = ixgbe_led_off_t_x550em,
@@ -3905,7 +3911,7 @@ static struct ixgbe_mac_operations mac_ops_x550em_a = {
.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a,
};
-static struct ixgbe_mac_operations mac_ops_x550em_a_fw = {
+static const struct ixgbe_mac_operations mac_ops_x550em_a_fw = {
X550_COMMON_MAC
.led_on = ixgbe_led_on_generic,
.led_off = ixgbe_led_off_generic,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 3d4e4a5d00d1..bf1f04164885 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1742,13 +1742,18 @@ static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return err;
}
+static int mlx4_en_get_max_num_rx_rings(struct net_device *dev)
+{
+ return min_t(int, num_online_cpus(), MAX_RX_RINGS);
+}
+
static void mlx4_en_get_channels(struct net_device *dev,
struct ethtool_channels *channel)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- channel->max_rx = MAX_RX_RINGS;
- channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
+ channel->max_rx = mlx4_en_get_max_num_rx_rings(dev);
+ channel->max_tx = priv->mdev->profile.max_num_tx_rings_p_up;
channel->rx_count = priv->rx_ring_num;
channel->tx_count = priv->tx_ring_num[TX] /
@@ -1777,7 +1782,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
mutex_lock(&mdev->state_lock);
xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0;
if (channel->tx_count * priv->prof->num_up + xdp_count >
- MAX_TX_RINGS) {
+ priv->mdev->profile.max_num_tx_rings_p_up * priv->prof->num_up) {
err = -EINVAL;
en_err(priv,
"Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 686e18de9a97..2c2965497ed3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -153,7 +153,7 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev)
int i;
params->udp_rss = udp_rss;
- params->num_tx_rings_p_up = mlx4_low_memory_profile() ?
+ params->max_num_tx_rings_p_up = mlx4_low_memory_profile() ?
MLX4_EN_MIN_TX_RING_P_UP :
min_t(int, num_online_cpus(), MLX4_EN_MAX_TX_RING_P_UP);
@@ -170,8 +170,8 @@ static void mlx4_en_get_profile(struct mlx4_en_dev *mdev)
params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
params->prof[i].num_up = MLX4_EN_NUM_UP_LOW;
- params->prof[i].num_tx_rings_p_up = params->num_tx_rings_p_up;
- params->prof[i].tx_ring_num[TX] = params->num_tx_rings_p_up *
+ params->prof[i].num_tx_rings_p_up = params->max_num_tx_rings_p_up;
+ params->prof[i].tx_ring_num[TX] = params->max_num_tx_rings_p_up *
params->prof[i].num_up;
params->prof[i].rss_rings = 0;
params->prof[i].inline_thold = inline_thold;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 9c218f1cfc6c..e4c7a80ef5a8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -3305,7 +3305,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
MLX4_WQE_CTRL_SOLICITED);
- priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
+ priv->num_tx_rings_p_up = mdev->profile.max_num_tx_rings_p_up;
priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 5a47f9669621..6883ac75d37f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -53,7 +53,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
if (is_tx) {
context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)
- context->params2 |= MLX4_QP_BIT_FPP;
+ context->params2 |= cpu_to_be32(MLX4_QP_BIT_FPP);
} else {
context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
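
params2 is a big-endian field, so the flag constant has to be converted before it is OR'd in or masked out; the userspace sketch below models this with htobe32()/be32toh() from <endian.h>, using an illustrative flag value rather than the real MLX4_QP_BIT_FPP.

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_QP_BIT_FPP         (1u << 25)      /* illustrative flag value */

int main(void)
{
        uint32_t params2_be = htobe32(0);       /* big-endian field image */

        /* Set and clear the flag with the constant converted to the field's
         * byte order, mirroring cpu_to_be32(MLX4_QP_BIT_FPP) in the driver. */
        params2_be |= htobe32(DEMO_QP_BIT_FPP);
        printf("set:   0x%08x\n", be32toh(params2_be));

        params2_be &= ~htobe32(DEMO_QP_BIT_FPP);
        printf("clear: 0x%08x\n", be32toh(params2_be));
        return 0;
}
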
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 8f9cb8abc497..a7866954d106 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -254,8 +254,7 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
DEF_RX_RINGS));
num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
- min_t(int, num_of_eqs,
- netif_get_num_default_rss_queues());
+ min_t(int, num_of_eqs, num_online_cpus());
mdev->profile.prof[i].rx_ring_num =
rounddown_pow_of_two(num_rx_rings);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 8a32a8f7f9c0..2cc82dc07397 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -718,7 +718,7 @@ void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring)
#else
iowrite32be(
#endif
- ring->doorbell_qpn,
+ (__force u32)ring->doorbell_qpn,
ring->bf.uar->map + MLX4_SEND_DOORBELL);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 16c09949afd5..634f603f941c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -57,12 +57,12 @@ MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: off)");
#define MLX4_GET(dest, source, offset) \
do { \
void *__p = (char *) (source) + (offset); \
- u64 val; \
- switch (sizeof(dest)) { \
+ __be64 val; \
+ switch (sizeof(dest)) { \
case 1: (dest) = *(u8 *) __p; break; \
case 2: (dest) = be16_to_cpup(__p); break; \
case 4: (dest) = be32_to_cpup(__p); break; \
- case 8: val = get_unaligned((u64 *)__p); \
+ case 8: val = get_unaligned((__be64 *)__p); \
(dest) = be64_to_cpu(val); break; \
default: __buggy_use_of_MLX4_GET(); \
} \
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index fdb3ad0cbe54..245e9ea09ab2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -399,7 +399,7 @@ struct mlx4_en_profile {
u32 active_ports;
u32 small_pkt_int;
u8 no_reset;
- u8 num_tx_rings_p_up;
+ u8 max_num_tx_rings_p_up;
struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 728a2fb1f5c0..203320923340 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -925,7 +925,7 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
context->flags &= cpu_to_be32(~(0xf << 28));
context->flags |= cpu_to_be32(states[i + 1] << 28);
if (states[i + 1] != MLX4_QP_STATE_RTR)
- context->params2 &= ~MLX4_QP_BIT_FPP;
+ context->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP);
err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
context, 0, 0, qp);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index fabb53379727..04304dd894c6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3185,7 +3185,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev,
optpar = be32_to_cpu(*(__be32 *) inbox->buf);
if (slave != mlx4_master_func_num(dev)) {
- qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
+ qp_ctx->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP);
/* setting QP rate-limit is disallowed for VFs */
if (qp_ctx->rate_limit_params)
return -EPERM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 5a7bea688ec8..7a136ae2547a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -145,10 +145,10 @@ static struct init_tree_node {
}
};
-enum fs_i_mutex_lock_class {
- FS_MUTEX_GRANDPARENT,
- FS_MUTEX_PARENT,
- FS_MUTEX_CHILD
+enum fs_i_lock_class {
+ FS_LOCK_GRANDPARENT,
+ FS_LOCK_PARENT,
+ FS_LOCK_CHILD
};
static const struct rhashtable_params rhash_fte = {
@@ -168,10 +168,16 @@ static const struct rhashtable_params rhash_fg = {
};
-static void del_rule(struct fs_node *node);
-static void del_flow_table(struct fs_node *node);
-static void del_flow_group(struct fs_node *node);
-static void del_fte(struct fs_node *node);
+static void del_hw_flow_table(struct fs_node *node);
+static void del_hw_flow_group(struct fs_node *node);
+static void del_hw_fte(struct fs_node *node);
+static void del_sw_flow_table(struct fs_node *node);
+static void del_sw_flow_group(struct fs_node *node);
+static void del_sw_fte(struct fs_node *node);
+/* Deleting a rule (destination) is a special case that
+ * requires locking the FTE for the whole deletion process.
+ */
+static void del_sw_hw_rule(struct fs_node *node);
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
struct mlx5_flow_destination *d2);
static struct mlx5_flow_rule *
@@ -179,14 +185,16 @@ find_flow_rule(struct fs_fte *fte,
struct mlx5_flow_destination *dest);
static void tree_init_node(struct fs_node *node,
- unsigned int refcount,
- void (*remove_func)(struct fs_node *))
+ void (*del_hw_func)(struct fs_node *),
+ void (*del_sw_func)(struct fs_node *))
{
- atomic_set(&node->refcount, refcount);
+ atomic_set(&node->refcount, 1);
INIT_LIST_HEAD(&node->list);
INIT_LIST_HEAD(&node->children);
- mutex_init(&node->lock);
- node->remove_func = remove_func;
+ init_rwsem(&node->lock);
+ node->del_hw_func = del_hw_func;
+ node->del_sw_func = del_sw_func;
+ node->active = false;
}
static void tree_add_node(struct fs_node *node, struct fs_node *parent)
@@ -202,50 +210,70 @@ static void tree_add_node(struct fs_node *node, struct fs_node *parent)
node->root = parent->root;
}
-static void tree_get_node(struct fs_node *node)
+static int tree_get_node(struct fs_node *node)
{
- atomic_inc(&node->refcount);
+ return atomic_add_unless(&node->refcount, 1, 0);
}
-static void nested_lock_ref_node(struct fs_node *node,
- enum fs_i_mutex_lock_class class)
+static void nested_down_read_ref_node(struct fs_node *node,
+ enum fs_i_lock_class class)
{
if (node) {
- mutex_lock_nested(&node->lock, class);
+ down_read_nested(&node->lock, class);
atomic_inc(&node->refcount);
}
}
-static void lock_ref_node(struct fs_node *node)
+static void nested_down_write_ref_node(struct fs_node *node,
+ enum fs_i_lock_class class)
{
if (node) {
- mutex_lock(&node->lock);
+ down_write_nested(&node->lock, class);
atomic_inc(&node->refcount);
}
}
-static void unlock_ref_node(struct fs_node *node)
+static void down_write_ref_node(struct fs_node *node)
{
if (node) {
- atomic_dec(&node->refcount);
- mutex_unlock(&node->lock);
+ down_write(&node->lock);
+ atomic_inc(&node->refcount);
}
}
+static void up_read_ref_node(struct fs_node *node)
+{
+ atomic_dec(&node->refcount);
+ up_read(&node->lock);
+}
+
+static void up_write_ref_node(struct fs_node *node)
+{
+ atomic_dec(&node->refcount);
+ up_write(&node->lock);
+}
+
static void tree_put_node(struct fs_node *node)
{
struct fs_node *parent_node = node->parent;
- lock_ref_node(parent_node);
if (atomic_dec_and_test(&node->refcount)) {
- if (parent_node)
+ if (node->del_hw_func)
+ node->del_hw_func(node);
+ if (parent_node) {
+			/* Only the root namespace has no parent, in which case we
+			 * just need to free its node.
+			 */
+ down_write_ref_node(parent_node);
list_del_init(&node->list);
- if (node->remove_func)
- node->remove_func(node);
- kfree(node);
+ if (node->del_sw_func)
+ node->del_sw_func(node);
+ up_write_ref_node(parent_node);
+ } else {
+ kfree(node);
+ }
node = NULL;
}
- unlock_ref_node(parent_node);
if (!node && parent_node)
tree_put_node(parent_node);
}
@@ -362,6 +390,15 @@ static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
return container_of(ns, struct mlx5_flow_root_namespace, ns);
}
+static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
+{
+ struct mlx5_flow_root_namespace *root = find_root(node);
+
+ if (root)
+ return root->dev->priv.steering;
+ return NULL;
+}
+
static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
{
struct mlx5_flow_root_namespace *root = find_root(node);
@@ -371,26 +408,36 @@ static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
return NULL;
}
-static void del_flow_table(struct fs_node *node)
+static void del_hw_flow_table(struct fs_node *node)
{
struct mlx5_flow_table *ft;
struct mlx5_core_dev *dev;
- struct fs_prio *prio;
int err;
fs_get_obj(ft, node);
dev = get_dev(&ft->node);
- err = mlx5_cmd_destroy_flow_table(dev, ft);
- if (err)
- mlx5_core_warn(dev, "flow steering can't destroy ft\n");
- ida_destroy(&ft->fte_allocator);
+ if (node->active) {
+ err = mlx5_cmd_destroy_flow_table(dev, ft);
+ if (err)
+ mlx5_core_warn(dev, "flow steering can't destroy ft\n");
+ }
+}
+
+static void del_sw_flow_table(struct fs_node *node)
+{
+ struct mlx5_flow_table *ft;
+ struct fs_prio *prio;
+
+ fs_get_obj(ft, node);
+
rhltable_destroy(&ft->fgs_hash);
fs_get_obj(prio, ft->node.parent);
prio->num_ft--;
+ kfree(ft);
}
-static void del_rule(struct fs_node *node)
+static void del_sw_hw_rule(struct fs_node *node)
{
struct mlx5_flow_rule *rule;
struct mlx5_flow_table *ft;
@@ -406,7 +453,6 @@ static void del_rule(struct fs_node *node)
fs_get_obj(fg, fte->node.parent);
fs_get_obj(ft, fg->node.parent);
trace_mlx5_fs_del_rule(rule);
- list_del(&rule->node.list);
if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
mutex_lock(&rule->dest_attr.ft->lock);
list_del(&rule->next_ft);
@@ -434,117 +480,203 @@ out:
"%s can't del rule fg id=%d fte_index=%d\n",
__func__, fg->id, fte->index);
}
+ kfree(rule);
}
-static void destroy_fte(struct fs_fte *fte, struct mlx5_flow_group *fg)
+static void del_hw_fte(struct fs_node *node)
{
struct mlx5_flow_table *ft;
- int ret;
+ struct mlx5_flow_group *fg;
+ struct mlx5_core_dev *dev;
+ struct fs_fte *fte;
+ int err;
- ret = rhashtable_remove_fast(&fg->ftes_hash, &fte->hash, rhash_fte);
- WARN_ON(ret);
- fte->status = 0;
+ fs_get_obj(fte, node);
+ fs_get_obj(fg, fte->node.parent);
fs_get_obj(ft, fg->node.parent);
- ida_simple_remove(&ft->fte_allocator, fte->index);
+
+ trace_mlx5_fs_del_fte(fte);
+ dev = get_dev(&ft->node);
+ if (node->active) {
+ err = mlx5_cmd_delete_fte(dev, ft,
+ fte->index);
+ if (err)
+ mlx5_core_warn(dev,
+ "flow steering can't delete fte in index %d of flow group id %d\n",
+ fte->index, fg->id);
+ }
}
-static void del_fte(struct fs_node *node)
+static void del_sw_fte(struct fs_node *node)
{
- struct mlx5_flow_table *ft;
+ struct mlx5_flow_steering *steering = get_steering(node);
struct mlx5_flow_group *fg;
- struct mlx5_core_dev *dev;
struct fs_fte *fte;
int err;
fs_get_obj(fte, node);
fs_get_obj(fg, fte->node.parent);
- fs_get_obj(ft, fg->node.parent);
- trace_mlx5_fs_del_fte(fte);
-
- dev = get_dev(&ft->node);
- err = mlx5_cmd_delete_fte(dev, ft,
- fte->index);
- if (err)
- mlx5_core_warn(dev,
- "flow steering can't delete fte in index %d of flow group id %d\n",
- fte->index, fg->id);
- destroy_fte(fte, fg);
+ err = rhashtable_remove_fast(&fg->ftes_hash,
+ &fte->hash,
+ rhash_fte);
+ WARN_ON(err);
+ ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
+ kmem_cache_free(steering->ftes_cache, fte);
}
-static void del_flow_group(struct fs_node *node)
+static void del_hw_flow_group(struct fs_node *node)
{
struct mlx5_flow_group *fg;
struct mlx5_flow_table *ft;
struct mlx5_core_dev *dev;
- int err;
fs_get_obj(fg, node);
fs_get_obj(ft, fg->node.parent);
dev = get_dev(&ft->node);
trace_mlx5_fs_del_fg(fg);
- if (ft->autogroup.active)
- ft->autogroup.num_groups--;
+ if (fg->node.active && mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
+ mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
+ fg->id, ft->id);
+}
+
+static void del_sw_flow_group(struct fs_node *node)
+{
+ struct mlx5_flow_steering *steering = get_steering(node);
+ struct mlx5_flow_group *fg;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ fs_get_obj(fg, node);
+ fs_get_obj(ft, fg->node.parent);
rhashtable_destroy(&fg->ftes_hash);
+ ida_destroy(&fg->fte_allocator);
+ if (ft->autogroup.active)
+ ft->autogroup.num_groups--;
err = rhltable_remove(&ft->fgs_hash,
&fg->hash,
rhash_fg);
WARN_ON(err);
- if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
- mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
- fg->id, ft->id);
+ kmem_cache_free(steering->fgs_cache, fg);
+}
+
+static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
+{
+ int index;
+ int ret;
+
+ index = ida_simple_get(&fg->fte_allocator, 0, fg->max_ftes, GFP_KERNEL);
+ if (index < 0)
+ return index;
+
+ fte->index = index + fg->start_index;
+ ret = rhashtable_insert_fast(&fg->ftes_hash,
+ &fte->hash,
+ rhash_fte);
+ if (ret)
+ goto err_ida_remove;
+
+ tree_add_node(&fte->node, &fg->node);
+ list_add_tail(&fte->node.list, &fg->node.children);
+ return 0;
+
+err_ida_remove:
+ ida_simple_remove(&fg->fte_allocator, index);
+ return ret;
}
-static struct fs_fte *alloc_fte(struct mlx5_flow_act *flow_act,
+static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
u32 *match_value,
- unsigned int index)
+ struct mlx5_flow_act *flow_act)
{
+ struct mlx5_flow_steering *steering = get_steering(&ft->node);
struct fs_fte *fte;
- fte = kzalloc(sizeof(*fte), GFP_KERNEL);
+ fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
if (!fte)
return ERR_PTR(-ENOMEM);
memcpy(fte->val, match_value, sizeof(fte->val));
fte->node.type = FS_TYPE_FLOW_ENTRY;
fte->flow_tag = flow_act->flow_tag;
- fte->index = index;
fte->action = flow_act->action;
fte->encap_id = flow_act->encap_id;
fte->modify_id = flow_act->modify_id;
+ tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
+
return fte;
}
-static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
+static void dealloc_flow_group(struct mlx5_flow_steering *steering,
+ struct mlx5_flow_group *fg)
+{
+ rhashtable_destroy(&fg->ftes_hash);
+ kmem_cache_free(steering->fgs_cache, fg);
+}
+
+static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
+ u8 match_criteria_enable,
+ void *match_criteria,
+ int start_index,
+ int end_index)
{
struct mlx5_flow_group *fg;
- void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
- create_fg_in, match_criteria);
- u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
- create_fg_in,
- match_criteria_enable);
int ret;
- fg = kzalloc(sizeof(*fg), GFP_KERNEL);
+ fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
if (!fg)
return ERR_PTR(-ENOMEM);
ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
if (ret) {
- kfree(fg);
+ kmem_cache_free(steering->fgs_cache, fg);
return ERR_PTR(ret);
- }
+}
+ ida_init(&fg->fte_allocator);
fg->mask.match_criteria_enable = match_criteria_enable;
memcpy(&fg->mask.match_criteria, match_criteria,
sizeof(fg->mask.match_criteria));
fg->node.type = FS_TYPE_FLOW_GROUP;
- fg->start_index = MLX5_GET(create_flow_group_in, create_fg_in,
- start_flow_index);
- fg->max_ftes = MLX5_GET(create_flow_group_in, create_fg_in,
- end_flow_index) - fg->start_index + 1;
+ fg->start_index = start_index;
+ fg->max_ftes = end_index - start_index + 1;
+
+ return fg;
+}
+
+static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
+ u8 match_criteria_enable,
+ void *match_criteria,
+ int start_index,
+ int end_index,
+ struct list_head *prev)
+{
+ struct mlx5_flow_steering *steering = get_steering(&ft->node);
+ struct mlx5_flow_group *fg;
+ int ret;
+
+ fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
+ start_index, end_index);
+ if (IS_ERR(fg))
+ return fg;
+
+ /* initialize refcnt, add to parent list */
+ ret = rhltable_insert(&ft->fgs_hash,
+ &fg->hash,
+ rhash_fg);
+ if (ret) {
+ dealloc_flow_group(steering, fg);
+ return ERR_PTR(ret);
+ }
+
+ tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
+ tree_add_node(&fg->node, &ft->node);
+ /* Add node to group list */
+ list_add(&fg->node.list, prev);
+ atomic_inc(&ft->node.version);
+
return fg;
}
@@ -575,7 +707,6 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_ft
ft->flags = flags;
INIT_LIST_HEAD(&ft->fwd_rules);
mutex_init(&ft->lock);
- ida_init(&ft->fte_allocator);
return ft;
}
@@ -724,7 +855,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
fs_get_obj(fte, rule->node.parent);
if (!(fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return -EINVAL;
- lock_ref_node(&fte->node);
+ down_write_ref_node(&fte->node);
fs_get_obj(fg, fte->node.parent);
fs_get_obj(ft, fg->node.parent);
@@ -733,7 +864,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
ft, fg->id,
modify_mask,
fte);
- unlock_ref_node(&fte->node);
+ up_write_ref_node(&fte->node);
return err;
}
@@ -870,7 +1001,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
goto unlock_root;
}
- tree_init_node(&ft->node, 1, del_flow_table);
+ tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
next_ft = find_next_chained_ft(fs_prio);
err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type,
@@ -882,17 +1013,17 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
err = connect_flow_table(root->dev, ft, fs_prio);
if (err)
goto destroy_ft;
- lock_ref_node(&fs_prio->node);
+ ft->node.active = true;
+ down_write_ref_node(&fs_prio->node);
tree_add_node(&ft->node, &fs_prio->node);
list_add_flow_table(ft, fs_prio);
fs_prio->num_ft++;
- unlock_ref_node(&fs_prio->node);
+ up_write_ref_node(&fs_prio->node);
mutex_unlock(&root->chain_lock);
return ft;
destroy_ft:
mlx5_cmd_destroy_flow_table(root->dev, ft);
free_ft:
- ida_destroy(&ft->fte_allocator);
kfree(ft);
unlock_root:
mutex_unlock(&root->chain_lock);
@@ -960,54 +1091,6 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
}
EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
-/* Flow table should be locked */
-static struct mlx5_flow_group *create_flow_group_common(struct mlx5_flow_table *ft,
- u32 *fg_in,
- struct list_head
- *prev_fg,
- bool is_auto_fg)
-{
- struct mlx5_flow_group *fg;
- struct mlx5_core_dev *dev = get_dev(&ft->node);
- int err;
-
- if (!dev)
- return ERR_PTR(-ENODEV);
-
- fg = alloc_flow_group(fg_in);
- if (IS_ERR(fg))
- return fg;
-
- err = rhltable_insert(&ft->fgs_hash, &fg->hash, rhash_fg);
- if (err)
- goto err_free_fg;
-
- err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id);
- if (err)
- goto err_remove_fg;
-
- if (ft->autogroup.active)
- ft->autogroup.num_groups++;
- /* Add node to tree */
- tree_init_node(&fg->node, !is_auto_fg, del_flow_group);
- tree_add_node(&fg->node, &ft->node);
- /* Add node to group list */
- list_add(&fg->node.list, prev_fg);
-
- trace_mlx5_fs_add_fg(fg);
- return fg;
-
-err_remove_fg:
- WARN_ON(rhltable_remove(&ft->fgs_hash,
- &fg->hash,
- rhash_fg));
-err_free_fg:
- rhashtable_destroy(&fg->ftes_hash);
- kfree(fg);
-
- return ERR_PTR(err);
-}
-
struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
u32 *fg_in)
{
@@ -1016,7 +1099,13 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
fg_in,
match_criteria_enable);
+ int start_index = MLX5_GET(create_flow_group_in, fg_in,
+ start_flow_index);
+ int end_index = MLX5_GET(create_flow_group_in, fg_in,
+ end_flow_index);
+ struct mlx5_core_dev *dev = get_dev(&ft->node);
struct mlx5_flow_group *fg;
+ int err;
if (!check_valid_mask(match_criteria_enable, match_criteria))
return ERR_PTR(-EINVAL);
@@ -1024,9 +1113,21 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
if (ft->autogroup.active)
return ERR_PTR(-EPERM);
- lock_ref_node(&ft->node);
- fg = create_flow_group_common(ft, fg_in, ft->node.children.prev, false);
- unlock_ref_node(&ft->node);
+ down_write_ref_node(&ft->node);
+ fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
+ start_index, end_index,
+ ft->node.children.prev);
+ up_write_ref_node(&ft->node);
+ if (IS_ERR(fg))
+ return fg;
+
+ err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id);
+ if (err) {
+ tree_put_node(&fg->node);
+ return ERR_PTR(err);
+ }
+ trace_mlx5_fs_add_fg(fg);
+ fg->node.active = true;
return fg;
}
@@ -1111,7 +1212,7 @@ create_flow_handle(struct fs_fte *fte,
/* Add dest to dests list- we need flow tables to be in the
* end of the list for forward to next prio rules.
*/
- tree_init_node(&rule->node, 1, del_rule);
+ tree_init_node(&rule->node, NULL, del_sw_hw_rule);
if (dest &&
dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
list_add(&rule->node.list, &fte->node.children);
@@ -1167,7 +1268,9 @@ add_rule_fte(struct fs_fte *fte,
if (err)
goto free_handle;
+ fte->node.active = true;
fte->status |= FS_FTE_STATUS_EXISTING;
+ atomic_inc(&fte->node.version);
out:
return handle;
@@ -1177,59 +1280,17 @@ free_handle:
return ERR_PTR(err);
}
-static struct fs_fte *create_fte(struct mlx5_flow_group *fg,
- u32 *match_value,
- struct mlx5_flow_act *flow_act)
-{
- struct mlx5_flow_table *ft;
- struct fs_fte *fte;
- int index;
- int ret;
-
- fs_get_obj(ft, fg->node.parent);
- index = ida_simple_get(&ft->fte_allocator, fg->start_index,
- fg->start_index + fg->max_ftes,
- GFP_KERNEL);
- if (index < 0)
- return ERR_PTR(index);
-
- fte = alloc_fte(flow_act, match_value, index);
- if (IS_ERR(fte)) {
- ret = PTR_ERR(fte);
- goto err_alloc;
- }
- ret = rhashtable_insert_fast(&fg->ftes_hash, &fte->hash, rhash_fte);
- if (ret)
- goto err_hash;
-
- return fte;
-
-err_hash:
- kfree(fte);
-err_alloc:
- ida_simple_remove(&ft->fte_allocator, index);
- return ERR_PTR(ret);
-}
-
-static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
- u8 match_criteria_enable,
- u32 *match_criteria)
+static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
+ struct mlx5_flow_spec *spec)
{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct list_head *prev = &ft->node.children;
- unsigned int candidate_index = 0;
struct mlx5_flow_group *fg;
- void *match_criteria_addr;
+ unsigned int candidate_index = 0;
unsigned int group_size = 0;
- u32 *in;
if (!ft->autogroup.active)
return ERR_PTR(-ENOENT);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return ERR_PTR(-ENOMEM);
-
if (ft->autogroup.num_groups < ft->autogroup.required_groups)
/* We save place for flow groups in addition to max types */
group_size = ft->max_fte / (ft->autogroup.required_groups + 1);
@@ -1247,25 +1308,55 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
prev = &fg->node.list;
}
- if (candidate_index + group_size > ft->max_fte) {
- fg = ERR_PTR(-ENOSPC);
+ if (candidate_index + group_size > ft->max_fte)
+ return ERR_PTR(-ENOSPC);
+
+ fg = alloc_insert_flow_group(ft,
+ spec->match_criteria_enable,
+ spec->match_criteria,
+ candidate_index,
+ candidate_index + group_size - 1,
+ prev);
+ if (IS_ERR(fg))
goto out;
- }
+
+ ft->autogroup.num_groups++;
+
+out:
+ return fg;
+}
+
+static int create_auto_flow_group(struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *fg)
+{
+ struct mlx5_core_dev *dev = get_dev(&ft->node);
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ void *match_criteria_addr;
+ int err;
+ u32 *in;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
MLX5_SET(create_flow_group_in, in, match_criteria_enable,
- match_criteria_enable);
- MLX5_SET(create_flow_group_in, in, start_flow_index, candidate_index);
- MLX5_SET(create_flow_group_in, in, end_flow_index, candidate_index +
- group_size - 1);
+ fg->mask.match_criteria_enable);
+ MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index +
+ fg->max_ftes - 1);
match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
in, match_criteria);
- memcpy(match_criteria_addr, match_criteria,
- MLX5_ST_SZ_BYTES(fte_match_param));
+ memcpy(match_criteria_addr, fg->mask.match_criteria,
+ sizeof(fg->mask.match_criteria));
+
+ err = mlx5_cmd_create_flow_group(dev, ft, in, &fg->id);
+ if (!err) {
+ fg->node.active = true;
+ trace_mlx5_fs_add_fg(fg);
+ }
- fg = create_flow_group_common(ft, in, prev, true);
-out:
kvfree(in);
- return fg;
+ return err;
}
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
@@ -1340,60 +1431,30 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
struct fs_fte *fte)
{
struct mlx5_flow_handle *handle;
- struct mlx5_flow_table *ft;
+ int old_action;
int i;
+ int ret;
- if (fte) {
- int old_action;
- int ret;
-
- nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
- ret = check_conflicting_ftes(fte, flow_act);
- if (ret) {
- handle = ERR_PTR(ret);
- goto unlock_fte;
- }
-
- old_action = fte->action;
- fte->action |= flow_act->action;
- handle = add_rule_fte(fte, fg, dest, dest_num,
- old_action != flow_act->action);
- if (IS_ERR(handle)) {
- fte->action = old_action;
- goto unlock_fte;
- } else {
- trace_mlx5_fs_set_fte(fte, false);
- goto add_rules;
- }
- }
- fs_get_obj(ft, fg->node.parent);
+ ret = check_conflicting_ftes(fte, flow_act);
+ if (ret)
+ return ERR_PTR(ret);
- fte = create_fte(fg, match_value, flow_act);
- if (IS_ERR(fte))
- return (void *)fte;
- tree_init_node(&fte->node, 0, del_fte);
- nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
- handle = add_rule_fte(fte, fg, dest, dest_num, false);
+ old_action = fte->action;
+ fte->action |= flow_act->action;
+ handle = add_rule_fte(fte, fg, dest, dest_num,
+ old_action != flow_act->action);
if (IS_ERR(handle)) {
- unlock_ref_node(&fte->node);
- destroy_fte(fte, fg);
- kfree(fte);
+ fte->action = old_action;
return handle;
}
+ trace_mlx5_fs_set_fte(fte, false);
- tree_add_node(&fte->node, &fg->node);
- /* fte list isn't sorted */
- list_add_tail(&fte->node.list, &fg->node.children);
- trace_mlx5_fs_set_fte(fte, true);
-add_rules:
for (i = 0; i < handle->num_rules; i++) {
if (atomic_read(&handle->rule[i]->node.refcount) == 1) {
tree_add_node(&handle->rule[i]->node, &fte->node);
trace_mlx5_fs_add_rule(handle->rule[i]);
}
}
-unlock_fte:
- unlock_ref_node(&fte->node);
return handle;
}
@@ -1441,93 +1502,197 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest,
return true;
}
-static struct mlx5_flow_handle *
-try_add_to_existing_fg(struct mlx5_flow_table *ft,
- struct mlx5_flow_spec *spec,
- struct mlx5_flow_act *flow_act,
- struct mlx5_flow_destination *dest,
- int dest_num)
-{
+struct match_list {
+ struct list_head list;
struct mlx5_flow_group *g;
- struct mlx5_flow_handle *rule = ERR_PTR(-ENOENT);
+};
+
+struct match_list_head {
+ struct list_head list;
+ struct match_list first;
+};
+
+static void free_match_list(struct match_list_head *head)
+{
+ if (!list_empty(&head->list)) {
+ struct match_list *iter, *match_tmp;
+
+ list_del(&head->first.list);
+ tree_put_node(&head->first.g->node);
+ list_for_each_entry_safe(iter, match_tmp, &head->list,
+ list) {
+ tree_put_node(&iter->g->node);
+ list_del(&iter->list);
+ kfree(iter);
+ }
+ }
+}
+
+static int build_match_list(struct match_list_head *match_head,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_spec *spec)
+{
struct rhlist_head *tmp, *list;
- struct match_list {
- struct list_head list;
- struct mlx5_flow_group *g;
- } match_list, *iter;
- LIST_HEAD(match_head);
+ struct mlx5_flow_group *g;
+ int err = 0;
rcu_read_lock();
+ INIT_LIST_HEAD(&match_head->list);
/* Collect all fgs which has a matching match_criteria */
list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
+	/* RCU read-side is atomic context, so we can't execute FW commands here */
rhl_for_each_entry_rcu(g, tmp, list, hash) {
struct match_list *curr_match;
- if (likely(list_empty(&match_head))) {
- match_list.g = g;
- list_add_tail(&match_list.list, &match_head);
+ if (likely(list_empty(&match_head->list))) {
+ if (!tree_get_node(&g->node))
+ continue;
+ match_head->first.g = g;
+ list_add_tail(&match_head->first.list,
+ &match_head->list);
continue;
}
- curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
+ curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
if (!curr_match) {
- rcu_read_unlock();
- rule = ERR_PTR(-ENOMEM);
- goto free_list;
+ free_match_list(match_head);
+ err = -ENOMEM;
+ goto out;
+ }
+ if (!tree_get_node(&g->node)) {
+ kfree(curr_match);
+ continue;
}
curr_match->g = g;
- list_add_tail(&curr_match->list, &match_head);
+ list_add_tail(&curr_match->list, &match_head->list);
}
+out:
rcu_read_unlock();
+ return err;
+}
+
+static u64 matched_fgs_get_version(struct list_head *match_head)
+{
+ struct match_list *iter;
+ u64 version = 0;
+
+ list_for_each_entry(iter, match_head, list)
+ version += (u64)atomic_read(&iter->g->node.version);
+ return version;
+}
+
+static struct mlx5_flow_handle *
+try_add_to_existing_fg(struct mlx5_flow_table *ft,
+ struct list_head *match_head,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest,
+ int dest_num,
+ int ft_version)
+{
+ struct mlx5_flow_steering *steering = get_steering(&ft->node);
+ struct mlx5_flow_group *g;
+ struct mlx5_flow_handle *rule;
+ struct match_list *iter;
+ bool take_write = false;
+ struct fs_fte *fte;
+ u64 version;
+ int err;
+
+ fte = alloc_fte(ft, spec->match_value, flow_act);
+ if (IS_ERR(fte))
+ return ERR_PTR(-ENOMEM);
+ list_for_each_entry(iter, match_head, list) {
+ nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT);
+ ida_pre_get(&iter->g->fte_allocator, GFP_KERNEL);
+ }
+
+search_again_locked:
+ version = matched_fgs_get_version(match_head);
/* Try to find a fg that already contains a matching fte */
- list_for_each_entry(iter, &match_head, list) {
- struct fs_fte *fte;
+ list_for_each_entry(iter, match_head, list) {
+ struct fs_fte *fte_tmp;
g = iter->g;
- nested_lock_ref_node(&g->node, FS_MUTEX_PARENT);
- fte = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value,
- rhash_fte);
- if (fte) {
- rule = add_rule_fg(g, spec->match_value,
- flow_act, dest, dest_num, fte);
- unlock_ref_node(&g->node);
- goto free_list;
+ fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value,
+ rhash_fte);
+ if (!fte_tmp || !tree_get_node(&fte_tmp->node))
+ continue;
+
+ nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+ if (!take_write) {
+ list_for_each_entry(iter, match_head, list)
+ up_read_ref_node(&iter->g->node);
+ } else {
+ list_for_each_entry(iter, match_head, list)
+ up_write_ref_node(&iter->g->node);
}
- unlock_ref_node(&g->node);
+
+ rule = add_rule_fg(g, spec->match_value,
+ flow_act, dest, dest_num, fte_tmp);
+ up_write_ref_node(&fte_tmp->node);
+ tree_put_node(&fte_tmp->node);
+ kmem_cache_free(steering->ftes_cache, fte);
+ return rule;
}
/* No group with matching fte found. Try to add a new fte to any
* matching fg.
*/
- list_for_each_entry(iter, &match_head, list) {
- g = iter->g;
- nested_lock_ref_node(&g->node, FS_MUTEX_PARENT);
- rule = add_rule_fg(g, spec->match_value,
- flow_act, dest, dest_num, NULL);
- if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC) {
- unlock_ref_node(&g->node);
- goto free_list;
- }
- unlock_ref_node(&g->node);
+ if (!take_write) {
+ list_for_each_entry(iter, match_head, list)
+ up_read_ref_node(&iter->g->node);
+ list_for_each_entry(iter, match_head, list)
+ nested_down_write_ref_node(&iter->g->node,
+ FS_LOCK_PARENT);
+ take_write = true;
}
-free_list:
- if (!list_empty(&match_head)) {
- struct match_list *match_tmp;
+	/* Check the ft version, in case a new flow group
+	 * was added while the fgs weren't locked.
+	 */
+ if (atomic_read(&ft->node.version) != ft_version) {
+ rule = ERR_PTR(-EAGAIN);
+ goto out;
+ }
- /* The most common case is having one FG. Since we want to
- * optimize this case, we save the first on the stack.
- * Therefore, no need to free it.
- */
- list_del(&list_first_entry(&match_head, typeof(*iter), list)->list);
- list_for_each_entry_safe(iter, match_tmp, &match_head, list) {
- list_del(&iter->list);
- kfree(iter);
+	/* Check the fgs version, in case an FTE with the
+	 * same match value was added while the fgs weren't locked.
+	 */
+ if (version != matched_fgs_get_version(match_head))
+ goto search_again_locked;
+
+ list_for_each_entry(iter, match_head, list) {
+ g = iter->g;
+
+ if (!g->node.active)
+ continue;
+ err = insert_fte(g, fte);
+ if (err) {
+ if (err == -ENOSPC)
+ continue;
+ list_for_each_entry(iter, match_head, list)
+ up_write_ref_node(&iter->g->node);
+ kmem_cache_free(steering->ftes_cache, fte);
+ return ERR_PTR(err);
}
- }
+ nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
+ list_for_each_entry(iter, match_head, list)
+ up_write_ref_node(&iter->g->node);
+ rule = add_rule_fg(g, spec->match_value,
+ flow_act, dest, dest_num, fte);
+ up_write_ref_node(&fte->node);
+ tree_put_node(&fte->node);
+ return rule;
+ }
+ rule = ERR_PTR(-ENOENT);
+out:
+ list_for_each_entry(iter, match_head, list)
+ up_write_ref_node(&iter->g->node);
+ kmem_cache_free(steering->ftes_cache, fte);
return rule;
}
@@ -1539,8 +1704,14 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
int dest_num)
{
+ struct mlx5_flow_steering *steering = get_steering(&ft->node);
struct mlx5_flow_group *g;
struct mlx5_flow_handle *rule;
+ struct match_list_head match_head;
+ bool take_write = false;
+ struct fs_fte *fte;
+ int version;
+ int err;
int i;
if (!check_valid_spec(spec))
@@ -1550,33 +1721,73 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
if (!dest_is_valid(&dest[i], flow_act->action, ft))
return ERR_PTR(-EINVAL);
}
+ nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
+search_again_locked:
+ version = atomic_read(&ft->node.version);
+
+	/* Collect all fgs which have a matching match_criteria */
+ err = build_match_list(&match_head, ft, spec);
+ if (err)
+ return ERR_PTR(err);
+
+ if (!take_write)
+ up_read_ref_node(&ft->node);
+
+ rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
+ dest_num, version);
+ free_match_list(&match_head);
+ if (!IS_ERR(rule) ||
+ (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN))
+ return rule;
+
+ if (!take_write) {
+ nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
+ take_write = true;
+ }
- nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
- rule = try_add_to_existing_fg(ft, spec, flow_act, dest, dest_num);
- if (!IS_ERR(rule))
- goto unlock;
+ if (PTR_ERR(rule) == -EAGAIN ||
+ version != atomic_read(&ft->node.version))
+ goto search_again_locked;
- g = create_autogroup(ft, spec->match_criteria_enable,
- spec->match_criteria);
+ g = alloc_auto_flow_group(ft, spec);
if (IS_ERR(g)) {
rule = (void *)g;
- goto unlock;
+ up_write_ref_node(&ft->node);
+ return rule;
}
- rule = add_rule_fg(g, spec->match_value, flow_act, dest,
- dest_num, NULL);
- if (IS_ERR(rule)) {
- /* Remove assumes refcount > 0 and autogroup creates a group
- * with a refcount = 0.
- */
- unlock_ref_node(&ft->node);
- tree_get_node(&g->node);
- tree_remove_node(&g->node);
- return rule;
+ nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+ up_write_ref_node(&ft->node);
+
+ err = create_auto_flow_group(ft, g);
+ if (err)
+ goto err_release_fg;
+
+ fte = alloc_fte(ft, spec->match_value, flow_act);
+ if (IS_ERR(fte)) {
+ err = PTR_ERR(fte);
+ goto err_release_fg;
}
-unlock:
- unlock_ref_node(&ft->node);
+
+ err = insert_fte(g, fte);
+ if (err) {
+ kmem_cache_free(steering->ftes_cache, fte);
+ goto err_release_fg;
+ }
+
+ nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
+ up_write_ref_node(&g->node);
+ rule = add_rule_fg(g, spec->match_value, flow_act, dest,
+ dest_num, fte);
+ up_write_ref_node(&fte->node);
+ tree_put_node(&fte->node);
+ tree_put_node(&g->node);
return rule;
+
+err_release_fg:
+ up_write_ref_node(&g->node);
+ tree_put_node(&g->node);
+ return ERR_PTR(err);
}
static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
@@ -1817,7 +2028,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
return ERR_PTR(-ENOMEM);
fs_prio->node.type = FS_TYPE_PRIO;
- tree_init_node(&fs_prio->node, 1, NULL);
+ tree_init_node(&fs_prio->node, NULL, NULL);
tree_add_node(&fs_prio->node, &ns->node);
fs_prio->num_levels = num_levels;
fs_prio->prio = prio;
@@ -1843,7 +2054,7 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
return ERR_PTR(-ENOMEM);
fs_init_namespace(ns);
- tree_init_node(&ns->node, 1, NULL);
+ tree_init_node(&ns->node, NULL, NULL);
tree_add_node(&ns->node, &prio->node);
list_add_tail(&ns->node.list, &prio->node.children);
@@ -1968,7 +2179,7 @@ static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering
ns = &root_ns->ns;
fs_init_namespace(ns);
mutex_init(&root_ns->chain_lock);
- tree_init_node(&ns->node, 1, NULL);
+ tree_init_node(&ns->node, NULL, NULL);
tree_add_node(&ns->node, NULL);
return root_ns;
@@ -2066,8 +2277,10 @@ static void clean_tree(struct fs_node *node)
struct fs_node *iter;
struct fs_node *temp;
+ tree_get_node(node);
list_for_each_entry_safe(iter, temp, &node->children, list)
clean_tree(iter);
+ tree_put_node(node);
tree_remove_node(node);
}
}
@@ -2091,6 +2304,8 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
mlx5_cleanup_fc_stats(dev);
+ kmem_cache_destroy(steering->ftes_cache);
+ kmem_cache_destroy(steering->fgs_cache);
kfree(steering);
}
@@ -2196,6 +2411,16 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
steering->dev = dev;
dev->priv.steering = steering;
+ steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
+ sizeof(struct mlx5_flow_group), 0,
+ 0, NULL);
+ steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
+ 0, NULL);
+ if (!steering->ftes_cache || !steering->fgs_cache) {
+ err = -ENOMEM;
+ goto err;
+ }
+
if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
(MLX5_CAP_GEN(dev, nic_flow_table))) ||
((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 48dd78975062..7a01277519e5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -66,6 +66,8 @@ enum fs_fte_status {
struct mlx5_flow_steering {
struct mlx5_core_dev *dev;
+ struct kmem_cache *fgs_cache;
+ struct kmem_cache *ftes_cache;
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns;
struct mlx5_flow_root_namespace *esw_egress_root_ns;
@@ -81,9 +83,12 @@ struct fs_node {
struct fs_node *parent;
struct fs_node *root;
/* lock the node for writing and traversing */
- struct mutex lock;
+ struct rw_semaphore lock;
atomic_t refcount;
- void (*remove_func)(struct fs_node *);
+ bool active;
+ void (*del_hw_func)(struct fs_node *);
+ void (*del_sw_func)(struct fs_node *);
+ atomic_t version;
};
struct mlx5_flow_rule {
@@ -120,7 +125,6 @@ struct mlx5_flow_table {
/* FWD rules that point on this flow table */
struct list_head fwd_rules;
u32 flags;
- struct ida fte_allocator;
struct rhltable fgs_hash;
};
@@ -200,6 +204,7 @@ struct mlx5_flow_group {
struct mlx5_flow_group_mask mask;
u32 start_index;
u32 max_ftes;
+ struct ida fte_allocator;
u32 id;
struct rhashtable ftes_hash;
struct rhlist_head hash;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 5cd4df08ce97..321988ac57cc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -53,6 +53,7 @@
#include <linux/notifier.h>
#include <linux/dcbnl.h>
#include <linux/inetdevice.h>
+#include <linux/netlink.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
@@ -4298,7 +4299,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
if (info->linking)
err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
lower_dev,
- upper_dev);
+ upper_dev,
+ extack);
else
mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
lower_dev,
@@ -4389,18 +4391,25 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
struct net_device *upper_dev;
int err = 0;
+ extack = netdev_notifier_info_to_extack(&info->info);
+
switch (event) {
case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
- if (!netif_is_bridge_master(upper_dev))
+ if (!netif_is_bridge_master(upper_dev)) {
+ NL_SET_ERR_MSG(extack, "spectrum: VLAN devices only support bridge and VRF uppers");
return -EINVAL;
+ }
if (!info->linking)
break;
- if (netdev_has_any_upper_dev(upper_dev))
+ if (netdev_has_any_upper_dev(upper_dev)) {
+ NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
+ }
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
@@ -4408,7 +4417,8 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
if (info->linking)
err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
vlan_dev,
- upper_dev);
+ upper_dev,
+ extack);
else
mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
vlan_dev,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index ae67e6046098..8e45183dc9bb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -326,7 +326,8 @@ void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *brport_dev,
- struct net_device *br_dev);
+ struct net_device *br_dev,
+ struct netlink_ext_ack *extack);
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *brport_dev,
struct net_device *br_dev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index e0f8ea4ed7af..6a356f4b99a3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -3640,20 +3640,6 @@ static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib *fib)
{
- struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
- struct mlxsw_sp_lpm_tree *lpm_tree;
-
- /* Aggregate prefix lengths across all virtual routers to make
- * sure we only have used prefix lengths in the LPM tree.
- */
- mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage);
- lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
- fib->proto);
- if (IS_ERR(lpm_tree))
- goto err_tree_get;
- mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
-
-err_tree_get:
if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage))
return;
mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
@@ -5957,7 +5943,7 @@ static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
-static u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
+u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 3d449180b035..3f2d840cb285 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -70,6 +70,7 @@ u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif);
u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif);
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
+u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp);
const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif);
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 0f9eac5f4ebf..7b8548e25ae7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -46,8 +46,10 @@
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
+#include <linux/netlink.h>
#include <net/switchdev.h>
+#include "spectrum_router.h"
#include "spectrum.h"
#include "core.h"
#include "reg.h"
@@ -78,7 +80,8 @@ struct mlxsw_sp_bridge_device {
struct list_head ports_list;
struct list_head mids_list;
u8 vlan_enabled:1,
- multicast_enabled:1;
+ multicast_enabled:1,
+ mrouter:1;
const struct mlxsw_sp_bridge_ops *ops;
};
@@ -107,7 +110,8 @@ struct mlxsw_sp_bridge_vlan {
struct mlxsw_sp_bridge_ops {
int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_bridge_port *bridge_port,
- struct mlxsw_sp_port *mlxsw_sp_port);
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack);
void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_bridge_port *bridge_port,
struct mlxsw_sp_port *mlxsw_sp_port);
@@ -168,6 +172,7 @@ mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
bridge_device->dev = br_dev;
bridge_device->vlan_enabled = vlan_enabled;
bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
+ bridge_device->mrouter = br_multicast_router(br_dev);
INIT_LIST_HEAD(&bridge_device->ports_list);
if (vlan_enabled) {
bridge->vlan_enabled_exists = true;
@@ -810,6 +815,60 @@ static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
+static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
+ u16 mid_idx, bool add)
+{
+ char *smid_pl;
+ int err;
+
+ smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
+ if (!smid_pl)
+ return -ENOMEM;
+
+ mlxsw_reg_smid_pack(smid_pl, mid_idx,
+ mlxsw_sp_router_port(mlxsw_sp), add);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
+ kfree(smid_pl);
+ return err;
+}
+
+static void
+mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_bridge_device *bridge_device,
+ bool add)
+{
+ struct mlxsw_sp_mid *mid;
+
+ list_for_each_entry(mid, &bridge_device->mids_list, list)
+ mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
+}
+
+static int
+mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct switchdev_trans *trans,
+ struct net_device *orig_dev,
+ bool is_mrouter)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_bridge_device *bridge_device;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ /* It's possible we failed to enslave the port, yet this
+ * operation is executed due to it being deferred.
+ */
+ bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
+ if (!bridge_device)
+ return 0;
+
+ if (bridge_device->mrouter != is_mrouter)
+ mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
+ is_mrouter);
+ bridge_device->mrouter = is_mrouter;
+ return 0;
+}
+
static int mlxsw_sp_port_attr_set(struct net_device *dev,
const struct switchdev_attr *attr,
struct switchdev_trans *trans)
@@ -847,6 +906,11 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
attr->orig_dev,
attr->u.mc_disabled);
break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
+ err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
+ attr->orig_dev,
+ attr->u.mrouter);
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -1241,7 +1305,8 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
}
static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
- long *ports_bitmap)
+ long *ports_bitmap,
+ bool set_router_port)
{
char *smid_pl;
int err, i;
@@ -1256,9 +1321,15 @@ static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
}
+ mlxsw_reg_smid_port_mask_set(smid_pl,
+ mlxsw_sp_router_port(mlxsw_sp), 1);
+
for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
mlxsw_reg_smid_port_set(smid_pl, i, 1);
+ mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp),
+ set_router_port);
+
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
kfree(smid_pl);
return err;
@@ -1362,7 +1433,8 @@ mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);
mid->mid = mid_idx;
- err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap);
+ err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
+ bridge_device->mrouter);
kfree(flood_bitmap);
if (err)
return false;
@@ -1735,12 +1807,15 @@ static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
static int
mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_bridge_port *bridge_port,
- struct mlxsw_sp_port *mlxsw_sp_port)
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- if (is_vlan_dev(bridge_port->dev))
+ if (is_vlan_dev(bridge_port->dev)) {
+ NL_SET_ERR_MSG(extack, "spectrum: Can not enslave a VLAN device to a VLAN-aware bridge");
return -EINVAL;
+ }
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
if (WARN_ON(!mlxsw_sp_port_vlan))
@@ -1797,13 +1872,16 @@ mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
static int
mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_bridge_port *bridge_port,
- struct mlxsw_sp_port *mlxsw_sp_port)
+ struct mlxsw_sp_port *mlxsw_sp_port,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
u16 vid;
- if (!is_vlan_dev(bridge_port->dev))
+ if (!is_vlan_dev(bridge_port->dev)) {
+ NL_SET_ERR_MSG(extack, "spectrum: Only VLAN devices can be enslaved to a VLAN-unaware bridge");
return -EINVAL;
+ }
vid = vlan_dev_vlan_id(bridge_port->dev);
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
@@ -1811,7 +1889,7 @@ mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
return -EINVAL;
if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
- netdev_err(mlxsw_sp_port->dev, "Can't bridge VLAN uppers of the same port\n");
+ NL_SET_ERR_MSG(extack, "spectrum: Can not bridge VLAN uppers of the same port");
return -EINVAL;
}
@@ -1854,7 +1932,8 @@ static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *brport_dev,
- struct net_device *br_dev)
+ struct net_device *br_dev,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_bridge_device *bridge_device;
@@ -1867,7 +1946,7 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
bridge_device = bridge_port->bridge_device;
err = bridge_device->ops->port_join(bridge_device, bridge_port,
- mlxsw_sp_port);
+ mlxsw_sp_port, extack);
if (err)
goto err_port_join;
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index becaacf1554d..bd3b2bd408bc 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -14,6 +14,7 @@ nfp-objs := \
nfpcore/nfp_resource.o \
nfpcore/nfp_rtsym.o \
nfpcore/nfp_target.o \
+ nfp_asm.o \
nfp_app.o \
nfp_app_nic.o \
nfp_devlink.o \
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
index 239dfbe8a0a1..13148f30fc4c 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
@@ -110,150 +110,7 @@ nfp_prog_offset_to_index(struct nfp_prog *nfp_prog, unsigned int offset)
return offset - nfp_prog->start_off;
}
-/* --- SW reg --- */
-struct nfp_insn_ur_regs {
- enum alu_dst_ab dst_ab;
- u16 dst;
- u16 areg, breg;
- bool swap;
- bool wr_both;
-};
-
-struct nfp_insn_re_regs {
- enum alu_dst_ab dst_ab;
- u8 dst;
- u8 areg, breg;
- bool swap;
- bool wr_both;
- bool i8;
-};
-
-static u16 nfp_swreg_to_unreg(u32 swreg, bool is_dst)
-{
- u16 val = FIELD_GET(NN_REG_VAL, swreg);
-
- switch (FIELD_GET(NN_REG_TYPE, swreg)) {
- case NN_REG_GPR_A:
- case NN_REG_GPR_B:
- case NN_REG_GPR_BOTH:
- return val;
- case NN_REG_NNR:
- return UR_REG_NN | val;
- case NN_REG_XFER:
- return UR_REG_XFR | val;
- case NN_REG_IMM:
- if (val & ~0xff) {
- pr_err("immediate too large\n");
- return 0;
- }
- return UR_REG_IMM_encode(val);
- case NN_REG_NONE:
- return is_dst ? UR_REG_NO_DST : REG_NONE;
- default:
- pr_err("unrecognized reg encoding %08x\n", swreg);
- return 0;
- }
-}
-
-static int
-swreg_to_unrestricted(u32 dst, u32 lreg, u32 rreg, struct nfp_insn_ur_regs *reg)
-{
- memset(reg, 0, sizeof(*reg));
-
- /* Decode destination */
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM)
- return -EFAULT;
-
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_B)
- reg->dst_ab = ALU_DST_B;
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_BOTH)
- reg->wr_both = true;
- reg->dst = nfp_swreg_to_unreg(dst, true);
-
- /* Decode source operands */
- if (FIELD_GET(NN_REG_TYPE, lreg) == FIELD_GET(NN_REG_TYPE, rreg))
- return -EFAULT;
-
- if (FIELD_GET(NN_REG_TYPE, lreg) == NN_REG_GPR_B ||
- FIELD_GET(NN_REG_TYPE, rreg) == NN_REG_GPR_A) {
- reg->areg = nfp_swreg_to_unreg(rreg, false);
- reg->breg = nfp_swreg_to_unreg(lreg, false);
- reg->swap = true;
- } else {
- reg->areg = nfp_swreg_to_unreg(lreg, false);
- reg->breg = nfp_swreg_to_unreg(rreg, false);
- }
-
- return 0;
-}
-
-static u16 nfp_swreg_to_rereg(u32 swreg, bool is_dst, bool has_imm8, bool *i8)
-{
- u16 val = FIELD_GET(NN_REG_VAL, swreg);
-
- switch (FIELD_GET(NN_REG_TYPE, swreg)) {
- case NN_REG_GPR_A:
- case NN_REG_GPR_B:
- case NN_REG_GPR_BOTH:
- return val;
- case NN_REG_XFER:
- return RE_REG_XFR | val;
- case NN_REG_IMM:
- if (val & ~(0x7f | has_imm8 << 7)) {
- pr_err("immediate too large\n");
- return 0;
- }
- *i8 = val & 0x80;
- return RE_REG_IMM_encode(val & 0x7f);
- case NN_REG_NONE:
- return is_dst ? RE_REG_NO_DST : REG_NONE;
- default:
- pr_err("unrecognized reg encoding\n");
- return 0;
- }
-}
-
-static int
-swreg_to_restricted(u32 dst, u32 lreg, u32 rreg, struct nfp_insn_re_regs *reg,
- bool has_imm8)
-{
- memset(reg, 0, sizeof(*reg));
-
- /* Decode destination */
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM)
- return -EFAULT;
-
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_B)
- reg->dst_ab = ALU_DST_B;
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_GPR_BOTH)
- reg->wr_both = true;
- reg->dst = nfp_swreg_to_rereg(dst, true, false, NULL);
-
- /* Decode source operands */
- if (FIELD_GET(NN_REG_TYPE, lreg) == FIELD_GET(NN_REG_TYPE, rreg))
- return -EFAULT;
-
- if (FIELD_GET(NN_REG_TYPE, lreg) == NN_REG_GPR_B ||
- FIELD_GET(NN_REG_TYPE, rreg) == NN_REG_GPR_A) {
- reg->areg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
- reg->breg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
- reg->swap = true;
- } else {
- reg->areg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
- reg->breg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
- }
-
- return 0;
-}
-
/* --- Emitters --- */
-static const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
- [CMD_TGT_WRITE8] = { 0x00, 0x42 },
- [CMD_TGT_READ8] = { 0x01, 0x43 },
- [CMD_TGT_READ_LE] = { 0x01, 0x40 },
- [CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 },
-};
-
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, bool sync)
@@ -281,7 +138,7 @@ __emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
static void
emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
- u8 mode, u8 xfer, u32 lreg, u32 rreg, u8 size, bool sync)
+ u8 mode, u8 xfer, swreg lreg, swreg rreg, u8 size, bool sync)
{
struct nfp_insn_re_regs reg;
int err;
@@ -296,6 +153,11 @@ emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
nfp_prog->error = -EFAULT;
return;
}
+ if (reg.dst_lmextn || reg.src_lmextn) {
+ pr_err("cmd can't use LMextn\n");
+ nfp_prog->error = -EFAULT;
+ return;
+ }
__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, sync);
}
@@ -341,7 +203,7 @@ emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
static void
__emit_br_byte(struct nfp_prog *nfp_prog, u8 areg, u8 breg, bool imm8,
- u8 byte, bool equal, u16 addr, u8 defer)
+ u8 byte, bool equal, u16 addr, u8 defer, bool src_lmextn)
{
u16 addr_lo, addr_hi;
u64 insn;
@@ -357,32 +219,34 @@ __emit_br_byte(struct nfp_prog *nfp_prog, u8 areg, u8 breg, bool imm8,
FIELD_PREP(OP_BB_EQ, equal) |
FIELD_PREP(OP_BB_DEFBR, defer) |
FIELD_PREP(OP_BB_ADDR_LO, addr_lo) |
- FIELD_PREP(OP_BB_ADDR_HI, addr_hi);
+ FIELD_PREP(OP_BB_ADDR_HI, addr_hi) |
+ FIELD_PREP(OP_BB_SRC_LMEXTN, src_lmextn);
nfp_prog_push(nfp_prog, insn);
}
static void
emit_br_byte_neq(struct nfp_prog *nfp_prog,
- u32 dst, u8 imm, u8 byte, u16 addr, u8 defer)
+ swreg src, u8 imm, u8 byte, u16 addr, u8 defer)
{
struct nfp_insn_re_regs reg;
int err;
- err = swreg_to_restricted(reg_none(), dst, reg_imm(imm), &reg, true);
+ err = swreg_to_restricted(reg_none(), src, reg_imm(imm), &reg, true);
if (err) {
nfp_prog->error = err;
return;
}
__emit_br_byte(nfp_prog, reg.areg, reg.breg, reg.i8, byte, false, addr,
- defer);
+ defer, reg.src_lmextn);
}
static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
enum immed_width width, bool invert,
- enum immed_shift shift, bool wr_both)
+ enum immed_shift shift, bool wr_both,
+ bool dst_lmextn, bool src_lmextn)
{
u64 insn;
@@ -393,19 +257,21 @@ __emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
FIELD_PREP(OP_IMMED_WIDTH, width) |
FIELD_PREP(OP_IMMED_INV, invert) |
FIELD_PREP(OP_IMMED_SHIFT, shift) |
- FIELD_PREP(OP_IMMED_WR_AB, wr_both);
+ FIELD_PREP(OP_IMMED_WR_AB, wr_both) |
+ FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) |
+ FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn);
nfp_prog_push(nfp_prog, insn);
}
static void
-emit_immed(struct nfp_prog *nfp_prog, u32 dst, u16 imm,
+emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
enum immed_width width, bool invert, enum immed_shift shift)
{
struct nfp_insn_ur_regs reg;
int err;
- if (FIELD_GET(NN_REG_TYPE, dst) == NN_REG_IMM) {
+ if (swreg_type(dst) == NN_REG_IMM) {
nfp_prog->error = -EFAULT;
return;
}
@@ -417,13 +283,15 @@ emit_immed(struct nfp_prog *nfp_prog, u32 dst, u16 imm,
}
__emit_immed(nfp_prog, reg.areg, reg.breg, imm >> 8, width,
- invert, shift, reg.wr_both);
+ invert, shift, reg.wr_both,
+ reg.dst_lmextn, reg.src_lmextn);
}
static void
__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
enum shf_sc sc, u8 shift,
- u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both)
+ u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both,
+ bool dst_lmextn, bool src_lmextn)
{
u64 insn;
@@ -445,14 +313,16 @@ __emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
FIELD_PREP(OP_SHF_SHIFT, shift) |
FIELD_PREP(OP_SHF_OP, op) |
FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
- FIELD_PREP(OP_SHF_WR_AB, wr_both);
+ FIELD_PREP(OP_SHF_WR_AB, wr_both) |
+ FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) |
+ FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn);
nfp_prog_push(nfp_prog, insn);
}
static void
-emit_shf(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum shf_op op, u32 rreg,
- enum shf_sc sc, u8 shift)
+emit_shf(struct nfp_prog *nfp_prog, swreg dst,
+ swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift)
{
struct nfp_insn_re_regs reg;
int err;
@@ -464,12 +334,14 @@ emit_shf(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum shf_op op, u32 rreg,
}
__emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
- reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both);
+ reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both,
+ reg.dst_lmextn, reg.src_lmextn);
}
static void
__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
- u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both)
+ u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
+ bool dst_lmextn, bool src_lmextn)
{
u64 insn;
@@ -480,13 +352,16 @@ __emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
FIELD_PREP(OP_ALU_SW, swap) |
FIELD_PREP(OP_ALU_OP, op) |
FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
- FIELD_PREP(OP_ALU_WR_AB, wr_both);
+ FIELD_PREP(OP_ALU_WR_AB, wr_both) |
+ FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) |
+ FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn);
nfp_prog_push(nfp_prog, insn);
}
static void
-emit_alu(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum alu_op op, u32 rreg)
+emit_alu(struct nfp_prog *nfp_prog, swreg dst,
+ swreg lreg, enum alu_op op, swreg rreg)
{
struct nfp_insn_ur_regs reg;
int err;
@@ -498,13 +373,15 @@ emit_alu(struct nfp_prog *nfp_prog, u32 dst, u32 lreg, enum alu_op op, u32 rreg)
}
__emit_alu(nfp_prog, reg.dst, reg.dst_ab,
- reg.areg, op, reg.breg, reg.swap, reg.wr_both);
+ reg.areg, op, reg.breg, reg.swap, reg.wr_both,
+ reg.dst_lmextn, reg.src_lmextn);
}
static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
- bool zero, bool swap, bool wr_both)
+ bool zero, bool swap, bool wr_both,
+ bool dst_lmextn, bool src_lmextn)
{
u64 insn;
@@ -517,35 +394,44 @@ __emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
FIELD_PREP(OP_LDF_ZF, zero) |
FIELD_PREP(OP_LDF_BMASK, bmask) |
FIELD_PREP(OP_LDF_SHF, shift) |
- FIELD_PREP(OP_LDF_WR_AB, wr_both);
+ FIELD_PREP(OP_LDF_WR_AB, wr_both) |
+ FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) |
+ FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn);
nfp_prog_push(nfp_prog, insn);
}
static void
emit_ld_field_any(struct nfp_prog *nfp_prog, enum shf_sc sc, u8 shift,
- u32 dst, u8 bmask, u32 src, bool zero)
+ swreg dst, u8 bmask, swreg src, bool zero)
{
struct nfp_insn_re_regs reg;
int err;
- err = swreg_to_restricted(reg_none(), dst, src, &reg, true);
+ /* Note: ld_field is special as it uses one of the src regs as dst */
+ err = swreg_to_restricted(dst, dst, src, &reg, true);
if (err) {
nfp_prog->error = err;
return;
}
__emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
- reg.i8, zero, reg.swap, reg.wr_both);
+ reg.i8, zero, reg.swap, reg.wr_both,
+ reg.dst_lmextn, reg.src_lmextn);
}
static void
-emit_ld_field(struct nfp_prog *nfp_prog, u32 dst, u8 bmask, u32 src,
+emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
enum shf_sc sc, u8 shift)
{
emit_ld_field_any(nfp_prog, sc, shift, dst, bmask, src, false);
}
+static void emit_nop(struct nfp_prog *nfp_prog)
+{
+ __emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0);
+}
+
/* --- Wrappers --- */
static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
{
@@ -565,7 +451,7 @@ static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
return true;
}
-static void wrp_immed(struct nfp_prog *nfp_prog, u32 dst, u32 imm)
+static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
{
enum immed_shift shift;
u16 val;
@@ -586,7 +472,7 @@ static void wrp_immed(struct nfp_prog *nfp_prog, u32 dst, u32 imm)
 * If the @imm is small enough, encode it directly in the operand and return;
 * otherwise load @imm to a spare register and return its encoding.
*/
-static u32 ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
+static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
if (FIELD_FIT(UR_REG_IMM_MAX, imm))
return reg_imm(imm);
@@ -599,7 +485,7 @@ static u32 ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
 * If the @imm is small enough, encode it directly in the operand and return;
 * otherwise load @imm to a spare register and return its encoding.
*/
-static u32 re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, u32 tmp_reg)
+static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
if (FIELD_FIT(RE_REG_IMM_MAX, imm))
return reg_imm(imm);
@@ -629,7 +515,7 @@ construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset,
{
unsigned int i;
u16 shift, sz;
- u32 tmp_reg;
+ swreg tmp_reg;
/* We load the value from the address indicated in @offset and then
* shift out the data we don't need. Note: this is big endian!
@@ -646,22 +532,22 @@ construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset,
emit_alu(nfp_prog, imm_a(nfp_prog),
imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
emit_alu(nfp_prog, reg_none(),
- NFP_BPF_ABI_LEN, ALU_OP_SUB, imm_a(nfp_prog));
+ plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
/* Load data */
emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
- pkt_reg(nfp_prog), imm_b(nfp_prog), sz - 1, true);
+ pptr_reg(nfp_prog), imm_b(nfp_prog), sz - 1, true);
} else {
/* Check packet length */
tmp_reg = ur_load_imm_any(nfp_prog, offset + size,
imm_a(nfp_prog));
emit_alu(nfp_prog, reg_none(),
- NFP_BPF_ABI_LEN, ALU_OP_SUB, tmp_reg);
+ plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
wrp_br_special(nfp_prog, BR_BLO, OP_BR_GO_ABORT);
/* Load data */
tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
- pkt_reg(nfp_prog), tmp_reg, sz - 1, true);
+ pptr_reg(nfp_prog), tmp_reg, sz - 1, true);
}
i = 0;
@@ -684,20 +570,10 @@ static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
return construct_data_ind_ld(nfp_prog, offset, 0, false, size);
}
-static int wrp_set_mark(struct nfp_prog *nfp_prog, u8 src)
-{
- emit_alu(nfp_prog, NFP_BPF_ABI_MARK,
- reg_none(), ALU_OP_NONE, reg_b(src));
- emit_alu(nfp_prog, NFP_BPF_ABI_FLAGS,
- NFP_BPF_ABI_FLAGS, ALU_OP_OR, reg_imm(NFP_BPF_ABI_FLAG_MARK));
-
- return 0;
-}
-
static void
wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
{
- u32 tmp_reg;
+ swreg tmp_reg;
if (alu_op == ALU_OP_AND) {
if (!imm)
@@ -815,7 +691,7 @@ wrp_cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
u8 reg = insn->dst_reg * 2;
- u32 tmp_reg;
+ swreg tmp_reg;
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
@@ -967,12 +843,24 @@ static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
-
- if (insn->imm != 32)
- return 1; /* TODO */
-
- wrp_reg_mov(nfp_prog, insn->dst_reg * 2 + 1, insn->dst_reg * 2);
- wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), 0);
+ u8 dst = insn->dst_reg * 2;
+
+ if (insn->imm < 32) {
+ emit_shf(nfp_prog, reg_both(dst + 1),
+ reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
+ SHF_SC_R_DSHF, 32 - insn->imm);
+ emit_shf(nfp_prog, reg_both(dst),
+ reg_none(), SHF_OP_NONE, reg_b(dst),
+ SHF_SC_L_SHF, insn->imm);
+ } else if (insn->imm == 32) {
+ wrp_reg_mov(nfp_prog, dst + 1, dst);
+ wrp_immed(nfp_prog, reg_both(dst), 0);
+ } else if (insn->imm > 32) {
+ emit_shf(nfp_prog, reg_both(dst + 1),
+ reg_none(), SHF_OP_NONE, reg_b(dst),
+ SHF_SC_L_SHF, insn->imm - 32);
+ wrp_immed(nfp_prog, reg_both(dst), 0);
+ }
return 0;
}
@@ -980,12 +868,24 @@ static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
-
- if (insn->imm != 32)
- return 1; /* TODO */
-
- wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->dst_reg * 2 + 1);
- wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
+ u8 dst = insn->dst_reg * 2;
+
+ if (insn->imm < 32) {
+ emit_shf(nfp_prog, reg_both(dst),
+ reg_a(dst + 1), SHF_OP_NONE, reg_b(dst),
+ SHF_SC_R_DSHF, insn->imm);
+ emit_shf(nfp_prog, reg_both(dst + 1),
+ reg_none(), SHF_OP_NONE, reg_b(dst + 1),
+ SHF_SC_R_SHF, insn->imm);
+ } else if (insn->imm == 32) {
+ wrp_reg_mov(nfp_prog, dst, dst + 1);
+ wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ } else if (insn->imm > 32) {
+ emit_shf(nfp_prog, reg_both(dst),
+ reg_none(), SHF_OP_NONE, reg_b(dst + 1),
+ SHF_SC_R_SHF, insn->imm - 32);
+ wrp_immed(nfp_prog, reg_both(dst + 1), 0);
+ }
return 0;
}
@@ -1130,7 +1030,7 @@ static int mem_ldx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
if (meta->insn.off == offsetof(struct sk_buff, len))
emit_alu(nfp_prog, reg_both(meta->insn.dst_reg * 2),
- reg_none(), ALU_OP_NONE, NFP_BPF_ABI_LEN);
+ reg_none(), ALU_OP_NONE, plen_reg(nfp_prog));
else
return -EOPNOTSUPP;
@@ -1139,18 +1039,18 @@ static int mem_ldx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int mem_ldx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- u32 dst = reg_both(meta->insn.dst_reg * 2);
+ swreg dst = reg_both(meta->insn.dst_reg * 2);
if (meta->insn.off != offsetof(struct xdp_md, data) &&
meta->insn.off != offsetof(struct xdp_md, data_end))
return -EOPNOTSUPP;
- emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, NFP_BPF_ABI_PKT);
+ emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, pptr_reg(nfp_prog));
if (meta->insn.off == offsetof(struct xdp_md, data))
return 0;
- emit_alu(nfp_prog, dst, dst, ALU_OP_ADD, NFP_BPF_ABI_LEN);
+ emit_alu(nfp_prog, dst, dst, ALU_OP_ADD, plen_reg(nfp_prog));
return 0;
}
@@ -1171,9 +1071,6 @@ static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
static int mem_stx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
- if (meta->insn.off == offsetof(struct sk_buff, mark))
- return wrp_set_mark(nfp_prog, meta->insn.src_reg * 2);
-
return -EOPNOTSUPP;
}
@@ -1202,8 +1099,10 @@ static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
- u32 or1 = reg_a(insn->dst_reg * 2), or2 = reg_b(insn->dst_reg * 2 + 1);
- u32 tmp_reg;
+ swreg or1, or2, tmp_reg;
+
+ or1 = reg_a(insn->dst_reg * 2);
+ or2 = reg_b(insn->dst_reg * 2 + 1);
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
@@ -1252,7 +1151,7 @@ static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
- u32 tmp_reg;
+ swreg tmp_reg;
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
@@ -1283,7 +1182,7 @@ static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
const struct bpf_insn *insn = &meta->insn;
u64 imm = insn->imm; /* sign extend */
- u32 tmp_reg;
+ swreg tmp_reg;
if (insn->off < 0) /* TODO */
return -EOPNOTSUPP;
@@ -1510,8 +1409,9 @@ static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
static void nfp_intro(struct nfp_prog *nfp_prog)
{
- emit_alu(nfp_prog, pkt_reg(nfp_prog),
- reg_none(), ALU_OP_NONE, NFP_BPF_ABI_PKT);
+ wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0));
+ emit_alu(nfp_prog, plen_reg(nfp_prog),
+ plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog));
}
static void nfp_outro_tc_legacy(struct nfp_prog *nfp_prog)
@@ -1656,7 +1556,7 @@ static void nfp_outro(struct nfp_prog *nfp_prog)
static int nfp_translate(struct nfp_prog *nfp_prog)
{
struct nfp_insn_meta *meta;
- int err;
+ int i, err;
nfp_intro(nfp_prog);
if (nfp_prog->error)
@@ -1688,6 +1588,11 @@ static int nfp_translate(struct nfp_prog *nfp_prog)
if (nfp_prog->error)
return nfp_prog->error;
+ for (i = 0; i < NFP_USTORE_PREFETCH_WINDOW; i++)
+ emit_nop(nfp_prog);
+ if (nfp_prog->error)
+ return nfp_prog->error;
+
return nfp_fixup_branches(nfp_prog);
}
@@ -1737,38 +1642,6 @@ static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
}
}
-/* Try to rename registers so that program uses only low ones */
-static int nfp_bpf_opt_reg_rename(struct nfp_prog *nfp_prog)
-{
- bool reg_used[MAX_BPF_REG] = {};
- u8 tgt_reg[MAX_BPF_REG] = {};
- struct nfp_insn_meta *meta;
- unsigned int i, j;
-
- list_for_each_entry(meta, &nfp_prog->insns, l) {
- if (meta->skip)
- continue;
-
- reg_used[meta->insn.src_reg] = true;
- reg_used[meta->insn.dst_reg] = true;
- }
-
- for (i = 0, j = 0; i < ARRAY_SIZE(tgt_reg); i++) {
- if (!reg_used[i])
- continue;
-
- tgt_reg[i] = j++;
- }
- nfp_prog->num_regs = j;
-
- list_for_each_entry(meta, &nfp_prog->insns, l) {
- meta->insn.src_reg = tgt_reg[meta->insn.src_reg];
- meta->insn.dst_reg = tgt_reg[meta->insn.dst_reg];
- }
-
- return 0;
-}
-
/* Remove masking after load since our load guarantees this is not needed */
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
@@ -1845,20 +1718,33 @@ static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
- int ret;
-
nfp_bpf_opt_reg_init(nfp_prog);
- ret = nfp_bpf_opt_reg_rename(nfp_prog);
- if (ret)
- return ret;
-
nfp_bpf_opt_ld_mask(nfp_prog);
nfp_bpf_opt_ld_shift(nfp_prog);
return 0;
}
+static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore)
+{
+ int i;
+
+ for (i = 0; i < nfp_prog->prog_len; i++) {
+ int err;
+
+ err = nfp_ustore_check_valid_no_ecc(nfp_prog->prog[i]);
+ if (err)
+ return err;
+
+ nfp_prog->prog[i] = nfp_ustore_calc_ecc_insn(nfp_prog->prog[i]);
+
+ ustore[i] = cpu_to_le64(nfp_prog->prog[i]);
+ }
+
+ return 0;
+}
+
/**
* nfp_bpf_jit() - translate BPF code into NFP assembly
* @filter: kernel BPF filter struct
@@ -1899,10 +1785,8 @@ nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem,
if (ret)
goto out;
- if (nfp_prog->num_regs <= 7)
- nfp_prog->regs_per_thread = 16;
- else
- nfp_prog->regs_per_thread = 32;
+ nfp_prog->num_regs = MAX_BPF_REG;
+ nfp_prog->regs_per_thread = 32;
nfp_prog->prog = prog_mem;
nfp_prog->__prog_alloc_len = prog_sz;
@@ -1912,10 +1796,13 @@ nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem,
pr_err("Translation failed with error %d (translated: %u)\n",
ret, nfp_prog->n_translated);
ret = -EINVAL;
+ goto out;
}
+ ret = nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)prog_mem);
+
res->n_instr = nfp_prog->prog_len;
- res->dense_mode = nfp_prog->num_regs <= 7;
+ res->dense_mode = false;
out:
nfp_prog_free(nfp_prog);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index be2cf10a2cd7..074726980994 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -89,14 +89,6 @@ nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
struct nfp_net_bpf_priv *priv;
int ret;
- /* Limit to single port, otherwise it's just a NIC */
- if (id > 0) {
- nfp_warn(app->cpp,
- "BPF NIC doesn't support more than one port right now\n");
- nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
- return PTR_ERR_OR_ZERO(nn->port);
- }
-
priv = kmalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 4051e943f363..b7a112acbdb7 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -39,6 +39,7 @@
#include <linux/list.h>
#include <linux/types.h>
+#include "../nfp_asm.h"
#include "../nfp_net.h"
/* For branch fixup logic use up-most byte of branch instruction as scratch
@@ -53,9 +54,13 @@ enum br_special {
};
enum static_regs {
- STATIC_REG_PKT = 1,
-#define REG_PKT_BANK ALU_DST_A
- STATIC_REG_IMM = 2, /* Bank AB */
+ STATIC_REG_IMM = 21, /* Bank AB */
+ STATIC_REG_PKT_LEN = 22, /* Bank B */
+};
+
+enum pkt_vec {
+ PKT_VEC_PKT_LEN = 0,
+ PKT_VEC_PKT_PTR = 2,
};
enum nfp_bpf_action_type {
@@ -65,39 +70,17 @@ enum nfp_bpf_action_type {
NN_ACT_XDP,
};
-/* Software register representation, hardware encoding in asm.h */
-#define NN_REG_TYPE GENMASK(31, 24)
-#define NN_REG_VAL GENMASK(7, 0)
-
-enum nfp_bpf_reg_type {
- NN_REG_GPR_A = BIT(0),
- NN_REG_GPR_B = BIT(1),
- NN_REG_NNR = BIT(2),
- NN_REG_XFER = BIT(3),
- NN_REG_IMM = BIT(4),
- NN_REG_NONE = BIT(5),
-};
-
-#define NN_REG_GPR_BOTH (NN_REG_GPR_A | NN_REG_GPR_B)
-
-#define reg_both(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_BOTH))
-#define reg_a(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_A))
-#define reg_b(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_GPR_B))
-#define reg_nnr(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_NNR))
-#define reg_xfer(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_XFER))
-#define reg_imm(x) ((x) | FIELD_PREP(NN_REG_TYPE, NN_REG_IMM))
-#define reg_none() (FIELD_PREP(NN_REG_TYPE, NN_REG_NONE))
+#define pv_len(np) reg_lm(1, PKT_VEC_PKT_LEN)
+#define pv_ctm_ptr(np) reg_lm(1, PKT_VEC_PKT_PTR)
-#define pkt_reg(np) reg_a((np)->regs_per_thread - STATIC_REG_PKT)
-#define imm_a(np) reg_a((np)->regs_per_thread - STATIC_REG_IMM)
-#define imm_b(np) reg_b((np)->regs_per_thread - STATIC_REG_IMM)
-#define imm_both(np) reg_both((np)->regs_per_thread - STATIC_REG_IMM)
+#define plen_reg(np) reg_b(STATIC_REG_PKT_LEN)
+#define pptr_reg(np) pv_ctm_ptr(np)
+#define imm_a(np) reg_a(STATIC_REG_IMM)
+#define imm_b(np) reg_b(STATIC_REG_IMM)
+#define imm_both(np) reg_both(STATIC_REG_IMM)
-#define NFP_BPF_ABI_FLAGS reg_nnr(0)
+#define NFP_BPF_ABI_FLAGS reg_imm(0)
#define NFP_BPF_ABI_FLAG_MARK 1
-#define NFP_BPF_ABI_MARK reg_nnr(1)
-#define NFP_BPF_ABI_PKT reg_nnr(2)
-#define NFP_BPF_ABI_LEN reg_nnr(3)
struct nfp_prog;
struct nfp_insn_meta;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 38f3835ae176..1194c47ef827 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -36,6 +36,7 @@
#include <net/switchdev.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
@@ -223,6 +224,247 @@ nfp_fl_set_vxlan(struct nfp_fl_set_vxlan *set_vxlan,
return 0;
}
+static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
+{
+ u32 oldvalue = get_unaligned((u32 *)p_exact);
+ u32 oldmask = get_unaligned((u32 *)p_mask);
+
+ value &= mask;
+ value |= oldvalue & ~mask;
+
+ put_unaligned(oldmask | mask, (u32 *)p_mask);
+ put_unaligned(value, (u32 *)p_exact);
+}
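/* A worked example (sketch, not part of the diff above): two hypothetical
 * pedit keys hitting the same 32-bit word accumulate through
 * nfp_fl_set_helper32() as follows, starting from zeroed exact/mask fields:
 *
 *   key 1: value=0x0000beef mask=0x0000ffff -> exact=0x0000beef mask=0x0000ffff
 *   key 2: value=0xdead0000 mask=0xffff0000 -> exact=0xdeadbeef mask=0xffffffff
 *
 * Bits outside the new mask keep whatever an earlier key already wrote.
 */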
+
+static int
+nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
+ struct nfp_fl_set_eth *set_eth)
+{
+ u16 tmp_set_eth_op;
+ u32 exact, mask;
+
+ if (off + 4 > ETH_ALEN * 2)
+ return -EOPNOTSUPP;
+
+ mask = ~tcf_pedit_mask(action, idx);
+ exact = tcf_pedit_val(action, idx);
+
+ if (exact & ~mask)
+ return -EOPNOTSUPP;
+
+ nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
+ &set_eth->eth_addr_mask[off]);
+
+ set_eth->reserved = cpu_to_be16(0);
+ tmp_set_eth_op = FIELD_PREP(NFP_FL_ACT_LEN_LW,
+ sizeof(*set_eth) >> NFP_FL_LW_SIZ) |
+ FIELD_PREP(NFP_FL_ACT_JMP_ID,
+ NFP_FL_ACTION_OPCODE_SET_ETHERNET);
+ set_eth->a_op = cpu_to_be16(tmp_set_eth_op);
+
+ return 0;
+}
+
+static int
+nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
+ struct nfp_fl_set_ip4_addrs *set_ip_addr)
+{
+ u16 tmp_set_ipv4_op;
+ __be32 exact, mask;
+
+ /* We are expecting tcf_pedit to return a big endian value */
+ mask = (__force __be32)~tcf_pedit_mask(action, idx);
+ exact = (__force __be32)tcf_pedit_val(action, idx);
+
+ if (exact & ~mask)
+ return -EOPNOTSUPP;
+
+ switch (off) {
+ case offsetof(struct iphdr, daddr):
+ set_ip_addr->ipv4_dst_mask = mask;
+ set_ip_addr->ipv4_dst = exact;
+ break;
+ case offsetof(struct iphdr, saddr):
+ set_ip_addr->ipv4_src_mask = mask;
+ set_ip_addr->ipv4_src = exact;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ set_ip_addr->reserved = cpu_to_be16(0);
+ tmp_set_ipv4_op = FIELD_PREP(NFP_FL_ACT_LEN_LW,
+ sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ) |
+ FIELD_PREP(NFP_FL_ACT_JMP_ID,
+ NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS);
+ set_ip_addr->a_op = cpu_to_be16(tmp_set_ipv4_op);
+
+ return 0;
+}
+
+static void
+nfp_fl_set_ip6_helper(int opcode_tag, int idx, __be32 exact, __be32 mask,
+ struct nfp_fl_set_ipv6_addr *ip6)
+{
+ u16 tmp_set_op;
+
+ ip6->ipv6[idx % 4].mask = mask;
+ ip6->ipv6[idx % 4].exact = exact;
+
+ ip6->reserved = cpu_to_be16(0);
+ tmp_set_op = FIELD_PREP(NFP_FL_ACT_LEN_LW, sizeof(*ip6) >>
+ NFP_FL_LW_SIZ) |
+ FIELD_PREP(NFP_FL_ACT_JMP_ID, opcode_tag);
+ ip6->a_op = cpu_to_be16(tmp_set_op);
+}
+
+static int
+nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
+ struct nfp_fl_set_ipv6_addr *ip_dst,
+ struct nfp_fl_set_ipv6_addr *ip_src)
+{
+ __be32 exact, mask;
+
+ /* We are expecting tcf_pedit to return a big endian value */
+ mask = (__force __be32)~tcf_pedit_mask(action, idx);
+ exact = (__force __be32)tcf_pedit_val(action, idx);
+
+ if (exact & ~mask)
+ return -EOPNOTSUPP;
+
+ if (off < offsetof(struct ipv6hdr, saddr))
+ return -EOPNOTSUPP;
+ else if (off < offsetof(struct ipv6hdr, daddr))
+ nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, idx,
+ exact, mask, ip_src);
+ else if (off < offsetof(struct ipv6hdr, daddr) +
+ sizeof(struct in6_addr))
+ nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, idx,
+ exact, mask, ip_dst);
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int
+nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
+ struct nfp_fl_set_tport *set_tport, int opcode)
+{
+ u32 exact, mask;
+ u16 tmp_set_op;
+
+ if (off)
+ return -EOPNOTSUPP;
+
+ mask = ~tcf_pedit_mask(action, idx);
+ exact = tcf_pedit_val(action, idx);
+
+ if (exact & ~mask)
+ return -EOPNOTSUPP;
+
+ nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
+ set_tport->tp_port_mask);
+
+ set_tport->reserved = cpu_to_be16(0);
+ tmp_set_op = FIELD_PREP(NFP_FL_ACT_LEN_LW,
+ sizeof(*set_tport) >> NFP_FL_LW_SIZ);
+ tmp_set_op |= FIELD_PREP(NFP_FL_ACT_JMP_ID, opcode);
+ set_tport->a_op = cpu_to_be16(tmp_set_op);
+
+ return 0;
+}
+
+static int
+nfp_fl_pedit(const struct tc_action *action, char *nfp_action, int *a_len)
+{
+ struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
+ struct nfp_fl_set_ip4_addrs set_ip_addr;
+ struct nfp_fl_set_tport set_tport;
+ struct nfp_fl_set_eth set_eth;
+ enum pedit_header_type htype;
+ int idx, nkeys, err;
+ size_t act_size;
+ u32 offset, cmd;
+
+ memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
+ memset(&set_ip6_src, 0, sizeof(set_ip6_src));
+ memset(&set_ip_addr, 0, sizeof(set_ip_addr));
+ memset(&set_tport, 0, sizeof(set_tport));
+ memset(&set_eth, 0, sizeof(set_eth));
+ nkeys = tcf_pedit_nkeys(action);
+
+ for (idx = 0; idx < nkeys; idx++) {
+ cmd = tcf_pedit_cmd(action, idx);
+ htype = tcf_pedit_htype(action, idx);
+ offset = tcf_pedit_offset(action, idx);
+
+ if (cmd != TCA_PEDIT_KEY_EX_CMD_SET)
+ return -EOPNOTSUPP;
+
+ switch (htype) {
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+ err = nfp_fl_set_eth(action, idx, offset, &set_eth);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
+ err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
+ err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst,
+ &set_ip6_src);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
+ err = nfp_fl_set_tport(action, idx, offset, &set_tport,
+ NFP_FL_ACTION_OPCODE_SET_TCP);
+ break;
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
+ err = nfp_fl_set_tport(action, idx, offset, &set_tport,
+ NFP_FL_ACTION_OPCODE_SET_UDP);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ if (err)
+ return err;
+ }
+
+ if (set_eth.a_op) {
+ act_size = sizeof(set_eth);
+ memcpy(nfp_action, &set_eth, act_size);
+ *a_len += act_size;
+ } else if (set_ip_addr.a_op) {
+ act_size = sizeof(set_ip_addr);
+ memcpy(nfp_action, &set_ip_addr, act_size);
+ *a_len += act_size;
+ } else if (set_ip6_dst.a_op && set_ip6_src.a_op) {
+ /* TC compiles the set src and set dst IPv6 addresses as a single
+ * action, but the hardware requires this to be 2 separate actions.
+ */
+ act_size = sizeof(set_ip6_src);
+ memcpy(nfp_action, &set_ip6_src, act_size);
+ *a_len += act_size;
+
+ act_size = sizeof(set_ip6_dst);
+ memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
+ act_size);
+ *a_len += act_size;
+ } else if (set_ip6_dst.a_op) {
+ act_size = sizeof(set_ip6_dst);
+ memcpy(nfp_action, &set_ip6_dst, act_size);
+ *a_len += act_size;
+ } else if (set_ip6_src.a_op) {
+ act_size = sizeof(set_ip6_src);
+ memcpy(nfp_action, &set_ip6_src, act_size);
+ *a_len += act_size;
+ } else if (set_tport.a_op) {
+ act_size = sizeof(set_tport);
+ memcpy(nfp_action, &set_tport, act_size);
+ *a_len += act_size;
+ }
+
+ return 0;
+}
+
static int
nfp_flower_loop_action(const struct tc_action *a,
struct nfp_fl_payload *nfp_fl, int *a_len,
@@ -301,6 +543,9 @@ nfp_flower_loop_action(const struct tc_action *a,
} else if (is_tcf_tunnel_release(a)) {
/* Tunnel decap is handled by default so accept action. */
return 0;
+ } else if (is_tcf_pedit(a)) {
+ if (nfp_fl_pedit(a, &nfp_fl->action_data[*a_len], a_len))
+ return -EOPNOTSUPP;
} else {
/* Currently we do not handle any other actions. */
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 504ddaa21701..f7b7242a22bc 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -57,6 +57,11 @@
#define NFP_FLOWER_MASK_VLAN_CFI BIT(12)
#define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0)
+#define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12)
+#define NFP_FLOWER_MASK_MPLS_TC GENMASK(11, 9)
+#define NFP_FLOWER_MASK_MPLS_BOS BIT(8)
+#define NFP_FLOWER_MASK_MPLS_Q BIT(0)
+
#define NFP_FL_SC_ACT_DROP 0x80000000
#define NFP_FL_SC_ACT_USER 0x7D000000
#define NFP_FL_SC_ACT_POPV 0x6A000000
@@ -72,6 +77,12 @@
#define NFP_FL_ACTION_OPCODE_PUSH_VLAN 1
#define NFP_FL_ACTION_OPCODE_POP_VLAN 2
#define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6
+#define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7
+#define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9
+#define NFP_FL_ACTION_OPCODE_SET_IPV6_SRC 11
+#define NFP_FL_ACTION_OPCODE_SET_IPV6_DST 12
+#define NFP_FL_ACTION_OPCODE_SET_UDP 14
+#define NFP_FL_ACTION_OPCODE_SET_TCP 15
#define NFP_FL_ACTION_OPCODE_PRE_TUNNEL 17
#define NFP_FL_ACTION_OPCODE_NUM 32
@@ -102,6 +113,38 @@ enum nfp_flower_tun_type {
NFP_FL_TUNNEL_VXLAN = 2,
};
+struct nfp_fl_set_eth {
+ __be16 a_op;
+ __be16 reserved;
+ u8 eth_addr_mask[ETH_ALEN * 2];
+ u8 eth_addr_val[ETH_ALEN * 2];
+};
+
+struct nfp_fl_set_ip4_addrs {
+ __be16 a_op;
+ __be16 reserved;
+ __be32 ipv4_src_mask;
+ __be32 ipv4_src;
+ __be32 ipv4_dst_mask;
+ __be32 ipv4_dst;
+};
+
+struct nfp_fl_set_ipv6_addr {
+ __be16 a_op;
+ __be16 reserved;
+ struct {
+ __be32 mask;
+ __be32 exact;
+ } ipv6[4];
+};
+
+struct nfp_fl_set_tport {
+ __be16 a_op;
+ __be16 reserved;
+ u8 tp_port_mask[4];
+ u8 tp_port_val[4];
+};
+
struct nfp_fl_output {
__be16 a_op;
__be16 flags;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 865a815ab92a..60614d4f0e22 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -111,8 +111,21 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
ether_addr_copy(frame->mac_src, &addr->src[0]);
}
- if (mask_version)
- frame->mpls_lse = cpu_to_be32(~0);
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
+ struct flow_dissector_key_mpls *mpls;
+ u32 t_mpls;
+
+ mpls = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_MPLS,
+ target);
+
+ t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, mpls->mpls_label) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, mpls->mpls_tc) |
+ FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, mpls->mpls_bos) |
+ NFP_FLOWER_MASK_MPLS_Q;
+
+ frame->mpls_lse = cpu_to_be32(t_mpls);
+ }
}
static void
@@ -143,7 +156,6 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
struct flow_dissector_key_ipv4_addrs *addr;
struct flow_dissector_key_basic *basic;
- /* Wildcard TOS/TTL for now. */
memset(frame, 0, sizeof(struct nfp_flower_ipv4));
if (dissector_uses_key(flow->dissector,
@@ -161,6 +173,16 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
target);
frame->proto = basic->ip_proto;
}
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_dissector_key_ip *flow_ip;
+
+ flow_ip = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ target);
+ frame->tos = flow_ip->tos;
+ frame->ttl = flow_ip->ttl;
+ }
}
static void
@@ -172,7 +194,6 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
struct flow_dissector_key_ipv6_addrs *addr;
struct flow_dissector_key_basic *basic;
- /* Wildcard LABEL/TOS/TTL for now. */
memset(frame, 0, sizeof(struct nfp_flower_ipv6));
if (dissector_uses_key(flow->dissector,
@@ -190,6 +211,16 @@ nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *frame,
target);
frame->proto = basic->ip_proto;
}
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_dissector_key_ip *flow_ip;
+
+ flow_ip = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_IP,
+ target);
+ frame->tos = flow_ip->tos;
+ frame->ttl = flow_ip->ttl;
+ }
}
static void
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 3d9537ebdea4..6f239c27964e 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -57,6 +57,7 @@
BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
+ BIT(FLOW_DISSECTOR_KEY_MPLS) | \
BIT(FLOW_DISSECTOR_KEY_IP))
#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
@@ -134,7 +135,6 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
{
struct flow_dissector_key_basic *mask_basic = NULL;
struct flow_dissector_key_basic *key_basic = NULL;
- struct flow_dissector_key_ip *mask_ip = NULL;
u32 key_layer_two;
u8 key_layer;
int key_size;
@@ -206,28 +206,15 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
flow->key);
}
- if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_IP))
- mask_ip = skb_flow_dissector_target(flow->dissector,
- FLOW_DISSECTOR_KEY_IP,
- flow->mask);
-
if (mask_basic && mask_basic->n_proto) {
/* Ethernet type is present in the key. */
switch (key_basic->n_proto) {
case cpu_to_be16(ETH_P_IP):
- if (mask_ip && mask_ip->tos)
- return -EOPNOTSUPP;
- if (mask_ip && mask_ip->ttl)
- return -EOPNOTSUPP;
key_layer |= NFP_FLOWER_LAYER_IPV4;
key_size += sizeof(struct nfp_flower_ipv4);
break;
case cpu_to_be16(ETH_P_IPV6):
- if (mask_ip && mask_ip->tos)
- return -EOPNOTSUPP;
- if (mask_ip && mask_ip->ttl)
- return -EOPNOTSUPP;
key_layer |= NFP_FLOWER_LAYER_IPV6;
key_size += sizeof(struct nfp_flower_ipv6);
break;
@@ -238,11 +225,6 @@ nfp_flower_calculate_key_layers(struct nfp_fl_key_ls *ret_key_ls,
case cpu_to_be16(ETH_P_ARP):
return -EOPNOTSUPP;
- /* Currently we do not offload MPLS. */
- case cpu_to_be16(ETH_P_MPLS_UC):
- case cpu_to_be16(ETH_P_MPLS_MC):
- return -EOPNOTSUPP;
-
/* Will be included in layer 2. */
case cpu_to_be16(ETH_P_8021Q):
break;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index af640b5c2108..857bb33020ba 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -36,6 +36,8 @@
#include <net/devlink.h>
+#include <trace/events/devlink.h>
+
#include "nfp_net_repr.h"
struct bpf_prog;
@@ -271,11 +273,17 @@ static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
{
+ trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0,
+ skb->data, skb->len);
+
return nfp_ctrl_tx(app->ctrl, skb);
}
static inline void nfp_app_ctrl_rx(struct nfp_app *app, struct sk_buff *skb)
{
+ trace_devlink_hwmsg(priv_to_devlink(app->pf), true, 0,
+ skb->data, skb->len);
+
app->type->ctrl_msg_rx(app, skb);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
new file mode 100644
index 000000000000..de76e7444fc2
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) 2016-2017 Netronome Systems, Inc.
+ *
+ * This software is dual licensed under the GNU General Public License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree or the BSD 2-Clause License provided below. You have the
+ * option to license this software under the complete terms of either license.
+ *
+ * The BSD 2-Clause License:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include "nfp_asm.h"
+
+const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
+ [CMD_TGT_WRITE8] = { 0x00, 0x42 },
+ [CMD_TGT_READ8] = { 0x01, 0x43 },
+ [CMD_TGT_READ_LE] = { 0x01, 0x40 },
+ [CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 },
+};
+
+static u16 nfp_swreg_to_unreg(swreg reg, bool is_dst)
+{
+ bool lm_id, lm_dec = false;
+ u16 val = swreg_value(reg);
+
+ switch (swreg_type(reg)) {
+ case NN_REG_GPR_A:
+ case NN_REG_GPR_B:
+ case NN_REG_GPR_BOTH:
+ return val;
+ case NN_REG_NNR:
+ return UR_REG_NN | val;
+ case NN_REG_XFER:
+ return UR_REG_XFR | val;
+ case NN_REG_LMEM:
+ lm_id = swreg_lm_idx(reg);
+
+ switch (swreg_lm_mode(reg)) {
+ case NN_LM_MOD_NONE:
+ if (val & ~UR_REG_LM_IDX_MAX) {
+ pr_err("LM offset too large\n");
+ return 0;
+ }
+ return UR_REG_LM | FIELD_PREP(UR_REG_LM_IDX, lm_id) |
+ val;
+ case NN_LM_MOD_DEC:
+ lm_dec = true;
+ /* fall through */
+ case NN_LM_MOD_INC:
+ if (val) {
+ pr_err("LM offset in inc/dec mode\n");
+ return 0;
+ }
+ return UR_REG_LM | UR_REG_LM_POST_MOD |
+ FIELD_PREP(UR_REG_LM_IDX, lm_id) |
+ FIELD_PREP(UR_REG_LM_POST_MOD_DEC, lm_dec);
+ default:
+ pr_err("bad LM mode for unrestricted operands %d\n",
+ swreg_lm_mode(reg));
+ return 0;
+ }
+ case NN_REG_IMM:
+ if (val & ~0xff) {
+ pr_err("immediate too large\n");
+ return 0;
+ }
+ return UR_REG_IMM_encode(val);
+ case NN_REG_NONE:
+ return is_dst ? UR_REG_NO_DST : REG_NONE;
+ }
+
+ pr_err("unrecognized reg encoding %08x\n", reg);
+ return 0;
+}
+
+int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg,
+ struct nfp_insn_ur_regs *reg)
+{
+ memset(reg, 0, sizeof(*reg));
+
+ /* Decode destination */
+ if (swreg_type(dst) == NN_REG_IMM)
+ return -EFAULT;
+
+ if (swreg_type(dst) == NN_REG_GPR_B)
+ reg->dst_ab = ALU_DST_B;
+ if (swreg_type(dst) == NN_REG_GPR_BOTH)
+ reg->wr_both = true;
+ reg->dst = nfp_swreg_to_unreg(dst, true);
+
+ /* Decode source operands */
+ if (swreg_type(lreg) == swreg_type(rreg))
+ return -EFAULT;
+
+ if (swreg_type(lreg) == NN_REG_GPR_B ||
+ swreg_type(rreg) == NN_REG_GPR_A) {
+ reg->areg = nfp_swreg_to_unreg(rreg, false);
+ reg->breg = nfp_swreg_to_unreg(lreg, false);
+ reg->swap = true;
+ } else {
+ reg->areg = nfp_swreg_to_unreg(lreg, false);
+ reg->breg = nfp_swreg_to_unreg(rreg, false);
+ }
+
+ reg->dst_lmextn = swreg_lmextn(dst);
+ reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
+
+ return 0;
+}
+
+static u16 nfp_swreg_to_rereg(swreg reg, bool is_dst, bool has_imm8, bool *i8)
+{
+ u16 val = swreg_value(reg);
+ bool lm_id;
+
+ switch (swreg_type(reg)) {
+ case NN_REG_GPR_A:
+ case NN_REG_GPR_B:
+ case NN_REG_GPR_BOTH:
+ return val;
+ case NN_REG_XFER:
+ return RE_REG_XFR | val;
+ case NN_REG_LMEM:
+ lm_id = swreg_lm_idx(reg);
+
+ if (swreg_lm_mode(reg) != NN_LM_MOD_NONE) {
+ pr_err("bad LM mode for restricted operands %d\n",
+ swreg_lm_mode(reg));
+ return 0;
+ }
+
+ if (val & ~RE_REG_LM_IDX_MAX) {
+ pr_err("LM offset too large\n");
+ return 0;
+ }
+
+ return RE_REG_LM | FIELD_PREP(RE_REG_LM_IDX, lm_id) | val;
+ case NN_REG_IMM:
+ if (val & ~(0x7f | has_imm8 << 7)) {
+ pr_err("immediate too large\n");
+ return 0;
+ }
+ *i8 = val & 0x80;
+ return RE_REG_IMM_encode(val & 0x7f);
+ case NN_REG_NONE:
+ return is_dst ? RE_REG_NO_DST : REG_NONE;
+ case NN_REG_NNR:
+ pr_err("NNRs used with restricted encoding\n");
+ return 0;
+ }
+
+ pr_err("unrecognized reg encoding\n");
+ return 0;
+}
+
+int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
+ struct nfp_insn_re_regs *reg, bool has_imm8)
+{
+ memset(reg, 0, sizeof(*reg));
+
+ /* Decode destination */
+ if (swreg_type(dst) == NN_REG_IMM)
+ return -EFAULT;
+
+ if (swreg_type(dst) == NN_REG_GPR_B)
+ reg->dst_ab = ALU_DST_B;
+ if (swreg_type(dst) == NN_REG_GPR_BOTH)
+ reg->wr_both = true;
+ reg->dst = nfp_swreg_to_rereg(dst, true, false, NULL);
+
+ /* Decode source operands */
+ if (swreg_type(lreg) == swreg_type(rreg))
+ return -EFAULT;
+
+ if (swreg_type(lreg) == NN_REG_GPR_B ||
+ swreg_type(rreg) == NN_REG_GPR_A) {
+ reg->areg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
+ reg->breg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
+ reg->swap = true;
+ } else {
+ reg->areg = nfp_swreg_to_rereg(lreg, false, has_imm8, &reg->i8);
+ reg->breg = nfp_swreg_to_rereg(rreg, false, has_imm8, &reg->i8);
+ }
+
+ reg->dst_lmextn = swreg_lmextn(dst);
+ reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
+
+ return 0;
+}
+
+#define NFP_USTORE_ECC_POLY_WORDS 7
+#define NFP_USTORE_OP_BITS 45
+
+static const u64 nfp_ustore_ecc_polynomials[NFP_USTORE_ECC_POLY_WORDS] = {
+ 0x0ff800007fffULL,
+ 0x11f801ff801fULL,
+ 0x1e387e0781e1ULL,
+ 0x17cb8e388e22ULL,
+ 0x1af5b2c93244ULL,
+ 0x1f56d5525488ULL,
+ 0x0daf69a46910ULL,
+};
+
+static bool parity(u64 value)
+{
+ return hweight64(value) & 1;
+}
+
+int nfp_ustore_check_valid_no_ecc(u64 insn)
+{
+ if (insn & ~GENMASK_ULL(NFP_USTORE_OP_BITS, 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+u64 nfp_ustore_calc_ecc_insn(u64 insn)
+{
+ u8 ecc = 0;
+ int i;
+
+ for (i = 0; i < NFP_USTORE_ECC_POLY_WORDS; i++)
+ ecc |= parity(nfp_ustore_ecc_polynomials[i] & insn) << i;
+
+ return insn | (u64)ecc << NFP_USTORE_OP_BITS;
+}
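/* A sketch of the ECC scheme above (not part of the diff): each of the seven
 * ECC bits is the parity of the instruction bits selected by one polynomial,
 * e.g. bit 0 is parity(insn & 0x0ff800007fffULL), and the resulting 7-bit
 * code is stored above the 45 opcode bits. Callers such as the
 * nfp_bpf_ustore_calc() helper added earlier in this patch are expected to
 * run nfp_ustore_check_valid_no_ecc() first to reject words that already use
 * the high bits.
 */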
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.h b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
index d2b535739d2b..c4c18dd5630a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.h
@@ -34,6 +34,7 @@
#ifndef __NFP_ASM_H__
#define __NFP_ASM_H__ 1
+#include <linux/bitfield.h>
#include <linux/types.h>
#define REG_NONE 0
@@ -43,23 +44,31 @@
#define RE_REG_IMM_encode(x) \
(RE_REG_IMM | ((x) & 0x1f) | (((x) & 0x60) << 1))
#define RE_REG_IMM_MAX 0x07fULL
+#define RE_REG_LM 0x050
+#define RE_REG_LM_IDX 0x008
+#define RE_REG_LM_IDX_MAX 0x7
#define RE_REG_XFR 0x080
#define UR_REG_XFR 0x180
+#define UR_REG_LM 0x200
+#define UR_REG_LM_IDX 0x020
+#define UR_REG_LM_POST_MOD 0x010
+#define UR_REG_LM_POST_MOD_DEC 0x001
+#define UR_REG_LM_IDX_MAX 0xf
#define UR_REG_NN 0x280
#define UR_REG_NO_DST 0x300
#define UR_REG_IMM UR_REG_NO_DST
#define UR_REG_IMM_encode(x) (UR_REG_IMM | (x))
#define UR_REG_IMM_MAX 0x0ffULL
-#define OP_BR_BASE 0x0d800000020ULL
-#define OP_BR_BASE_MASK 0x0f8000c3ce0ULL
-#define OP_BR_MASK 0x0000000001fULL
-#define OP_BR_EV_PIP 0x00000000300ULL
-#define OP_BR_CSS 0x0000003c000ULL
-#define OP_BR_DEFBR 0x00000300000ULL
-#define OP_BR_ADDR_LO 0x007ffc00000ULL
-#define OP_BR_ADDR_HI 0x10000000000ULL
+#define OP_BR_BASE 0x0d800000020ULL
+#define OP_BR_BASE_MASK 0x0f8000c3ce0ULL
+#define OP_BR_MASK 0x0000000001fULL
+#define OP_BR_EV_PIP 0x00000000300ULL
+#define OP_BR_CSS 0x0000003c000ULL
+#define OP_BR_DEFBR 0x00000300000ULL
+#define OP_BR_ADDR_LO 0x007ffc00000ULL
+#define OP_BR_ADDR_HI 0x10000000000ULL
#define nfp_is_br(_insn) \
(((_insn) & OP_BR_BASE_MASK) == OP_BR_BASE)
@@ -82,30 +91,33 @@ enum br_ctx_signal_state {
BR_CSS_NONE = 2,
};
-#define OP_BBYTE_BASE 0x0c800000000ULL
-#define OP_BB_A_SRC 0x000000000ffULL
-#define OP_BB_BYTE 0x00000000300ULL
-#define OP_BB_B_SRC 0x0000003fc00ULL
-#define OP_BB_I8 0x00000040000ULL
-#define OP_BB_EQ 0x00000080000ULL
-#define OP_BB_DEFBR 0x00000300000ULL
-#define OP_BB_ADDR_LO 0x007ffc00000ULL
-#define OP_BB_ADDR_HI 0x10000000000ULL
-
-#define OP_BALU_BASE 0x0e800000000ULL
-#define OP_BA_A_SRC 0x000000003ffULL
-#define OP_BA_B_SRC 0x000000ffc00ULL
-#define OP_BA_DEFBR 0x00000300000ULL
-#define OP_BA_ADDR_HI 0x0007fc00000ULL
-
-#define OP_IMMED_A_SRC 0x000000003ffULL
-#define OP_IMMED_B_SRC 0x000000ffc00ULL
-#define OP_IMMED_IMM 0x0000ff00000ULL
-#define OP_IMMED_WIDTH 0x00060000000ULL
-#define OP_IMMED_INV 0x00080000000ULL
-#define OP_IMMED_SHIFT 0x00600000000ULL
-#define OP_IMMED_BASE 0x0f000000000ULL
-#define OP_IMMED_WR_AB 0x20000000000ULL
+#define OP_BBYTE_BASE 0x0c800000000ULL
+#define OP_BB_A_SRC 0x000000000ffULL
+#define OP_BB_BYTE 0x00000000300ULL
+#define OP_BB_B_SRC 0x0000003fc00ULL
+#define OP_BB_I8 0x00000040000ULL
+#define OP_BB_EQ 0x00000080000ULL
+#define OP_BB_DEFBR 0x00000300000ULL
+#define OP_BB_ADDR_LO 0x007ffc00000ULL
+#define OP_BB_ADDR_HI 0x10000000000ULL
+#define OP_BB_SRC_LMEXTN 0x40000000000ULL
+
+#define OP_BALU_BASE 0x0e800000000ULL
+#define OP_BA_A_SRC 0x000000003ffULL
+#define OP_BA_B_SRC 0x000000ffc00ULL
+#define OP_BA_DEFBR 0x00000300000ULL
+#define OP_BA_ADDR_HI 0x0007fc00000ULL
+
+#define OP_IMMED_A_SRC 0x000000003ffULL
+#define OP_IMMED_B_SRC 0x000000ffc00ULL
+#define OP_IMMED_IMM 0x0000ff00000ULL
+#define OP_IMMED_WIDTH 0x00060000000ULL
+#define OP_IMMED_INV 0x00080000000ULL
+#define OP_IMMED_SHIFT 0x00600000000ULL
+#define OP_IMMED_BASE 0x0f000000000ULL
+#define OP_IMMED_WR_AB 0x20000000000ULL
+#define OP_IMMED_SRC_LMEXTN 0x40000000000ULL
+#define OP_IMMED_DST_LMEXTN 0x80000000000ULL
enum immed_width {
IMMED_WIDTH_ALL = 0,
@@ -119,17 +131,19 @@ enum immed_shift {
IMMED_SHIFT_2B = 2,
};
-#define OP_SHF_BASE 0x08000000000ULL
-#define OP_SHF_A_SRC 0x000000000ffULL
-#define OP_SHF_SC 0x00000000300ULL
-#define OP_SHF_B_SRC 0x0000003fc00ULL
-#define OP_SHF_I8 0x00000040000ULL
-#define OP_SHF_SW 0x00000080000ULL
-#define OP_SHF_DST 0x0000ff00000ULL
-#define OP_SHF_SHIFT 0x001f0000000ULL
-#define OP_SHF_OP 0x00e00000000ULL
-#define OP_SHF_DST_AB 0x01000000000ULL
-#define OP_SHF_WR_AB 0x20000000000ULL
+#define OP_SHF_BASE 0x08000000000ULL
+#define OP_SHF_A_SRC 0x000000000ffULL
+#define OP_SHF_SC 0x00000000300ULL
+#define OP_SHF_B_SRC 0x0000003fc00ULL
+#define OP_SHF_I8 0x00000040000ULL
+#define OP_SHF_SW 0x00000080000ULL
+#define OP_SHF_DST 0x0000ff00000ULL
+#define OP_SHF_SHIFT 0x001f0000000ULL
+#define OP_SHF_OP 0x00e00000000ULL
+#define OP_SHF_DST_AB 0x01000000000ULL
+#define OP_SHF_WR_AB 0x20000000000ULL
+#define OP_SHF_SRC_LMEXTN 0x40000000000ULL
+#define OP_SHF_DST_LMEXTN 0x80000000000ULL
enum shf_op {
SHF_OP_NONE = 0,
@@ -144,14 +158,16 @@ enum shf_sc {
SHF_SC_R_DSHF = 3,
};
-#define OP_ALU_A_SRC 0x000000003ffULL
-#define OP_ALU_B_SRC 0x000000ffc00ULL
-#define OP_ALU_DST 0x0003ff00000ULL
-#define OP_ALU_SW 0x00040000000ULL
-#define OP_ALU_OP 0x00f80000000ULL
-#define OP_ALU_DST_AB 0x01000000000ULL
-#define OP_ALU_BASE 0x0a000000000ULL
-#define OP_ALU_WR_AB 0x20000000000ULL
+#define OP_ALU_A_SRC 0x000000003ffULL
+#define OP_ALU_B_SRC 0x000000ffc00ULL
+#define OP_ALU_DST 0x0003ff00000ULL
+#define OP_ALU_SW 0x00040000000ULL
+#define OP_ALU_OP 0x00f80000000ULL
+#define OP_ALU_DST_AB 0x01000000000ULL
+#define OP_ALU_BASE 0x0a000000000ULL
+#define OP_ALU_WR_AB 0x20000000000ULL
+#define OP_ALU_SRC_LMEXTN 0x40000000000ULL
+#define OP_ALU_DST_LMEXTN 0x80000000000ULL
enum alu_op {
ALU_OP_NONE = 0x00,
@@ -170,26 +186,28 @@ enum alu_dst_ab {
ALU_DST_B = 1,
};
-#define OP_LDF_BASE 0x0c000000000ULL
-#define OP_LDF_A_SRC 0x000000000ffULL
-#define OP_LDF_SC 0x00000000300ULL
-#define OP_LDF_B_SRC 0x0000003fc00ULL
-#define OP_LDF_I8 0x00000040000ULL
-#define OP_LDF_SW 0x00000080000ULL
-#define OP_LDF_ZF 0x00000100000ULL
-#define OP_LDF_BMASK 0x0000f000000ULL
-#define OP_LDF_SHF 0x001f0000000ULL
-#define OP_LDF_WR_AB 0x20000000000ULL
-
-#define OP_CMD_A_SRC 0x000000000ffULL
-#define OP_CMD_CTX 0x00000000300ULL
-#define OP_CMD_B_SRC 0x0000003fc00ULL
-#define OP_CMD_TOKEN 0x000000c0000ULL
-#define OP_CMD_XFER 0x00001f00000ULL
-#define OP_CMD_CNT 0x0000e000000ULL
-#define OP_CMD_SIG 0x000f0000000ULL
-#define OP_CMD_TGT_CMD 0x07f00000000ULL
-#define OP_CMD_MODE 0x1c0000000000ULL
+#define OP_LDF_BASE 0x0c000000000ULL
+#define OP_LDF_A_SRC 0x000000000ffULL
+#define OP_LDF_SC 0x00000000300ULL
+#define OP_LDF_B_SRC 0x0000003fc00ULL
+#define OP_LDF_I8 0x00000040000ULL
+#define OP_LDF_SW 0x00000080000ULL
+#define OP_LDF_ZF 0x00000100000ULL
+#define OP_LDF_BMASK 0x0000f000000ULL
+#define OP_LDF_SHF 0x001f0000000ULL
+#define OP_LDF_WR_AB 0x20000000000ULL
+#define OP_LDF_SRC_LMEXTN 0x40000000000ULL
+#define OP_LDF_DST_LMEXTN 0x80000000000ULL
+
+#define OP_CMD_A_SRC 0x000000000ffULL
+#define OP_CMD_CTX 0x00000000300ULL
+#define OP_CMD_B_SRC 0x0000003fc00ULL
+#define OP_CMD_TOKEN 0x000000c0000ULL
+#define OP_CMD_XFER 0x00001f00000ULL
+#define OP_CMD_CNT 0x0000e000000ULL
+#define OP_CMD_SIG 0x000f0000000ULL
+#define OP_CMD_TGT_CMD 0x07f00000000ULL
+#define OP_CMD_MODE 0x1c0000000000ULL
struct cmd_tgt_act {
u8 token;
@@ -204,6 +222,8 @@ enum cmd_tgt_map {
__CMD_TGT_MAP_SIZE,
};
+extern const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE];
+
enum cmd_mode {
CMD_MODE_40b_AB = 0,
CMD_MODE_40b_BA = 1,
@@ -215,11 +235,13 @@ enum cmd_ctx_swap {
CMD_CTX_NO_SWAP = 3,
};
-#define OP_LCSR_BASE 0x0fc00000000ULL
-#define OP_LCSR_A_SRC 0x000000003ffULL
-#define OP_LCSR_B_SRC 0x000000ffc00ULL
-#define OP_LCSR_WRITE 0x00000200000ULL
-#define OP_LCSR_ADDR 0x001ffc00000ULL
+#define OP_LCSR_BASE 0x0fc00000000ULL
+#define OP_LCSR_A_SRC 0x000000003ffULL
+#define OP_LCSR_B_SRC 0x000000ffc00ULL
+#define OP_LCSR_WRITE 0x00000200000ULL
+#define OP_LCSR_ADDR 0x001ffc00000ULL
+#define OP_LCSR_SRC_LMEXTN 0x40000000000ULL
+#define OP_LCSR_DST_LMEXTN 0x80000000000ULL
enum lcsr_wr_src {
LCSR_WR_AREG,
@@ -227,7 +249,122 @@ enum lcsr_wr_src {
LCSR_WR_IMM,
};
-#define OP_CARB_BASE 0x0e000000000ULL
-#define OP_CARB_OR 0x00000010000ULL
+#define OP_CARB_BASE 0x0e000000000ULL
+#define OP_CARB_OR 0x00000010000ULL
+
+/* Software register representation, independent of operand type */
+#define NN_REG_TYPE GENMASK(31, 24)
+#define NN_REG_LM_IDX GENMASK(23, 22)
+#define NN_REG_LM_IDX_HI BIT(23)
+#define NN_REG_LM_IDX_LO BIT(22)
+#define NN_REG_LM_MOD GENMASK(21, 20)
+#define NN_REG_VAL GENMASK(7, 0)
+
+enum nfp_bpf_reg_type {
+ NN_REG_GPR_A = BIT(0),
+ NN_REG_GPR_B = BIT(1),
+ NN_REG_GPR_BOTH = NN_REG_GPR_A | NN_REG_GPR_B,
+ NN_REG_NNR = BIT(2),
+ NN_REG_XFER = BIT(3),
+ NN_REG_IMM = BIT(4),
+ NN_REG_NONE = BIT(5),
+ NN_REG_LMEM = BIT(6),
+};
+
+enum nfp_bpf_lm_mode {
+ NN_LM_MOD_NONE = 0,
+ NN_LM_MOD_INC,
+ NN_LM_MOD_DEC,
+};
+
+#define reg_both(x) __enc_swreg((x), NN_REG_GPR_BOTH)
+#define reg_a(x) __enc_swreg((x), NN_REG_GPR_A)
+#define reg_b(x) __enc_swreg((x), NN_REG_GPR_B)
+#define reg_nnr(x) __enc_swreg((x), NN_REG_NNR)
+#define reg_xfer(x) __enc_swreg((x), NN_REG_XFER)
+#define reg_imm(x) __enc_swreg((x), NN_REG_IMM)
+#define reg_none() __enc_swreg(0, NN_REG_NONE)
+#define reg_lm(x, off) __enc_swreg_lm((x), NN_LM_MOD_NONE, (off))
+#define reg_lm_inc(x) __enc_swreg_lm((x), NN_LM_MOD_INC, 0)
+#define reg_lm_dec(x) __enc_swreg_lm((x), NN_LM_MOD_DEC, 0)
+#define __reg_lm(x, mod, off) __enc_swreg_lm((x), (mod), (off))
+
+typedef __u32 __bitwise swreg;
+
+static inline swreg __enc_swreg(u16 id, u8 type)
+{
+ return (__force swreg)(id | FIELD_PREP(NN_REG_TYPE, type));
+}
+
+static inline swreg __enc_swreg_lm(u8 id, enum nfp_bpf_lm_mode mode, u8 off)
+{
+ WARN_ON(id > 3 || (off && mode != NN_LM_MOD_NONE));
+
+ return (__force swreg)(FIELD_PREP(NN_REG_TYPE, NN_REG_LMEM) |
+ FIELD_PREP(NN_REG_LM_IDX, id) |
+ FIELD_PREP(NN_REG_LM_MOD, mode) |
+ off);
+}
+
+static inline u32 swreg_raw(swreg reg)
+{
+ return (__force u32)reg;
+}
+
+static inline enum nfp_bpf_reg_type swreg_type(swreg reg)
+{
+ return FIELD_GET(NN_REG_TYPE, swreg_raw(reg));
+}
+
+static inline u16 swreg_value(swreg reg)
+{
+ return FIELD_GET(NN_REG_VAL, swreg_raw(reg));
+}
+
+static inline bool swreg_lm_idx(swreg reg)
+{
+ return FIELD_GET(NN_REG_LM_IDX_LO, swreg_raw(reg));
+}
+
+static inline bool swreg_lmextn(swreg reg)
+{
+ return FIELD_GET(NN_REG_LM_IDX_HI, swreg_raw(reg));
+}
+
+static inline enum nfp_bpf_lm_mode swreg_lm_mode(swreg reg)
+{
+ return FIELD_GET(NN_REG_LM_MOD, swreg_raw(reg));
+}
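/* A minimal usage sketch (not part of the diff): how the swreg encoding above
 * round-trips for a local memory reference. The helper name and the values
 * below are made up for illustration only.
 */
static inline void swreg_encoding_example(void)
{
	swreg r = reg_lm(1, 2);	/* LM index 1, word offset 2, no post-modify */

	WARN_ON(swreg_type(r) != NN_REG_LMEM);
	WARN_ON(swreg_lm_idx(r) != 1);		/* low bit of the LM index */
	WARN_ON(swreg_lm_mode(r) != NN_LM_MOD_NONE);
	WARN_ON(swreg_value(r) != 2);		/* offset lives in NN_REG_VAL */
}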
+
+struct nfp_insn_ur_regs {
+ enum alu_dst_ab dst_ab;
+ u16 dst;
+ u16 areg, breg;
+ bool swap;
+ bool wr_both;
+ bool dst_lmextn;
+ bool src_lmextn;
+};
+
+struct nfp_insn_re_regs {
+ enum alu_dst_ab dst_ab;
+ u8 dst;
+ u8 areg, breg;
+ bool swap;
+ bool wr_both;
+ bool i8;
+ bool dst_lmextn;
+ bool src_lmextn;
+};
+
+int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg,
+ struct nfp_insn_ur_regs *reg);
+int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
+ struct nfp_insn_re_regs *reg, bool has_imm8);
+
+#define NFP_USTORE_PREFETCH_WINDOW 8
+
+int nfp_ustore_check_valid_no_ecc(u64 insn);
+u64 nfp_ustore_calc_ecc_insn(u64 insn);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index b0a452ba9039..782d452e0fc2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -255,7 +255,7 @@
* @NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code
*/
#define NFP_NET_CFG_BPF_ABI 0x0080
-#define NFP_NET_BPF_ABI 1
+#define NFP_NET_BPF_ABI 2
#define NFP_NET_CFG_BPF_CAP 0x0081
#define NFP_NET_BPF_CAP_RELO (1 << 0) /* seamless reload */
#define NFP_NET_CFG_BPF_MAX_LEN 0x0082
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 8f6ccc0c39e5..6e15d3c10ebf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -2308,7 +2308,7 @@ static int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
DP_VERBOSE(hwfn, QED_MSG_DCB, "selector = %d protocol = %d pri = %d\n",
app->selector, app->protocol, app->priority);
- if (app->priority < 0 || app->priority >= QED_MAX_PFC_PRIORITIES) {
+ if (app->priority >= QED_MAX_PFC_PRIORITIES) {
DP_INFO(hwfn, "Invalid priority %d\n", app->priority);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 8fc9c811f6e3..b2b1f87864ef 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -1415,7 +1415,12 @@ int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
{
+ struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+
qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1);
+ kfree(iwarp_info->mpa_bufs);
+ kfree(iwarp_info->partial_fpdus);
+ kfree(iwarp_info->mpa_intermediate_buf);
}
int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams)
@@ -1713,6 +1718,569 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
return 0;
}
+static struct qed_iwarp_fpdu *qed_iwarp_get_curr_fpdu(struct qed_hwfn *p_hwfn,
+ u16 cid)
+{
+ struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+ struct qed_iwarp_fpdu *partial_fpdu;
+ u32 idx;
+
+ idx = cid - qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_IWARP);
+ if (idx >= iwarp_info->max_num_partial_fpdus) {
+ DP_ERR(p_hwfn, "Invalid cid %x max_num_partial_fpdus=%x\n", cid,
+ iwarp_info->max_num_partial_fpdus);
+ return NULL;
+ }
+
+ partial_fpdu = &iwarp_info->partial_fpdus[idx];
+
+ return partial_fpdu;
+}
+
+enum qed_iwarp_mpa_pkt_type {
+ QED_IWARP_MPA_PKT_PACKED,
+ QED_IWARP_MPA_PKT_PARTIAL,
+ QED_IWARP_MPA_PKT_UNALIGNED
+};
+
+#define QED_IWARP_INVALID_FPDU_LENGTH 0xffff
+#define QED_IWARP_MPA_FPDU_LENGTH_SIZE (2)
+#define QED_IWARP_MPA_CRC32_DIGEST_SIZE (4)
+
+/* Pad to multiple of 4 */
+#define QED_IWARP_PDU_DATA_LEN_WITH_PAD(data_len) ALIGN(data_len, 4)
+#define QED_IWARP_FPDU_LEN_WITH_PAD(_mpa_len) \
+ (QED_IWARP_PDU_DATA_LEN_WITH_PAD((_mpa_len) + \
+ QED_IWARP_MPA_FPDU_LENGTH_SIZE) + \
+ QED_IWARP_MPA_CRC32_DIGEST_SIZE)
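/* A worked example of the padding above (not part of the diff): for a
 * hypothetical MPA length of 7 bytes,
 *
 *   QED_IWARP_FPDU_LEN_WITH_PAD(7) = ALIGN(7 + 2, 4) + 4 = 12 + 4 = 16
 *
 * i.e. the 2-byte MPA length field is added, the sum is rounded up to a
 * 4-byte boundary and the 4-byte CRC32 digest is appended.
 */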
+
+/* An fpdu can be fragmented over at most 3 bds: header, partial mpa, unaligned */
+#define QED_IWARP_MAX_BDS_PER_FPDU 3
+
+static const char * const pkt_type_str[] = {
+ "QED_IWARP_MPA_PKT_PACKED",
+ "QED_IWARP_MPA_PKT_PARTIAL",
+ "QED_IWARP_MPA_PKT_UNALIGNED"
+};
+
+static int
+qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_fpdu *fpdu,
+ struct qed_iwarp_ll2_buff *buf);
+
+static enum qed_iwarp_mpa_pkt_type
+qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_fpdu *fpdu,
+ u16 tcp_payload_len, u8 *mpa_data)
+{
+ enum qed_iwarp_mpa_pkt_type pkt_type;
+ u16 mpa_len;
+
+ if (fpdu->incomplete_bytes) {
+ pkt_type = QED_IWARP_MPA_PKT_UNALIGNED;
+ goto out;
+ }
+
+ /* Special case of one byte remaining: only the upper byte of the MPA
+ * length is here, the lower byte will be read from the next packet.
+ */
+ if (tcp_payload_len == 1) {
+ fpdu->fpdu_length = *mpa_data << BITS_PER_BYTE;
+ pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
+ goto out;
+ }
+
+ mpa_len = ntohs(*((u16 *)(mpa_data)));
+ fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
+
+ if (fpdu->fpdu_length <= tcp_payload_len)
+ pkt_type = QED_IWARP_MPA_PKT_PACKED;
+ else
+ pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
+
+out:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "MPA_ALIGN: %s: fpdu_length=0x%x tcp_payload_len:0x%x\n",
+ pkt_type_str[pkt_type], fpdu->fpdu_length, tcp_payload_len);
+
+ return pkt_type;
+}
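/* A sketch of how the classification above plays out (not part of the diff),
 * for a made-up FPDU whose padded length works out to 64 bytes:
 *
 *   fpdu->incomplete_bytes != 0 on entry      -> UNALIGNED (continuation)
 *   tcp_payload_len == 1                      -> PARTIAL (length split in two)
 *   tcp_payload_len >= 64, no leftover bytes  -> PACKED (whole FPDU in packet)
 *   tcp_payload_len <  64, no leftover bytes  -> PARTIAL (tail arrives later)
 */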
+
+static void
+qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf,
+ struct qed_iwarp_fpdu *fpdu,
+ struct unaligned_opaque_data *pkt_data,
+ u16 tcp_payload_size, u8 placement_offset)
+{
+ fpdu->mpa_buf = buf;
+ fpdu->pkt_hdr = buf->data_phys_addr + placement_offset;
+ fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset;
+ fpdu->mpa_frag = buf->data_phys_addr + pkt_data->first_mpa_offset;
+ fpdu->mpa_frag_virt = (u8 *)(buf->data) + pkt_data->first_mpa_offset;
+
+ if (tcp_payload_size == 1)
+ fpdu->incomplete_bytes = QED_IWARP_INVALID_FPDU_LENGTH;
+ else if (tcp_payload_size < fpdu->fpdu_length)
+ fpdu->incomplete_bytes = fpdu->fpdu_length - tcp_payload_size;
+ else
+ fpdu->incomplete_bytes = 0; /* complete fpdu */
+
+ fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes;
+}
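
Hypothetical numbers plugged into the bookkeeping above, to show how the two fields relate:

/*
 *   fpdu_length = 1500, tcp_payload_size = 1000
 *     incomplete_bytes = 1500 - 1000 = 500    (still expected on a later segment)
 *     mpa_frag_len     = 1500 -  500 = 1000   (already present in this buffer)
 *
 *   tcp_payload_size == 1 is flagged with the invalid marker instead, because
 *   only the high byte of the MPA length field has been seen so far.
 */
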
+
+static int
+qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_fpdu *fpdu,
+ struct unaligned_opaque_data *pkt_data,
+ struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size)
+{
+ u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf;
+ int rc;
+
+ /* Need to copy the data from the partial packet stored in fpdu
+ * to the new buf; for this we also need to move the data currently
+ * placed in the buf. The assumption is that the buffer is big enough
+ * since fpdu_length <= mss. We use an intermediate buffer since
+ * we may need to copy the new data to an overlapping location.
+ */
+ if ((fpdu->mpa_frag_len + tcp_payload_size) > (u16)buf->buff_size) {
+ DP_ERR(p_hwfn,
+ "MPA ALIGN: Unexpected: buffer is not large enough for split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
+ buf->buff_size, fpdu->mpa_frag_len,
+ tcp_payload_size, fpdu->incomplete_bytes);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n",
+ fpdu->mpa_frag_virt, fpdu->mpa_frag_len,
+ (u8 *)(buf->data) + pkt_data->first_mpa_offset,
+ tcp_payload_size);
+
+ memcpy(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len);
+ memcpy(tmp_buf + fpdu->mpa_frag_len,
+ (u8 *)(buf->data) + pkt_data->first_mpa_offset,
+ tcp_payload_size);
+
+ rc = qed_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf);
+ if (rc)
+ return rc;
+
+ /* If we managed to post the buffer, copy the data to the new buffer;
+ * otherwise this will occur in the next round...
+ */
+ memcpy((u8 *)(buf->data), tmp_buf,
+ fpdu->mpa_frag_len + tcp_payload_size);
+
+ fpdu->mpa_buf = buf;
+ /* fpdu->pkt_hdr remains as is */
+ /* fpdu->mpa_frag is overridden with new buf */
+ fpdu->mpa_frag = buf->data_phys_addr;
+ fpdu->mpa_frag_virt = buf->data;
+ fpdu->mpa_frag_len += tcp_payload_size;
+
+ fpdu->incomplete_bytes -= tcp_payload_size;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "MPA ALIGN: split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
+ buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size,
+ fpdu->incomplete_bytes);
+
+ return 0;
+}
+
+static void
+qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_fpdu *fpdu, u8 *mpa_data)
+{
+ u16 mpa_len;
+
+ /* Update incomplete packets if needed */
+ if (fpdu->incomplete_bytes == QED_IWARP_INVALID_FPDU_LENGTH) {
+ /* Missing lower byte is now available */
+ mpa_len = fpdu->fpdu_length | *mpa_data;
+ fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
+ fpdu->mpa_frag_len = fpdu->fpdu_length;
+ /* one byte of hdr */
+ fpdu->incomplete_bytes = fpdu->fpdu_length - 1;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "MPA_ALIGN: Partial header mpa_len=%x fpdu_length=%x incomplete_bytes=%x\n",
+ mpa_len, fpdu->fpdu_length, fpdu->incomplete_bytes);
+ }
+}
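
A worked trace of this split-header path, continuing the one-byte case from qed_iwarp_mpa_classify() above (values invented):

/*
 *   The MPA length field is 0x012a (298), but the first TCP segment ends
 *   right after its high byte.
 *
 *   classify() (1-byte payload):  fpdu_length = 0x01 << 8            = 0x0100
 *   update_fpdu_length():         mpa_len     = 0x0100 | 0x2a        = 0x012a (298)
 *                                 fpdu_length = ALIGN(298 + 2, 4) + 4 = 304
 *                                 mpa_frag_len     = 304
 *                                 incomplete_bytes = 304 - 1         = 303
 */
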
+
+#define QED_IWARP_IS_RIGHT_EDGE(_curr_pkt) \
+ (GET_FIELD((_curr_pkt)->flags, \
+ UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE))
+
+/* This function is used to recycle a buffer using the ll2 drop option. It
+ * relies on the drop mechanism to ensure that all buffers posted to tx before
+ * this one were completed. The buffer sent here is passed as the cookie of the
+ * tx completion and can then be reposted to the rx chain when done. The flow
+ * that requires this is the one where an FPDU splits over more than 3 tcp
+ * segments. In this case the driver needs to re-post an rx buffer instead of
+ * the one received, but it can't simply repost the buffer it copied from,
+ * since that buffer may originally have been a packed FPDU that is partially
+ * posted to FW. The driver needs to ensure FW is done with it first.
+ */
+static int
+qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_fpdu *fpdu,
+ struct qed_iwarp_ll2_buff *buf)
+{
+ struct qed_ll2_tx_pkt_info tx_pkt;
+ u8 ll2_handle;
+ int rc;
+
+ memset(&tx_pkt, 0, sizeof(tx_pkt));
+ tx_pkt.num_of_bds = 1;
+ tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
+ tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
+ tx_pkt.first_frag = fpdu->pkt_hdr;
+ tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
+ buf->piggy_buf = NULL;
+ tx_pkt.cookie = buf;
+
+ ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
+
+ rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
+ if (rc)
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Can't drop packet rc=%d\n", rc);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "MPA_ALIGN: send drop tx packet [%lx, 0x%x], buf=%p, rc=%d\n",
+ (unsigned long int)tx_pkt.first_frag,
+ tx_pkt.first_frag_len, buf, rc);
+
+ return rc;
+}
+
+static int
+qed_iwarp_win_right_edge(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu)
+{
+ struct qed_ll2_tx_pkt_info tx_pkt;
+ u8 ll2_handle;
+ int rc;
+
+ memset(&tx_pkt, 0, sizeof(tx_pkt));
+ tx_pkt.num_of_bds = 1;
+ tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
+ tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
+
+ tx_pkt.first_frag = fpdu->pkt_hdr;
+ tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
+ tx_pkt.enable_ip_cksum = true;
+ tx_pkt.enable_l4_cksum = true;
+ tx_pkt.calc_ip_len = true;
+ /* vlan overload with enum iwarp_ll2_tx_queues */
+ tx_pkt.vlan = IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE;
+
+ ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
+
+ rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
+ if (rc)
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Can't send right edge rc=%d\n", rc);
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "MPA_ALIGN: Sent right edge FPDU num_bds=%d [%lx, 0x%x], rc=%d\n",
+ tx_pkt.num_of_bds,
+ (unsigned long int)tx_pkt.first_frag,
+ tx_pkt.first_frag_len, rc);
+
+ return rc;
+}
+
+static int
+qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_fpdu *fpdu,
+ struct unaligned_opaque_data *curr_pkt,
+ struct qed_iwarp_ll2_buff *buf,
+ u16 tcp_payload_size, enum qed_iwarp_mpa_pkt_type pkt_type)
+{
+ struct qed_ll2_tx_pkt_info tx_pkt;
+ u8 ll2_handle;
+ int rc;
+
+ memset(&tx_pkt, 0, sizeof(tx_pkt));
+
+ /* An unaligned packet means it's split over two tcp segments, so the
+ * complete packet requires 3 bds: one for the header, one for the
+ * part of the fpdu in the first tcp segment, and the last fragment
+ * pointing to the remainder of the fpdu. A packed fpdu requires only
+ * two bds: one for the header and one for the data.
+ */
+ tx_pkt.num_of_bds = (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED) ? 3 : 2;
+ tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
+ tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; /* offset in words */
+
+ /* Send the mpa_buf only with the last fpdu (in case of packed) */
+ if (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED ||
+ tcp_payload_size <= fpdu->fpdu_length)
+ tx_pkt.cookie = fpdu->mpa_buf;
+
+ tx_pkt.first_frag = fpdu->pkt_hdr;
+ tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
+ tx_pkt.enable_ip_cksum = true;
+ tx_pkt.enable_l4_cksum = true;
+ tx_pkt.calc_ip_len = true;
+ /* vlan overload with enum iwarp_ll2_tx_queues */
+ tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE;
+
+ /* Special case of an unaligned packet that is not packed: both
+ * buffers need to be sent as the cookie so they are released.
+ */
+ if (tcp_payload_size == fpdu->incomplete_bytes)
+ fpdu->mpa_buf->piggy_buf = buf;
+
+ ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
+
+ /* Set first fragment to header */
+ rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
+ if (rc)
+ goto out;
+
+ /* Set second fragment to first part of packet */
+ rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle,
+ fpdu->mpa_frag,
+ fpdu->mpa_frag_len);
+ if (rc)
+ goto out;
+
+ if (!fpdu->incomplete_bytes)
+ goto out;
+
+ /* Set third fragment to second part of the packet */
+ rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
+ ll2_handle,
+ buf->data_phys_addr +
+ curr_pkt->first_mpa_offset,
+ fpdu->incomplete_bytes);
+out:
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "MPA_ALIGN: Sent FPDU num_bds=%d first_frag_len=%x, mpa_frag_len=0x%x, incomplete_bytes:0x%x rc=%d\n",
+ tx_pkt.num_of_bds,
+ tx_pkt.first_frag_len,
+ fpdu->mpa_frag_len,
+ fpdu->incomplete_bytes, rc);
+
+ return rc;
+}
+
+static void
+qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn,
+ struct unaligned_opaque_data *curr_pkt,
+ u32 opaque_data0, u32 opaque_data1)
+{
+ u64 opaque_data;
+
+ opaque_data = HILO_64(opaque_data1, opaque_data0);
+ *curr_pkt = *((struct unaligned_opaque_data *)&opaque_data);
+
+ curr_pkt->first_mpa_offset = curr_pkt->tcp_payload_offset +
+ le16_to_cpu(curr_pkt->first_mpa_offset);
+ curr_pkt->cid = le32_to_cpu(curr_pkt->cid);
+}
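
What the cast above amounts to, sketched out (the exact field layout of struct unaligned_opaque_data is firmware-defined; the packing shown is only assumed for illustration):

/*
 *   opaque_data = ((u64)opaque_data1 << 32) | opaque_data0;
 *   *curr_pkt   = *(struct unaligned_opaque_data *)&opaque_data;
 *
 * after which cid and first_mpa_offset are converted from little endian and
 * the TCP payload offset of this packet is folded into first_mpa_offset so
 * that it points at the MPA data inside the received buffer.
 */
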
+
+/* This function is called when an unaligned or incomplete MPA packet arrives;
+ * the driver needs to align the packet, perhaps using previous data, and send
+ * it down to FW once it is aligned.
+ */
+static int
+qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ll2_mpa_buf *mpa_buf)
+{
+ struct unaligned_opaque_data *curr_pkt = &mpa_buf->data;
+ struct qed_iwarp_ll2_buff *buf = mpa_buf->ll2_buf;
+ enum qed_iwarp_mpa_pkt_type pkt_type;
+ struct qed_iwarp_fpdu *fpdu;
+ int rc = -EINVAL;
+ u8 *mpa_data;
+
+ fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, curr_pkt->cid & 0xffff);
+ if (!fpdu) { /* something corrupt with cid, post rx back */
+ DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n",
+ curr_pkt->cid);
+ goto err;
+ }
+
+ do {
+ mpa_data = ((u8 *)(buf->data) + curr_pkt->first_mpa_offset);
+
+ pkt_type = qed_iwarp_mpa_classify(p_hwfn, fpdu,
+ mpa_buf->tcp_payload_len,
+ mpa_data);
+
+ switch (pkt_type) {
+ case QED_IWARP_MPA_PKT_PARTIAL:
+ qed_iwarp_init_fpdu(buf, fpdu,
+ curr_pkt,
+ mpa_buf->tcp_payload_len,
+ mpa_buf->placement_offset);
+
+ if (!QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
+ mpa_buf->tcp_payload_len = 0;
+ break;
+ }
+
+ rc = qed_iwarp_win_right_edge(p_hwfn, fpdu);
+
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Can't send FPDU:reset rc=%d\n", rc);
+ memset(fpdu, 0, sizeof(*fpdu));
+ break;
+ }
+
+ mpa_buf->tcp_payload_len = 0;
+ break;
+ case QED_IWARP_MPA_PKT_PACKED:
+ qed_iwarp_init_fpdu(buf, fpdu,
+ curr_pkt,
+ mpa_buf->tcp_payload_len,
+ mpa_buf->placement_offset);
+
+ rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
+ mpa_buf->tcp_payload_len,
+ pkt_type);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Can't send FPDU:reset rc=%d\n", rc);
+ memset(fpdu, 0, sizeof(*fpdu));
+ break;
+ }
+
+ mpa_buf->tcp_payload_len -= fpdu->fpdu_length;
+ curr_pkt->first_mpa_offset += fpdu->fpdu_length;
+ break;
+ case QED_IWARP_MPA_PKT_UNALIGNED:
+ qed_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data);
+ if (mpa_buf->tcp_payload_len < fpdu->incomplete_bytes) {
+ /* special handling of fpdu split over more
+ * than 2 segments
+ */
+ if (QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
+ rc = qed_iwarp_win_right_edge(p_hwfn,
+ fpdu);
+ /* packet will be re-processed later */
+ if (rc)
+ return rc;
+ }
+
+ rc = qed_iwarp_cp_pkt(p_hwfn, fpdu, curr_pkt,
+ buf,
+ mpa_buf->tcp_payload_len);
+ if (rc) /* packet will be re-processed later */
+ return rc;
+
+ mpa_buf->tcp_payload_len = 0;
+ break;
+ }
+
+ rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
+ mpa_buf->tcp_payload_len,
+ pkt_type);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Can't send FPDU:delay rc=%d\n", rc);
+ /* don't reset fpdu -> we need it for next
+ * classify
+ */
+ break;
+ }
+
+ mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes;
+ curr_pkt->first_mpa_offset += fpdu->incomplete_bytes;
+ /* The framed PDU was sent - no more incomplete bytes */
+ fpdu->incomplete_bytes = 0;
+ break;
+ }
+ } while (mpa_buf->tcp_payload_len && !rc);
+
+ return rc;
+
+err:
+ qed_iwarp_ll2_post_rx(p_hwfn,
+ buf,
+ p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle);
+ return rc;
+}
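
A condensed, editorial pseudocode view of the loop above (error handling and the window-right-edge notifications are trimmed):

/*
 *	while (tcp_payload_len && !rc) {
 *		switch (classify(fpdu, data)) {
 *		case PACKED:	a whole fpdu fits in this segment:
 *				init_fpdu(); send_fpdu();
 *				advance past fpdu_length and loop again;
 *		case PARTIAL:	an fpdu starts here but its tail is missing:
 *				init_fpdu(); keep the fragment for later;
 *				remaining payload is consumed;
 *		case UNALIGNED:	this segment continues an earlier fpdu:
 *				update_fpdu_length();
 *				if payload < incomplete_bytes: copy aside, wait;
 *				else: send_fpdu(); advance past incomplete_bytes;
 *		}
 *	}
 */
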
+
+static void qed_iwarp_process_pending_pkts(struct qed_hwfn *p_hwfn)
+{
+ struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+ struct qed_iwarp_ll2_mpa_buf *mpa_buf = NULL;
+ int rc;
+
+ while (!list_empty(&iwarp_info->mpa_buf_pending_list)) {
+ mpa_buf = list_first_entry(&iwarp_info->mpa_buf_pending_list,
+ struct qed_iwarp_ll2_mpa_buf,
+ list_entry);
+
+ rc = qed_iwarp_process_mpa_pkt(p_hwfn, mpa_buf);
+
+ /* -EBUSY means break and continue processing later; don't
+ * remove the buf from the pending list.
+ */
+ if (rc == -EBUSY)
+ break;
+
+ list_del(&mpa_buf->list_entry);
+ list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_list);
+
+ if (rc) { /* different error, don't continue */
+ DP_NOTICE(p_hwfn, "process pkts failed rc=%d\n", rc);
+ break;
+ }
+ }
+}
+
+static void
+qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
+{
+ struct qed_iwarp_ll2_mpa_buf *mpa_buf;
+ struct qed_iwarp_info *iwarp_info;
+ struct qed_hwfn *p_hwfn = cxt;
+
+ iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+ mpa_buf = list_first_entry(&iwarp_info->mpa_buf_list,
+ struct qed_iwarp_ll2_mpa_buf, list_entry);
+ if (!mpa_buf) {
+ DP_ERR(p_hwfn, "No free mpa buf\n");
+ goto err;
+ }
+
+ list_del(&mpa_buf->list_entry);
+ qed_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data,
+ data->opaque_data_0, data->opaque_data_1);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "LL2 MPA CompRx payload_len:0x%x\tfirst_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n",
+ data->length.packet_length, mpa_buf->data.first_mpa_offset,
+ mpa_buf->data.tcp_payload_offset, mpa_buf->data.flags,
+ mpa_buf->data.cid);
+
+ mpa_buf->ll2_buf = data->cookie;
+ mpa_buf->tcp_payload_len = data->length.packet_length -
+ mpa_buf->data.first_mpa_offset;
+ mpa_buf->data.first_mpa_offset += data->u.placement_offset;
+ mpa_buf->placement_offset = data->u.placement_offset;
+
+ list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_pending_list);
+
+ qed_iwarp_process_pending_pkts(p_hwfn);
+ return;
+err:
+ qed_iwarp_ll2_post_rx(p_hwfn, data->cookie,
+ iwarp_info->ll2_mpa_handle);
+}
+
static void
qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
{
@@ -1855,10 +2423,25 @@ static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
bool b_last_fragment, bool b_last_packet)
{
struct qed_iwarp_ll2_buff *buffer = cookie;
+ struct qed_iwarp_ll2_buff *piggy;
struct qed_hwfn *p_hwfn = cxt;
+ if (!buffer) /* can happen in packed mpa unaligned... */
+ return;
+
/* this was originally an rx packet, post it back */
+ piggy = buffer->piggy_buf;
+ if (piggy) {
+ buffer->piggy_buf = NULL;
+ qed_iwarp_ll2_post_rx(p_hwfn, piggy, connection_handle);
+ }
+
qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle);
+
+ if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle)
+ qed_iwarp_process_pending_pkts(p_hwfn);
}
static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
@@ -1871,12 +2454,44 @@ static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
if (!buffer)
return;
+ if (buffer->piggy_buf) {
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ buffer->piggy_buf->buff_size,
+ buffer->piggy_buf->data,
+ buffer->piggy_buf->data_phys_addr);
+
+ kfree(buffer->piggy_buf);
+ }
+
dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
buffer->data, buffer->data_phys_addr);
kfree(buffer);
}
+/* The only slowpath for iwarp ll2 is the unaligned flush. When this completion
+ * is received, the FPDU needs to be reset.
+ */
+void
+qed_iwarp_ll2_slowpath(void *cxt,
+ u8 connection_handle,
+ u32 opaque_data_0, u32 opaque_data_1)
+{
+ struct unaligned_opaque_data unalign_data;
+ struct qed_hwfn *p_hwfn = cxt;
+ struct qed_iwarp_fpdu *fpdu;
+
+ qed_iwarp_mpa_get_data(p_hwfn, &unalign_data,
+ opaque_data_0, opaque_data_1);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n",
+ unalign_data.cid);
+
+ fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)unalign_data.cid);
+ if (fpdu)
+ memset(fpdu, 0, sizeof(*fpdu));
+}
+
static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
@@ -1902,6 +2517,16 @@ static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
}
+ if (iwarp_info->ll2_mpa_handle != QED_IWARP_HANDLE_INVAL) {
+ rc = qed_ll2_terminate_connection(p_hwfn,
+ iwarp_info->ll2_mpa_handle);
+ if (rc)
+ DP_INFO(p_hwfn, "Failed to terminate mpa connection\n");
+
+ qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
+ iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
+ }
+
qed_llh_remove_mac_filter(p_hwfn,
p_ptt, p_hwfn->p_rdma_info->iwarp.mac_addr);
return rc;
@@ -1953,12 +2578,15 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
struct qed_iwarp_info *iwarp_info;
struct qed_ll2_acquire_data data;
struct qed_ll2_cbs cbs;
+ u32 mpa_buff_size;
u16 n_ooo_bufs;
int rc = 0;
+ int i;
iwarp_info = &p_hwfn->p_rdma_info->iwarp;
iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
+ iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
iwarp_info->max_mtu = params->max_mtu;
@@ -2029,6 +2657,68 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
if (rc)
goto err;
+ /* Start Unaligned MPA connection */
+ cbs.rx_comp_cb = qed_iwarp_ll2_comp_mpa_pkt;
+ cbs.slowpath_cb = qed_iwarp_ll2_slowpath;
+
+ memset(&data, 0, sizeof(data));
+ data.input.conn_type = QED_LL2_TYPE_IWARP;
+ data.input.mtu = params->max_mtu;
+ /* FW requires that once a packet arrives OOO, at least 2 rx
+ * buffers be available on the unaligned connection to handle
+ * the case that it is a partial fpdu.
+ */
+ data.input.rx_num_desc = n_ooo_bufs * 2;
+ data.input.tx_num_desc = data.input.rx_num_desc;
+ data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU;
+ data.p_connection_handle = &iwarp_info->ll2_mpa_handle;
+ data.input.secondary_queue = true;
+ data.cbs = &cbs;
+
+ rc = qed_ll2_acquire_connection(p_hwfn, &data);
+ if (rc)
+ goto err;
+
+ rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
+ if (rc)
+ goto err;
+
+ mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
+ rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
+ data.input.rx_num_desc,
+ mpa_buff_size,
+ iwarp_info->ll2_mpa_handle);
+ if (rc)
+ goto err;
+
+ iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps,
+ sizeof(*iwarp_info->partial_fpdus),
+ GFP_KERNEL);
+ if (!iwarp_info->partial_fpdus)
+ goto err;
+
+ iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
+
+ iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL);
+ if (!iwarp_info->mpa_intermediate_buf)
+ goto err;
+
+ /* The mpa_bufs array serves for pending RX packets received on the
+ * mpa ll2 that don't have a place on the tx ring and require later
+ * processing. We can't fail on allocation of such a struct, therefore
+ * we allocate enough to take care of all rx packets.
+ */
+ iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc,
+ sizeof(*iwarp_info->mpa_bufs),
+ GFP_KERNEL);
+ if (!iwarp_info->mpa_bufs)
+ goto err;
+
+ INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list);
+ INIT_LIST_HEAD(&iwarp_info->mpa_buf_list);
+ for (i = 0; i < data.input.rx_num_desc; i++)
+ list_add_tail(&iwarp_info->mpa_bufs[i].list_entry,
+ &iwarp_info->mpa_buf_list);
return rc;
err:
qed_iwarp_ll2_stop(p_hwfn, p_ptt);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
index 9e2bfde894df..c1ecd743305f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
@@ -55,15 +55,43 @@ enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state);
#define QED_IWARP_HANDLE_INVAL (0xff)
struct qed_iwarp_ll2_buff {
+ struct qed_iwarp_ll2_buff *piggy_buf;
void *data;
dma_addr_t data_phys_addr;
u32 buff_size;
};
+struct qed_iwarp_ll2_mpa_buf {
+ struct list_head list_entry;
+ struct qed_iwarp_ll2_buff *ll2_buf;
+ struct unaligned_opaque_data data;
+ u16 tcp_payload_len;
+ u8 placement_offset;
+};
+
+/* In some cases an fpdu will arrive with only one byte of the header; in this
+ * case fpdu_length will be partial (contain only the higher byte) and
+ * incomplete_bytes will contain the invalid value.
+ */
+#define QED_IWARP_INVALID_INCOMPLETE_BYTES 0xffff
+
+struct qed_iwarp_fpdu {
+ struct qed_iwarp_ll2_buff *mpa_buf;
+ void *mpa_frag_virt;
+ dma_addr_t mpa_frag;
+ dma_addr_t pkt_hdr;
+ u16 mpa_frag_len;
+ u16 fpdu_length;
+ u16 incomplete_bytes;
+ u8 pkt_hdr_size;
+};
+
struct qed_iwarp_info {
struct list_head listen_list; /* qed_iwarp_listener */
struct list_head ep_list; /* qed_iwarp_ep */
struct list_head ep_free_list; /* pre-allocated ep's */
+ struct list_head mpa_buf_list; /* list of mpa_bufs */
+ struct list_head mpa_buf_pending_list;
spinlock_t iw_lock; /* for iwarp resources */
spinlock_t qp_lock; /* for teardown races */
u32 rcv_wnd_scale;
@@ -73,9 +101,14 @@ struct qed_iwarp_info {
u8 tcp_flags;
u8 ll2_syn_handle;
u8 ll2_ooo_handle;
+ u8 ll2_mpa_handle;
u8 peer2peer;
enum mpa_negotiation_mode mpa_rev;
enum mpa_rtr_type rtr_type;
+ struct qed_iwarp_fpdu *partial_fpdus;
+ struct qed_iwarp_ll2_mpa_buf *mpa_bufs;
+ u8 *mpa_intermediate_buf;
+ u16 max_num_partial_fpdus;
};
enum qed_iwarp_ep_state {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 250afa5486cf..047f556ca62e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -423,6 +423,41 @@ static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
}
static int
+qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn,
+ union core_rx_cqe_union *p_cqe,
+ unsigned long *p_lock_flags)
+{
+ struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+ struct core_rx_slow_path_cqe *sp_cqe;
+
+ sp_cqe = &p_cqe->rx_cqe_sp;
+ if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) {
+ DP_NOTICE(p_hwfn,
+ "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n",
+ sp_cqe->ramrod_cmd_id);
+ return -EINVAL;
+ }
+
+ if (!p_ll2_conn->cbs.slowpath_cb) {
+ DP_NOTICE(p_hwfn,
+ "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n");
+ return -EINVAL;
+ }
+
+ spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
+
+ p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie,
+ p_ll2_conn->my_id,
+ le32_to_cpu(sp_cqe->opaque_data.data[0]),
+ le32_to_cpu(sp_cqe->opaque_data.data[1]));
+
+ spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
+
+ return 0;
+}
+
+static int
qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn,
union core_rx_cqe_union *p_cqe,
@@ -495,8 +530,8 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
switch (cqe->rx_cqe_sp.type) {
case CORE_RX_CQE_TYPE_SLOW_PATH:
- DP_NOTICE(p_hwfn, "LL2 - unexpected Rx CQE slowpath\n");
- rc = -EINVAL;
+ rc = qed_ll2_handle_slowpath(p_hwfn, p_ll2_conn,
+ cqe, &flags);
break;
case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
case CORE_RX_CQE_TYPE_REGULAR:
@@ -894,7 +929,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
p_ramrod->queue_id = p_ll2_conn->queue_id;
- p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_OOO) ? 0 : 1;
+ p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;
if ((IS_MF_DEFAULT(p_hwfn) || IS_MF_SI(p_hwfn)) &&
p_ramrod->main_func_queue && (conn_type != QED_LL2_TYPE_ROCE) &&
@@ -1105,6 +1140,7 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_info)
{
struct qed_ll2_tx_packet *p_descq;
+ u32 desc_size;
u32 capacity;
int rc = 0;
@@ -1122,13 +1158,17 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
goto out;
capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
- p_descq = kcalloc(capacity, sizeof(struct qed_ll2_tx_packet),
- GFP_KERNEL);
+ /* First element is part of the packet, rest are flexibly added */
+ desc_size = (sizeof(*p_descq) +
+ (p_ll2_info->input.tx_max_bds_per_packet - 1) *
+ sizeof(p_descq->bds_set));
+
+ p_descq = kcalloc(capacity, desc_size, GFP_KERNEL);
if (!p_descq) {
rc = -ENOMEM;
goto out;
}
- p_ll2_info->tx_queue.descq_array = p_descq;
+ p_ll2_info->tx_queue.descq_mem = p_descq;
DP_VERBOSE(p_hwfn, QED_MSG_LL2,
"Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
@@ -1209,6 +1249,7 @@ qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
+ p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb;
p_ll2_info->cbs.cookie = cbs->cookie;
return 0;
@@ -1260,6 +1301,11 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ?
CORE_TX_DEST_NW : CORE_TX_DEST_LB;
+ if (data->input.conn_type == QED_LL2_TYPE_OOO ||
+ data->input.secondary_queue)
+ p_ll2_info->main_func_queue = false;
+ else
+ p_ll2_info->main_func_queue = true;
/* Correct maximum number of Tx BDs */
p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet;
@@ -1359,11 +1405,13 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
{
struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_info *p_ll2_conn;
+ struct qed_ll2_tx_packet *p_pkt;
struct qed_ll2_rx_queue *p_rx;
struct qed_ll2_tx_queue *p_tx;
struct qed_ptt *p_ptt;
int rc = -EINVAL;
u32 i, capacity;
+ u32 desc_size;
u8 qid;
p_ptt = qed_ptt_acquire(p_hwfn);
@@ -1397,9 +1445,15 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
INIT_LIST_HEAD(&p_tx->sending_descq);
spin_lock_init(&p_tx->lock);
capacity = qed_chain_get_capacity(&p_tx->txq_chain);
- for (i = 0; i < capacity; i++)
- list_add_tail(&p_tx->descq_array[i].list_entry,
- &p_tx->free_descq);
+ /* First element is part of the packet, rest are flexibly added */
+ desc_size = (sizeof(*p_pkt) +
+ (p_ll2_conn->input.tx_max_bds_per_packet - 1) *
+ sizeof(p_pkt->bds_set));
+
+ for (i = 0; i < capacity; i++) {
+ p_pkt = p_tx->descq_mem + desc_size * i;
+ list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+ }
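	/*
	 * Illustration (not part of the patch): with this change each tx
	 * descriptor is variable-sized, e.g. with tx_max_bds_per_packet = 3:
	 *
	 *   desc_size = sizeof(struct qed_ll2_tx_packet)
	 *             + (3 - 1) * sizeof(p_pkt->bds_set);   (bds_set is declared [1])
	 *
	 * so entry i lives at descq_mem + i * desc_size rather than at
	 * descq_array[i], and bds_set[0..2] of every entry are usable.
	 */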
p_tx->cur_completing_bd_idx = 0;
p_tx->bds_idx = 0;
p_tx->b_completing_packet = false;
@@ -1579,11 +1633,28 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
: CORE_RROCE;
- tx_dest = (pkt->tx_dest == QED_LL2_TX_DEST_NW) ? CORE_TX_DEST_NW
- : CORE_TX_DEST_LB;
+ switch (pkt->tx_dest) {
+ case QED_LL2_TX_DEST_NW:
+ tx_dest = CORE_TX_DEST_NW;
+ break;
+ case QED_LL2_TX_DEST_LB:
+ tx_dest = CORE_TX_DEST_LB;
+ break;
+ case QED_LL2_TX_DEST_DROP:
+ tx_dest = CORE_TX_DEST_DROP;
+ break;
+ default:
+ tx_dest = CORE_TX_DEST_LB;
+ break;
+ }
start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
- start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn) &&
+ p_ll2->input.conn_type == QED_LL2_TYPE_OOO)
+ start_bd->nw_vlan_or_lb_echo =
+ cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE);
+ else
+ start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W,
cpu_to_le16(pkt->l4_hdr_offset_w));
SET_FIELD(start_bd->bitfield1, CORE_TX_BD_TX_DST, tx_dest);
@@ -1591,6 +1662,9 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum));
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum));
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len));
start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);
@@ -1698,7 +1772,7 @@ int qed_ll2_prepare_tx_packet(void *cxt,
p_tx = &p_ll2_conn->tx_queue;
p_tx_chain = &p_tx->txq_chain;
- if (pkt->num_of_bds > CORE_LL2_TX_MAX_BDS_PER_PACKET)
+ if (pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet)
return -EIO;
spin_lock_irqsave(&p_tx->lock, flags);
@@ -1858,7 +1932,7 @@ void qed_ll2_release_connection(void *cxt, u8 connection_handle)
qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
}
- kfree(p_ll2_conn->tx_queue.descq_array);
+ kfree(p_ll2_conn->tx_queue.descq_mem);
qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
kfree(p_ll2_conn->rx_queue.descq_array);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index a822528e9c63..f65817012e97 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -63,17 +63,14 @@ struct qed_ll2_rx_packet {
struct qed_ll2_tx_packet {
struct list_head list_entry;
u16 bd_used;
- u16 vlan;
- u16 l4_hdr_offset_w;
- u8 bd_flags;
bool notify_fw;
void *cookie;
-
+ /* Flexible array of bds_set, its size determined by tx_max_bds_per_packet */
struct {
struct core_tx_bd *txq_bd;
dma_addr_t tx_frag;
u16 frag_len;
- } bds_set[ETH_TX_MAX_BDS_PER_NON_LSO_PACKET];
+ } bds_set[1];
};
struct qed_ll2_rx_queue {
@@ -101,7 +98,7 @@ struct qed_ll2_tx_queue {
struct list_head active_descq;
struct list_head free_descq;
struct list_head sending_descq;
- struct qed_ll2_tx_packet *descq_array;
+ void *descq_mem; /* memory for variable-sized qed_ll2_tx_packet */
struct qed_ll2_tx_packet *cur_send_packet;
struct qed_ll2_tx_packet cur_completing_packet;
u16 cur_completing_bd_idx;
@@ -124,6 +121,7 @@ struct qed_ll2_info {
bool b_active;
enum core_tx_dest tx_dest;
u8 tx_stats_en;
+ bool main_func_queue;
struct qed_ll2_rx_queue rx_queue;
struct qed_ll2_tx_queue tx_queue;
struct qed_ll2_cbs cbs;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 866444b6c82f..2c6d7c69c8f7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -51,15 +51,11 @@
#define NSS_COMMON_CLK_SRC_CTRL_RGMII(x) 1
#define NSS_COMMON_CLK_SRC_CTRL_SGMII(x) ((x >= 2) ? 1 : 0)
-#define NSS_COMMON_MACSEC_CTL 0x28
-#define NSS_COMMON_MACSEC_CTL_EXT_BYPASS_EN(x) (1 << x)
-
#define NSS_COMMON_GMAC_CTL(x) (0x30 + (x * 4))
#define NSS_COMMON_GMAC_CTL_CSYS_REQ BIT(19)
#define NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL BIT(16)
#define NSS_COMMON_GMAC_CTL_IFG_LIMIT_OFFSET 8
#define NSS_COMMON_GMAC_CTL_IFG_OFFSET 0
-#define NSS_COMMON_GMAC_CTL_IFG_MASK 0x3f
#define NSS_COMMON_CLK_DIV_RGMII_1000 1
#define NSS_COMMON_CLK_DIV_RGMII_100 9
@@ -68,9 +64,6 @@
#define NSS_COMMON_CLK_DIV_SGMII_100 4
#define NSS_COMMON_CLK_DIV_SGMII_10 49
-#define QSGMII_PCS_MODE_CTL 0x68
-#define QSGMII_PCS_MODE_CTL_AUTONEG_EN(x) BIT((x * 8) + 7)
-
#define QSGMII_PCS_CAL_LCKDT_CTL 0x120
#define QSGMII_PCS_CAL_LCKDT_CTL_RST BIT(19)
@@ -83,15 +76,10 @@
#define QSGMII_PHY_TX_DRIVER_EN BIT(3)
#define QSGMII_PHY_QSGMII_EN BIT(7)
#define QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET 12
-#define QSGMII_PHY_PHASE_LOOP_GAIN_MASK 0x7
#define QSGMII_PHY_RX_DC_BIAS_OFFSET 18
-#define QSGMII_PHY_RX_DC_BIAS_MASK 0x3
#define QSGMII_PHY_RX_INPUT_EQU_OFFSET 20
-#define QSGMII_PHY_RX_INPUT_EQU_MASK 0x3
#define QSGMII_PHY_CDR_PI_SLEW_OFFSET 22
-#define QSGMII_PHY_CDR_PI_SLEW_MASK 0x3
#define QSGMII_PHY_TX_DRV_AMP_OFFSET 28
-#define QSGMII_PHY_TX_DRV_AMP_MASK 0xf
struct ipq806x_gmac {
struct platform_device *pdev;
@@ -217,7 +205,7 @@ static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
* code and keep it consistent with the Linux convention, we'll number
* them from 0 to 3 here.
*/
- if (gmac->id < 0 || gmac->id > 3) {
+ if (gmac->id > 3) {
dev_err(dev, "invalid gmac id\n");
return -EINVAL;
}
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 6f550e15a41c..a81335e8ebe8 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -704,6 +704,14 @@ struct netvsc_reconfig {
u32 event;
};
+/* L4 hash bits for different protocols */
+#define HV_TCP4_L4HASH 1
+#define HV_TCP6_L4HASH 2
+#define HV_UDP4_L4HASH 4
+#define HV_UDP6_L4HASH 8
+#define HV_DEFAULT_L4HASH (HV_TCP4_L4HASH | HV_TCP6_L4HASH | HV_UDP4_L4HASH | \
+ HV_UDP6_L4HASH)
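
A short fragment (not from the patch) showing how these bits are meant to interact; pkt_proto stands for the value computed from the flow dissector in netvsc_get_hash():

	u32 l4_hash = HV_DEFAULT_L4HASH;	/* TCP4 | TCP6 | UDP4 | UDP6 enabled */

	/* e.g. "ethtool -N <dev> rx-flow-hash udp4 sd" ends up clearing just: */
	l4_hash &= ~HV_UDP4_L4HASH;

	/* per received packet, pkt_proto is one of the HV_*_L4HASH values (or 0) */
	if (pkt_proto & l4_hash)
		return skb_get_hash(skb);	/* 4-tuple hash */
	/* otherwise fall back to hashing the IP addresses only */
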
+
/* The context of the netvsc device */
struct net_device_context {
/* point back to our device context */
@@ -726,10 +734,9 @@ struct net_device_context {
u32 tx_send_table[VRSS_SEND_TAB_SIZE];
/* Ethtool settings */
- bool udp4_l4_hash;
- bool udp6_l4_hash;
u8 duplex;
u32 speed;
+ u32 l4_hash; /* L4 hash settings */
struct netvsc_ethtool_stats eth_stats;
/* State to manage the associated VF interface. */
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index dfb986421ec6..44746de3dd4c 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -203,7 +203,7 @@ static inline u32 netvsc_get_hash(
const struct net_device_context *ndc)
{
struct flow_keys flow;
- u32 hash;
+ u32 hash, pkt_proto = 0;
static u32 hashrnd __read_mostly;
net_get_random_once(&hashrnd, sizeof(hashrnd));
@@ -211,11 +211,25 @@ static inline u32 netvsc_get_hash(
if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
return 0;
- if (flow.basic.ip_proto == IPPROTO_TCP ||
- (flow.basic.ip_proto == IPPROTO_UDP &&
- ((flow.basic.n_proto == htons(ETH_P_IP) && ndc->udp4_l4_hash) ||
- (flow.basic.n_proto == htons(ETH_P_IPV6) &&
- ndc->udp6_l4_hash)))) {
+ switch (flow.basic.ip_proto) {
+ case IPPROTO_TCP:
+ if (flow.basic.n_proto == htons(ETH_P_IP))
+ pkt_proto = HV_TCP4_L4HASH;
+ else if (flow.basic.n_proto == htons(ETH_P_IPV6))
+ pkt_proto = HV_TCP6_L4HASH;
+
+ break;
+
+ case IPPROTO_UDP:
+ if (flow.basic.n_proto == htons(ETH_P_IP))
+ pkt_proto = HV_UDP4_L4HASH;
+ else if (flow.basic.n_proto == htons(ETH_P_IPV6))
+ pkt_proto = HV_UDP6_L4HASH;
+
+ break;
+ }
+
+ if (pkt_proto & ndc->l4_hash) {
return skb_get_hash(skb);
} else {
if (flow.basic.n_proto == htons(ETH_P_IP))
@@ -898,8 +912,7 @@ static void netvsc_init_settings(struct net_device *dev)
{
struct net_device_context *ndc = netdev_priv(dev);
- ndc->udp4_l4_hash = true;
- ndc->udp6_l4_hash = true;
+ ndc->l4_hash = HV_DEFAULT_L4HASH;
ndc->speed = SPEED_UNKNOWN;
ndc->duplex = DUPLEX_FULL;
@@ -1245,23 +1258,32 @@ static int
netvsc_get_rss_hash_opts(struct net_device_context *ndc,
struct ethtool_rxnfc *info)
{
+ const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;
+
info->data = RXH_IP_SRC | RXH_IP_DST;
switch (info->flow_type) {
case TCP_V4_FLOW:
+ if (ndc->l4_hash & HV_TCP4_L4HASH)
+ info->data |= l4_flag;
+
+ break;
+
case TCP_V6_FLOW:
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ if (ndc->l4_hash & HV_TCP6_L4HASH)
+ info->data |= l4_flag;
+
break;
case UDP_V4_FLOW:
- if (ndc->udp4_l4_hash)
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ if (ndc->l4_hash & HV_UDP4_L4HASH)
+ info->data |= l4_flag;
break;
case UDP_V6_FLOW:
- if (ndc->udp6_l4_hash)
- info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ if (ndc->l4_hash & HV_UDP6_L4HASH)
+ info->data |= l4_flag;
break;
@@ -1302,23 +1324,51 @@ static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
{
if (info->data == (RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- if (info->flow_type == UDP_V4_FLOW)
- ndc->udp4_l4_hash = true;
- else if (info->flow_type == UDP_V6_FLOW)
- ndc->udp6_l4_hash = true;
- else
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ ndc->l4_hash |= HV_TCP4_L4HASH;
+ break;
+
+ case TCP_V6_FLOW:
+ ndc->l4_hash |= HV_TCP6_L4HASH;
+ break;
+
+ case UDP_V4_FLOW:
+ ndc->l4_hash |= HV_UDP4_L4HASH;
+ break;
+
+ case UDP_V6_FLOW:
+ ndc->l4_hash |= HV_UDP6_L4HASH;
+ break;
+
+ default:
return -EOPNOTSUPP;
+ }
return 0;
}
if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
- if (info->flow_type == UDP_V4_FLOW)
- ndc->udp4_l4_hash = false;
- else if (info->flow_type == UDP_V6_FLOW)
- ndc->udp6_l4_hash = false;
- else
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ ndc->l4_hash &= ~HV_TCP4_L4HASH;
+ break;
+
+ case TCP_V6_FLOW:
+ ndc->l4_hash &= ~HV_TCP6_L4HASH;
+ break;
+
+ case UDP_V4_FLOW:
+ ndc->l4_hash &= ~HV_UDP4_L4HASH;
+ break;
+
+ case UDP_V6_FLOW:
+ ndc->l4_hash &= ~HV_UDP6_L4HASH;
+ break;
+
+ default:
return -EOPNOTSUPP;
+ }
return 0;
}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index cd931cf9dcc2..e2cf8ffc5c6f 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -366,6 +366,11 @@ config REALTEK_PHY
---help---
Supports the Realtek 821x PHY.
+config RENESAS_PHY
+ tristate "Driver for Renesas PHYs"
+ ---help---
+ Supports the Renesas PHYs uPD60620 and uPD60620A.
+
config ROCKCHIP_PHY
tristate "Driver for Rockchip Ethernet PHYs"
---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 416df92fbf4f..1404ad3b77c7 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -72,6 +72,7 @@ obj-$(CONFIG_MICROSEMI_PHY) += mscc.o
obj-$(CONFIG_NATIONAL_PHY) += national.o
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
obj-$(CONFIG_REALTEK_PHY) += realtek.o
+obj-$(CONFIG_RENESAS_PHY) += uPD60620.o
obj-$(CONFIG_ROCKCHIP_PHY) += rockchip.o
obj-$(CONFIG_SMSC_PHY) += smsc.o
obj-$(CONFIG_STE10XP) += ste10Xp.o
diff --git a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c
new file mode 100644
index 000000000000..96b33475ea5e
--- /dev/null
+++ b/drivers/net/phy/uPD60620.c
@@ -0,0 +1,109 @@
+/*
+ * Driver for the Renesas PHY uPD60620.
+ *
+ * Copyright (C) 2015 Softing Industrial Automation GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+
+#define UPD60620_PHY_ID 0xb8242824
+
+/* Extended Registers and values */
+/* PHY Special Control/Status */
+#define PHY_PHYSCR 0x1F /* PHY.31 */
+#define PHY_PHYSCR_10MB 0x0004 /* PHY speed = 10mb */
+#define PHY_PHYSCR_100MB 0x0008 /* PHY speed = 100mb */
+#define PHY_PHYSCR_DUPLEX 0x0010 /* PHY Duplex */
+
+/* PHY Special Modes */
+#define PHY_SPM 0x12 /* PHY.18 */
+
+/* Init PHY */
+
+static int upd60620_config_init(struct phy_device *phydev)
+{
+ /* Enable support for passive HUBs (could be a strap option) */
+ /* PHYMODE: All speeds, HD in parallel detect */
+ return phy_write(phydev, PHY_SPM, 0x0180 | phydev->mdio.addr);
+}
+
+/* Get PHY status from common registers */
+
+static int upd60620_read_status(struct phy_device *phydev)
+{
+ int phy_state;
+
+ /* Read negotiated state */
+ phy_state = phy_read(phydev, MII_BMSR);
+ if (phy_state < 0)
+ return phy_state;
+
+ phydev->link = 0;
+ phydev->lp_advertising = 0;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ if (phy_state & (BMSR_ANEGCOMPLETE | BMSR_LSTATUS)) {
+ phy_state = phy_read(phydev, PHY_PHYSCR);
+ if (phy_state < 0)
+ return phy_state;
+
+ if (phy_state & (PHY_PHYSCR_10MB | PHY_PHYSCR_100MB)) {
+ phydev->link = 1;
+ phydev->speed = SPEED_10;
+ phydev->duplex = DUPLEX_HALF;
+
+ if (phy_state & PHY_PHYSCR_100MB)
+ phydev->speed = SPEED_100;
+ if (phy_state & PHY_PHYSCR_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+
+ phy_state = phy_read(phydev, MII_LPA);
+ if (phy_state < 0)
+ return phy_state;
+
+ phydev->lp_advertising
+ = mii_lpa_to_ethtool_lpa_t(phy_state);
+
+ if (phydev->duplex == DUPLEX_FULL) {
+ if (phy_state & LPA_PAUSE_CAP)
+ phydev->pause = 1;
+ if (phy_state & LPA_PAUSE_ASYM)
+ phydev->asym_pause = 1;
+ }
+ }
+ }
+ return 0;
+}
+
+MODULE_DESCRIPTION("Renesas uPD60620 PHY driver");
+MODULE_AUTHOR("Bernd Edlinger <bernd.edlinger@hotmail.de>");
+MODULE_LICENSE("GPL");
+
+static struct phy_driver upd60620_driver[1] = { {
+ .phy_id = UPD60620_PHY_ID,
+ .phy_id_mask = 0xfffffffe,
+ .name = "Renesas uPD60620",
+ .features = PHY_BASIC_FEATURES,
+ .flags = 0,
+ .config_init = upd60620_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = upd60620_read_status,
+} };
+
+module_phy_driver(upd60620_driver);
+
+static struct mdio_device_id __maybe_unused upd60620_tbl[] = {
+ { UPD60620_PHY_ID, 0xfffffffe },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, upd60620_tbl);
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index c3f77e3b7819..e365866600ba 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1339,7 +1339,17 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64)
static int ppp_dev_init(struct net_device *dev)
{
+ struct ppp *ppp;
+
netdev_lockdep_set_classes(dev);
+
+ ppp = netdev_priv(dev);
+ /* Let the netdevice take a reference on the ppp file. This ensures
+ * that ppp_destroy_interface() won't run before the device gets
+ * unregistered.
+ */
+ atomic_inc(&ppp->file.refcnt);
+
return 0;
}
@@ -1362,6 +1372,15 @@ static void ppp_dev_uninit(struct net_device *dev)
wake_up_interruptible(&ppp->file.rwait);
}
+static void ppp_dev_priv_destructor(struct net_device *dev)
+{
+ struct ppp *ppp;
+
+ ppp = netdev_priv(dev);
+ if (atomic_dec_and_test(&ppp->file.refcnt))
+ ppp_destroy_interface(ppp);
+}
+
static const struct net_device_ops ppp_netdev_ops = {
.ndo_init = ppp_dev_init,
.ndo_uninit = ppp_dev_uninit,
@@ -1387,6 +1406,7 @@ static void ppp_setup(struct net_device *dev)
dev->tx_queue_len = 3;
dev->type = ARPHRD_PPP;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->priv_destructor = ppp_dev_priv_destructor;
netif_keep_dst(dev);
}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 29c7e2ec0dcb..52ea80bcd639 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -560,6 +560,7 @@ static const struct driver_info wwan_info = {
#define NVIDIA_VENDOR_ID 0x0955
#define HP_VENDOR_ID 0x03f0
#define MICROSOFT_VENDOR_ID 0x045e
+#define UBLOX_VENDOR_ID 0x1546
static const struct usb_device_id products[] = {
/* BLACKLIST !!
@@ -869,6 +870,18 @@ static const struct usb_device_id products[] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&zte_cdc_info,
}, {
+ /* U-blox TOBY-L2 */
+ USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1143, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+}, {
+ /* U-blox SARA-U2 */
+ USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1104, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_info,
+}, {
USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long) &cdc_info,
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index bb2aad078637..5a14cc7f28ee 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2136,7 +2136,7 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
if (a == &dev_attr_uuid.attr) {
- if (uuid_is_null(&ns->uuid) ||
+ if (uuid_is_null(&ns->uuid) &&
!memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
return 0;
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index cb73bc8cad3b..3f5a04c586ce 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -94,7 +94,7 @@ struct nvme_dev {
struct mutex shutdown_lock;
bool subsystem;
void __iomem *cmb;
- dma_addr_t cmb_dma_addr;
+ pci_bus_addr_t cmb_bus_addr;
u64 cmb_size;
u32 cmbsz;
u32 cmbloc;
@@ -1226,7 +1226,7 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
dev->ctrl.page_size);
- nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset;
+ nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
nvmeq->sq_cmds_io = dev->cmb + offset;
} else {
nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
@@ -1527,7 +1527,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
resource_size_t bar_size;
struct pci_dev *pdev = to_pci_dev(dev->dev);
void __iomem *cmb;
- dma_addr_t dma_addr;
+ int bar;
dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
if (!(NVME_CMB_SZ(dev->cmbsz)))
@@ -1540,7 +1540,8 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
size = szu * NVME_CMB_SZ(dev->cmbsz);
offset = szu * NVME_CMB_OFST(dev->cmbloc);
- bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc));
+ bar = NVME_CMB_BIR(dev->cmbloc);
+ bar_size = pci_resource_len(pdev, bar);
if (offset > bar_size)
return NULL;
@@ -1553,12 +1554,11 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
if (size > bar_size - offset)
size = bar_size - offset;
- dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset;
- cmb = ioremap_wc(dma_addr, size);
+ cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
if (!cmb)
return NULL;
- dev->cmb_dma_addr = dma_addr;
+ dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
dev->cmb_size = size;
return cmb;
}
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 785fb42f6650..2799a6b08f73 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3767,7 +3767,7 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
*/
if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
pr_err("write_pending failed since: %d\n", vscsi->flags);
- return 0;
+ return -EIO;
}
rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index bd4605a34f54..c62e8d111fd9 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2851,9 +2851,6 @@ EXPORT_SYMBOL_GPL(iscsi_session_setup);
/**
* iscsi_session_teardown - destroy session, host, and cls_session
* @cls_session: iscsi session
- *
- * The driver must have called iscsi_remove_session before
- * calling this.
*/
void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
{
@@ -2863,6 +2860,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
iscsi_pool_free(&session->cmdpool);
+ iscsi_remove_session(cls_session);
+
kfree(session->password);
kfree(session->password_in);
kfree(session->username);
@@ -2877,7 +2876,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
kfree(session->portal_type);
kfree(session->discovery_parent_type);
- iscsi_destroy_session(cls_session);
+ iscsi_free_session(cls_session);
+
iscsi_host_dec_session_cnt(shost);
module_put(owner);
}
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index e7818afeda2b..15590a063ad9 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -956,6 +956,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
if (*bflags & BLIST_NO_DIF)
sdev->no_dif = 1;
+ if (*bflags & BLIST_UNMAP_LIMIT_WS)
+ sdev->unmap_limit_for_ws = 1;
+
sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
if (*bflags & BLIST_TRY_VPD_PAGES)
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 0190aeff5f7f..7404d26895f5 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2211,22 +2211,6 @@ void iscsi_free_session(struct iscsi_cls_session *session)
EXPORT_SYMBOL_GPL(iscsi_free_session);
/**
- * iscsi_destroy_session - destroy iscsi session
- * @session: iscsi_session
- *
- * Can be called by a LLD or iscsi_transport. There must not be
- * any running connections.
- */
-int iscsi_destroy_session(struct iscsi_cls_session *session)
-{
- iscsi_remove_session(session);
- ISCSI_DBG_TRANS_SESSION(session, "Completing session destruction\n");
- iscsi_free_session(session);
- return 0;
-}
-EXPORT_SYMBOL_GPL(iscsi_destroy_session);
-
-/**
* iscsi_create_conn - create iscsi class connection
* @session: iscsi cls session
* @dd_size: private driver data size
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index fb9f8b5f4673..d175c5c5ccf8 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -715,13 +715,21 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
break;
case SD_LBP_WS16:
- max_blocks = min_not_zero(sdkp->max_ws_blocks,
- (u32)SD_MAX_WS16_BLOCKS);
+ if (sdkp->device->unmap_limit_for_ws)
+ max_blocks = sdkp->max_unmap_blocks;
+ else
+ max_blocks = sdkp->max_ws_blocks;
+
+ max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS);
break;
case SD_LBP_WS10:
- max_blocks = min_not_zero(sdkp->max_ws_blocks,
- (u32)SD_MAX_WS10_BLOCKS);
+ if (sdkp->device->unmap_limit_for_ws)
+ max_blocks = sdkp->max_unmap_blocks;
+ else
+ max_blocks = sdkp->max_ws_blocks;
+
+ max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS);
break;
case SD_LBP_ZERO:
@@ -3099,8 +3107,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_security(sdkp, buffer);
}
- sdkp->first_scan = 0;
-
/*
* We now have all cache related info, determine how we deal
* with flush requests.
@@ -3115,7 +3121,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
/*
- * Use the device's preferred I/O size for reads and writes
+ * Determine the device's preferred I/O size for reads and writes
* unless the reported value is unreasonably small, large, or
* garbage.
*/
@@ -3129,8 +3135,19 @@ static int sd_revalidate_disk(struct gendisk *disk)
rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
(sector_t)BLK_DEF_MAX_SECTORS);
- /* Combine with controller limits */
- q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q));
+ /* Do not exceed controller limit */
+ rw_max = min(rw_max, queue_max_hw_sectors(q));
+
+ /*
+ * Only update max_sectors if previously unset or if the current value
+ * exceeds the capabilities of the hardware.
+ */
+ if (sdkp->first_scan ||
+ q->limits.max_sectors > q->limits.max_dev_sectors ||
+ q->limits.max_sectors > q->limits.max_hw_sectors)
+ q->limits.max_sectors = rw_max;
+
+ sdkp->first_scan = 0;
set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
sd_config_write_same(sdkp);
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 0e79eebfcbb7..419a7a90bce0 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -1144,5 +1144,5 @@ static void __exit nhi_unload(void)
tb_domain_exit();
}
-module_init(nhi_init);
+fs_initcall(nhi_init);
module_exit(nhi_unload);
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index f2d06f6f7be9..138027537d29 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -1487,6 +1487,9 @@ int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
{
int ret;
+ if (WARN_ON(!xdomain_property_dir))
+ return -EAGAIN;
+
if (!key || strlen(key) > 8)
return -EINVAL;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 58585ec8699e..68677d930e20 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -436,8 +436,8 @@ static bool vhost_exceeds_maxpend(struct vhost_net *net)
struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
struct vhost_virtqueue *vq = &nvq->vq;
- return (nvq->upend_idx + vq->num - VHOST_MAX_PEND) % UIO_MAXIOV
- == nvq->done_idx;
+ return (nvq->upend_idx + UIO_MAXIOV - nvq->done_idx) % UIO_MAXIOV >
+ min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2);
}
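
Checking the new bound with concrete numbers (UIO_MAXIOV and VHOST_MAX_PEND as defined for this driver; the ring size is invented):

/*
 *   UIO_MAXIOV = 1024, VHOST_MAX_PEND = 128, vq->num = 256
 *
 *   done_idx = 1020, upend_idx = 10 (the index has wrapped around):
 *     outstanding = (10 + 1024 - 1020) % 1024 = 14
 *
 *   limit = min(128, 256 >> 2) = 64
 *
 *   14 > 64 is false, so zerocopy may still be used for the next packet.
 */
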
/* Expects to be always run from workqueue - which acts as
@@ -480,11 +480,6 @@ static void handle_tx(struct vhost_net *net)
if (zcopy)
vhost_zerocopy_signal_used(net, vq);
- /* If more outstanding DMAs, queue the work.
- * Handle upend_idx wrap around
- */
- if (unlikely(vhost_exceeds_maxpend(net)))
- break;
head = vhost_net_tx_get_vq_desc(net, vq, vq->iov,
ARRAY_SIZE(vq->iov),
@@ -519,8 +514,7 @@ static void handle_tx(struct vhost_net *net)
len = msg_data_left(&msg);
zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
- && (nvq->upend_idx + 1) % UIO_MAXIOV !=
- nvq->done_idx
+ && !vhost_exceeds_maxpend(net)
&& vhost_net_tx_select_zcopy(net);
/* use msg_control to pass vhost zerocopy ubuf info to skb */