Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/device_pm.c | 1
-rw-r--r--  drivers/acpi/internal.h | 1
-rw-r--r--  drivers/acpi/nfit/core.c | 15
-rw-r--r--  drivers/acpi/power.c | 2
-rw-r--r--  drivers/acpi/scan.c | 3
-rw-r--r--  drivers/acpi/sleep.h | 1
-rw-r--r--  drivers/acpi/utils.c | 14
-rw-r--r--  drivers/android/binder.c | 2
-rw-r--r--  drivers/base/core.c | 3
-rw-r--r--  drivers/base/power/runtime.c | 10
-rw-r--r--  drivers/block/nbd.c | 10
-rw-r--r--  drivers/bluetooth/btusb.c | 23
-rw-r--r--  drivers/cdrom/gdrom.c | 13
-rw-r--r--  drivers/char/hpet.c | 2
-rw-r--r--  drivers/char/tpm/tpm2-cmd.c | 1
-rw-r--r--  drivers/char/tpm/tpm_tis_core.c | 22
-rw-r--r--  drivers/clk/clk.c | 9
-rw-r--r--  drivers/clocksource/hyperv_timer.c | 4
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 6
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 14
-rw-r--r--  drivers/crypto/cavium/nitrox/nitrox_main.c | 1
-rw-r--r--  drivers/dma-buf/dma-buf.c | 10
-rw-r--r--  drivers/dma/qcom/hidma_mgmt.c | 17
-rw-r--r--  drivers/edac/amd64_edac.c | 2
-rw-r--r--  drivers/firmware/arm_scmi/notify.h | 2
-rw-r--r--  drivers/firmware/arm_scpi.c | 4
-rw-r--r--  drivers/gpio/gpio-cadence.c | 1
-rw-r--r--  drivers/gpio/gpio-tegra186.c | 11
-rw-r--r--  drivers/gpio/gpio-xilinx.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 44
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 184
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c | 38
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 68
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c | 7
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 6
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c | 10
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/si_dpm.c | 174
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h | 34
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 5
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 2
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 61
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pages.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/gen7_renderclear.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 124
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gvt/hypercall.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 122
-rw-r--r--  drivers/gpu/drm/i915/gvt/mpt.h | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_active.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_mm.c | 73
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 9
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 10
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 16
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h | 6
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_audio.c | 1
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.c | 26
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.h | 1
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 16
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/ni_dpm.c | 144
-rw-r--r--  drivers/gpu/drm/radeon/nislands_smc.h | 34
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 177
-rw-r--r--  drivers/gpu/drm/radeon/sislands_smc.h | 34
-rw-r--r--  drivers/gpu/drm/vc4/vc4_vec.c | 6
-rw-r--r--  drivers/hwmon/adm9240.c | 2
-rw-r--r--  drivers/hwmon/corsair-psu.c | 4
-rw-r--r--  drivers/hwmon/lm80.c | 11
-rw-r--r--  drivers/hwmon/ltc2992.c | 8
-rw-r--r--  drivers/hwmon/occ/common.c | 5
-rw-r--r--  drivers/hwmon/occ/common.h | 2
-rw-r--r--  drivers/hwmon/pmbus/fsp-3y.c | 27
-rw-r--r--  drivers/iio/accel/Kconfig | 1
-rw-r--r--  drivers/iio/common/hid-sensors/Kconfig | 1
-rw-r--r--  drivers/iio/gyro/Kconfig | 1
-rw-r--r--  drivers/iio/gyro/mpu3050-core.c | 13
-rw-r--r--  drivers/iio/humidity/Kconfig | 1
-rw-r--r--  drivers/iio/industrialio-core.c | 9
-rw-r--r--  drivers/iio/light/Kconfig | 2
-rw-r--r--  drivers/iio/light/gp2ap002.c | 5
-rw-r--r--  drivers/iio/light/tsl2583.c | 8
-rw-r--r--  drivers/iio/magnetometer/Kconfig | 1
-rw-r--r--  drivers/iio/orientation/Kconfig | 2
-rw-r--r--  drivers/iio/pressure/Kconfig | 1
-rw-r--r--  drivers/iio/proximity/pulsedlight-lidar-lite-v2.c | 1
-rw-r--r--  drivers/iio/temperature/Kconfig | 1
-rw-r--r--  drivers/infiniband/core/cma.c | 5
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_device.c | 7
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_main.c | 5
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx5/dm.c | 3
-rw-r--r--  drivers/infiniband/hw/mlx5/fs.c | 9
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 1
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 8
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_comp.c | 16
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_qp.c | 7
-rw-r--r--  drivers/infiniband/sw/siw/siw_verbs.c | 11
-rw-r--r--  drivers/irqchip/Kconfig | 2
-rw-r--r--  drivers/irqchip/irq-mvebu-icu.c | 4
-rw-r--r--  drivers/irqchip/irq-mvebu-sei.c | 4
-rw-r--r--  drivers/irqchip/irq-stm32-exti.c | 4
-rw-r--r--  drivers/isdn/hardware/mISDN/hfcsusb.c | 17
-rw-r--r--  drivers/isdn/hardware/mISDN/mISDNinfineon.c | 21
-rw-r--r--  drivers/isdn/hardware/mISDN/netjet.c | 1
-rw-r--r--  drivers/leds/leds-lp5523.c | 2
-rw-r--r--  drivers/md/dm-integrity.c | 81
-rw-r--r--  drivers/md/dm-snap.c | 6
-rw-r--r--  drivers/media/dvb-frontends/sp8870.c | 2
-rw-r--r--  drivers/media/platform/rcar_drif.c | 1
-rw-r--r--  drivers/media/usb/gspca/cpia1.c | 6
-rw-r--r--  drivers/media/usb/gspca/m5602/m5602_mt9m111.c | 16
-rw-r--r--  drivers/media/usb/gspca/m5602/m5602_po1030.c | 14
-rw-r--r--  drivers/misc/eeprom/at24.c | 6
-rw-r--r--  drivers/misc/habanalabs/common/command_submission.c | 2
-rw-r--r--  drivers/misc/habanalabs/common/firmware_if.c | 53
-rw-r--r--  drivers/misc/habanalabs/common/habanalabs.h | 23
-rw-r--r--  drivers/misc/habanalabs/common/habanalabs_drv.c | 7
-rw-r--r--  drivers/misc/habanalabs/common/sysfs.c | 4
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudi.c | 59
-rw-r--r--  drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c | 12
-rw-r--r--  drivers/misc/habanalabs/goya/goya.c | 47
-rw-r--r--  drivers/misc/habanalabs/goya/goya_hwmgr.c | 40
-rw-r--r--  drivers/misc/ics932s401.c | 2
-rw-r--r--  drivers/misc/lis3lv02d/lis3lv02d.h | 1
-rw-r--r--  drivers/mmc/host/meson-gx-mmc.c | 7
-rw-r--r--  drivers/mmc/host/sdhci-pci-gli.c | 7
-rw-r--r--  drivers/mtd/nand/raw/cs553x_nand.c | 12
-rw-r--r--  drivers/mtd/nand/raw/fsmc_nand.c | 12
-rw-r--r--  drivers/mtd/nand/raw/lpc32xx_slc.c | 15
-rw-r--r--  drivers/mtd/nand/raw/ndfc.c | 12
-rw-r--r--  drivers/mtd/nand/raw/sharpsl.c | 12
-rw-r--r--  drivers/mtd/nand/raw/tmio_nand.c | 8
-rw-r--r--  drivers/mtd/nand/raw/txx9ndfmc.c | 5
-rw-r--r--  drivers/mtd/parsers/ofpart_core.c | 26
-rw-r--r--  drivers/net/Kconfig | 23
-rw-r--r--  drivers/net/appletalk/cops.c | 4
-rw-r--r--  drivers/net/appletalk/ltpc.c | 10
-rw-r--r--  drivers/net/bonding/bond_main.c | 28
-rw-r--r--  drivers/net/bonding/bond_options.c | 5
-rw-r--r--  drivers/net/caif/caif_serial.c | 3
-rw-r--r--  drivers/net/can/at91_can.c | 2
-rw-r--r--  drivers/net/can/c_can/Makefile | 5
-rw-r--r--  drivers/net/can/c_can/c_can.h | 3
-rw-r--r--  drivers/net/can/c_can/c_can_ethtool.c | 43
-rw-r--r--  drivers/net/can/c_can/c_can_main.c (renamed from drivers/net/can/c_can/c_can.c) | 2
-rw-r--r--  drivers/net/can/m_can/m_can.c | 244
-rw-r--r--  drivers/net/can/softing/softing_main.c | 2
-rw-r--r--  drivers/net/can/spi/hi311x.c | 2
-rw-r--r--  drivers/net/can/spi/mcp251x.c | 2
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 2
-rw-r--r--  drivers/net/can/usb/Kconfig | 2
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c | 14
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 17
-rw-r--r--  drivers/net/dsa/b53/b53_srab.c | 3
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 5
-rw-r--r--  drivers/net/dsa/hirschmann/hellcreek.c | 3
-rw-r--r--  drivers/net/dsa/microchip/ksz8795.c | 214
-rw-r--r--  drivers/net/dsa/microchip/ksz8795_reg.h | 67
-rw-r--r--  drivers/net/dsa/microchip/ksz9477.c | 1
-rw-r--r--  drivers/net/dsa/mt7530.c | 8
-rw-r--r--  drivers/net/dsa/ocelot/felix.c | 2
-rw-r--r--  drivers/net/dsa/ocelot/felix_vsc9959.c | 15
-rw-r--r--  drivers/net/dsa/ocelot/seville_vsc9953.c | 5
-rw-r--r--  drivers/net/dsa/qca8k.c | 141
-rw-r--r--  drivers/net/dsa/sja1105/Kconfig | 1
-rw-r--r--  drivers/net/dsa/sja1105/Makefile | 1
-rw-r--r--  drivers/net/dsa/sja1105/sja1105.h | 114
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_clocking.c | 156
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_dynamic_config.c | 359
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_dynamic_config.h | 1
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_flower.c | 13
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_main.c | 760
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_mdio.c | 543
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ptp.c | 97
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ptp.h | 13
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_sgmii.h | 53
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_spi.c | 433
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_static_config.c | 497
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_static_config.h | 107
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_tas.c | 14
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_tas.h | 2
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_vl.c | 2
-rw-r--r--  drivers/net/dsa/xrs700x/xrs700x.c | 78
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_admin_defs.h | 2
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.c | 3
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.c | 30
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_ethtool.c | 18
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 217
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.h | 23
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c.h | 25
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_hw.h | 34
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 546
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-platform.c | 21
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 138
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 10
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 4
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 6
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 27
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 27
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/adapter.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/common.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 19
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/sge.c | 38
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 14
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c | 9
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 80
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c | 6
-rw-r--r--  drivers/net/ethernet/cortina/gemini.c | 34
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip.h | 1
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c | 88
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ierb.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.c | 9
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_qos.c | 31
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 24
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 76
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h | 74
-rw-r--r--  drivers/net/ethernet/freescale/xgmac_mdio.c | 30
-rw-r--r--  drivers/net/ethernet/fujitsu/fmvj18x_cs.c | 4
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c | 21
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx.c | 10
-rw-r--r--  drivers/net/ethernet/hisilicon/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 10
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 35
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 85
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 932
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 72
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 150
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 21
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 338
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h | 19
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 412
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h | 89
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 576
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 52
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 119
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c | 544
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h | 134
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 59
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 1
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 9
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 167
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 6
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 12
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.c | 132
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 63
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 80
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_arfs.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 134
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 465
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 19
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c | 62
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 19
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.h | 15
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_nl.h | 9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devlink.c | 9
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 84
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fw_update.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 73
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_idc.c | 334
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_idc_int.h | 14
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 124
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 288
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c | 1269
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.h | 161
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 653
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 79
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sbq_cmd.h | 92
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 69
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 28
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 54
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 69
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 245
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 31
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 13
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.h | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 66
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 27
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 6
-rw-r--r--  drivers/net/ethernet/intel/igbvf/vf.h | 42
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h | 1
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_defines.h | 9
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_dump.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c | 41
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 86
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_regs.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 9
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 16
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 16
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 16
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 14
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 5
-rw-r--r--  drivers/net/ethernet/korina.c | 12
-rw-r--r--  drivers/net/ethernet/lantiq_xrx200.c | 23
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 11
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 22
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 72
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/common.h | 5
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 54
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc.h | 107
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h | 3915
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 56
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 76
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 168
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 923
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 617
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 26
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h | 12
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h | 85
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c | 323
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h | 11
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 39
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c | 192
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 143
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 303
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 58
-rw-r--r--  drivers/net/ethernet/marvell/prestera/Makefile | 3
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera.h | 39
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_acl.c | 376
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_acl.h | 124
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_devlink.c | 530
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_devlink.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_dsa.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_dsa.h | 1
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_flow.c | 194
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_flow.h | 14
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_flower.c | 359
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_flower.h | 18
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_hw.c | 661
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_hw.h | 51
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_main.c | 301
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_pci.c | 104
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_rxtx.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_span.c | 239
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_span.h | 20
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_switchdev.c | 180
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_switchdev.h | 7
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 2
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 77
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 107
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c | 424
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.h | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 58
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 111
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 123
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 179
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c | 1299
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h | 53
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h | 53
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h | 113
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c | 61
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 58
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 61
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c | 85
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.c | 295
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c | 96
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/sf.h | 45
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 630
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 187
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c | 120
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_env.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 97
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 130
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 1
-rw-r--r--  drivers/net/ethernet/micrel/ks8842.c | 4
-rw-r--r--  drivers/net/ethernet/micrel/ks8851_common.c | 15
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 105
-rw-r--r--  drivers/net/ethernet/microchip/encx24j600_hw.h | 2
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c | 5
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 1
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c | 6
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.c | 27
-rw-r--r--  drivers/net/ethernet/netronome/nfp/Makefile | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/conntrack.c | 1178
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/conntrack.h | 231
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.h | 6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/metadata.c | 129
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 40
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c | 3
-rw-r--r--  drivers/net/ethernet/ni/nixge.c | 8
-rw-r--r--  drivers/net/ethernet/pensando/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/qlogic/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/Makefile | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h | 14
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c | 45
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 140
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 20
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c | 40
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_nvmetcp.c | 829
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_nvmetcp.h | 103
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c | 376
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.h | 39
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c | 238
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ooo.c | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp.h | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_rdma.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 4
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 6
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h | 5
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c | 43
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h | 11
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c | 434
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 2
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 15
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 5
-rw-r--r--  drivers/net/ethernet/sfc/nic.c | 1
-rw-r--r--  drivers/net/ethernet/sgi/ioc3-eth.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Makefile | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c | 398
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 92
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h | 29
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac5.c | 30
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac5.h | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h | 17
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 7
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 109
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 74
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 16
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 7
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 32
-rw-r--r--  drivers/net/ethernet/ti/am65-cpts.c | 4
-rw-r--r--  drivers/net/ethernet/ti/cpsw-phy-sel.c | 4
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 7
-rw-r--r--  drivers/net/ethernet/ti/cpsw_new.c | 7
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c | 5
-rw-r--r--  drivers/net/ethernet/ti/netcp_core.c | 4
-rw-r--r--  drivers/net/ethernet/wiznet/w5100.c | 7
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 8
-rw-r--r--  drivers/net/ethernet/xscale/ixp4xx_eth.c | 6
-rw-r--r--  drivers/net/fjes/fjes_main.c | 16
-rw-r--r--  drivers/net/gtp.c | 2
-rw-r--r--  drivers/net/hamradio/6pack.c | 10
-rw-r--r--  drivers/net/hamradio/bpqether.c | 4
-rw-r--r--  drivers/net/ieee802154/mrf24j40.c | 4
-rw-r--r--  drivers/net/ipa/Makefile | 3
-rw-r--r--  drivers/net/ipa/ipa.h | 2
-rw-r--r--  drivers/net/ipa/ipa_cmd.c | 40
-rw-r--r--  drivers/net/ipa/ipa_data-v3.5.1.c | 45
-rw-r--r--  drivers/net/ipa/ipa_data-v4.11.c | 66
-rw-r--r--  drivers/net/ipa/ipa_data-v4.2.c | 54
-rw-r--r--  drivers/net/ipa/ipa_data-v4.5.c | 69
-rw-r--r--  drivers/net/ipa/ipa_data-v4.9.c | 70
-rw-r--r--  drivers/net/ipa/ipa_endpoint.c | 75
-rw-r--r--  drivers/net/ipa/ipa_main.c | 37
-rw-r--r--  drivers/net/ipa/ipa_mem.c | 263
-rw-r--r--  drivers/net/ipa/ipa_mem.h | 26
-rw-r--r--  drivers/net/ipa/ipa_qmi.c | 32
-rw-r--r--  drivers/net/ipa/ipa_reg.h | 1
-rw-r--r--  drivers/net/ipa/ipa_smp2p.c | 5
-rw-r--r--  drivers/net/ipa/ipa_sysfs.c | 136
-rw-r--r--  drivers/net/ipa/ipa_sysfs.h | 15
-rw-r--r--  drivers/net/ipa/ipa_table.c | 94
-rw-r--r--  drivers/net/ipa/ipa_uc.c | 3
-rw-r--r--  drivers/net/ipa/ipa_version.h | 2
-rw-r--r--  drivers/net/macvlan.c | 2
-rw-r--r--  drivers/net/mdio/Kconfig | 14
-rw-r--r--  drivers/net/mdio/Makefile | 4
-rw-r--r--  drivers/net/mdio/acpi_mdio.c | 58
-rw-r--r--  drivers/net/mdio/fwnode_mdio.c | 144
-rw-r--r--  drivers/net/mdio/mdio-bcm-unimac.c | 2
-rw-r--r--  drivers/net/mdio/mdio-mscc-miim.c | 6
-rw-r--r--  drivers/net/mdio/mdio-mux-bcm-iproc.c | 9
-rw-r--r--  drivers/net/mdio/mdio-mux-meson-g12a.c | 2
-rw-r--r--  drivers/net/mdio/mdio-octeon.c | 2
-rw-r--r--  drivers/net/mdio/mdio-thunder.c | 1
-rw-r--r--  drivers/net/mdio/of_mdio.c | 140
-rw-r--r--  drivers/net/mhi/net.c | 123
-rw-r--r--  drivers/net/netdevsim/bus.c | 129
-rw-r--r--  drivers/net/netdevsim/dev.c | 404
-rw-r--r--  drivers/net/netdevsim/netdev.c | 95
-rw-r--r--  drivers/net/netdevsim/netdevsim.h | 49
-rw-r--r--  drivers/net/pcs/Makefile | 4
-rw-r--r--  drivers/net/pcs/pcs-xpcs-nxp.c | 185
-rw-r--r--  drivers/net/pcs/pcs-xpcs.c | 648
-rw-r--r--  drivers/net/pcs/pcs-xpcs.h | 115
-rw-r--r--  drivers/net/phy/ax88796b.c | 74
-rw-r--r--  drivers/net/phy/bcm87xx.c | 4
-rw-r--r--  drivers/net/phy/davicom.c | 6
-rw-r--r--  drivers/net/phy/dp83640.c | 5
-rw-r--r--  drivers/net/phy/et1011c.c | 15
-rw-r--r--  drivers/net/phy/fixed_phy.c | 4
-rw-r--r--  drivers/net/phy/lxt.c | 4
-rw-r--r--  drivers/net/phy/marvell.c | 40
-rw-r--r--  drivers/net/phy/mdio_bus.c | 4
-rw-r--r--  drivers/net/phy/mdio_device.c | 4
-rw-r--r--  drivers/net/phy/micrel.c | 410
-rw-r--r--  drivers/net/phy/mii_timestamper.c | 3
-rw-r--r--  drivers/net/phy/motorcomm.c | 17
-rw-r--r--  drivers/net/phy/national.c | 6
-rw-r--r--  drivers/net/phy/nxp-c45-tja11xx.c | 12
-rw-r--r--  drivers/net/phy/phy-c45.c | 2
-rw-r--r--  drivers/net/phy/phy-core.c | 3
-rw-r--r--  drivers/net/phy/phy.c | 6
-rw-r--r--  drivers/net/phy/phy_device.c | 130
-rw-r--r--  drivers/net/phy/phylink.c | 60
-rw-r--r--  drivers/net/phy/qsemi.c | 1
-rw-r--r--  drivers/net/phy/realtek.c | 76
-rw-r--r--  drivers/net/phy/sfp-bus.c | 33
-rw-r--r--  drivers/net/phy/sfp.c | 2
-rw-r--r--  drivers/net/phy/spi_ks8995.c | 10
-rw-r--r--  drivers/net/phy/ste10Xp.c | 6
-rw-r--r--  drivers/net/phy/vitesse.c | 3
-rw-r--r--  drivers/net/usb/Kconfig | 2
-rw-r--r--  drivers/net/usb/asix.h | 13
-rw-r--r--  drivers/net/usb/asix_common.c | 106
-rw-r--r--  drivers/net/usb/asix_devices.c | 202
-rw-r--r--  drivers/net/usb/ax88172a.c | 21
-rw-r--r--  drivers/net/usb/cdc_eem.c | 2
-rw-r--r--  drivers/net/usb/cdc_ether.c | 2
-rw-r--r--  drivers/net/usb/cdc_mbim.c | 6
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 4
-rw-r--r--  drivers/net/usb/hso.c | 45
-rw-r--r--  drivers/net/usb/int51x1.c | 2
-rw-r--r--  drivers/net/usb/lan78xx.c | 3
-rw-r--r--  drivers/net/usb/lg-vl600.c | 4
-rw-r--r--  drivers/net/usb/r8152.c | 121
-rw-r--r--  drivers/net/usb/rndis_host.c | 2
-rw-r--r--  drivers/net/usb/smsc75xx.c | 8
-rw-r--r--  drivers/net/usb/usbnet.c | 6
-rw-r--r--  drivers/net/virtio_net.c | 25
-rw-r--r--  drivers/net/vrf.c | 2
-rw-r--r--  drivers/net/wan/cosa.c | 493
-rw-r--r--  drivers/net/wan/farsync.c | 487
-rw-r--r--  drivers/net/wan/hd64570.c | 124
-rw-r--r--  drivers/net/wan/hdlc.c | 63
-rw-r--r--  drivers/net/wan/hdlc_cisco.c | 49
-rw-r--r--  drivers/net/wan/hdlc_fr.c | 101
-rw-r--r--  drivers/net/wan/hdlc_x25.c | 77
-rw-r--r--  drivers/net/wan/ixp4xx_hss.c | 144
-rw-r--r--  drivers/net/wan/lapbether.c | 65
-rw-r--r--  drivers/net/wan/n2.c | 56
-rw-r--r--  drivers/net/wan/pc300too.c | 52
-rw-r--r--  drivers/net/wan/pci200syn.c | 51
-rw-r--r--  drivers/net/wan/sealevel.c | 126
-rw-r--r--  drivers/net/wan/wanxl.c | 186
-rw-r--r--  drivers/net/wan/z85230.c | 993
-rw-r--r--  drivers/net/wireguard/Makefile | 3
-rw-r--r--  drivers/net/wireguard/allowedips.c | 189
-rw-r--r--  drivers/net/wireguard/allowedips.h | 14
-rw-r--r--  drivers/net/wireguard/main.c | 17
-rw-r--r--  drivers/net/wireguard/peer.c | 27
-rw-r--r--  drivers/net/wireguard/peer.h | 3
-rw-r--r--  drivers/net/wireguard/selftest/allowedips.c | 165
-rw-r--r--  drivers/net/wireguard/socket.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/ahb.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/debug.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt.h | 5
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_rx.c | 199
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c | 14
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/rx_desc.h | 14
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.h | 9
-rw-r--r--  drivers/net/wireless/ath/ath11k/core.c | 47
-rw-r--r--  drivers/net/wireless/ath/ath11k/core.h | 5
-rw-r--r--  drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp.c | 16
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp_rx.c | 34
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp_rx.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath11k/hal.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath11k/hal.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath11k/hal_rx.c | 42
-rw-r--r--  drivers/net/wireless/ath/ath11k/hal_rx.h | 8
-rw-r--r--  drivers/net/wireless/ath/ath11k/hw.c | 391
-rw-r--r--  drivers/net/wireless/ath/ath11k/hw.h | 5
-rw-r--r--  drivers/net/wireless/ath/ath11k/mac.c | 16
-rw-r--r--  drivers/net/wireless/ath/ath11k/mhi.c | 1
-rw-r--r--  drivers/net/wireless/ath/ath11k/pci.c | 47
-rw-r--r--  drivers/net/wireless/ath/ath11k/rx_desc.h | 87
-rw-r--r--  drivers/net/wireless/ath/ath11k/wmi.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath11k/wmi.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath5k/pcu.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath6kl/cfg80211.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath6kl/debug.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_mac.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 5
-rw-r--r--  drivers/net/wireless/ath/carl9170/Kconfig | 8
-rw-r--r--  drivers/net/wireless/ath/hw.c | 2
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/dxe.c | 2
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/hal.h | 20
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/main.c | 131
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/smd.c | 267
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/smd.h | 17
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/wcn36xx.h | 14
-rw-r--r--  drivers/net/wireless/ath/wil6210/cfg80211.c | 2
-rw-r--r--  drivers/net/wireless/ath/wil6210/interrupt.c | 2
-rw-r--r--  drivers/net/wireless/ath/wil6210/wmi.c | 6
-rw-r--r--  drivers/net/wireless/broadcom/b43/phy_n.c | 47
-rw-r--r--  drivers/net/wireless/broadcom/b43legacy/dma.c | 13
-rw-r--r--  drivers/net/wireless/broadcom/b43legacy/main.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 8
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h | 19
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 54
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 42
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h | 7
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 13
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h | 5
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 19
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c | 8
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c | 3
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c | 8
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c | 3
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.h | 1
-rw-r--r--  drivers/net/wireless/intel/ipw2x00/ipw2100.c | 2
-rw-r--r--  drivers/net/wireless/marvell/libertas/main.c | 2
-rw-r--r--  drivers/net/wireless/marvell/libertas/mesh.c | 182
-rw-r--r--  drivers/net/wireless/marvell/libertas_tf/if_usb.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mac80211.c | 26
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/init.c | 1
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/mac.c | 5
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c | 19
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/soc.c | 3
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c | 3
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/pci.c | 81
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/init.c | 4
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/mac.c | 5
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/main.c | 3
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/mcu.c | 17
-rw-r--r--  drivers/net/wireless/microchip/wilc1000/spi.c | 2
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 2
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00queue.c | 5
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/base.c | 18
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c | 20
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/cam.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c | 4
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c | 2
-rw-r--r--  drivers/net/wireless/rndis_wlan.c | 5
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_hal.c | 6
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_mac80211.c | 20
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_mgmt.c | 7
-rw-r--r--  drivers/net/wireless/rsi/rsi_main.h | 1
-rw-r--r--  drivers/net/wireless/st/cw1200/cw1200_sdio.c | 1
-rw-r--r--  drivers/net/wireless/st/cw1200/scan.c | 17
-rw-r--r--  drivers/net/wireless/ti/wl1251/cmd.c | 17
-rw-r--r--  drivers/net/wireless/ti/wl12xx/main.c | 7
-rw-r--r--  drivers/net/wireless/ti/wlcore/cmd.c | 6
-rw-r--r--  drivers/net/wireless/ti/wlcore/event.c | 67
-rw-r--r--  drivers/net/wireless/ti/wlcore/main.c | 4
-rw-r--r--  drivers/net/wireless/ti/wlcore/sysfs.c | 24
-rw-r--r--  drivers/net/wireless/zydas/zd1211rw/zd_usb.c | 4
-rw-r--r--  drivers/net/wwan/Kconfig | 35
-rw-r--r--  drivers/net/wwan/Makefile | 5
-rw-r--r--  drivers/net/wwan/iosm/Makefile | 23
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c | 88
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h | 59
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_imem.c | 1363
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_imem.h | 579
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_imem_ops.c | 346
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_imem_ops.h | 98
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_irq.c | 90
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_irq.h | 33
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_mmio.c | 223
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_mmio.h | 183
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_mux.c | 455
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_mux.h | 343
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_mux_codec.c | 910
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_mux_codec.h | 193
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_pcie.c | 580
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_pcie.h | 209
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_pm.c | 333
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_pm.h | 207
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_port.c | 85
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_port.h | 50
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_protocol.c | 283
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_protocol.h | 237
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c | 552
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_protocol_ops.h | 444
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_task_queue.c | 202
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_task_queue.h | 97
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_uevent.c | 44
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_uevent.h | 41
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_wwan.c | 351
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_wwan.h | 55
-rw-r--r--  drivers/net/wwan/wwan_core.c | 494
-rw-r--r--  drivers/net/wwan/wwan_hwsim.c | 500
-rw-r--r--  drivers/nfc/fdp/fdp.c | 42
-rw-r--r--  drivers/nfc/fdp/fdp.h | 1
-rw-r--r--  drivers/nfc/fdp/i2c.c | 14
-rw-r--r--  drivers/nfc/mei_phy.c | 8
-rw-r--r--  drivers/nfc/microread/microread.c | 1
-rw-r--r--  drivers/nfc/nfcmrvl/fw_dnld.c | 25
-rw-r--r--  drivers/nfc/nfcmrvl/fw_dnld.h | 17
-rw-r--r--  drivers/nfc/nfcmrvl/i2c.c | 24
-rw-r--r--  drivers/nfc/nfcmrvl/main.c | 13
-rw-r--r--  drivers/nfc/nfcmrvl/nfcmrvl.h | 29
-rw-r--r--  drivers/nfc/nfcmrvl/spi.c | 19
-rw-r--r--  drivers/nfc/nfcmrvl/uart.c | 49
-rw-r--r--  drivers/nfc/nfcmrvl/usb.c | 31
-rw-r--r--  drivers/nfc/pn533/i2c.c | 10
-rw-r--r--  drivers/nfc/pn533/pn533.c | 46
-rw-r--r--  drivers/nfc/pn533/uart.c | 2
-rw-r--r--  drivers/nfc/pn533/usb.c | 4
-rw-r--r--  drivers/nfc/pn544/i2c.c | 11
-rw-r--r--  drivers/nfc/port100.c | 4
-rw-r--r--  drivers/nfc/s3fwrn5/i2c.c | 2
-rw-r--r--  drivers/nfc/st-nci/i2c.c | 9
-rw-r--r--  drivers/nfc/st-nci/se.c | 14
-rw-r--r--  drivers/nfc/st-nci/spi.c | 9
-rw-r--r--  drivers/nfc/st-nci/vendor_cmds.c | 15
-rw-r--r--  drivers/nfc/st21nfca/i2c.c | 9
-rw-r--r--  drivers/nfc/st95hf/core.c | 13
-rw-r--r--  drivers/nvme/host/core.c | 3
-rw-r--r--  drivers/nvme/host/fc.c | 12
-rw-r--r--  drivers/nvme/host/multipath.c | 55
-rw-r--r--  drivers/nvme/host/nvme.h | 8
-rw-r--r--  drivers/nvme/host/tcp.c | 5
-rw-r--r--  drivers/nvme/target/admin-cmd.c | 7
-rw-r--r--  drivers/nvme/target/core.c | 2
-rw-r--r--  drivers/nvme/target/discovery.c | 2
-rw-r--r--  drivers/nvme/target/fabrics-cmd.c | 6
-rw-r--r--  drivers/nvme/target/io-cmd-bdev.c | 2
-rw-r--r--  drivers/nvme/target/io-cmd-file.c | 8
-rw-r--r--  drivers/nvme/target/loop.c | 4
-rw-r--r--  drivers/nvme/target/nvmet.h | 6
-rw-r--r--  drivers/nvme/target/passthru.c | 2
-rw-r--r--  drivers/nvme/target/rdma.c | 4
-rw-r--r--  drivers/platform/mellanox/mlxbf-tmfifo.c | 11
-rw-r--r--  drivers/platform/surface/aggregator/controller.c | 3
-rw-r--r--  drivers/platform/surface/surface_dtx.c | 8
-rw-r--r--  drivers/platform/x86/Kconfig | 2
-rw-r--r--  drivers/platform/x86/dell/dell-smbios-wmi.c | 3
-rw-r--r--  drivers/platform/x86/gigabyte-wmi.c | 38
-rw-r--r--  drivers/platform/x86/hp-wireless.c | 2
-rw-r--r--  drivers/platform/x86/hp_accel.c | 22
-rw-r--r--  drivers/platform/x86/ideapad-laptop.c | 13
-rw-r--r--  drivers/platform/x86/intel_int0002_vgpio.c | 80
-rw-r--r--  drivers/platform/x86/intel_punit_ipc.c | 1
-rw-r--r--  drivers/platform/x86/touchscreen_dmi.c | 43
-rw-r--r--  drivers/ptp/ptp_ocp.c | 4
-rw-r--r--  drivers/rapidio/rio_cm.c | 17
-rw-r--r--  drivers/s390/net/netiucv.c | 28
-rw-r--r--  drivers/s390/net/qeth_core.h | 42
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 349
-rw-r--r--  drivers/s390/net/qeth_ethtool.c | 7
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 12
-rw-r--r--  drivers/scsi/BusLogic.c | 6
-rw-r--r--  drivers/scsi/BusLogic.h | 2
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.c | 10
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c | 2
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.c | 7
-rw-r--r--  drivers/scsi/pm8001/pm80xx_hwi.c | 12
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 3
-rw-r--r--  drivers/scsi/ufs/ufs-hisi.c | 15
-rw-r--r--  drivers/scsi/ufs/ufs-mediatek.c | 4
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 5
-rw-r--r--  drivers/spi/Kconfig | 2
-rw-r--r--  drivers/spi/spi-fsl-dspi.c | 4
-rw-r--r--  drivers/spi/spi-sc18is602.c | 9
-rw-r--r--  drivers/spi/spi-sprd.c | 1
-rw-r--r--  drivers/spi/spi-zynq-qspi.c | 9
-rw-r--r--  drivers/spi/spi.c | 51
-rw-r--r--  drivers/ssb/driver_gpio.c | 6
-rw-r--r--  drivers/ssb/driver_pcicore.c | 18
-rw-r--r--  drivers/ssb/main.c | 36
-rw-r--r--  drivers/ssb/pci.c | 16
-rw-r--r--  drivers/ssb/pcmcia.c | 16
-rw-r--r--  drivers/ssb/scan.c | 1
-rw-r--r--  drivers/ssb/sdio.c | 1
-rw-r--r--  drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c | 23
-rw-r--r--  drivers/staging/rtl8723bs/os_dep/ioctl_linux.c | 21
-rw-r--r--  drivers/tee/amdtee/amdtee_private.h | 13
-rw-r--r--  drivers/tee/amdtee/call.c | 94
-rw-r--r--  drivers/tee/amdtee/core.c | 15
-rw-r--r--  drivers/tty/serial/max310x.c | 2
-rw-r--r--  drivers/tty/serial/mvebu-uart.c | 3
-rw-r--r--  drivers/tty/vt/vt.c | 2
-rw-r--r--  drivers/tty/vt/vt_ioctl.c | 57
-rw-r--r--  drivers/uio/uio_hv_generic.c | 12
-rw-r--r--  drivers/uio/uio_pci_generic.c | 2
-rw-r--r--  drivers/usb/class/cdc-wdm.c | 33
-rw-r--r--  drivers/usb/core/hub.c | 6
-rw-r--r--  drivers/usb/dwc2/core.h | 2
-rw-r--r--  drivers/usb/dwc2/gadget.c | 3
-rw-r--r--  drivers/usb/dwc2/platform.c | 4
-rw-r--r--  drivers/usb/dwc3/core.h | 7
-rw-r--r--  drivers/usb/dwc3/debug.h | 8
-rw-r--r--  drivers/usb/dwc3/dwc3-imx8mp.c | 3
-rw-r--r--  drivers/usb/dwc3/dwc3-omap.c | 5
-rw-r--r--  drivers/usb/dwc3/dwc3-pci.c | 1
-rw-r--r--  drivers/usb/dwc3/gadget.c | 13
-rw-r--r--  drivers/usb/host/fotg210-hcd.c | 4
-rw-r--r--  drivers/usb/host/xhci-ext-caps.h | 5
-rw-r--r--  drivers/usb/host/xhci-pci.c | 8
-rw-r--r--  drivers/usb/host/xhci-ring.c | 16
-rw-r--r--  drivers/usb/host/xhci.c | 6
-rw-r--r--  drivers/usb/musb/mediatek.c | 2
-rw-r--r--  drivers/usb/typec/tcpm/tcpm.c | 112
-rw-r--r--  drivers/usb/typec/ucsi/ucsi.c | 46
-rw-r--r--  drivers/usb/typec/ucsi/ucsi.h | 6
-rw-r--r--  drivers/vdpa/mlx5/net/mlx5_vnet.c | 19
-rw-r--r--  drivers/vhost/vsock.c | 56
-rw-r--r--  drivers/video/console/vgacon.c | 56
-rw-r--r--  drivers/video/fbdev/core/fbcon.c | 2
-rw-r--r--  drivers/video/fbdev/core/fbmem.c | 2
-rw-r--r--  drivers/video/fbdev/hgafb.c | 21
-rw-r--r--  drivers/video/fbdev/imsttfb.c | 26
-rw-r--r--  drivers/xen/gntdev.c | 4
-rw-r--r--  drivers/xen/swiotlb-xen.c | 5
-rw-r--r--  drivers/xen/unpopulated-alloc.c | 4
-rw-r--r--  drivers/xen/xen-pciback/vpci.c | 14
-rw-r--r--  drivers/xen/xen-pciback/xenbus.c | 22
958 files changed, 49537 insertions, 11294 deletions
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 16c0fe8a72a7..d260bc1f3e6e 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -1313,6 +1313,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
{"PNP0C0B", }, /* Generic ACPI fan */
{"INT3404", }, /* Fan */
{"INTC1044", }, /* Fan for Tiger Lake generation */
+ {"INTC1048", }, /* Fan for Alder Lake generation */
{}
};
struct acpi_device *adev = ACPI_COMPANION(dev);
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index b852cff80287..f973bbe90e5e 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -142,6 +142,7 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
int acpi_power_on_resources(struct acpi_device *device, int state);
int acpi_power_transition(struct acpi_device *device, int state);
+void acpi_turn_off_unused_power_resources(void);
/* --------------------------------------------------------------------------
Device Power Management
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 958aaac869e8..23d9a09d7060 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -686,6 +686,13 @@ int nfit_spa_type(struct acpi_nfit_system_address *spa)
return -1;
}
+static size_t sizeof_spa(struct acpi_nfit_system_address *spa)
+{
+ if (spa->flags & ACPI_NFIT_LOCATION_COOKIE_VALID)
+ return sizeof(*spa);
+ return sizeof(*spa) - 8;
+}
+
static bool add_spa(struct acpi_nfit_desc *acpi_desc,
struct nfit_table_prev *prev,
struct acpi_nfit_system_address *spa)
@@ -693,22 +700,22 @@ static bool add_spa(struct acpi_nfit_desc *acpi_desc,
struct device *dev = acpi_desc->dev;
struct nfit_spa *nfit_spa;
- if (spa->header.length != sizeof(*spa))
+ if (spa->header.length != sizeof_spa(spa))
return false;
list_for_each_entry(nfit_spa, &prev->spas, list) {
- if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
+ if (memcmp(nfit_spa->spa, spa, sizeof_spa(spa)) == 0) {
list_move_tail(&nfit_spa->list, &acpi_desc->spas);
return true;
}
}
- nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
+ nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof_spa(spa),
GFP_KERNEL);
if (!nfit_spa)
return false;
INIT_LIST_HEAD(&nfit_spa->list);
- memcpy(nfit_spa->spa, spa, sizeof(*spa));
+ memcpy(nfit_spa->spa, spa, sizeof_spa(spa));
list_add_tail(&nfit_spa->list, &acpi_desc->spas);
dev_dbg(dev, "spa index: %d type: %s\n",
spa->range_index,
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 32974b575e46..56102eaaa2da 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -995,6 +995,7 @@ void acpi_resume_power_resources(void)
mutex_unlock(&power_resource_list_lock);
}
+#endif
void acpi_turn_off_unused_power_resources(void)
{
@@ -1015,4 +1016,3 @@ void acpi_turn_off_unused_power_resources(void)
mutex_unlock(&power_resource_list_lock);
}
-#endif
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index a22778e880c2..453eff8ec8c3 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -700,6 +700,7 @@ int acpi_device_add(struct acpi_device *device,
result = acpi_device_set_name(device, acpi_device_bus_id);
if (result) {
+ kfree_const(acpi_device_bus_id->bus_id);
kfree(acpi_device_bus_id);
goto err_unlock;
}
@@ -2359,6 +2360,8 @@ int __init acpi_scan_init(void)
}
}
+ acpi_turn_off_unused_power_resources();
+
acpi_scan_initialized = true;
out:
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index 1856f76ac83f..7fe41ee489d6 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -8,7 +8,6 @@ extern struct list_head acpi_wakeup_device_list;
extern struct mutex acpi_device_lock;
extern void acpi_resume_power_resources(void);
-extern void acpi_turn_off_unused_power_resources(void);
static inline acpi_status acpi_set_waking_vector(u32 wakeup_address)
{
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 3b54b8fd7396..e7ddd281afff 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -277,6 +277,20 @@ acpi_evaluate_integer(acpi_handle handle,
EXPORT_SYMBOL(acpi_evaluate_integer);
+int acpi_get_local_address(acpi_handle handle, u32 *addr)
+{
+ unsigned long long adr;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &adr);
+ if (ACPI_FAILURE(status))
+ return -ENODATA;
+
+ *addr = (u32)adr;
+ return 0;
+}
+EXPORT_SYMBOL(acpi_get_local_address);
+
acpi_status
acpi_evaluate_reference(acpi_handle handle,
acpi_string pathname,
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 61d34e1dc59c..bcec598b89f2 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -4918,7 +4918,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
uint32_t enable;
if (copy_from_user(&enable, ubuf, sizeof(enable))) {
- ret = -EINVAL;
+ ret = -EFAULT;
goto err;
}
binder_inner_proc_lock(proc);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 4a8bf8cda52b..628e33939aca 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -150,7 +150,7 @@ void fwnode_links_purge(struct fwnode_handle *fwnode)
fwnode_links_purge_consumers(fwnode);
}
-static void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
+void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
{
struct fwnode_handle *child;
@@ -164,6 +164,7 @@ static void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
fwnode_for_each_available_child_node(fwnode, child)
fw_devlink_purge_absent_suppliers(child);
}
+EXPORT_SYMBOL_GPL(fw_devlink_purge_absent_suppliers);
#ifdef CONFIG_SRCU
static DEFINE_MUTEX(device_links_lock);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 1fc1a992f90c..b570848d23e0 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1637,6 +1637,7 @@ void pm_runtime_init(struct device *dev)
dev->power.request_pending = false;
dev->power.request = RPM_REQ_NONE;
dev->power.deferred_resume = false;
+ dev->power.needs_force_resume = 0;
INIT_WORK(&dev->power.work, pm_runtime_work);
dev->power.timer_expires = 0;
@@ -1804,10 +1805,12 @@ int pm_runtime_force_suspend(struct device *dev)
* its parent, but set its status to RPM_SUSPENDED anyway in case this
* function will be called again for it in the meantime.
*/
- if (pm_runtime_need_not_resume(dev))
+ if (pm_runtime_need_not_resume(dev)) {
pm_runtime_set_suspended(dev);
- else
+ } else {
__update_runtime_status(dev, RPM_SUSPENDED);
+ dev->power.needs_force_resume = 1;
+ }
return 0;
@@ -1834,7 +1837,7 @@ int pm_runtime_force_resume(struct device *dev)
int (*callback)(struct device *);
int ret = 0;
- if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
+ if (!pm_runtime_status_suspended(dev) || !dev->power.needs_force_resume)
goto out;
/*
@@ -1853,6 +1856,7 @@ int pm_runtime_force_resume(struct device *dev)
pm_runtime_mark_last_busy(dev);
out:
+ dev->power.needs_force_resume = 0;
pm_runtime_enable(dev);
return ret;
}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 4ff71b579cfc..45d2c28c8fc8 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1980,7 +1980,8 @@ static void nbd_disconnect_and_put(struct nbd_device *nbd)
* config ref and try to destroy the workqueue from inside the work
* queue.
*/
- flush_workqueue(nbd->recv_workq);
+ if (nbd->recv_workq)
+ flush_workqueue(nbd->recv_workq);
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
&nbd->config->runtime_flags))
nbd_config_put(nbd);
@@ -2014,12 +2015,11 @@ static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
mutex_unlock(&nbd_index_mutex);
- if (!refcount_inc_not_zero(&nbd->config_refs)) {
- nbd_put(nbd);
- return 0;
- }
+ if (!refcount_inc_not_zero(&nbd->config_refs))
+ goto put_nbd;
nbd_disconnect_and_put(nbd);
nbd_config_put(nbd);
+put_nbd:
nbd_put(nbd);
return 0;
}
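
Both nbd hunks converge on the single-exit pattern: the final nbd_put() must run whether or not a config reference could be taken, so the early return becomes a goto to the shared tail. The resulting shape:

static int example_disconnect(struct nbd_device *nbd)
{
	if (!refcount_inc_not_zero(&nbd->config_refs))
		goto put_nbd;		/* no config left, still drop our ref */

	nbd_disconnect_and_put(nbd);
	nbd_config_put(nbd);
put_nbd:
	nbd_put(nbd);
	return 0;
}
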
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 5d603ef39bad..b88c63fbf7fb 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -2527,10 +2527,17 @@ static int btusb_intel_download_firmware_newgen(struct hci_dev *hdev,
}
btusb_setup_intel_newgen_get_fw_name(ver, fwname, sizeof(fwname), "sfi");
- err = request_firmware(&fw, fwname, &hdev->dev);
+ err = firmware_request_nowarn(&fw, fwname, &hdev->dev);
if (err < 0) {
+ if (!test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+ /* Firmware has already been loaded */
+ set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+ return 0;
+ }
+
bt_dev_err(hdev, "Failed to load Intel firmware file %s (%d)",
fwname, err);
+
return err;
}
@@ -2680,12 +2687,24 @@ download:
err = btusb_setup_intel_new_get_fw_name(ver, params, fwname,
sizeof(fwname), "sfi");
if (err < 0) {
+ if (!test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+ /* Firmware has already been loaded */
+ set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+ return 0;
+ }
+
bt_dev_err(hdev, "Unsupported Intel firmware naming");
return -EINVAL;
}
- err = request_firmware(&fw, fwname, &hdev->dev);
+ err = firmware_request_nowarn(&fw, fwname, &hdev->dev);
if (err < 0) {
+ if (!test_bit(BTUSB_BOOTLOADER, &data->flags)) {
+ /* Firmware has already been loaded */
+ set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
+ return 0;
+ }
+
bt_dev_err(hdev, "Failed to load Intel firmware file %s (%d)",
fwname, err);
return err;
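
All three btusb hunks apply the same idea: on controllers that already run operational firmware, the SFI file may legitimately be absent, so firmware_request_nowarn() avoids the noisy udev warning and a missing file is treated as success unless the controller is still in bootloader mode. The recurring shape:

err = firmware_request_nowarn(&fw, fwname, &hdev->dev);
if (err < 0) {
	if (!test_bit(BTUSB_BOOTLOADER, &data->flags)) {
		/* operational firmware already running: nothing to load */
		set_bit(BTUSB_FIRMWARE_LOADED, &data->flags);
		return 0;
	}
	bt_dev_err(hdev, "Failed to load Intel firmware file %s (%d)",
		   fwname, err);
	return err;
}
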
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 742b4a0932e3..c6d8c0f59722 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -744,6 +744,13 @@ static const struct blk_mq_ops gdrom_mq_ops = {
static int probe_gdrom(struct platform_device *devptr)
{
int err;
+
+ /*
+ * Ensure our "one" device is initialized properly in case it was
+ * used before
+ */
+ memset(&gd, 0, sizeof(gd));
+
/* Start the device */
if (gdrom_execute_diagnostic() != 1) {
pr_warn("ATA Probe for GDROM failed\n");
@@ -830,6 +837,8 @@ static int remove_gdrom(struct platform_device *devptr)
if (gdrom_major)
unregister_blkdev(gdrom_major, GDROM_DEV_NAME);
unregister_cdrom(gd.cd_info);
+ kfree(gd.cd_info);
+ kfree(gd.toc);
return 0;
}
@@ -845,7 +854,7 @@ static struct platform_driver gdrom_driver = {
static int __init init_gdrom(void)
{
int rc;
- gd.toc = NULL;
+
rc = platform_driver_register(&gdrom_driver);
if (rc)
return rc;
@@ -861,8 +870,6 @@ static void __exit exit_gdrom(void)
{
platform_device_unregister(pd);
platform_driver_unregister(&gdrom_driver);
- kfree(gd.toc);
- kfree(gd.cd_info);
}
module_init(init_gdrom);
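
The gdrom allocation lifetime now matches the bind lifetime: probe_gdrom() starts from a zeroed singleton and remove_gdrom() frees what probe allocated, instead of deferring the kfree()s to module exit. That matters because probe/remove can run repeatedly (unbind/rebind) within a single module lifetime. A compressed sketch of the symmetry, with assumed member types:

static struct gdrom_unit gd;		/* single static device instance */

static int probe(struct platform_device *pdev)
{
	memset(&gd, 0, sizeof(gd));	/* clean slate on every (re)bind */
	gd.toc = kzalloc(sizeof(*gd.toc), GFP_KERNEL);
	gd.cd_info = kzalloc(sizeof(*gd.cd_info), GFP_KERNEL);
	if (!gd.toc || !gd.cd_info)
		goto err;
	return 0;
err:
	kfree(gd.cd_info);
	kfree(gd.toc);
	return -ENOMEM;
}

static int remove(struct platform_device *pdev)
{
	kfree(gd.cd_info);		/* freed per unbind, not at module exit */
	kfree(gd.toc);
	return 0;
}
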
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index ed3b7dab678d..8b55085650ad 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -984,6 +984,8 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
hdp->hd_phys_address = fixmem32->address;
hdp->hd_address = ioremap(fixmem32->address,
HPET_RANGE_SIZE);
+ if (!hdp->hd_address)
+ return AE_ERROR;
if (hpet_is_known(hdp)) {
iounmap(hdp->hd_address);
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index eff1f12d981a..c84d23951219 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -656,6 +656,7 @@ int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip)
if (nr_commands !=
be32_to_cpup((__be32 *)&buf.data[TPM_HEADER_SIZE + 5])) {
+ rc = -EFAULT;
tpm_buf_destroy(&buf);
goto out;
}
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index a2e0395cbe61..55b9d3965ae1 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -709,16 +709,14 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
cap_t cap;
int ret;
- /* TPM 2.0 */
- if (chip->flags & TPM_CHIP_FLAG_TPM2)
- return tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
-
- /* TPM 1.2 */
ret = request_locality(chip, 0);
if (ret < 0)
return ret;
- ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ ret = tpm2_get_tpm_pt(chip, 0x100, &cap2, desc);
+ else
+ ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
release_locality(chip, 0);
@@ -1127,12 +1125,20 @@ int tpm_tis_resume(struct device *dev)
if (ret)
return ret;
- /* TPM 1.2 requires self-test on resume. This function actually returns
+ /*
+ * TPM 1.2 requires self-test on resume. This function actually returns
* an error code but for unknown reason it isn't handled.
*/
- if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
+ if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
+ ret = request_locality(chip, 0);
+ if (ret < 0)
+ return ret;
+
tpm1_do_selftest(chip);
+ release_locality(chip, 0);
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(tpm_tis_resume);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index e2ec1b745243..65508eb89ec9 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -4540,6 +4540,9 @@ int of_clk_add_provider(struct device_node *np,
struct of_clk_provider *cp;
int ret;
+ if (!np)
+ return 0;
+
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
if (!cp)
return -ENOMEM;
@@ -4579,6 +4582,9 @@ int of_clk_add_hw_provider(struct device_node *np,
struct of_clk_provider *cp;
int ret;
+ if (!np)
+ return 0;
+
cp = kzalloc(sizeof(*cp), GFP_KERNEL);
if (!cp)
return -ENOMEM;
@@ -4676,6 +4682,9 @@ void of_clk_del_provider(struct device_node *np)
{
struct of_clk_provider *cp;
+ if (!np)
+ return;
+
mutex_lock(&of_clk_mutex);
list_for_each_entry(cp, &of_clk_providers, link) {
if (cp->node == np) {
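
All three of_clk entry points gain the same guard: a NULL device_node becomes a successful no-op instead of a dereference, letting callers on non-DT configurations use the API unconditionally. The guard in isolation (hypothetical function name):

static int register_of_provider(struct device_node *np)
{
	if (!np)
		return 0;	/* nothing to register on non-DT systems */

	/* ... allocate the provider and add it to the list as before ... */
	return 0;
}
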
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 977fd05ac35f..d6ece7bbce89 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -419,7 +419,7 @@ static void resume_hv_clock_tsc(struct clocksource *arg)
hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr);
}
-#ifdef VDSO_CLOCKMODE_HVCLOCK
+#ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
static int hv_cs_enable(struct clocksource *cs)
{
vclocks_set_used(VDSO_CLOCKMODE_HVCLOCK);
@@ -435,7 +435,7 @@ static struct clocksource hyperv_cs_tsc = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.suspend= suspend_hv_clock_tsc,
.resume = resume_hv_clock_tsc,
-#ifdef VDSO_CLOCKMODE_HVCLOCK
+#ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
.enable = hv_cs_enable,
.vdso_clock_mode = VDSO_CLOCKMODE_HVCLOCK,
#else
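
The original #ifdef never fired: VDSO_CLOCKMODE_HVCLOCK is an enumerator of enum vdso_clock_mode, and the preprocessor only sees macros, so the vDSO enable hook was silently compiled out. Architectures that support the mode also define the macro HAVE_VDSO_CLOCKMODE_HVCLOCK, which is what the fix tests. A self-contained illustration of the pitfall:

enum vdso_clock_mode { VDSO_CLOCKMODE_HVCLOCK = 1 };

#ifdef VDSO_CLOCKMODE_HVCLOCK	/* never true: enumerator, not a macro */
#error "unreachable"
#endif

#define HAVE_VDSO_CLOCKMODE_HVCLOCK	/* macro provided alongside the enum */

#ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK	/* this is what the fixed code tests */
/* vDSO clocksource hooks get compiled in here */
#endif
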
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index d1bbc16fba4b..7e7450453714 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -646,7 +646,11 @@ static u64 get_max_boost_ratio(unsigned int cpu)
return 0;
}
- highest_perf = perf_caps.highest_perf;
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ highest_perf = amd_get_highest_perf();
+ else
+ highest_perf = perf_caps.highest_perf;
+
nominal_perf = perf_caps.nominal_perf;
if (!highest_perf || !nominal_perf) {
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index f0401064d7aa..0e69dffd5a76 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -3033,6 +3033,14 @@ static const struct x86_cpu_id hwp_support_ids[] __initconst = {
{}
};
+static bool intel_pstate_hwp_is_enabled(void)
+{
+ u64 value;
+
+ rdmsrl(MSR_PM_ENABLE, value);
+ return !!(value & 0x1);
+}
+
static int __init intel_pstate_init(void)
{
const struct x86_cpu_id *id;
@@ -3051,8 +3059,12 @@ static int __init intel_pstate_init(void)
* Avoid enabling HWP for processors without EPP support,
* because that means incomplete HWP implementation which is a
* corner case and supporting it is generally problematic.
+ *
+ * If HWP is enabled already, though, there is no choice but to
+ * deal with it.
*/
- if (!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) {
+ if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) ||
+ intel_pstate_hwp_is_enabled()) {
hwp_active++;
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
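
intel_pstate_hwp_is_enabled() reads IA32_PM_ENABLE (MSR_PM_ENABLE, 0x770); bit 0 is the HWP enable bit, and once firmware sets it the bit stays set until reset, hence "no choice but to deal with it". The check in isolation:

static bool hwp_already_enabled(void)
{
	u64 value;

	rdmsrl(MSR_PM_ENABLE, value);	/* IA32_PM_ENABLE, MSR 0x770 */
	return value & 0x1;		/* bit 0: HWP enable (sticky) */
}
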
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index facc8e6bc580..d385daf2c71c 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -442,7 +442,6 @@ static int nitrox_probe(struct pci_dev *pdev,
err = pci_request_mem_regions(pdev, nitrox_driver_name);
if (err) {
pci_disable_device(pdev);
- dev_err(&pdev->dev, "Failed to request mem regions!\n");
return err;
}
pci_set_master(pdev);
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index f264b70c383e..eadd1eaa2fb5 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -760,7 +760,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
if (dma_buf_is_dynamic(attach->dmabuf)) {
dma_resv_lock(attach->dmabuf->resv, NULL);
- ret = dma_buf_pin(attach);
+ ret = dmabuf->ops->pin(attach);
if (ret)
goto err_unlock;
}
@@ -786,7 +786,7 @@ err_attach:
err_unpin:
if (dma_buf_is_dynamic(attach->dmabuf))
- dma_buf_unpin(attach);
+ dmabuf->ops->unpin(attach);
err_unlock:
if (dma_buf_is_dynamic(attach->dmabuf))
@@ -843,7 +843,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
__unmap_dma_buf(attach, attach->sgt, attach->dir);
if (dma_buf_is_dynamic(attach->dmabuf)) {
- dma_buf_unpin(attach);
+ dmabuf->ops->unpin(attach);
dma_resv_unlock(attach->dmabuf->resv);
}
}
@@ -956,7 +956,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
if (dma_buf_is_dynamic(attach->dmabuf)) {
dma_resv_assert_held(attach->dmabuf->resv);
if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
- r = dma_buf_pin(attach);
+ r = attach->dmabuf->ops->pin(attach);
if (r)
return ERR_PTR(r);
}
@@ -968,7 +968,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
- dma_buf_unpin(attach);
+ attach->dmabuf->ops->unpin(attach);
if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
attach->sgt = sg_table;
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 806ca02c52d7..62026607f3f8 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -418,8 +418,23 @@ static int __init hidma_mgmt_init(void)
hidma_mgmt_of_populate_channels(child);
}
#endif
- return platform_driver_register(&hidma_mgmt_driver);
+ /*
+ * We do not check the return value here, as it is assumed that
+ * platform_driver_register() cannot fail. The reason is that the
+ * (potential) hidma_mgmt_of_populate_channels calls above are not
+ * cleaned up if it does fail, and doing that cleanup is quite
+ * complicated. In particular, the various calls to
+ * of_address_to_resource, of_irq_to_resource,
+ * platform_device_register_full, of_dma_configure, and
+ * of_msi_configure, which in turn call other functions, would all
+ * have to be unwound - not a trivial exercise.
+ *
+ * Currently, this module is not intended to be unloaded, and there is
+ * no module_exit function defined to do the needed cleanup. For this
+ * reason, we have to assume success here.
+ */
+ platform_driver_register(&hidma_mgmt_driver);
+ return 0;
}
module_init(hidma_mgmt_init);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 9fa4dfc6ebee..f0d8f60acee1 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -3083,7 +3083,7 @@ static void read_mc_regs(struct amd64_pvt *pvt)
edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
/* Check first whether TOP_MEM2 is enabled: */
- rdmsrl(MSR_K8_SYSCFG, msr_val);
+ rdmsrl(MSR_AMD64_SYSCFG, msr_val);
if (msr_val & BIT(21)) {
rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
diff --git a/drivers/firmware/arm_scmi/notify.h b/drivers/firmware/arm_scmi/notify.h
index ce0324be6c71..4e9b627edfef 100644
--- a/drivers/firmware/arm_scmi/notify.h
+++ b/drivers/firmware/arm_scmi/notify.h
@@ -79,8 +79,6 @@ struct scmi_protocol_events {
int scmi_notification_init(struct scmi_handle *handle);
void scmi_notification_exit(struct scmi_handle *handle);
-
-struct scmi_protocol_handle;
int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id,
const struct scmi_protocol_handle *ph,
const struct scmi_protocol_events *ee);
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index d0dee37ad522..4ceba5ef7895 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -552,8 +552,10 @@ static unsigned long scpi_clk_get_val(u16 clk_id)
ret = scpi_send_message(CMD_GET_CLOCK_VALUE, &le_clk_id,
sizeof(le_clk_id), &rate, sizeof(rate));
+ if (ret)
+ return 0;
- return ret ? ret : le32_to_cpu(rate);
+ return le32_to_cpu(rate);
}
static int scpi_clk_set_val(u16 clk_id, unsigned long rate)
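
The old expression `ret ? ret : le32_to_cpu(rate)` folded a negative errno into the function's unsigned long return type, so callers saw an enormous bogus frequency instead of an error; returning 0 instead is the value clock consumers treat as "rate unknown". The bug class in miniature (query_firmware() is a hypothetical stand-in for the SCPI transport):

static int query_firmware(u32 *rate);	/* hypothetical transport call */

static unsigned long get_rate(void)
{
	u32 rate;
	int ret = query_firmware(&rate);

	if (ret)
		return 0;	/* error maps to "rate unknown", not -errno:
				 * a negative ret returned directly would
				 * become e.g. 0xfffffffffffffffb on 64-bit */
	return rate;
}
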
diff --git a/drivers/gpio/gpio-cadence.c b/drivers/gpio/gpio-cadence.c
index a4d3239d2594..4ab3fcd9b9ba 100644
--- a/drivers/gpio/gpio-cadence.c
+++ b/drivers/gpio/gpio-cadence.c
@@ -278,6 +278,7 @@ static const struct of_device_id cdns_of_ids[] = {
{ .compatible = "cdns,gpio-r1p02" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, cdns_of_ids);
static struct platform_driver cdns_gpio_driver = {
.driver = {
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 1bd9e44df718..05974b760796 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -444,16 +444,6 @@ static int tegra186_irq_set_wake(struct irq_data *data, unsigned int on)
return 0;
}
-static int tegra186_irq_set_affinity(struct irq_data *data,
- const struct cpumask *dest,
- bool force)
-{
- if (data->parent_data)
- return irq_chip_set_affinity_parent(data, dest, force);
-
- return -EINVAL;
-}
-
static void tegra186_gpio_irq(struct irq_desc *desc)
{
struct tegra_gpio *gpio = irq_desc_get_handler_data(desc);
@@ -700,7 +690,6 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->intc.irq_unmask = tegra186_irq_unmask;
gpio->intc.irq_set_type = tegra186_irq_set_type;
gpio->intc.irq_set_wake = tegra186_irq_set_wake;
- gpio->intc.irq_set_affinity = tegra186_irq_set_affinity;
irq = &gpio->gpio.irq;
irq->chip = &gpio->intc;
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index b411d3156e0b..136557e7dd3c 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -542,7 +542,7 @@ static void xgpio_irqhandler(struct irq_desc *desc)
}
/**
- * xgpio_of_probe - Probe method for the GPIO device.
+ * xgpio_probe - Probe method for the GPIO device.
* @pdev: pointer to the platform device
*
* Return:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index dc3a69296321..264176a01e16 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1006,6 +1006,7 @@ struct amdgpu_device {
struct amdgpu_df df;
struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
+ uint32_t harvest_ip_mask;
int num_ip_blocks;
struct mutex mn_lock;
DECLARE_HASHTABLE(mn_hash, 7);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b4ad1c055c70..66ddfe4f58c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1683,6 +1683,19 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
if (!ip_block_version)
return -EINVAL;
+ switch (ip_block_version->type) {
+ case AMD_IP_BLOCK_TYPE_VCN:
+ if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
+ return 0;
+ break;
+ case AMD_IP_BLOCK_TYPE_JPEG:
+ if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
+ return 0;
+ break;
+ default:
+ break;
+ }
+
DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
ip_block_version->funcs->name);
@@ -3111,7 +3124,6 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
return amdgpu_device_asic_has_dc_support(adev->asic_type);
}
-
static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
{
struct amdgpu_device *adev =
@@ -3276,6 +3288,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->vm_manager.vm_pte_funcs = NULL;
adev->vm_manager.vm_pte_num_scheds = 0;
adev->gmc.gmc_funcs = NULL;
+ adev->harvest_ip_mask = 0x0;
adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
@@ -3410,19 +3423,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/* doorbell bar mapping and doorbell index init*/
amdgpu_device_doorbell_init(adev);
- /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
- /* this will fail for cards that aren't VGA class devices, just
- * ignore it */
- if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
- vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
-
- if (amdgpu_device_supports_px(ddev)) {
- px = true;
- vga_switcheroo_register_client(adev->pdev,
- &amdgpu_switcheroo_ops, px);
- vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
- }
-
if (amdgpu_emu_mode == 1) {
/* post the asic on emulation mode */
emu_soc_asic_init(adev);
@@ -3619,6 +3619,19 @@ fence_driver_init:
if (amdgpu_device_cache_pci_state(adev->pdev))
pci_restore_state(pdev);
+ /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
+ /* this will fail for cards that aren't VGA class devices, just
+ * ignore it */
+ if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
+ vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
+
+ if (amdgpu_device_supports_px(ddev)) {
+ px = true;
+ vga_switcheroo_register_client(adev->pdev,
+ &amdgpu_switcheroo_ops, px);
+ vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
+ }
+
if (adev->gmc.xgmi.pending_reset)
queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
msecs_to_jiffies(AMDGPU_RESUME_MS));
@@ -3630,8 +3643,6 @@ release_ras_con:
failed:
amdgpu_vf_error_trans_all(adev);
- if (px)
- vga_switcheroo_fini_domain_pm_ops(adev->dev);
failed_unmap:
iounmap(adev->rmmio);
@@ -4468,7 +4479,6 @@ out:
r = amdgpu_ib_ring_tests(tmp_adev);
if (r) {
dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
- r = amdgpu_device_ip_suspend(tmp_adev);
need_full_reset = true;
r = -EAGAIN;
goto end;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index b2dbcb4df020..e1b6f5891759 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -373,6 +373,34 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
return -EINVAL;
}
+void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
+{
+ struct binary_header *bhdr;
+ struct harvest_table *harvest_info;
+ int i;
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ harvest_info = (struct harvest_table *)(adev->mman.discovery_bin +
+ le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset));
+
+ for (i = 0; i < 32; i++) {
+ if (le32_to_cpu(harvest_info->list[i].hw_id) == 0)
+ break;
+
+ switch (le32_to_cpu(harvest_info->list[i].hw_id)) {
+ case VCN_HWID:
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+ break;
+ case DMU_HWID:
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
struct binary_header *bhdr;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index 8f6183801cb3..1b1ae21b1037 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -29,6 +29,7 @@
void amdgpu_discovery_fini(struct amdgpu_device *adev);
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev);
+void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev);
int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
int *major, int *minor, int *revision);
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 2e622c1675d7..8a1fb8b6606e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -837,6 +837,174 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
return 0;
}
+static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
+ unsigned int *width, unsigned int *height)
+{
+ unsigned int cpp_log2 = ilog2(cpp);
+ unsigned int pixel_log2 = block_log2 - cpp_log2;
+ unsigned int width_log2 = (pixel_log2 + 1) / 2;
+ unsigned int height_log2 = pixel_log2 - width_log2;
+
+ *width = 1 << width_log2;
+ *height = 1 << height_log2;
+}
+
+static unsigned int get_dcc_block_size(uint64_t modifier, bool rb_aligned,
+ bool pipe_aligned)
+{
+ unsigned int ver = AMD_FMT_MOD_GET(TILE_VERSION, modifier);
+
+ switch (ver) {
+ case AMD_FMT_MOD_TILE_VER_GFX9: {
+ /*
+ * TODO: for pipe aligned we may need to check the alignment of the
+ * total size of the surface, which may need to be bigger than the
+ * natural alignment due to some HW workarounds
+ */
+ return max(10 + (rb_aligned ? (int)AMD_FMT_MOD_GET(RB, modifier) : 0), 12);
+ }
+ case AMD_FMT_MOD_TILE_VER_GFX10:
+ case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS: {
+ int pipes_log2 = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
+
+ if (ver == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS && pipes_log2 > 1 &&
+ AMD_FMT_MOD_GET(PACKERS, modifier) == pipes_log2)
+ ++pipes_log2;
+
+ return max(8 + (pipe_aligned ? pipes_log2 : 0), 12);
+ }
+ default:
+ return 0;
+ }
+}
+
+static int amdgpu_display_verify_plane(struct amdgpu_framebuffer *rfb, int plane,
+ const struct drm_format_info *format,
+ unsigned int block_width, unsigned int block_height,
+ unsigned int block_size_log2)
+{
+ unsigned int width = rfb->base.width /
+ ((plane && plane < format->num_planes) ? format->hsub : 1);
+ unsigned int height = rfb->base.height /
+ ((plane && plane < format->num_planes) ? format->vsub : 1);
+ unsigned int cpp = plane < format->num_planes ? format->cpp[plane] : 1;
+ unsigned int block_pitch = block_width * cpp;
+ unsigned int min_pitch = ALIGN(width * cpp, block_pitch);
+ unsigned int block_size = 1 << block_size_log2;
+ uint64_t size;
+
+ if (rfb->base.pitches[plane] % block_pitch) {
+ drm_dbg_kms(rfb->base.dev,
+ "pitch %d for plane %d is not a multiple of block pitch %d\n",
+ rfb->base.pitches[plane], plane, block_pitch);
+ return -EINVAL;
+ }
+ if (rfb->base.pitches[plane] < min_pitch) {
+ drm_dbg_kms(rfb->base.dev,
+ "pitch %d for plane %d is less than minimum pitch %d\n",
+ rfb->base.pitches[plane], plane, min_pitch);
+ return -EINVAL;
+ }
+
+ /* Force at least natural alignment. */
+ if (rfb->base.offsets[plane] % block_size) {
+ drm_dbg_kms(rfb->base.dev,
+ "offset 0x%x for plane %d is not a multiple of block pitch 0x%x\n",
+ rfb->base.offsets[plane], plane, block_size);
+ return -EINVAL;
+ }
+
+ size = rfb->base.offsets[plane] +
+ (uint64_t)rfb->base.pitches[plane] / block_pitch *
+ block_size * DIV_ROUND_UP(height, block_height);
+
+ if (rfb->base.obj[0]->size < size) {
+ drm_dbg_kms(rfb->base.dev,
+ "BO size 0x%zx is less than 0x%llx required for plane %d\n",
+ rfb->base.obj[0]->size, size, plane);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
+{
+ const struct drm_format_info *format_info = drm_format_info(rfb->base.format->format);
+ uint64_t modifier = rfb->base.modifier;
+ int ret;
+ unsigned int i, block_width, block_height, block_size_log2;
+
+ if (!rfb->base.dev->mode_config.allow_fb_modifiers)
+ return 0;
+
+ for (i = 0; i < format_info->num_planes; ++i) {
+ if (modifier == DRM_FORMAT_MOD_LINEAR) {
+ block_width = 256 / format_info->cpp[i];
+ block_height = 1;
+ block_size_log2 = 8;
+ } else {
+ int swizzle = AMD_FMT_MOD_GET(TILE, modifier);
+
+ switch ((swizzle & ~3) + 1) {
+ case DC_SW_256B_S:
+ block_size_log2 = 8;
+ break;
+ case DC_SW_4KB_S:
+ case DC_SW_4KB_S_X:
+ block_size_log2 = 12;
+ break;
+ case DC_SW_64KB_S:
+ case DC_SW_64KB_S_T:
+ case DC_SW_64KB_S_X:
+ block_size_log2 = 16;
+ break;
+ default:
+ drm_dbg_kms(rfb->base.dev,
+ "Swizzle mode with unknown block size: %d\n", swizzle);
+ return -EINVAL;
+ }
+
+ get_block_dimensions(block_size_log2, format_info->cpp[i],
+ &block_width, &block_height);
+ }
+
+ ret = amdgpu_display_verify_plane(rfb, i, format_info,
+ block_width, block_height, block_size_log2);
+ if (ret)
+ return ret;
+ }
+
+ if (AMD_FMT_MOD_GET(DCC, modifier)) {
+ if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
+ block_size_log2 = get_dcc_block_size(modifier, false, false);
+ get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
+ &block_width, &block_height);
+ ret = amdgpu_display_verify_plane(rfb, i, format_info,
+ block_width, block_height,
+ block_size_log2);
+ if (ret)
+ return ret;
+
+ ++i;
+ block_size_log2 = get_dcc_block_size(modifier, true, true);
+ } else {
+ bool pipe_aligned = AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier);
+
+ block_size_log2 = get_dcc_block_size(modifier, true, pipe_aligned);
+ }
+ get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
+ &block_width, &block_height);
+ ret = amdgpu_display_verify_plane(rfb, i, format_info,
+ block_width, block_height, block_size_log2);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
uint64_t *tiling_flags, bool *tmz_surface)
{
@@ -902,10 +1070,8 @@ int amdgpu_display_gem_fb_verify_and_init(
int ret;
rfb->base.obj[0] = obj;
-
- /* Verify that bo size can fit the fb size. */
- ret = drm_gem_fb_init_with_funcs(dev, &rfb->base, file_priv, mode_cmd,
- &amdgpu_fb_funcs);
+ drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
if (ret)
goto err;
/* Verify that the modifier is supported. */
@@ -967,9 +1133,12 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
}
}
- for (i = 1; i < rfb->base.format->num_planes; ++i) {
+ ret = amdgpu_display_verify_sizes(rfb);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < rfb->base.format->num_planes; ++i) {
drm_gem_object_get(rfb->base.obj[0]);
- drm_gem_object_put(rfb->base.obj[i]);
rfb->base.obj[i] = rfb->base.obj[0];
}
@@ -999,6 +1168,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
+ drm_gem_object_put(obj);
return ERR_PTR(-EINVAL);
}
@@ -1412,7 +1582,7 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
}
}
}
- return r;
+ return 0;
}
int amdgpu_display_resume_helper(struct amdgpu_device *adev)
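
For reference, get_block_dimensions() above splits a block's byte-size log2 into pixel width and height, giving width the extra bit when the pixel-count log2 is odd. A standalone worked example (userspace sketch, assuming cpp is a power of two as in the driver):

#include <stdio.h>

static void block_dims(unsigned int block_log2, unsigned int cpp,
		       unsigned int *w, unsigned int *h)
{
	unsigned int cpp_log2 = __builtin_ctz(cpp);	/* ilog2 for powers of two */
	unsigned int pixel_log2 = block_log2 - cpp_log2;
	unsigned int w_log2 = (pixel_log2 + 1) / 2;	/* width gets the odd bit */

	*w = 1u << w_log2;
	*h = 1u << (pixel_log2 - w_log2);
}

int main(void)
{
	unsigned int w, h;

	block_dims(16, 4, &w, &h);	/* 64KB swizzle block, 32bpp */
	printf("%ux%u\n", w, h);	/* prints 128x128 */
	return 0;
}
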
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 922938931e1a..f93883db2b46 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1573,6 +1573,9 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
amdgpu_device_baco_exit(drm_dev);
}
ret = amdgpu_device_resume(drm_dev, false);
+ if (ret)
+ return ret;
+
if (amdgpu_device_supports_px(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
adev->in_runpm = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 4f10c4529840..09b048647523 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -288,10 +288,13 @@ out:
static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
{
struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
+ int i;
drm_fb_helper_unregister_fbi(&rfbdev->helper);
if (rfb->base.obj[0]) {
+ for (i = 0; i < rfb->base.format->num_planes; i++)
+ drm_gem_object_put(rfb->base.obj[0]);
amdgpufb_destroy_pinned_object(rfb->base.obj[0]);
rfb->base.obj[0] = NULL;
drm_framebuffer_unregister_private(&rfb->base);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 94b069630db3..b4971e90b98c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -215,7 +215,11 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
/* Check if we have an idle VMID */
i = 0;
list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
- fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
+ /* Don't use per-engine and per-process VMIDs at the same time */
+ struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
+ NULL : ring;
+
+ fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
if (!fences[i])
break;
++i;
@@ -281,7 +285,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
if (updates && (*id)->flushed_updates &&
updates->context == (*id)->flushed_updates->context &&
!dma_fence_is_later(updates, (*id)->flushed_updates))
- updates = NULL;
+ updates = NULL;
if ((*id)->owner != vm->immediate.fence_context ||
job->vm_pd_addr != (*id)->pd_gpu_addr ||
@@ -290,6 +294,10 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
!dma_fence_is_signaled((*id)->last_flush))) {
struct dma_fence *tmp;
+ /* Don't use per-engine and per-process VMIDs at the same time */
+ if (adev->vm_manager.concurrent_flush)
+ ring = NULL;
+
/* to prevent one context starved by another context */
(*id)->pd_gpu_addr = 0;
tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
@@ -365,12 +373,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
needs_flush = true;
- /* Concurrent flushes are only possible starting with Vega10 and
- * are broken on Navi10 and Navi14.
- */
- if (needs_flush && (adev->asic_type < CHIP_VEGA10 ||
- adev->asic_type == CHIP_NAVI10 ||
- adev->asic_type == CHIP_NAVI14))
+ if (needs_flush && !adev->vm_manager.concurrent_flush)
continue;
/* Good, we can use this VMID. Remember this submission as
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 3bef0432cac2..d5cbc51c5eaa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -225,7 +225,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
*addr += mm_cur->start & ~PAGE_MASK;
num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
- num_bytes = num_pages * 8;
+ num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
AMDGPU_IB_POOL_DELAYED, &job);
@@ -1210,6 +1210,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
if (gtt && gtt->userptr) {
amdgpu_ttm_tt_set_user_pages(ttm, NULL);
kfree(ttm->sg);
+ ttm->sg = NULL;
ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
return;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 0ffdf847cad0..9acee4a5b2ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -3148,6 +3148,12 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
unsigned i;
+ /* Concurrent flushes are only possible starting with Vega10 and
+ * are broken on Navi10 and Navi14.
+ */
+ adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
+ adev->asic_type == CHIP_NAVI10 ||
+ adev->asic_type == CHIP_NAVI14);
amdgpu_vmid_mgr_init(adev);
adev->vm_manager.fence_context =
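
The ASIC quirk is now evaluated once at init, and the VMID-grab hot paths in amdgpu_ids.c above simply branch on the cached bool. The compute-once pattern in a small sketch:

struct vm_mgr {
	bool concurrent_flush;		/* cached per-ASIC capability */
};

static void vm_mgr_init(struct vm_mgr *mgr, int asic_type)
{
	/* evaluate the hardware policy exactly once */
	mgr->concurrent_flush = !(asic_type < CHIP_VEGA10 ||
				  asic_type == CHIP_NAVI10 ||
				  asic_type == CHIP_NAVI14);
}
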
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 976a12e5a8b9..4e140288159c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -331,6 +331,7 @@ struct amdgpu_vm_manager {
/* Handling of VMIDs */
struct amdgpu_vmid_mgr id_mgr[AMDGPU_MAX_VMHUBS];
unsigned int first_kfd_vmid;
+ bool concurrent_flush;
/* Handling of VM fences */
u64 fence_context;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 2408ed4c7d84..7ce76a6b3a35 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -1395,9 +1395,10 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG, 0xffffffff, 0x20000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xffffffff, 0x00000420),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04800000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x04900000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DFSM_TILES_IN_FLIGHT, 0x0000ffff, 0x0000003f),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_LAST_OF_BURST_CONFIG, 0xffffffff, 0x03860204),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1800ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff0ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGE_PRIV_CONTROL, 0x00007fff, 0x000001fe),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0xffffffff, 0xe4e4e4e4),
@@ -1415,12 +1416,13 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_2, 0x00000820, 0x00000820),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_SPARE, 0xffffffff, 0xffff3101),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x001f0000, 0x00070104),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ALU_CLK_CTRL, 0xffffffff, 0xffffffff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_ARB_CONFIG, 0x00000133, 0x00000130),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0xffdf80ff, 0x479c0010),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00c00000)
};
static bool gfx_v10_is_rlcg_rw(struct amdgpu_device *adev, u32 offset, uint32_t *flag, bool write)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index a078a38c2cee..516467e962b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -4943,7 +4943,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
amdgpu_gfx_rlc_enter_safe_mode(adev);
/* Enable 3D CGCG/CGLS */
- if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+ if (enable) {
/* write cmd to clear cgcg/cgls ov */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
/* unset CGCG override */
@@ -4955,8 +4955,12 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
/* enable 3Dcgcg FSM(0x0000363f) */
def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D);
- data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
- RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGCG)
+ data = (0x36 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
+ RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK;
+ else
+ data = 0x0 << RLC_CGCG_CGLS_CTRL_3D__CGCG_GFX_IDLE_THRESHOLD__SHIFT;
+
if (adev->cg_flags & AMD_CG_SUPPORT_GFX_3D_CGLS)
data |= (0x000F << RLC_CGCG_CGLS_CTRL_3D__CGLS_REP_COMPANSAT_DELAY__SHIFT) |
RLC_CGCG_CGLS_CTRL_3D__CGLS_EN_MASK;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index c1bd190841f8..e4f27b3f28fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -59,6 +59,7 @@ MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_32_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_k_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_k_mc.bin");
@@ -243,10 +244,16 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
chip_name = "polaris10";
break;
case CHIP_POLARIS12:
- if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision))
+ if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
chip_name = "polaris12_k";
- else
- chip_name = "polaris12";
+ } else {
+ WREG32(mmMC_SEQ_IO_DEBUG_INDEX, ixMC_IO_DEBUG_UP_159);
+ /* Polaris12 32bit ASIC needs a special MC firmware */
+ if (RREG32(mmMC_SEQ_IO_DEBUG_DATA) == 0x05b4dc40)
+ chip_name = "polaris12_32";
+ else
+ chip_name = "polaris12";
+ }
break;
case CHIP_FIJI:
case CHIP_CARRIZO:
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 83531997aeba..938ef4ce5b76 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -198,8 +198,6 @@ static int jpeg_v2_5_hw_fini(void *handle)
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
-
- ring->sched.ready = false;
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
index de5dfcfb3859..94be35357f7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
@@ -166,8 +166,6 @@ static int jpeg_v3_0_hw_fini(void *handle)
RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
- ring->sched.ready = false;
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index d54af7f8801b..d290ca0b06da 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -623,6 +623,16 @@ static const struct amdgpu_ip_block_version nv_common_ip_block =
.funcs = &nv_common_ip_funcs,
};
+static bool nv_is_headless_sku(struct pci_dev *pdev)
+{
+ if ((pdev->device == 0x731E &&
+ (pdev->revision == 0xC6 || pdev->revision == 0xC7)) ||
+ (pdev->device == 0x7340 && pdev->revision == 0xC9) ||
+ (pdev->device == 0x7360 && pdev->revision == 0xC7))
+ return true;
+ return false;
+}
+
static int nv_reg_base_init(struct amdgpu_device *adev)
{
int r;
@@ -635,6 +645,12 @@ static int nv_reg_base_init(struct amdgpu_device *adev)
goto legacy_init;
}
+ amdgpu_discovery_harvest_ip(adev);
+ if (nv_is_headless_sku(adev->pdev)) {
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+ }
+
return 0;
}
@@ -671,16 +687,6 @@ void nv_set_virt_ops(struct amdgpu_device *adev)
adev->virt.ops = &xgpu_nv_virt_ops;
}
-static bool nv_is_headless_sku(struct pci_dev *pdev)
-{
- if ((pdev->device == 0x731E &&
- (pdev->revision == 0xC6 || pdev->revision == 0xC7)) ||
- (pdev->device == 0x7340 && pdev->revision == 0xC9) ||
- (pdev->device == 0x7360 && pdev->revision == 0xC7))
- return true;
- return false;
-}
-
int nv_set_ip_blocks(struct amdgpu_device *adev)
{
int r;
@@ -728,8 +734,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- if (!nv_is_headless_sku(adev->pdev))
- amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
if (adev->enable_mes)
amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
@@ -752,8 +757,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- if (!nv_is_headless_sku(adev->pdev))
- amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
if (!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
break;
@@ -777,7 +781,6 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
if (!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
-
if (adev->enable_mes)
amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
break;
@@ -1149,6 +1152,11 @@ static int nv_common_early_init(void *handle)
return -EINVAL;
}
+ if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
+ adev->pg_flags &= ~(AMD_PG_SUPPORT_VCN |
+ AMD_PG_SUPPORT_VCN_DPG |
+ AMD_PG_SUPPORT_JPEG);
+
if (amdgpu_sriov_vf(adev)) {
amdgpu_virt_init_setting(adev);
xgpu_nv_mailbox_set_irq_funcs(adev);
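
The per-call-site nv_is_headless_sku() checks are replaced by the generic harvest mask: nv_reg_base_init() sets the VCN/JPEG harvest bits (from the discovery table or the PCI-ID list), amdgpu_device_ip_block_add() filters harvested blocks centrally, and nv_common_early_init() strips the matching powergating flags. Roughly, a headless SKU flows through:

adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK |
			 AMD_HARVEST_IP_JPEG_MASK;	/* set once */

/* ... every later ip_block_add of a VCN/JPEG block is then a no-op,
 * and the PG flags for the missing engines are cleared: */
if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
	adev->pg_flags &= ~(AMD_PG_SUPPORT_VCN |
			    AMD_PG_SUPPORT_VCN_DPG |
			    AMD_PG_SUPPORT_JPEG);
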
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 920fc6d4a127..8859133ce37e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -123,6 +123,10 @@ static const struct soc15_reg_golden golden_settings_sdma_nv14[] = {
static const struct soc15_reg_golden golden_settings_sdma_nv12[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG, 0x001877ff, 0x00000044),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x001877ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index b1ad9e52b234..240596b25fe4 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -497,11 +497,6 @@ static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
}
-
- sdma0->sched.ready = false;
- sdma1->sched.ready = false;
- sdma2->sched.ready = false;
- sdma3->sched.ready = false;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index d80e12b80c7e..e65c286f93a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -302,6 +302,7 @@ static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
*codecs = &rv_video_codecs_decode;
return 0;
case CHIP_ARCTURUS:
+ case CHIP_ALDEBARAN:
case CHIP_RENOIR:
if (encode)
*codecs = &vega_video_codecs_encode;
@@ -1392,7 +1393,6 @@ static int soc15_common_early_init(void *handle)
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
- AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
@@ -1401,7 +1401,8 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_MC_MGCG |
AMD_CG_SUPPORT_MC_LS |
AMD_CG_SUPPORT_SDMA_MGCG |
- AMD_CG_SUPPORT_SDMA_LS;
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_VCN_MGCG;
adev->pg_flags = AMD_PG_SUPPORT_SDMA |
AMD_PG_SUPPORT_MMHUB |
@@ -1411,7 +1412,6 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_RLC_LS |
AMD_CG_SUPPORT_GFX_CP_LS |
- AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 51a773a37a35..0c1beefa3e49 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1119,10 +1119,10 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp);
- /* put VCPU into reset */
- WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
- UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
- ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+ /* stall UMC channel */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2),
+ UVD_LMI_CTRL2__STALL_ARB_UMC_MASK,
+ ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
@@ -1141,6 +1141,11 @@ static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev)
UVD_SOFT_RESET__LMI_SOFT_RESET_MASK,
~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);
+ /* put VCPU into reset */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET),
+ UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
+ ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
+
WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0);
vcn_v1_0_enable_clock_gating(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 3f15bf34123a..14470da52113 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -373,7 +373,7 @@ static int vcn_v3_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
- int i, j;
+ int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -388,12 +388,6 @@ static int vcn_v3_0_hw_fini(void *handle)
vcn_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
}
}
- ring->sched.ready = false;
-
- for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
- ring = &adev->vcn.inst[i].ring_enc[j];
- ring->sched.ready = false;
- }
}
return 0;
@@ -589,6 +583,10 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
VCN, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0),
AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
+
+ /* VCN global tiling registers */
+ WREG32_SOC15_DPG_MODE(0, SOC15_DPG_MODE_OFFSET(
+ UVD, 0, mmUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
static void vcn_v3_0_disable_static_power_gating(struct amdgpu_device *adev, int inst)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index b34ab76c5f4c..389eff96fcf6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4015,6 +4015,23 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
scaling_info->src_rect.x = state->src_x >> 16;
scaling_info->src_rect.y = state->src_y >> 16;
+ /*
+ * For reasons we don't (yet) fully understand, a non-zero
+ * src_y coordinate into an NV12 buffer can cause a
+ * system hang. To avoid hangs (and perhaps err on the side of
+ * caution) let's reject both non-zero src_x and src_y.
+ *
+ * We currently know of only one use-case that reproduces a
+ * scenario with non-zero src_x and src_y for NV12: gesturing
+ * the YouTube Android app into full screen on ChromeOS.
+ */
+ if (state->fb &&
+ state->fb->format->format == DRM_FORMAT_NV12 &&
+ (scaling_info->src_rect.x != 0 ||
+ scaling_info->src_rect.y != 0))
+ return -EINVAL;
+
scaling_info->src_rect.width = state->src_w >> 16;
if (scaling_info->src_rect.width == 0)
return -EINVAL;
@@ -9869,6 +9886,53 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
}
#endif
+static int validate_overlay(struct drm_atomic_state *state)
+{
+ int i;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct drm_plane_state *primary_state, *overlay_state = NULL;
+
+ /* Check if primary plane is contained inside overlay */
+ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
+ if (drm_atomic_plane_disabling(plane->state, new_plane_state))
+ return 0;
+
+ overlay_state = new_plane_state;
+ continue;
+ }
+ }
+
+ /* check if we're making changes to the overlay plane */
+ if (!overlay_state)
+ return 0;
+
+ /* check if overlay plane is enabled */
+ if (!overlay_state->crtc)
+ return 0;
+
+ /* find the primary plane for the CRTC that the overlay is enabled on */
+ primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
+ if (IS_ERR(primary_state))
+ return PTR_ERR(primary_state);
+
+ /* check if primary plane is enabled */
+ if (!primary_state->crtc)
+ return 0;
+
+ /* Perform the bounds check to ensure the overlay plane covers the primary */
+ if (primary_state->crtc_x < overlay_state->crtc_x ||
+ primary_state->crtc_y < overlay_state->crtc_y ||
+ primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
+ primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
+ DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
* @dev: The DRM device
@@ -10043,6 +10107,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
}
+ ret = validate_overlay(state);
+ if (ret)
+ goto fail;
+
/* Add new/modified planes */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
ret = dm_update_plane_state(dc, state, plane,
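
validate_overlay() adds a containment test: when both planes are enabled, the primary must lie fully inside the overlay, otherwise the commit is rejected (per the debug message, because the hardware cursor sits on the overlay plane). The geometry check, as a standalone sketch:

#include <stdbool.h>

struct rect { int x, y, w, h; };

/* true iff inner is fully contained in outer */
static bool contains(const struct rect *outer, const struct rect *inner)
{
	return inner->x >= outer->x && inner->y >= outer->y &&
	       inner->x + inner->w <= outer->x + outer->w &&
	       inner->y + inner->h <= outer->y + outer->h;
}
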
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 529545045a3e..1b6b15708b96 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -3012,7 +3012,7 @@ static int trigger_hpd_mst_set(void *data, u64 val)
if (!aconnector->dc_link)
continue;
- if (!(aconnector->port && &aconnector->mst_port->mst_mgr))
+ if (!aconnector->mst_port)
continue;
link = aconnector->dc_link;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 616f5b1ea3a8..666796a0067c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -650,6 +650,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
/* File created at /sys/class/drm/card0/device/hdcp_srm*/
hdcp_work[0].attr = data_attr;
+ sysfs_bin_attr_init(&hdcp_work[0].attr);
if (sysfs_create_bin_file(&adev->dev->kobj, &hdcp_work[0].attr))
DRM_WARN("Failed to create device file hdcp_srm");
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index f4374d83662a..c1f5474c205a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -1076,6 +1076,24 @@ static bool dc_link_detect_helper(struct dc_link *link,
dc_is_dvi_signal(link->connector_signal)) {
if (prev_sink)
dc_sink_release(prev_sink);
+ link_disconnect_sink(link);
+
+ return false;
+ }
+ /*
+ * Abort detection for DP connectors if we have no EDID and the
+ * connector is an active converter, as there is no display
+ * downstream.
+ */
+ if (dc_is_dp_sst_signal(link->connector_signal) &&
+ (link->dpcd_caps.dongle_type ==
+ DISPLAY_DONGLE_DP_VGA_CONVERTER ||
+ link->dpcd_caps.dongle_type ==
+ DISPLAY_DONGLE_DP_DVI_CONVERTER)) {
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+ link_disconnect_sink(link);
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 4a5fa23d8e7b..5fcc2e64305d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -826,10 +826,11 @@ static const struct dc_plane_cap plane_cap = {
.fp16 = 16000
},
+ /* 6:1 downscaling ratio: 1000/6 = 166.666 */
.max_downscale_factor = {
- .argb8888 = 600,
- .nv12 = 600,
- .fp16 = 600
+ .argb8888 = 167,
+ .nv12 = 167,
+ .fp16 = 167
}
};
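
max_downscale_factor is the destination/source ratio in thousandths, so a 6:1 cap is ceil(1000/6) = 167; the old 600 only advertised roughly a 1.67:1 downscale. The same constants recur in the dcn301 and dcn302 tables below. The conversion, assuming those semantics:

static inline unsigned int downscale_cap(unsigned int max_ratio)
{
	return (1000 + max_ratio - 1) / max_ratio;	/* ceil(1000/ratio) */
}
/* downscale_cap(6) == 167, matching the new table entries */
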
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 5b54b7fc5105..472696f949ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -843,10 +843,11 @@ static const struct dc_plane_cap plane_cap = {
.fp16 = 16000
},
+ /* 6:1 downscaling ratio: 1000/6 = 166.666 */
.max_downscale_factor = {
- .argb8888 = 600,
- .nv12 = 600,
- .fp16 = 600
+ .argb8888 = 167,
+ .nv12 = 167,
+ .fp16 = 167
},
64,
64
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index fc2dea243d1b..a33f0365329b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -284,10 +284,11 @@ static const struct dc_plane_cap plane_cap = {
.nv12 = 16000,
.fp16 = 16000
},
+ /* 6:1 downscaling ratio: 1000/6 = 166.666 */
.max_downscale_factor = {
- .argb8888 = 600,
- .nv12 = 600,
- .fp16 = 600
+ .argb8888 = 167,
+ .nv12 = 167,
+ .fp16 = 167
},
16,
16
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 43ed6291b2b8..9ab706cd07ff 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -216,6 +216,12 @@ enum PP_FEATURE_MASK {
PP_GFX_DCS_MASK = 0x80000,
};
+enum amd_harvest_ip_mask {
+ AMD_HARVEST_IP_VCN_MASK = 0x1,
+ AMD_HARVEST_IP_JPEG_MASK = 0x2,
+ AMD_HARVEST_IP_DMU_MASK = 0x4,
+};
+
enum DC_FEATURE_MASK {
DC_FBC_MASK = 0x1,
DC_MULTI_MON_PP_MCLK_SWITCH_MASK = 0x2,
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 8128603ef495..9a54066ec0af 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -451,7 +451,7 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- struct pp_states_info data;
+ struct pp_states_info data = {0};
enum amd_pm_state_type pm = 0;
int i = 0, ret = 0;
@@ -1893,6 +1893,14 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
}
}
+ if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
+ /* SMU MP1 does not support dcefclk level setting */
+ if (asic_type >= CHIP_NAVI10) {
+ dev_attr->attr.mode &= ~S_IWUGO;
+ dev_attr->store = NULL;
+ }
+ }
+
#undef DEVICE_ATTR_IS
return 0;
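
For ASICs whose SMU MP1 firmware cannot set DCEFCLK levels, the sysfs attribute is demoted to read-only at registration time rather than letting writes fail silently in firmware: the write permission bits are cleared and the store() hook dropped. The demotion in isolation:

if (asic_type >= CHIP_NAVI10) {
	dev_attr->attr.mode &= ~S_IWUGO;	/* clear user/group/other write bits */
	dev_attr->store = NULL;			/* no store(): writes are rejected */
}
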
diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
index 26a5321e621b..15c0b8af376f 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
@@ -4817,70 +4817,70 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,
u32 reg;
int ret;
- table->initialState.levels[0].mclk.vDLL_CNTL =
+ table->initialState.level.mclk.vDLL_CNTL =
cpu_to_be32(si_pi->clock_registers.dll_cntl);
- table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+ table->initialState.level.mclk.vMCLK_PWRMGT_CNTL =
cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl);
- table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+ table->initialState.level.mclk.vMPLL_AD_FUNC_CNTL =
cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl);
- table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+ table->initialState.level.mclk.vMPLL_DQ_FUNC_CNTL =
cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl);
- table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL =
+ table->initialState.level.mclk.vMPLL_FUNC_CNTL =
cpu_to_be32(si_pi->clock_registers.mpll_func_cntl);
- table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+ table->initialState.level.mclk.vMPLL_FUNC_CNTL_1 =
cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1);
- table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+ table->initialState.level.mclk.vMPLL_FUNC_CNTL_2 =
cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2);
- table->initialState.levels[0].mclk.vMPLL_SS =
+ table->initialState.level.mclk.vMPLL_SS =
cpu_to_be32(si_pi->clock_registers.mpll_ss1);
- table->initialState.levels[0].mclk.vMPLL_SS2 =
+ table->initialState.level.mclk.vMPLL_SS2 =
cpu_to_be32(si_pi->clock_registers.mpll_ss2);
- table->initialState.levels[0].mclk.mclk_value =
+ table->initialState.level.mclk.mclk_value =
cpu_to_be32(initial_state->performance_levels[0].mclk);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+ table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL =
cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+ table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_2 =
cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+ table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_3 =
cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+ table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_4 =
cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4);
- table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
+ table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM =
cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum);
- table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
+ table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2);
- table->initialState.levels[0].sclk.sclk_value =
+ table->initialState.level.sclk.sclk_value =
cpu_to_be32(initial_state->performance_levels[0].sclk);
- table->initialState.levels[0].arbRefreshState =
+ table->initialState.level.arbRefreshState =
SISLANDS_INITIAL_STATE_ARB_INDEX;
- table->initialState.levels[0].ACIndex = 0;
+ table->initialState.level.ACIndex = 0;
ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
initial_state->performance_levels[0].vddc,
- &table->initialState.levels[0].vddc);
+ &table->initialState.level.vddc);
if (!ret) {
u16 std_vddc;
ret = si_get_std_voltage_value(adev,
- &table->initialState.levels[0].vddc,
+ &table->initialState.level.vddc,
&std_vddc);
if (!ret)
si_populate_std_voltage_value(adev, std_vddc,
- table->initialState.levels[0].vddc.index,
- &table->initialState.levels[0].std_vddc);
+ table->initialState.level.vddc.index,
+ &table->initialState.level.std_vddc);
}
if (eg_pi->vddci_control)
si_populate_voltage_value(adev,
&eg_pi->vddci_voltage_table,
initial_state->performance_levels[0].vddci,
- &table->initialState.levels[0].vddci);
+ &table->initialState.level.vddci);
if (si_pi->vddc_phase_shed_control)
si_populate_phase_shedding_value(adev,
@@ -4888,41 +4888,41 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,
initial_state->performance_levels[0].vddc,
initial_state->performance_levels[0].sclk,
initial_state->performance_levels[0].mclk,
- &table->initialState.levels[0].vddc);
+ &table->initialState.level.vddc);
- si_populate_initial_mvdd_value(adev, &table->initialState.levels[0].mvdd);
+ si_populate_initial_mvdd_value(adev, &table->initialState.level.mvdd);
reg = CG_R(0xffff) | CG_L(0);
- table->initialState.levels[0].aT = cpu_to_be32(reg);
- table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
- table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;
+ table->initialState.level.aT = cpu_to_be32(reg);
+ table->initialState.level.bSP = cpu_to_be32(pi->dsp);
+ table->initialState.level.gen2PCIE = (u8)si_pi->boot_pcie_gen;
if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
- table->initialState.levels[0].strobeMode =
+ table->initialState.level.strobeMode =
si_get_strobe_mode_settings(adev,
initial_state->performance_levels[0].mclk);
if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
- table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
+ table->initialState.level.mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
else
- table->initialState.levels[0].mcFlags = 0;
+ table->initialState.level.mcFlags = 0;
}
table->initialState.levelCount = 1;
table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
- table->initialState.levels[0].dpm2.MaxPS = 0;
- table->initialState.levels[0].dpm2.NearTDPDec = 0;
- table->initialState.levels[0].dpm2.AboveSafeInc = 0;
- table->initialState.levels[0].dpm2.BelowSafeInc = 0;
- table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+ table->initialState.level.dpm2.MaxPS = 0;
+ table->initialState.level.dpm2.NearTDPDec = 0;
+ table->initialState.level.dpm2.AboveSafeInc = 0;
+ table->initialState.level.dpm2.BelowSafeInc = 0;
+ table->initialState.level.dpm2.PwrEfficiencyRatio = 0;
reg = MIN_POWER_MASK | MAX_POWER_MASK;
- table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+ table->initialState.level.SQPowerThrottle = cpu_to_be32(reg);
reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
- table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+ table->initialState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
}
@@ -4953,18 +4953,18 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
if (pi->acpi_vddc) {
ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
- pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
+ pi->acpi_vddc, &table->ACPIState.level.vddc);
if (!ret) {
u16 std_vddc;
ret = si_get_std_voltage_value(adev,
- &table->ACPIState.levels[0].vddc, &std_vddc);
+ &table->ACPIState.level.vddc, &std_vddc);
if (!ret)
si_populate_std_voltage_value(adev, std_vddc,
- table->ACPIState.levels[0].vddc.index,
- &table->ACPIState.levels[0].std_vddc);
+ table->ACPIState.level.vddc.index,
+ &table->ACPIState.level.std_vddc);
}
- table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen;
+ table->ACPIState.level.gen2PCIE = si_pi->acpi_pcie_gen;
if (si_pi->vddc_phase_shed_control) {
si_populate_phase_shedding_value(adev,
@@ -4972,23 +4972,23 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
pi->acpi_vddc,
0,
0,
- &table->ACPIState.levels[0].vddc);
+ &table->ACPIState.level.vddc);
}
} else {
ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
- pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc);
+ pi->min_vddc_in_table, &table->ACPIState.level.vddc);
if (!ret) {
u16 std_vddc;
ret = si_get_std_voltage_value(adev,
- &table->ACPIState.levels[0].vddc, &std_vddc);
+ &table->ACPIState.level.vddc, &std_vddc);
if (!ret)
si_populate_std_voltage_value(adev, std_vddc,
- table->ACPIState.levels[0].vddc.index,
- &table->ACPIState.levels[0].std_vddc);
+ table->ACPIState.level.vddc.index,
+ &table->ACPIState.level.std_vddc);
}
- table->ACPIState.levels[0].gen2PCIE =
+ table->ACPIState.level.gen2PCIE =
(u8)amdgpu_get_pcie_gen_support(adev,
si_pi->sys_pcie_mask,
si_pi->boot_pcie_gen,
@@ -5000,14 +5000,14 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
pi->min_vddc_in_table,
0,
0,
- &table->ACPIState.levels[0].vddc);
+ &table->ACPIState.level.vddc);
}
if (pi->acpi_vddc) {
if (eg_pi->acpi_vddci)
si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
eg_pi->acpi_vddci,
- &table->ACPIState.levels[0].vddci);
+ &table->ACPIState.level.vddci);
}
mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
@@ -5018,59 +5018,59 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
spll_func_cntl_2 |= SCLK_MUX_SEL(4);
- table->ACPIState.levels[0].mclk.vDLL_CNTL =
+ table->ACPIState.level.mclk.vDLL_CNTL =
cpu_to_be32(dll_cntl);
- table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+ table->ACPIState.level.mclk.vMCLK_PWRMGT_CNTL =
cpu_to_be32(mclk_pwrmgt_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+ table->ACPIState.level.mclk.vMPLL_AD_FUNC_CNTL =
cpu_to_be32(mpll_ad_func_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+ table->ACPIState.level.mclk.vMPLL_DQ_FUNC_CNTL =
cpu_to_be32(mpll_dq_func_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL =
+ table->ACPIState.level.mclk.vMPLL_FUNC_CNTL =
cpu_to_be32(mpll_func_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+ table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_1 =
cpu_to_be32(mpll_func_cntl_1);
- table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+ table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_2 =
cpu_to_be32(mpll_func_cntl_2);
- table->ACPIState.levels[0].mclk.vMPLL_SS =
+ table->ACPIState.level.mclk.vMPLL_SS =
cpu_to_be32(si_pi->clock_registers.mpll_ss1);
- table->ACPIState.levels[0].mclk.vMPLL_SS2 =
+ table->ACPIState.level.mclk.vMPLL_SS2 =
cpu_to_be32(si_pi->clock_registers.mpll_ss2);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+ table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL =
cpu_to_be32(spll_func_cntl);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+ table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_2 =
cpu_to_be32(spll_func_cntl_2);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+ table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_3 =
cpu_to_be32(spll_func_cntl_3);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+ table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_4 =
cpu_to_be32(spll_func_cntl_4);
- table->ACPIState.levels[0].mclk.mclk_value = 0;
- table->ACPIState.levels[0].sclk.sclk_value = 0;
+ table->ACPIState.level.mclk.mclk_value = 0;
+ table->ACPIState.level.sclk.sclk_value = 0;
- si_populate_mvdd_value(adev, 0, &table->ACPIState.levels[0].mvdd);
+ si_populate_mvdd_value(adev, 0, &table->ACPIState.level.mvdd);
if (eg_pi->dynamic_ac_timing)
- table->ACPIState.levels[0].ACIndex = 0;
+ table->ACPIState.level.ACIndex = 0;
- table->ACPIState.levels[0].dpm2.MaxPS = 0;
- table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
- table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
- table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
- table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+ table->ACPIState.level.dpm2.MaxPS = 0;
+ table->ACPIState.level.dpm2.NearTDPDec = 0;
+ table->ACPIState.level.dpm2.AboveSafeInc = 0;
+ table->ACPIState.level.dpm2.BelowSafeInc = 0;
+ table->ACPIState.level.dpm2.PwrEfficiencyRatio = 0;
reg = MIN_POWER_MASK | MAX_POWER_MASK;
- table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+ table->ACPIState.level.SQPowerThrottle = cpu_to_be32(reg);
reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
- table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+ table->ACPIState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
}
static int si_populate_ulv_state(struct amdgpu_device *adev,
- SISLANDS_SMC_SWSTATE *state)
+ struct SISLANDS_SMC_SWSTATE_SINGLE *state)
{
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct si_power_info *si_pi = si_get_pi(adev);
@@ -5079,19 +5079,19 @@ static int si_populate_ulv_state(struct amdgpu_device *adev,
int ret;
ret = si_convert_power_level_to_smc(adev, &ulv->pl,
- &state->levels[0]);
+ &state->level);
if (!ret) {
if (eg_pi->sclk_deep_sleep) {
if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
- state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
+ state->level.stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
else
- state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
+ state->level.stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
}
if (ulv->one_pcie_lane_in_ulv)
state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1;
- state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
- state->levels[0].ACIndex = 1;
- state->levels[0].std_vddc = state->levels[0].vddc;
+ state->level.arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
+ state->level.ACIndex = 1;
+ state->level.std_vddc = state->level.vddc;
state->levelCount = 1;
state->flags |= PPSMC_SWSTATE_FLAG_DC;
@@ -5190,7 +5190,9 @@ static int si_init_smc_table(struct amdgpu_device *adev)
if (ret)
return ret;
- table->driverState = table->initialState;
+ table->driverState.flags = table->initialState.flags;
+ table->driverState.levelCount = table->initialState.levelCount;
+ table->driverState.levels[0] = table->initialState.level;
ret = si_do_program_memory_timing_parameters(adev, amdgpu_boot_state,
SISLANDS_INITIAL_STATE_ARB_INDEX);
@@ -5737,8 +5739,8 @@ static int si_upload_ulv_state(struct amdgpu_device *adev)
if (ulv->supported && ulv->pl.vddc) {
u32 address = si_pi->state_table_start +
offsetof(SISLANDS_SMC_STATETABLE, ULVState);
- SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.ULVState;
- u32 state_size = sizeof(SISLANDS_SMC_SWSTATE);
+ struct SISLANDS_SMC_SWSTATE_SINGLE *smc_state = &si_pi->smc_statetable.ULVState;
+ u32 state_size = sizeof(struct SISLANDS_SMC_SWSTATE_SINGLE);
memset(smc_state, 0, state_size);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h b/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h
index 0f7554052c90..c7dc117a688c 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h
@@ -191,6 +191,14 @@ struct SISLANDS_SMC_SWSTATE
typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
+struct SISLANDS_SMC_SWSTATE_SINGLE {
+ uint8_t flags;
+ uint8_t levelCount;
+ uint8_t padding2;
+ uint8_t padding3;
+ SISLANDS_SMC_HW_PERFORMANCE_LEVEL level;
+};
+
#define SISLANDS_SMC_VOLTAGEMASK_VDDC 0
#define SISLANDS_SMC_VOLTAGEMASK_MVDD 1
#define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
@@ -208,19 +216,19 @@ typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE;
struct SISLANDS_SMC_STATETABLE
{
- uint8_t thermalProtectType;
- uint8_t systemFlags;
- uint8_t maxVDDCIndexInPPTable;
- uint8_t extraFlags;
- uint32_t lowSMIO[SISLANDS_MAX_NO_VREG_STEPS];
- SISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
- SISLANDS_SMC_VOLTAGEMASKTABLE phaseMaskTable;
- PP_SIslands_DPM2Parameters dpm2Params;
- SISLANDS_SMC_SWSTATE initialState;
- SISLANDS_SMC_SWSTATE ACPIState;
- SISLANDS_SMC_SWSTATE ULVState;
- SISLANDS_SMC_SWSTATE driverState;
- SISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
+ uint8_t thermalProtectType;
+ uint8_t systemFlags;
+ uint8_t maxVDDCIndexInPPTable;
+ uint8_t extraFlags;
+ uint32_t lowSMIO[SISLANDS_MAX_NO_VREG_STEPS];
+ SISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
+ SISLANDS_SMC_VOLTAGEMASKTABLE phaseMaskTable;
+ PP_SIslands_DPM2Parameters dpm2Params;
+ struct SISLANDS_SMC_SWSTATE_SINGLE initialState;
+ struct SISLANDS_SMC_SWSTATE_SINGLE ACPIState;
+ struct SISLANDS_SMC_SWSTATE_SINGLE ULVState;
+ SISLANDS_SMC_SWSTATE driverState;
+ SISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
};
typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
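
initialState, ACPIState and ULVState only ever populate one performance level, so the header now gives them a dedicated single-level struct while driverState keeps the multi-level layout with its trailing level array. A standalone sketch of the two layouts (simplified stand-in types, not the SMC definitions):

#include <stdint.h>
#include <stdio.h>

struct perf_level {	/* stand-in for SISLANDS_SMC_HW_PERFORMANCE_LEVEL */
	uint32_t sclk, mclk;
};

struct swstate_multi {	/* like the multi-level SWSTATE */
	uint8_t flags, levelCount, pad2, pad3;
	struct perf_level levels[];	/* runtime-sized trailing array */
};

struct swstate_single {	/* like SISLANDS_SMC_SWSTATE_SINGLE */
	uint8_t flags, levelCount, pad2, pad3;
	struct perf_level level;	/* always exactly one level */
};

int main(void)
{
	/* The single-level struct has a fixed size, so sizeof() and a
	 * plain field copy are safe; the multi-level one needs its
	 * trailing array handled explicitly (cf. the field-by-field
	 * driverState copy in si_init_smc_table above). */
	printf("multi header=%zu single=%zu\n",
	       sizeof(struct swstate_multi), sizeof(struct swstate_single));
	return 0;
}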
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index f827096dc849..ac13042672ea 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -1443,7 +1443,6 @@ static int navi10_force_clk_levels(struct smu_context *smu,
case SMU_SOCCLK:
case SMU_MCLK:
case SMU_UCLK:
- case SMU_DCEFCLK:
case SMU_FCLK:
/* There are only 2 levels for fine grained DPM */
if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
@@ -1463,6 +1462,10 @@ static int navi10_force_clk_levels(struct smu_context *smu,
if (ret)
return size;
break;
+ case SMU_DCEFCLK:
+ dev_info(smu->adev->dev, "Setting DCEFCLK min/max dpm level is not supported!\n");
+ break;
+
default:
break;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 72d9c1be1835..d2fd44b903ca 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -1127,7 +1127,6 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
case SMU_SOCCLK:
case SMU_MCLK:
case SMU_UCLK:
- case SMU_DCEFCLK:
case SMU_FCLK:
/* There are only 2 levels for fine grained DPM */
if (sienna_cichlid_is_support_fine_grained_dpm(smu, clk_type)) {
@@ -1147,6 +1146,9 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
if (ret)
goto forec_level_out;
break;
+ case SMU_DCEFCLK:
+ dev_info(smu->adev->dev, "Setting DCEFCLK min/max dpm level is not supported!\n");
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index b9a4b7670a89..197b97341cad 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -815,10 +815,8 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ctx->addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(ctx->addr)) {
- dev_err(dev, "ioremap failed\n");
+ if (IS_ERR(ctx->addr))
return PTR_ERR(ctx->addr);
- }
ret = decon_conf_irq(ctx, "vsync", decon_irq_handler, 0);
if (ret < 0)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 44e402b7cdfb..2d2fe5ab26e7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1786,10 +1786,8 @@ static int exynos_dsi_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dsi->reg_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(dsi->reg_base)) {
- dev_err(dev, "failed to remap io region\n");
+ if (IS_ERR(dsi->reg_base))
return PTR_ERR(dsi->reg_base);
- }
dsi->phy = devm_phy_get(dev, "dsim");
if (IS_ERR(dsi->phy)) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 49a2e0c53918..ae576122873e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -723,7 +723,7 @@ static void fimd_win_set_colkey(struct fimd_context *ctx, unsigned int win)
}
/**
- * shadow_protect_win() - disable updating values from shadow registers at vsync
+ * fimd_shadow_protect_win() - disable updating values from shadow registers at vsync
*
* @ctx: local driver data
* @win: window to protect registers for
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 69f57ca9c68d..93f4d059fc89 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -102,7 +102,6 @@ config DRM_I915_GVT
bool "Enable Intel GVT-g graphics virtualization host support"
depends on DRM_I915
depends on 64BIT
- depends on VFIO_MDEV=y || VFIO_MDEV=DRM_I915
default n
help
Choose this option if you want to enable Intel GVT-g graphics
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 6a2dee8cef1f..642c60f3d9b1 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -1095,44 +1095,6 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
return -EINVAL;
}
-/* Optimize link config in order: max bpp, min lanes, min clock */
-static int
-intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config,
- const struct link_config_limits *limits)
-{
- const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- int bpp, clock, lane_count;
- int mode_rate, link_clock, link_avail;
-
- for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
- int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
-
- mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
- output_bpp);
-
- for (lane_count = limits->min_lane_count;
- lane_count <= limits->max_lane_count;
- lane_count <<= 1) {
- for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
- link_clock = intel_dp->common_rates[clock];
- link_avail = intel_dp_max_data_rate(link_clock,
- lane_count);
-
- if (mode_rate <= link_avail) {
- pipe_config->lane_count = lane_count;
- pipe_config->pipe_bpp = bpp;
- pipe_config->port_clock = link_clock;
-
- return 0;
- }
- }
- }
- }
-
- return -EINVAL;
-}
-
static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
{
int i, num_bpc;
@@ -1382,22 +1344,11 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
intel_dp_can_bigjoiner(intel_dp))
pipe_config->bigjoiner = true;
- if (intel_dp_is_edp(intel_dp))
- /*
- * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
- * section A.1: "It is recommended that the minimum number of
- * lanes be used, using the minimum link rate allowed for that
- * lane configuration."
- *
- * Note that we fall back to the max clock and lane count for eDP
- * panels that fail with the fast optimal settings (see
- * intel_dp->use_max_params), in which case the fast vs. wide
- * choice doesn't matter.
- */
- ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, &limits);
- else
- /* Optimize for slow and wide. */
- ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
+ /*
+ * Optimize for slow and wide for everything, because some eDP 1.3
+ * and 1.4 panels don't work well with fast and narrow.
+ */
+ ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
/* enable compression if the mode doesn't fit available BW */
drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
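
With the fast path removed, every sink goes through the wide search, which tries the highest bpp first, then the lowest link clock, then the fewest lanes that still carry the mode. A standalone sketch of that search order (made-up rates and a crude data-rate model, not the i915 units):

#include <stdio.h>

int main(void)
{
	const long rates[] = { 162000, 270000, 540000 };	/* demo kHz */
	const long mode_rate = 2160000;	/* demo bandwidth target */

	/* Search order of the wide path: max bpp, min clock, min lanes. */
	for (int bpp = 30; bpp >= 18; bpp -= 6)
		for (int r = 0; r < 3; r++)
			for (int lanes = 1; lanes <= 4; lanes <<= 1) {
				long avail = rates[r] * lanes;	/* crude model */

				if (mode_rate * bpp / 24 <= avail) {
					printf("bpp=%d rate=%ld lanes=%d\n",
					       bpp, rates[r], lanes);
					return 0;
				}
			}
	printf("no config fits\n");
	return 0;
}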
@@ -2160,7 +2111,7 @@ void intel_dp_check_frl_training(struct intel_dp *intel_dp)
* -PCON supports SRC_CTL_MODE (VESA DP2.0-HDMI2.1 PCON Spec Draft-1 Sec-7)
* -sink is HDMI2.1
*/
- if (!(intel_dp->dpcd[2] & DP_PCON_SOURCE_CTL_MODE) ||
+ if (!(intel_dp->downstream_ports[2] & DP_PCON_SOURCE_CTL_MODE) ||
!intel_dp_is_hdmi_2_1_sink(intel_dp) ||
intel_dp->frl.is_trained)
return;
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index e5dadde422f7..bbaf05515e88 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -383,7 +383,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
i830_overlay_clock_gating(dev_priv, true);
}
-static void
+__i915_active_call static void
intel_overlay_last_flip_retire(struct i915_active *active)
{
struct intel_overlay *overlay =
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 23f6b00e08e2..f6fe5cb01438 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -189,7 +189,7 @@ compute_partial_view(const struct drm_i915_gem_object *obj,
struct i915_ggtt_view view;
if (i915_gem_object_is_tiled(obj))
- chunk = roundup(chunk, tile_row_pages(obj));
+ chunk = roundup(chunk, tile_row_pages(obj) ?: 1);
view.type = I915_GGTT_VIEW_PARTIAL;
view.partial.offset = rounddown(page_offset, chunk);
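
roundup() divides by its alignment argument, so a zero return from tile_row_pages() would be a divide-by-zero; the GNU "?:" extension (used throughout the kernel) substitutes 1 in that case. A standalone sketch with a stand-in helper (gcc/clang):

#include <stdio.h>

#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))	/* kernel's macro */

static unsigned long tile_rows_demo(int tiled)	/* stand-in helper */
{
	return tiled ? 32 : 0;	/* 0 models the unexpected geometry */
}

int main(void)
{
	unsigned long chunk = 100;

	/* GNU "?:" reuses the left operand unless it is 0; without the
	 * "?: 1" guard, roundup(chunk, 0) divides by zero. */
	printf("tiled:   %lu\n", roundup(chunk, tile_rows_demo(1) ?: 1));
	printf("untiled: %lu\n", roundup(chunk, tile_rows_demo(0) ?: 1));
	return 0;
}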
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index aed8a37ccdc9..7361971c177d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -63,6 +63,8 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
i915_gem_object_set_tiling_quirk(obj);
+ GEM_BUG_ON(!list_empty(&obj->mm.link));
+ atomic_inc(&obj->mm.shrink_pin);
shrinkable = false;
}
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
index de575fdb033f..21f08e53889c 100644
--- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
@@ -397,7 +397,10 @@ static void emit_batch(struct i915_vma * const vma,
gen7_emit_pipeline_invalidate(&cmds);
batch_add(&cmds, MI_LOAD_REGISTER_IMM(2));
batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7));
- batch_add(&cmds, 0xffff0000);
+ batch_add(&cmds, 0xffff0000 |
+ ((IS_IVB_GT1(i915) || IS_VALLEYVIEW(i915)) ?
+ HIZ_RAW_STALL_OPT_DISABLE :
+ 0));
batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1));
batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
gen7_emit_pipeline_invalidate(&cmds);
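
The CACHE_MODE registers follow the masked-register convention: bits 31:16 are a write-enable mask for bits 15:0, which is why the hunk ORs the option bit into 0xffff0000 instead of doing a read-modify-write. A standalone sketch of that convention (demo bit value, not the actual HIZ_RAW_STALL_OPT_DISABLE definition):

#include <stdint.h>
#include <stdio.h>

static uint16_t reg = 0xabcd;	/* stand-in for a masked MMIO register */

static void masked_write(uint32_t val)
{
	uint16_t mask = val >> 16;	/* bits 31:16 select what to write */

	reg = (reg & ~mask) | ((uint16_t)val & mask);
}

int main(void)
{
	/* Mask 0xffff writes every low bit, so only the chosen bit
	 * survives, in a single 32-bit write. */
	masked_write(0xffff0000u | (1u << 1));
	printf("reg=0x%04x\n", reg);
	return 0;
}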
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 176c19633412..74bf6fc8461f 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -641,7 +641,6 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
err = pin_pt_dma(vm, pde->pt.base);
if (err) {
- i915_gem_object_put(pde->pt.base);
free_pd(vm, pde);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index e72b7a0dc316..8a322594210c 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -653,8 +653,8 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
* banks of memory are paired and unswizzled on the
* uneven portion, so leave that as unknown.
*/
- if (intel_uncore_read(uncore, C0DRB3) ==
- intel_uncore_read(uncore, C1DRB3)) {
+ if (intel_uncore_read16(uncore, C0DRB3) ==
+ intel_uncore_read16(uncore, C1DRB3)) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
}
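
C0DRB3/C1DRB3 are 16-bit registers, so the switch to intel_uncore_read16() keeps neighbouring bits out of the equality test. A standalone sketch of how a too-wide read can flip the comparison (layout illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Model two adjacent 16-bit registers whose upper halves hold
	 * unrelated neighbouring bits. */
	uint32_t c0drb3 = 0x1234aaaa;
	uint32_t c1drb3 = 0x5678aaaa;

	printf("32-bit compare: %s\n",
	       c0drb3 == c1drb3 ? "equal" : "unequal");	/* unequal */
	printf("16-bit compare: %s\n",
	       (uint16_t)c0drb3 == (uint16_t)c1drb3 ?
	       "equal" : "unequal");			/* equal */
	return 0;
}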
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index e7c2babcee8b..cbac409f6c8a 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -46,118 +46,6 @@ static const char * const supported_hypervisors[] = {
[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};
-static struct intel_vgpu_type *
-intel_gvt_find_vgpu_type(struct intel_gvt *gvt, unsigned int type_group_id)
-{
- if (WARN_ON(type_group_id >= gvt->num_types))
- return NULL;
- return &gvt->types[type_group_id];
-}
-
-static ssize_t available_instances_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr,
- char *buf)
-{
- struct intel_vgpu_type *type;
- unsigned int num = 0;
- void *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
- type = intel_gvt_find_vgpu_type(gvt, mtype_get_type_group_id(mtype));
- if (!type)
- num = 0;
- else
- num = type->avail_instance;
-
- return sprintf(buf, "%u\n", num);
-}
-
-static ssize_t device_api_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
-}
-
-static ssize_t description_show(struct mdev_type *mtype,
- struct mdev_type_attribute *attr, char *buf)
-{
- struct intel_vgpu_type *type;
- void *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
-
- type = intel_gvt_find_vgpu_type(gvt, mtype_get_type_group_id(mtype));
- if (!type)
- return 0;
-
- return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
- "fence: %d\nresolution: %s\n"
- "weight: %d\n",
- BYTES_TO_MB(type->low_gm_size),
- BYTES_TO_MB(type->high_gm_size),
- type->fence, vgpu_edid_str(type->resolution),
- type->weight);
-}
-
-static MDEV_TYPE_ATTR_RO(available_instances);
-static MDEV_TYPE_ATTR_RO(device_api);
-static MDEV_TYPE_ATTR_RO(description);
-
-static struct attribute *gvt_type_attrs[] = {
- &mdev_type_attr_available_instances.attr,
- &mdev_type_attr_device_api.attr,
- &mdev_type_attr_description.attr,
- NULL,
-};
-
-static struct attribute_group *gvt_vgpu_type_groups[] = {
- [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
-};
-
-static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups)
-{
- *intel_vgpu_type_groups = gvt_vgpu_type_groups;
- return true;
-}
-
-static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
-{
- int i, j;
- struct intel_vgpu_type *type;
- struct attribute_group *group;
-
- for (i = 0; i < gvt->num_types; i++) {
- type = &gvt->types[i];
-
- group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
- if (WARN_ON(!group))
- goto unwind;
-
- group->name = type->name;
- group->attrs = gvt_type_attrs;
- gvt_vgpu_type_groups[i] = group;
- }
-
- return 0;
-
-unwind:
- for (j = 0; j < i; j++) {
- group = gvt_vgpu_type_groups[j];
- kfree(group);
- }
-
- return -ENOMEM;
-}
-
-static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
-{
- int i;
- struct attribute_group *group;
-
- for (i = 0; i < gvt->num_types; i++) {
- group = gvt_vgpu_type_groups[i];
- gvt_vgpu_type_groups[i] = NULL;
- kfree(group);
- }
-}
-
static const struct intel_gvt_ops intel_gvt_ops = {
.emulate_cfg_read = intel_vgpu_emulate_cfg_read,
.emulate_cfg_write = intel_vgpu_emulate_cfg_write,
@@ -169,8 +57,6 @@ static const struct intel_gvt_ops intel_gvt_ops = {
.vgpu_reset = intel_gvt_reset_vgpu,
.vgpu_activate = intel_gvt_activate_vgpu,
.vgpu_deactivate = intel_gvt_deactivate_vgpu,
- .gvt_find_vgpu_type = intel_gvt_find_vgpu_type,
- .get_gvt_attrs = intel_get_gvt_attrs,
.vgpu_query_plane = intel_vgpu_query_plane,
.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
.write_protect_handler = intel_vgpu_page_track_handler,
@@ -274,7 +160,6 @@ void intel_gvt_clean_device(struct drm_i915_private *i915)
return;
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
- intel_gvt_cleanup_vgpu_type_groups(gvt);
intel_gvt_clean_vgpu_types(gvt);
intel_gvt_debugfs_clean(gvt);
@@ -363,12 +248,6 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
if (ret)
goto out_clean_thread;
- ret = intel_gvt_init_vgpu_type_groups(gvt);
- if (ret) {
- gvt_err("failed to init vgpu type groups: %d\n", ret);
- goto out_clean_types;
- }
-
vgpu = intel_gvt_create_idle_vgpu(gvt);
if (IS_ERR(vgpu)) {
ret = PTR_ERR(vgpu);
@@ -454,7 +333,8 @@ EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor);
void
intel_gvt_unregister_hypervisor(void)
{
- intel_gvt_hypervisor_host_exit(intel_gvt_host.dev);
+ void *gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;
+ intel_gvt_hypervisor_host_exit(intel_gvt_host.dev, gvt);
module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 88ab360fcb31..0c0615602343 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -574,9 +574,6 @@ struct intel_gvt_ops {
void (*vgpu_reset)(struct intel_vgpu *);
void (*vgpu_activate)(struct intel_vgpu *);
void (*vgpu_deactivate)(struct intel_vgpu *);
- struct intel_vgpu_type *(*gvt_find_vgpu_type)(
- struct intel_gvt *gvt, unsigned int type_group_id);
- bool (*get_gvt_attrs)(struct attribute_group ***intel_vgpu_type_groups);
int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 477badfcb258..dda320749c65 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -669,8 +669,8 @@ static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
link_n = vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A));
/* Get H/V total from transcoder timing */
- htotal = (vgpu_vreg_t(vgpu, HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT) + 1;
- vtotal = (vgpu_vreg_t(vgpu, VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT) + 1;
+ htotal = (vgpu_vreg_t(vgpu, HTOTAL(TRANSCODER_A)) >> TRANS_HTOTAL_SHIFT);
+ vtotal = (vgpu_vreg_t(vgpu, VTOTAL(TRANSCODER_A)) >> TRANS_VTOTAL_SHIFT);
if (dp_br && link_n && htotal && vtotal) {
u64 pixel_clk = 0;
@@ -682,7 +682,7 @@ static void vgpu_update_refresh_rate(struct intel_vgpu *vgpu)
pixel_clk *= MSEC_PER_SEC;
/* Calculate refresh rate by (pixel_clk / (h_total * v_total)) */
- new_rate = DIV64_U64_ROUND_CLOSEST(pixel_clk, div64_u64(mul_u32_u32(htotal, vtotal), MSEC_PER_SEC));
+ new_rate = DIV64_U64_ROUND_CLOSEST(mul_u64_u32_shr(pixel_clk, MSEC_PER_SEC, 0), mul_u32_u32(htotal + 1, vtotal + 1));
if (*old_rate != new_rate)
*old_rate = new_rate;
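
The reworked formula multiplies before dividing and applies the +1 to the raw H/V totals (the transcoder registers encode total - 1), so precision is no longer lost in an intermediate division. A standalone worked example with 1080p60 CEA timings (example numbers, not driver state):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pixel_clk = 148500000ULL;	/* Hz, 1080p60 example */
	uint32_t htotal_reg = 2200 - 1;		/* registers hold total - 1 */
	uint32_t vtotal_reg = 1125 - 1;

	uint64_t denom = (uint64_t)(htotal_reg + 1) * (vtotal_reg + 1);
	uint64_t rate = (pixel_clk + denom / 2) / denom;	/* round closest */

	printf("refresh = %llu Hz\n", (unsigned long long)rate);	/* 60 */
	return 0;
}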
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index b79da5124f83..f33e3cbd0439 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -49,7 +49,7 @@ enum hypervisor_type {
struct intel_gvt_mpt {
enum hypervisor_type type;
int (*host_init)(struct device *dev, void *gvt, const void *ops);
- void (*host_exit)(struct device *dev);
+ void (*host_exit)(struct device *dev, void *gvt);
int (*attach_vgpu)(void *vgpu, unsigned long *handle);
void (*detach_vgpu)(void *vgpu);
int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 65ff43cfc0f7..48b4d4cf805d 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -144,6 +144,104 @@ static inline bool handle_valid(unsigned long handle)
return !!(handle & ~0xff);
}
+static ssize_t available_instances_show(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr,
+ char *buf)
+{
+ struct intel_vgpu_type *type;
+ unsigned int num = 0;
+ struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
+
+ type = &gvt->types[mtype_get_type_group_id(mtype)];
+ if (!type)
+ num = 0;
+ else
+ num = type->avail_instance;
+
+ return sprintf(buf, "%u\n", num);
+}
+
+static ssize_t device_api_show(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
+}
+
+static ssize_t description_show(struct mdev_type *mtype,
+ struct mdev_type_attribute *attr, char *buf)
+{
+ struct intel_vgpu_type *type;
+ struct intel_gvt *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
+
+ type = &gvt->types[mtype_get_type_group_id(mtype)];
+ if (!type)
+ return 0;
+
+ return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
+ "fence: %d\nresolution: %s\n"
+ "weight: %d\n",
+ BYTES_TO_MB(type->low_gm_size),
+ BYTES_TO_MB(type->high_gm_size),
+ type->fence, vgpu_edid_str(type->resolution),
+ type->weight);
+}
+
+static MDEV_TYPE_ATTR_RO(available_instances);
+static MDEV_TYPE_ATTR_RO(device_api);
+static MDEV_TYPE_ATTR_RO(description);
+
+static struct attribute *gvt_type_attrs[] = {
+ &mdev_type_attr_available_instances.attr,
+ &mdev_type_attr_device_api.attr,
+ &mdev_type_attr_description.attr,
+ NULL,
+};
+
+static struct attribute_group *gvt_vgpu_type_groups[] = {
+ [0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
+};
+
+static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
+{
+ int i, j;
+ struct intel_vgpu_type *type;
+ struct attribute_group *group;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ type = &gvt->types[i];
+
+ group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL);
+ if (!group)
+ goto unwind;
+
+ group->name = type->name;
+ group->attrs = gvt_type_attrs;
+ gvt_vgpu_type_groups[i] = group;
+ }
+
+ return 0;
+
+unwind:
+ for (j = 0; j < i; j++) {
+ group = gvt_vgpu_type_groups[j];
+ kfree(group);
+ }
+
+ return -ENOMEM;
+}
+
+static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
+{
+ int i;
+ struct attribute_group *group;
+
+ for (i = 0; i < gvt->num_types; i++) {
+ group = gvt_vgpu_type_groups[i];
+ gvt_vgpu_type_groups[i] = NULL;
+ kfree(group);
+ }
+}
+
static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
@@ -694,14 +792,13 @@ static int intel_vgpu_create(struct mdev_device *mdev)
struct intel_vgpu *vgpu = NULL;
struct intel_vgpu_type *type;
struct device *pdev;
- void *gvt;
+ struct intel_gvt *gvt;
int ret;
pdev = mdev_parent_dev(mdev);
gvt = kdev_to_i915(pdev)->gvt;
- type = intel_gvt_ops->gvt_find_vgpu_type(gvt,
- mdev_get_type_group_id(mdev));
+ type = &gvt->types[mdev_get_type_group_id(mdev)];
if (!type) {
ret = -EINVAL;
goto out;
@@ -1667,19 +1764,26 @@ static struct mdev_parent_ops intel_vgpu_ops = {
static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
- struct attribute_group **kvm_vgpu_type_groups;
+ int ret;
+
+ ret = intel_gvt_init_vgpu_type_groups((struct intel_gvt *)gvt);
+ if (ret)
+ return ret;
intel_gvt_ops = ops;
- if (!intel_gvt_ops->get_gvt_attrs(&kvm_vgpu_type_groups))
- return -EFAULT;
- intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;
+ intel_vgpu_ops.supported_type_groups = gvt_vgpu_type_groups;
- return mdev_register_device(dev, &intel_vgpu_ops);
+ ret = mdev_register_device(dev, &intel_vgpu_ops);
+ if (ret)
+ intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt);
+
+ return ret;
}
-static void kvmgt_host_exit(struct device *dev)
+static void kvmgt_host_exit(struct device *dev, void *gvt)
{
mdev_unregister_device(dev);
+ intel_gvt_cleanup_vgpu_type_groups((struct intel_gvt *)gvt);
}
static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 550a456e936f..e6c5a792a49a 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -63,13 +63,13 @@ static inline int intel_gvt_hypervisor_host_init(struct device *dev,
/**
* intel_gvt_hypervisor_host_exit - exit GVT-g host side
*/
-static inline void intel_gvt_hypervisor_host_exit(struct device *dev)
+static inline void intel_gvt_hypervisor_host_exit(struct device *dev, void *gvt)
{
/* optional to provide */
if (!intel_gvt_host.mpt->host_exit)
return;
- intel_gvt_host.mpt->host_exit(dev);
+ intel_gvt_host.mpt->host_exit(dev, gvt);
}
/**
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index cf9a3d384971..aa573b078ae7 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -1156,7 +1156,8 @@ static int auto_active(struct i915_active *ref)
return 0;
}
-static void auto_retire(struct i915_active *ref)
+__i915_active_call static void
+auto_retire(struct i915_active *ref)
{
i915_active_put(ref);
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b23f58e94cfb..b3cedd20f365 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -999,12 +999,11 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
obj->mm.madv = args->madv;
if (i915_gem_object_has_pages(obj)) {
- struct list_head *list;
+ unsigned long flags;
- if (i915_gem_object_is_shrinkable(obj)) {
- unsigned long flags;
-
- spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ spin_lock_irqsave(&i915->mm.obj_lock, flags);
+ if (!list_empty(&obj->mm.link)) {
+ struct list_head *list;
if (obj->mm.madv != I915_MADV_WILLNEED)
list = &i915->mm.purge_list;
@@ -1012,8 +1011,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
list = &i915->mm.shrink_list;
list_move_tail(&obj->mm.link, list);
- spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
+ spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}
/* if the object is no longer attached, discard its backing storage */
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 4c8cd08c672d..9a777b0ff59b 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -28,10 +28,46 @@
#include "i915_drv.h"
-#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+struct remap_pfn {
+ struct mm_struct *mm;
+ unsigned long pfn;
+ pgprot_t prot;
+
+ struct sgt_iter sgt;
+ resource_size_t iobase;
+};
#define use_dma(io) ((io) != -1)
+static inline unsigned long sgt_pfn(const struct remap_pfn *r)
+{
+ if (use_dma(r->iobase))
+ return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
+ else
+ return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
+}
+
+static int remap_sg(pte_t *pte, unsigned long addr, void *data)
+{
+ struct remap_pfn *r = data;
+
+ if (GEM_WARN_ON(!r->sgt.sgp))
+ return -EINVAL;
+
+ /* Special PTEs are not associated with any struct page */
+ set_pte_at(r->mm, addr, pte,
+ pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
+ r->pfn++; /* track insertions in case we need to unwind later */
+
+ r->sgt.curr += PAGE_SIZE;
+ if (r->sgt.curr >= r->sgt.max)
+ r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));
+
+ return 0;
+}
+
+#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+
/**
* remap_io_sg - remap an IO mapping to userspace
* @vma: user vma to map to
@@ -46,7 +82,12 @@ int remap_io_sg(struct vm_area_struct *vma,
unsigned long addr, unsigned long size,
struct scatterlist *sgl, resource_size_t iobase)
{
- unsigned long pfn, len, remapped = 0;
+ struct remap_pfn r = {
+ .mm = vma->vm_mm,
+ .prot = vma->vm_page_prot,
+ .sgt = __sgt_iter(sgl, use_dma(iobase)),
+ .iobase = iobase,
+ };
int err;
/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
@@ -55,25 +96,11 @@ int remap_io_sg(struct vm_area_struct *vma,
if (!use_dma(iobase))
flush_cache_range(vma, addr, size);
- do {
- if (use_dma(iobase)) {
- if (!sg_dma_len(sgl))
- break;
- pfn = (sg_dma_address(sgl) + iobase) >> PAGE_SHIFT;
- len = sg_dma_len(sgl);
- } else {
- pfn = page_to_pfn(sg_page(sgl));
- len = sgl->length;
- }
-
- err = remap_pfn_range(vma, addr + remapped, pfn, len,
- vma->vm_page_prot);
- if (err)
- break;
- remapped += len;
- } while ((sgl = __sg_next(sgl)));
-
- if (err)
- zap_vma_ptes(vma, addr, remapped);
- return err;
+ err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
+ if (unlikely(err)) {
+ zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
+ return err;
+ }
+
+ return 0;
}
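
The rewrite installs one PTE per page through apply_to_page_range(), with struct remap_pfn carrying the scatterlist cursor and an insertion count so a mid-walk failure can zap exactly the range already mapped. A standalone userspace analogue of that walker/callback/unwind pattern (all names illustrative):

#include <stdio.h>

struct walk_state {
	unsigned long next_pfn;		/* cursor, like remap_pfn.sgt */
	unsigned long inserted;		/* pages done, for unwind */
};

static int insert_one(unsigned long addr, void *data)
{
	struct walk_state *w = data;

	if (w->inserted == 2)		/* model a mid-walk failure */
		return -1;
	printf("map %#lx -> pfn %lu\n", addr, w->next_pfn++);
	w->inserted++;
	return 0;
}

static int apply_range(unsigned long addr, unsigned long npages,
		       int (*fn)(unsigned long, void *), void *data)
{
	for (unsigned long i = 0; i < npages; i++) {
		int err = fn(addr + i * 4096, data);

		if (err)
			return err;	/* caller unwinds partial work */
	}
	return 0;
}

int main(void)
{
	struct walk_state w = { .next_pfn = 100, .inserted = 0 };

	if (apply_range(0x10000, 4, insert_one, &w))
		printf("failed: unwind exactly %lu pages\n", w.inserted);
	return 0;
}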
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index d553f62f4eeb..b4d8e1b01ee4 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1153,10 +1153,6 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
{
struct device_node *phandle;
- a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
- if (IS_ERR(a6xx_gpu->llc_mmio))
- return;
-
/*
* There is a different programming path for targets with an mmu500
* attached, so detect if that is the case
@@ -1166,6 +1162,11 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
of_device_is_compatible(phandle, "arm,mmu-500"));
of_node_put(phandle);
+ if (a6xx_gpu->have_mmu500)
+ a6xx_gpu->llc_mmio = NULL;
+ else
+ a6xx_gpu->llc_mmio = msm_ioremap(pdev, "cx_mem", "gpu_cx");
+
a6xx_gpu->llc_slice = llcc_slice_getd(LLCC_GPU);
a6xx_gpu->htw_llc_slice = llcc_slice_getd(LLCC_GPUHTW);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 7c29976be243..18bc76b7f1a3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -648,16 +648,6 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
if (unlikely(!cstate->num_mixers))
return;
- /*
- * For planes without commit update, drm framework will not add
- * those planes to current state since hardware update is not
- * required. However, if those planes were power collapsed since
- * last commit cycle, driver has to restore the hardware state
- * of those planes explicitly here prior to plane flush.
- */
- drm_atomic_crtc_for_each_plane(plane, crtc)
- dpu_plane_restore(plane, state);
-
/* update performance setting before crtc kickoff */
dpu_core_perf_crtc_update(crtc, 1, false);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index df7f3d3afd8b..7a993547eb75 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -1258,22 +1258,6 @@ static void dpu_plane_atomic_update(struct drm_plane *plane,
}
}
-void dpu_plane_restore(struct drm_plane *plane, struct drm_atomic_state *state)
-{
- struct dpu_plane *pdpu;
-
- if (!plane || !plane->state) {
- DPU_ERROR("invalid plane\n");
- return;
- }
-
- pdpu = to_dpu_plane(plane);
-
- DPU_DEBUG_PLANE(pdpu, "\n");
-
- dpu_plane_atomic_update(plane, state);
-}
-
static void dpu_plane_destroy(struct drm_plane *plane)
{
struct dpu_plane *pdpu = plane ? to_dpu_plane(plane) : NULL;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
index 03b6365a750c..34e03ac05f4a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -85,12 +85,6 @@ void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
u32 *flush_sspp);
/**
- * dpu_plane_restore - restore hw state if previously power collapsed
- * @plane: Pointer to drm plane structure
- */
-void dpu_plane_restore(struct drm_plane *plane, struct drm_atomic_state *state);
-
-/**
* dpu_plane_flush - final plane operations before commit flush
* @plane: Pointer to drm plane structure
*/
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
index 82a8673ab8da..d7e4a39a904e 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.c
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -527,6 +527,7 @@ int dp_audio_hw_params(struct device *dev,
dp_audio_setup_acr(audio);
dp_audio_safe_to_exit_level(audio);
dp_audio_enable(audio, true);
+ dp_display_signal_audio_start(dp_display);
dp_display->audio_enabled = true;
end:
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 5a39da6e1eaf..1784e119269b 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -178,6 +178,15 @@ static int dp_del_event(struct dp_display_private *dp_priv, u32 event)
return 0;
}
+void dp_display_signal_audio_start(struct msm_dp *dp_display)
+{
+ struct dp_display_private *dp;
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ reinit_completion(&dp->audio_comp);
+}
+
void dp_display_signal_audio_complete(struct msm_dp *dp_display)
{
struct dp_display_private *dp;
@@ -586,10 +595,8 @@ static int dp_connect_pending_timeout(struct dp_display_private *dp, u32 data)
mutex_lock(&dp->event_mutex);
state = dp->hpd_state;
- if (state == ST_CONNECT_PENDING) {
- dp_display_enable(dp, 0);
+ if (state == ST_CONNECT_PENDING)
dp->hpd_state = ST_CONNECTED;
- }
mutex_unlock(&dp->event_mutex);
@@ -651,7 +658,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
/* signal the disconnect event early to ensure proper teardown */
- reinit_completion(&dp->audio_comp);
dp_display_handle_plugged_change(g_dp_display, false);
dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
@@ -669,10 +675,8 @@ static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data
mutex_lock(&dp->event_mutex);
state = dp->hpd_state;
- if (state == ST_DISCONNECT_PENDING) {
- dp_display_disable(dp, 0);
+ if (state == ST_DISCONNECT_PENDING)
dp->hpd_state = ST_DISCONNECTED;
- }
mutex_unlock(&dp->event_mutex);
@@ -898,7 +902,6 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
/* wait only if audio was enabled */
if (dp_display->audio_enabled) {
/* signal the disconnect event */
- reinit_completion(&dp->audio_comp);
dp_display_handle_plugged_change(dp_display, false);
if (!wait_for_completion_timeout(&dp->audio_comp,
HZ * 5))
@@ -1272,7 +1275,12 @@ static int dp_pm_resume(struct device *dev)
status = dp_catalog_link_is_connected(dp->catalog);
- if (status)
+ /*
+ * Cannot declare the display connected unless the
+ * HDMI cable is plugged in and the dongle's
+ * sink_count becomes 1.
+ */
+ if (status && dp->link->sink_count)
dp->dp_display.is_connected = true;
else
dp->dp_display.is_connected = false;
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index 6092ba1ed85e..5173c89eedf7 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -34,6 +34,7 @@ int dp_display_get_modes(struct msm_dp *dp_display,
int dp_display_request_irq(struct msm_dp *dp_display);
bool dp_display_check_video_test(struct msm_dp *dp_display);
int dp_display_get_test_bpp(struct msm_dp *dp_display);
+void dp_display_signal_audio_start(struct msm_dp *dp_display);
void dp_display_signal_audio_complete(struct msm_dp *dp_display);
#endif /* _DP_DISPLAY_H_ */
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index f0a2ddf96a4b..ff7f2ec42030 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -843,7 +843,7 @@ int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
if (pixel_clk_provider)
*pixel_clk_provider = phy->provided_clocks->hws[DSI_PIXEL_PLL_CLK]->clk;
- return -EINVAL;
+ return 0;
}
void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy)
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index 582b1428f971..86e40a0d41a3 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -405,6 +405,10 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm, struct clk_hw **prov
if (!vco_name)
return -ENOMEM;
+ parent_name = devm_kzalloc(dev, 32, GFP_KERNEL);
+ if (!parent_name)
+ return -ENOMEM;
+
clk_name = devm_kzalloc(dev, 32, GFP_KERNEL);
if (!clk_name)
return -ENOMEM;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index e1104d2454e2..fe7d17cd35ec 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -42,7 +42,7 @@
* - 1.7.0 - Add MSM_PARAM_SUSPENDS to access suspend count
*/
#define MSM_VERSION_MAJOR 1
-#define MSM_VERSION_MINOR 6
+#define MSM_VERSION_MINOR 7
#define MSM_VERSION_PATCHLEVEL 0
static const struct drm_mode_config_funcs mode_config_funcs = {
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index b199942266a2..56df86e5f740 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -190,13 +190,25 @@ struct page **msm_gem_get_pages(struct drm_gem_object *obj)
}
p = get_pages(obj);
+
+ if (!IS_ERR(p)) {
+ msm_obj->pin_count++;
+ update_inactive(msm_obj);
+ }
+
msm_gem_unlock(obj);
return p;
}
void msm_gem_put_pages(struct drm_gem_object *obj)
{
- /* when we start tracking the pin count, then do something here */
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ msm_gem_lock(obj);
+ msm_obj->pin_count--;
+ GEM_WARN_ON(msm_obj->pin_count < 0);
+ update_inactive(msm_obj);
+ msm_gem_unlock(obj);
}
int msm_gem_mmap_obj(struct drm_gem_object *obj,
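
msm_gem_get_pages()/msm_gem_put_pages() now maintain a pin count under the object lock, and is_unpurgeable() consults it so pinned backing pages are never reclaimed. A standalone sketch of that bookkeeping (simplified stand-in types and lock):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_obj {
	pthread_mutex_t lock;	/* stands in for msm_gem_lock() */
	int pin_count;
};

static void demo_get_pages(struct demo_obj *o)
{
	pthread_mutex_lock(&o->lock);
	o->pin_count++;		/* pages handed out: pin them */
	pthread_mutex_unlock(&o->lock);
}

static void demo_put_pages(struct demo_obj *o)
{
	pthread_mutex_lock(&o->lock);
	o->pin_count--;		/* caller done: drop the pin */
	pthread_mutex_unlock(&o->lock);
}

static bool demo_is_purgeable(struct demo_obj *o)
{
	return o->pin_count == 0;	/* pinned pages must not be reclaimed */
}

int main(void)
{
	struct demo_obj o = { PTHREAD_MUTEX_INITIALIZER, 0 };

	demo_get_pages(&o);
	printf("purgeable while pinned? %d\n", demo_is_purgeable(&o));
	demo_put_pages(&o);
	printf("purgeable after unpin?  %d\n", demo_is_purgeable(&o));
	return 0;
}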
@@ -646,6 +658,8 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
ret = -ENOMEM;
goto fail;
}
+
+ update_inactive(msm_obj);
}
return msm_obj->vaddr;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index a6480d2c81b2..03e2cc2a2ce1 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -221,7 +221,7 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
/* imported/exported objects are not purgeable: */
static inline bool is_unpurgeable(struct msm_gem_object *msm_obj)
{
- return msm_obj->base.dma_buf && msm_obj->base.import_attach;
+ return msm_obj->base.import_attach || msm_obj->pin_count;
}
static inline bool is_purgeable(struct msm_gem_object *msm_obj)
@@ -271,7 +271,7 @@ static inline void mark_unpurgeable(struct msm_gem_object *msm_obj)
static inline bool is_unevictable(struct msm_gem_object *msm_obj)
{
- return is_unpurgeable(msm_obj) || msm_obj->pin_count || msm_obj->vaddr;
+ return is_unpurgeable(msm_obj) || msm_obj->vaddr;
}
static inline void mark_evictable(struct msm_gem_object *msm_obj)
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index dd5ef6493723..769f666335ac 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -1687,102 +1687,102 @@ static int ni_populate_smc_initial_state(struct radeon_device *rdev,
u32 reg;
int ret;
- table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+ table->initialState.level.mclk.vMPLL_AD_FUNC_CNTL =
cpu_to_be32(ni_pi->clock_registers.mpll_ad_func_cntl);
- table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL_2 =
+ table->initialState.level.mclk.vMPLL_AD_FUNC_CNTL_2 =
cpu_to_be32(ni_pi->clock_registers.mpll_ad_func_cntl_2);
- table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+ table->initialState.level.mclk.vMPLL_DQ_FUNC_CNTL =
cpu_to_be32(ni_pi->clock_registers.mpll_dq_func_cntl);
- table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL_2 =
+ table->initialState.level.mclk.vMPLL_DQ_FUNC_CNTL_2 =
cpu_to_be32(ni_pi->clock_registers.mpll_dq_func_cntl_2);
- table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+ table->initialState.level.mclk.vMCLK_PWRMGT_CNTL =
cpu_to_be32(ni_pi->clock_registers.mclk_pwrmgt_cntl);
- table->initialState.levels[0].mclk.vDLL_CNTL =
+ table->initialState.level.mclk.vDLL_CNTL =
cpu_to_be32(ni_pi->clock_registers.dll_cntl);
- table->initialState.levels[0].mclk.vMPLL_SS =
+ table->initialState.level.mclk.vMPLL_SS =
cpu_to_be32(ni_pi->clock_registers.mpll_ss1);
- table->initialState.levels[0].mclk.vMPLL_SS2 =
+ table->initialState.level.mclk.vMPLL_SS2 =
cpu_to_be32(ni_pi->clock_registers.mpll_ss2);
- table->initialState.levels[0].mclk.mclk_value =
+ table->initialState.level.mclk.mclk_value =
cpu_to_be32(initial_state->performance_levels[0].mclk);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+ table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL =
cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+ table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_2 =
cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_2);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+ table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_3 =
cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_3);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+ table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_4 =
cpu_to_be32(ni_pi->clock_registers.cg_spll_func_cntl_4);
- table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
+ table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM =
cpu_to_be32(ni_pi->clock_registers.cg_spll_spread_spectrum);
- table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
+ table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
cpu_to_be32(ni_pi->clock_registers.cg_spll_spread_spectrum_2);
- table->initialState.levels[0].sclk.sclk_value =
+ table->initialState.level.sclk.sclk_value =
cpu_to_be32(initial_state->performance_levels[0].sclk);
- table->initialState.levels[0].arbRefreshState =
+ table->initialState.level.arbRefreshState =
NISLANDS_INITIAL_STATE_ARB_INDEX;
- table->initialState.levels[0].ACIndex = 0;
+ table->initialState.level.ACIndex = 0;
ret = ni_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
initial_state->performance_levels[0].vddc,
- &table->initialState.levels[0].vddc);
+ &table->initialState.level.vddc);
if (!ret) {
u16 std_vddc;
ret = ni_get_std_voltage_value(rdev,
- &table->initialState.levels[0].vddc,
+ &table->initialState.level.vddc,
&std_vddc);
if (!ret)
ni_populate_std_voltage_value(rdev, std_vddc,
- table->initialState.levels[0].vddc.index,
- &table->initialState.levels[0].std_vddc);
+ table->initialState.level.vddc.index,
+ &table->initialState.level.std_vddc);
}
if (eg_pi->vddci_control)
ni_populate_voltage_value(rdev,
&eg_pi->vddci_voltage_table,
initial_state->performance_levels[0].vddci,
- &table->initialState.levels[0].vddci);
+ &table->initialState.level.vddci);
- ni_populate_initial_mvdd_value(rdev, &table->initialState.levels[0].mvdd);
+ ni_populate_initial_mvdd_value(rdev, &table->initialState.level.mvdd);
reg = CG_R(0xffff) | CG_L(0);
- table->initialState.levels[0].aT = cpu_to_be32(reg);
+ table->initialState.level.aT = cpu_to_be32(reg);
- table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
+ table->initialState.level.bSP = cpu_to_be32(pi->dsp);
if (pi->boot_in_gen2)
- table->initialState.levels[0].gen2PCIE = 1;
+ table->initialState.level.gen2PCIE = 1;
else
- table->initialState.levels[0].gen2PCIE = 0;
+ table->initialState.level.gen2PCIE = 0;
if (pi->mem_gddr5) {
- table->initialState.levels[0].strobeMode =
+ table->initialState.level.strobeMode =
cypress_get_strobe_mode_settings(rdev,
initial_state->performance_levels[0].mclk);
if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
- table->initialState.levels[0].mcFlags = NISLANDS_SMC_MC_EDC_RD_FLAG | NISLANDS_SMC_MC_EDC_WR_FLAG;
+ table->initialState.level.mcFlags = NISLANDS_SMC_MC_EDC_RD_FLAG | NISLANDS_SMC_MC_EDC_WR_FLAG;
else
- table->initialState.levels[0].mcFlags = 0;
+ table->initialState.level.mcFlags = 0;
}
table->initialState.levelCount = 1;
table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
- table->initialState.levels[0].dpm2.MaxPS = 0;
- table->initialState.levels[0].dpm2.NearTDPDec = 0;
- table->initialState.levels[0].dpm2.AboveSafeInc = 0;
- table->initialState.levels[0].dpm2.BelowSafeInc = 0;
+ table->initialState.level.dpm2.MaxPS = 0;
+ table->initialState.level.dpm2.NearTDPDec = 0;
+ table->initialState.level.dpm2.AboveSafeInc = 0;
+ table->initialState.level.dpm2.BelowSafeInc = 0;
reg = MIN_POWER_MASK | MAX_POWER_MASK;
- table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+ table->initialState.level.SQPowerThrottle = cpu_to_be32(reg);
reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
- table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+ table->initialState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
}
@@ -1813,43 +1813,43 @@ static int ni_populate_smc_acpi_state(struct radeon_device *rdev,
if (pi->acpi_vddc) {
ret = ni_populate_voltage_value(rdev,
&eg_pi->vddc_voltage_table,
- pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
+ pi->acpi_vddc, &table->ACPIState.level.vddc);
if (!ret) {
u16 std_vddc;
ret = ni_get_std_voltage_value(rdev,
- &table->ACPIState.levels[0].vddc, &std_vddc);
+ &table->ACPIState.level.vddc, &std_vddc);
if (!ret)
ni_populate_std_voltage_value(rdev, std_vddc,
- table->ACPIState.levels[0].vddc.index,
- &table->ACPIState.levels[0].std_vddc);
+ table->ACPIState.level.vddc.index,
+ &table->ACPIState.level.std_vddc);
}
if (pi->pcie_gen2) {
if (pi->acpi_pcie_gen2)
- table->ACPIState.levels[0].gen2PCIE = 1;
+ table->ACPIState.level.gen2PCIE = 1;
else
- table->ACPIState.levels[0].gen2PCIE = 0;
+ table->ACPIState.level.gen2PCIE = 0;
} else {
- table->ACPIState.levels[0].gen2PCIE = 0;
+ table->ACPIState.level.gen2PCIE = 0;
}
} else {
ret = ni_populate_voltage_value(rdev,
&eg_pi->vddc_voltage_table,
pi->min_vddc_in_table,
- &table->ACPIState.levels[0].vddc);
+ &table->ACPIState.level.vddc);
if (!ret) {
u16 std_vddc;
ret = ni_get_std_voltage_value(rdev,
- &table->ACPIState.levels[0].vddc,
+ &table->ACPIState.level.vddc,
&std_vddc);
if (!ret)
ni_populate_std_voltage_value(rdev, std_vddc,
- table->ACPIState.levels[0].vddc.index,
- &table->ACPIState.levels[0].std_vddc);
+ table->ACPIState.level.vddc.index,
+ &table->ACPIState.level.std_vddc);
}
- table->ACPIState.levels[0].gen2PCIE = 0;
+ table->ACPIState.level.gen2PCIE = 0;
}
if (eg_pi->acpi_vddci) {
@@ -1857,7 +1857,7 @@ static int ni_populate_smc_acpi_state(struct radeon_device *rdev,
ni_populate_voltage_value(rdev,
&eg_pi->vddci_voltage_table,
eg_pi->acpi_vddci,
- &table->ACPIState.levels[0].vddci);
+ &table->ACPIState.level.vddci);
}
@@ -1900,37 +1900,37 @@ static int ni_populate_smc_acpi_state(struct radeon_device *rdev,
spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
spll_func_cntl_2 |= SCLK_MUX_SEL(4);
- table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
- table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
- table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
- table->ACPIState.levels[0].mclk.vDLL_CNTL = cpu_to_be32(dll_cntl);
+ table->ACPIState.level.mclk.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
+ table->ACPIState.level.mclk.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
+ table->ACPIState.level.mclk.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
+ table->ACPIState.level.mclk.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
+ table->ACPIState.level.mclk.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
+ table->ACPIState.level.mclk.vDLL_CNTL = cpu_to_be32(dll_cntl);
- table->ACPIState.levels[0].mclk.mclk_value = 0;
+ table->ACPIState.level.mclk.mclk_value = 0;
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(spll_func_cntl_4);
+ table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
+ table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
+ table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
+ table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(spll_func_cntl_4);
- table->ACPIState.levels[0].sclk.sclk_value = 0;
+ table->ACPIState.level.sclk.sclk_value = 0;
- ni_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
+ ni_populate_mvdd_value(rdev, 0, &table->ACPIState.level.mvdd);
if (eg_pi->dynamic_ac_timing)
- table->ACPIState.levels[0].ACIndex = 1;
+ table->ACPIState.level.ACIndex = 1;
- table->ACPIState.levels[0].dpm2.MaxPS = 0;
- table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
- table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
- table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
+ table->ACPIState.level.dpm2.MaxPS = 0;
+ table->ACPIState.level.dpm2.NearTDPDec = 0;
+ table->ACPIState.level.dpm2.AboveSafeInc = 0;
+ table->ACPIState.level.dpm2.BelowSafeInc = 0;
reg = MIN_POWER_MASK | MAX_POWER_MASK;
- table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+ table->ACPIState.level.SQPowerThrottle = cpu_to_be32(reg);
reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
- table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+ table->ACPIState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
}
@@ -1980,7 +1980,9 @@ static int ni_init_smc_table(struct radeon_device *rdev)
if (ret)
return ret;
- table->driverState = table->initialState;
+ table->driverState.flags = table->initialState.flags;
+ table->driverState.levelCount = table->initialState.levelCount;
+ table->driverState.levels[0] = table->initialState.level;
table->ULVState = table->initialState;
diff --git a/drivers/gpu/drm/radeon/nislands_smc.h b/drivers/gpu/drm/radeon/nislands_smc.h
index 7395cb6b3cac..42f3bab0f9ee 100644
--- a/drivers/gpu/drm/radeon/nislands_smc.h
+++ b/drivers/gpu/drm/radeon/nislands_smc.h
@@ -143,6 +143,14 @@ struct NISLANDS_SMC_SWSTATE
typedef struct NISLANDS_SMC_SWSTATE NISLANDS_SMC_SWSTATE;
+struct NISLANDS_SMC_SWSTATE_SINGLE {
+ uint8_t flags;
+ uint8_t levelCount;
+ uint8_t padding2;
+ uint8_t padding3;
+ NISLANDS_SMC_HW_PERFORMANCE_LEVEL level;
+};
+
#define NISLANDS_SMC_VOLTAGEMASK_VDDC 0
#define NISLANDS_SMC_VOLTAGEMASK_MVDD 1
#define NISLANDS_SMC_VOLTAGEMASK_VDDCI 2
@@ -160,19 +168,19 @@ typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE;
struct NISLANDS_SMC_STATETABLE
{
- uint8_t thermalProtectType;
- uint8_t systemFlags;
- uint8_t maxVDDCIndexInPPTable;
- uint8_t extraFlags;
- uint8_t highSMIO[NISLANDS_MAX_NO_VREG_STEPS];
- uint32_t lowSMIO[NISLANDS_MAX_NO_VREG_STEPS];
- NISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
- PP_NIslands_DPM2Parameters dpm2Params;
- NISLANDS_SMC_SWSTATE initialState;
- NISLANDS_SMC_SWSTATE ACPIState;
- NISLANDS_SMC_SWSTATE ULVState;
- NISLANDS_SMC_SWSTATE driverState;
- NISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
+ uint8_t thermalProtectType;
+ uint8_t systemFlags;
+ uint8_t maxVDDCIndexInPPTable;
+ uint8_t extraFlags;
+ uint8_t highSMIO[NISLANDS_MAX_NO_VREG_STEPS];
+ uint32_t lowSMIO[NISLANDS_MAX_NO_VREG_STEPS];
+ NISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
+ PP_NIslands_DPM2Parameters dpm2Params;
+ struct NISLANDS_SMC_SWSTATE_SINGLE initialState;
+ struct NISLANDS_SMC_SWSTATE_SINGLE ACPIState;
+ struct NISLANDS_SMC_SWSTATE_SINGLE ULVState;
+ NISLANDS_SMC_SWSTATE driverState;
+ NISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
};
typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE;
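The header change above is the crux of the series: initialState, ACPIState and ULVState only ever carry one performance level, so they move to a fixed single-level struct, while driverState keeps the array form; that is why ni_init_smc_table() now copies it field by field where a whole-struct assignment used to be (ULVState, also converted, can still be assigned from initialState directly). A build-time sketch of the layout equivalence, assuming the pre-existing NISLANDS_SMC_SWSTATE starts with the same four header bytes followed by a one-element levels[] array (its definition sits outside this hunk):

#include <stddef.h>

/* Stand-in for NISLANDS_SMC_HW_PERFORMANCE_LEVEL; only the layout
 * relationship matters here, not the real field list. */
struct example_level { unsigned char payload[4]; };

/* Assumed shape of the old NISLANDS_SMC_SWSTATE. */
struct example_swstate {
	unsigned char flags, levelCount, padding2, padding3;
	struct example_level levels[1];
};

/* New shape introduced by this patch. */
struct example_swstate_single {
	unsigned char flags, levelCount, padding2, padding3;
	struct example_level level;
};

/* If the assumption about the old definition holds, the offsets the SMC
 * firmware sees inside NISLANDS_SMC_STATETABLE do not move. */
_Static_assert(sizeof(struct example_swstate) ==
	       sizeof(struct example_swstate_single),
	       "single-level state must keep the same size");
_Static_assert(offsetof(struct example_swstate, levels) ==
	       offsetof(struct example_swstate_single, level),
	       "level payload must stay at the same offset");

int main(void) { return 0; }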
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 42281fce552e..56ed5634cebe 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1549,6 +1549,7 @@ struct radeon_dpm {
void *priv;
u32 new_active_crtcs;
int new_active_crtc_count;
+ int high_pixelclock_count;
u32 current_active_crtcs;
int current_active_crtc_count;
bool single_display;
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 42301b4e56f5..28c4413f4dc8 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2120,11 +2120,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
return state_index;
/* last mode is usually default, array is low to high */
for (i = 0; i < num_modes; i++) {
- rdev->pm.power_state[state_index].clock_info =
- kcalloc(1, sizeof(struct radeon_pm_clock_info),
- GFP_KERNEL);
+ /* avoid memory leaks from invalid modes or unknown frev. */
+ if (!rdev->pm.power_state[state_index].clock_info) {
+ rdev->pm.power_state[state_index].clock_info =
+ kzalloc(sizeof(struct radeon_pm_clock_info),
+ GFP_KERNEL);
+ }
if (!rdev->pm.power_state[state_index].clock_info)
- return state_index;
+ goto out;
rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
switch (frev) {
@@ -2243,17 +2246,24 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
break;
}
}
+out:
+ /* free any unused clock_info allocation. */
+ if (state_index && state_index < num_modes) {
+ kfree(rdev->pm.power_state[state_index].clock_info);
+ rdev->pm.power_state[state_index].clock_info = NULL;
+ }
+
/* last mode is usually default */
- if (rdev->pm.default_power_state_index == -1) {
+ if (state_index && rdev->pm.default_power_state_index == -1) {
rdev->pm.power_state[state_index - 1].type =
POWER_STATE_TYPE_DEFAULT;
rdev->pm.default_power_state_index = state_index - 1;
rdev->pm.power_state[state_index - 1].default_clock_mode =
&rdev->pm.power_state[state_index - 1].clock_info[0];
- rdev->pm.power_state[state_index].flags &=
+ rdev->pm.power_state[state_index - 1].flags &=
~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
- rdev->pm.power_state[state_index].misc = 0;
- rdev->pm.power_state[state_index].misc2 = 0;
+ rdev->pm.power_state[state_index - 1].misc = 0;
+ rdev->pm.power_state[state_index - 1].misc2 = 0;
}
return state_index;
}
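Spelled out, the leak worked like this: state_index only advances when the frev switch accepts a mode, so the old unconditional kcalloc() for power_state[state_index] replaced, and thereby leaked, the buffer allocated on a previous rejected iteration, and a final allocation could be left attached to a slot no accepted mode ever used. A reduced userland sketch of the corrected pattern (all names hypothetical):

#include <stdlib.h>

struct power_state { void *clock_info; };

static int parse_modes(struct power_state *states, int num_modes,
		       const int *mode_ok)
{
	int state_index = 0;

	for (int i = 0; i < num_modes; i++) {
		/* Allocate only if the slot is still empty; a rejected
		 * mode leaves state_index, and the buffer, for reuse. */
		if (!states[state_index].clock_info)
			states[state_index].clock_info = calloc(1, 64);
		if (!states[state_index].clock_info)
			break;
		if (mode_ok[i])
			state_index++;	/* slot consumed */
	}

	/* Drop a trailing allocation that no accepted mode consumed. */
	if (state_index < num_modes && states[state_index].clock_info) {
		free(states[state_index].clock_info);
		states[state_index].clock_info = NULL;
	}
	return state_index;
}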
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 3808a753127b..04109a2a6fd7 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -301,7 +301,8 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
- rdev->gart.pages[p] = pagelist[i];
+ rdev->gart.pages[p] = pagelist ? pagelist[i] :
+ rdev->dummy_page.page;
page_base = dma_addr[i];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
page_entry = radeon_gart_get_page_entry(page_base, flags);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 0c1950f4e146..3861c0b98fcf 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1767,6 +1767,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
struct drm_device *ddev = rdev->ddev;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
+ struct radeon_connector *radeon_connector;
if (!rdev->pm.dpm_enabled)
return;
@@ -1776,6 +1777,7 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
/* update active crtc counts */
rdev->pm.dpm.new_active_crtcs = 0;
rdev->pm.dpm.new_active_crtc_count = 0;
+ rdev->pm.dpm.high_pixelclock_count = 0;
if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
list_for_each_entry(crtc,
&ddev->mode_config.crtc_list, head) {
@@ -1783,6 +1785,12 @@ static void radeon_pm_compute_clocks_dpm(struct radeon_device *rdev)
if (crtc->enabled) {
rdev->pm.dpm.new_active_crtcs |= (1 << radeon_crtc->crtc_id);
rdev->pm.dpm.new_active_crtc_count++;
+ if (!radeon_crtc->connector)
+ continue;
+
+ radeon_connector = to_radeon_connector(radeon_crtc->connector);
+ if (radeon_connector->pixelclock_for_modeset > 297000)
+ rdev->pm.dpm.high_pixelclock_count++;
}
}
}
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 918609551804..3add39c1a689 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2979,6 +2979,9 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
(rdev->pdev->device == 0x6605)) {
max_sclk = 75000;
}
+
+ if (rdev->pm.dpm.high_pixelclock_count > 1)
+ disable_sclk_switching = true;
}
if (rps->vce_active) {
@@ -4350,70 +4353,70 @@ static int si_populate_smc_initial_state(struct radeon_device *rdev,
u32 reg;
int ret;
- table->initialState.levels[0].mclk.vDLL_CNTL =
+ table->initialState.level.mclk.vDLL_CNTL =
cpu_to_be32(si_pi->clock_registers.dll_cntl);
- table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+ table->initialState.level.mclk.vMCLK_PWRMGT_CNTL =
cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl);
- table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+ table->initialState.level.mclk.vMPLL_AD_FUNC_CNTL =
cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl);
- table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+ table->initialState.level.mclk.vMPLL_DQ_FUNC_CNTL =
cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl);
- table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL =
+ table->initialState.level.mclk.vMPLL_FUNC_CNTL =
cpu_to_be32(si_pi->clock_registers.mpll_func_cntl);
- table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+ table->initialState.level.mclk.vMPLL_FUNC_CNTL_1 =
cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1);
- table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+ table->initialState.level.mclk.vMPLL_FUNC_CNTL_2 =
cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2);
- table->initialState.levels[0].mclk.vMPLL_SS =
+ table->initialState.level.mclk.vMPLL_SS =
cpu_to_be32(si_pi->clock_registers.mpll_ss1);
- table->initialState.levels[0].mclk.vMPLL_SS2 =
+ table->initialState.level.mclk.vMPLL_SS2 =
cpu_to_be32(si_pi->clock_registers.mpll_ss2);
- table->initialState.levels[0].mclk.mclk_value =
+ table->initialState.level.mclk.mclk_value =
cpu_to_be32(initial_state->performance_levels[0].mclk);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+ table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL =
cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+ table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_2 =
cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+ table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_3 =
cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3);
- table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+ table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_4 =
cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4);
- table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
+ table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM =
cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum);
- table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
+ table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2);
- table->initialState.levels[0].sclk.sclk_value =
+ table->initialState.level.sclk.sclk_value =
cpu_to_be32(initial_state->performance_levels[0].sclk);
- table->initialState.levels[0].arbRefreshState =
+ table->initialState.level.arbRefreshState =
SISLANDS_INITIAL_STATE_ARB_INDEX;
- table->initialState.levels[0].ACIndex = 0;
+ table->initialState.level.ACIndex = 0;
ret = si_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
initial_state->performance_levels[0].vddc,
- &table->initialState.levels[0].vddc);
+ &table->initialState.level.vddc);
if (!ret) {
u16 std_vddc;
ret = si_get_std_voltage_value(rdev,
- &table->initialState.levels[0].vddc,
+ &table->initialState.level.vddc,
&std_vddc);
if (!ret)
si_populate_std_voltage_value(rdev, std_vddc,
- table->initialState.levels[0].vddc.index,
- &table->initialState.levels[0].std_vddc);
+ table->initialState.level.vddc.index,
+ &table->initialState.level.std_vddc);
}
if (eg_pi->vddci_control)
si_populate_voltage_value(rdev,
&eg_pi->vddci_voltage_table,
initial_state->performance_levels[0].vddci,
- &table->initialState.levels[0].vddci);
+ &table->initialState.level.vddci);
if (si_pi->vddc_phase_shed_control)
si_populate_phase_shedding_value(rdev,
@@ -4421,43 +4424,43 @@ static int si_populate_smc_initial_state(struct radeon_device *rdev,
initial_state->performance_levels[0].vddc,
initial_state->performance_levels[0].sclk,
initial_state->performance_levels[0].mclk,
- &table->initialState.levels[0].vddc);
+ &table->initialState.level.vddc);
- si_populate_initial_mvdd_value(rdev, &table->initialState.levels[0].mvdd);
+ si_populate_initial_mvdd_value(rdev, &table->initialState.level.mvdd);
reg = CG_R(0xffff) | CG_L(0);
- table->initialState.levels[0].aT = cpu_to_be32(reg);
+ table->initialState.level.aT = cpu_to_be32(reg);
- table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
+ table->initialState.level.bSP = cpu_to_be32(pi->dsp);
- table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;
+ table->initialState.level.gen2PCIE = (u8)si_pi->boot_pcie_gen;
if (pi->mem_gddr5) {
- table->initialState.levels[0].strobeMode =
+ table->initialState.level.strobeMode =
si_get_strobe_mode_settings(rdev,
initial_state->performance_levels[0].mclk);
if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
- table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
+ table->initialState.level.mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
else
- table->initialState.levels[0].mcFlags = 0;
+ table->initialState.level.mcFlags = 0;
}
table->initialState.levelCount = 1;
table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
- table->initialState.levels[0].dpm2.MaxPS = 0;
- table->initialState.levels[0].dpm2.NearTDPDec = 0;
- table->initialState.levels[0].dpm2.AboveSafeInc = 0;
- table->initialState.levels[0].dpm2.BelowSafeInc = 0;
- table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+ table->initialState.level.dpm2.MaxPS = 0;
+ table->initialState.level.dpm2.NearTDPDec = 0;
+ table->initialState.level.dpm2.AboveSafeInc = 0;
+ table->initialState.level.dpm2.BelowSafeInc = 0;
+ table->initialState.level.dpm2.PwrEfficiencyRatio = 0;
reg = MIN_POWER_MASK | MAX_POWER_MASK;
- table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+ table->initialState.level.SQPowerThrottle = cpu_to_be32(reg);
reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
- table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+ table->initialState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
}
@@ -4488,18 +4491,18 @@ static int si_populate_smc_acpi_state(struct radeon_device *rdev,
if (pi->acpi_vddc) {
ret = si_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
- pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
+ pi->acpi_vddc, &table->ACPIState.level.vddc);
if (!ret) {
u16 std_vddc;
ret = si_get_std_voltage_value(rdev,
- &table->ACPIState.levels[0].vddc, &std_vddc);
+ &table->ACPIState.level.vddc, &std_vddc);
if (!ret)
si_populate_std_voltage_value(rdev, std_vddc,
- table->ACPIState.levels[0].vddc.index,
- &table->ACPIState.levels[0].std_vddc);
+ table->ACPIState.level.vddc.index,
+ &table->ACPIState.level.std_vddc);
}
- table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen;
+ table->ACPIState.level.gen2PCIE = si_pi->acpi_pcie_gen;
if (si_pi->vddc_phase_shed_control) {
si_populate_phase_shedding_value(rdev,
@@ -4507,23 +4510,23 @@ static int si_populate_smc_acpi_state(struct radeon_device *rdev,
pi->acpi_vddc,
0,
0,
- &table->ACPIState.levels[0].vddc);
+ &table->ACPIState.level.vddc);
}
} else {
ret = si_populate_voltage_value(rdev, &eg_pi->vddc_voltage_table,
- pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc);
+ pi->min_vddc_in_table, &table->ACPIState.level.vddc);
if (!ret) {
u16 std_vddc;
ret = si_get_std_voltage_value(rdev,
- &table->ACPIState.levels[0].vddc, &std_vddc);
+ &table->ACPIState.level.vddc, &std_vddc);
if (!ret)
si_populate_std_voltage_value(rdev, std_vddc,
- table->ACPIState.levels[0].vddc.index,
- &table->ACPIState.levels[0].std_vddc);
+ table->ACPIState.level.vddc.index,
+ &table->ACPIState.level.std_vddc);
}
- table->ACPIState.levels[0].gen2PCIE = (u8)r600_get_pcie_gen_support(rdev,
+ table->ACPIState.level.gen2PCIE = (u8)r600_get_pcie_gen_support(rdev,
si_pi->sys_pcie_mask,
si_pi->boot_pcie_gen,
RADEON_PCIE_GEN1);
@@ -4534,14 +4537,14 @@ static int si_populate_smc_acpi_state(struct radeon_device *rdev,
pi->min_vddc_in_table,
0,
0,
- &table->ACPIState.levels[0].vddc);
+ &table->ACPIState.level.vddc);
}
if (pi->acpi_vddc) {
if (eg_pi->acpi_vddci)
si_populate_voltage_value(rdev, &eg_pi->vddci_voltage_table,
eg_pi->acpi_vddci,
- &table->ACPIState.levels[0].vddci);
+ &table->ACPIState.level.vddci);
}
mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
@@ -4552,59 +4555,59 @@ static int si_populate_smc_acpi_state(struct radeon_device *rdev,
spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
spll_func_cntl_2 |= SCLK_MUX_SEL(4);
- table->ACPIState.levels[0].mclk.vDLL_CNTL =
+ table->ACPIState.level.mclk.vDLL_CNTL =
cpu_to_be32(dll_cntl);
- table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
+ table->ACPIState.level.mclk.vMCLK_PWRMGT_CNTL =
cpu_to_be32(mclk_pwrmgt_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
+ table->ACPIState.level.mclk.vMPLL_AD_FUNC_CNTL =
cpu_to_be32(mpll_ad_func_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
+ table->ACPIState.level.mclk.vMPLL_DQ_FUNC_CNTL =
cpu_to_be32(mpll_dq_func_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL =
+ table->ACPIState.level.mclk.vMPLL_FUNC_CNTL =
cpu_to_be32(mpll_func_cntl);
- table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
+ table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_1 =
cpu_to_be32(mpll_func_cntl_1);
- table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
+ table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_2 =
cpu_to_be32(mpll_func_cntl_2);
- table->ACPIState.levels[0].mclk.vMPLL_SS =
+ table->ACPIState.level.mclk.vMPLL_SS =
cpu_to_be32(si_pi->clock_registers.mpll_ss1);
- table->ACPIState.levels[0].mclk.vMPLL_SS2 =
+ table->ACPIState.level.mclk.vMPLL_SS2 =
cpu_to_be32(si_pi->clock_registers.mpll_ss2);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
+ table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL =
cpu_to_be32(spll_func_cntl);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
+ table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_2 =
cpu_to_be32(spll_func_cntl_2);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
+ table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_3 =
cpu_to_be32(spll_func_cntl_3);
- table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
+ table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_4 =
cpu_to_be32(spll_func_cntl_4);
- table->ACPIState.levels[0].mclk.mclk_value = 0;
- table->ACPIState.levels[0].sclk.sclk_value = 0;
+ table->ACPIState.level.mclk.mclk_value = 0;
+ table->ACPIState.level.sclk.sclk_value = 0;
- si_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
+ si_populate_mvdd_value(rdev, 0, &table->ACPIState.level.mvdd);
if (eg_pi->dynamic_ac_timing)
- table->ACPIState.levels[0].ACIndex = 0;
+ table->ACPIState.level.ACIndex = 0;
- table->ACPIState.levels[0].dpm2.MaxPS = 0;
- table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
- table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
- table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
- table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0;
+ table->ACPIState.level.dpm2.MaxPS = 0;
+ table->ACPIState.level.dpm2.NearTDPDec = 0;
+ table->ACPIState.level.dpm2.AboveSafeInc = 0;
+ table->ACPIState.level.dpm2.BelowSafeInc = 0;
+ table->ACPIState.level.dpm2.PwrEfficiencyRatio = 0;
reg = MIN_POWER_MASK | MAX_POWER_MASK;
- table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
+ table->ACPIState.level.SQPowerThrottle = cpu_to_be32(reg);
reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
- table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
+ table->ACPIState.level.SQPowerThrottle_2 = cpu_to_be32(reg);
return 0;
}
static int si_populate_ulv_state(struct radeon_device *rdev,
- SISLANDS_SMC_SWSTATE *state)
+ struct SISLANDS_SMC_SWSTATE_SINGLE *state)
{
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct si_power_info *si_pi = si_get_pi(rdev);
@@ -4613,19 +4616,19 @@ static int si_populate_ulv_state(struct radeon_device *rdev,
int ret;
ret = si_convert_power_level_to_smc(rdev, &ulv->pl,
- &state->levels[0]);
+ &state->level);
if (!ret) {
if (eg_pi->sclk_deep_sleep) {
if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
- state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
+ state->level.stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
else
- state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
+ state->level.stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
}
if (ulv->one_pcie_lane_in_ulv)
state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1;
- state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
- state->levels[0].ACIndex = 1;
- state->levels[0].std_vddc = state->levels[0].vddc;
+ state->level.arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
+ state->level.ACIndex = 1;
+ state->level.std_vddc = state->level.vddc;
state->levelCount = 1;
state->flags |= PPSMC_SWSTATE_FLAG_DC;
@@ -4725,7 +4728,9 @@ static int si_init_smc_table(struct radeon_device *rdev)
if (ret)
return ret;
- table->driverState = table->initialState;
+ table->driverState.flags = table->initialState.flags;
+ table->driverState.levelCount = table->initialState.levelCount;
+ table->driverState.levels[0] = table->initialState.level;
ret = si_do_program_memory_timing_parameters(rdev, radeon_boot_state,
SISLANDS_INITIAL_STATE_ARB_INDEX);
@@ -5275,8 +5280,8 @@ static int si_upload_ulv_state(struct radeon_device *rdev)
if (ulv->supported && ulv->pl.vddc) {
u32 address = si_pi->state_table_start +
offsetof(SISLANDS_SMC_STATETABLE, ULVState);
- SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.ULVState;
- u32 state_size = sizeof(SISLANDS_SMC_SWSTATE);
+ struct SISLANDS_SMC_SWSTATE_SINGLE *smc_state = &si_pi->smc_statetable.ULVState;
+ u32 state_size = sizeof(struct SISLANDS_SMC_SWSTATE_SINGLE);
memset(smc_state, 0, state_size);
diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h
index fbd6589bdab9..4ea1cb2e45a3 100644
--- a/drivers/gpu/drm/radeon/sislands_smc.h
+++ b/drivers/gpu/drm/radeon/sislands_smc.h
@@ -191,6 +191,14 @@ struct SISLANDS_SMC_SWSTATE
typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE;
+struct SISLANDS_SMC_SWSTATE_SINGLE {
+ uint8_t flags;
+ uint8_t levelCount;
+ uint8_t padding2;
+ uint8_t padding3;
+ SISLANDS_SMC_HW_PERFORMANCE_LEVEL level;
+};
+
#define SISLANDS_SMC_VOLTAGEMASK_VDDC 0
#define SISLANDS_SMC_VOLTAGEMASK_MVDD 1
#define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2
@@ -208,19 +216,19 @@ typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE;
struct SISLANDS_SMC_STATETABLE
{
- uint8_t thermalProtectType;
- uint8_t systemFlags;
- uint8_t maxVDDCIndexInPPTable;
- uint8_t extraFlags;
- uint32_t lowSMIO[SISLANDS_MAX_NO_VREG_STEPS];
- SISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
- SISLANDS_SMC_VOLTAGEMASKTABLE phaseMaskTable;
- PP_SIslands_DPM2Parameters dpm2Params;
- SISLANDS_SMC_SWSTATE initialState;
- SISLANDS_SMC_SWSTATE ACPIState;
- SISLANDS_SMC_SWSTATE ULVState;
- SISLANDS_SMC_SWSTATE driverState;
- SISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1];
+ uint8_t thermalProtectType;
+ uint8_t systemFlags;
+ uint8_t maxVDDCIndexInPPTable;
+ uint8_t extraFlags;
+ uint32_t lowSMIO[SISLANDS_MAX_NO_VREG_STEPS];
+ SISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable;
+ SISLANDS_SMC_VOLTAGEMASKTABLE phaseMaskTable;
+ PP_SIslands_DPM2Parameters dpm2Params;
+ struct SISLANDS_SMC_SWSTATE_SINGLE initialState;
+ struct SISLANDS_SMC_SWSTATE_SINGLE ACPIState;
+ struct SISLANDS_SMC_SWSTATE_SINGLE ULVState;
+ SISLANDS_SMC_SWSTATE driverState;
+ SISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
};
typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index bd5b8eb58b18..090529d0d5dc 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -197,12 +197,6 @@ struct vc4_vec_connector {
struct drm_encoder *encoder;
};
-static inline struct vc4_vec_connector *
-to_vc4_vec_connector(struct drm_connector *connector)
-{
- return container_of(connector, struct vc4_vec_connector, base);
-}
-
enum vc4_vec_tv_mode_id {
VC4_VEC_TV_MODE_NTSC,
VC4_VEC_TV_MODE_NTSC_J,
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 5677263bcf0d..483cd757abd3 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -485,7 +485,7 @@ static int adm9240_in_write(struct device *dev, u32 attr, int channel, long val)
reg = ADM9240_REG_IN_MIN(channel);
break;
case hwmon_in_max:
- reg = ADM9240_REG_IN(channel);
+ reg = ADM9240_REG_IN_MAX(channel);
break;
default:
return -EOPNOTSUPP;
diff --git a/drivers/hwmon/corsair-psu.c b/drivers/hwmon/corsair-psu.c
index 3a5807e4a2ef..02298b86b57b 100644
--- a/drivers/hwmon/corsair-psu.c
+++ b/drivers/hwmon/corsair-psu.c
@@ -355,7 +355,7 @@ static umode_t corsairpsu_hwmon_power_is_visible(const struct corsairpsu_data *p
return 0444;
default:
return 0;
- };
+ }
}
static umode_t corsairpsu_hwmon_in_is_visible(const struct corsairpsu_data *priv, u32 attr,
@@ -376,7 +376,7 @@ static umode_t corsairpsu_hwmon_in_is_visible(const struct corsairpsu_data *priv
break;
default:
break;
- };
+ }
return res;
}
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index ac4adb44b224..97ab491d2922 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -596,7 +596,6 @@ static int lm80_probe(struct i2c_client *client)
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct lm80_data *data;
- int rv;
data = devm_kzalloc(dev, sizeof(struct lm80_data), GFP_KERNEL);
if (!data)
@@ -609,14 +608,8 @@ static int lm80_probe(struct i2c_client *client)
lm80_init_client(client);
/* A few vars need to be filled upon startup */
- rv = lm80_read_value(client, LM80_REG_FAN_MIN(1));
- if (rv < 0)
- return rv;
- data->fan[f_min][0] = rv;
- rv = lm80_read_value(client, LM80_REG_FAN_MIN(2));
- if (rv < 0)
- return rv;
- data->fan[f_min][1] = rv;
+ data->fan[f_min][0] = lm80_read_value(client, LM80_REG_FAN_MIN(1));
+ data->fan[f_min][1] = lm80_read_value(client, LM80_REG_FAN_MIN(2));
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
data, lm80_groups);
diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
index 4382105bf142..2a4bed0ab226 100644
--- a/drivers/hwmon/ltc2992.c
+++ b/drivers/hwmon/ltc2992.c
@@ -900,11 +900,15 @@ static int ltc2992_parse_dt(struct ltc2992_state *st)
fwnode_for_each_available_child_node(fwnode, child) {
ret = fwnode_property_read_u32(child, "reg", &addr);
- if (ret < 0)
+ if (ret < 0) {
+ fwnode_handle_put(child);
return ret;
+ }
- if (addr > 1)
+ if (addr > 1) {
+ fwnode_handle_put(child);
return -EINVAL;
+ }
ret = fwnode_property_read_u32(child, "shunt-resistor-micro-ohms", &val);
if (!ret)
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index f1ac153d0b56..967532afb1c0 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -217,9 +217,9 @@ int occ_update_response(struct occ *occ)
return rc;
/* limit the maximum rate of polling the OCC */
- if (time_after(jiffies, occ->last_update + OCC_UPDATE_FREQUENCY)) {
+ if (time_after(jiffies, occ->next_update)) {
rc = occ_poll(occ);
- occ->last_update = jiffies;
+ occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
} else {
rc = occ->last_error;
}
@@ -1165,6 +1165,7 @@ int occ_setup(struct occ *occ, const char *name)
return rc;
}
+ occ->next_update = jiffies + OCC_UPDATE_FREQUENCY;
occ_parse_poll_response(occ);
rc = occ_setup_sensor_attrs(occ);
diff --git a/drivers/hwmon/occ/common.h b/drivers/hwmon/occ/common.h
index 67e6968b8978..e6df719770e8 100644
--- a/drivers/hwmon/occ/common.h
+++ b/drivers/hwmon/occ/common.h
@@ -99,7 +99,7 @@ struct occ {
u8 poll_cmd_data; /* to perform OCC poll command */
int (*send_cmd)(struct occ *occ, u8 *cmd);
- unsigned long last_update;
+ unsigned long next_update;
struct mutex lock; /* lock OCC access */
struct device *hwmon;
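The occ change flips the bookkeeping from "when did we last poll" to "when may we poll next", and occ_setup() now seeds the deadline so the response cached during setup satisfies requests for one full interval. A reduced userland form of the deadline pattern (jiffies stubbed; time_after() reimplemented with the kernel's wrap-safe signed comparison):

#include <stdbool.h>
#include <stdio.h>

#define OCC_UPDATE_FREQUENCY	100

static unsigned long jiffies;	/* stub clock, advanced by the loop */

/* Kernel-style time_after(a, b): true if a is after b, wrap-safe. */
static bool time_after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	/* Seeded at "setup" time, as the patch does in occ_setup(). */
	unsigned long next_update = jiffies + OCC_UPDATE_FREQUENCY;

	for (jiffies = 0; jiffies <= 250; jiffies += 50) {
		if (time_after(jiffies, next_update)) {
			printf("%3lu: poll\n", jiffies);
			next_update = jiffies + OCC_UPDATE_FREQUENCY;
		} else {
			printf("%3lu: cached\n", jiffies);
		}
	}
	return 0;
}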
diff --git a/drivers/hwmon/pmbus/fsp-3y.c b/drivers/hwmon/pmbus/fsp-3y.c
index b177987286ae..e24842475254 100644
--- a/drivers/hwmon/pmbus/fsp-3y.c
+++ b/drivers/hwmon/pmbus/fsp-3y.c
@@ -57,7 +57,7 @@ static int page_log_to_page_real(int page_log, enum chips chip)
case YH5151E_PAGE_12V_LOG:
return YH5151E_PAGE_12V_REAL;
case YH5151E_PAGE_5V_LOG:
- return YH5151E_PAGE_5V_LOG;
+ return YH5151E_PAGE_5V_REAL;
case YH5151E_PAGE_3V3_LOG:
return YH5151E_PAGE_3V3_REAL;
}
@@ -103,8 +103,18 @@ static int set_page(struct i2c_client *client, int page_log)
static int fsp3y_read_byte_data(struct i2c_client *client, int page, int reg)
{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct fsp3y_data *data = to_fsp3y_data(info);
int rv;
+ /*
+ * YH5151-E outputs vout in linear11. The conversion is done when
+ * reading. Here, we have to hand pmbus_core the correct
+ * exponent (it is -6).
+ */
+ if (data->chip == yh5151e && reg == PMBUS_VOUT_MODE)
+ return 0x1A;
+
rv = set_page(client, page);
if (rv < 0)
return rv;
@@ -114,6 +124,8 @@ static int fsp3y_read_byte_data(struct i2c_client *client, int page, int reg)
static int fsp3y_read_word_data(struct i2c_client *client, int page, int phase, int reg)
{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ struct fsp3y_data *data = to_fsp3y_data(info);
int rv;
/*
@@ -144,7 +156,18 @@ static int fsp3y_read_word_data(struct i2c_client *client, int page, int phase,
if (rv < 0)
return rv;
- return i2c_smbus_read_word_data(client, reg);
+ rv = i2c_smbus_read_word_data(client, reg);
+ if (rv < 0)
+ return rv;
+
+ /*
+ * YH-5151E is non-compliant and reports output voltages in linear11
+ * instead of linear16.
+ */
+ if (data->chip == yh5151e && reg == PMBUS_READ_VOUT)
+ rv = sign_extend32(rv, 10) & 0xffff;
+
+ return rv;
}
static struct pmbus_driver_info fsp3y_info[] = {
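The two halves of the fsp-3y workaround cooperate: VOUT_MODE 0x1A encodes "linear" mode with a two's-complement 5-bit exponent of -6, and the word fixup strips the linear11 exponent field, handing pmbus_core a sign-extended 16-bit mantissa to which that -6 is then applied. A standalone decode sketch (raw value hypothetical; sign_extend32() reimplemented to match the kernel helper):

#include <stdint.h>
#include <stdio.h>

/* Same semantics as the kernel's sign_extend32(): treat bit 'index' as
 * the sign bit and discard everything above it. */
static int32_t sign_extend32(uint32_t value, int index)
{
	int shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	/* Hypothetical linear11 reading: exponent -6 (0b11010) in bits
	 * 15..11, mantissa 768 in bits 10..0, i.e. 768 * 2^-6 = 12.0 V. */
	uint16_t raw = (0x1Au << 11) | 768;

	/* What fsp3y_read_word_data() now returns for PMBUS_READ_VOUT:
	 * the mantissa alone, sign-extended to 16 bits. */
	uint16_t vout = sign_extend32(raw, 10) & 0xffff;

	/* pmbus_core applies the exponent advertised via VOUT_MODE
	 * (0x1A -> -6), i.e. divides by 64. */
	printf("%d * 2^-6 = %g V\n", (int16_t)vout, (int16_t)vout / 64.0);
	return 0;
}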
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index cceda3cecbcf..8b1723635cce 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -229,7 +229,6 @@ config DMARD10
config HID_SENSOR_ACCEL_3D
depends on HID_SENSOR_HUB
select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
select HID_SENSOR_IIO_TRIGGER
tristate "HID Accelerometers 3D"
diff --git a/drivers/iio/common/hid-sensors/Kconfig b/drivers/iio/common/hid-sensors/Kconfig
index 24d492567336..2a3dd3b907be 100644
--- a/drivers/iio/common/hid-sensors/Kconfig
+++ b/drivers/iio/common/hid-sensors/Kconfig
@@ -19,6 +19,7 @@ config HID_SENSOR_IIO_TRIGGER
tristate "Common module (trigger) for all HID Sensor IIO drivers"
depends on HID_SENSOR_HUB && HID_SENSOR_IIO_COMMON && IIO_BUFFER
select IIO_TRIGGER
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to build trigger support for HID sensors.
Triggers will be sent if all requested attributes were read.
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index 5824f2edf975..20b5ac7ab66a 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -111,7 +111,6 @@ config FXAS21002C_SPI
config HID_SENSOR_GYRO_3D
depends on HID_SENSOR_HUB
select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
select HID_SENSOR_IIO_TRIGGER
tristate "HID Gyroscope 3D"
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index ac90be03332a..f17a93519535 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -272,7 +272,16 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_OFFSET:
switch (chan->type) {
case IIO_TEMP:
- /* The temperature scaling is (x+23000)/280 Celsius */
+ /*
+ * The temperature scaling is (x+23000)/280 Celsius
+ * for the "best fit straight line" temperature range
+ * of -30C..85C. The 23000 includes the room-temperature
+ * offset of +35C, 280 is the precision scale, and x is
+ * the 16-bit signed integer reported by the hardware.
+ *
+ * The temperature value itself is the temperature of
+ * the sensor die.
+ */
*val = 23000;
return IIO_VAL_INT;
default:
@@ -329,7 +338,7 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
goto out_read_raw_unlock;
}
- *val = be16_to_cpu(raw_val);
+ *val = (s16)be16_to_cpu(raw_val);
ret = IIO_VAL_INT;
goto out_read_raw_unlock;
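Both mpu3050 changes above are easiest to see with numbers, using the (x+23000)/280 transfer function quoted in the new comment (raw value hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Transfer function from the comment: x is the signed 16-bit raw
 * register value, result in degrees Celsius. */
static double mpu3050_temp_c(int32_t x)
{
	return (x + 23000) / 280.0;
}

int main(void)
{
	uint16_t reg = 0xCC70;	/* hypothetical reading; -13200 as s16 */

	/* With the (s16) cast: (-13200 + 23000) / 280 = 35.0 C. */
	printf("signed:   %.1f C\n", mpu3050_temp_c((int16_t)reg));

	/* Without the cast the driver saw 52336 and reported ~269 C. */
	printf("unsigned: %.1f C\n", mpu3050_temp_c(reg));
	return 0;
}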
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index 6549fcf6db69..2de5494e7c22 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -52,7 +52,6 @@ config HID_SENSOR_HUMIDITY
tristate "HID Environmental humidity sensor"
depends on HID_SENSOR_HUB
select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
select HID_SENSOR_IIO_TRIGGER
help
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index d92c58a94fe4..59efb36db2c7 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -1778,7 +1778,6 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (!indio_dev->info)
goto out_unlock;
- ret = -EINVAL;
list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
ret = h->ioctl(indio_dev, filp, cmd, arg);
if (ret != IIO_IOCTL_UNHANDLED)
@@ -1786,7 +1785,7 @@ static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
if (ret == IIO_IOCTL_UNHANDLED)
- ret = -EINVAL;
+ ret = -ENODEV;
out_unlock:
mutex_unlock(&indio_dev->info_exist_lock);
@@ -1926,9 +1925,6 @@ EXPORT_SYMBOL(__iio_device_register);
**/
void iio_device_unregister(struct iio_dev *indio_dev)
{
- struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
- struct iio_ioctl_handler *h, *t;
-
cdev_device_del(&indio_dev->chrdev, &indio_dev->dev);
mutex_lock(&indio_dev->info_exist_lock);
@@ -1939,9 +1935,6 @@ void iio_device_unregister(struct iio_dev *indio_dev)
indio_dev->info = NULL;
- list_for_each_entry_safe(h, t, &iio_dev_opaque->ioctl_handlers, entry)
- list_del(&h->entry);
-
iio_device_wakeup_eventset(indio_dev);
iio_buffer_wakeup_poll(indio_dev);
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 33ad4dd0b5c7..917f9becf9c7 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -256,7 +256,6 @@ config ISL29125
config HID_SENSOR_ALS
depends on HID_SENSOR_HUB
select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
select HID_SENSOR_IIO_TRIGGER
tristate "HID ALS"
@@ -270,7 +269,6 @@ config HID_SENSOR_ALS
config HID_SENSOR_PROX
depends on HID_SENSOR_HUB
select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
select HID_SENSOR_IIO_TRIGGER
tristate "HID PROX"
diff --git a/drivers/iio/light/gp2ap002.c b/drivers/iio/light/gp2ap002.c
index d048ae257c51..f960be7d4001 100644
--- a/drivers/iio/light/gp2ap002.c
+++ b/drivers/iio/light/gp2ap002.c
@@ -582,7 +582,7 @@ static int gp2ap002_probe(struct i2c_client *client,
"gp2ap002", indio_dev);
if (ret) {
dev_err(dev, "unable to request IRQ\n");
- goto out_disable_vio;
+ goto out_put_pm;
}
gp2ap002->irq = client->irq;
@@ -612,8 +612,9 @@ static int gp2ap002_probe(struct i2c_client *client,
return 0;
-out_disable_pm:
+out_put_pm:
pm_runtime_put_noidle(dev);
+out_disable_pm:
pm_runtime_disable(dev);
out_disable_vio:
regulator_disable(gp2ap002->vio);
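The gp2ap002 relabeling follows the usual rule for probe() error paths: unwind acquisitions in reverse order, and a failure after step N must jump to the label that undoes steps N..1; the old code jumped past the runtime-PM cleanup. A generic skeleton of the pattern (all functions hypothetical):

#include <stdio.h>

/* Hypothetical acquire/release pairs standing in for regulator, IRQ
 * and runtime-PM setup. */
static int acquire_a(void) { return 0; }
static void release_a(void) { puts("release_a"); }
static int acquire_b(void) { return 0; }
static void release_b(void) { puts("release_b"); }
static int acquire_c(void) { return -1; }	/* simulate the IRQ failure */

static int probe(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;

	ret = acquire_b();
	if (ret)
		goto out_release_a;

	ret = acquire_c();
	if (ret)
		goto out_release_b;	/* must undo b and then a */

	return 0;

out_release_b:
	release_b();
out_release_a:
	release_a();
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}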
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
index 0f787bfc88fc..c9d8f07a6fcd 100644
--- a/drivers/iio/light/tsl2583.c
+++ b/drivers/iio/light/tsl2583.c
@@ -341,6 +341,14 @@ static int tsl2583_als_calibrate(struct iio_dev *indio_dev)
return lux_val;
}
+ /* Avoid division by zero of lux_val later on */
+ if (lux_val == 0) {
+ dev_err(&chip->client->dev,
+ "%s: lux_val of 0 will produce out of range trim_value\n",
+ __func__);
+ return -ENODATA;
+ }
+
gain_trim_val = (unsigned int)(((chip->als_settings.als_cal_target)
* chip->als_settings.als_gain_trim) / lux_val);
if ((gain_trim_val < 250) || (gain_trim_val > 4000)) {
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 5d4ffd66032e..74ad5701c6c2 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -95,7 +95,6 @@ config MAG3110
config HID_SENSOR_MAGNETOMETER_3D
depends on HID_SENSOR_HUB
select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
select HID_SENSOR_IIO_TRIGGER
tristate "HID Magenetometer 3D"
diff --git a/drivers/iio/orientation/Kconfig b/drivers/iio/orientation/Kconfig
index a505583cc2fd..396cbbb867f4 100644
--- a/drivers/iio/orientation/Kconfig
+++ b/drivers/iio/orientation/Kconfig
@@ -9,7 +9,6 @@ menu "Inclinometer sensors"
config HID_SENSOR_INCLINOMETER_3D
depends on HID_SENSOR_HUB
select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
select HID_SENSOR_IIO_TRIGGER
tristate "HID Inclinometer 3D"
@@ -20,7 +19,6 @@ config HID_SENSOR_INCLINOMETER_3D
config HID_SENSOR_DEVICE_ROTATION
depends on HID_SENSOR_HUB
select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
select HID_SENSOR_IIO_TRIGGER
tristate "HID Device Rotation"
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 689b978db4f9..fc0d3cfca418 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -79,7 +79,6 @@ config DPS310
config HID_SENSOR_PRESS
depends on HID_SENSOR_HUB
select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
select HID_SENSOR_IIO_TRIGGER
tristate "HID PRESS"
diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
index c685f10b5ae4..cc206bfa09c7 100644
--- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
+++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
@@ -160,6 +160,7 @@ static int lidar_get_measurement(struct lidar_data *data, u16 *reg)
ret = lidar_write_control(data, LIDAR_REG_CONTROL_ACQUIRE);
if (ret < 0) {
dev_err(&client->dev, "cannot send start measurement command");
+ pm_runtime_put_noidle(&client->dev);
return ret;
}
diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig
index f1f2a1499c9e..4df60082c1fa 100644
--- a/drivers/iio/temperature/Kconfig
+++ b/drivers/iio/temperature/Kconfig
@@ -45,7 +45,6 @@ config HID_SENSOR_TEMP
tristate "HID Environmental temperature sensor"
depends on HID_SENSOR_HUB
select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
select HID_SENSOR_IIO_COMMON
select HID_SENSOR_IIO_TRIGGER
help
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 2b9ffc21cbc4..ab148a696c0c 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -473,6 +473,7 @@ static void cma_release_dev(struct rdma_id_private *id_priv)
list_del(&id_priv->list);
cma_dev_put(id_priv->cma_dev);
id_priv->cma_dev = NULL;
+ id_priv->id.device = NULL;
if (id_priv->id.route.addr.dev_addr.sgid_attr) {
rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
@@ -1860,6 +1861,7 @@ static void _destroy_id(struct rdma_id_private *id_priv,
iw_destroy_cm_id(id_priv->cm_id.iw);
}
cma_leave_mc_groups(id_priv);
+ rdma_restrack_del(&id_priv->res);
cma_release_dev(id_priv);
}
@@ -1873,7 +1875,6 @@ static void _destroy_id(struct rdma_id_private *id_priv,
kfree(id_priv->id.route.path_rec);
put_net(id_priv->id.route.addr.dev_addr.net);
- rdma_restrack_del(&id_priv->res);
kfree(id_priv);
}
@@ -3774,7 +3775,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
}
id_priv->backlog = backlog;
- if (id->device) {
+ if (id_priv->cma_dev) {
if (rdma_cap_ib_cm(id->device, 1)) {
ret = cma_ib_listen(id_priv);
if (ret)
diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c
index 9ec6971056fa..049684880ae0 100644
--- a/drivers/infiniband/core/uverbs_std_types_device.c
+++ b/drivers/infiniband/core/uverbs_std_types_device.c
@@ -117,8 +117,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_INFO_HANDLES)(
return ret;
uapi_object = uapi_get_object(attrs->ufile->device->uapi, object_id);
- if (!uapi_object)
- return -EINVAL;
+ if (IS_ERR(uapi_object))
+ return PTR_ERR(uapi_object);
handles = gather_objects_handle(attrs->ufile, uapi_object, attrs,
out_len, &total);
@@ -331,6 +331,9 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_TABLE)(
if (ret)
return ret;
+ if (!user_entry_size)
+ return -EINVAL;
+
max_entries = uverbs_attr_ptr_get_array_size(
attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES,
user_entry_size);
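The uverbs fix above hinges on a return-value convention: uapi_get_object() reports failure with an ERR_PTR()-encoded pointer rather than NULL, so the old !pointer test could never fire. A userland re-creation of the convention (simplified from include/linux/err.h):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	/* Errors occupy the top 4095 values of the address space. */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int object;	/* hypothetical lookup target */

static void *lookup(int id)
{
	return id == 42 ? (void *)&object : ERR_PTR(-EINVAL);
}

int main(void)
{
	void *obj = lookup(7);

	if (IS_ERR(obj))	/* '!obj' would never be true here */
		printf("lookup failed: %ld\n", PTR_ERR(obj));
	return 0;
}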
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index b496f30ce066..364f69cd620f 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -1423,7 +1423,7 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
struct i40e_qv_info *iw_qvinfo;
u32 ceq_idx;
u32 i;
- u32 size;
+ size_t size;
if (!ldev->msix_count) {
i40iw_pr_err("No MSI-X vectors\n");
@@ -1433,8 +1433,7 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
iwdev->msix_count = ldev->msix_count;
size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count;
- size += sizeof(struct i40e_qvlist_info);
- size += sizeof(struct i40e_qv_info) * iwdev->msix_count - 1;
+ size += struct_size(iw_qvlist, qv_info, iwdev->msix_count);
iwdev->iw_msixtbl = kzalloc(size, GFP_KERNEL);
if (!iwdev->iw_msixtbl)
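The sizing fix above swaps error-prone arithmetic for struct_size(); note the old expression's precedence trap, where sizeof(struct i40e_qv_info) * msix_count - 1 subtracts one byte, not one element. What the macro computes, minus the overflow saturation it adds on top, reduces to offsetof plus n elements (hypothetical types):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct qv_info { uint32_t v[3]; };

struct qvlist_info {
	uint32_t num_vectors;
	struct qv_info qv_info[];	/* flexible array member */
};

int main(void)
{
	size_t n = 4;

	/* Equivalent of struct_size(list, qv_info, n) for these types. */
	size_t sz = offsetof(struct qvlist_info, qv_info) +
		    n * sizeof(struct qv_info);

	printf("%zu bytes for %zu entries\n", sz, n);
	return 0;
}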
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index a0b677accd96..eb9b0a2707f8 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -630,9 +630,8 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
case UVERBS_OBJECT_QP:
{
struct mlx5_ib_qp *qp = to_mqp(uobj->object);
- enum ib_qp_type qp_type = qp->ibqp.qp_type;
- if (qp_type == IB_QPT_RAW_PACKET ||
+ if (qp->type == IB_QPT_RAW_PACKET ||
(qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
struct mlx5_ib_raw_packet_qp *raw_packet_qp =
&qp->raw_packet_qp;
@@ -649,10 +648,9 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
sq->tisn) == obj_id);
}
- if (qp_type == MLX5_IB_QPT_DCT)
+ if (qp->type == MLX5_IB_QPT_DCT)
return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
qp->dct.mdct.mqp.qpn) == obj_id;
-
return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
qp->ibqp.qp_num) == obj_id;
}
diff --git a/drivers/infiniband/hw/mlx5/dm.c b/drivers/infiniband/hw/mlx5/dm.c
index 094bf85589db..001d766cf291 100644
--- a/drivers/infiniband/hw/mlx5/dm.c
+++ b/drivers/infiniband/hw/mlx5/dm.c
@@ -217,6 +217,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_MAP_OP_ADDR)(
if (err)
return err;
+ if (op >= BITS_PER_TYPE(u32))
+ return -EOPNOTSUPP;
+
if (!(MLX5_CAP_DEV_MEM(dev->mdev, memic_operations) & BIT(op)))
return -EOPNOTSUPP;
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 2fc6a60c4e77..941adf5cf3d0 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -2280,6 +2280,7 @@ static int mlx5_ib_flow_action_create_packet_reformat_ctx(
u8 ft_type, u8 dv_prt,
void *in, size_t len)
{
+ struct mlx5_pkt_reformat_params reformat_params;
enum mlx5_flow_namespace_type namespace;
u8 prm_prt;
int ret;
@@ -2292,9 +2293,13 @@ static int mlx5_ib_flow_action_create_packet_reformat_ctx(
if (ret)
return ret;
+ memset(&reformat_params, 0, sizeof(reformat_params));
+ reformat_params.type = prm_prt;
+ reformat_params.size = len;
+ reformat_params.data = in;
maction->flow_action_raw.pkt_reformat =
- mlx5_packet_reformat_alloc(dev->mdev, prm_prt, len,
- in, namespace);
+ mlx5_packet_reformat_alloc(dev->mdev, &reformat_params,
+ namespace);
if (IS_ERR(maction->flow_action_raw.pkt_reformat)) {
ret = PTR_ERR(maction->flow_action_raw.pkt_reformat);
return ret;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 6d1dd09a4388..644d5d0ac544 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -4419,6 +4419,7 @@ static int mlx5r_mp_probe(struct auxiliary_device *adev,
if (bound) {
rdma_roce_rescan_device(&dev->ib_dev);
+ mpi->ibdev->ib_active = true;
break;
}
}
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 4388afeff251..9662cd39c7ff 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -743,10 +743,10 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
MLX5_IB_UMR_OCTOWORD;
ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
- if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
+ if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) &&
!dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
mlx5_ib_can_load_pas_with_umr(dev, 0))
- ent->limit = dev->mdev->profile->mr_cache[i].limit;
+ ent->limit = dev->mdev->profile.mr_cache[i].limit;
else
ent->limit = 0;
spin_lock_irq(&ent->lock);
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 782b2af8f211..1338c11fd121 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1559,12 +1559,16 @@ int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
}
eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
- param = (struct mlx5_eq_param){
- .irq_index = 0,
+ param = (struct mlx5_eq_param) {
.nent = MLX5_IB_NUM_PF_EQE,
};
param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
+ if (!zalloc_cpumask_var(&param.affinity, GFP_KERNEL)) {
+ err = -ENOMEM;
+ goto err_wq;
+ }
eq->core = mlx5_eq_create_generic(dev->mdev, &param);
+ free_cpumask_var(param.affinity);
if (IS_ERR(eq->core)) {
err = PTR_ERR(eq->core);
goto err_wq;
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 2af26737d32d..a6712e373eed 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -346,13 +346,15 @@ static inline enum comp_state do_read(struct rxe_qp *qp,
ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, payload_addr(pkt),
payload_size(pkt), to_mr_obj, NULL);
- if (ret)
+ if (ret) {
+ wqe->status = IB_WC_LOC_PROT_ERR;
return COMPST_ERROR;
+ }
if (wqe->dma.resid == 0 && (pkt->mask & RXE_END_MASK))
return COMPST_COMP_ACK;
- else
- return COMPST_UPDATE_COMP;
+
+ return COMPST_UPDATE_COMP;
}
static inline enum comp_state do_atomic(struct rxe_qp *qp,
@@ -366,10 +368,12 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,
ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
&wqe->dma, &atomic_orig,
sizeof(u64), to_mr_obj, NULL);
- if (ret)
+ if (ret) {
+ wqe->status = IB_WC_LOC_PROT_ERR;
return COMPST_ERROR;
- else
- return COMPST_COMP_ACK;
+ }
+
+ return COMPST_COMP_ACK;
}
static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 34ae957a315c..b0f350d674fd 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -242,6 +242,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
if (err) {
vfree(qp->sq.queue->buf);
kfree(qp->sq.queue);
+ qp->sq.queue = NULL;
return err;
}
@@ -295,6 +296,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
if (err) {
vfree(qp->rq.queue->buf);
kfree(qp->rq.queue);
+ qp->rq.queue = NULL;
return err;
}
}
@@ -355,6 +357,11 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
err2:
rxe_queue_cleanup(qp->sq.queue);
err1:
+ qp->pd = NULL;
+ qp->rcq = NULL;
+ qp->scq = NULL;
+ qp->srq = NULL;
+
if (srq)
rxe_drop_ref(srq);
rxe_drop_ref(scq);
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index d2313efb26db..3f175f220a22 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -300,7 +300,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
struct siw_ucontext *uctx =
rdma_udata_to_drv_context(udata, struct siw_ucontext,
base_ucontext);
- struct siw_cq *scq = NULL, *rcq = NULL;
unsigned long flags;
int num_sqe, num_rqe, rv = 0;
size_t length;
@@ -343,10 +342,8 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
rv = -EINVAL;
goto err_out;
}
- scq = to_siw_cq(attrs->send_cq);
- rcq = to_siw_cq(attrs->recv_cq);
- if (!scq || (!rcq && !attrs->srq)) {
+ if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {
siw_dbg(base_dev, "send CQ or receive CQ invalid\n");
rv = -EINVAL;
goto err_out;
@@ -378,7 +375,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
else {
/* Zero sized SQ is not supported */
rv = -EINVAL;
- goto err_out;
+ goto err_out_xa;
}
if (num_rqe)
num_rqe = roundup_pow_of_two(num_rqe);
@@ -401,8 +398,8 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
}
}
qp->pd = pd;
- qp->scq = scq;
- qp->rcq = rcq;
+ qp->scq = to_siw_cq(attrs->send_cq);
+ qp->rcq = to_siw_cq(attrs->recv_cq);
if (attrs->srq) {
/*
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index b90e825df7e1..62543a4eccc0 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -596,7 +596,7 @@ config IRQ_IDT3243X
config APPLE_AIC
bool "Apple Interrupt Controller (AIC)"
depends on ARM64
- default ARCH_APPLE
+ depends on ARCH_APPLE || COMPILE_TEST
help
Support for the Apple Interrupt Controller found on Apple Silicon SoCs,
such as the M1.
diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
index 91adf771f185..090bc3f4f7d8 100644
--- a/drivers/irqchip/irq-mvebu-icu.c
+++ b/drivers/irqchip/irq-mvebu-icu.c
@@ -359,10 +359,8 @@ static int mvebu_icu_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
icu->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(icu->base)) {
- dev_err(&pdev->dev, "Failed to map icu base address.\n");
+ if (IS_ERR(icu->base))
return PTR_ERR(icu->base);
- }
/*
* Legacy bindings: ICU is one node with one MSI parent: force manually
diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c
index 18832ccc8ff8..3a7b7a7f20ca 100644
--- a/drivers/irqchip/irq-mvebu-sei.c
+++ b/drivers/irqchip/irq-mvebu-sei.c
@@ -384,10 +384,8 @@ static int mvebu_sei_probe(struct platform_device *pdev)
sei->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sei->base = devm_ioremap_resource(sei->dev, sei->res);
- if (IS_ERR(sei->base)) {
- dev_err(sei->dev, "Failed to remap SEI resource\n");
+ if (IS_ERR(sei->base))
return PTR_ERR(sei->base);
- }
/* Retrieve the SEI capabilities with the interrupt ranges */
sei->caps = of_device_get_match_data(&pdev->dev);
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index b9db90c4aa56..4704f2ee5797 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -892,10 +892,8 @@ static int stm32_exti_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host_data->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(host_data->base)) {
- dev_err(dev, "Unable to map registers\n");
+ if (IS_ERR(host_data->base))
return PTR_ERR(host_data->base);
- }
for (i = 0; i < drv_data->bank_nr; i++)
stm32_exti_chip_init(host_data, i, np);
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 70061991915a..cd5642cef01f 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -46,7 +46,7 @@ static void hfcsusb_start_endpoint(struct hfcsusb *hw, int channel);
static void hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel);
static int hfcsusb_setup_bch(struct bchannel *bch, int protocol);
static void deactivate_bchannel(struct bchannel *bch);
-static void hfcsusb_ph_info(struct hfcsusb *hw);
+static int hfcsusb_ph_info(struct hfcsusb *hw);
/* start next background transfer for control channel */
static void
@@ -241,7 +241,7 @@ hfcusb_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
* send full D/B channel status information
* as MPH_INFORMATION_IND
*/
-static void
+static int
hfcsusb_ph_info(struct hfcsusb *hw)
{
struct ph_info *phi;
@@ -250,7 +250,7 @@ hfcsusb_ph_info(struct hfcsusb *hw)
phi = kzalloc(struct_size(phi, bch, dch->dev.nrbchan), GFP_ATOMIC);
if (!phi)
- return;
+ return -ENOMEM;
phi->dch.ch.protocol = hw->protocol;
phi->dch.ch.Flags = dch->Flags;
@@ -263,6 +263,8 @@ hfcsusb_ph_info(struct hfcsusb *hw)
_queue_data(&dch->dev.D, MPH_INFORMATION_IND, MISDN_ID_ANY,
struct_size(phi, bch, dch->dev.nrbchan), phi, GFP_ATOMIC);
kfree(phi);
+
+ return 0;
}
/*
@@ -347,8 +349,7 @@ hfcusb_l2l1D(struct mISDNchannel *ch, struct sk_buff *skb)
ret = l1_event(dch->l1, hh->prim);
break;
case MPH_INFORMATION_REQ:
- hfcsusb_ph_info(hw);
- ret = 0;
+ ret = hfcsusb_ph_info(hw);
break;
}
@@ -403,8 +404,7 @@ hfc_l1callback(struct dchannel *dch, u_int cmd)
hw->name, __func__, cmd);
return -1;
}
- hfcsusb_ph_info(hw);
- return 0;
+ return hfcsusb_ph_info(hw);
}
static int
@@ -746,8 +746,7 @@ hfcsusb_setup_bch(struct bchannel *bch, int protocol)
handle_led(hw, (bch->nr == 1) ? LED_B1_OFF :
LED_B2_OFF);
}
- hfcsusb_ph_info(hw);
- return 0;
+ return hfcsusb_ph_info(hw);
}
static void
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index a16c7a2a7f3d..88d592bafdb0 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -630,17 +630,19 @@ static void
release_io(struct inf_hw *hw)
{
if (hw->cfg.mode) {
- if (hw->cfg.p) {
+ if (hw->cfg.mode == AM_MEMIO) {
release_mem_region(hw->cfg.start, hw->cfg.size);
- iounmap(hw->cfg.p);
+ if (hw->cfg.p)
+ iounmap(hw->cfg.p);
} else
release_region(hw->cfg.start, hw->cfg.size);
hw->cfg.mode = AM_NONE;
}
if (hw->addr.mode) {
- if (hw->addr.p) {
+ if (hw->addr.mode == AM_MEMIO) {
release_mem_region(hw->addr.start, hw->addr.size);
- iounmap(hw->addr.p);
+ if (hw->addr.p)
+ iounmap(hw->addr.p);
} else
release_region(hw->addr.start, hw->addr.size);
hw->addr.mode = AM_NONE;
@@ -670,9 +672,12 @@ setup_io(struct inf_hw *hw)
(ulong)hw->cfg.start, (ulong)hw->cfg.size);
return err;
}
- if (hw->ci->cfg_mode == AM_MEMIO)
- hw->cfg.p = ioremap(hw->cfg.start, hw->cfg.size);
hw->cfg.mode = hw->ci->cfg_mode;
+ if (hw->ci->cfg_mode == AM_MEMIO) {
+ hw->cfg.p = ioremap(hw->cfg.start, hw->cfg.size);
+ if (!hw->cfg.p)
+ return -ENOMEM;
+ }
if (debug & DEBUG_HW)
pr_notice("%s: IO cfg %lx (%lu bytes) mode%d\n",
hw->name, (ulong)hw->cfg.start,
@@ -697,12 +702,12 @@ setup_io(struct inf_hw *hw)
(ulong)hw->addr.start, (ulong)hw->addr.size);
return err;
}
+ hw->addr.mode = hw->ci->addr_mode;
if (hw->ci->addr_mode == AM_MEMIO) {
hw->addr.p = ioremap(hw->addr.start, hw->addr.size);
- if (unlikely(!hw->addr.p))
+ if (!hw->addr.p)
return -ENOMEM;
}
- hw->addr.mode = hw->ci->addr_mode;
if (debug & DEBUG_HW)
pr_notice("%s: IO addr %lx (%lu bytes) mode%d\n",
hw->name, (ulong)hw->addr.start,
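Two fixes land in the mISDNinfineon hunks: setup_io() now checks the ioremap() result for the cfg region (it already did for addr), and it records the address mode before the fallible mapping step, so that release_io(), now keyed on the mode flag rather than on a non-NULL pointer, still releases a claimed region whose mapping failed. A hedged userspace sketch of that ordering, with made-up helpers standing in for request/release/ioremap:

#include <errno.h>
#include <stdlib.h>

enum addr_mode { AM_NONE, AM_IO, AM_MEMIO };

struct region {
	enum addr_mode mode;
	void *p;		/* mapping, only meaningful for AM_MEMIO */
};

static int setup_region(struct region *r, int want_memio)
{
	/* Record the mode first, so the teardown path still knows the
	 * region was claimed even if the mapping step fails. */
	r->mode = want_memio ? AM_MEMIO : AM_IO;
	if (r->mode == AM_MEMIO) {
		r->p = malloc(4096);	/* stand-in for ioremap() */
		if (!r->p)
			return -ENOMEM;
	}
	return 0;
}

static void release_region_demo(struct region *r)
{
	if (r->mode == AM_MEMIO) {
		/* release the claim unconditionally; undo the mapping
		 * only if it actually exists (free(NULL) is a no-op,
		 * mirroring the explicit 'if (p) iounmap(p)' above) */
		free(r->p);
		r->p = NULL;
	}
	r->mode = AM_NONE;
}

int main(void)
{
	struct region r = { AM_NONE, NULL };
	int err = setup_region(&r, 1);

	release_region_demo(&r);	/* safe on success and failure */
	return err ? EXIT_FAILURE : EXIT_SUCCESS;
}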
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index ee925b58bbce..2a1ddd47a096 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -1100,7 +1100,6 @@ nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
card->typ = NETJET_S_TJ300;
card->base = pci_resource_start(pdev, 0);
- card->irq = pdev->irq;
pci_set_drvdata(pdev, card);
err = setup_instance(card);
if (err)
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index fc433e63b1dc..b1590cb4a188 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -307,7 +307,7 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
usleep_range(3000, 6000);
ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
if (ret)
- return ret;
+ goto out;
status &= LP5523_ENG_STATUS_MASK;
if (status != LP5523_ENG_STATUS_MASK) {
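The lp5523 one-liner swaps an early return for "goto out" so the function's shared cleanup still runs when the status read fails (in the driver, that label restores the engine state). The canonical shape of the pattern, as a small sketch with a hypothetical resource:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int demo(void)
{
	int ret = 0;
	char *res = malloc(32);	/* resource that must be released */

	if (!res)
		return -ENOMEM;	/* nothing acquired yet: plain return is fine */

	if (getenv("DEMO_FAIL")) {
		ret = -EIO;
		goto out;	/* NOT 'return ret': cleanup must still run */
	}

	puts("work done");
out:
	free(res);		/* shared cleanup for success and failure */
	return ret;
}

int main(void)
{
	return demo() ? EXIT_FAILURE : EXIT_SUCCESS;
}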
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 781942aeddd1..20f2510db1f6 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -66,14 +66,14 @@ struct superblock {
__u8 magic[8];
__u8 version;
__u8 log2_interleave_sectors;
- __u16 integrity_tag_size;
- __u32 journal_sections;
- __u64 provided_data_sectors; /* userspace uses this value */
- __u32 flags;
+ __le16 integrity_tag_size;
+ __le32 journal_sections;
+ __le64 provided_data_sectors; /* userspace uses this value */
+ __le32 flags;
__u8 log2_sectors_per_block;
__u8 log2_blocks_per_bitmap_bit;
__u8 pad[2];
- __u64 recalc_sector;
+ __le64 recalc_sector;
__u8 pad2[8];
__u8 salt[SALT_SIZE];
};
@@ -86,16 +86,16 @@ struct superblock {
#define JOURNAL_ENTRY_ROUNDUP 8
-typedef __u64 commit_id_t;
+typedef __le64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR 8
struct journal_entry {
union {
struct {
- __u32 sector_lo;
- __u32 sector_hi;
+ __le32 sector_lo;
+ __le32 sector_hi;
} s;
- __u64 sector;
+ __le64 sector;
} u;
commit_id_t last_bytes[];
/* __u8 tag[0]; */
@@ -806,7 +806,7 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
}
if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
- uint64_t section_le;
+ __le64 section_le;
r = crypto_shash_update(desc, (__u8 *)&ic->sb->salt, SALT_SIZE);
if (unlikely(r < 0)) {
@@ -1640,7 +1640,7 @@ static void integrity_end_io(struct bio *bio)
static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
const char *data, char *result)
{
- __u64 sector_le = cpu_to_le64(sector);
+ __le64 sector_le = cpu_to_le64(sector);
SHASH_DESC_ON_STACK(req, ic->internal_hash);
int r;
unsigned digest_size;
@@ -2689,30 +2689,26 @@ next_chunk:
if (unlikely(dm_integrity_failed(ic)))
goto err;
- if (!ic->discard) {
- io_req.bi_op = REQ_OP_READ;
- io_req.bi_op_flags = 0;
- io_req.mem.type = DM_IO_VMA;
- io_req.mem.ptr.addr = ic->recalc_buffer;
- io_req.notify.fn = NULL;
- io_req.client = ic->io;
- io_loc.bdev = ic->dev->bdev;
- io_loc.sector = get_data_sector(ic, area, offset);
- io_loc.count = n_sectors;
+ io_req.bi_op = REQ_OP_READ;
+ io_req.bi_op_flags = 0;
+ io_req.mem.type = DM_IO_VMA;
+ io_req.mem.ptr.addr = ic->recalc_buffer;
+ io_req.notify.fn = NULL;
+ io_req.client = ic->io;
+ io_loc.bdev = ic->dev->bdev;
+ io_loc.sector = get_data_sector(ic, area, offset);
+ io_loc.count = n_sectors;
- r = dm_io(&io_req, 1, &io_loc, NULL);
- if (unlikely(r)) {
- dm_integrity_io_error(ic, "reading data", r);
- goto err;
- }
+ r = dm_io(&io_req, 1, &io_loc, NULL);
+ if (unlikely(r)) {
+ dm_integrity_io_error(ic, "reading data", r);
+ goto err;
+ }
- t = ic->recalc_tags;
- for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
- integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
- t += ic->tag_size;
- }
- } else {
- t = ic->recalc_tags + (n_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
+ t = ic->recalc_tags;
+ for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
+ integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
+ t += ic->tag_size;
}
metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
@@ -3826,7 +3822,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
for (i = 0; i < ic->journal_sections; i++) {
struct scatterlist sg;
struct skcipher_request *section_req;
- __u32 section_le = cpu_to_le32(i);
+ __le32 section_le = cpu_to_le32(i);
memset(crypt_iv, 0x00, ivsize);
memset(crypt_data, 0x00, crypt_len);
@@ -4368,13 +4364,11 @@ try_smaller_buffer:
goto bad;
}
INIT_WORK(&ic->recalc_work, integrity_recalc);
- if (!ic->discard) {
- ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
- if (!ic->recalc_buffer) {
- ti->error = "Cannot allocate buffer for recalculating";
- r = -ENOMEM;
- goto bad;
- }
+ ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
+ if (!ic->recalc_buffer) {
+ ti->error = "Cannot allocate buffer for recalculating";
+ r = -ENOMEM;
+ goto bad;
}
ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
ic->tag_size, GFP_KERNEL);
@@ -4383,9 +4377,6 @@ try_smaller_buffer:
r = -ENOMEM;
goto bad;
}
- if (ic->discard)
- memset(ic->recalc_tags, DISCARD_FILLER,
- (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size);
} else {
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
ti->error = "Recalculate can only be specified with internal_hash";
@@ -4579,7 +4570,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 9, 0},
+ .version = {1, 10, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
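The dm-integrity type changes are annotation-only: __le16/__le32/__le64 have the same size and layout as their __uNN counterparts, but they let sparse flag any access that misses a cpu_to_leXX()/leXX_to_cpu() conversion, which matters for an on-disk superblock and journal that must read identically on big- and little-endian hosts. A userspace illustration of the byte-order contract those annotations enforce (open-coded, since the kernel conversion helpers are not available there):

#include <stdint.h>
#include <stdio.h>

/* Store a host-order value as little-endian bytes, as cpu_to_le64()
 * guarantees for on-disk fields like provided_data_sectors. */
static void put_le64(uint8_t out[8], uint64_t v)
{
	for (int i = 0; i < 8; i++)
		out[i] = (uint8_t)(v >> (8 * i));
}

static uint64_t get_le64(const uint8_t in[8])
{
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v |= (uint64_t)in[i] << (8 * i);
	return v;
}

int main(void)
{
	uint8_t disk[8];
	uint64_t sectors = 0x0123456789abcdefULL;

	put_le64(disk, sectors);	/* byte 0 is 0xef on every host */
	printf("round trip ok: %d\n", get_le64(disk) == sectors);
	printf("first on-disk byte: 0x%02x\n", disk[0]);
	return 0;
}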
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index a2acb014c13a..b8e4d31124ea 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -855,12 +855,11 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
static uint32_t __minimum_chunk_size(struct origin *o)
{
struct dm_snapshot *snap;
- unsigned chunk_size = 0;
+ unsigned chunk_size = rounddown_pow_of_two(UINT_MAX);
if (o)
list_for_each_entry(snap, &o->snapshots, list)
- chunk_size = min_not_zero(chunk_size,
- snap->store->chunk_size);
+ chunk_size = min(chunk_size, snap->store->chunk_size);
return (uint32_t) chunk_size;
}
@@ -1409,6 +1408,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (!s->store->chunk_size) {
ti->error = "Chunk size not set";
+ r = -EINVAL;
goto bad_read_metadata;
}
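In the dm-snap hunk, __minimum_chunk_size() used to seed its running minimum with 0 and compensate with min_not_zero(); seeding with rounddown_pow_of_two(UINT_MAX) (0x80000000, an upper bound on any store's power-of-two chunk_size) allows a plain min() and yields a sane default when the snapshot list is empty. The arithmetic, as a sketch with a userspace stand-in for the kernel helper:

#include <limits.h>
#include <stdio.h>

/* Userspace stand-in for the kernel helper: largest power of two <= n. */
static unsigned rounddown_pow_of_two(unsigned n)
{
	unsigned p = 1;

	while (p <= n / 2)
		p <<= 1;
	return p;
}

static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

int main(void)
{
	unsigned chunk_sizes[] = { 64, 16, 128 };	/* hypothetical stores */
	unsigned m = rounddown_pow_of_two(UINT_MAX);	/* 0x80000000 */

	for (unsigned i = 0; i < 3; i++)
		m = min_u(m, chunk_sizes[i]);
	printf("minimum chunk size: %u\n", m);		/* 16 */
	printf("empty-list default: 0x%x\n", rounddown_pow_of_two(UINT_MAX));
	return 0;
}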
diff --git a/drivers/media/dvb-frontends/sp8870.c b/drivers/media/dvb-frontends/sp8870.c
index 655db8272268..9767159aeb9b 100644
--- a/drivers/media/dvb-frontends/sp8870.c
+++ b/drivers/media/dvb-frontends/sp8870.c
@@ -281,7 +281,7 @@ static int sp8870_set_frontend_parameters(struct dvb_frontend *fe)
// read status reg in order to clear pending irqs
err = sp8870_readreg(state, 0x200);
- if (err)
+ if (err < 0)
return err;
// system controller start
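The sp8870 fix is the classic value-or-errno pitfall: sp8870_readreg() returns the register value on success and a negative errno on failure, so "if (err)" would treat any non-zero register contents as an error and abort a successful tune. Only negative values signal failure:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for sp8870_readreg(): returns the register
 * value (>= 0) on success, or a negative errno on failure. */
static int readreg_demo(int fail)
{
	return fail ? -EIO : 0x42;
}

int main(void)
{
	int err = readreg_demo(0);

	if (err)		/* WRONG: 0x42 is a valid value, not an error */
		printf("bogus failure on value 0x%x\n", err);

	if (err < 0)		/* RIGHT: only negatives are errnos */
		printf("real failure: %d\n", err);
	else
		printf("read value 0x%x\n", err);
	return 0;
}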
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
index 83bd9a412a56..1e3b68a8743a 100644
--- a/drivers/media/platform/rcar_drif.c
+++ b/drivers/media/platform/rcar_drif.c
@@ -915,7 +915,6 @@ static int rcar_drif_g_fmt_sdr_cap(struct file *file, void *priv,
{
struct rcar_drif_sdr *sdr = video_drvdata(file);
- memset(f->fmt.sdr.reserved, 0, sizeof(f->fmt.sdr.reserved));
f->fmt.sdr.pixelformat = sdr->fmt->pixelformat;
f->fmt.sdr.buffersize = sdr->fmt->buffersize;
diff --git a/drivers/media/usb/gspca/cpia1.c b/drivers/media/usb/gspca/cpia1.c
index a4f7431486f3..d93d384286c1 100644
--- a/drivers/media/usb/gspca/cpia1.c
+++ b/drivers/media/usb/gspca/cpia1.c
@@ -1424,7 +1424,6 @@ static int sd_config(struct gspca_dev *gspca_dev,
{
struct sd *sd = (struct sd *) gspca_dev;
struct cam *cam;
- int ret;
sd->mainsFreq = FREQ_DEF == V4L2_CID_POWER_LINE_FREQUENCY_60HZ;
reset_camera_params(gspca_dev);
@@ -1436,10 +1435,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
cam->cam_mode = mode;
cam->nmodes = ARRAY_SIZE(mode);
- ret = goto_low_power(gspca_dev);
- if (ret)
- gspca_err(gspca_dev, "Cannot go to low power mode: %d\n",
- ret);
+ goto_low_power(gspca_dev);
/* Check the firmware version. */
sd->params.version.firmwareVersion = 0;
get_version_information(gspca_dev);
diff --git a/drivers/media/usb/gspca/m5602/m5602_mt9m111.c b/drivers/media/usb/gspca/m5602/m5602_mt9m111.c
index bfa3b381d8a2..bf1af6ed9131 100644
--- a/drivers/media/usb/gspca/m5602/m5602_mt9m111.c
+++ b/drivers/media/usb/gspca/m5602/m5602_mt9m111.c
@@ -195,7 +195,7 @@ static const struct v4l2_ctrl_config mt9m111_greenbal_cfg = {
int mt9m111_probe(struct sd *sd)
{
u8 data[2] = {0x00, 0x00};
- int i, rc = 0;
+ int i, err;
struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
if (force_sensor) {
@@ -213,18 +213,18 @@ int mt9m111_probe(struct sd *sd)
/* Do the preinit */
for (i = 0; i < ARRAY_SIZE(preinit_mt9m111); i++) {
if (preinit_mt9m111[i][0] == BRIDGE) {
- rc |= m5602_write_bridge(sd,
- preinit_mt9m111[i][1],
- preinit_mt9m111[i][2]);
+ err = m5602_write_bridge(sd,
+ preinit_mt9m111[i][1],
+ preinit_mt9m111[i][2]);
} else {
data[0] = preinit_mt9m111[i][2];
data[1] = preinit_mt9m111[i][3];
- rc |= m5602_write_sensor(sd,
- preinit_mt9m111[i][1], data, 2);
+ err = m5602_write_sensor(sd,
+ preinit_mt9m111[i][1], data, 2);
}
+ if (err < 0)
+ return err;
}
- if (rc < 0)
- return rc;
if (m5602_read_sensor(sd, MT9M111_SC_CHIPVER, data, 2))
return -ENODEV;
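The two m5602 probe fixes (here and in po1030 below) replace "rc |= write(...)" accumulation with a per-call check. OR-ing negative errnos is meaningless: once two calls fail with different codes the result is neither of them, and a later success cannot be told apart from an earlier failure. A compact demonstration:

#include <errno.h>
#include <stdio.h>

int main(void)
{
	int rc = 0;

	rc |= -EIO;	/* first write fails */
	rc |= -ENODEV;	/* second write fails with a different errno */

	/* rc is now -5 | -19 == -1: a meaningless mix of the two codes */
	printf("accumulated rc = %d (neither -EIO nor -ENODEV)\n", rc);

	/* the fix: check each call and bail out on the first error */
	int err = -EIO;
	if (err < 0) {
		printf("first failure reported precisely: %d\n", err);
		return 1;
	}
	return 0;
}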
diff --git a/drivers/media/usb/gspca/m5602/m5602_po1030.c b/drivers/media/usb/gspca/m5602/m5602_po1030.c
index d680b777f097..8fd99ceee4b6 100644
--- a/drivers/media/usb/gspca/m5602/m5602_po1030.c
+++ b/drivers/media/usb/gspca/m5602/m5602_po1030.c
@@ -154,8 +154,8 @@ static const struct v4l2_ctrl_config po1030_greenbal_cfg = {
int po1030_probe(struct sd *sd)
{
- int rc = 0;
u8 dev_id_h = 0, i;
+ int err;
struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
if (force_sensor) {
@@ -174,14 +174,14 @@ int po1030_probe(struct sd *sd)
for (i = 0; i < ARRAY_SIZE(preinit_po1030); i++) {
u8 data = preinit_po1030[i][2];
if (preinit_po1030[i][0] == SENSOR)
- rc |= m5602_write_sensor(sd,
- preinit_po1030[i][1], &data, 1);
+ err = m5602_write_sensor(sd, preinit_po1030[i][1],
+ &data, 1);
else
- rc |= m5602_write_bridge(sd, preinit_po1030[i][1],
- data);
+ err = m5602_write_bridge(sd, preinit_po1030[i][1],
+ data);
+ if (err < 0)
+ return err;
}
- if (rc < 0)
- return rc;
if (m5602_read_sensor(sd, PO1030_DEVID_H, &dev_id_h, 1))
return -ENODEV;
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 926408b41270..7a6f01ace78a 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -763,7 +763,8 @@ static int at24_probe(struct i2c_client *client)
at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
if (IS_ERR(at24->nvmem)) {
pm_runtime_disable(dev);
- regulator_disable(at24->vcc_reg);
+ if (!pm_runtime_status_suspended(dev))
+ regulator_disable(at24->vcc_reg);
return PTR_ERR(at24->nvmem);
}
@@ -774,7 +775,8 @@ static int at24_probe(struct i2c_client *client)
err = at24_read(at24, 0, &test_byte, 1);
if (err) {
pm_runtime_disable(dev);
- regulator_disable(at24->vcc_reg);
+ if (!pm_runtime_status_suspended(dev))
+ regulator_disable(at24->vcc_reg);
return -ENODEV;
}
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index ff8791a651fd..af3c497defb1 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -2017,7 +2017,7 @@ wait_again:
if (completion_value >= target_value) {
*status = CS_WAIT_STATUS_COMPLETED;
} else {
- timeout -= jiffies_to_usecs(completion_rc);
+ timeout = completion_rc;
goto wait_again;
}
} else {
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index 832dd5c5bb06..0713b2c12d54 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -362,12 +362,9 @@ static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
}
if (err_val & CPU_BOOT_ERR0_SECURITY_NOT_RDY) {
- dev_warn(hdev->dev,
+ dev_err(hdev->dev,
"Device boot warning - security not ready\n");
- /* This is a warning so we don't want it to disable the
- * device
- */
- err_val &= ~CPU_BOOT_ERR0_SECURITY_NOT_RDY;
+ err_exists = true;
}
if (err_val & CPU_BOOT_ERR0_SECURITY_FAIL) {
@@ -403,7 +400,8 @@ static int fw_read_errors(struct hl_device *hdev, u32 boot_err0_reg,
err_exists = true;
}
- if (err_exists)
+ if (err_exists && ((err_val & ~CPU_BOOT_ERR0_ENABLED) &
+ lower_32_bits(hdev->boot_error_status_mask)))
return -EIO;
return 0;
@@ -661,18 +659,13 @@ int hl_fw_cpucp_total_energy_get(struct hl_device *hdev, u64 *total_energy)
return rc;
}
-int get_used_pll_index(struct hl_device *hdev, enum pll_index input_pll_index,
+int get_used_pll_index(struct hl_device *hdev, u32 input_pll_index,
enum pll_index *pll_index)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
u8 pll_byte, pll_bit_off;
bool dynamic_pll;
-
- if (input_pll_index >= PLL_MAX) {
- dev_err(hdev->dev, "PLL index %d is out of range\n",
- input_pll_index);
- return -EINVAL;
- }
+ int fw_pll_idx;
dynamic_pll = prop->fw_security_status_valid &&
(prop->fw_app_security_map & CPU_BOOT_DEV_STS0_DYN_PLL_EN);
@@ -680,28 +673,39 @@ int get_used_pll_index(struct hl_device *hdev, enum pll_index input_pll_index,
if (!dynamic_pll) {
/*
* in case we are working with legacy FW (each asic has unique
- * PLL numbering) extract the legacy numbering
+	 * PLL numbering), use the driver-based index, as it is
+	 * aligned with the FW legacy numbering
*/
- *pll_index = hdev->legacy_pll_map[input_pll_index];
+ *pll_index = input_pll_index;
return 0;
}
+	/* retrieve a FW-compatible PLL index based on the
+	 * ASIC-specific user request
+	 */
+ fw_pll_idx = hdev->asic_funcs->map_pll_idx_to_fw_idx(input_pll_index);
+ if (fw_pll_idx < 0) {
+ dev_err(hdev->dev, "Invalid PLL index (%u) error %d\n",
+ input_pll_index, fw_pll_idx);
+ return -EINVAL;
+ }
+
/* PLL map is a u8 array */
- pll_byte = prop->cpucp_info.pll_map[input_pll_index >> 3];
- pll_bit_off = input_pll_index & 0x7;
+ pll_byte = prop->cpucp_info.pll_map[fw_pll_idx >> 3];
+ pll_bit_off = fw_pll_idx & 0x7;
if (!(pll_byte & BIT(pll_bit_off))) {
dev_err(hdev->dev, "PLL index %d is not supported\n",
- input_pll_index);
+ fw_pll_idx);
return -EINVAL;
}
- *pll_index = input_pll_index;
+ *pll_index = fw_pll_idx;
return 0;
}
-int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, enum pll_index pll_index,
+int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u32 pll_index,
u16 *pll_freq_arr)
{
struct cpucp_packet pkt;
@@ -844,8 +848,13 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
if (rc) {
dev_err(hdev->dev, "Failed to read preboot version\n");
detect_cpu_boot_status(hdev, status);
- fw_read_errors(hdev, boot_err0_reg,
- cpu_security_boot_status_reg);
+
+		/* If we read all FF, then something is totally wrong, so there
+		 * is no point in reading specific errors
+		 */
+ if (status != -1)
+ fw_read_errors(hdev, boot_err0_reg,
+ cpu_security_boot_status_reg);
return -EIO;
}
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 44e89da30b4a..6579f8767abd 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -930,6 +930,9 @@ enum div_select_defs {
* driver is ready to receive asynchronous events. This
* function should be called during the first init and
* after every hard-reset of the device
+ * @get_msi_info: Retrieve the ASIC-specific MSI ID of the f/w async event
+ * @map_pll_idx_to_fw_idx: convert a driver-specific, per-ASIC PLL index to
+ *                         its generic f/w-compatible PLL index
*/
struct hl_asic_funcs {
int (*early_init)(struct hl_device *hdev);
@@ -1054,6 +1057,7 @@ struct hl_asic_funcs {
u32 block_id, u32 block_size);
void (*enable_events_from_fw)(struct hl_device *hdev);
void (*get_msi_info)(u32 *table);
+ int (*map_pll_idx_to_fw_idx)(u32 pll_idx);
};
@@ -1950,8 +1954,6 @@ struct hl_mmu_funcs {
* @aggregated_cs_counters: aggregated cs counters among all contexts
* @mmu_priv: device-specific MMU data.
* @mmu_func: device-related MMU functions.
- * @legacy_pll_map: map holding map between dynamic (common) PLL indexes and
- * static (asic specific) PLL indexes.
* @dram_used_mem: current DRAM memory consumption.
* @timeout_jiffies: device CS timeout value.
* @max_power: the max power of the device, as configured by the sysadmin. This
@@ -1960,6 +1962,12 @@ struct hl_mmu_funcs {
* @clock_gating_mask: is clock gating enabled. bitmask that represents the
* different engines. See debugfs-driver-habanalabs for
* details.
+ * @boot_error_status_mask: contains a mask of the device boot error status.
+ * Each bit represents a different error, according to
+ * the defines in hl_boot_if.h. If the bit is cleared,
+ * the error will be ignored by the driver during
+ * device initialization. Mainly used to debug and
+ *                          work around firmware bugs
* @in_reset: is device in reset flow.
* @curr_pll_profile: current PLL profile.
* @card_type: Various ASICs have several card types. This indicates the card
@@ -2071,12 +2079,11 @@ struct hl_device {
struct hl_mmu_priv mmu_priv;
struct hl_mmu_funcs mmu_func[MMU_NUM_PGT_LOCATIONS];
- enum pll_index *legacy_pll_map;
-
atomic64_t dram_used_mem;
u64 timeout_jiffies;
u64 max_power;
u64 clock_gating_mask;
+ u64 boot_error_status_mask;
atomic_t in_reset;
enum hl_pll_frequency curr_pll_profile;
enum cpucp_card_types card_type;
@@ -2387,9 +2394,9 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
struct hl_info_pci_counters *counters);
int hl_fw_cpucp_total_energy_get(struct hl_device *hdev,
u64 *total_energy);
-int get_used_pll_index(struct hl_device *hdev, enum pll_index input_pll_index,
+int get_used_pll_index(struct hl_device *hdev, u32 input_pll_index,
enum pll_index *pll_index);
-int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, enum pll_index pll_index,
+int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u32 pll_index,
u16 *pll_freq_arr);
int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power);
int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
@@ -2411,9 +2418,9 @@ int hl_pci_set_outbound_region(struct hl_device *hdev,
int hl_pci_init(struct hl_device *hdev);
void hl_pci_fini(struct hl_device *hdev);
-long hl_get_frequency(struct hl_device *hdev, enum pll_index pll_index,
+long hl_get_frequency(struct hl_device *hdev, u32 pll_index,
bool curr);
-void hl_set_frequency(struct hl_device *hdev, enum pll_index pll_index,
+void hl_set_frequency(struct hl_device *hdev, u32 pll_index,
u64 freq);
int hl_get_temperature(struct hl_device *hdev,
int sensor_index, u32 attr, long *value);
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index 7135f1e03864..64d1530db985 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -30,6 +30,7 @@ static DEFINE_MUTEX(hl_devs_idr_lock);
static int timeout_locked = 30;
static int reset_on_lockup = 1;
static int memory_scrub = 1;
+static ulong boot_error_status_mask = ULONG_MAX;
module_param(timeout_locked, int, 0444);
MODULE_PARM_DESC(timeout_locked,
@@ -43,6 +44,10 @@ module_param(memory_scrub, int, 0444);
MODULE_PARM_DESC(memory_scrub,
"Scrub device memory in various states (0 = no, 1 = yes, default yes)");
+module_param(boot_error_status_mask, ulong, 0444);
+MODULE_PARM_DESC(boot_error_status_mask,
+ "Mask of the error status during device CPU boot (If bitX is cleared then error X is masked. Default all 1's)");
+
#define PCI_VENDOR_ID_HABANALABS 0x1da3
#define PCI_IDS_GOYA 0x0001
@@ -319,6 +324,8 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
hdev->major = hl_major;
hdev->reset_on_lockup = reset_on_lockup;
hdev->memory_scrub = memory_scrub;
+ hdev->boot_error_status_mask = boot_error_status_mask;
+
hdev->pldm = 0;
set_driver_behavior_per_device(hdev);
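The new boot_error_status_mask parameter gives each boot-error bit an independent kill switch: as the firmware_if.c hunk above shows, fw_read_errors() now returns -EIO only when a reported error bit survives masking with the lower 32 bits of the parameter (with CPU_BOOT_ERR0_ENABLED always excluded). A sketch of the masking logic with hypothetical bit definitions:

#include <stdint.h>
#include <stdio.h>

#define ERR0_ENABLED      (1u << 31)	/* hypothetical "register valid" bit */
#define ERR0_SECURITY_RDY (1u << 3)	/* hypothetical error bit */
#define ERR0_DRAM_FAIL    (1u << 0)	/* hypothetical error bit */

static int boot_failed(uint32_t err_val, uint64_t status_mask)
{
	/* drop the validity bit, then keep only unmasked errors */
	uint32_t relevant = (err_val & ~ERR0_ENABLED) &
			    (uint32_t)status_mask;

	return relevant ? -5 /* -EIO */ : 0;
}

int main(void)
{
	uint32_t err = ERR0_ENABLED | ERR0_SECURITY_RDY;

	/* default mask: all 1's, so every reported error is fatal */
	printf("default: %d\n", boot_failed(err, ~0ULL));

	/* debug: clear bit 3 to ignore the security-ready error */
	printf("masked : %d\n", boot_failed(err, ~0ULL & ~(1ULL << 3)));
	return 0;
}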
diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c
index 9fa61573a89d..c9f649b31e3a 100644
--- a/drivers/misc/habanalabs/common/sysfs.c
+++ b/drivers/misc/habanalabs/common/sysfs.c
@@ -9,7 +9,7 @@
#include <linux/pci.h>
-long hl_get_frequency(struct hl_device *hdev, enum pll_index pll_index,
+long hl_get_frequency(struct hl_device *hdev, u32 pll_index,
bool curr)
{
struct cpucp_packet pkt;
@@ -44,7 +44,7 @@ long hl_get_frequency(struct hl_device *hdev, enum pll_index pll_index,
return (long) result;
}
-void hl_set_frequency(struct hl_device *hdev, enum pll_index pll_index,
+void hl_set_frequency(struct hl_device *hdev, u32 pll_index,
u64 freq)
{
struct cpucp_packet pkt;
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index b751652f80a8..9e4a6bb3acd1 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -105,36 +105,6 @@
#define GAUDI_PLL_MAX 10
-/*
- * this enum kept here for compatibility with old FW (in which each asic has
- * unique PLL numbering
- */
-enum gaudi_pll_index {
- GAUDI_CPU_PLL = 0,
- GAUDI_PCI_PLL,
- GAUDI_SRAM_PLL,
- GAUDI_HBM_PLL,
- GAUDI_NIC_PLL,
- GAUDI_DMA_PLL,
- GAUDI_MESH_PLL,
- GAUDI_MME_PLL,
- GAUDI_TPC_PLL,
- GAUDI_IF_PLL,
-};
-
-static enum pll_index gaudi_pll_map[PLL_MAX] = {
- [CPU_PLL] = GAUDI_CPU_PLL,
- [PCI_PLL] = GAUDI_PCI_PLL,
- [SRAM_PLL] = GAUDI_SRAM_PLL,
- [HBM_PLL] = GAUDI_HBM_PLL,
- [NIC_PLL] = GAUDI_NIC_PLL,
- [DMA_PLL] = GAUDI_DMA_PLL,
- [MESH_PLL] = GAUDI_MESH_PLL,
- [MME_PLL] = GAUDI_MME_PLL,
- [TPC_PLL] = GAUDI_TPC_PLL,
- [IF_PLL] = GAUDI_IF_PLL,
-};
-
static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
"gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3",
"gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3",
@@ -810,7 +780,7 @@ static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
freq = 0;
}
} else {
- rc = hl_fw_cpucp_pll_info_get(hdev, CPU_PLL, pll_freq_arr);
+ rc = hl_fw_cpucp_pll_info_get(hdev, HL_GAUDI_CPU_PLL, pll_freq_arr);
if (rc)
return rc;
@@ -1652,9 +1622,6 @@ static int gaudi_sw_init(struct hl_device *hdev)
hdev->asic_specific = gaudi;
- /* store legacy PLL map */
- hdev->legacy_pll_map = gaudi_pll_map;
-
/* Create DMA pool for small allocations */
hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
&hdev->pdev->dev, GAUDI_DMA_POOL_BLK_SIZE, 8, 0);
@@ -5612,6 +5579,7 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
struct hl_cs_job *job;
u32 cb_size, ctl, err_cause;
struct hl_cb *cb;
+ u64 id;
int rc;
cb = hl_cb_kernel_create(hdev, PAGE_SIZE, false);
@@ -5678,8 +5646,9 @@ static int gaudi_memset_device_memory(struct hl_device *hdev, u64 addr,
}
release_cb:
+ id = cb->id;
hl_cb_put(cb);
- hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT);
+ hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, id << PAGE_SHIFT);
return rc;
}
@@ -8783,6 +8752,23 @@ static void gaudi_enable_events_from_fw(struct hl_device *hdev)
WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_INTS_REGISTER);
}
+static int gaudi_map_pll_idx_to_fw_idx(u32 pll_idx)
+{
+ switch (pll_idx) {
+ case HL_GAUDI_CPU_PLL: return CPU_PLL;
+ case HL_GAUDI_PCI_PLL: return PCI_PLL;
+ case HL_GAUDI_NIC_PLL: return NIC_PLL;
+ case HL_GAUDI_DMA_PLL: return DMA_PLL;
+ case HL_GAUDI_MESH_PLL: return MESH_PLL;
+ case HL_GAUDI_MME_PLL: return MME_PLL;
+ case HL_GAUDI_TPC_PLL: return TPC_PLL;
+ case HL_GAUDI_IF_PLL: return IF_PLL;
+ case HL_GAUDI_SRAM_PLL: return SRAM_PLL;
+ case HL_GAUDI_HBM_PLL: return HBM_PLL;
+ default: return -EINVAL;
+ }
+}
+
static const struct hl_asic_funcs gaudi_funcs = {
.early_init = gaudi_early_init,
.early_fini = gaudi_early_fini,
@@ -8866,7 +8852,8 @@ static const struct hl_asic_funcs gaudi_funcs = {
.ack_protection_bits_errors = gaudi_ack_protection_bits_errors,
.get_hw_block_id = gaudi_get_hw_block_id,
.hw_block_mmap = gaudi_block_mmap,
- .enable_events_from_fw = gaudi_enable_events_from_fw
+ .enable_events_from_fw = gaudi_enable_events_from_fw,
+ .map_pll_idx_to_fw_idx = gaudi_map_pll_idx_to_fw_idx
};
/**
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c b/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
index 8c49da4bcbd5..9b60eadd4c35 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
@@ -13,7 +13,7 @@ void gaudi_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
struct gaudi_device *gaudi = hdev->asic_specific;
if (freq == PLL_LAST)
- hl_set_frequency(hdev, MME_PLL, gaudi->max_freq_value);
+ hl_set_frequency(hdev, HL_GAUDI_MME_PLL, gaudi->max_freq_value);
}
int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
@@ -23,7 +23,7 @@ int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, MME_PLL, false);
+ value = hl_get_frequency(hdev, HL_GAUDI_MME_PLL, false);
if (value < 0) {
dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n",
@@ -33,7 +33,7 @@ int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
*max_clk = (value / 1000 / 1000);
- value = hl_get_frequency(hdev, MME_PLL, true);
+ value = hl_get_frequency(hdev, HL_GAUDI_MME_PLL, true);
if (value < 0) {
dev_err(hdev->dev,
@@ -57,7 +57,7 @@ static ssize_t clk_max_freq_mhz_show(struct device *dev,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, MME_PLL, false);
+ value = hl_get_frequency(hdev, HL_GAUDI_MME_PLL, false);
gaudi->max_freq_value = value;
@@ -85,7 +85,7 @@ static ssize_t clk_max_freq_mhz_store(struct device *dev,
gaudi->max_freq_value = value * 1000 * 1000;
- hl_set_frequency(hdev, MME_PLL, gaudi->max_freq_value);
+ hl_set_frequency(hdev, HL_GAUDI_MME_PLL, gaudi->max_freq_value);
fail:
return count;
@@ -100,7 +100,7 @@ static ssize_t clk_cur_freq_mhz_show(struct device *dev,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, MME_PLL, true);
+ value = hl_get_frequency(hdev, HL_GAUDI_MME_PLL, true);
return sprintf(buf, "%lu\n", (value / 1000 / 1000));
}
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index e27338f4aad2..e0ad2a269779 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -118,30 +118,6 @@
#define IS_MME_IDLE(mme_arch_sts) \
(((mme_arch_sts) & MME_ARCH_IDLE_MASK) == MME_ARCH_IDLE_MASK)
-/*
- * this enum kept here for compatibility with old FW (in which each asic has
- * unique PLL numbering
- */
-enum goya_pll_index {
- GOYA_CPU_PLL = 0,
- GOYA_IC_PLL,
- GOYA_MC_PLL,
- GOYA_MME_PLL,
- GOYA_PCI_PLL,
- GOYA_EMMC_PLL,
- GOYA_TPC_PLL,
-};
-
-static enum pll_index goya_pll_map[PLL_MAX] = {
- [CPU_PLL] = GOYA_CPU_PLL,
- [IC_PLL] = GOYA_IC_PLL,
- [MC_PLL] = GOYA_MC_PLL,
- [MME_PLL] = GOYA_MME_PLL,
- [PCI_PLL] = GOYA_PCI_PLL,
- [EMMC_PLL] = GOYA_EMMC_PLL,
- [TPC_PLL] = GOYA_TPC_PLL,
-};
-
static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = {
"goya cq 0", "goya cq 1", "goya cq 2", "goya cq 3",
"goya cq 4", "goya cpu eq"
@@ -775,7 +751,8 @@ static void goya_fetch_psoc_frequency(struct hl_device *hdev)
freq = 0;
}
} else {
- rc = hl_fw_cpucp_pll_info_get(hdev, PCI_PLL, pll_freq_arr);
+ rc = hl_fw_cpucp_pll_info_get(hdev, HL_GOYA_PCI_PLL,
+ pll_freq_arr);
if (rc)
return;
@@ -897,9 +874,6 @@ static int goya_sw_init(struct hl_device *hdev)
hdev->asic_specific = goya;
- /* store legacy PLL map */
- hdev->legacy_pll_map = goya_pll_map;
-
/* Create DMA pool for small allocations */
hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
&hdev->pdev->dev, GOYA_DMA_POOL_BLK_SIZE, 8, 0);
@@ -5512,6 +5486,20 @@ static void goya_enable_events_from_fw(struct hl_device *hdev)
GOYA_ASYNC_EVENT_ID_INTS_REGISTER);
}
+static int goya_map_pll_idx_to_fw_idx(u32 pll_idx)
+{
+ switch (pll_idx) {
+ case HL_GOYA_CPU_PLL: return CPU_PLL;
+ case HL_GOYA_PCI_PLL: return PCI_PLL;
+ case HL_GOYA_MME_PLL: return MME_PLL;
+ case HL_GOYA_TPC_PLL: return TPC_PLL;
+ case HL_GOYA_IC_PLL: return IC_PLL;
+ case HL_GOYA_MC_PLL: return MC_PLL;
+ case HL_GOYA_EMMC_PLL: return EMMC_PLL;
+ default: return -EINVAL;
+ }
+}
+
static const struct hl_asic_funcs goya_funcs = {
.early_init = goya_early_init,
.early_fini = goya_early_fini,
@@ -5595,7 +5583,8 @@ static const struct hl_asic_funcs goya_funcs = {
.ack_protection_bits_errors = goya_ack_protection_bits_errors,
.get_hw_block_id = goya_get_hw_block_id,
.hw_block_mmap = goya_block_mmap,
- .enable_events_from_fw = goya_enable_events_from_fw
+ .enable_events_from_fw = goya_enable_events_from_fw,
+ .map_pll_idx_to_fw_idx = goya_map_pll_idx_to_fw_idx
};
/*
diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c
index 3acb36a1a902..7d007125727f 100644
--- a/drivers/misc/habanalabs/goya/goya_hwmgr.c
+++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c
@@ -13,19 +13,19 @@ void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
switch (freq) {
case PLL_HIGH:
- hl_set_frequency(hdev, MME_PLL, hdev->high_pll);
- hl_set_frequency(hdev, TPC_PLL, hdev->high_pll);
- hl_set_frequency(hdev, IC_PLL, hdev->high_pll);
+ hl_set_frequency(hdev, HL_GOYA_MME_PLL, hdev->high_pll);
+ hl_set_frequency(hdev, HL_GOYA_TPC_PLL, hdev->high_pll);
+ hl_set_frequency(hdev, HL_GOYA_IC_PLL, hdev->high_pll);
break;
case PLL_LOW:
- hl_set_frequency(hdev, MME_PLL, GOYA_PLL_FREQ_LOW);
- hl_set_frequency(hdev, TPC_PLL, GOYA_PLL_FREQ_LOW);
- hl_set_frequency(hdev, IC_PLL, GOYA_PLL_FREQ_LOW);
+ hl_set_frequency(hdev, HL_GOYA_MME_PLL, GOYA_PLL_FREQ_LOW);
+ hl_set_frequency(hdev, HL_GOYA_TPC_PLL, GOYA_PLL_FREQ_LOW);
+ hl_set_frequency(hdev, HL_GOYA_IC_PLL, GOYA_PLL_FREQ_LOW);
break;
case PLL_LAST:
- hl_set_frequency(hdev, MME_PLL, goya->mme_clk);
- hl_set_frequency(hdev, TPC_PLL, goya->tpc_clk);
- hl_set_frequency(hdev, IC_PLL, goya->ic_clk);
+ hl_set_frequency(hdev, HL_GOYA_MME_PLL, goya->mme_clk);
+ hl_set_frequency(hdev, HL_GOYA_TPC_PLL, goya->tpc_clk);
+ hl_set_frequency(hdev, HL_GOYA_IC_PLL, goya->ic_clk);
break;
default:
dev_err(hdev->dev, "unknown frequency setting\n");
@@ -39,7 +39,7 @@ int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, MME_PLL, false);
+ value = hl_get_frequency(hdev, HL_GOYA_MME_PLL, false);
if (value < 0) {
dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n",
@@ -49,7 +49,7 @@ int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
*max_clk = (value / 1000 / 1000);
- value = hl_get_frequency(hdev, MME_PLL, true);
+ value = hl_get_frequency(hdev, HL_GOYA_MME_PLL, true);
if (value < 0) {
dev_err(hdev->dev,
@@ -72,7 +72,7 @@ static ssize_t mme_clk_show(struct device *dev, struct device_attribute *attr,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, MME_PLL, false);
+ value = hl_get_frequency(hdev, HL_GOYA_MME_PLL, false);
if (value < 0)
return value;
@@ -105,7 +105,7 @@ static ssize_t mme_clk_store(struct device *dev, struct device_attribute *attr,
goto fail;
}
- hl_set_frequency(hdev, MME_PLL, value);
+ hl_set_frequency(hdev, HL_GOYA_MME_PLL, value);
goya->mme_clk = value;
fail:
@@ -121,7 +121,7 @@ static ssize_t tpc_clk_show(struct device *dev, struct device_attribute *attr,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, TPC_PLL, false);
+ value = hl_get_frequency(hdev, HL_GOYA_TPC_PLL, false);
if (value < 0)
return value;
@@ -154,7 +154,7 @@ static ssize_t tpc_clk_store(struct device *dev, struct device_attribute *attr,
goto fail;
}
- hl_set_frequency(hdev, TPC_PLL, value);
+ hl_set_frequency(hdev, HL_GOYA_TPC_PLL, value);
goya->tpc_clk = value;
fail:
@@ -170,7 +170,7 @@ static ssize_t ic_clk_show(struct device *dev, struct device_attribute *attr,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, IC_PLL, false);
+ value = hl_get_frequency(hdev, HL_GOYA_IC_PLL, false);
if (value < 0)
return value;
@@ -203,7 +203,7 @@ static ssize_t ic_clk_store(struct device *dev, struct device_attribute *attr,
goto fail;
}
- hl_set_frequency(hdev, IC_PLL, value);
+ hl_set_frequency(hdev, HL_GOYA_IC_PLL, value);
goya->ic_clk = value;
fail:
@@ -219,7 +219,7 @@ static ssize_t mme_clk_curr_show(struct device *dev,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, MME_PLL, true);
+ value = hl_get_frequency(hdev, HL_GOYA_MME_PLL, true);
if (value < 0)
return value;
@@ -236,7 +236,7 @@ static ssize_t tpc_clk_curr_show(struct device *dev,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, TPC_PLL, true);
+ value = hl_get_frequency(hdev, HL_GOYA_TPC_PLL, true);
if (value < 0)
return value;
@@ -253,7 +253,7 @@ static ssize_t ic_clk_curr_show(struct device *dev,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, IC_PLL, true);
+ value = hl_get_frequency(hdev, HL_GOYA_IC_PLL, true);
if (value < 0)
return value;
diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
index 2bdf560ee681..0f9ea75b0b18 100644
--- a/drivers/misc/ics932s401.c
+++ b/drivers/misc/ics932s401.c
@@ -134,7 +134,7 @@ static struct ics932s401_data *ics932s401_update_device(struct device *dev)
for (i = 0; i < NUM_MIRRORED_REGS; i++) {
temp = i2c_smbus_read_word_data(client, regs_to_copy[i]);
if (temp < 0)
- data->regs[regs_to_copy[i]] = 0;
+ temp = 0;
data->regs[regs_to_copy[i]] = temp >> 8;
}
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
index c394c0b08519..7ac788fae1b8 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.h
+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
@@ -271,6 +271,7 @@ struct lis3lv02d {
int regs_size;
u8 *reg_cache;
bool regs_stored;
+ bool init_required;
u8 odr_mask; /* ODR bit mask */
u8 whoami; /* indicates measurement precision */
s16 (*read_data) (struct lis3lv02d *lis3, int reg);
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index b8b771b643cc..016a6106151a 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -236,7 +236,8 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
if (host->dram_access_quirk)
return;
- if (data->blocks > 1) {
+ /* SD_IO_RW_EXTENDED (CMD53) can also use block mode under the hood */
+ if (data->blocks > 1 || mrq->cmd->opcode == SD_IO_RW_EXTENDED) {
/*
* In block mode DMA descriptor format, "length" field indicates
* number of blocks and there is no way to pass DMA size that
@@ -258,7 +259,9 @@ static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
for_each_sg(data->sg, sg, data->sg_len, i) {
/* check for 8 byte alignment */
if (sg->offset % 8) {
- WARN_ONCE(1, "unaligned scatterlist buffer\n");
+ dev_warn_once(mmc_dev(mmc),
+ "unaligned sg offset %u, disabling descriptor DMA for transfer\n",
+ sg->offset);
return;
}
}
diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
index 592d79082f58..061618aa247f 100644
--- a/drivers/mmc/host/sdhci-pci-gli.c
+++ b/drivers/mmc/host/sdhci-pci-gli.c
@@ -627,8 +627,13 @@ static void sdhci_gli_voltage_switch(struct sdhci_host *host)
*
* Wait 5ms after set 1.8V signal enable in Host Control 2 register
* to ensure 1.8V signal enable bit is set by GL9750/GL9755.
+ *
+ * ...however, the controller in the NUC10i3FNK4 (a 9755) requires
+ * slightly longer than 5ms before the control register reports that
+ * 1.8V is ready, and far longer still before the card will actually
+ * work reliably.
*/
- usleep_range(5000, 5500);
+ usleep_range(100000, 110000);
}
static void sdhci_gl9750_reset(struct sdhci_host *host, u8 mask)
diff --git a/drivers/mtd/nand/raw/cs553x_nand.c b/drivers/mtd/nand/raw/cs553x_nand.c
index 6edf78c16fc8..df40927e5678 100644
--- a/drivers/mtd/nand/raw/cs553x_nand.c
+++ b/drivers/mtd/nand/raw/cs553x_nand.c
@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/iopoll.h>
@@ -240,6 +241,15 @@ static int cs_calculate_ecc(struct nand_chip *this, const u_char *dat,
return 0;
}
+static int cs553x_ecc_correct(struct nand_chip *chip,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+ chip->ecc.size, false);
+}
+
static struct cs553x_nand_controller *controllers[4];
static int cs553x_attach_chip(struct nand_chip *chip)
@@ -251,7 +261,7 @@ static int cs553x_attach_chip(struct nand_chip *chip)
chip->ecc.bytes = 3;
chip->ecc.hwctl = cs_enable_hwecc;
chip->ecc.calculate = cs_calculate_ecc;
- chip->ecc.correct = rawnand_sw_hamming_correct;
+ chip->ecc.correct = cs553x_ecc_correct;
chip->ecc.strength = 1;
return 0;
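This wrapper recurs near-verbatim in six more drivers below (fsmc, lpc32xx_slc, ndfc, sharpsl, tmio, txx9ndfmc): the shared rawnand_sw_hamming_correct() helper derives its parameters from a software-ECC context these on-host engines never initialize, so each driver now calls ecc_sw_hamming_correct() with an explicit chip->ecc.size and sm_order=false. For readers unfamiliar with what a Hamming "correct" step does, here is the idea shrunk to a toy Hamming(7,4) code, illustrative only, not the kernel's 256/512-byte implementation:

#include <stdio.h>

static unsigned encode(unsigned d)	/* d: 4 data bits */
{
	unsigned d0 = d & 1, d1 = (d >> 1) & 1,
		 d2 = (d >> 2) & 1, d3 = (d >> 3) & 1;
	unsigned p1 = d0 ^ d1 ^ d3, p2 = d0 ^ d2 ^ d3, p4 = d1 ^ d2 ^ d3;

	/* bit positions 1..7: p1 p2 d0 p4 d1 d2 d3 */
	return p1 | (p2 << 1) | (d0 << 2) | (p4 << 3) |
	       (d1 << 4) | (d2 << 5) | (d3 << 6);
}

static unsigned correct(unsigned cw)	/* returns corrected codeword */
{
	unsigned s = 0;

	for (unsigned p = 1; p <= 4; p <<= 1) {
		unsigned par = 0;

		for (unsigned i = 1; i <= 7; i++)
			if (i & p)
				par ^= (cw >> (i - 1)) & 1;
		if (par)
			s |= p;	/* syndrome names the flipped position */
	}
	return s ? cw ^ (1u << (s - 1)) : cw;
}

int main(void)
{
	unsigned cw = encode(0xA);
	unsigned bad = cw ^ (1u << 4);	/* flip one bit "in the flash" */

	printf("corrected == original: %d\n", correct(bad) == cw);
	return 0;
}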
diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
index bf695255b43a..a3e66155ae40 100644
--- a/drivers/mtd/nand/raw/fsmc_nand.c
+++ b/drivers/mtd/nand/raw/fsmc_nand.c
@@ -25,6 +25,7 @@
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/mtd/rawnand.h>
#include <linux/platform_device.h>
#include <linux/of.h>
@@ -432,6 +433,15 @@ static int fsmc_read_hwecc_ecc1(struct nand_chip *chip, const u8 *data,
return 0;
}
+static int fsmc_correct_ecc1(struct nand_chip *chip,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+ chip->ecc.size, false);
+}
+
/* Count the number of 0's in buff upto a max of max_bits */
static int count_written_bits(u8 *buff, int size, int max_bits)
{
@@ -917,7 +927,7 @@ static int fsmc_nand_attach_chip(struct nand_chip *nand)
case NAND_ECC_ENGINE_TYPE_ON_HOST:
dev_info(host->dev, "Using 1-bit HW ECC scheme\n");
nand->ecc.calculate = fsmc_read_hwecc_ecc1;
- nand->ecc.correct = rawnand_sw_hamming_correct;
+ nand->ecc.correct = fsmc_correct_ecc1;
nand->ecc.hwctl = fsmc_enable_hwecc;
nand->ecc.bytes = 3;
nand->ecc.strength = 1;
diff --git a/drivers/mtd/nand/raw/lpc32xx_slc.c b/drivers/mtd/nand/raw/lpc32xx_slc.c
index 6b7269cfb7d8..d7dfc6fd85ca 100644
--- a/drivers/mtd/nand/raw/lpc32xx_slc.c
+++ b/drivers/mtd/nand/raw/lpc32xx_slc.c
@@ -27,6 +27,7 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mtd/lpc32xx_slc.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
#define LPC32XX_MODNAME "lpc32xx-nand"
@@ -345,6 +346,18 @@ static int lpc32xx_nand_ecc_calculate(struct nand_chip *chip,
}
/*
+ * Corrects the data
+ */
+static int lpc32xx_nand_ecc_correct(struct nand_chip *chip,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+ chip->ecc.size, false);
+}
+
+/*
* Read a single byte from NAND device
*/
static uint8_t lpc32xx_nand_read_byte(struct nand_chip *chip)
@@ -802,7 +815,7 @@ static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
- chip->ecc.correct = rawnand_sw_hamming_correct;
+ chip->ecc.correct = lpc32xx_nand_ecc_correct;
chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
/*
diff --git a/drivers/mtd/nand/raw/ndfc.c b/drivers/mtd/nand/raw/ndfc.c
index 338d6b1a189e..98d5a94c3a24 100644
--- a/drivers/mtd/nand/raw/ndfc.c
+++ b/drivers/mtd/nand/raw/ndfc.c
@@ -22,6 +22,7 @@
#include <linux/mtd/ndfc.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <asm/io.h>
@@ -100,6 +101,15 @@ static int ndfc_calculate_ecc(struct nand_chip *chip,
return 0;
}
+static int ndfc_correct_ecc(struct nand_chip *chip,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+ chip->ecc.size, false);
+}
+
/*
* Speedups for buffer read/write/verify
*
@@ -145,7 +155,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
chip->controller = &ndfc->ndfc_control;
chip->legacy.read_buf = ndfc_read_buf;
chip->legacy.write_buf = ndfc_write_buf;
- chip->ecc.correct = rawnand_sw_hamming_correct;
+ chip->ecc.correct = ndfc_correct_ecc;
chip->ecc.hwctl = ndfc_enable_hwecc;
chip->ecc.calculate = ndfc_calculate_ecc;
chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
diff --git a/drivers/mtd/nand/raw/sharpsl.c b/drivers/mtd/nand/raw/sharpsl.c
index 5612ee628425..2f1fe464e663 100644
--- a/drivers/mtd/nand/raw/sharpsl.c
+++ b/drivers/mtd/nand/raw/sharpsl.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/sharpsl.h>
@@ -96,6 +97,15 @@ static int sharpsl_nand_calculate_ecc(struct nand_chip *chip,
return readb(sharpsl->io + ECCCNTR) != 0;
}
+static int sharpsl_nand_correct_ecc(struct nand_chip *chip,
+ unsigned char *buf,
+ unsigned char *read_ecc,
+ unsigned char *calc_ecc)
+{
+ return ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+ chip->ecc.size, false);
+}
+
static int sharpsl_attach_chip(struct nand_chip *chip)
{
if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
@@ -106,7 +116,7 @@ static int sharpsl_attach_chip(struct nand_chip *chip)
chip->ecc.strength = 1;
chip->ecc.hwctl = sharpsl_nand_enable_hwecc;
chip->ecc.calculate = sharpsl_nand_calculate_ecc;
- chip->ecc.correct = rawnand_sw_hamming_correct;
+ chip->ecc.correct = sharpsl_nand_correct_ecc;
return 0;
}
diff --git a/drivers/mtd/nand/raw/tmio_nand.c b/drivers/mtd/nand/raw/tmio_nand.c
index de8e919d0ebe..6d93dd31969b 100644
--- a/drivers/mtd/nand/raw/tmio_nand.c
+++ b/drivers/mtd/nand/raw/tmio_nand.c
@@ -34,6 +34,7 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
@@ -292,11 +293,12 @@ static int tmio_nand_correct_data(struct nand_chip *chip, unsigned char *buf,
int r0, r1;
/* assume ecc.size = 512 and ecc.bytes = 6 */
- r0 = rawnand_sw_hamming_correct(chip, buf, read_ecc, calc_ecc);
+ r0 = ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+ chip->ecc.size, false);
if (r0 < 0)
return r0;
- r1 = rawnand_sw_hamming_correct(chip, buf + 256, read_ecc + 3,
- calc_ecc + 3);
+ r1 = ecc_sw_hamming_correct(buf + 256, read_ecc + 3, calc_ecc + 3,
+ chip->ecc.size, false);
if (r1 < 0)
return r1;
return r0 + r1;
diff --git a/drivers/mtd/nand/raw/txx9ndfmc.c b/drivers/mtd/nand/raw/txx9ndfmc.c
index 1a9449e53bf9..b8894ac27073 100644
--- a/drivers/mtd/nand/raw/txx9ndfmc.c
+++ b/drivers/mtd/nand/raw/txx9ndfmc.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand-ecc-sw-hamming.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
@@ -193,8 +194,8 @@ static int txx9ndfmc_correct_data(struct nand_chip *chip, unsigned char *buf,
int stat;
for (eccsize = chip->ecc.size; eccsize > 0; eccsize -= 256) {
- stat = rawnand_sw_hamming_correct(chip, buf, read_ecc,
- calc_ecc);
+ stat = ecc_sw_hamming_correct(buf, read_ecc, calc_ecc,
+ chip->ecc.size, false);
if (stat < 0)
return stat;
corrected += stat;
diff --git a/drivers/mtd/parsers/ofpart_core.c b/drivers/mtd/parsers/ofpart_core.c
index 0fd8d2a0db97..192190c42fc8 100644
--- a/drivers/mtd/parsers/ofpart_core.c
+++ b/drivers/mtd/parsers/ofpart_core.c
@@ -57,20 +57,22 @@ static int parse_fixed_partitions(struct mtd_info *master,
if (!mtd_node)
return 0;
- ofpart_node = of_get_child_by_name(mtd_node, "partitions");
- if (!ofpart_node && !master->parent) {
- /*
- * We might get here even when ofpart isn't used at all (e.g.,
- * when using another parser), so don't be louder than
- * KERN_DEBUG
- */
- pr_debug("%s: 'partitions' subnode not found on %pOF. Trying to parse direct subnodes as partitions.\n",
- master->name, mtd_node);
+ if (!master->parent) { /* Master */
+ ofpart_node = of_get_child_by_name(mtd_node, "partitions");
+ if (!ofpart_node) {
+ /*
+ * We might get here even when ofpart isn't used at all (e.g.,
+ * when using another parser), so don't be louder than
+ * KERN_DEBUG
+ */
+ pr_debug("%s: 'partitions' subnode not found on %pOF. Trying to parse direct subnodes as partitions.\n",
+ master->name, mtd_node);
+ ofpart_node = mtd_node;
+ dedicated = false;
+ }
+ } else { /* Partition */
ofpart_node = mtd_node;
- dedicated = false;
}
- if (!ofpart_node)
- return 0;
of_id = of_match_node(parse_ofpart_match_table, ofpart_node);
if (dedicated && !of_id) {
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 74dc8e249faa..6977f8248df7 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -262,17 +262,17 @@ config GENEVE
will be called geneve.
config BAREUDP
- tristate "Bare UDP Encapsulation"
- depends on INET
- depends on IPV6 || !IPV6
- select NET_UDP_TUNNEL
- select GRO_CELLS
- help
- This adds a bare UDP tunnel module for tunnelling different
- kinds of traffic like MPLS, IP, etc. inside a UDP tunnel.
-
- To compile this driver as a module, choose M here: the module
- will be called bareudp.
+ tristate "Bare UDP Encapsulation"
+ depends on INET
+ depends on IPV6 || !IPV6
+ select NET_UDP_TUNNEL
+ select GRO_CELLS
+ help
+ This adds a bare UDP tunnel module for tunnelling different
+ kinds of traffic like MPLS, IP, etc. inside a UDP tunnel.
+
+ To compile this driver as a module, choose M here: the module
+ will be called bareudp.
config GTP
tristate "GPRS Tunneling Protocol datapath (GTP-U)"
@@ -431,6 +431,7 @@ config VSOCKMON
config MHI_NET
tristate "MHI network driver"
depends on MHI_BUS
+ select WWAN
help
This is the network driver for MHI bus. It can be used with
QCOM based WWAN modems (like SDX55). Say Y or M.
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 992e92fb4e8e..f0695d68c47e 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -327,6 +327,8 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr)
break;
}
+ dev->base_addr = ioaddr;
+
/* Reserve any actual interrupt. */
if (dev->irq) {
retval = request_irq(dev->irq, cops_interrupt, 0, dev->name, dev);
@@ -334,8 +336,6 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr)
goto err_out;
}
- dev->base_addr = ioaddr;
-
lp = netdev_priv(dev);
spin_lock_init(&lp->lock);
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index f0e715a93852..69c270885ff0 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -584,11 +584,13 @@ loop:
printk("%02x ",ltdmacbuf[i]);
printk("\n");
}
+
handlecommand(dev);
- if(0xfa==inb_p(base+6)) {
- /* we timed out, so return */
- goto done;
- }
+
+ if (0xfa == inb_p(base + 6)) {
+ /* we timed out, so return */
+ goto done;
+ }
} else {
/* we don't seem to have a command */
if (!mboxinuse[0]) {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index dafeaef3cbd3..1d9137e77dfc 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -620,7 +620,7 @@ static int bond_check_dev_link(struct bonding *bond,
*/
/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
- strncpy(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
+ strscpy_pad(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
mii = if_mii(&ifr);
if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
mii->reg_num = MII_BMSR;
@@ -1525,6 +1525,7 @@ static struct slave *bond_alloc_slave(struct bonding *bond,
slave->bond = bond;
slave->dev = slave_dev;
+ INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
if (bond_kobj_init(slave))
return NULL;
@@ -1537,7 +1538,6 @@ static struct slave *bond_alloc_slave(struct bonding *bond,
return NULL;
}
}
- INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
return slave;
}
@@ -4202,16 +4202,16 @@ static u32 bond_rr_gen_slave_id(struct bonding *bond)
slave_id = prandom_u32();
break;
case 1:
- slave_id = bond->rr_tx_counter;
+ slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
break;
default:
reciprocal_packets_per_slave =
bond->params.reciprocal_packets_per_slave;
- slave_id = reciprocal_divide(bond->rr_tx_counter,
+ slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
+ slave_id = reciprocal_divide(slave_id,
reciprocal_packets_per_slave);
break;
}
- bond->rr_tx_counter++;
return slave_id;
}
@@ -4852,6 +4852,9 @@ static void bond_destructor(struct net_device *bond_dev)
if (bond->wq)
destroy_workqueue(bond->wq);
+
+ if (bond->rr_tx_counter)
+ free_percpu(bond->rr_tx_counter);
}
void bond_setup(struct net_device *bond_dev)
@@ -5330,10 +5333,8 @@ static int bond_check_params(struct bond_params *params)
(struct reciprocal_value) { 0 };
}
- if (primary) {
- strncpy(params->primary, primary, IFNAMSIZ);
- params->primary[IFNAMSIZ - 1] = 0;
- }
+ if (primary)
+ strscpy_pad(params->primary, primary, sizeof(params->primary));
memcpy(params->arp_targets, arp_target, sizeof(arp_target));
@@ -5352,6 +5353,15 @@ static int bond_init(struct net_device *bond_dev)
if (!bond->wq)
return -ENOMEM;
+ if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN) {
+ bond->rr_tx_counter = alloc_percpu(u32);
+ if (!bond->rr_tx_counter) {
+ destroy_workqueue(bond->wq);
+ bond->wq = NULL;
+ return -ENOMEM;
+ }
+ }
+
spin_lock_init(&bond->stats_lock);
netdev_lockdep_set_classes(bond_dev);
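bond_main and bond_options both migrate from strncpy() to strscpy_pad(): strncpy() leaves the destination unterminated when the source fills the buffer, while strscpy_pad() always NUL-terminates and zero-fills the tail (which also avoids leaking stale stack bytes through the mii ifreq). A rough userspace model of the semantics, assuming size > 0:

#include <stdio.h>
#include <string.h>

/* Rough model of strscpy_pad(): always NUL-terminates, zero-fills
 * the remainder, returns -E2BIG (-7 here) on truncation. */
static long strscpy_pad_demo(char *dst, const char *src, size_t size)
{
	size_t len = strnlen(src, size);
	long ret = (long)len;

	if (len == size) {	/* would not fit together with its NUL */
		len = size - 1;
		ret = -7;
	}
	memcpy(dst, src, len);
	memset(dst + len, 0, size - len);	/* NUL + zero padding */
	return ret;
}

int main(void)
{
	char a[8], b[8];

	strncpy(a, "exactly8", sizeof(a));	/* no NUL terminator! */
	strscpy_pad_demo(b, "exactly8", sizeof(b));

	printf("strncpy terminated:     %d\n", memchr(a, 0, 8) != NULL);
	printf("strscpy_pad terminated: %d\n", memchr(b, 0, 8) != NULL);
	printf("b = \"%s\"\n", b);		/* "exactly" */
	return 0;
}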
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index c9d3604ae129..0cf25de6f46d 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -705,7 +705,7 @@ out:
int __bond_opt_set_notify(struct bonding *bond,
unsigned int option, struct bond_opt_value *val)
{
- int ret = -ENOENT;
+ int ret;
ASSERT_RTNL();
@@ -1206,8 +1206,7 @@ static int bond_option_primary_set(struct bonding *bond,
RCU_INIT_POINTER(bond->primary_slave, NULL);
bond_select_active_slave(bond);
}
- strncpy(bond->params.primary, primary, IFNAMSIZ);
- bond->params.primary[IFNAMSIZ - 1] = 0;
+ strscpy_pad(bond->params.primary, primary, IFNAMSIZ);
netdev_dbg(bond->dev, "Recording %s as primary, but it has not been enslaved yet\n",
primary);
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index da6fffb4d5a8..d17482395a4d 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -269,9 +269,6 @@ static netdev_tx_t caif_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ser_device *ser;
- if (WARN_ON(!dev))
- return -EINVAL;
-
ser = netdev_priv(dev);
/* Send flow off once, on high water mark */
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 9ad9b39f480e..04d0bb3ffe89 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -169,7 +169,7 @@ static const struct can_bittiming_const at91_bittiming_const = {
};
#define AT91_IS(_model) \
-static inline int at91_is_sam##_model(const struct at91_priv *priv) \
+static inline int __maybe_unused at91_is_sam##_model(const struct at91_priv *priv) \
{ \
return priv->devtype_data.type == AT91_DEVTYPE_SAM##_model; \
}
diff --git a/drivers/net/can/c_can/Makefile b/drivers/net/can/c_can/Makefile
index e6a94c948531..6fa3b2b9e4b9 100644
--- a/drivers/net/can/c_can/Makefile
+++ b/drivers/net/can/c_can/Makefile
@@ -4,5 +4,10 @@
#
obj-$(CONFIG_CAN_C_CAN) += c_can.o
+
+c_can-objs :=
+c_can-objs += c_can_ethtool.o
+c_can-objs += c_can_main.o
+
obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o
obj-$(CONFIG_CAN_C_CAN_PCI) += c_can_pci.o
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 06045f610f0e..4247ff80a29c 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -205,7 +205,6 @@ struct c_can_priv {
struct c_can_raminit raminit_sys; /* RAMINIT via syscon regmap */
void (*raminit)(const struct c_can_priv *priv, bool enable);
u32 comm_rcv_high;
- u32 rxmasked;
u32 dlc[];
};
@@ -219,4 +218,6 @@ int c_can_power_up(struct net_device *dev);
int c_can_power_down(struct net_device *dev);
#endif
+void c_can_set_ethtool_ops(struct net_device *dev);
+
#endif /* C_CAN_H */
diff --git a/drivers/net/can/c_can/c_can_ethtool.c b/drivers/net/can/c_can/c_can_ethtool.c
new file mode 100644
index 000000000000..cd5f07fca2a5
--- /dev/null
+++ b/drivers/net/can/c_can/c_can_ethtool.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2021, Dario Binacchi <dariobin@libero.it>
+ */
+
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/can/dev.h>
+
+#include "c_can.h"
+
+static void c_can_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct c_can_priv *priv = netdev_priv(netdev);
+ struct platform_device *pdev = to_platform_device(priv->device);
+
+ strscpy(info->driver, "c_can", sizeof(info->driver));
+ strscpy(info->bus_info, pdev->name, sizeof(info->bus_info));
+}
+
+static void c_can_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct c_can_priv *priv = netdev_priv(netdev);
+
+ ring->rx_max_pending = priv->msg_obj_num;
+ ring->tx_max_pending = priv->msg_obj_num;
+ ring->rx_pending = priv->msg_obj_rx_num;
+ ring->tx_pending = priv->msg_obj_tx_num;
+}
+
+static const struct ethtool_ops c_can_ethtool_ops = {
+ .get_drvinfo = c_can_get_drvinfo,
+ .get_ringparam = c_can_get_ringparam,
+};
+
+void c_can_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &c_can_ethtool_ops;
+}
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can_main.c
index 313793f6922d..7588f70ca0fe 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can_main.c
@@ -599,7 +599,6 @@ static int c_can_chip_config(struct net_device *dev)
/* Clear all internal status */
atomic_set(&priv->tx_active, 0);
- priv->rxmasked = 0;
priv->tx_dir = 0;
/* set bittiming params */
@@ -1335,6 +1334,7 @@ int register_c_can_dev(struct net_device *dev)
dev->flags |= IFF_ECHO; /* we support local echo */
dev->netdev_ops = &c_can_netdev_ops;
+ c_can_set_ethtool_ops(dev);
err = register_candev(dev);
if (!err)
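The m_can hunks below replace hand-rolled *_SHIFT/*_MASK constant pairs with GENMASK(), whose companions FIELD_GET()/FIELD_PREP() in <linux/bitfield.h> derive the shift from the mask's lowest set bit, so a single constant describes each register field. Simplified userspace stand-ins for the three macros (no compile-time sanity checks; GCC/Clang __builtin_ctz assumed):

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_SHIFT(m)	(__builtin_ctz(m))
#define FIELD_GET(m, v)	(((v) & (m)) >> FIELD_SHIFT(m))
#define FIELD_PREP(m, x) (((x) << FIELD_SHIFT(m)) & (m))

#define NBTP_NBRP_MASK	GENMASK(24, 16)	/* from the hunk below */

int main(void)
{
	uint32_t reg = FIELD_PREP(NBTP_NBRP_MASK, 0x1ff);

	/* the mask encodes both width and position; no *_SHIFT constant */
	printf("mask  = 0x%08x\n", NBTP_NBRP_MASK);
	printf("field = 0x%x\n", FIELD_GET(NBTP_NBRP_MASK, reg));
	return 0;
}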
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 3cf6de21d19c..bba2a449ac70 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -83,44 +83,25 @@ enum m_can_reg {
#define MRAM_CFG_LEN 8
/* Core Release Register (CREL) */
-#define CREL_REL_SHIFT 28
-#define CREL_REL_MASK (0xF << CREL_REL_SHIFT)
-#define CREL_STEP_SHIFT 24
-#define CREL_STEP_MASK (0xF << CREL_STEP_SHIFT)
-#define CREL_SUBSTEP_SHIFT 20
-#define CREL_SUBSTEP_MASK (0xF << CREL_SUBSTEP_SHIFT)
+#define CREL_REL_MASK GENMASK(31, 28)
+#define CREL_STEP_MASK GENMASK(27, 24)
+#define CREL_SUBSTEP_MASK GENMASK(23, 20)
/* Data Bit Timing & Prescaler Register (DBTP) */
#define DBTP_TDC BIT(23)
-#define DBTP_DBRP_SHIFT 16
-#define DBTP_DBRP_MASK (0x1f << DBTP_DBRP_SHIFT)
-#define DBTP_DTSEG1_SHIFT 8
-#define DBTP_DTSEG1_MASK (0x1f << DBTP_DTSEG1_SHIFT)
-#define DBTP_DTSEG2_SHIFT 4
-#define DBTP_DTSEG2_MASK (0xf << DBTP_DTSEG2_SHIFT)
-#define DBTP_DSJW_SHIFT 0
-#define DBTP_DSJW_MASK (0xf << DBTP_DSJW_SHIFT)
+#define DBTP_DBRP_MASK GENMASK(20, 16)
+#define DBTP_DTSEG1_MASK GENMASK(12, 8)
+#define DBTP_DTSEG2_MASK GENMASK(7, 4)
+#define DBTP_DSJW_MASK GENMASK(3, 0)
/* Transmitter Delay Compensation Register (TDCR) */
-#define TDCR_TDCO_SHIFT 8
-#define TDCR_TDCO_MASK (0x7F << TDCR_TDCO_SHIFT)
-#define TDCR_TDCF_SHIFT 0
-#define TDCR_TDCF_MASK (0x7F << TDCR_TDCF_SHIFT)
+#define TDCR_TDCO_MASK GENMASK(14, 8)
+#define TDCR_TDCF_MASK GENMASK(6, 0)
/* Test Register (TEST) */
#define TEST_LBCK BIT(4)
-/* CC Control Register(CCCR) */
-#define CCCR_CMR_MASK 0x3
-#define CCCR_CMR_SHIFT 10
-#define CCCR_CMR_CANFD 0x1
-#define CCCR_CMR_CANFD_BRS 0x2
-#define CCCR_CMR_CAN 0x3
-#define CCCR_CME_MASK 0x3
-#define CCCR_CME_SHIFT 8
-#define CCCR_CME_CAN 0
-#define CCCR_CME_CANFD 0x1
-#define CCCR_CME_CANFD_BRS 0x2
+/* CC Control Register (CCCR) */
#define CCCR_TXP BIT(14)
#define CCCR_TEST BIT(7)
#define CCCR_DAR BIT(6)
@@ -130,24 +111,31 @@ enum m_can_reg {
#define CCCR_ASM BIT(2)
#define CCCR_CCE BIT(1)
#define CCCR_INIT BIT(0)
-#define CCCR_CANFD 0x10
+/* for version 3.0.x */
+#define CCCR_CMR_MASK GENMASK(11, 10)
+#define CCCR_CMR_CANFD 0x1
+#define CCCR_CMR_CANFD_BRS 0x2
+#define CCCR_CMR_CAN 0x3
+#define CCCR_CME_MASK GENMASK(9, 8)
+#define CCCR_CME_CAN 0
+#define CCCR_CME_CANFD 0x1
+#define CCCR_CME_CANFD_BRS 0x2
/* for version >=3.1.x */
#define CCCR_EFBI BIT(13)
#define CCCR_PXHD BIT(12)
#define CCCR_BRSE BIT(9)
#define CCCR_FDOE BIT(8)
-/* only for version >=3.2.x */
+/* for version >=3.2.x */
#define CCCR_NISO BIT(15)
+/* for version >=3.3.x */
+#define CCCR_WMM BIT(11)
+#define CCCR_UTSU BIT(10)
/* Nominal Bit Timing & Prescaler Register (NBTP) */
-#define NBTP_NSJW_SHIFT 25
-#define NBTP_NSJW_MASK (0x7f << NBTP_NSJW_SHIFT)
-#define NBTP_NBRP_SHIFT 16
-#define NBTP_NBRP_MASK (0x1ff << NBTP_NBRP_SHIFT)
-#define NBTP_NTSEG1_SHIFT 8
-#define NBTP_NTSEG1_MASK (0xff << NBTP_NTSEG1_SHIFT)
-#define NBTP_NTSEG2_SHIFT 0
-#define NBTP_NTSEG2_MASK (0x7f << NBTP_NTSEG2_SHIFT)
+#define NBTP_NSJW_MASK GENMASK(31, 25)
+#define NBTP_NBRP_MASK GENMASK(24, 16)
+#define NBTP_NTSEG1_MASK GENMASK(15, 8)
+#define NBTP_NTSEG2_MASK GENMASK(6, 0)
/* Timestamp Counter Configuration Register (TSCC) */
#define TSCC_TCP_MASK GENMASK(19, 16)
@@ -159,20 +147,18 @@ enum m_can_reg {
/* Timestamp Counter Value Register (TSCV) */
#define TSCV_TSC_MASK GENMASK(15, 0)
-/* Error Counter Register(ECR) */
+/* Error Counter Register (ECR) */
#define ECR_RP BIT(15)
-#define ECR_REC_SHIFT 8
-#define ECR_REC_MASK (0x7f << ECR_REC_SHIFT)
-#define ECR_TEC_SHIFT 0
-#define ECR_TEC_MASK 0xff
+#define ECR_REC_MASK GENMASK(14, 8)
+#define ECR_TEC_MASK GENMASK(7, 0)
-/* Protocol Status Register(PSR) */
+/* Protocol Status Register (PSR) */
#define PSR_BO BIT(7)
#define PSR_EW BIT(6)
#define PSR_EP BIT(5)
-#define PSR_LEC_MASK 0x7
+#define PSR_LEC_MASK GENMASK(2, 0)
-/* Interrupt Register(IR) */
+/* Interrupt Register (IR) */
#define IR_ALL_INT 0xffffffff
/* Renamed bits for versions > 3.1.x */
@@ -221,6 +207,7 @@ enum m_can_reg {
IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
IR_RF1L | IR_RF0L)
#define IR_ERR_ALL_30X (IR_ERR_STATE | IR_ERR_BUS_30X)
+
/* Interrupts for version >= 3.1.x */
#define IR_ERR_LEC_31X (IR_PED | IR_PEA)
#define IR_ERR_BUS_31X (IR_ERR_LEC_31X | IR_WDI | IR_ELO | IR_BEU | \
@@ -237,58 +224,47 @@ enum m_can_reg {
#define ILE_EINT0 BIT(0)
/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */
-#define RXFC_FWM_SHIFT 24
-#define RXFC_FWM_MASK (0x7f << RXFC_FWM_SHIFT)
-#define RXFC_FS_SHIFT 16
-#define RXFC_FS_MASK (0x7f << RXFC_FS_SHIFT)
+#define RXFC_FWM_MASK GENMASK(30, 24)
+#define RXFC_FS_MASK GENMASK(22, 16)
/* Rx FIFO 0/1 Status (RXF0S/RXF1S) */
#define RXFS_RFL BIT(25)
#define RXFS_FF BIT(24)
-#define RXFS_FPI_SHIFT 16
-#define RXFS_FPI_MASK 0x3f0000
-#define RXFS_FGI_SHIFT 8
-#define RXFS_FGI_MASK 0x3f00
-#define RXFS_FFL_MASK 0x7f
+#define RXFS_FPI_MASK GENMASK(21, 16)
+#define RXFS_FGI_MASK GENMASK(13, 8)
+#define RXFS_FFL_MASK GENMASK(6, 0)
/* Rx Buffer / FIFO Element Size Configuration (RXESC) */
-#define M_CAN_RXESC_8BYTES 0x0
-#define M_CAN_RXESC_64BYTES 0x777
+#define RXESC_RBDS_MASK GENMASK(10, 8)
+#define RXESC_F1DS_MASK GENMASK(6, 4)
+#define RXESC_F0DS_MASK GENMASK(2, 0)
+#define RXESC_64B 0x7
-/* Tx Buffer Configuration(TXBC) */
-#define TXBC_NDTB_SHIFT 16
-#define TXBC_NDTB_MASK (0x3f << TXBC_NDTB_SHIFT)
-#define TXBC_TFQS_SHIFT 24
-#define TXBC_TFQS_MASK (0x3f << TXBC_TFQS_SHIFT)
+/* Tx Buffer Configuration (TXBC) */
+#define TXBC_TFQS_MASK GENMASK(29, 24)
+#define TXBC_NDTB_MASK GENMASK(21, 16)
/* Tx FIFO/Queue Status (TXFQS) */
#define TXFQS_TFQF BIT(21)
-#define TXFQS_TFQPI_SHIFT 16
-#define TXFQS_TFQPI_MASK (0x1f << TXFQS_TFQPI_SHIFT)
-#define TXFQS_TFGI_SHIFT 8
-#define TXFQS_TFGI_MASK (0x1f << TXFQS_TFGI_SHIFT)
-#define TXFQS_TFFL_SHIFT 0
-#define TXFQS_TFFL_MASK (0x3f << TXFQS_TFFL_SHIFT)
+#define TXFQS_TFQPI_MASK GENMASK(20, 16)
+#define TXFQS_TFGI_MASK GENMASK(12, 8)
+#define TXFQS_TFFL_MASK GENMASK(5, 0)
-/* Tx Buffer Element Size Configuration(TXESC) */
-#define TXESC_TBDS_8BYTES 0x0
-#define TXESC_TBDS_64BYTES 0x7
+/* Tx Buffer Element Size Configuration (TXESC) */
+#define TXESC_TBDS_MASK GENMASK(2, 0)
+#define TXESC_TBDS_64B 0x7
/* Tx Event FIFO Configuration (TXEFC) */
-#define TXEFC_EFS_SHIFT 16
-#define TXEFC_EFS_MASK (0x3f << TXEFC_EFS_SHIFT)
+#define TXEFC_EFS_MASK GENMASK(21, 16)
/* Tx Event FIFO Status (TXEFS) */
#define TXEFS_TEFL BIT(25)
#define TXEFS_EFF BIT(24)
-#define TXEFS_EFGI_SHIFT 8
-#define TXEFS_EFGI_MASK (0x1f << TXEFS_EFGI_SHIFT)
-#define TXEFS_EFFL_SHIFT 0
-#define TXEFS_EFFL_MASK (0x3f << TXEFS_EFFL_SHIFT)
+#define TXEFS_EFGI_MASK GENMASK(12, 8)
+#define TXEFS_EFFL_MASK GENMASK(5, 0)
/* Tx Event FIFO Acknowledge (TXEFA) */
-#define TXEFA_EFAI_SHIFT 0
-#define TXEFA_EFAI_MASK (0x1f << TXEFA_EFAI_SHIFT)
+#define TXEFA_EFAI_MASK GENMASK(4, 0)
/* Message RAM Configuration (in bytes) */
#define SIDF_ELEMENT_SIZE 4
@@ -324,13 +300,12 @@ enum m_can_reg {
#define TX_BUF_EFC BIT(23)
#define TX_BUF_FDF BIT(21)
#define TX_BUF_BRS BIT(20)
-#define TX_BUF_MM_SHIFT 24
-#define TX_BUF_MM_MASK (0xff << TX_BUF_MM_SHIFT)
+#define TX_BUF_MM_MASK GENMASK(31, 24)
+#define TX_BUF_DLC_MASK GENMASK(19, 16)
/* Tx event FIFO Element */
/* E1 */
-#define TX_EVENT_MM_SHIFT TX_BUF_MM_SHIFT
-#define TX_EVENT_MM_MASK (0xff << TX_EVENT_MM_SHIFT)
+#define TX_EVENT_MM_MASK GENMASK(31, 24)
#define TX_EVENT_TXTS_MASK GENMASK(15, 0)
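
The macro conversion above follows a single pattern: each open-coded
(val & MASK) >> SHIFT pair becomes one GENMASK() definition plus
FIELD_GET()/FIELD_PREP() from <linux/bitfield.h>, which derive the
shift from the mask at compile time. A minimal sketch, reusing the ECR
field from this patch:

    #include <linux/bitfield.h>

    #define ECR_REC_MASK	GENMASK(14, 8)	/* receive error counter */

    /* FIELD_GET(mask, reg)  == (reg & mask) >> (ffs(mask) - 1) */
    u8 rec = FIELD_GET(ECR_REC_MASK, ecr);

    /* FIELD_PREP(mask, val) == (val << (ffs(mask) - 1)) & mask */
    u32 reg = FIELD_PREP(ECR_REC_MASK, rec);
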
static inline u32 m_can_read(struct m_can_classdev *cdev, enum m_can_reg reg)
@@ -449,8 +424,8 @@ static void m_can_clean(struct net_device *net)
net->stats.tx_errors++;
if (cdev->version > 30)
- putidx = ((m_can_read(cdev, M_CAN_TXFQS) &
- TXFQS_TFQPI_MASK) >> TXFQS_TFQPI_SHIFT);
+ putidx = FIELD_GET(TXFQS_TFQPI_MASK,
+ m_can_read(cdev, M_CAN_TXFQS));
can_free_echo_skb(cdev->net, putidx, NULL);
cdev->tx_skb = NULL;
@@ -490,7 +465,7 @@ static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
int i;
/* calculate the fifo get index for where to read data */
- fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_SHIFT;
+ fgi = FIELD_GET(RXFS_FGI_MASK, rxfs);
dlc = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_DLC);
if (dlc & RX_BUF_FDF)
skb = alloc_canfd_skb(dev, &cf);
@@ -663,8 +638,8 @@ static int __m_can_get_berr_counter(const struct net_device *dev,
unsigned int ecr;
ecr = m_can_read(cdev, M_CAN_ECR);
- bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT;
- bec->txerr = (ecr & ECR_TEC_MASK) >> ECR_TEC_SHIFT;
+ bec->rxerr = FIELD_GET(ECR_REC_MASK, ecr);
+ bec->txerr = FIELD_GET(ECR_TEC_MASK, ecr);
return 0;
}
@@ -1004,24 +979,23 @@ static void m_can_echo_tx_event(struct net_device *dev)
m_can_txefs = m_can_read(cdev, M_CAN_TXEFS);
/* Get Tx Event fifo element count */
- txe_count = (m_can_txefs & TXEFS_EFFL_MASK) >> TXEFS_EFFL_SHIFT;
+ txe_count = FIELD_GET(TXEFS_EFFL_MASK, m_can_txefs);
/* Get and process all sent elements */
for (i = 0; i < txe_count; i++) {
u32 txe, timestamp = 0;
/* retrieve get index */
- fgi = (m_can_read(cdev, M_CAN_TXEFS) & TXEFS_EFGI_MASK) >>
- TXEFS_EFGI_SHIFT;
+ fgi = FIELD_GET(TXEFS_EFGI_MASK, m_can_read(cdev, M_CAN_TXEFS));
/* get message marker, timestamp */
txe = m_can_txe_fifo_read(cdev, fgi, 4);
- msg_mark = (txe & TX_EVENT_MM_MASK) >> TX_EVENT_MM_SHIFT;
+ msg_mark = FIELD_GET(TX_EVENT_MM_MASK, txe);
timestamp = FIELD_GET(TX_EVENT_TXTS_MASK, txe);
/* ack txe element */
- m_can_write(cdev, M_CAN_TXEFA, (TXEFA_EFAI_MASK &
- (fgi << TXEFA_EFAI_SHIFT)));
+ m_can_write(cdev, M_CAN_TXEFA, FIELD_PREP(TXEFA_EFAI_MASK,
+ fgi));
/* update stats */
m_can_tx_update_stats(cdev, msg_mark, timestamp);
@@ -1147,8 +1121,10 @@ static int m_can_set_bittiming(struct net_device *dev)
sjw = bt->sjw - 1;
tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
tseg2 = bt->phase_seg2 - 1;
- reg_btp = (brp << NBTP_NBRP_SHIFT) | (sjw << NBTP_NSJW_SHIFT) |
- (tseg1 << NBTP_NTSEG1_SHIFT) | (tseg2 << NBTP_NTSEG2_SHIFT);
+ reg_btp = FIELD_PREP(NBTP_NBRP_MASK, brp) |
+ FIELD_PREP(NBTP_NSJW_MASK, sjw) |
+ FIELD_PREP(NBTP_NTSEG1_MASK, tseg1) |
+ FIELD_PREP(NBTP_NTSEG2_MASK, tseg2);
m_can_write(cdev, M_CAN_NBTP, reg_btp);
if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
@@ -1185,13 +1161,13 @@ static int m_can_set_bittiming(struct net_device *dev)
reg_btp |= DBTP_TDC;
m_can_write(cdev, M_CAN_TDCR,
- tdco << TDCR_TDCO_SHIFT);
+ FIELD_PREP(TDCR_TDCO_MASK, tdco));
}
- reg_btp |= (brp << DBTP_DBRP_SHIFT) |
- (sjw << DBTP_DSJW_SHIFT) |
- (tseg1 << DBTP_DTSEG1_SHIFT) |
- (tseg2 << DBTP_DTSEG2_SHIFT);
+ reg_btp |= FIELD_PREP(DBTP_DBRP_MASK, brp) |
+ FIELD_PREP(DBTP_DSJW_MASK, sjw) |
+ FIELD_PREP(DBTP_DTSEG1_MASK, tseg1) |
+ FIELD_PREP(DBTP_DTSEG2_MASK, tseg2);
m_can_write(cdev, M_CAN_DBTP, reg_btp);
}
@@ -1217,44 +1193,50 @@ static void m_can_chip_config(struct net_device *dev)
m_can_config_endisable(cdev, true);
/* RX Buffer/FIFO Element Size 64 bytes data field */
- m_can_write(cdev, M_CAN_RXESC, M_CAN_RXESC_64BYTES);
+ m_can_write(cdev, M_CAN_RXESC,
+ FIELD_PREP(RXESC_RBDS_MASK, RXESC_64B) |
+ FIELD_PREP(RXESC_F1DS_MASK, RXESC_64B) |
+ FIELD_PREP(RXESC_F0DS_MASK, RXESC_64B));
/* Accept Non-matching Frames Into FIFO 0 */
m_can_write(cdev, M_CAN_GFC, 0x0);
if (cdev->version == 30) {
/* only support one Tx Buffer currently */
- m_can_write(cdev, M_CAN_TXBC, (1 << TXBC_NDTB_SHIFT) |
+ m_can_write(cdev, M_CAN_TXBC, FIELD_PREP(TXBC_NDTB_MASK, 1) |
cdev->mcfg[MRAM_TXB].off);
} else {
/* TX FIFO is used for newer IP Core versions */
m_can_write(cdev, M_CAN_TXBC,
- (cdev->mcfg[MRAM_TXB].num << TXBC_TFQS_SHIFT) |
- (cdev->mcfg[MRAM_TXB].off));
+ FIELD_PREP(TXBC_TFQS_MASK,
+ cdev->mcfg[MRAM_TXB].num) |
+ cdev->mcfg[MRAM_TXB].off);
}
/* support 64 bytes payload */
- m_can_write(cdev, M_CAN_TXESC, TXESC_TBDS_64BYTES);
+ m_can_write(cdev, M_CAN_TXESC,
+ FIELD_PREP(TXESC_TBDS_MASK, TXESC_TBDS_64B));
/* TX Event FIFO */
if (cdev->version == 30) {
- m_can_write(cdev, M_CAN_TXEFC, (1 << TXEFC_EFS_SHIFT) |
+ m_can_write(cdev, M_CAN_TXEFC,
+ FIELD_PREP(TXEFC_EFS_MASK, 1) |
cdev->mcfg[MRAM_TXE].off);
} else {
/* Full TX Event FIFO is used */
m_can_write(cdev, M_CAN_TXEFC,
- ((cdev->mcfg[MRAM_TXE].num << TXEFC_EFS_SHIFT)
- & TXEFC_EFS_MASK) |
+ FIELD_PREP(TXEFC_EFS_MASK,
+ cdev->mcfg[MRAM_TXE].num) |
cdev->mcfg[MRAM_TXE].off);
}
/* rx fifo configuration, blocking mode, fifo size 1 */
m_can_write(cdev, M_CAN_RXF0C,
- (cdev->mcfg[MRAM_RXF0].num << RXFC_FS_SHIFT) |
+ FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF0].num) |
cdev->mcfg[MRAM_RXF0].off);
m_can_write(cdev, M_CAN_RXF1C,
- (cdev->mcfg[MRAM_RXF1].num << RXFC_FS_SHIFT) |
+ FIELD_PREP(RXFC_FS_MASK, cdev->mcfg[MRAM_RXF1].num) |
cdev->mcfg[MRAM_RXF1].off);
cccr = m_can_read(cdev, M_CAN_CCCR);
@@ -1264,11 +1246,11 @@ static void m_can_chip_config(struct net_device *dev)
/* Version 3.0.x */
cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_DAR |
- (CCCR_CMR_MASK << CCCR_CMR_SHIFT) |
- (CCCR_CME_MASK << CCCR_CME_SHIFT));
+ FIELD_PREP(CCCR_CMR_MASK, FIELD_MAX(CCCR_CMR_MASK)) |
+ FIELD_PREP(CCCR_CME_MASK, FIELD_MAX(CCCR_CME_MASK)));
if (cdev->can.ctrlmode & CAN_CTRLMODE_FD)
- cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT;
+ cccr |= FIELD_PREP(CCCR_CME_MASK, CCCR_CME_CANFD_BRS);
} else {
/* Version 3.1.x or 3.2.x */
@@ -1372,8 +1354,8 @@ static int m_can_check_core_release(struct m_can_classdev *cdev)
* Example: Version 3.2.1 => rel = 3; step = 2; substep = 1;
*/
crel_reg = m_can_read(cdev, M_CAN_CREL);
- rel = (u8)((crel_reg & CREL_REL_MASK) >> CREL_REL_SHIFT);
- step = (u8)((crel_reg & CREL_STEP_MASK) >> CREL_STEP_SHIFT);
+ rel = (u8)FIELD_GET(CREL_REL_MASK, crel_reg);
+ step = (u8)FIELD_GET(CREL_STEP_MASK, crel_reg);
if (rel == 3) {
/* M_CAN v3.x.y: create return value */
@@ -1593,16 +1575,16 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
if (cdev->can.ctrlmode & CAN_CTRLMODE_FD) {
cccr = m_can_read(cdev, M_CAN_CCCR);
- cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT);
+ cccr &= ~CCCR_CMR_MASK;
if (can_is_canfd_skb(skb)) {
if (cf->flags & CANFD_BRS)
- cccr |= CCCR_CMR_CANFD_BRS <<
- CCCR_CMR_SHIFT;
+ cccr |= FIELD_PREP(CCCR_CMR_MASK,
+ CCCR_CMR_CANFD_BRS);
else
- cccr |= CCCR_CMR_CANFD <<
- CCCR_CMR_SHIFT;
+ cccr |= FIELD_PREP(CCCR_CMR_MASK,
+ CCCR_CMR_CANFD);
} else {
- cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT;
+ cccr |= FIELD_PREP(CCCR_CMR_MASK, CCCR_CMR_CAN);
}
m_can_write(cdev, M_CAN_CCCR, cccr);
}
@@ -1629,8 +1611,8 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
}
/* get put index for frame */
- putidx = ((m_can_read(cdev, M_CAN_TXFQS) & TXFQS_TFQPI_MASK)
- >> TXFQS_TFQPI_SHIFT);
+ putidx = FIELD_GET(TXFQS_TFQPI_MASK,
+ m_can_read(cdev, M_CAN_TXFQS));
/* Write ID Field to FIFO Element */
m_can_fifo_write(cdev, putidx, M_CAN_FIFO_ID, id);
@@ -1648,9 +1630,9 @@ static netdev_tx_t m_can_tx_handler(struct m_can_classdev *cdev)
* sending the correct echo frame
*/
m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DLC,
- ((putidx << TX_BUF_MM_SHIFT) &
- TX_BUF_MM_MASK) |
- (can_fd_len2dlc(cf->len) << 16) |
+ FIELD_PREP(TX_BUF_MM_MASK, putidx) |
+ FIELD_PREP(TX_BUF_DLC_MASK,
+ can_fd_len2dlc(cf->len)) |
fdflags | TX_BUF_EFC);
for (i = 0; i < cf->len; i += 4)
@@ -1810,11 +1792,11 @@ static void m_can_of_parse_mram(struct m_can_classdev *cdev,
cdev->mcfg[MRAM_RXF0].off = cdev->mcfg[MRAM_XIDF].off +
cdev->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE;
cdev->mcfg[MRAM_RXF0].num = mram_config_vals[3] &
- (RXFC_FS_MASK >> RXFC_FS_SHIFT);
+ FIELD_MAX(RXFC_FS_MASK);
cdev->mcfg[MRAM_RXF1].off = cdev->mcfg[MRAM_RXF0].off +
cdev->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE;
cdev->mcfg[MRAM_RXF1].num = mram_config_vals[4] &
- (RXFC_FS_MASK >> RXFC_FS_SHIFT);
+ FIELD_MAX(RXFC_FS_MASK);
cdev->mcfg[MRAM_RXB].off = cdev->mcfg[MRAM_RXF1].off +
cdev->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE;
cdev->mcfg[MRAM_RXB].num = mram_config_vals[5];
@@ -1824,7 +1806,7 @@ static void m_can_of_parse_mram(struct m_can_classdev *cdev,
cdev->mcfg[MRAM_TXB].off = cdev->mcfg[MRAM_TXE].off +
cdev->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE;
cdev->mcfg[MRAM_TXB].num = mram_config_vals[7] &
- (TXBC_NDTB_MASK >> TXBC_NDTB_SHIFT);
+ FIELD_MAX(TXBC_NDTB_MASK);
dev_dbg(cdev->dev,
"sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index c44f3411e561..cfc1325aad10 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -239,7 +239,6 @@ static int softing_handle_1(struct softing *card)
DPRAM_INFO_BUSSTATE2 : DPRAM_INFO_BUSSTATE]);
/* timestamp */
tmp_u32 = le32_to_cpup((void *)ptr);
- ptr += 4;
ktime = softing_raw2ktime(card, tmp_u32);
++netdev->stats.rx_errors;
@@ -276,7 +275,6 @@ static int softing_handle_1(struct softing *card)
ktime = softing_raw2ktime(card, tmp_u32);
if (!(msg.can_id & CAN_RTR_FLAG))
memcpy(&msg.data[0], ptr, 8);
- ptr += 8;
/* update socket */
if (cmd & CMD_ACK) {
/* acknowledge, was tx msg */
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index 6f5d6d04a8b9..dd17b8c53e1c 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -871,7 +871,7 @@ static int hi3110_can_probe(struct spi_device *spi)
CAN_CTRLMODE_BERR_REPORTING;
if (of_id)
- priv->model = (enum hi3110_model)of_id->data;
+ priv->model = (enum hi3110_model)(uintptr_t)of_id->data;
else
priv->model = spi_get_device_id(spi)->driver_data;
priv->net = net;
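
A note on the double cast (the same pattern recurs in mcp251x below):
the of_device_id .data member is a const void *, and casting it
directly to a narrower enum triggers clang's
-Wvoid-pointer-to-enum-cast warning on 64-bit builds. Going through
uintptr_t makes the truncation explicit:

    /* before: warns when sizeof(enum) < sizeof(void *) */
    priv->model = (enum hi3110_model)of_id->data;

    /* after: pointer -> integer -> enum, warning-free */
    priv->model = (enum hi3110_model)(uintptr_t)of_id->data;
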
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 173c6614086f..0579ab74f728 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1330,7 +1330,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
if (match)
- priv->model = (enum mcp251x_model)match;
+ priv->model = (enum mcp251x_model)(uintptr_t)match;
else
priv->model = spi_get_device_id(spi)->driver_data;
priv->net = net;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index e0ae00e34c7b..47c3f408a799 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -560,7 +560,7 @@ mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
return __mcp251xfd_chip_set_mode(priv, mode_req, false);
}
-static inline int
+static inline int __maybe_unused
mcp251xfd_chip_set_mode_nowait(const struct mcp251xfd_priv *priv,
const u8 mode_req)
{
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index 3deb9f1cd292..f959215c9d53 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -76,7 +76,9 @@ config CAN_KVASER_USB
- Scania VCI2 (if you have the Kvaser logo on top)
- Kvaser BlackBird v2
- Kvaser Leaf Pro HS v2
+ - Kvaser Hybrid CAN/LIN
- Kvaser Hybrid 2xCAN/LIN
+ - Kvaser Hybrid Pro CAN/LIN
- Kvaser Hybrid Pro 2xCAN/LIN
- Kvaser Memorator 2xHS v2
- Kvaser Memorator Pro 2xHS v2
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 90ebcae13409..0cc0fc866a2a 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -79,16 +79,18 @@
#define USB_USBCAN_PRO_2HS_V2_PRODUCT_ID 264
#define USB_MEMO_2HS_PRODUCT_ID 265
#define USB_MEMO_PRO_2HS_V2_PRODUCT_ID 266
-#define USB_HYBRID_CANLIN_PRODUCT_ID 267
+#define USB_HYBRID_2CANLIN_PRODUCT_ID 267
#define USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID 268
#define USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID 269
-#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 270
+#define USB_HYBRID_PRO_2CANLIN_PRODUCT_ID 270
#define USB_U100_PRODUCT_ID 273
#define USB_U100P_PRODUCT_ID 274
#define USB_U100S_PRODUCT_ID 275
#define USB_USBCAN_PRO_4HS_PRODUCT_ID 276
+#define USB_HYBRID_CANLIN_PRODUCT_ID 277
+#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 278
#define USB_HYDRA_PRODUCT_ID_END \
- USB_USBCAN_PRO_4HS_PRODUCT_ID
+ USB_HYBRID_PRO_CANLIN_PRODUCT_ID
static inline bool kvaser_is_leaf(const struct usb_device_id *id)
{
@@ -187,14 +189,16 @@ static const struct usb_device_id kvaser_usb_table[] = {
{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_2CANLIN_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID) },
- { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_2CANLIN_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_4HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID) },
{ }
};
MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
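
Why USB_HYDRA_PRODUCT_ID_END must track the highest new product ID: the
driver classifies devices by PID range. A sketch assumed to mirror the
driver's helper (the new IDs 277/278 would fall outside the old END
value of 276):

    static inline bool kvaser_is_hydra(const struct usb_device_id *id)
    {
    	return id->idProduct >= USB_BLACKBIRD_V2_PRODUCT_ID &&
    	       id->idProduct <= USB_HYDRA_PRODUCT_ID_END;
    }
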
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 3ca6b394dd5f..6e199454e41d 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1084,6 +1084,11 @@ static int b53_setup(struct dsa_switch *ds)
unsigned int port;
int ret;
+ /* Request software untagging of the bridge PVID when the
+ * DSA_TAG_PROTO_NONE tagger is in use, since that tagger forces
+ * the CPU port to be tagged in all VLANs.
+ */
+ ds->untag_bridge_pvid = dev->tag_protocol == DSA_TAG_PROTO_NONE;
+
ret = b53_reset_switch(dev);
if (ret) {
dev_err(ds->dev, "failed to reset switch\n");
@@ -1455,6 +1460,13 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port,
return 0;
}
+static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+
+ return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port);
+}
+
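
Behaviour of the new helper, summarised (derived from the code above):

    tag_protocol          port         forced tagged?
    DSA_TAG_PROTO_NONE    CPU port     yes
    DSA_TAG_PROTO_NONE    user port    no
    any real tagger       any port     no

Only the tag-less CPU port keeps its tag in every VLAN, which is what
makes the software untagging requested in b53_setup() possible.
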
int b53_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
@@ -1477,7 +1489,7 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
untagged = true;
vl->members |= BIT(port);
- if (untagged && !dsa_is_cpu_port(ds, port))
+ if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
vl->untag |= BIT(port);
else
vl->untag &= ~BIT(port);
@@ -1514,7 +1526,7 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
if (pvid == vlan->vid)
pvid = b53_default_pvid(dev);
- if (untagged && !dsa_is_cpu_port(ds, port))
+ if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
vl->untag &= ~(BIT(port));
b53_set_vlan_entry(dev, vlan->vid, vl);
@@ -2660,7 +2672,6 @@ struct b53_device *b53_switch_alloc(struct device *base,
dev->priv = priv;
dev->ops = ops;
ds->ops = &b53_switch_ops;
- ds->untag_bridge_pvid = true;
dev->vlan_enabled = true;
/* Let DSA handle the case where multiple bridges span the same switch
* device and different VLAN awareness settings are requested, which
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index aaa12d73784e..3f4249de70c5 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -632,8 +632,7 @@ static int b53_srab_remove(struct platform_device *pdev)
struct b53_srab_priv *priv = dev->priv;
b53_srab_intr_set(priv, false);
- if (dev)
- b53_switch_remove(dev);
+ b53_switch_remove(dev);
return 0;
}
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 9150038b60cb..3b018fcf4412 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -821,11 +821,9 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
bcm_sf2_sw_mac_link_set(ds, port, interface, true);
if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
- u32 reg_rgmii_ctrl;
+ u32 reg_rgmii_ctrl = 0;
u32 reg, offset;
- reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
-
if (priv->type == BCM4908_DEVICE_ID ||
priv->type == BCM7445_DEVICE_ID)
offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
@@ -836,6 +834,7 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
interface == PHY_INTERFACE_MODE_RGMII_TXID ||
interface == PHY_INTERFACE_MODE_MII ||
interface == PHY_INTERFACE_MODE_REVMII) {
+ reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
reg = reg_readl(priv, reg_rgmii_ctrl);
reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index 4d78219da253..9fdcc4bde480 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -927,7 +927,6 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
/* Read table */
for (i = 0; i < hellcreek->fdb_entries; ++i) {
- unsigned char null_addr[ETH_ALEN] = { 0 };
struct hellcreek_fdb_entry entry = { 0 };
/* Read entry */
@@ -937,7 +936,7 @@ static int hellcreek_fdb_dump(struct dsa_switch *ds, int port,
hellcreek_write(hellcreek, 0x00, HR_FDBRDH);
/* Check valid */
- if (!memcmp(entry.mac, null_addr, ETH_ALEN))
+ if (is_zero_ether_addr(entry.mac))
continue;
/* Check port mask */
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index ad509a57a945..560f6843bb65 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -6,6 +6,7 @@
* Tristram Ha <Tristram.Ha@microchip.com>
*/
+#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio.h>
@@ -15,8 +16,10 @@
#include <linux/phy.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
+#include <linux/micrel_phy.h>
#include <net/dsa.h>
#include <net/switchdev.h>
+#include <linux/phylink.h>
#include "ksz_common.h"
#include "ksz8795_reg.h"
@@ -727,92 +730,114 @@ static void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
u8 restart, speed, ctrl, link;
const u8 *regs = ksz8->regs;
int processed = true;
+ u8 val1, val2;
u16 data = 0;
u8 p = phy;
switch (reg) {
- case PHY_REG_CTRL:
+ case MII_BMCR:
ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
if (restart & PORT_PHY_LOOPBACK)
- data |= PHY_LOOPBACK;
+ data |= BMCR_LOOPBACK;
if (ctrl & PORT_FORCE_100_MBIT)
- data |= PHY_SPEED_100MBIT;
+ data |= BMCR_SPEED100;
if (ksz_is_ksz88x3(dev)) {
if ((ctrl & PORT_AUTO_NEG_ENABLE))
- data |= PHY_AUTO_NEG_ENABLE;
+ data |= BMCR_ANENABLE;
} else {
if (!(ctrl & PORT_AUTO_NEG_DISABLE))
- data |= PHY_AUTO_NEG_ENABLE;
+ data |= BMCR_ANENABLE;
}
if (restart & PORT_POWER_DOWN)
- data |= PHY_POWER_DOWN;
+ data |= BMCR_PDOWN;
if (restart & PORT_AUTO_NEG_RESTART)
- data |= PHY_AUTO_NEG_RESTART;
+ data |= BMCR_ANRESTART;
if (ctrl & PORT_FORCE_FULL_DUPLEX)
- data |= PHY_FULL_DUPLEX;
+ data |= BMCR_FULLDPLX;
if (speed & PORT_HP_MDIX)
- data |= PHY_HP_MDIX;
+ data |= KSZ886X_BMCR_HP_MDIX;
if (restart & PORT_FORCE_MDIX)
- data |= PHY_FORCE_MDIX;
+ data |= KSZ886X_BMCR_FORCE_MDI;
if (restart & PORT_AUTO_MDIX_DISABLE)
- data |= PHY_AUTO_MDIX_DISABLE;
+ data |= KSZ886X_BMCR_DISABLE_AUTO_MDIX;
if (restart & PORT_TX_DISABLE)
- data |= PHY_TRANSMIT_DISABLE;
+ data |= KSZ886X_BMCR_DISABLE_TRANSMIT;
if (restart & PORT_LED_OFF)
- data |= PHY_LED_DISABLE;
+ data |= KSZ886X_BMCR_DISABLE_LED;
break;
- case PHY_REG_STATUS:
+ case MII_BMSR:
ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
- data = PHY_100BTX_FD_CAPABLE |
- PHY_100BTX_CAPABLE |
- PHY_10BT_FD_CAPABLE |
- PHY_10BT_CAPABLE |
- PHY_AUTO_NEG_CAPABLE;
+ data = BMSR_100FULL |
+ BMSR_100HALF |
+ BMSR_10FULL |
+ BMSR_10HALF |
+ BMSR_ANEGCAPABLE;
if (link & PORT_AUTO_NEG_COMPLETE)
- data |= PHY_AUTO_NEG_ACKNOWLEDGE;
+ data |= BMSR_ANEGCOMPLETE;
if (link & PORT_STAT_LINK_GOOD)
- data |= PHY_LINK_STATUS;
+ data |= BMSR_LSTATUS;
break;
- case PHY_REG_ID_1:
+ case MII_PHYSID1:
data = KSZ8795_ID_HI;
break;
- case PHY_REG_ID_2:
+ case MII_PHYSID2:
if (ksz_is_ksz88x3(dev))
data = KSZ8863_ID_LO;
else
data = KSZ8795_ID_LO;
break;
- case PHY_REG_AUTO_NEGOTIATION:
+ case MII_ADVERTISE:
ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
- data = PHY_AUTO_NEG_802_3;
+ data = ADVERTISE_CSMA;
if (ctrl & PORT_AUTO_NEG_SYM_PAUSE)
- data |= PHY_AUTO_NEG_SYM_PAUSE;
+ data |= ADVERTISE_PAUSE_CAP;
if (ctrl & PORT_AUTO_NEG_100BTX_FD)
- data |= PHY_AUTO_NEG_100BTX_FD;
+ data |= ADVERTISE_100FULL;
if (ctrl & PORT_AUTO_NEG_100BTX)
- data |= PHY_AUTO_NEG_100BTX;
+ data |= ADVERTISE_100HALF;
if (ctrl & PORT_AUTO_NEG_10BT_FD)
- data |= PHY_AUTO_NEG_10BT_FD;
+ data |= ADVERTISE_10FULL;
if (ctrl & PORT_AUTO_NEG_10BT)
- data |= PHY_AUTO_NEG_10BT;
+ data |= ADVERTISE_10HALF;
break;
- case PHY_REG_REMOTE_CAPABILITY:
+ case MII_LPA:
ksz_pread8(dev, p, regs[P_REMOTE_STATUS], &link);
- data = PHY_AUTO_NEG_802_3;
+ data = LPA_SLCT;
if (link & PORT_REMOTE_SYM_PAUSE)
- data |= PHY_AUTO_NEG_SYM_PAUSE;
+ data |= LPA_PAUSE_CAP;
if (link & PORT_REMOTE_100BTX_FD)
- data |= PHY_AUTO_NEG_100BTX_FD;
+ data |= LPA_100FULL;
if (link & PORT_REMOTE_100BTX)
- data |= PHY_AUTO_NEG_100BTX;
+ data |= LPA_100HALF;
if (link & PORT_REMOTE_10BT_FD)
- data |= PHY_AUTO_NEG_10BT_FD;
+ data |= LPA_10FULL;
if (link & PORT_REMOTE_10BT)
- data |= PHY_AUTO_NEG_10BT;
- if (data & ~PHY_AUTO_NEG_802_3)
- data |= PHY_REMOTE_ACKNOWLEDGE_NOT;
+ data |= LPA_10HALF;
+ if (data & ~LPA_SLCT)
+ data |= LPA_LPACK;
+ break;
+ case PHY_REG_LINK_MD:
+ ksz_pread8(dev, p, REG_PORT_LINK_MD_CTRL, &val1);
+ ksz_pread8(dev, p, REG_PORT_LINK_MD_RESULT, &val2);
+ if (val1 & PORT_START_CABLE_DIAG)
+ data |= PHY_START_CABLE_DIAG;
+
+ if (val1 & PORT_CABLE_10M_SHORT)
+ data |= PHY_CABLE_10M_SHORT;
+
+ data |= FIELD_PREP(PHY_CABLE_DIAG_RESULT_M,
+ FIELD_GET(PORT_CABLE_DIAG_RESULT_M, val1));
+
+ data |= FIELD_PREP(PHY_CABLE_FAULT_COUNTER_M,
+ (FIELD_GET(PORT_CABLE_FAULT_COUNTER_H, val1) << 8) |
+ FIELD_GET(PORT_CABLE_FAULT_COUNTER_L, val2));
+ break;
+ case PHY_REG_PHY_CTRL:
+ ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
+ if (link & PORT_MDIX_STATUS)
+ data |= KSZ886X_CTRL_MDIX_STAT;
break;
default:
processed = false;
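
The emulation rework above replaces the driver-private PHY_REG_*/PHY_*
constants with the standard clause-22 definitions from <linux/mii.h>
(MII_BMCR, BMCR_*, BMSR_*, ADVERTISE_*, LPA_*) and vendor-specific
KSZ886X_* bits from <linux/micrel_phy.h>, so the emulated registers use
the encodings phylib already understands. Spot-checks against the old
macros (values per IEEE 802.3, clause 22):

    MII_BMCR == 0:      BMCR_ANENABLE == BIT(12), BMCR_SPEED100 == BIT(13)
    MII_BMSR == 1:      BMSR_LSTATUS == BIT(2)
    MII_ADVERTISE == 4: ADVERTISE_100FULL == BIT(8)
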
@@ -830,14 +855,14 @@ static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
u8 p = phy;
switch (reg) {
- case PHY_REG_CTRL:
+ case MII_BMCR:
/* Do not support PHY reset function. */
- if (val & PHY_RESET)
+ if (val & BMCR_RESET)
break;
ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
data = speed;
- if (val & PHY_HP_MDIX)
+ if (val & KSZ886X_BMCR_HP_MDIX)
data |= PORT_HP_MDIX;
else
data &= ~PORT_HP_MDIX;
@@ -846,12 +871,12 @@ static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
data = ctrl;
if (ksz_is_ksz88x3(dev)) {
- if ((val & PHY_AUTO_NEG_ENABLE))
+ if ((val & BMCR_ANENABLE))
data |= PORT_AUTO_NEG_ENABLE;
else
data &= ~PORT_AUTO_NEG_ENABLE;
} else {
- if (!(val & PHY_AUTO_NEG_ENABLE))
+ if (!(val & BMCR_ANENABLE))
data |= PORT_AUTO_NEG_DISABLE;
else
data &= ~PORT_AUTO_NEG_DISABLE;
@@ -861,11 +886,11 @@ static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
data |= PORT_AUTO_NEG_DISABLE;
}
- if (val & PHY_SPEED_100MBIT)
+ if (val & BMCR_SPEED100)
data |= PORT_FORCE_100_MBIT;
else
data &= ~PORT_FORCE_100_MBIT;
- if (val & PHY_FULL_DUPLEX)
+ if (val & BMCR_FULLDPLX)
data |= PORT_FORCE_FULL_DUPLEX;
else
data &= ~PORT_FORCE_FULL_DUPLEX;
@@ -873,38 +898,38 @@ static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
ksz_pwrite8(dev, p, regs[P_FORCE_CTRL], data);
ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
data = restart;
- if (val & PHY_LED_DISABLE)
+ if (val & KSZ886X_BMCR_DISABLE_LED)
data |= PORT_LED_OFF;
else
data &= ~PORT_LED_OFF;
- if (val & PHY_TRANSMIT_DISABLE)
+ if (val & KSZ886X_BMCR_DISABLE_TRANSMIT)
data |= PORT_TX_DISABLE;
else
data &= ~PORT_TX_DISABLE;
- if (val & PHY_AUTO_NEG_RESTART)
+ if (val & BMCR_ANRESTART)
data |= PORT_AUTO_NEG_RESTART;
else
data &= ~(PORT_AUTO_NEG_RESTART);
- if (val & PHY_POWER_DOWN)
+ if (val & BMCR_PDOWN)
data |= PORT_POWER_DOWN;
else
data &= ~PORT_POWER_DOWN;
- if (val & PHY_AUTO_MDIX_DISABLE)
+ if (val & KSZ886X_BMCR_DISABLE_AUTO_MDIX)
data |= PORT_AUTO_MDIX_DISABLE;
else
data &= ~PORT_AUTO_MDIX_DISABLE;
- if (val & PHY_FORCE_MDIX)
+ if (val & KSZ886X_BMCR_FORCE_MDI)
data |= PORT_FORCE_MDIX;
else
data &= ~PORT_FORCE_MDIX;
- if (val & PHY_LOOPBACK)
+ if (val & BMCR_LOOPBACK)
data |= PORT_PHY_LOOPBACK;
else
data &= ~PORT_PHY_LOOPBACK;
if (data != restart)
ksz_pwrite8(dev, p, regs[P_NEG_RESTART_CTRL], data);
break;
- case PHY_REG_AUTO_NEGOTIATION:
+ case MII_ADVERTISE:
ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
data = ctrl;
data &= ~(PORT_AUTO_NEG_SYM_PAUSE |
@@ -912,19 +937,23 @@ static void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
PORT_AUTO_NEG_100BTX |
PORT_AUTO_NEG_10BT_FD |
PORT_AUTO_NEG_10BT);
- if (val & PHY_AUTO_NEG_SYM_PAUSE)
+ if (val & ADVERTISE_PAUSE_CAP)
data |= PORT_AUTO_NEG_SYM_PAUSE;
- if (val & PHY_AUTO_NEG_100BTX_FD)
+ if (val & ADVERTISE_100FULL)
data |= PORT_AUTO_NEG_100BTX_FD;
- if (val & PHY_AUTO_NEG_100BTX)
+ if (val & ADVERTISE_100HALF)
data |= PORT_AUTO_NEG_100BTX;
- if (val & PHY_AUTO_NEG_10BT_FD)
+ if (val & ADVERTISE_10FULL)
data |= PORT_AUTO_NEG_10BT_FD;
- if (val & PHY_AUTO_NEG_10BT)
+ if (val & ADVERTISE_10HALF)
data |= PORT_AUTO_NEG_10BT;
if (data != ctrl)
ksz_pwrite8(dev, p, regs[P_LOCAL_CTRL], data);
break;
+ case PHY_REG_LINK_MD:
+ if (val & PHY_START_CABLE_DIAG)
+ ksz_port_cfg(dev, p, REG_PORT_LINK_MD_CTRL, PORT_START_CABLE_DIAG, true);
+ break;
default:
break;
}
@@ -941,6 +970,18 @@ static enum dsa_tag_protocol ksz8_get_tag_protocol(struct dsa_switch *ds,
DSA_TAG_PROTO_KSZ9893 : DSA_TAG_PROTO_KSZ8795;
}
+static u32 ksz8_sw_get_phy_flags(struct dsa_switch *ds, int port)
+{
+ /* Silicon Errata Sheet (DS80000830A):
+ * Port 1 does not work with LinkMD Cable-Testing.
+ * Port 1 does not respond to received PAUSE control frames.
+ */
+ if (!port)
+ return MICREL_KSZ8_P1_ERRATA;
+
+ return 0;
+}
+
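
For context (assumed plumbing, not part of this hunk): DSA passes the
.get_phy_flags return value to phy_attach_direct(), which stores it in
phydev->dev_flags, so the Micrel PHY driver can check the errata bit:

    /* sketch of the assumed consumer in the PHY driver */
    if (phydev->dev_flags & MICREL_KSZ8_P1_ERRATA)
    	return -EOPNOTSUPP;	/* e.g. refuse cable-test on port 1 */
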
static void ksz8_get_strings(struct dsa_switch *ds, int port,
u32 stringset, uint8_t *buf)
{
@@ -1419,11 +1460,66 @@ static int ksz8_setup(struct dsa_switch *ds)
return 0;
}
+static void ksz8_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ struct ksz_device *dev = ds->priv;
+
+ if (port == dev->cpu_port) {
+ if (state->interface != PHY_INTERFACE_MODE_RMII &&
+ state->interface != PHY_INTERFACE_MODE_MII &&
+ state->interface != PHY_INTERFACE_MODE_NA)
+ goto unsupported;
+ } else {
+ if (state->interface != PHY_INTERFACE_MODE_INTERNAL &&
+ state->interface != PHY_INTERFACE_MODE_NA)
+ goto unsupported;
+ }
+
+ /* Allow all the expected bits */
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Autoneg);
+
+ /* Silicon Errata Sheet (DS80000830A):
+ * "Port 1 does not respond to received flow control PAUSE frames"
+ * So, disable Pause support on "Port 1" (port == 0) for all ksz88x3
+ * switches.
+ */
+ if (!ksz_is_ksz88x3(dev) || port)
+ phylink_set(mask, Pause);
+
+ /* Asym pause is not supported on KSZ8863 and KSZ8873 */
+ if (!ksz_is_ksz88x3(dev))
+ phylink_set(mask, Asym_Pause);
+
+ /* Only 10M and 100M are supported */
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ return;
+
+unsupported:
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ dev_err(ds->dev, "Unsupported interface: %s, port: %d\n",
+ phy_modes(state->interface), port);
+}
+
static const struct dsa_switch_ops ksz8_switch_ops = {
.get_tag_protocol = ksz8_get_tag_protocol,
+ .get_phy_flags = ksz8_sw_get_phy_flags,
.setup = ksz8_setup,
.phy_read = ksz_phy_read16,
.phy_write = ksz_phy_write16,
+ .phylink_validate = ksz8_validate,
.phylink_mac_link_down = ksz_mac_link_down,
.port_enable = ksz_enable_port,
.get_strings = ksz8_get_strings,
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
index c2e52c40a54c..a32355624f31 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -249,7 +249,7 @@
#define REG_PORT_4_LINK_MD_CTRL 0x4A
#define PORT_CABLE_10M_SHORT BIT(7)
-#define PORT_CABLE_DIAG_RESULT_M 0x3
+#define PORT_CABLE_DIAG_RESULT_M GENMASK(6, 5)
#define PORT_CABLE_DIAG_RESULT_S 5
#define PORT_CABLE_STAT_NORMAL 0
#define PORT_CABLE_STAT_OPEN 1
@@ -744,68 +744,6 @@
#define PORT_ACL_FORCE_DLR_MISS BIT(0)
-#ifndef PHY_REG_CTRL
-#define PHY_REG_CTRL 0
-
-#define PHY_RESET BIT(15)
-#define PHY_LOOPBACK BIT(14)
-#define PHY_SPEED_100MBIT BIT(13)
-#define PHY_AUTO_NEG_ENABLE BIT(12)
-#define PHY_POWER_DOWN BIT(11)
-#define PHY_MII_DISABLE BIT(10)
-#define PHY_AUTO_NEG_RESTART BIT(9)
-#define PHY_FULL_DUPLEX BIT(8)
-#define PHY_COLLISION_TEST_NOT BIT(7)
-#define PHY_HP_MDIX BIT(5)
-#define PHY_FORCE_MDIX BIT(4)
-#define PHY_AUTO_MDIX_DISABLE BIT(3)
-#define PHY_REMOTE_FAULT_DISABLE BIT(2)
-#define PHY_TRANSMIT_DISABLE BIT(1)
-#define PHY_LED_DISABLE BIT(0)
-
-#define PHY_REG_STATUS 1
-
-#define PHY_100BT4_CAPABLE BIT(15)
-#define PHY_100BTX_FD_CAPABLE BIT(14)
-#define PHY_100BTX_CAPABLE BIT(13)
-#define PHY_10BT_FD_CAPABLE BIT(12)
-#define PHY_10BT_CAPABLE BIT(11)
-#define PHY_MII_SUPPRESS_CAPABLE_NOT BIT(6)
-#define PHY_AUTO_NEG_ACKNOWLEDGE BIT(5)
-#define PHY_REMOTE_FAULT BIT(4)
-#define PHY_AUTO_NEG_CAPABLE BIT(3)
-#define PHY_LINK_STATUS BIT(2)
-#define PHY_JABBER_DETECT_NOT BIT(1)
-#define PHY_EXTENDED_CAPABILITY BIT(0)
-
-#define PHY_REG_ID_1 2
-#define PHY_REG_ID_2 3
-
-#define PHY_REG_AUTO_NEGOTIATION 4
-
-#define PHY_AUTO_NEG_NEXT_PAGE_NOT BIT(15)
-#define PHY_AUTO_NEG_REMOTE_FAULT_NOT BIT(13)
-#define PHY_AUTO_NEG_SYM_PAUSE BIT(10)
-#define PHY_AUTO_NEG_100BT4 BIT(9)
-#define PHY_AUTO_NEG_100BTX_FD BIT(8)
-#define PHY_AUTO_NEG_100BTX BIT(7)
-#define PHY_AUTO_NEG_10BT_FD BIT(6)
-#define PHY_AUTO_NEG_10BT BIT(5)
-#define PHY_AUTO_NEG_SELECTOR 0x001F
-#define PHY_AUTO_NEG_802_3 0x0001
-
-#define PHY_REG_REMOTE_CAPABILITY 5
-
-#define PHY_REMOTE_NEXT_PAGE_NOT BIT(15)
-#define PHY_REMOTE_ACKNOWLEDGE_NOT BIT(14)
-#define PHY_REMOTE_REMOTE_FAULT_NOT BIT(13)
-#define PHY_REMOTE_SYM_PAUSE BIT(10)
-#define PHY_REMOTE_100BTX_FD BIT(8)
-#define PHY_REMOTE_100BTX BIT(7)
-#define PHY_REMOTE_10BT_FD BIT(6)
-#define PHY_REMOTE_10BT BIT(5)
-#endif
-
#define KSZ8795_ID_HI 0x0022
#define KSZ8795_ID_LO 0x1550
#define KSZ8863_ID_LO 0x1430
@@ -815,13 +753,14 @@
#define PHY_REG_LINK_MD 0x1D
#define PHY_START_CABLE_DIAG BIT(15)
+#define PHY_CABLE_DIAG_RESULT_M GENMASK(14, 13)
#define PHY_CABLE_DIAG_RESULT 0x6000
#define PHY_CABLE_STAT_NORMAL 0x0000
#define PHY_CABLE_STAT_OPEN 0x2000
#define PHY_CABLE_STAT_SHORT 0x4000
#define PHY_CABLE_STAT_FAILED 0x6000
#define PHY_CABLE_10M_SHORT BIT(12)
-#define PHY_CABLE_FAULT_COUNTER 0x01FF
+#define PHY_CABLE_FAULT_COUNTER_M GENMASK(8, 0)
#define PHY_REG_PHY_CTRL 0x1F
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 55e5d479acce..854e25f43fa7 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -1530,6 +1530,7 @@ static const struct ksz_chip_data ksz9477_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
+ .phy_errata_9477 = true,
},
};
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index db838343fb05..93136f7e69f5 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1273,14 +1273,6 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
- /* The real fabric path would be decided on the membership in the
- * entry of VLAN table. PCR_MATRIX set up here with ALL_MEMBERS
- * means potential VLAN can be consisting of certain subset of all
- * ports.
- */
- mt7530_rmw(priv, MT7530_PCR_P(port),
- PCR_MATRIX_MASK, PCR_MATRIX(MT7530_ALL_MEMBERS));
-
/* Trapped into security mode allows packet forwarding through VLAN
* table lookup. CPU port is set to fallback mode to let untagged
* frames pass through.
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index ce607fbaaa3a..a2a15919b960 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -940,6 +940,8 @@ static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port);
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, tx_pause);
+
/* Undo the effects of felix_phylink_mac_link_down:
* enable MAC module
*/
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 2473bebe48e6..f966a253d1c7 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1227,12 +1227,17 @@ static int vsc9959_qos_port_tas_set(struct ocelot *ocelot, int port,
if (taprio->num_entries > VSC9959_TAS_GCL_ENTRY_MAX)
return -ERANGE;
- /* Set port num and disable ALWAYS_GUARD_BAND_SCH_Q, which means set
- * guard band to be implemented for nonschedule queues to schedule
- * queues transition.
+ /* Enable guard band. The switch will schedule frames without taking
+ * their length into account. Thus we'll always need to enable the
+ * guard band which reserves the time of a maximum sized frame at the
+ * end of the time window.
+ *
+ * Although the ALWAYS_GUARD_BAND_SCH_Q bit is global for all ports, we
+ * need to set PORT_NUM, because subsequent writes to PARAM_CFG_REG_n
+ * operate on the port number.
*/
- ocelot_rmw(ocelot,
- QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port),
+ ocelot_rmw(ocelot, QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(port) |
+ QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q,
QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M |
QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q,
QSYS_TAS_PARAM_CFG_CTRL);
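
Back-of-envelope for the guard band cost (illustrative, not from the
patch): the band reserves the wire time of one maximum-sized frame per
gate window. Assuming a 1518-byte frame plus about 20 bytes of preamble
and inter-frame gap:

    /* wire time of a maximum-sized frame, in nanoseconds */
    static u64 guard_band_ns(u64 link_speed_bps)
    {
    	/* (1518 + 20) * 8 = 12304 bits -> ~12304 ns at 1 Gbps */
    	return div64_u64((1518ULL + 20) * 8 * NSEC_PER_SEC,
    			 link_speed_bps);
    }
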
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 84f93a874d50..deae923c8b7a 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -1206,6 +1206,11 @@ static int seville_probe(struct platform_device *pdev)
felix->info = &seville_info_vsc9953;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "Invalid resource\n");
+ goto err_alloc_felix;
+ }
felix->switch_base = res->start;
ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL);
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 1f1b7c4dda13..1f63f50f73f1 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -89,26 +89,26 @@ qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
*page = regaddr & 0x3ff;
}
-static u32
-qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum)
+static int
+qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
{
- u32 val;
int ret;
ret = bus->read(bus, phy_id, regnum);
if (ret >= 0) {
- val = ret;
+ *val = ret;
ret = bus->read(bus, phy_id, regnum + 1);
- val |= ret << 16;
+ *val |= ret << 16;
}
if (ret < 0) {
dev_err_ratelimited(&bus->dev,
"failed to read qca8k 32bit register\n");
+ *val = 0;
return ret;
}
- return val;
+ return 0;
}
static void
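
The pattern established here (and extended to qca8k_read() below) moves
register contents into an output parameter, so a negative return value
can only mean an error rather than a u32 register value that happens to
have the top bit set. Typical caller shape, using names from the patch:

    u32 val;
    int ret;

    ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
    if (ret < 0)
    	return ret;
    /* val is only valid when ret == 0 */
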
@@ -148,26 +148,26 @@ qca8k_set_page(struct mii_bus *bus, u16 page)
return 0;
}
-static u32
-qca8k_read(struct qca8k_priv *priv, u32 reg)
+static int
+qca8k_read(struct qca8k_priv *priv, u32 reg, u32 *val)
{
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
- u32 val;
+ int ret;
qca8k_split_addr(reg, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
- val = qca8k_set_page(bus, page);
- if (val < 0)
+ ret = qca8k_set_page(bus, page);
+ if (ret < 0)
goto exit;
- val = qca8k_mii_read32(bus, 0x10 | r2, r1);
+ ret = qca8k_mii_read32(bus, 0x10 | r2, r1, val);
exit:
mutex_unlock(&bus->mdio_lock);
- return val;
+ return ret;
}
static int
@@ -208,11 +208,9 @@ qca8k_rmw(struct qca8k_priv *priv, u32 reg, u32 mask, u32 write_val)
if (ret < 0)
goto exit;
- val = qca8k_mii_read32(bus, 0x10 | r2, r1);
- if (val < 0) {
- ret = val;
+ ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
+ if (ret < 0)
goto exit;
- }
val &= ~mask;
val |= write_val;
@@ -240,15 +238,8 @@ static int
qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
- int ret;
-
- ret = qca8k_read(priv, reg);
- if (ret < 0)
- return ret;
-
- *val = ret;
- return 0;
+ return qca8k_read(priv, reg, val);
}
static int
@@ -296,18 +287,18 @@ static struct regmap_config qca8k_regmap_config = {
static int
qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
{
+ int ret, ret1;
u32 val;
- int ret;
- ret = read_poll_timeout(qca8k_read, val, !(val & mask),
+ ret = read_poll_timeout(qca8k_read, ret1, !(val & mask),
0, QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
- priv, reg);
+ priv, reg, &val);
/* Check if qca8k_read has failed for a different reason
* before returning -ETIMEDOUT
*/
- if (ret < 0 && val < 0)
- return val;
+ if (ret < 0 && ret1 < 0)
+ return ret1;
return ret;
}
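
For reference, the read_poll_timeout() contract from <linux/iopoll.h>:

    /* read_poll_timeout(op, val, cond, sleep_us, timeout_us,
     *                   sleep_before_read, args...)
     *
     * Repeatedly does val = op(args...) until cond is true or
     * timeout_us elapses; returns 0 on success, -ETIMEDOUT otherwise.
     * Here ret1 captures qca8k_read() failures while the condition
     * still tests the polled register value in val.
     */
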
@@ -316,13 +307,13 @@ static int
qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
{
u32 reg[4], val;
- int i;
+ int i, ret;
/* load the ARL table into an array */
for (i = 0; i < 4; i++) {
- val = qca8k_read(priv, QCA8K_REG_ATU_DATA0 + (i * 4));
- if (val < 0)
- return val;
+ ret = qca8k_read(priv, QCA8K_REG_ATU_DATA0 + (i * 4), &val);
+ if (ret < 0)
+ return ret;
reg[i] = val;
}
@@ -396,9 +387,9 @@ qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd, int port)
/* Check for table full violation when adding an entry */
if (cmd == QCA8K_FDB_LOAD) {
- reg = qca8k_read(priv, QCA8K_REG_ATU_FUNC);
- if (reg < 0)
- return reg;
+ ret = qca8k_read(priv, QCA8K_REG_ATU_FUNC, &reg);
+ if (ret < 0)
+ return ret;
if (reg & QCA8K_ATU_FUNC_FULL)
return -1;
}
@@ -477,9 +468,9 @@ qca8k_vlan_access(struct qca8k_priv *priv, enum qca8k_vlan_cmd cmd, u16 vid)
/* Check for table full violation when adding an entry */
if (cmd == QCA8K_VLAN_LOAD) {
- reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC1);
- if (reg < 0)
- return reg;
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC1, &reg);
+ if (ret < 0)
+ return ret;
if (reg & QCA8K_VTU_FUNC1_FULL)
return -ENOMEM;
}
@@ -505,11 +496,9 @@ qca8k_vlan_add(struct qca8k_priv *priv, u8 port, u16 vid, bool untagged)
if (ret < 0)
goto out;
- reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC0);
- if (reg < 0) {
- ret = reg;
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
+ if (ret < 0)
goto out;
- }
reg |= QCA8K_VTU_FUNC0_VALID | QCA8K_VTU_FUNC0_IVL_EN;
reg &= ~(QCA8K_VTU_FUNC0_EG_MODE_MASK << QCA8K_VTU_FUNC0_EG_MODE_S(port));
if (untagged)
@@ -542,11 +531,9 @@ qca8k_vlan_del(struct qca8k_priv *priv, u8 port, u16 vid)
if (ret < 0)
goto out;
- reg = qca8k_read(priv, QCA8K_REG_VTU_FUNC0);
- if (reg < 0) {
- ret = reg;
+ ret = qca8k_read(priv, QCA8K_REG_VTU_FUNC0, &reg);
+ if (ret < 0)
goto out;
- }
reg &= ~(3 << QCA8K_VTU_FUNC0_EG_MODE_S(port));
reg |= QCA8K_VTU_FUNC0_EG_MODE_NOT <<
QCA8K_VTU_FUNC0_EG_MODE_S(port);
@@ -638,19 +625,19 @@ qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
{
u16 r1, r2, page;
u32 val;
- int ret;
+ int ret, ret1;
qca8k_split_addr(reg, &r1, &r2, &page);
- ret = read_poll_timeout(qca8k_mii_read32, val, !(val & mask), 0,
+ ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
- bus, 0x10 | r2, r1);
+ bus, 0x10 | r2, r1, &val);
/* Check if qca8k_read has failed for a different reason
* before returning -ETIMEDOUT
*/
- if (ret < 0 && val < 0)
- return val;
+ if (ret < 0 && ret1 < 0)
+ return ret1;
return ret;
}
@@ -725,7 +712,7 @@ qca8k_mdio_read(struct mii_bus *slave_bus, int phy, int regnum)
if (ret)
goto exit;
- val = qca8k_mii_read32(bus, 0x10 | r2, r1);
+ ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
exit:
/* even if the busy_wait times out, try to clear the MASTER_EN */
@@ -733,10 +720,10 @@ exit:
mutex_unlock(&bus->mdio_lock);
- if (val >= 0)
- val &= QCA8K_MDIO_MASTER_DATA_MASK;
+ if (ret >= 0)
+ ret = val & QCA8K_MDIO_MASTER_DATA_MASK;
- return val;
+ return ret;
}
static int
@@ -1141,6 +1128,7 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
{
struct qca8k_priv *priv = ds->priv;
u32 reg, val;
+ int ret;
switch (port) {
case 0: /* 1st CPU port */
@@ -1211,7 +1199,9 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
/* Enable/disable SerDes auto-negotiation as necessary */
- val = qca8k_read(priv, QCA8K_REG_PWS);
+ ret = qca8k_read(priv, QCA8K_REG_PWS, &val);
+ if (ret)
+ return;
if (phylink_autoneg_inband(mode))
val &= ~QCA8K_PWS_SERDES_AEN_DIS;
else
@@ -1219,7 +1209,9 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
qca8k_write(priv, QCA8K_REG_PWS, val);
/* Configure the SGMII parameters */
- val = qca8k_read(priv, QCA8K_REG_SGMII_CTRL);
+ ret = qca8k_read(priv, QCA8K_REG_SGMII_CTRL, &val);
+ if (ret)
+ return;
val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
QCA8K_SGMII_EN_TX | QCA8K_SGMII_EN_SD;
@@ -1314,10 +1306,11 @@ qca8k_phylink_mac_link_state(struct dsa_switch *ds, int port,
{
struct qca8k_priv *priv = ds->priv;
u32 reg;
+ int ret;
- reg = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port));
- if (reg < 0)
- return reg;
+ ret = qca8k_read(priv, QCA8K_REG_PORT_STATUS(port), &reg);
+ if (ret < 0)
+ return ret;
state->link = !!(reg & QCA8K_PORT_STATUS_LINK_UP);
state->an_complete = state->link;
@@ -1419,25 +1412,26 @@ qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
const struct qca8k_mib_desc *mib;
u32 reg, i, val;
- u64 hi;
+ u32 hi = 0;
+ int ret;
for (i = 0; i < ARRAY_SIZE(ar8327_mib); i++) {
mib = &ar8327_mib[i];
reg = QCA8K_PORT_MIB_COUNTER(port) + mib->offset;
- val = qca8k_read(priv, reg);
- if (val < 0)
+ ret = qca8k_read(priv, reg, &val);
+ if (ret < 0)
continue;
if (mib->size == 2) {
- hi = qca8k_read(priv, reg + 4);
- if (hi < 0)
+ ret = qca8k_read(priv, reg + 4, &hi);
+ if (ret < 0)
continue;
}
data[i] = val;
if (mib->size == 2)
- data[i] |= hi << 32;
+ data[i] |= (u64)hi << 32;
}
}
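
Why the (u64) cast matters (illustrative):

    u32 hi = 0x1;
    u64 a = hi << 32;	/* undefined: shift count >= width of u32 */
    u64 b = (u64)hi << 32;	/* 0x100000000, as intended */

Widening before the shift is what lets the 64-bit MIB counters keep
their high word.
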
@@ -1459,11 +1453,9 @@ qca8k_set_mac_eee(struct dsa_switch *ds, int port, struct ethtool_eee *eee)
int ret;
mutex_lock(&priv->reg_mutex);
- reg = qca8k_read(priv, QCA8K_REG_EEE_CTRL);
- if (reg < 0) {
- ret = reg;
+ ret = qca8k_read(priv, QCA8K_REG_EEE_CTRL, &reg);
+ if (ret < 0)
goto exit;
- }
if (eee->eee_enabled)
reg |= lpi_en;
@@ -1793,14 +1785,15 @@ static int qca8k_read_switch_id(struct qca8k_priv *priv)
const struct qca8k_match_data *data;
u32 val;
u8 id;
+ int ret;
/* get the switch ID from the compatible string */
data = of_device_get_match_data(priv->dev);
if (!data)
return -ENODEV;
- val = qca8k_read(priv, QCA8K_REG_MASK_CTRL);
- if (val < 0)
+ ret = qca8k_read(priv, QCA8K_REG_MASK_CTRL, &val);
+ if (ret < 0)
return -ENODEV;
id = QCA8K_MASK_CTRL_DEVICE_ID(val & QCA8K_MASK_CTRL_DEVICE_ID_MASK);
diff --git a/drivers/net/dsa/sja1105/Kconfig b/drivers/net/dsa/sja1105/Kconfig
index 5e83b365f17a..8383cd6d2178 100644
--- a/drivers/net/dsa/sja1105/Kconfig
+++ b/drivers/net/dsa/sja1105/Kconfig
@@ -3,6 +3,7 @@ config NET_DSA_SJA1105
tristate "NXP SJA1105 Ethernet switch family support"
depends on NET_DSA && SPI
select NET_DSA_TAG_SJA1105
+ select PCS_XPCS
select PACKING
select CRC32
help
diff --git a/drivers/net/dsa/sja1105/Makefile b/drivers/net/dsa/sja1105/Makefile
index a860e3a910be..40d69e6c0bae 100644
--- a/drivers/net/dsa/sja1105/Makefile
+++ b/drivers/net/dsa/sja1105/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_NET_DSA_SJA1105) += sja1105.o
sja1105-objs := \
sja1105_spi.o \
sja1105_main.o \
+ sja1105_mdio.o \
sja1105_flower.o \
sja1105_ethtool.o \
sja1105_devlink.o \
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index 10fc6b54f9f6..39124726bdd9 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -13,14 +13,12 @@
#include <linux/mutex.h>
#include "sja1105_static_config.h"
-#define SJA1105_NUM_PORTS 5
-#define SJA1105_NUM_TC 8
#define SJA1105ET_FDB_BIN_SIZE 4
/* The hardware value is in multiples of 10 ms.
* The passed parameter is in multiples of 1 ms.
*/
#define SJA1105_AGEING_TIME_MS(ms) ((ms) / 10)
-#define SJA1105_NUM_L2_POLICERS 45
+#define SJA1105_NUM_L2_POLICERS SJA1110_MAX_L2_POLICING_COUNT
typedef enum {
SPI_READ = 0,
@@ -47,7 +45,6 @@ struct sja1105_regs {
u64 rgu;
u64 vl_status;
u64 config;
- u64 sgmii;
u64 rmii_pll1;
u64 ptppinst;
u64 ptppindur;
@@ -57,19 +54,41 @@ struct sja1105_regs {
u64 ptpclkcorp;
u64 ptpsyncts;
u64 ptpschtm;
- u64 ptpegr_ts[SJA1105_NUM_PORTS];
- u64 pad_mii_tx[SJA1105_NUM_PORTS];
- u64 pad_mii_rx[SJA1105_NUM_PORTS];
- u64 pad_mii_id[SJA1105_NUM_PORTS];
- u64 cgu_idiv[SJA1105_NUM_PORTS];
- u64 mii_tx_clk[SJA1105_NUM_PORTS];
- u64 mii_rx_clk[SJA1105_NUM_PORTS];
- u64 mii_ext_tx_clk[SJA1105_NUM_PORTS];
- u64 mii_ext_rx_clk[SJA1105_NUM_PORTS];
- u64 rgmii_tx_clk[SJA1105_NUM_PORTS];
- u64 rmii_ref_clk[SJA1105_NUM_PORTS];
- u64 rmii_ext_tx_clk[SJA1105_NUM_PORTS];
- u64 stats[__MAX_SJA1105_STATS_AREA][SJA1105_NUM_PORTS];
+ u64 ptpegr_ts[SJA1105_MAX_NUM_PORTS];
+ u64 pad_mii_tx[SJA1105_MAX_NUM_PORTS];
+ u64 pad_mii_rx[SJA1105_MAX_NUM_PORTS];
+ u64 pad_mii_id[SJA1105_MAX_NUM_PORTS];
+ u64 cgu_idiv[SJA1105_MAX_NUM_PORTS];
+ u64 mii_tx_clk[SJA1105_MAX_NUM_PORTS];
+ u64 mii_rx_clk[SJA1105_MAX_NUM_PORTS];
+ u64 mii_ext_tx_clk[SJA1105_MAX_NUM_PORTS];
+ u64 mii_ext_rx_clk[SJA1105_MAX_NUM_PORTS];
+ u64 rgmii_tx_clk[SJA1105_MAX_NUM_PORTS];
+ u64 rmii_ref_clk[SJA1105_MAX_NUM_PORTS];
+ u64 rmii_ext_tx_clk[SJA1105_MAX_NUM_PORTS];
+ u64 stats[__MAX_SJA1105_STATS_AREA][SJA1105_MAX_NUM_PORTS];
+ u64 mdio_100base_tx;
+ u64 mdio_100base_t1;
+ u64 pcs_base[SJA1105_MAX_NUM_PORTS];
+};
+
+struct sja1105_mdio_private {
+ struct sja1105_private *priv;
+};
+
+enum {
+ SJA1105_SPEED_AUTO,
+ SJA1105_SPEED_10MBPS,
+ SJA1105_SPEED_100MBPS,
+ SJA1105_SPEED_1000MBPS,
+ SJA1105_SPEED_2500MBPS,
+ SJA1105_SPEED_MAX,
+};
+
+enum sja1105_internal_phy_t {
+ SJA1105_NO_PHY = 0,
+ SJA1105_PHY_BASE_TX,
+ SJA1105_PHY_BASE_T1,
};
struct sja1105_info {
@@ -89,6 +108,10 @@ struct sja1105_info {
*/
int ptpegr_ts_bytes;
int num_cbs_shapers;
+ int max_frame_mem;
+ int num_ports;
+ bool multiple_cascade_ports;
+ enum dsa_tag_protocol tag_proto;
const struct sja1105_dynamic_table_ops *dyn_ops;
const struct sja1105_table_ops *static_ops;
const struct sja1105_regs *regs;
@@ -108,7 +131,19 @@ struct sja1105_info {
const unsigned char *addr, u16 vid);
void (*ptp_cmd_packing)(u8 *buf, struct sja1105_ptp_cmd *cmd,
enum packing_op op);
+ bool (*rxtstamp)(struct dsa_switch *ds, int port, struct sk_buff *skb);
+ void (*txtstamp)(struct dsa_switch *ds, int port, struct sk_buff *skb);
+ int (*clocking_setup)(struct sja1105_private *priv);
+ int (*pcs_mdio_read)(struct mii_bus *bus, int phy, int reg);
+ int (*pcs_mdio_write)(struct mii_bus *bus, int phy, int reg, u16 val);
const char *name;
+ bool supports_mii[SJA1105_MAX_NUM_PORTS];
+ bool supports_rmii[SJA1105_MAX_NUM_PORTS];
+ bool supports_rgmii[SJA1105_MAX_NUM_PORTS];
+ bool supports_sgmii[SJA1105_MAX_NUM_PORTS];
+ bool supports_2500basex[SJA1105_MAX_NUM_PORTS];
+ enum sja1105_internal_phy_t internal_phy[SJA1105_MAX_NUM_PORTS];
+ const u64 port_speed[SJA1105_SPEED_MAX];
};
enum sja1105_key_type {
@@ -206,8 +241,10 @@ enum sja1105_vlan_state {
struct sja1105_private {
struct sja1105_static_config static_config;
- bool rgmii_rx_delay[SJA1105_NUM_PORTS];
- bool rgmii_tx_delay[SJA1105_NUM_PORTS];
+ bool rgmii_rx_delay[SJA1105_MAX_NUM_PORTS];
+ bool rgmii_tx_delay[SJA1105_MAX_NUM_PORTS];
+ phy_interface_t phy_mode[SJA1105_MAX_NUM_PORTS];
+ bool fixed_link[SJA1105_MAX_NUM_PORTS];
bool best_effort_vlan_filtering;
unsigned long learn_ena;
unsigned long ucast_egress_floods;
@@ -220,7 +257,7 @@ struct sja1105_private {
struct list_head dsa_8021q_vlans;
struct list_head bridge_vlans;
struct sja1105_flow_block flow_block;
- struct sja1105_port ports[SJA1105_NUM_PORTS];
+ struct sja1105_port ports[SJA1105_MAX_NUM_PORTS];
/* Serializes transmission of management frames so that
* the switch doesn't confuse them with one another.
*/
@@ -229,6 +266,10 @@ struct sja1105_private {
enum sja1105_vlan_state vlan_state;
struct devlink_region **regions;
struct sja1105_cbs_entry *cbs;
+ struct mii_bus *mdio_base_t1;
+ struct mii_bus *mdio_base_tx;
+ struct mii_bus *mdio_pcs;
+ struct dw_xpcs *xpcs[SJA1105_MAX_NUM_PORTS];
struct sja1105_tagger_data tagger_data;
struct sja1105_ptp_data ptp_data;
struct sja1105_tas_data tas_data;
@@ -258,6 +299,14 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
struct netlink_ext_ack *extack);
void sja1105_frame_memory_partitioning(struct sja1105_private *priv);
+/* From sja1105_mdio.c */
+int sja1105_mdiobus_register(struct dsa_switch *ds);
+void sja1105_mdiobus_unregister(struct dsa_switch *ds);
+int sja1105_pcs_mdio_read(struct mii_bus *bus, int phy, int reg);
+int sja1105_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val);
+int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg);
+int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val);
+
/* From sja1105_devlink.c */
int sja1105_devlink_setup(struct dsa_switch *ds);
void sja1105_devlink_teardown(struct dsa_switch *ds);
@@ -291,6 +340,10 @@ extern const struct sja1105_info sja1105p_info;
extern const struct sja1105_info sja1105q_info;
extern const struct sja1105_info sja1105r_info;
extern const struct sja1105_info sja1105s_info;
+extern const struct sja1105_info sja1110a_info;
+extern const struct sja1105_info sja1110b_info;
+extern const struct sja1105_info sja1110c_info;
+extern const struct sja1105_info sja1110d_info;
/* From sja1105_clocking.c */
@@ -306,16 +359,11 @@ typedef enum {
XMII_MODE_SGMII = 3,
} sja1105_phy_interface_t;
-typedef enum {
- SJA1105_SPEED_10MBPS = 3,
- SJA1105_SPEED_100MBPS = 2,
- SJA1105_SPEED_1000MBPS = 1,
- SJA1105_SPEED_AUTO = 0,
-} sja1105_speed_t;
-
int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port);
+int sja1110_setup_rgmii_delay(const void *ctx, int port);
int sja1105_clocking_setup_port(struct sja1105_private *priv, int port);
int sja1105_clocking_setup(struct sja1105_private *priv);
+int sja1110_clocking_setup(struct sja1105_private *priv);
/* From sja1105_ethtool.c */
void sja1105_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data);
@@ -336,6 +384,18 @@ enum sja1105_iotag {
SJA1105_S_TAG = 1, /* Outer VLAN header */
};
+enum sja1110_vlan_type {
+ SJA1110_VLAN_INVALID = 0,
+ SJA1110_VLAN_C_TAG = 1, /* Single inner VLAN tag */
+ SJA1110_VLAN_S_TAG = 2, /* Single outer VLAN tag */
+ SJA1110_VLAN_D_TAG = 3, /* Double tagged, use outer tag for lookup */
+};
+
+enum sja1110_shaper_type {
+ SJA1110_LEAKY_BUCKET_SHAPER = 0,
+ SJA1110_CBS_SHAPER = 1,
+};
+
u8 sja1105et_fdb_hash(struct sja1105_private *priv, const u8 *addr, u16 vid);
int sja1105et_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid);
diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c
index 2a9b8a6a5306..645edea5a81f 100644
--- a/drivers/net/dsa/sja1105/sja1105_clocking.c
+++ b/drivers/net/dsa/sja1105/sja1105_clocking.c
@@ -6,6 +6,7 @@
#include "sja1105.h"
#define SJA1105_SIZE_CGU_CMD 4
+#define SJA1110_BASE_TIMER_CLK SJA1110_CGU_ADDR(0x74)
/* Common structure for CFG_PAD_MIIx_RX and CFG_PAD_MIIx_TX */
struct sja1105_cfg_pad_mii {
@@ -61,6 +62,12 @@ struct sja1105_cgu_pll_ctrl {
u64 pd;
};
+struct sja1110_cgu_outclk {
+ u64 clksrc;
+ u64 autoblock;
+ u64 pd;
+};
+
enum {
CLKSRC_MII0_TX_CLK = 0x00,
CLKSRC_MII0_RX_CLK = 0x01,
@@ -110,6 +117,9 @@ static int sja1105_cgu_idiv_config(struct sja1105_private *priv, int port,
struct sja1105_cgu_idiv idiv;
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ if (regs->cgu_idiv[port] == SJA1105_RSV_ADDR)
+ return 0;
+
if (enabled && factor != 1 && factor != 10) {
dev_err(dev, "idiv factor must be 1 or 10\n");
return -ERANGE;
@@ -159,6 +169,9 @@ static int sja1105_cgu_mii_tx_clk_config(struct sja1105_private *priv,
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
int clksrc;
+ if (regs->mii_tx_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
if (role == XMII_MAC)
clksrc = mac_clk_sources[port];
else
@@ -188,6 +201,9 @@ sja1105_cgu_mii_rx_clk_config(struct sja1105_private *priv, int port)
CLKSRC_MII4_RX_CLK,
};
+ if (regs->mii_rx_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
/* Payload for packed_buf */
mii_rx_clk.clksrc = clk_sources[port];
mii_rx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
@@ -212,6 +228,9 @@ sja1105_cgu_mii_ext_tx_clk_config(struct sja1105_private *priv, int port)
CLKSRC_IDIV4,
};
+ if (regs->mii_ext_tx_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
/* Payload for packed_buf */
mii_ext_tx_clk.clksrc = clk_sources[port];
mii_ext_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
@@ -236,6 +255,9 @@ sja1105_cgu_mii_ext_rx_clk_config(struct sja1105_private *priv, int port)
CLKSRC_IDIV4,
};
+ if (regs->mii_ext_rx_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
/* Payload for packed_buf */
mii_ext_rx_clk.clksrc = clk_sources[port];
mii_ext_rx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
@@ -313,14 +335,17 @@ sja1105_cgu_pll_control_packing(void *buf, struct sja1105_cgu_pll_ctrl *cmd,
}
static int sja1105_cgu_rgmii_tx_clk_config(struct sja1105_private *priv,
- int port, sja1105_speed_t speed)
+ int port, u64 speed)
{
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cgu_mii_ctrl txc;
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
int clksrc;
- if (speed == SJA1105_SPEED_1000MBPS) {
+ if (regs->rgmii_tx_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
+ if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) {
clksrc = CLKSRC_PLL0;
} else {
int clk_sources[] = {CLKSRC_IDIV0, CLKSRC_IDIV1, CLKSRC_IDIV2,
@@ -368,6 +393,9 @@ static int sja1105_rgmii_cfg_pad_tx_config(struct sja1105_private *priv,
struct sja1105_cfg_pad_mii pad_mii_tx = {0};
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ if (regs->pad_mii_tx[port] == SJA1105_RSV_ADDR)
+ return 0;
+
/* Payload */
pad_mii_tx.d32_os = 3; /* TXD[3:2] output stage: */
/* high noise/high speed */
@@ -394,6 +422,9 @@ static int sja1105_cfg_pad_rx_config(struct sja1105_private *priv, int port)
struct sja1105_cfg_pad_mii pad_mii_rx = {0};
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ if (regs->pad_mii_rx[port] == SJA1105_RSV_ADDR)
+ return 0;
+
/* Payload */
pad_mii_rx.d32_ih = 0; /* RXD[3:2] input stage hysteresis: */
/* non-Schmitt (default) */
@@ -437,6 +468,35 @@ sja1105_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
sja1105_packing(buf, &cmd->txc_pd, 0, 0, size, op);
}
+static void
+sja1110_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
+ enum packing_op op)
+{
+ const int size = SJA1105_SIZE_CGU_CMD;
+ u64 range = 4;
+
+ /* Fields RXC_RANGE and TXC_RANGE select the input frequency range:
+ * 0 = 2.5MHz
+ * 1 = 25MHz
+ * 2 = 50MHz
+ * 3 = 125MHz
+ * 4 = Automatically determined by port speed.
+	 * There's no point in defining a structure different from the one for
+	 * SJA1105, so just hardcode the frequency range to automatic, as
+	 * before.
+ */
+ sja1105_packing(buf, &cmd->rxc_stable_ovr, 26, 26, size, op);
+ sja1105_packing(buf, &cmd->rxc_delay, 25, 21, size, op);
+ sja1105_packing(buf, &range, 20, 18, size, op);
+ sja1105_packing(buf, &cmd->rxc_bypass, 17, 17, size, op);
+ sja1105_packing(buf, &cmd->rxc_pd, 16, 16, size, op);
+ sja1105_packing(buf, &cmd->txc_stable_ovr, 10, 10, size, op);
+ sja1105_packing(buf, &cmd->txc_delay, 9, 5, size, op);
+ sja1105_packing(buf, &range, 4, 2, size, op);
+ sja1105_packing(buf, &cmd->txc_bypass, 1, 1, size, op);
+ sja1105_packing(buf, &cmd->txc_pd, 0, 0, size, op);
+}
+
/* Valid range for the phase, in integer degrees, is between 73.8 and 101.7 */
static u64 sja1105_rgmii_delay(u64 phase)
{
@@ -495,40 +555,65 @@ int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
packed_buf, SJA1105_SIZE_CGU_CMD);
}
+int sja1110_setup_rgmii_delay(const void *ctx, int port)
+{
+ const struct sja1105_private *priv = ctx;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+
+ pad_mii_id.rxc_pd = 1;
+ pad_mii_id.txc_pd = 1;
+
+ if (priv->rgmii_rx_delay[port]) {
+ pad_mii_id.rxc_delay = sja1105_rgmii_delay(90);
+ /* The "BYPASS" bit in SJA1110 is actually a "don't bypass" */
+ pad_mii_id.rxc_bypass = 1;
+ pad_mii_id.rxc_pd = 0;
+ }
+
+ if (priv->rgmii_tx_delay[port]) {
+ pad_mii_id.txc_delay = sja1105_rgmii_delay(90);
+ pad_mii_id.txc_bypass = 1;
+ pad_mii_id.txc_pd = 0;
+ }
+
+ sja1110_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
+
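For reference, since its body falls outside these hunks: a sketch of the sja1105_rgmii_delay() helper that both setup_rgmii_delay implementations call with a 90 degree argument. The 73.8 degree base and 0.9 degree step size are inferred from the comment above and should be treated as assumptions to verify against the hardware manual (UM11040):

	/* Convert a phase shift in integer degrees into the 5-bit
	 * rxc_delay/txc_delay tuning word, assuming the pad delay is
	 * 73.8 + tune * 0.9 degrees. Multiply by 10 first to keep one
	 * decimal place of precision in integer arithmetic.
	 */
	static u64 sja1105_rgmii_delay(u64 phase)
	{
		phase *= 10;

		return (phase - 738) / 9;
	}

With phase = 90, as passed above, this evaluates to (900 - 738) / 9 = 18.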
static int sja1105_rgmii_clocking_setup(struct sja1105_private *priv, int port,
sja1105_mii_role_t role)
{
struct device *dev = priv->ds->dev;
struct sja1105_mac_config_entry *mac;
- sja1105_speed_t speed;
+ u64 speed;
int rc;
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
speed = mac[port].speed;
- dev_dbg(dev, "Configuring port %d RGMII at speed %dMbps\n",
+ dev_dbg(dev, "Configuring port %d RGMII at speed %lldMbps\n",
port, speed);
- switch (speed) {
- case SJA1105_SPEED_1000MBPS:
+ if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS]) {
/* 1000Mbps, IDIV disabled (125 MHz) */
rc = sja1105_cgu_idiv_config(priv, port, false, 1);
- break;
- case SJA1105_SPEED_100MBPS:
+ } else if (speed == priv->info->port_speed[SJA1105_SPEED_100MBPS]) {
/* 100Mbps, IDIV enabled, divide by 1 (25 MHz) */
rc = sja1105_cgu_idiv_config(priv, port, true, 1);
- break;
- case SJA1105_SPEED_10MBPS:
+ } else if (speed == priv->info->port_speed[SJA1105_SPEED_10MBPS]) {
/* 10Mbps, IDIV enabled, divide by 10 (2.5 MHz) */
rc = sja1105_cgu_idiv_config(priv, port, true, 10);
- break;
- case SJA1105_SPEED_AUTO:
+ } else if (speed == priv->info->port_speed[SJA1105_SPEED_AUTO]) {
/* Skip CGU configuration if there is no speed available
* (e.g. link is not established yet)
*/
dev_dbg(dev, "Speed not available, skipping CGU config\n");
return 0;
- default:
+ } else {
rc = -EINVAL;
}
@@ -546,14 +631,9 @@ static int sja1105_rgmii_clocking_setup(struct sja1105_private *priv, int port,
dev_err(dev, "Failed to configure Tx pad registers\n");
return rc;
}
+
if (!priv->info->setup_rgmii_delay)
return 0;
- /* The role has no hardware effect for RGMII. However we use it as
- * a proxy for this interface being a MAC-to-MAC connection, with
- * the RGMII internal delays needing to be applied by us.
- */
- if (role == XMII_MAC)
- return 0;
return priv->info->setup_rgmii_delay(priv, port);
}
@@ -572,6 +652,9 @@ static int sja1105_cgu_rmii_ref_clk_config(struct sja1105_private *priv,
CLKSRC_MII4_TX_CLK,
};
+ if (regs->rmii_ref_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
/* Payload for packed_buf */
ref_clk.clksrc = clk_sources[port];
ref_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
@@ -589,6 +672,9 @@ sja1105_cgu_rmii_ext_tx_clk_config(struct sja1105_private *priv, int port)
struct sja1105_cgu_mii_ctrl ext_tx_clk;
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ if (regs->rmii_ext_tx_clk[port] == SJA1105_RSV_ADDR)
+ return 0;
+
/* Payload for packed_buf */
ext_tx_clk.clksrc = CLKSRC_PLL1;
ext_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
@@ -607,6 +693,9 @@ static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv)
struct device *dev = priv->ds->dev;
int rc;
+ if (regs->rmii_pll1 == SJA1105_RSV_ADDR)
+ return 0;
+
/* PLL1 must be enabled and output 50 Mhz.
* This is done by writing first 0x0A010941 to
* the PLL_1_C register and then deasserting
@@ -721,12 +810,39 @@ int sja1105_clocking_setup_port(struct sja1105_private *priv, int port)
int sja1105_clocking_setup(struct sja1105_private *priv)
{
+ struct dsa_switch *ds = priv->ds;
int port, rc;
- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ for (port = 0; port < ds->num_ports; port++) {
rc = sja1105_clocking_setup_port(priv, port);
if (rc < 0)
return rc;
}
return 0;
}
+
+static void
+sja1110_cgu_outclk_packing(void *buf, struct sja1110_cgu_outclk *outclk,
+ enum packing_op op)
+{
+ const int size = 4;
+
+ sja1105_packing(buf, &outclk->clksrc, 27, 24, size, op);
+ sja1105_packing(buf, &outclk->autoblock, 11, 11, size, op);
+ sja1105_packing(buf, &outclk->pd, 0, 0, size, op);
+}
+
+/* Power down the BASE_TIMER_CLK in order to disable the watchdog */
+int sja1110_clocking_setup(struct sja1105_private *priv)
+{
+ u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
+ struct sja1110_cgu_outclk outclk_7_c = {
+ .clksrc = 0x5,
+ .pd = true,
+ };
+
+ sja1110_cgu_outclk_packing(packed_buf, &outclk_7_c, PACK);
+
+ return sja1105_xfer_buf(priv, SPI_WRITE, SJA1110_BASE_TIMER_CLK,
+ packed_buf, SJA1105_SIZE_CGU_CMD);
+}
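A note on the sja1105_packing() convention used throughout this file and the dynamic config code below: each call transfers one field between a CPU-native u64 and the bit range [end..start] of a packed buffer, where bit 0 is the least significant bit of the buffer's last byte. Below is a self-contained toy version for a single 32-bit word, illustrative only; the real helper delegates to the generic packing() routine from <linux/packing.h>, and buffers longer than 4 bytes additionally swap 32-bit word order (QUIRK_LSW32_IS_FIRST), which this sketch ignores:

	#include <stdint.h>

	enum packing_op { PACK, UNPACK };

	/* Pack or unpack a field occupying bits [end..start] of a 4-byte
	 * big-endian buffer. Mirrors the argument order of
	 * sja1105_packing(): start is the high bit, end is the low bit.
	 */
	static void toy_packing(uint8_t *buf, uint64_t *val, int start,
				int end, enum packing_op op)
	{
		int width = start - end + 1;
		uint32_t word = ((uint32_t)buf[0] << 24) |
				((uint32_t)buf[1] << 16) |
				((uint32_t)buf[2] << 8) | buf[3];
		uint32_t mask = (width == 32 ? 0xffffffffu :
				 ((1u << width) - 1)) << end;

		if (op == PACK) {
			word = (word & ~mask) |
			       (((uint32_t)*val << end) & mask);
			buf[0] = (uint8_t)(word >> 24);
			buf[1] = (uint8_t)(word >> 16);
			buf[2] = (uint8_t)(word >> 8);
			buf[3] = (uint8_t)word;
		} else {
			*val = (word & mask) >> end;
		}
	}

So sja1110_cgu_outclk_packing() above, for example, places clksrc in bits [27:24], autoblock in bit 11 and pd in bit 0 of the 4-byte CGU command word.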
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index b777d3f37573..4c4c04f04269 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -78,6 +78,9 @@
* on its ENTRY portion, as a result of a SPI write command.
* Only the TCAM-based FDB table on SJA1105 P/Q/R/S supports
* this.
+ * OP_VALID_ANYWAY: Reading some tables through the dynamic config
+ * interface is possible even if the VALIDENT bit is not
+ * set in the writeback. So don't error out in that case.
* - .max_entry_count: The number of entries, counting from zero, that can be
* reconfigured through the dynamic interface. If a static
* table can be reconfigured at all dynamically, this
@@ -103,6 +106,9 @@
#define SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_VL_LOOKUP_ENTRY)
+#define SJA1110_SIZE_VL_POLICING_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_VL_POLICING_ENTRY)
+
#define SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY \
SJA1105_SIZE_DYN_CMD
@@ -112,9 +118,15 @@
#define SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY)
+#define SJA1110_SIZE_L2_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1110_SIZE_L2_LOOKUP_ENTRY)
+
#define SJA1105_SIZE_VLAN_LOOKUP_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + 4 + SJA1105_SIZE_VLAN_LOOKUP_ENTRY)
+#define SJA1110_SIZE_VLAN_LOOKUP_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1110_SIZE_VLAN_LOOKUP_ENTRY)
+
#define SJA1105_SIZE_L2_FORWARDING_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_L2_FORWARDING_ENTRY)
@@ -130,12 +142,18 @@
#define SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY)
+#define SJA1110_SIZE_L2_LOOKUP_PARAMS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1110_SIZE_L2_LOOKUP_PARAMS_ENTRY)
+
#define SJA1105ET_SIZE_GENERAL_PARAMS_DYN_CMD \
SJA1105_SIZE_DYN_CMD
#define SJA1105PQRS_SIZE_GENERAL_PARAMS_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY)
+#define SJA1110_SIZE_GENERAL_PARAMS_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1110_SIZE_GENERAL_PARAMS_ENTRY)
+
#define SJA1105PQRS_SIZE_AVB_PARAMS_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY)
@@ -148,8 +166,17 @@
#define SJA1105PQRS_SIZE_CBS_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105PQRS_SIZE_CBS_ENTRY)
+#define SJA1110_SIZE_XMII_PARAMS_DYN_CMD \
+ SJA1110_SIZE_XMII_PARAMS_ENTRY
+
+#define SJA1110_SIZE_L2_POLICING_DYN_CMD \
+ (SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_L2_POLICING_ENTRY)
+
+#define SJA1110_SIZE_L2_FORWARDING_PARAMS_DYN_CMD \
+ SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY
+
#define SJA1105_MAX_DYN_CMD_SIZE \
- SJA1105PQRS_SIZE_GENERAL_PARAMS_DYN_CMD
+ SJA1110_SIZE_GENERAL_PARAMS_DYN_CMD
struct sja1105_dyn_cmd {
bool search;
@@ -167,9 +194,10 @@ enum sja1105_hostcmd {
SJA1105_HOSTCMD_INVALIDATE = 4,
};
+/* Command and entry overlap */
static void
-sja1105_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
- enum packing_op op)
+sja1105et_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
{
const int size = SJA1105_SIZE_DYN_CMD;
@@ -179,6 +207,33 @@ sja1105_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
sja1105_packing(buf, &cmd->index, 9, 0, size, op);
}
+/* Command and entry are separate */
+static void
+sja1105pqrs_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_VL_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->errors, 30, 30, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 9, 0, size, op);
+}
+
+static void
+sja1110_vl_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 11, 0, size, op);
+}
+
static size_t sja1105et_vl_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -191,6 +246,18 @@ static size_t sja1105et_vl_lookup_entry_packing(void *buf, void *entry_ptr,
}
static void
+sja1110_vl_policing_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_VL_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->index, 11, 0, size, op);
+}
+
+static void
sja1105pqrs_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
@@ -308,6 +375,18 @@ sja1105pqrs_dyn_l2_lookup_entry_packing(void *buf, void *entry_ptr,
return sja1105pqrs_l2_lookup_entry_packing(buf, entry_ptr, op);
}
+static size_t sja1110_dyn_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+ u8 *cmd = buf + SJA1110_SIZE_L2_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(cmd, &entry->lockeds, 28, 28, size, op);
+
+ return sja1110_l2_lookup_entry_packing(buf, entry_ptr, op);
+}
+
static void
sja1105et_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
@@ -419,6 +498,39 @@ sja1105_vlan_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
SJA1105_SIZE_VLAN_LOOKUP_ENTRY, op);
}
+/* In SJA1110 there is no gap between the command and the data, yay... */
+static void
+sja1110_vlan_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1110_SIZE_VLAN_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+ u64 type_entry = 0;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ /* Hack: treat 'vlanid' field of struct sja1105_vlan_lookup_entry as
+ * cmd->index.
+ */
+ sja1105_packing(buf, &cmd->index, 38, 27,
+ SJA1110_SIZE_VLAN_LOOKUP_ENTRY, op);
+
+	/* But the VALIDENT bit has disappeared; now we are supposed to
+	 * invalidate an entry through the TYPE_ENTRY field of the entry
+	 * itself. This is a hack to transform the non-zero value of the
+	 * TYPE_ENTRY field into a VALIDENT bit.
+	 */
+ if (op == PACK && !cmd->valident) {
+ sja1105_packing(buf, &type_entry, 40, 39,
+ SJA1110_SIZE_VLAN_LOOKUP_ENTRY, PACK);
+ } else if (op == UNPACK) {
+ sja1105_packing(buf, &type_entry, 40, 39,
+ SJA1110_SIZE_VLAN_LOOKUP_ENTRY, UNPACK);
+ cmd->valident = !!type_entry;
+ }
+}
+
static void
sja1105_l2_forwarding_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
@@ -433,6 +545,19 @@ sja1105_l2_forwarding_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
}
static void
+sja1110_l2_forwarding_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_L2_FORWARDING_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 4, 0, size, op);
+}
+
+static void
sja1105et_mac_config_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
@@ -487,6 +612,19 @@ sja1105pqrs_mac_config_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
}
static void
+sja1110_mac_config_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 3, 0, size, op);
+}
+
+static void
sja1105et_l2_lookup_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
@@ -519,6 +657,18 @@ sja1105pqrs_l2_lookup_params_cmd_packing(void *buf,
}
static void
+sja1110_l2_lookup_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1110_SIZE_L2_LOOKUP_PARAMS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+}
+
+static void
sja1105et_general_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
@@ -553,6 +703,18 @@ sja1105pqrs_general_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
}
static void
+sja1110_general_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1110_SIZE_GENERAL_PARAMS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+}
+
+static void
sja1105pqrs_avb_params_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
@@ -578,6 +740,20 @@ sja1105_retagging_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
sja1105_packing(p, &cmd->index, 5, 0, size, op);
}
+static void
+sja1110_retagging_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_RETAGGING_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->valident, 28, 28, size, op);
+ sja1105_packing(p, &cmd->index, 4, 0, size, op);
+}
+
static void sja1105et_cbs_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
{
@@ -617,6 +793,18 @@ static void sja1105pqrs_cbs_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
sja1105_packing(p, &cmd->index, 3, 0, size, op);
}
+static void sja1110_cbs_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105PQRS_SIZE_CBS_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 7, 0, size, op);
+}
+
static size_t sja1105pqrs_cbs_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -632,16 +820,50 @@ static size_t sja1105pqrs_cbs_entry_packing(void *buf, void *entry_ptr,
return size;
}
+static size_t sja1110_cbs_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_CBS_ENTRY;
+ struct sja1105_cbs_entry *entry = entry_ptr;
+ u64 entry_type = SJA1110_CBS_SHAPER;
+
+ sja1105_packing(buf, &entry_type, 159, 159, size, op);
+ sja1105_packing(buf, &entry->credit_lo, 151, 120, size, op);
+ sja1105_packing(buf, &entry->credit_hi, 119, 88, size, op);
+ sja1105_packing(buf, &entry->send_slope, 87, 56, size, op);
+ sja1105_packing(buf, &entry->idle_slope, 55, 24, size, op);
+ return size;
+}
+
+static void sja1110_dummy_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+}
+
+static void
+sja1110_l2_policing_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
+ enum packing_op op)
+{
+ u8 *p = buf + SJA1105_SIZE_L2_POLICING_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(p, &cmd->valid, 31, 31, size, op);
+ sja1105_packing(p, &cmd->rdwrset, 30, 30, size, op);
+ sja1105_packing(p, &cmd->errors, 29, 29, size, op);
+ sja1105_packing(p, &cmd->index, 6, 0, size, op);
+}
+
#define OP_READ BIT(0)
#define OP_WRITE BIT(1)
#define OP_DEL BIT(2)
#define OP_SEARCH BIT(3)
+#define OP_VALID_ANYWAY BIT(4)
/* SJA1105E/T: First generation */
const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_VL_LOOKUP] = {
.entry_packing = sja1105et_vl_lookup_entry_packing,
- .cmd_packing = sja1105_vl_lookup_cmd_packing,
+ .cmd_packing = sja1105et_vl_lookup_cmd_packing,
.access = OP_WRITE,
.max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
.packed_size = SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD,
@@ -658,7 +880,7 @@ const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_MGMT_ROUTE] = {
.entry_packing = sja1105et_mgmt_route_entry_packing,
.cmd_packing = sja1105et_mgmt_route_cmd_packing,
- .access = (OP_READ | OP_WRITE),
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
.max_entry_count = SJA1105_NUM_PORTS,
.packed_size = SJA1105ET_SIZE_L2_LOOKUP_DYN_CMD,
.addr = 0x20,
@@ -725,7 +947,7 @@ const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_VL_LOOKUP] = {
.entry_packing = sja1105_vl_lookup_entry_packing,
- .cmd_packing = sja1105_vl_lookup_cmd_packing,
+ .cmd_packing = sja1105pqrs_vl_lookup_cmd_packing,
.access = (OP_READ | OP_WRITE),
.max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
.packed_size = SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD,
@@ -742,7 +964,7 @@ const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_MGMT_ROUTE] = {
.entry_packing = sja1105pqrs_mgmt_route_entry_packing,
.cmd_packing = sja1105pqrs_mgmt_route_cmd_packing,
- .access = (OP_READ | OP_WRITE | OP_DEL | OP_SEARCH),
+ .access = (OP_READ | OP_WRITE | OP_DEL | OP_SEARCH | OP_VALID_ANYWAY),
.max_entry_count = SJA1105_NUM_PORTS,
.packed_size = SJA1105PQRS_SIZE_L2_LOOKUP_DYN_CMD,
.addr = 0x24,
@@ -813,6 +1035,122 @@ const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
},
};
+/* SJA1110: Third generation */
+const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN] = {
+ [BLK_IDX_VL_LOOKUP] = {
+ .entry_packing = sja1110_vl_lookup_entry_packing,
+ .cmd_packing = sja1110_vl_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .max_entry_count = SJA1110_MAX_VL_LOOKUP_COUNT,
+ .packed_size = SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x124),
+ },
+ [BLK_IDX_VL_POLICING] = {
+ .entry_packing = sja1110_vl_policing_entry_packing,
+ .cmd_packing = sja1110_vl_policing_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .max_entry_count = SJA1110_MAX_VL_POLICING_COUNT,
+ .packed_size = SJA1110_SIZE_VL_POLICING_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x310),
+ },
+ [BLK_IDX_L2_LOOKUP] = {
+ .entry_packing = sja1110_dyn_l2_lookup_entry_packing,
+ .cmd_packing = sja1105pqrs_l2_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL | OP_SEARCH),
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ .packed_size = SJA1110_SIZE_L2_LOOKUP_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x8c),
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .entry_packing = sja1110_vlan_lookup_entry_packing,
+ .cmd_packing = sja1110_vlan_lookup_cmd_packing,
+ .access = (OP_READ | OP_WRITE | OP_DEL),
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ .packed_size = SJA1110_SIZE_VLAN_LOOKUP_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0xb4),
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .entry_packing = sja1110_l2_forwarding_entry_packing,
+ .cmd_packing = sja1110_l2_forwarding_cmd_packing,
+ .max_entry_count = SJA1110_MAX_L2_FORWARDING_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1105_SIZE_L2_FORWARDING_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0xa8),
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .entry_packing = sja1110_mac_config_entry_packing,
+ .cmd_packing = sja1110_mac_config_cmd_packing,
+ .max_entry_count = SJA1110_MAX_MAC_CONFIG_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1105PQRS_SIZE_MAC_CONFIG_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x134),
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .entry_packing = sja1110_l2_lookup_params_entry_packing,
+ .cmd_packing = sja1110_l2_lookup_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1110_SIZE_L2_LOOKUP_PARAMS_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x158),
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .entry_packing = sja1105pqrs_avb_params_entry_packing,
+ .cmd_packing = sja1105pqrs_avb_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1105PQRS_SIZE_AVB_PARAMS_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x2000C),
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .entry_packing = sja1110_general_params_entry_packing,
+ .cmd_packing = sja1110_general_params_cmd_packing,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1110_SIZE_GENERAL_PARAMS_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0xe8),
+ },
+ [BLK_IDX_RETAGGING] = {
+ .entry_packing = sja1110_retagging_entry_packing,
+ .cmd_packing = sja1110_retagging_cmd_packing,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_DEL),
+ .packed_size = SJA1105_SIZE_RETAGGING_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0xdc),
+ },
+ [BLK_IDX_CBS] = {
+ .entry_packing = sja1110_cbs_entry_packing,
+ .cmd_packing = sja1110_cbs_cmd_packing,
+ .max_entry_count = SJA1110_MAX_CBS_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1105PQRS_SIZE_CBS_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0xc4),
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .entry_packing = sja1110_xmii_params_entry_packing,
+ .cmd_packing = sja1110_dummy_cmd_packing,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ .access = (OP_READ | OP_VALID_ANYWAY),
+ .packed_size = SJA1110_SIZE_XMII_PARAMS_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x3c),
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .entry_packing = sja1110_l2_policing_entry_packing,
+ .cmd_packing = sja1110_l2_policing_cmd_packing,
+ .max_entry_count = SJA1110_MAX_L2_POLICING_COUNT,
+ .access = (OP_READ | OP_WRITE | OP_VALID_ANYWAY),
+ .packed_size = SJA1110_SIZE_L2_POLICING_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x2fc),
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .entry_packing = sja1110_l2_forwarding_params_entry_packing,
+ .cmd_packing = sja1110_dummy_cmd_packing,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ .access = (OP_READ | OP_VALID_ANYWAY),
+ .packed_size = SJA1110_SIZE_L2_FORWARDING_PARAMS_DYN_CMD,
+ .addr = SJA1110_SPI_ADDR(0x20000),
+ },
+};
+
/* Provides read access to the settings through the dynamic interface
* of the switch.
* @blk_idx is used as key to select from the sja1105_dynamic_table_ops.
@@ -896,11 +1234,8 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
cmd = (struct sja1105_dyn_cmd) {0};
ops->cmd_packing(packed_buf, &cmd, UNPACK);
- /* UM10944: [valident] will always be found cleared
- * during a read access with MGMTROUTE set.
- * So don't error out in that case.
- */
- if (!cmd.valident && blk_idx != BLK_IDX_MGMT_ROUTE)
+
+ if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY))
return -ENOENT;
cpu_relax();
} while (cmd.valid && --retries);
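With OP_VALID_ANYWAY folded into the per-table access flags, the quirk that used to be special-cased for BLK_IDX_MGMT_ROUTE now applies to any table that declares it. A sketch of the surrounding checks in sja1105_dynamic_config_read(), hedged in that the exact error codes and the dyn_ops lookup are assumptions based on the ops tables above:

	const struct sja1105_dynamic_table_ops *ops;

	if (blk_idx >= BLK_IDX_MAX_DYN)
		return -ERANGE;

	ops = &priv->info->dyn_ops[blk_idx];

	/* Table not readable through the dynamic interface at all */
	if (!(ops->access & OP_READ))
		return -EOPNOTSUPP;

	/* ... SPI read plus the polling loop above; once VALID clears,
	 * a zero VALIDENT is treated as -ENOENT only for tables that do
	 * not carry OP_VALID_ANYWAY.
	 */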
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.h b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
index 28d4eb5efb8b..a1472f80a059 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.h
@@ -36,5 +36,6 @@ struct sja1105_mgmt_entry {
extern const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN];
extern const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN];
+extern const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN];
#endif
diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c
index 973761132fc3..6c10ffa968ce 100644
--- a/drivers/net/dsa/sja1105/sja1105_flower.c
+++ b/drivers/net/dsa/sja1105/sja1105_flower.c
@@ -35,6 +35,7 @@ static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
{
struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
struct sja1105_l2_policing_entry *policing;
+ struct dsa_switch *ds = priv->ds;
bool new_rule = false;
unsigned long p;
int rc;
@@ -59,7 +60,7 @@ static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
- if (policing[(SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port].sharindx != port) {
+ if (policing[(ds->num_ports * SJA1105_NUM_TC) + port].sharindx != port) {
NL_SET_ERR_MSG_MOD(extack,
"Port already has a broadcast policer");
rc = -EEXIST;
@@ -71,8 +72,8 @@ static int sja1105_setup_bcast_policer(struct sja1105_private *priv,
/* Make the broadcast policers of all ports attached to this block
* point to the newly allocated policer
*/
- for_each_set_bit(p, &rule->port_mask, SJA1105_NUM_PORTS) {
- int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + p;
+ for_each_set_bit(p, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
+ int bcast = (ds->num_ports * SJA1105_NUM_TC) + p;
policing[bcast].sharindx = rule->bcast_pol.sharindx;
}
@@ -143,7 +144,7 @@ static int sja1105_setup_tc_policer(struct sja1105_private *priv,
/* Make the policers for traffic class @tc of all ports attached to
* this block point to the newly allocated policer
*/
- for_each_set_bit(p, &rule->port_mask, SJA1105_NUM_PORTS) {
+ for_each_set_bit(p, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
int index = (p * SJA1105_NUM_TC) + tc;
policing[index].sharindx = rule->tc_pol.sharindx;
@@ -435,7 +436,7 @@ int sja1105_cls_flower_del(struct dsa_switch *ds, int port,
policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
if (rule->type == SJA1105_RULE_BCAST_POLICER) {
- int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port;
+ int bcast = (ds->num_ports * SJA1105_NUM_TC) + port;
old_sharindx = policing[bcast].sharindx;
policing[bcast].sharindx = port;
@@ -486,7 +487,7 @@ void sja1105_flower_setup(struct dsa_switch *ds)
INIT_LIST_HEAD(&priv->flow_block.rules);
- for (port = 0; port < SJA1105_NUM_PORTS; port++)
+ for (port = 0; port < ds->num_ports; port++)
priv->flow_block.l2_policer_used[port] = true;
}
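The index arithmetic in this file relies on the layout of the L2 policing table: N * SJA1105_NUM_TC per-traffic-class policers first, then N broadcast policers, then (SJA1110 only) N multicast policers, with N = ds->num_ports. The helper names below are hypothetical; the driver open-codes these expressions:

	/* Illustrative index helpers for the L2 policing table layout */
	static int sja1105_tc_policer_idx(int num_ports, int port, int tc)
	{
		return port * SJA1105_NUM_TC + tc;		/* [0, 8N) */
	}

	static int sja1105_bcast_policer_idx(int num_ports, int port)
	{
		return num_ports * SJA1105_NUM_TC + port;	/* [8N, 9N) */
	}

	static int sja1110_mcast_policer_idx(int num_ports, int port)
	{
		return num_ports * (SJA1105_NUM_TC + 1) + port;	/* [9N, 10N) */
	}

For an SJA1110 with 11 ports, the broadcast policer of port 2 is entry 11 * 8 + 2 = 90, and its multicast policer is entry 11 * 9 + 2 = 101.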
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 802314e90e64..8e5cdf93c23b 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -16,16 +16,17 @@
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/of_device.h>
+#include <linux/pcs/pcs-xpcs.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/if_ether.h>
#include <linux/dsa/8021q.h>
#include "sja1105.h"
-#include "sja1105_sgmii.h"
#include "sja1105_tas.h"
#define SJA1105_UNKNOWN_MULTICAST 0x010000000000ull
+#define SJA1105_DEFAULT_VLAN (VLAN_N_VID - 1)
static const struct dsa_switch_ops sja1105_switch_ops;
@@ -56,14 +57,6 @@ static bool sja1105_can_forward(struct sja1105_l2_forwarding_entry *l2_fwd,
return !!(l2_fwd[from].reach_port & BIT(to));
}
-/* Structure used to temporarily transport device tree
- * settings into sja1105_setup
- */
-struct sja1105_dt_port {
- phy_interface_t phy_mode;
- sja1105_mii_role_t role;
-};
-
static int sja1105_init_mac_settings(struct sja1105_private *priv)
{
struct sja1105_mac_config_entry default_mac = {
@@ -79,7 +72,7 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv)
/* Always put the MAC speed in automatic mode, where it can be
* adjusted at runtime by PHYLINK.
*/
- .speed = SJA1105_SPEED_AUTO,
+ .speed = priv->info->port_speed[SJA1105_SPEED_AUTO],
/* No static correction for 1-step 1588 events */
.tp_delin = 0,
.tp_delout = 0,
@@ -106,6 +99,7 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv)
.ingress = false,
};
struct sja1105_mac_config_entry *mac;
+ struct dsa_switch *ds = priv->ds;
struct sja1105_table *table;
int i;
@@ -117,16 +111,16 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv)
table->entry_count = 0;
}
- table->entries = kcalloc(SJA1105_NUM_PORTS,
+ table->entries = kcalloc(table->ops->max_entry_count,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
- table->entry_count = SJA1105_NUM_PORTS;
+ table->entry_count = table->ops->max_entry_count;
mac = table->entries;
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ for (i = 0; i < ds->num_ports; i++) {
mac[i] = default_mac;
if (i == dsa_upstream_port(priv->ds, i)) {
/* STP doesn't get called for CPU port, so we need to
@@ -141,26 +135,11 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv)
return 0;
}
-static bool sja1105_supports_sgmii(struct sja1105_private *priv, int port)
-{
- if (priv->info->part_no != SJA1105R_PART_NO &&
- priv->info->part_no != SJA1105S_PART_NO)
- return false;
-
- if (port != SJA1105_SGMII_PORT)
- return false;
-
- if (dsa_is_unused_port(priv->ds, port))
- return false;
-
- return true;
-}
-
-static int sja1105_init_mii_settings(struct sja1105_private *priv,
- struct sja1105_dt_port *ports)
+static int sja1105_init_mii_settings(struct sja1105_private *priv)
{
struct device *dev = &priv->spidev->dev;
struct sja1105_xmii_params_entry *mii;
+ struct dsa_switch *ds = priv->ds;
struct sja1105_table *table;
int i;
@@ -172,51 +151,81 @@ static int sja1105_init_mii_settings(struct sja1105_private *priv,
table->entry_count = 0;
}
- table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT,
+ table->entries = kcalloc(table->ops->max_entry_count,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
/* Override table based on PHYLINK DT bindings */
- table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT;
+ table->entry_count = table->ops->max_entry_count;
mii = table->entries;
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ for (i = 0; i < ds->num_ports; i++) {
+ sja1105_mii_role_t role = XMII_MAC;
+
if (dsa_is_unused_port(priv->ds, i))
continue;
- switch (ports[i].phy_mode) {
+ switch (priv->phy_mode[i]) {
+ case PHY_INTERFACE_MODE_INTERNAL:
+ if (priv->info->internal_phy[i] == SJA1105_NO_PHY)
+ goto unsupported;
+
+ mii->xmii_mode[i] = XMII_MODE_MII;
+ if (priv->info->internal_phy[i] == SJA1105_PHY_BASE_TX)
+ mii->special[i] = true;
+
+ break;
+ case PHY_INTERFACE_MODE_REVMII:
+ role = XMII_PHY;
+ fallthrough;
case PHY_INTERFACE_MODE_MII:
+ if (!priv->info->supports_mii[i])
+ goto unsupported;
+
mii->xmii_mode[i] = XMII_MODE_MII;
break;
+ case PHY_INTERFACE_MODE_REVRMII:
+ role = XMII_PHY;
+ fallthrough;
case PHY_INTERFACE_MODE_RMII:
+ if (!priv->info->supports_rmii[i])
+ goto unsupported;
+
mii->xmii_mode[i] = XMII_MODE_RMII;
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (!priv->info->supports_rgmii[i])
+ goto unsupported;
+
mii->xmii_mode[i] = XMII_MODE_RGMII;
break;
case PHY_INTERFACE_MODE_SGMII:
- if (!sja1105_supports_sgmii(priv, i))
- return -EINVAL;
+ if (!priv->info->supports_sgmii[i])
+ goto unsupported;
+
+ mii->xmii_mode[i] = XMII_MODE_SGMII;
+ mii->special[i] = true;
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ if (!priv->info->supports_2500basex[i])
+ goto unsupported;
+
mii->xmii_mode[i] = XMII_MODE_SGMII;
+ mii->special[i] = true;
break;
+unsupported:
default:
- dev_err(dev, "Unsupported PHY mode %s!\n",
- phy_modes(ports[i].phy_mode));
+ dev_err(dev, "Unsupported PHY mode %s on port %d!\n",
+ phy_modes(priv->phy_mode[i]), i);
+ return -EINVAL;
}
- /* Even though the SerDes port is able to drive SGMII autoneg
- * like a PHY would, from the perspective of the XMII tables,
- * the SGMII port should always be put in MAC mode.
- */
- if (ports[i].phy_mode == PHY_INTERFACE_MODE_SGMII)
- mii->phy_mac[i] = XMII_MAC;
- else
- mii->phy_mac[i] = ports[i].role;
+ mii->phy_mac[i] = role;
}
return 0;
}
@@ -265,8 +274,6 @@ static int sja1105_init_static_fdb(struct sja1105_private *priv)
static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
{
- struct sja1105_table *table;
- u64 max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / SJA1105_NUM_PORTS;
struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
/* Learned FDB entries are forgotten after 300 seconds */
.maxage = SJA1105_AGEING_TIME_MS(300000),
@@ -274,8 +281,6 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
.dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
/* And the P/Q/R/S equivalent setting: */
.start_dynspc = 0,
- .maxaddrp = {max_fdb_entries, max_fdb_entries, max_fdb_entries,
- max_fdb_entries, max_fdb_entries, },
/* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
.poly = 0x97,
/* This selects between Independent VLAN Learning (IVL) and
@@ -299,6 +304,23 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
.owr_dyn = true,
.drpnolearn = true,
};
+ struct dsa_switch *ds = priv->ds;
+ int port, num_used_ports = 0;
+ struct sja1105_table *table;
+ u64 max_fdb_entries;
+
+ for (port = 0; port < ds->num_ports; port++)
+ if (!dsa_is_unused_port(ds, port))
+ num_used_ports++;
+
+ max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / num_used_ports;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ default_l2_lookup_params.maxaddrp[port] = max_fdb_entries;
+ }
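	/* Worked example (constants assumed from the driver headers): with
	 * SJA1105_MAX_L2_LOOKUP_COUNT = 1024 and four ports wired up in the
	 * device tree, max_fdb_entries = 1024 / 4 = 256, so each used port
	 * gets maxaddrp[port] = 256 while unused ports keep 0. The old
	 * static initializer always split the space five ways, whether the
	 * ports were used or not.
	 */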
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
@@ -307,12 +329,12 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
table->entry_count = 0;
}
- table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ table->entries = kcalloc(table->ops->max_entry_count,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
- table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT;
+ table->entry_count = table->ops->max_entry_count;
/* This table only has a single entry */
((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
@@ -321,26 +343,30 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
return 0;
}
+/* Set up a default VLAN for untagged traffic injected from the CPU
+ * using management routes (e.g. STP, PTP) as opposed to tag_8021q.
+ * All DT-defined ports are members of this VLAN, and there are no
+ * restrictions on forwarding (since the CPU selects the destination).
+ * Frames from this VLAN will always be transmitted as untagged, and
+ * neither the bridge nor the 8021q module can create this VLAN ID.
+ */
static int sja1105_init_static_vlan(struct sja1105_private *priv)
{
struct sja1105_table *table;
struct sja1105_vlan_lookup_entry pvid = {
+ .type_entry = SJA1110_VLAN_D_TAG,
.ving_mirr = 0,
.vegr_mirr = 0,
.vmemb_port = 0,
.vlan_bc = 0,
.tag_port = 0,
- .vlanid = 1,
+ .vlanid = SJA1105_DEFAULT_VLAN,
};
struct dsa_switch *ds = priv->ds;
int port;
table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
- /* The static VLAN table will only contain the initial pvid of 1.
- * All other VLANs are to be configured through dynamic entries,
- * and kept in the static configuration table as backing memory.
- */
if (table->entry_count) {
kfree(table->entries);
table->entry_count = 0;
@@ -353,9 +379,6 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
table->entry_count = 1;
- /* VLAN 1: all DT-defined ports are members; no restrictions on
- * forwarding; always transmit as untagged.
- */
for (port = 0; port < ds->num_ports; port++) {
struct sja1105_bridge_vlan *v;
@@ -366,15 +389,12 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
pvid.vlan_bc |= BIT(port);
pvid.tag_port &= ~BIT(port);
- /* Let traffic that don't need dsa_8021q (e.g. STP, PTP) be
- * transmitted as untagged.
- */
v = kzalloc(sizeof(*v), GFP_KERNEL);
if (!v)
return -ENOMEM;
v->port = port;
- v->vid = 1;
+ v->vid = SJA1105_DEFAULT_VLAN;
v->untagged = true;
if (dsa_is_cpu_port(ds, port))
v->pvid = true;
@@ -388,6 +408,7 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv)
static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
{
struct sja1105_l2_forwarding_entry *l2fwd;
+ struct dsa_switch *ds = priv->ds;
struct sja1105_table *table;
int i, j;
@@ -398,19 +419,22 @@ static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
table->entry_count = 0;
}
- table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT,
+ table->entries = kcalloc(table->ops->max_entry_count,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
- table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT;
+ table->entry_count = table->ops->max_entry_count;
l2fwd = table->entries;
/* The first ds->num_ports entries define the forwarding rules */
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ for (i = 0; i < ds->num_ports; i++) {
unsigned int upstream = dsa_upstream_port(priv->ds, i);
+ if (dsa_is_unused_port(ds, i))
+ continue;
+
for (j = 0; j < SJA1105_NUM_TC; j++)
l2fwd[i].vlan_pmap[j] = j;
@@ -432,24 +456,66 @@ static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
l2fwd[upstream].bc_domain |= BIT(i);
l2fwd[upstream].fl_domain |= BIT(i);
}
+
/* Next 8 entries define VLAN PCP mapping from ingress to egress.
* Create a one-to-one mapping.
*/
- for (i = 0; i < SJA1105_NUM_TC; i++)
- for (j = 0; j < SJA1105_NUM_PORTS; j++)
- l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i;
+ for (i = 0; i < SJA1105_NUM_TC; i++) {
+ for (j = 0; j < ds->num_ports; j++) {
+ if (dsa_is_unused_port(ds, j))
+ continue;
+
+ l2fwd[ds->num_ports + i].vlan_pmap[j] = i;
+ }
+
+ l2fwd[ds->num_ports + i].type_egrpcp2outputq = true;
+ }
+
+ return 0;
+}
+
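The L2 forwarding table filled in above has two regions: entries [0, N) hold the per-port reach/broadcast/flood domains, and entries [N, N + 8) hold the per-priority VLAN PCP maps, with N = ds->num_ports. A one-line sketch of reading a mapping back out (variables as in sja1105_init_l2_forwarding(); the field type follows struct sja1105_l2_forwarding_entry):

	/* Egress PCP assigned to frames of ingress priority 'tc' on 'port' */
	u64 egress_pcp = l2fwd[ds->num_ports + tc].vlan_pmap[port];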
+static int sja1110_init_pcp_remapping(struct sja1105_private *priv)
+{
+ struct sja1110_pcp_remapping_entry *pcp_remap;
+ struct dsa_switch *ds = priv->ds;
+ struct sja1105_table *table;
+ int port, tc;
+
+ table = &priv->static_config.tables[BLK_IDX_PCP_REMAPPING];
+
+ /* Nothing to do for SJA1105 */
+ if (!table->ops->max_entry_count)
+ return 0;
+
+ if (table->entry_count) {
+ kfree(table->entries);
+ table->entry_count = 0;
+ }
+
+ table->entries = kcalloc(table->ops->max_entry_count,
+ table->ops->unpacked_entry_size, GFP_KERNEL);
+ if (!table->entries)
+ return -ENOMEM;
+
+ table->entry_count = table->ops->max_entry_count;
+
+ pcp_remap = table->entries;
+
+ /* Repeat the configuration done for vlan_pmap */
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ for (tc = 0; tc < SJA1105_NUM_TC; tc++)
+ pcp_remap[port].egrpcp[tc] = tc;
+ }
return 0;
}
static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
{
- struct sja1105_l2_forwarding_params_entry default_l2fwd_params = {
- /* Disallow dynamic reconfiguration of vlan_pmap */
- .max_dynp = 0,
- /* Use a single memory partition for all ingress queues */
- .part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 },
- };
+ struct sja1105_l2_forwarding_params_entry *l2fwd_params;
struct sja1105_table *table;
table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
@@ -459,16 +525,20 @@ static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
table->entry_count = 0;
}
- table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ table->entries = kcalloc(table->ops->max_entry_count,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
- table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT;
+ table->entry_count = table->ops->max_entry_count;
/* This table only has a single entry */
- ((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] =
- default_l2fwd_params;
+ l2fwd_params = table->entries;
+
+ /* Disallow dynamic reconfiguration of vlan_pmap */
+ l2fwd_params->max_dynp = 0;
+ /* Use a single memory partition for all ingress queues */
+ l2fwd_params->part_spc[0] = priv->info->max_frame_mem;
return 0;
}
@@ -477,16 +547,14 @@ void sja1105_frame_memory_partitioning(struct sja1105_private *priv)
{
struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
+ int max_mem = priv->info->max_frame_mem;
struct sja1105_table *table;
- int max_mem;
/* VLAN retagging is implemented using a loopback port that consumes
* frame buffers. That leaves less for us.
*/
if (priv->vlan_state == SJA1105_VLAN_BEST_EFFORT)
- max_mem = SJA1105_MAX_FRAME_MEMORY_RETAGGING;
- else
- max_mem = SJA1105_MAX_FRAME_MEMORY;
+ max_mem -= SJA1105_FRAME_MEMORY_RETAGGING_OVERHEAD;
table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
l2_fwd_params = table->entries;
@@ -508,6 +576,60 @@ void sja1105_frame_memory_partitioning(struct sja1105_private *priv)
vl_fwd_params->partspc[0] = SJA1105_VL_FRAME_MEMORY;
}
+/* SJA1110 TDMACONFIGIDX values:
+ *
+ * | 100 Mbps ports | 1Gbps ports | 2.5Gbps ports | Disabled ports
+ * -----+----------------+---------------+---------------+---------------
+ * 0 | 0, [5:10] | [1:2] | [3:4] | retag
+ * 1 |0, [5:10], retag| [1:2] | [3:4] | -
+ * 2 | 0, [5:10] | [1:3], retag | 4 | -
+ * 3 | 0, [5:10] |[1:2], 4, retag| 3 | -
+ * 4 | 0, 2, [5:10] | 1, retag | [3:4] | -
+ * 5 | 0, 1, [5:10] | 2, retag | [3:4] | -
+ * 14 | 0, [5:10] | [1:4], retag | - | -
+ * 15 | [5:10] | [0:4], retag | - | -
+ */
+static void sja1110_select_tdmaconfigidx(struct sja1105_private *priv)
+{
+ struct sja1105_general_params_entry *general_params;
+ struct sja1105_table *table;
+ bool port_1_is_base_tx;
+ bool port_3_is_2500;
+ bool port_4_is_2500;
+ u64 tdmaconfigidx;
+
+ if (priv->info->device_id != SJA1110_DEVICE_ID)
+ return;
+
+ table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
+ general_params = table->entries;
+
+ /* All the settings below are "as opposed to SGMII", which is the
+ * other pinmuxing option.
+ */
+ port_1_is_base_tx = priv->phy_mode[1] == PHY_INTERFACE_MODE_INTERNAL;
+ port_3_is_2500 = priv->phy_mode[3] == PHY_INTERFACE_MODE_2500BASEX;
+ port_4_is_2500 = priv->phy_mode[4] == PHY_INTERFACE_MODE_2500BASEX;
+
+ if (port_1_is_base_tx)
+ /* Retagging port will operate at 1 Gbps */
+ tdmaconfigidx = 5;
+ else if (port_3_is_2500 && port_4_is_2500)
+ /* Retagging port will operate at 100 Mbps */
+ tdmaconfigidx = 1;
+ else if (port_3_is_2500)
+ /* Retagging port will operate at 1 Gbps */
+ tdmaconfigidx = 3;
+ else if (port_4_is_2500)
+ /* Retagging port will operate at 1 Gbps */
+ tdmaconfigidx = 2;
+ else
+ /* Retagging port will operate at 1 Gbps */
+ tdmaconfigidx = 14;
+
+ general_params->tdmaconfigidx = tdmaconfigidx;
+}
+
static int sja1105_init_general_params(struct sja1105_private *priv)
{
struct sja1105_general_params_entry default_general_params = {
@@ -531,17 +653,9 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
* received on host_port itself would be dropped, except
* by installing a temporary 'management route'
*/
- .host_port = dsa_upstream_port(priv->ds, 0),
+ .host_port = priv->ds->num_ports,
/* Default to an invalid value */
- .mirr_port = SJA1105_NUM_PORTS,
- /* Link-local traffic received on casc_port will be forwarded
- * to host_port without embedding the source port and device ID
- * info in the destination MAC address (presumably because it
- * is a cascaded port and a downstream SJA switch already did
- * that). Default to an invalid port (to disable the feature)
- * and overwrite this if we find any DSA (cascaded) ports.
- */
- .casc_port = SJA1105_NUM_PORTS,
+ .mirr_port = priv->ds->num_ports,
/* No TTEthernet */
.vllupformat = SJA1105_VL_FORMAT_PSFP,
.vlmarker = 0,
@@ -553,8 +667,22 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
*/
.tpid = ETH_P_SJA1105,
.tpid2 = ETH_P_SJA1105,
+ /* Enable the TTEthernet engine on SJA1110 */
+ .tte_en = true,
+ /* Set up the EtherType for control packets on SJA1110 */
+ .header_type = ETH_P_SJA1110,
};
+ struct sja1105_general_params_entry *general_params;
+ struct dsa_switch *ds = priv->ds;
struct sja1105_table *table;
+ int port;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (dsa_is_cpu_port(ds, port)) {
+ default_general_params.host_port = port;
+ break;
+ }
+ }
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
@@ -563,16 +691,32 @@ static int sja1105_init_general_params(struct sja1105_private *priv)
table->entry_count = 0;
}
- table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ table->entries = kcalloc(table->ops->max_entry_count,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
- table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT;
+ table->entry_count = table->ops->max_entry_count;
+
+ general_params = table->entries;
/* This table only has a single entry */
- ((struct sja1105_general_params_entry *)table->entries)[0] =
- default_general_params;
+ general_params[0] = default_general_params;
+
+ sja1110_select_tdmaconfigidx(priv);
+
+ /* Link-local traffic received on casc_port will be forwarded
+ * to host_port without embedding the source port and device ID
+ * info in the destination MAC address, and no RX timestamps will be
+ * taken either (presumably because it is a cascaded port and a
+ * downstream SJA switch already did that).
+ * To disable the feature, we need to do different things depending on
+ * switch generation. On SJA1105 we need to set an invalid port, while
+	 * on SJA1110, which supports multiple cascaded ports, this field is a
+	 * bitmask, so it must be left zero.
+ */
+ if (!priv->info->multiple_cascade_ports)
+ general_params->casc_port = ds->num_ports;
return 0;
}
@@ -590,12 +734,12 @@ static int sja1105_init_avb_params(struct sja1105_private *priv)
table->entry_count = 0;
}
- table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
+ table->entries = kcalloc(table->ops->max_entry_count,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
- table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT;
+ table->entry_count = table->ops->max_entry_count;
avb = table->entries;
@@ -662,6 +806,7 @@ static int sja1105_init_avb_params(struct sja1105_private *priv)
static int sja1105_init_l2_policing(struct sja1105_private *priv)
{
struct sja1105_l2_policing_entry *policing;
+ struct dsa_switch *ds = priv->ds;
struct sja1105_table *table;
int port, tc;
@@ -673,27 +818,31 @@ static int sja1105_init_l2_policing(struct sja1105_private *priv)
table->entry_count = 0;
}
- table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT,
+ table->entries = kcalloc(table->ops->max_entry_count,
table->ops->unpacked_entry_size, GFP_KERNEL);
if (!table->entries)
return -ENOMEM;
- table->entry_count = SJA1105_MAX_L2_POLICING_COUNT;
+ table->entry_count = table->ops->max_entry_count;
policing = table->entries;
/* Setup shared indices for the matchall policers */
- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
- int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port;
+ for (port = 0; port < ds->num_ports; port++) {
+ int mcast = (ds->num_ports * (SJA1105_NUM_TC + 1)) + port;
+ int bcast = (ds->num_ports * SJA1105_NUM_TC) + port;
for (tc = 0; tc < SJA1105_NUM_TC; tc++)
policing[port * SJA1105_NUM_TC + tc].sharindx = port;
policing[bcast].sharindx = port;
+ /* Only SJA1110 has multicast policers */
+ if (mcast < table->ops->max_entry_count)
+ policing[mcast].sharindx = port;
}
/* Setup the matchall policer parameters */
- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ for (port = 0; port < ds->num_ports; port++) {
int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
if (dsa_is_cpu_port(priv->ds, port))
@@ -708,8 +857,7 @@ static int sja1105_init_l2_policing(struct sja1105_private *priv)
return 0;
}
-static int sja1105_static_config_load(struct sja1105_private *priv,
- struct sja1105_dt_port *ports)
+static int sja1105_static_config_load(struct sja1105_private *priv)
{
int rc;
@@ -724,7 +872,7 @@ static int sja1105_static_config_load(struct sja1105_private *priv,
rc = sja1105_init_mac_settings(priv);
if (rc < 0)
return rc;
- rc = sja1105_init_mii_settings(priv, ports);
+ rc = sja1105_init_mii_settings(priv);
if (rc < 0)
return rc;
rc = sja1105_init_static_fdb(priv);
@@ -751,37 +899,39 @@ static int sja1105_static_config_load(struct sja1105_private *priv,
rc = sja1105_init_avb_params(priv);
if (rc < 0)
return rc;
+ rc = sja1110_init_pcp_remapping(priv);
+ if (rc < 0)
+ return rc;
/* Send initial configuration to hardware via SPI */
return sja1105_static_config_upload(priv);
}
-static int sja1105_parse_rgmii_delays(struct sja1105_private *priv,
- const struct sja1105_dt_port *ports)
+static int sja1105_parse_rgmii_delays(struct sja1105_private *priv)
{
- int i;
+ struct dsa_switch *ds = priv->ds;
+ int port;
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
- if (ports[i].role == XMII_MAC)
+ for (port = 0; port < ds->num_ports; port++) {
+ if (!priv->fixed_link[port])
continue;
- if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
- ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
- priv->rgmii_rx_delay[i] = true;
+ if (priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_RXID ||
+ priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_ID)
+ priv->rgmii_rx_delay[port] = true;
- if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
- ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
- priv->rgmii_tx_delay[i] = true;
+ if (priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_TXID ||
+ priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_ID)
+ priv->rgmii_tx_delay[port] = true;
- if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) &&
- !priv->info->setup_rgmii_delay)
+ if ((priv->rgmii_rx_delay[port] || priv->rgmii_tx_delay[port]) &&
+ !priv->info->setup_rgmii_delay)
return -EINVAL;
}
return 0;
}
static int sja1105_parse_ports_node(struct sja1105_private *priv,
- struct sja1105_dt_port *ports,
struct device_node *ports_node)
{
struct device *dev = &priv->spidev->dev;
@@ -810,7 +960,6 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
of_node_put(child);
return -ENODEV;
}
- ports[index].phy_mode = phy_mode;
phy_node = of_parse_phandle(child, "phy-handle", 0);
if (!phy_node) {
@@ -823,25 +972,18 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
/* phy-handle is missing, but fixed-link isn't.
* So it's a fixed link. Default to PHY role.
*/
- ports[index].role = XMII_PHY;
+ priv->fixed_link[index] = true;
} else {
- /* phy-handle present => put port in MAC role */
- ports[index].role = XMII_MAC;
of_node_put(phy_node);
}
- /* The MAC/PHY role can be overridden with explicit bindings */
- if (of_property_read_bool(child, "sja1105,role-mac"))
- ports[index].role = XMII_MAC;
- else if (of_property_read_bool(child, "sja1105,role-phy"))
- ports[index].role = XMII_PHY;
+ priv->phy_mode[index] = phy_mode;
}
return 0;
}
-static int sja1105_parse_dt(struct sja1105_private *priv,
- struct sja1105_dt_port *ports)
+static int sja1105_parse_dt(struct sja1105_private *priv)
{
struct device *dev = &priv->spidev->dev;
struct device_node *switch_node = dev->of_node;
@@ -849,113 +991,41 @@ static int sja1105_parse_dt(struct sja1105_private *priv,
int rc;
ports_node = of_get_child_by_name(switch_node, "ports");
+ if (!ports_node)
+ ports_node = of_get_child_by_name(switch_node, "ethernet-ports");
if (!ports_node) {
dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
return -ENODEV;
}
- rc = sja1105_parse_ports_node(priv, ports, ports_node);
+ rc = sja1105_parse_ports_node(priv, ports_node);
of_node_put(ports_node);
return rc;
}
-static int sja1105_sgmii_read(struct sja1105_private *priv, int pcs_reg)
-{
- const struct sja1105_regs *regs = priv->info->regs;
- u32 val;
- int rc;
-
- rc = sja1105_xfer_u32(priv, SPI_READ, regs->sgmii + pcs_reg, &val,
- NULL);
- if (rc < 0)
- return rc;
-
- return val;
-}
-
-static int sja1105_sgmii_write(struct sja1105_private *priv, int pcs_reg,
- u16 pcs_val)
-{
- const struct sja1105_regs *regs = priv->info->regs;
- u32 val = pcs_val;
- int rc;
-
- rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->sgmii + pcs_reg, &val,
- NULL);
- if (rc < 0)
- return rc;
-
- return val;
-}
-
-static void sja1105_sgmii_pcs_config(struct sja1105_private *priv,
- bool an_enabled, bool an_master)
-{
- u16 ac = SJA1105_AC_AUTONEG_MODE_SGMII;
-
- /* DIGITAL_CONTROL_1: Enable vendor-specific MMD1, allow the PHY to
- * stop the clock during LPI mode, make the MAC reconfigure
- * autonomously after PCS autoneg is done, flush the internal FIFOs.
- */
- sja1105_sgmii_write(priv, SJA1105_DC1, SJA1105_DC1_EN_VSMMD1 |
- SJA1105_DC1_CLOCK_STOP_EN |
- SJA1105_DC1_MAC_AUTO_SW |
- SJA1105_DC1_INIT);
- /* DIGITAL_CONTROL_2: No polarity inversion for TX and RX lanes */
- sja1105_sgmii_write(priv, SJA1105_DC2, SJA1105_DC2_TX_POL_INV_DISABLE);
- /* AUTONEG_CONTROL: Use SGMII autoneg */
- if (an_master)
- ac |= SJA1105_AC_PHY_MODE | SJA1105_AC_SGMII_LINK;
- sja1105_sgmii_write(priv, SJA1105_AC, ac);
- /* BASIC_CONTROL: enable in-band AN now, if requested. Otherwise,
- * sja1105_sgmii_pcs_force_speed must be called later for the link
- * to become operational.
- */
- if (an_enabled)
- sja1105_sgmii_write(priv, MII_BMCR,
- BMCR_ANENABLE | BMCR_ANRESTART);
-}
-
-static void sja1105_sgmii_pcs_force_speed(struct sja1105_private *priv,
- int speed)
+/* Convert link speed from SJA1105 to ethtool encoding */
+static int sja1105_port_speed_to_ethtool(struct sja1105_private *priv,
+ u64 speed)
{
- int pcs_speed;
-
- switch (speed) {
- case SPEED_1000:
- pcs_speed = BMCR_SPEED1000;
- break;
- case SPEED_100:
- pcs_speed = BMCR_SPEED100;
- break;
- case SPEED_10:
- pcs_speed = BMCR_SPEED10;
- break;
- default:
- dev_err(priv->ds->dev, "Invalid speed %d\n", speed);
- return;
- }
- sja1105_sgmii_write(priv, MII_BMCR, pcs_speed | BMCR_FULLDPLX);
+ if (speed == priv->info->port_speed[SJA1105_SPEED_10MBPS])
+ return SPEED_10;
+ if (speed == priv->info->port_speed[SJA1105_SPEED_100MBPS])
+ return SPEED_100;
+ if (speed == priv->info->port_speed[SJA1105_SPEED_1000MBPS])
+ return SPEED_1000;
+ if (speed == priv->info->port_speed[SJA1105_SPEED_2500MBPS])
+ return SPEED_2500;
+ return SPEED_UNKNOWN;
}
-/* Convert link speed from SJA1105 to ethtool encoding */
-static int sja1105_speed[] = {
- [SJA1105_SPEED_AUTO] = SPEED_UNKNOWN,
- [SJA1105_SPEED_10MBPS] = SPEED_10,
- [SJA1105_SPEED_100MBPS] = SPEED_100,
- [SJA1105_SPEED_1000MBPS] = SPEED_1000,
-};
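
For reference, a standalone sketch (not part of the patch) of why the static sja1105_speed[] array had to become a per-chip lookup: the raw SPEED field in the MAC config decodes differently per family. The encodings mirror the port_speed tables added to the info structures later in this patch:

#include <stdint.h>
#include <stdio.h>

enum { SPD_AUTO, SPD_10, SPD_100, SPD_1000, SPD_2500, SPD_CNT };

static const uint64_t sja1105_enc[SPD_CNT] = { 0, 3, 2, 1, 0 };
static const uint64_t sja1110_enc[SPD_CNT] = { 0, 4, 3, 2, 1 };

static int to_ethtool(const uint64_t *enc, uint64_t raw)
{
	static const int mbps[SPD_CNT] = { -1, 10, 100, 1000, 2500 };
	int i;

	for (i = SPD_10; i < SPD_CNT; i++)
		if (raw == enc[i])
			return mbps[i];
	return -1; /* SPEED_UNKNOWN */
}

int main(void)
{
	/* raw value 2 means 100 Mbps on SJA1105 but 1000 Mbps on SJA1110 */
	printf("%d %d\n", to_ethtool(sja1105_enc, 2),
	       to_ethtool(sja1110_enc, 2));
	return 0;
}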
-
/* Set link speed in the MAC configuration for a specific port. */
static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
int speed_mbps)
{
- struct sja1105_xmii_params_entry *mii;
struct sja1105_mac_config_entry *mac;
struct device *dev = priv->ds->dev;
- sja1105_phy_interface_t phy_mode;
- sja1105_speed_t speed;
+ u64 speed;
int rc;
/* On P/Q/R/S, one can read from the device via the MAC reconfiguration
@@ -965,7 +1035,6 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
* reasonable approximation for both E/T and P/Q/R/S.
*/
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
- mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
switch (speed_mbps) {
case SPEED_UNKNOWN:
@@ -976,16 +1045,19 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
* ok for power consumption in case AN will never complete -
* otherwise PHYLINK should come back with a new update.
*/
- speed = SJA1105_SPEED_AUTO;
+ speed = priv->info->port_speed[SJA1105_SPEED_AUTO];
break;
case SPEED_10:
- speed = SJA1105_SPEED_10MBPS;
+ speed = priv->info->port_speed[SJA1105_SPEED_10MBPS];
break;
case SPEED_100:
- speed = SJA1105_SPEED_100MBPS;
+ speed = priv->info->port_speed[SJA1105_SPEED_100MBPS];
break;
case SPEED_1000:
- speed = SJA1105_SPEED_1000MBPS;
+ speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS];
+ break;
+ case SPEED_2500:
+ speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS];
break;
default:
dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
@@ -999,8 +1071,10 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
* Actually for the SGMII port, the MAC is fixed at 1 Gbps and
* we need to configure the PCS only (if even that).
*/
- if (sja1105_supports_sgmii(priv, port))
- mac[port].speed = SJA1105_SPEED_1000MBPS;
+ if (priv->phy_mode[port] == PHY_INTERFACE_MODE_SGMII)
+ mac[port].speed = priv->info->port_speed[SJA1105_SPEED_1000MBPS];
+ else if (priv->phy_mode[port] == PHY_INTERFACE_MODE_2500BASEX)
+ mac[port].speed = priv->info->port_speed[SJA1105_SPEED_2500MBPS];
else
mac[port].speed = speed;
@@ -1018,8 +1092,7 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
* the clock setup does interrupt the clock signal for a certain time
* which causes trouble for all PHYs relying on this signal.
*/
- phy_mode = mii->xmii_mode[port];
- if (phy_mode != XMII_MODE_RGMII)
+ if (!phy_interface_mode_is_rgmii(priv->phy_mode[port]))
return 0;
return sja1105_clocking_setup_port(priv, port);
@@ -1035,35 +1108,16 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port,
phy_interface_t interface)
{
- struct sja1105_xmii_params_entry *mii;
- sja1105_phy_interface_t phy_mode;
-
- mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
- phy_mode = mii->xmii_mode[port];
-
- switch (interface) {
- case PHY_INTERFACE_MODE_MII:
- return (phy_mode != XMII_MODE_MII);
- case PHY_INTERFACE_MODE_RMII:
- return (phy_mode != XMII_MODE_RMII);
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- return (phy_mode != XMII_MODE_RGMII);
- case PHY_INTERFACE_MODE_SGMII:
- return (phy_mode != XMII_MODE_SGMII);
- default:
- return true;
- }
+ return priv->phy_mode[port] != interface;
}
static void sja1105_mac_config(struct dsa_switch *ds, int port,
unsigned int mode,
const struct phylink_link_state *state)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct sja1105_private *priv = ds->priv;
- bool is_sgmii = sja1105_supports_sgmii(priv, port);
+ struct dw_xpcs *xpcs;
if (sja1105_phy_mode_mismatch(priv, port, state->interface)) {
dev_err(ds->dev, "Changing PHY mode to %s not supported!\n",
@@ -1071,14 +1125,10 @@ static void sja1105_mac_config(struct dsa_switch *ds, int port,
return;
}
- if (phylink_autoneg_inband(mode) && !is_sgmii) {
- dev_err(ds->dev, "In-band AN not supported!\n");
- return;
- }
+ xpcs = priv->xpcs[port];
- if (is_sgmii)
- sja1105_sgmii_pcs_config(priv, phylink_autoneg_inband(mode),
- false);
+ if (xpcs)
+ phylink_set_pcs(dp->pl, &xpcs->pcs);
}
static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
@@ -1099,9 +1149,6 @@ static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
sja1105_adjust_port_config(priv, port, speed);
- if (sja1105_supports_sgmii(priv, port) && !phylink_autoneg_inband(mode))
- sja1105_sgmii_pcs_force_speed(priv, speed);
-
sja1105_inhibit_tx(priv, BIT(port), false);
}
@@ -1140,44 +1187,16 @@ static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
mii->xmii_mode[port] == XMII_MODE_SGMII)
phylink_set(mask, 1000baseT_Full);
+ if (priv->info->supports_2500basex[port]) {
+ phylink_set(mask, 2500baseT_Full);
+ phylink_set(mask, 2500baseX_Full);
+ }
bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
bitmap_and(state->advertising, state->advertising, mask,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static int sja1105_mac_pcs_get_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
-{
- struct sja1105_private *priv = ds->priv;
- int ais;
-
- /* Read the vendor-specific AUTONEG_INTR_STATUS register */
- ais = sja1105_sgmii_read(priv, SJA1105_AIS);
- if (ais < 0)
- return ais;
-
- switch (SJA1105_AIS_SPEED(ais)) {
- case 0:
- state->speed = SPEED_10;
- break;
- case 1:
- state->speed = SPEED_100;
- break;
- case 2:
- state->speed = SPEED_1000;
- break;
- default:
- dev_err(ds->dev, "Invalid SGMII PCS speed %lu\n",
- SJA1105_AIS_SPEED(ais));
- }
- state->duplex = SJA1105_AIS_DUPLEX_MODE(ais);
- state->an_complete = SJA1105_AIS_COMPLETE(ais);
- state->link = SJA1105_AIS_LINK_STATUS(ais);
-
- return 0;
-}
-
static int
sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port,
const struct sja1105_l2_lookup_entry *requested)
@@ -1636,7 +1655,7 @@ static int sja1105_bridge_member(struct dsa_switch *ds, int port,
l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ for (i = 0; i < ds->num_ports; i++) {
/* Add this port to the forwarding matrix of the
* other ports in the same bridge, and vice versa.
*/
@@ -1834,12 +1853,12 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
{
struct ptp_system_timestamp ptp_sts_before;
struct ptp_system_timestamp ptp_sts_after;
+ int speed_mbps[SJA1105_MAX_NUM_PORTS];
+ u16 bmcr[SJA1105_MAX_NUM_PORTS] = {0};
struct sja1105_mac_config_entry *mac;
- int speed_mbps[SJA1105_NUM_PORTS];
struct dsa_switch *ds = priv->ds;
s64 t1, t2, t3, t4;
s64 t12, t34;
- u16 bmcr = 0;
int rc, i;
s64 now;
@@ -1852,13 +1871,16 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
* switch wants to see in the static config in order to allow us to
* change it through the dynamic interface later.
*/
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
- speed_mbps[i] = sja1105_speed[mac[i].speed];
- mac[i].speed = SJA1105_SPEED_AUTO;
- }
+ for (i = 0; i < ds->num_ports; i++) {
+ u32 reg_addr = mdiobus_c45_addr(MDIO_MMD_VEND2, MDIO_CTRL1);
+
+ speed_mbps[i] = sja1105_port_speed_to_ethtool(priv,
+ mac[i].speed);
+ mac[i].speed = priv->info->port_speed[SJA1105_SPEED_AUTO];
- if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT))
- bmcr = sja1105_sgmii_read(priv, MII_BMCR);
+ if (priv->xpcs[i])
+ bmcr[i] = mdiobus_read(priv->mdio_pcs, i, reg_addr);
+ }
/* No PTP operations can run right now */
mutex_lock(&priv->ptp_data.lock);
@@ -1900,32 +1922,46 @@ out_unlock_ptp:
* For these interfaces there is no dynamic configuration
* needed, since PLLs have same settings at all speeds.
*/
- rc = sja1105_clocking_setup(priv);
+ rc = priv->info->clocking_setup(priv);
if (rc < 0)
goto out;
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
+ for (i = 0; i < ds->num_ports; i++) {
+ struct dw_xpcs *xpcs = priv->xpcs[i];
+ unsigned int mode;
+
rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
if (rc < 0)
goto out;
- }
- if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT)) {
- bool an_enabled = !!(bmcr & BMCR_ANENABLE);
+ if (!xpcs)
+ continue;
- sja1105_sgmii_pcs_config(priv, an_enabled, false);
+ if (bmcr[i] & BMCR_ANENABLE)
+ mode = MLO_AN_INBAND;
+ else if (priv->fixed_link[i])
+ mode = MLO_AN_FIXED;
+ else
+ mode = MLO_AN_PHY;
+
+ rc = xpcs_do_config(xpcs, priv->phy_mode[i], mode);
+ if (rc < 0)
+ goto out;
- if (!an_enabled) {
+ if (!phylink_autoneg_inband(mode)) {
int speed = SPEED_UNKNOWN;
- if (bmcr & BMCR_SPEED1000)
+ if (priv->phy_mode[i] == PHY_INTERFACE_MODE_2500BASEX)
+ speed = SPEED_2500;
+ else if (bmcr[i] & BMCR_SPEED1000)
speed = SPEED_1000;
- else if (bmcr & BMCR_SPEED100)
+ else if (bmcr[i] & BMCR_SPEED100)
speed = SPEED_100;
else
speed = SPEED_10;
- sja1105_sgmii_pcs_force_speed(priv, speed);
+ xpcs_link_up(&xpcs->pcs, mode, priv->phy_mode[i],
+ speed, DUPLEX_FULL);
}
}
@@ -2033,7 +2069,9 @@ static enum dsa_tag_protocol
sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
enum dsa_tag_protocol mp)
{
- return DSA_TAG_PROTO_SJA1105;
+ struct sja1105_private *priv = ds->priv;
+
+ return priv->info->tag_proto;
}
static int sja1105_find_free_subvlan(u16 *subvlan_map, bool pvid)
@@ -2273,6 +2311,7 @@ sja1105_build_bridge_vlans(struct sja1105_private *priv,
new_vlan[match].vlan_bc |= BIT(v->port);
if (!v->untagged)
new_vlan[match].tag_port |= BIT(v->port);
+ new_vlan[match].type_entry = SJA1110_VLAN_D_TAG;
}
return 0;
@@ -2295,6 +2334,7 @@ sja1105_build_dsa_8021q_vlans(struct sja1105_private *priv,
new_vlan[match].vlan_bc |= BIT(v->port);
if (!v->untagged)
new_vlan[match].tag_port |= BIT(v->port);
+ new_vlan[match].type_entry = SJA1110_VLAN_D_TAG;
}
return 0;
@@ -2355,6 +2395,7 @@ static int sja1105_build_subvlans(struct sja1105_private *priv,
new_vlan[match].tag_port |= BIT(v->port);
/* But it's always tagged towards the CPU */
new_vlan[match].tag_port |= BIT(upstream);
+ new_vlan[match].type_entry = SJA1110_VLAN_D_TAG;
/* The Retagging Table generates packet *clones* with
* the new VLAN. This is a very odd hardware quirk
@@ -2522,6 +2563,7 @@ sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
if (!tmp->untagged)
new_vlan[match].tag_port |= BIT(tmp->port);
new_vlan[match].tag_port |= BIT(upstream);
+ new_vlan[match].type_entry = SJA1110_VLAN_D_TAG;
/* Deny egress of @rx_vid towards our front-panel port.
* This will force the switch to drop it, and we'll see
* only the re-retagged packets (having the original,
@@ -2612,7 +2654,7 @@ out:
static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify)
{
- u16 subvlan_map[SJA1105_NUM_PORTS][DSA_8021Q_N_SUBVLAN];
+ u16 subvlan_map[SJA1105_MAX_NUM_PORTS][DSA_8021Q_N_SUBVLAN];
struct sja1105_retagging_entry *new_retagging;
struct sja1105_vlan_lookup_entry *new_vlan;
struct sja1105_table *table;
@@ -2817,11 +2859,22 @@ static int sja1105_vlan_add_one(struct dsa_switch *ds, int port, u16 vid,
bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
struct sja1105_bridge_vlan *v;
- list_for_each_entry(v, vlan_list, list)
- if (v->port == port && v->vid == vid &&
- v->untagged == untagged && v->pvid == pvid)
+ list_for_each_entry(v, vlan_list, list) {
+ if (v->port == port && v->vid == vid) {
/* Already added */
- return 0;
+ if (v->untagged == untagged && v->pvid == pvid)
+ /* Nothing changed */
+ return 0;
+
+ /* It's the same VLAN, but some of the flags changed
+ * and the user did not bother to delete it first.
+ * Update it and trigger sja1105_build_vlan_table.
+ */
+ v->untagged = untagged;
+ v->pvid = pvid;
+ return 1;
+ }
+ }
v = kzalloc(sizeof(*v), GFP_KERNEL);
if (!v) {
@@ -2948,11 +3001,10 @@ static const struct dsa_8021q_ops sja1105_dsa_8021q_ops = {
*/
static int sja1105_setup(struct dsa_switch *ds)
{
- struct sja1105_dt_port ports[SJA1105_NUM_PORTS];
struct sja1105_private *priv = ds->priv;
int rc;
- rc = sja1105_parse_dt(priv, ports);
+ rc = sja1105_parse_dt(priv);
if (rc < 0) {
dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
return rc;
@@ -2961,7 +3013,7 @@ static int sja1105_setup(struct dsa_switch *ds)
/* Error out early if internal delays are required through DT
* and we can't apply them.
*/
- rc = sja1105_parse_rgmii_delays(priv, ports);
+ rc = sja1105_parse_rgmii_delays(priv);
if (rc < 0) {
dev_err(ds->dev, "RGMII delay not supported\n");
return rc;
@@ -2972,17 +3024,25 @@ static int sja1105_setup(struct dsa_switch *ds)
dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
return rc;
}
+
+ rc = sja1105_mdiobus_register(ds);
+ if (rc < 0) {
+ dev_err(ds->dev, "Failed to register MDIO bus: %pe\n",
+ ERR_PTR(rc));
+ goto out_ptp_clock_unregister;
+ }
+
/* Create and send configuration down to device */
- rc = sja1105_static_config_load(priv, ports);
+ rc = sja1105_static_config_load(priv);
if (rc < 0) {
dev_err(ds->dev, "Failed to load static config: %d\n", rc);
- return rc;
+ goto out_mdiobus_unregister;
}
/* Configure the CGU (PHY link modes and speeds) */
- rc = sja1105_clocking_setup(priv);
+ rc = priv->info->clocking_setup(priv);
if (rc < 0) {
dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
- return rc;
+ goto out_static_config_free;
}
/* On SJA1105, VLAN filtering per se is always enabled in hardware.
* The only thing we can do to disable it is lie about what the 802.1Q
@@ -3003,7 +3063,7 @@ static int sja1105_setup(struct dsa_switch *ds)
rc = sja1105_devlink_setup(ds);
if (rc < 0)
- return rc;
+ goto out_static_config_free;
/* The DSA/switchdev model brings up switch ports in standalone mode by
* default, and that means vlan_filtering is 0 since they're not under
@@ -3012,6 +3072,19 @@ static int sja1105_setup(struct dsa_switch *ds)
rtnl_lock();
rc = sja1105_setup_8021q_tagging(ds, true);
rtnl_unlock();
+ if (rc)
+ goto out_devlink_teardown;
+
+ return 0;
+
+out_devlink_teardown:
+ sja1105_devlink_teardown(ds);
+out_mdiobus_unregister:
+ sja1105_mdiobus_unregister(ds);
+out_ptp_clock_unregister:
+ sja1105_ptp_clock_unregister(ds);
+out_static_config_free:
+ sja1105_static_config_free(&priv->static_config);
return rc;
}
@@ -3022,7 +3095,7 @@ static void sja1105_teardown(struct dsa_switch *ds)
struct sja1105_bridge_vlan *v, *n;
int port;
- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ for (port = 0; port < ds->num_ports; port++) {
struct sja1105_port *sp = &priv->ports[port];
if (!dsa_is_user_port(ds, port))
@@ -3225,6 +3298,7 @@ static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
{
struct sja1105_general_params_entry *general_params;
struct sja1105_mac_config_entry *mac;
+ struct dsa_switch *ds = priv->ds;
struct sja1105_table *table;
bool already_enabled;
u64 new_mirr_port;
@@ -3235,7 +3309,7 @@ static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
- already_enabled = (general_params->mirr_port != SJA1105_NUM_PORTS);
+ already_enabled = (general_params->mirr_port != ds->num_ports);
if (already_enabled && enabled && general_params->mirr_port != to) {
dev_err(priv->ds->dev,
"Delete mirroring rules towards port %llu first\n",
@@ -3249,7 +3323,7 @@ static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
int port;
/* Anybody still referencing mirr_port? */
- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ for (port = 0; port < ds->num_ports; port++) {
if (mac[port].ing_mirr || mac[port].egr_mirr) {
keep = true;
break;
@@ -3257,7 +3331,7 @@ static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
}
/* Unset already_enabled for next time */
if (!keep)
- new_mirr_port = SJA1105_NUM_PORTS;
+ new_mirr_port = ds->num_ports;
}
if (new_mirr_port != general_params->mirr_port) {
general_params->mirr_port = new_mirr_port;
@@ -3468,7 +3542,6 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.port_change_mtu = sja1105_change_mtu,
.port_max_mtu = sja1105_get_max_mtu,
.phylink_validate = sja1105_phylink_validate,
- .phylink_mac_link_state = sja1105_mac_pcs_get_state,
.phylink_mac_config = sja1105_mac_config,
.phylink_mac_link_up = sja1105_mac_link_up,
.phylink_mac_link_down = sja1105_mac_link_down,
@@ -3640,7 +3713,7 @@ static int sja1105_probe(struct spi_device *spi)
return -ENOMEM;
ds->dev = dev;
- ds->num_ports = SJA1105_NUM_PORTS;
+ ds->num_ports = priv->info->num_ports;
ds->ops = &sja1105_switch_ops;
ds->priv = priv;
priv->ds = ds;
@@ -3674,12 +3747,14 @@ static int sja1105_probe(struct spi_device *spi)
priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
sizeof(struct sja1105_cbs_entry),
GFP_KERNEL);
- if (!priv->cbs)
- return -ENOMEM;
+ if (!priv->cbs) {
+ rc = -ENOMEM;
+ goto out_unregister_switch;
+ }
}
/* Connections between dsa_port and sja1105_port */
- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ for (port = 0; port < ds->num_ports; port++) {
struct sja1105_port *sp = &priv->ports[port];
struct dsa_port *dp = dsa_to_port(ds, port);
struct net_device *slave;
@@ -3700,7 +3775,7 @@ static int sja1105_probe(struct spi_device *spi)
dev_err(ds->dev,
"failed to create deferred xmit thread: %d\n",
rc);
- goto out;
+ goto out_destroy_workers;
}
skb_queue_head_init(&sp->xmit_queue);
sp->xmit_tpid = ETH_P_SJA1105;
@@ -3710,7 +3785,8 @@ static int sja1105_probe(struct spi_device *spi)
}
return 0;
-out:
+
+out_destroy_workers:
while (port-- > 0) {
struct sja1105_port *sp = &priv->ports[port];
@@ -3719,6 +3795,10 @@ out:
kthread_destroy_worker(sp->xmit_worker);
}
+
+out_unregister_switch:
+ dsa_unregister_switch(ds);
+
return rc;
}
@@ -3737,6 +3817,10 @@ static const struct of_device_id sja1105_dt_ids[] = {
{ .compatible = "nxp,sja1105q", .data = &sja1105q_info },
{ .compatible = "nxp,sja1105r", .data = &sja1105r_info },
{ .compatible = "nxp,sja1105s", .data = &sja1105s_info },
+ { .compatible = "nxp,sja1110a", .data = &sja1110a_info },
+ { .compatible = "nxp,sja1110b", .data = &sja1110b_info },
+ { .compatible = "nxp,sja1110c", .data = &sja1110c_info },
+ { .compatible = "nxp,sja1110d", .data = &sja1110d_info },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c
new file mode 100644
index 000000000000..19aea8fb76f6
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_mdio.c
@@ -0,0 +1,543 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2021, NXP Semiconductors
+ */
+#include <linux/pcs/pcs-xpcs.h>
+#include <linux/of_mdio.h>
+#include "sja1105.h"
+
+#define SJA1110_PCS_BANK_REG SJA1110_SPI_ADDR(0x3fc)
+
+int sja1105_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ u64 addr;
+ u32 tmp;
+ u16 mmd;
+ int rc;
+
+ if (!(reg & MII_ADDR_C45))
+ return -EINVAL;
+
+ mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ addr = (mmd << 16) | (reg & GENMASK(15, 0));
+
+ if (mmd != MDIO_MMD_VEND1 && mmd != MDIO_MMD_VEND2)
+ return 0xffff;
+
+ if (mmd == MDIO_MMD_VEND2 && (reg & GENMASK(15, 0)) == MII_PHYSID1)
+ return NXP_SJA1105_XPCS_ID >> 16;
+ if (mmd == MDIO_MMD_VEND2 && (reg & GENMASK(15, 0)) == MII_PHYSID2)
+ return NXP_SJA1105_XPCS_ID & GENMASK(15, 0);
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ return tmp & 0xffff;
+}
+
+int sja1105_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ u64 addr;
+ u32 tmp;
+ u16 mmd;
+
+ if (!(reg & MII_ADDR_C45))
+ return -EINVAL;
+
+ mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ addr = (mmd << 16) | (reg & GENMASK(15, 0));
+ tmp = val;
+
+ if (mmd != MDIO_MMD_VEND1 && mmd != MDIO_MMD_VEND2)
+ return -EINVAL;
+
+ return sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+}
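+
For reference, a standalone sketch (not part of the patch) of the clause-45 address decomposition performed above; the MII_ADDR_C45 and MII_DEVADDR_C45_SHIFT values are copied from include/linux/mdio.h. Note that VEND2 register 0x8000 decodes to SPI address 0x1f8000, matching the DIGITAL_CONTROL_1 address (1f8000h) documented in the sja1105_sgmii.h header deleted later in this patch:

#include <stdint.h>
#include <stdio.h>

#define MII_ADDR_C45		(1 << 30)
#define MII_DEVADDR_C45_SHIFT	16

int main(void)
{
	/* MMD 0x1f (MDIO_MMD_VEND2), register 0x8000 (DIGITAL_CONTROL_1) */
	uint32_t reg = MII_ADDR_C45 | (0x1f << MII_DEVADDR_C45_SHIFT) | 0x8000;
	uint16_t mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
	uint64_t addr = ((uint64_t)mmd << 16) | (reg & 0xffff);

	printf("mmd=0x%x addr=0x%llx\n", mmd, (unsigned long long)addr);
	return 0;
}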
+
+int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ int offset, bank;
+ u64 addr;
+ u32 tmp;
+ u16 mmd;
+ int rc;
+
+ if (!(reg & MII_ADDR_C45))
+ return -EINVAL;
+
+ if (regs->pcs_base[phy] == SJA1105_RSV_ADDR)
+ return -ENODEV;
+
+ mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ addr = (mmd << 16) | (reg & GENMASK(15, 0));
+
+ if (mmd == MDIO_MMD_VEND2 && (reg & GENMASK(15, 0)) == MII_PHYSID1)
+ return NXP_SJA1110_XPCS_ID >> 16;
+ if (mmd == MDIO_MMD_VEND2 && (reg & GENMASK(15, 0)) == MII_PHYSID2)
+ return NXP_SJA1110_XPCS_ID & GENMASK(15, 0);
+
+ bank = addr >> 8;
+ offset = addr & GENMASK(7, 0);
+
+	/* This addressing scheme reserves register 0xff for the bank address
+	 * register, so that offset can never be addressed directly.
+	 */
+ if (WARN_ON(offset == 0xff))
+ return -ENODEV;
+
+ tmp = bank;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE,
+ regs->pcs_base[phy] + SJA1110_PCS_BANK_REG,
+ &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, regs->pcs_base[phy] + offset,
+ &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ return tmp & 0xffff;
+}
+
+int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ int offset, bank;
+ u64 addr;
+ u32 tmp;
+ u16 mmd;
+ int rc;
+
+ if (!(reg & MII_ADDR_C45))
+ return -EINVAL;
+
+ if (regs->pcs_base[phy] == SJA1105_RSV_ADDR)
+ return -ENODEV;
+
+ mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ addr = (mmd << 16) | (reg & GENMASK(15, 0));
+
+ bank = addr >> 8;
+ offset = addr & GENMASK(7, 0);
+
+	/* This addressing scheme reserves register 0xff for the bank address
+	 * register, so that offset can never be addressed directly.
+	 */
+ if (WARN_ON(offset == 0xff))
+ return -ENODEV;
+
+ tmp = bank;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE,
+ regs->pcs_base[phy] + SJA1110_PCS_BANK_REG,
+ &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ tmp = val;
+
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->pcs_base[phy] + offset,
+ &tmp, NULL);
+}
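+
For reference, a standalone sketch (not part of the patch) of the bank/offset split used by the SJA1110 accessors above: the low 8 bits index into a 256-register window, and the remaining address bits select the bank written to SJA1110_PCS_BANK_REG first:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* MDIO_MMD_VEND2 (0x1f), register 0 (MII_BMCR) */
	uint64_t addr = (0x1fULL << 16) | 0x0000;
	int bank = addr >> 8;		/* 0x1f00, written to the bank reg */
	int offset = addr & 0xff;	/* 0x00, then accessed directly */

	printf("bank=0x%x offset=0x%x\n", bank, offset);
	return 0;
}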
+
+enum sja1105_mdio_opcode {
+ SJA1105_C45_ADDR = 0,
+ SJA1105_C22 = 1,
+ SJA1105_C45_DATA = 2,
+ SJA1105_C45_DATA_AUTOINC = 3,
+};
+
+static u64 sja1105_base_t1_encode_addr(struct sja1105_private *priv,
+ int phy, enum sja1105_mdio_opcode op,
+ int xad)
+{
+ const struct sja1105_regs *regs = priv->info->regs;
+
+ return regs->mdio_100base_t1 | (phy << 7) | (op << 5) | (xad << 0);
+}
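+
For reference, a standalone sketch (not part of the patch) of the command word built by sja1105_base_t1_encode_addr(): PHY address in bits [11:7], opcode in bits [6:5], register/devad in bits [4:0]. The base address is taken as 0 here purely for illustration:

#include <stdio.h>

enum { C45_ADDR = 0, C22 = 1, C45_DATA = 2, C45_DATA_AUTOINC = 3 };

static unsigned int encode(unsigned int base, int phy, int op, int xad)
{
	return base | (phy << 7) | (op << 5) | (xad << 0);
}

int main(void)
{
	/* clause-22 access to register 1 (MII_BMSR) on PHY address 3 */
	printf("0x%03x\n", encode(0, 3, C22, 1));	/* prints 0x1a1 */
	return 0;
}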
+
+static int sja1105_base_t1_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ u64 addr;
+ u32 tmp;
+ int rc;
+
+ if (reg & MII_ADDR_C45) {
+ u16 mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR,
+ mmd);
+
+ tmp = reg & MII_REGADDR_C45_MASK;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA,
+ mmd);
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ return tmp & 0xffff;
+ }
+
+ /* Clause 22 read */
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ return tmp & 0xffff;
+}
+
+static int sja1105_base_t1_mdio_write(struct mii_bus *bus, int phy, int reg,
+ u16 val)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ u64 addr;
+ u32 tmp;
+ int rc;
+
+ if (reg & MII_ADDR_C45) {
+ u16 mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR,
+ mmd);
+
+ tmp = reg & MII_REGADDR_C45_MASK;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA,
+ mmd);
+
+ tmp = val & 0xffff;
+
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+ }
+
+ /* Clause 22 write */
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
+
+ tmp = val & 0xffff;
+
+ return sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+}
+
+static int sja1105_base_tx_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 tmp;
+ int rc;
+
+ rc = sja1105_xfer_u32(priv, SPI_READ, regs->mdio_100base_tx + reg,
+ &tmp, NULL);
+ if (rc < 0)
+ return rc;
+
+ return tmp & 0xffff;
+}
+
+static int sja1105_base_tx_mdio_write(struct mii_bus *bus, int phy, int reg,
+ u16 val)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 tmp = val;
+
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->mdio_100base_tx + reg,
+ &tmp, NULL);
+}
+
+static int sja1105_mdiobus_base_tx_register(struct sja1105_private *priv,
+ struct device_node *mdio_node)
+{
+ struct sja1105_mdio_private *mdio_priv;
+ struct device_node *np;
+ struct mii_bus *bus;
+ int rc = 0;
+
+ np = of_find_compatible_node(mdio_node, NULL,
+ "nxp,sja1110-base-tx-mdio");
+ if (!np)
+ return 0;
+
+ if (!of_device_is_available(np))
+ goto out_put_np;
+
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
+ if (!bus) {
+ rc = -ENOMEM;
+ goto out_put_np;
+ }
+
+ bus->name = "SJA1110 100base-TX MDIO bus";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-base-tx",
+ dev_name(priv->ds->dev));
+ bus->read = sja1105_base_tx_mdio_read;
+ bus->write = sja1105_base_tx_mdio_write;
+ bus->parent = priv->ds->dev;
+ mdio_priv = bus->priv;
+ mdio_priv->priv = priv;
+
+ rc = of_mdiobus_register(bus, np);
+ if (rc) {
+ mdiobus_free(bus);
+ goto out_put_np;
+ }
+
+ priv->mdio_base_tx = bus;
+
+out_put_np:
+ of_node_put(np);
+
+ return rc;
+}
+
+static void sja1105_mdiobus_base_tx_unregister(struct sja1105_private *priv)
+{
+ if (!priv->mdio_base_tx)
+ return;
+
+ mdiobus_unregister(priv->mdio_base_tx);
+ mdiobus_free(priv->mdio_base_tx);
+ priv->mdio_base_tx = NULL;
+}
+
+static int sja1105_mdiobus_base_t1_register(struct sja1105_private *priv,
+ struct device_node *mdio_node)
+{
+ struct sja1105_mdio_private *mdio_priv;
+ struct device_node *np;
+ struct mii_bus *bus;
+ int rc = 0;
+
+ np = of_find_compatible_node(mdio_node, NULL,
+ "nxp,sja1110-base-t1-mdio");
+ if (!np)
+ return 0;
+
+ if (!of_device_is_available(np))
+ goto out_put_np;
+
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
+ if (!bus) {
+ rc = -ENOMEM;
+ goto out_put_np;
+ }
+
+ bus->name = "SJA1110 100base-T1 MDIO bus";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-base-t1",
+ dev_name(priv->ds->dev));
+ bus->read = sja1105_base_t1_mdio_read;
+ bus->write = sja1105_base_t1_mdio_write;
+ bus->parent = priv->ds->dev;
+ mdio_priv = bus->priv;
+ mdio_priv->priv = priv;
+
+ rc = of_mdiobus_register(bus, np);
+ if (rc) {
+ mdiobus_free(bus);
+ goto out_put_np;
+ }
+
+ priv->mdio_base_t1 = bus;
+
+out_put_np:
+ of_node_put(np);
+
+ return rc;
+}
+
+static void sja1105_mdiobus_base_t1_unregister(struct sja1105_private *priv)
+{
+ if (!priv->mdio_base_t1)
+ return;
+
+ mdiobus_unregister(priv->mdio_base_t1);
+ mdiobus_free(priv->mdio_base_t1);
+ priv->mdio_base_t1 = NULL;
+}
+
+static int sja1105_mdiobus_pcs_register(struct sja1105_private *priv)
+{
+ struct sja1105_mdio_private *mdio_priv;
+ struct dsa_switch *ds = priv->ds;
+ struct mii_bus *bus;
+ int rc = 0;
+ int port;
+
+ if (!priv->info->pcs_mdio_read || !priv->info->pcs_mdio_write)
+ return 0;
+
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = "SJA1105 PCS MDIO bus";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-pcs",
+ dev_name(ds->dev));
+ bus->read = priv->info->pcs_mdio_read;
+ bus->write = priv->info->pcs_mdio_write;
+ bus->parent = ds->dev;
+ /* There is no PHY on this MDIO bus => mask out all PHY addresses
+ * from auto probing.
+ */
+ bus->phy_mask = ~0;
+ mdio_priv = bus->priv;
+ mdio_priv->priv = priv;
+
+ rc = mdiobus_register(bus);
+ if (rc) {
+ mdiobus_free(bus);
+ return rc;
+ }
+
+ for (port = 0; port < ds->num_ports; port++) {
+ struct mdio_device *mdiodev;
+ struct dw_xpcs *xpcs;
+
+ if (dsa_is_unused_port(ds, port))
+ continue;
+
+ if (priv->phy_mode[port] != PHY_INTERFACE_MODE_SGMII &&
+ priv->phy_mode[port] != PHY_INTERFACE_MODE_2500BASEX)
+ continue;
+
+ mdiodev = mdio_device_create(bus, port);
+ if (IS_ERR(mdiodev)) {
+ rc = PTR_ERR(mdiodev);
+ goto out_pcs_free;
+ }
+
+ xpcs = xpcs_create(mdiodev, priv->phy_mode[port]);
+ if (IS_ERR(xpcs)) {
+ rc = PTR_ERR(xpcs);
+ goto out_pcs_free;
+ }
+
+ priv->xpcs[port] = xpcs;
+ }
+
+ priv->mdio_pcs = bus;
+
+ return 0;
+
+out_pcs_free:
+ for (port = 0; port < ds->num_ports; port++) {
+ if (!priv->xpcs[port])
+ continue;
+
+ mdio_device_free(priv->xpcs[port]->mdiodev);
+ xpcs_destroy(priv->xpcs[port]);
+ priv->xpcs[port] = NULL;
+ }
+
+ mdiobus_unregister(bus);
+ mdiobus_free(bus);
+
+ return rc;
+}
+
+static void sja1105_mdiobus_pcs_unregister(struct sja1105_private *priv)
+{
+ struct dsa_switch *ds = priv->ds;
+ int port;
+
+ if (!priv->mdio_pcs)
+ return;
+
+ for (port = 0; port < ds->num_ports; port++) {
+ if (!priv->xpcs[port])
+ continue;
+
+ mdio_device_free(priv->xpcs[port]->mdiodev);
+ xpcs_destroy(priv->xpcs[port]);
+ priv->xpcs[port] = NULL;
+ }
+
+ mdiobus_unregister(priv->mdio_pcs);
+ mdiobus_free(priv->mdio_pcs);
+ priv->mdio_pcs = NULL;
+}
+
+int sja1105_mdiobus_register(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ struct device_node *switch_node = ds->dev->of_node;
+ struct device_node *mdio_node;
+ int rc;
+
+ rc = sja1105_mdiobus_pcs_register(priv);
+ if (rc)
+ return rc;
+
+ mdio_node = of_get_child_by_name(switch_node, "mdios");
+ if (!mdio_node)
+ return 0;
+
+ if (!of_device_is_available(mdio_node))
+ goto out_put_mdio_node;
+
+ if (regs->mdio_100base_tx != SJA1105_RSV_ADDR) {
+ rc = sja1105_mdiobus_base_tx_register(priv, mdio_node);
+ if (rc)
+ goto err_put_mdio_node;
+ }
+
+ if (regs->mdio_100base_t1 != SJA1105_RSV_ADDR) {
+ rc = sja1105_mdiobus_base_t1_register(priv, mdio_node);
+ if (rc)
+ goto err_free_base_tx_mdiobus;
+ }
+
+out_put_mdio_node:
+ of_node_put(mdio_node);
+
+ return 0;
+
+err_free_base_tx_mdiobus:
+ sja1105_mdiobus_base_tx_unregister(priv);
+err_put_mdio_node:
+ of_node_put(mdio_node);
+ sja1105_mdiobus_pcs_unregister(priv);
+
+ return rc;
+}
+
+void sja1105_mdiobus_unregister(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ sja1105_mdiobus_base_t1_unregister(priv);
+ sja1105_mdiobus_base_tx_unregister(priv);
+ sja1105_mdiobus_pcs_unregister(priv);
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index 0bc566b9e958..691f6dd7e669 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -79,6 +79,7 @@ static int sja1105_change_rxtstamping(struct sja1105_private *priv,
priv->tagger_data.stampable_skb = NULL;
}
ptp_cancel_worker_sync(ptp_data->clock);
+ skb_queue_purge(&ptp_data->skb_txtstamp_queue);
skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
return sja1105_static_config_reload(priv, SJA1105_RX_HWTSTAMPING);
@@ -397,7 +398,7 @@ static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp)
*shwt = (struct skb_shared_hwtstamps) {0};
- ts = SJA1105_SKB_CB(skb)->meta_tstamp;
+ ts = SJA1105_SKB_CB(skb)->tstamp;
ts = sja1105_tstamp_reconstruct(ds, ticks, ts);
shwt->hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
@@ -413,9 +414,7 @@ static long sja1105_rxtstamp_work(struct ptp_clock_info *ptp)
return -1;
}
-/* Called from dsa_skb_defer_rx_timestamp */
-bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
- struct sk_buff *skb, unsigned int type)
+bool sja1105_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
@@ -431,6 +430,89 @@ bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
return true;
}
+bool sja1110_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
+ u64 ts = SJA1105_SKB_CB(skb)->tstamp;
+
+ *shwt = (struct skb_shared_hwtstamps) {0};
+
+ shwt->hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(ts));
+
+ /* Don't defer */
+ return false;
+}
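+
For reference, a standalone sketch (not part of the patch), assuming the 8 ns tick period that sja1105_ticks_to_ns() is understood to use: the SJA1110 delivers the complete RX timestamp in-band, so the conversion above is a plain multiply with no deferred reconstruction step:

#include <stdint.h>
#include <stdio.h>

#define TICK_NS 8	/* assumed SJA1105/SJA1110 PTP tick period */

int main(void)
{
	uint64_t ticks = 125000000;	/* one second worth of 8 ns ticks */

	printf("%llu ns\n", (unsigned long long)(ticks * TICK_NS));
	return 0;
}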
+
+/* Called from dsa_skb_defer_rx_timestamp */
+bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type)
+{
+ struct sja1105_private *priv = ds->priv;
+
+ return priv->info->rxtstamp(ds, port, skb);
+}
+
+void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, u8 ts_id,
+ enum sja1110_meta_tstamp dir, u64 tstamp)
+{
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct sk_buff *skb, *skb_tmp, *skb_match = NULL;
+ struct skb_shared_hwtstamps shwt = {0};
+
+ /* We don't care about RX timestamps on the CPU port */
+ if (dir == SJA1110_META_TSTAMP_RX)
+ return;
+
+ spin_lock(&ptp_data->skb_txtstamp_queue.lock);
+
+ skb_queue_walk_safe(&ptp_data->skb_txtstamp_queue, skb, skb_tmp) {
+ if (SJA1105_SKB_CB(skb)->ts_id != ts_id)
+ continue;
+
+ __skb_unlink(skb, &ptp_data->skb_txtstamp_queue);
+ skb_match = skb;
+
+ break;
+ }
+
+ spin_unlock(&ptp_data->skb_txtstamp_queue.lock);
+
+ if (WARN_ON(!skb_match))
+ return;
+
+ shwt.hwtstamp = ns_to_ktime(sja1105_ticks_to_ns(tstamp));
+ skb_complete_tx_timestamp(skb_match, &shwt);
+}
+EXPORT_SYMBOL_GPL(sja1110_process_meta_tstamp);
+
+/* In addition to cloning the skb, which is done by the common
+ * sja1105_port_txtstamp, we also need to generate a timestamp ID and queue
+ * the clone on the TX timestamping queue.
+ */
+void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
+{
+ struct sk_buff *clone = SJA1105_SKB_CB(skb)->clone;
+ struct sja1105_private *priv = ds->priv;
+ struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
+ struct sja1105_port *sp = &priv->ports[port];
+ u8 ts_id;
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ spin_lock(&sp->data->meta_lock);
+
+ ts_id = sp->data->ts_id;
+ /* Deal automatically with 8-bit wraparound */
+ sp->data->ts_id++;
+
+ SJA1105_SKB_CB(clone)->ts_id = ts_id;
+
+ spin_unlock(&sp->data->meta_lock);
+
+ skb_queue_tail(&ptp_data->skb_txtstamp_queue, clone);
+}
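+
For reference, a standalone sketch (not part of the patch) of the "deal automatically with 8-bit wraparound" comment above: because ts_id is a u8, the increment wraps from 255 back to 0 with no explicit modulo:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t ts_id = 254;
	int i;

	for (i = 0; i < 4; i++)
		printf("%u ", ts_id++);	/* prints: 254 255 0 1 */
	printf("\n");
	return 0;
}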
+
/* Called from dsa_skb_tx_timestamp. This callback is just to clone
* the skb and have it available in SJA1105_SKB_CB in the .port_deferred_xmit
* callback, where we will timestamp it synchronously.
@@ -449,6 +531,9 @@ void sja1105_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
return;
SJA1105_SKB_CB(skb)->clone = clone;
+
+ if (priv->info->txtstamp)
+ priv->info->txtstamp(ds, port, skb);
}
static int sja1105_ptp_reset(struct dsa_switch *ds)
@@ -865,7 +950,10 @@ int sja1105_ptp_clock_register(struct dsa_switch *ds)
.n_per_out = 1,
};
+ /* Only used on SJA1105 */
skb_queue_head_init(&ptp_data->skb_rxtstamp_queue);
+ /* Only used on SJA1110 */
+ skb_queue_head_init(&ptp_data->skb_txtstamp_queue);
spin_lock_init(&tagger_data->meta_lock);
ptp_data->clock = ptp_clock_register(&ptp_data->caps, ds->dev);
@@ -890,6 +978,7 @@ void sja1105_ptp_clock_unregister(struct dsa_switch *ds)
del_timer_sync(&ptp_data->extts_timer);
ptp_cancel_worker_sync(ptp_data->clock);
+ skb_queue_purge(&ptp_data->skb_txtstamp_queue);
skb_queue_purge(&ptp_data->skb_rxtstamp_queue);
ptp_clock_unregister(ptp_data->clock);
ptp_data->clock = NULL;
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index 34f97f58a355..3c874bb4c17b 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -75,7 +75,12 @@ struct sja1105_ptp_cmd {
struct sja1105_ptp_data {
struct timer_list extts_timer;
+ /* Used only on SJA1105 to reconstruct partial timestamps */
struct sk_buff_head skb_rxtstamp_queue;
+ /* Used on SJA1110 where meta frames are generated only for
+ * 2-step TX timestamps
+ */
+ struct sk_buff_head skb_txtstamp_queue;
struct ptp_clock_info caps;
struct ptp_clock *clock;
struct sja1105_ptp_cmd cmd;
@@ -122,6 +127,10 @@ int __sja1105_ptp_adjtime(struct dsa_switch *ds, s64 delta);
int sja1105_ptp_commit(struct dsa_switch *ds, struct sja1105_ptp_cmd *cmd,
sja1105_spi_rw_mode_t rw);
+bool sja1105_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
+bool sja1110_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
+void sja1110_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
+
#else
struct sja1105_ptp_cmd;
@@ -184,6 +193,10 @@ static inline int sja1105_ptp_commit(struct dsa_switch *ds,
#define sja1105_hwtstamp_set NULL
+#define sja1105_rxtstamp NULL
+#define sja1110_rxtstamp NULL
+#define sja1110_txtstamp NULL
+
#endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */
#endif /* _SJA1105_PTP_H */
diff --git a/drivers/net/dsa/sja1105/sja1105_sgmii.h b/drivers/net/dsa/sja1105/sja1105_sgmii.h
deleted file mode 100644
index 24d9bc046e70..000000000000
--- a/drivers/net/dsa/sja1105/sja1105_sgmii.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright 2020, NXP Semiconductors
- */
-#ifndef _SJA1105_SGMII_H
-#define _SJA1105_SGMII_H
-
-#define SJA1105_SGMII_PORT 4
-
-/* DIGITAL_CONTROL_1 (address 1f8000h) */
-#define SJA1105_DC1 0x8000
-#define SJA1105_DC1_VS_RESET BIT(15)
-#define SJA1105_DC1_REMOTE_LOOPBACK BIT(14)
-#define SJA1105_DC1_EN_VSMMD1 BIT(13)
-#define SJA1105_DC1_POWER_SAVE BIT(11)
-#define SJA1105_DC1_CLOCK_STOP_EN BIT(10)
-#define SJA1105_DC1_MAC_AUTO_SW BIT(9)
-#define SJA1105_DC1_INIT BIT(8)
-#define SJA1105_DC1_TX_DISABLE BIT(4)
-#define SJA1105_DC1_AUTONEG_TIMER_OVRR BIT(3)
-#define SJA1105_DC1_BYP_POWERUP BIT(1)
-#define SJA1105_DC1_PHY_MODE_CONTROL BIT(0)
-
-/* DIGITAL_CONTROL_2 register (address 1f80E1h) */
-#define SJA1105_DC2 0x80e1
-#define SJA1105_DC2_TX_POL_INV_DISABLE BIT(4)
-#define SJA1105_DC2_RX_POL_INV BIT(0)
-
-/* DIGITAL_ERROR_CNT register (address 1f80E2h) */
-#define SJA1105_DEC 0x80e2
-#define SJA1105_DEC_ICG_EC_ENA BIT(4)
-#define SJA1105_DEC_CLEAR_ON_READ BIT(0)
-
-/* AUTONEG_CONTROL register (address 1f8001h) */
-#define SJA1105_AC 0x8001
-#define SJA1105_AC_MII_CONTROL BIT(8)
-#define SJA1105_AC_SGMII_LINK BIT(4)
-#define SJA1105_AC_PHY_MODE BIT(3)
-#define SJA1105_AC_AUTONEG_MODE(x) (((x) << 1) & GENMASK(2, 1))
-#define SJA1105_AC_AUTONEG_MODE_SGMII SJA1105_AC_AUTONEG_MODE(2)
-
-/* AUTONEG_INTR_STATUS register (address 1f8002h) */
-#define SJA1105_AIS 0x8002
-#define SJA1105_AIS_LINK_STATUS(x) (!!((x) & BIT(4)))
-#define SJA1105_AIS_SPEED(x) (((x) & GENMASK(3, 2)) >> 2)
-#define SJA1105_AIS_DUPLEX_MODE(x) (!!((x) & BIT(1)))
-#define SJA1105_AIS_COMPLETE(x) (!!((x) & BIT(0)))
-
-/* DEBUG_CONTROL register (address 1f8005h) */
-#define SJA1105_DC 0x8005
-#define SJA1105_DC_SUPPRESS_LOS BIT(4)
-#define SJA1105_DC_RESTART_SYNC BIT(0)
-
-#endif
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index df3a780e9dcc..4aed16d23f21 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -7,8 +7,6 @@
#include <linux/packing.h>
#include "sja1105.h"
-#define SJA1105_SIZE_RESET_CMD 4
-
struct sja1105_chunk {
u8 *buf;
size_t len;
@@ -179,28 +177,30 @@ static int sja1105et_reset_cmd(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
const struct sja1105_regs *regs = priv->info->regs;
- u8 packed_buf[SJA1105_SIZE_RESET_CMD] = {0};
- const int size = SJA1105_SIZE_RESET_CMD;
- u64 cold_rst = 1;
-
- sja1105_pack(packed_buf, &cold_rst, 3, 3, size);
+ u32 cold_reset = BIT(3);
- return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgu, packed_buf,
- SJA1105_SIZE_RESET_CMD);
+ /* Cold reset */
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->rgu, &cold_reset, NULL);
}
static int sja1105pqrs_reset_cmd(struct dsa_switch *ds)
{
struct sja1105_private *priv = ds->priv;
const struct sja1105_regs *regs = priv->info->regs;
- u8 packed_buf[SJA1105_SIZE_RESET_CMD] = {0};
- const int size = SJA1105_SIZE_RESET_CMD;
- u64 cold_rst = 1;
+ u32 cold_reset = BIT(2);
+
+ /* Cold reset */
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->rgu, &cold_reset, NULL);
+}
- sja1105_pack(packed_buf, &cold_rst, 2, 2, size);
+static int sja1110_reset_cmd(struct dsa_switch *ds)
+{
+ struct sja1105_private *priv = ds->priv;
+ const struct sja1105_regs *regs = priv->info->regs;
+ u32 switch_reset = BIT(20);
- return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgu, packed_buf,
- SJA1105_SIZE_RESET_CMD);
+ /* Switch core reset */
+ return sja1105_xfer_u32(priv, SPI_WRITE, regs->rgu, &switch_reset, NULL);
}
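
For reference (not part of the patch): the reset request is a single RGU bit whose position is family-specific, which is why each family now has its own reset_cmd:

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	printf("SJA1105 E/T cold reset:     0x%08x\n", BIT(3));
	printf("SJA1105 P/Q/R/S cold reset: 0x%08x\n", BIT(2));
	printf("SJA1110 switch core reset:  0x%08x\n", BIT(20));
	return 0;
}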
int sja1105_inhibit_tx(const struct sja1105_private *priv,
@@ -281,7 +281,8 @@ int static_config_buf_prepare_for_upload(struct sja1105_private *priv,
char *final_header_ptr;
int crc_len;
- valid = sja1105_static_config_check_valid(config);
+ valid = sja1105_static_config_check_valid(config,
+ priv->info->max_frame_mem);
if (valid != SJA1105_CONFIG_OK) {
dev_err(&priv->spidev->dev,
sja1105_static_config_error_msg[valid]);
@@ -309,10 +310,10 @@ int static_config_buf_prepare_for_upload(struct sja1105_private *priv,
int sja1105_static_config_upload(struct sja1105_private *priv)
{
- unsigned long port_bitmap = GENMASK_ULL(SJA1105_NUM_PORTS - 1, 0);
struct sja1105_static_config *config = &priv->static_config;
const struct sja1105_regs *regs = priv->info->regs;
struct device *dev = &priv->spidev->dev;
+ struct dsa_switch *ds = priv->ds;
struct sja1105_status status;
int rc, retries = RETRIES;
u8 *config_buf;
@@ -333,7 +334,7 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
* Tx on all ports and waiting for current packet to drain.
* Otherwise, the PHY will see an unterminated Ethernet packet.
*/
- rc = sja1105_inhibit_tx(priv, port_bitmap, true);
+ rc = sja1105_inhibit_tx(priv, GENMASK_ULL(ds->num_ports - 1, 0), true);
if (rc < 0) {
dev_err(dev, "Failed to inhibit Tx on ports\n");
rc = -ENXIO;
@@ -403,7 +404,7 @@ out:
return rc;
}
-static struct sja1105_regs sja1105et_regs = {
+static const struct sja1105_regs sja1105et_regs = {
.device_id = 0x0,
.prod_id = 0x100BC3,
.status = 0x1,
@@ -435,9 +436,11 @@ static struct sja1105_regs sja1105et_regs = {
.ptpclkval = 0x18, /* Spans 0x18 to 0x19 */
.ptpclkrate = 0x1A,
.ptpclkcorp = 0x1D,
+ .mdio_100base_tx = SJA1105_RSV_ADDR,
+ .mdio_100base_t1 = SJA1105_RSV_ADDR,
};
-static struct sja1105_regs sja1105pqrs_regs = {
+static const struct sja1105_regs sja1105pqrs_regs = {
.device_id = 0x0,
.prod_id = 0x100BC3,
.status = 0x1,
@@ -449,7 +452,6 @@ static struct sja1105_regs sja1105pqrs_regs = {
.pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
.pad_mii_rx = {0x100801, 0x100803, 0x100805, 0x100807, 0x100809},
.pad_mii_id = {0x100810, 0x100811, 0x100812, 0x100813, 0x100814},
- .sgmii = 0x1F0000,
.rmii_pll1 = 0x10000A,
.cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
.stats[MAC] = {0x200, 0x202, 0x204, 0x206, 0x208},
@@ -473,6 +475,95 @@ static struct sja1105_regs sja1105pqrs_regs = {
.ptpclkrate = 0x1B,
.ptpclkcorp = 0x1E,
.ptpsyncts = 0x1F,
+ .mdio_100base_tx = SJA1105_RSV_ADDR,
+ .mdio_100base_t1 = SJA1105_RSV_ADDR,
+};
+
+static const struct sja1105_regs sja1110_regs = {
+ .device_id = SJA1110_SPI_ADDR(0x0),
+ .prod_id = SJA1110_ACU_ADDR(0xf00),
+ .status = SJA1110_SPI_ADDR(0x4),
+ .port_control = SJA1110_SPI_ADDR(0x50), /* actually INHIB_TX */
+ .vl_status = 0x10000,
+ .config = 0x020000,
+ .rgu = SJA1110_RGU_ADDR(0x100), /* Reset Control Register 0 */
+ /* Ports 2 and 3 are capable of xMII, but there isn't anything to
+ * configure in the CGU/ACU for them.
+ */
+ .pad_mii_tx = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR},
+ .pad_mii_rx = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR},
+ .pad_mii_id = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1110_ACU_ADDR(0x18), SJA1110_ACU_ADDR(0x28),
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR},
+ .rmii_pll1 = SJA1105_RSV_ADDR,
+ .cgu_idiv = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .stats[MAC] = {0x200, 0x202, 0x204, 0x206, 0x208, 0x20a,
+ 0x20c, 0x20e, 0x210, 0x212, 0x214},
+ .stats[HL1] = {0x400, 0x410, 0x420, 0x430, 0x440, 0x450,
+ 0x460, 0x470, 0x480, 0x490, 0x4a0},
+ .stats[HL2] = {0x600, 0x610, 0x620, 0x630, 0x640, 0x650,
+ 0x660, 0x670, 0x680, 0x690, 0x6a0},
+ .stats[ETHER] = {0x1400, 0x1418, 0x1430, 0x1448, 0x1460, 0x1478,
+ 0x1490, 0x14a8, 0x14c0, 0x14d8, 0x14f0},
+ .mii_tx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .mii_rx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .mii_ext_tx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .mii_ext_rx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .rgmii_tx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .rmii_ref_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
+ .rmii_ext_tx_clk = {SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR},
+ .ptpschtm = SJA1110_SPI_ADDR(0x54),
+ .ptppinst = SJA1110_SPI_ADDR(0x5c),
+ .ptppindur = SJA1110_SPI_ADDR(0x64),
+ .ptp_control = SJA1110_SPI_ADDR(0x68),
+ .ptpclkval = SJA1110_SPI_ADDR(0x6c),
+ .ptpclkrate = SJA1110_SPI_ADDR(0x74),
+ .ptpclkcorp = SJA1110_SPI_ADDR(0x80),
+ .ptpsyncts = SJA1110_SPI_ADDR(0x84),
+ .mdio_100base_tx = 0x1c2400,
+ .mdio_100base_t1 = 0x1c1000,
+ .pcs_base = {SJA1105_RSV_ADDR, 0x1c1400, 0x1c1800, 0x1c1c00, 0x1c2000,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR,
+ SJA1105_RSV_ADDR, SJA1105_RSV_ADDR, SJA1105_RSV_ADDR},
};
const struct sja1105_info sja1105e_info = {
@@ -481,15 +572,30 @@ const struct sja1105_info sja1105e_info = {
.static_ops = sja1105e_table_ops,
.dyn_ops = sja1105et_dyn_ops,
.qinq_tpid = ETH_P_8021Q,
+ .tag_proto = DSA_TAG_PROTO_SJA1105,
.can_limit_mcast_flood = false,
.ptp_ts_bits = 24,
.ptpegr_ts_bytes = 4,
+ .max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
+ .num_ports = SJA1105_NUM_PORTS,
.num_cbs_shapers = SJA1105ET_MAX_CBS_COUNT,
.reset_cmd = sja1105et_reset_cmd,
.fdb_add_cmd = sja1105et_fdb_add,
.fdb_del_cmd = sja1105et_fdb_del,
.ptp_cmd_packing = sja1105et_ptp_cmd_packing,
+ .rxtstamp = sja1105_rxtstamp,
+ .clocking_setup = sja1105_clocking_setup,
.regs = &sja1105et_regs,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 3,
+ [SJA1105_SPEED_100MBPS] = 2,
+ [SJA1105_SPEED_1000MBPS] = 1,
+ [SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
+ },
+ .supports_mii = {true, true, true, true, true},
+ .supports_rmii = {true, true, true, true, true},
+ .supports_rgmii = {true, true, true, true, true},
.name = "SJA1105E",
};
@@ -499,15 +605,30 @@ const struct sja1105_info sja1105t_info = {
.static_ops = sja1105t_table_ops,
.dyn_ops = sja1105et_dyn_ops,
.qinq_tpid = ETH_P_8021Q,
+ .tag_proto = DSA_TAG_PROTO_SJA1105,
.can_limit_mcast_flood = false,
.ptp_ts_bits = 24,
.ptpegr_ts_bytes = 4,
+ .max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
+ .num_ports = SJA1105_NUM_PORTS,
.num_cbs_shapers = SJA1105ET_MAX_CBS_COUNT,
.reset_cmd = sja1105et_reset_cmd,
.fdb_add_cmd = sja1105et_fdb_add,
.fdb_del_cmd = sja1105et_fdb_del,
.ptp_cmd_packing = sja1105et_ptp_cmd_packing,
+ .rxtstamp = sja1105_rxtstamp,
+ .clocking_setup = sja1105_clocking_setup,
.regs = &sja1105et_regs,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 3,
+ [SJA1105_SPEED_100MBPS] = 2,
+ [SJA1105_SPEED_1000MBPS] = 1,
+ [SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
+ },
+ .supports_mii = {true, true, true, true, true},
+ .supports_rmii = {true, true, true, true, true},
+ .supports_rgmii = {true, true, true, true, true},
.name = "SJA1105T",
};
@@ -517,16 +638,31 @@ const struct sja1105_info sja1105p_info = {
.static_ops = sja1105p_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.qinq_tpid = ETH_P_8021AD,
+ .tag_proto = DSA_TAG_PROTO_SJA1105,
.can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
+ .num_ports = SJA1105_NUM_PORTS,
.num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1105_rxtstamp,
+ .clocking_setup = sja1105_clocking_setup,
.regs = &sja1105pqrs_regs,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 3,
+ [SJA1105_SPEED_100MBPS] = 2,
+ [SJA1105_SPEED_1000MBPS] = 1,
+ [SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
+ },
+ .supports_mii = {true, true, true, true, true},
+ .supports_rmii = {true, true, true, true, true},
+ .supports_rgmii = {true, true, true, true, true},
.name = "SJA1105P",
};
@@ -536,16 +672,31 @@ const struct sja1105_info sja1105q_info = {
.static_ops = sja1105q_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.qinq_tpid = ETH_P_8021AD,
+ .tag_proto = DSA_TAG_PROTO_SJA1105,
.can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
+ .num_ports = SJA1105_NUM_PORTS,
.num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1105_rxtstamp,
+ .clocking_setup = sja1105_clocking_setup,
.regs = &sja1105pqrs_regs,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 3,
+ [SJA1105_SPEED_100MBPS] = 2,
+ [SJA1105_SPEED_1000MBPS] = 1,
+ [SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
+ },
+ .supports_mii = {true, true, true, true, true},
+ .supports_rmii = {true, true, true, true, true},
+ .supports_rgmii = {true, true, true, true, true},
.name = "SJA1105Q",
};
@@ -555,16 +706,34 @@ const struct sja1105_info sja1105r_info = {
.static_ops = sja1105r_table_ops,
.dyn_ops = sja1105pqrs_dyn_ops,
.qinq_tpid = ETH_P_8021AD,
+ .tag_proto = DSA_TAG_PROTO_SJA1105,
.can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
+ .num_ports = SJA1105_NUM_PORTS,
.num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1105_rxtstamp,
+ .clocking_setup = sja1105_clocking_setup,
+ .pcs_mdio_read = sja1105_pcs_mdio_read,
+ .pcs_mdio_write = sja1105_pcs_mdio_write,
.regs = &sja1105pqrs_regs,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 3,
+ [SJA1105_SPEED_100MBPS] = 2,
+ [SJA1105_SPEED_1000MBPS] = 1,
+ [SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
+ },
+ .supports_mii = {true, true, true, true, true},
+ .supports_rmii = {true, true, true, true, true},
+ .supports_rgmii = {true, true, true, true, true},
+ .supports_sgmii = {false, false, false, false, true},
.name = "SJA1105R",
};
@@ -575,14 +744,236 @@ const struct sja1105_info sja1105s_info = {
.dyn_ops = sja1105pqrs_dyn_ops,
.regs = &sja1105pqrs_regs,
.qinq_tpid = ETH_P_8021AD,
+ .tag_proto = DSA_TAG_PROTO_SJA1105,
.can_limit_mcast_flood = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1105_MAX_FRAME_MEMORY,
+ .num_ports = SJA1105_NUM_PORTS,
.num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
.setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
.reset_cmd = sja1105pqrs_reset_cmd,
.fdb_add_cmd = sja1105pqrs_fdb_add,
.fdb_del_cmd = sja1105pqrs_fdb_del,
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1105_rxtstamp,
+ .clocking_setup = sja1105_clocking_setup,
+ .pcs_mdio_read = sja1105_pcs_mdio_read,
+ .pcs_mdio_write = sja1105_pcs_mdio_write,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 3,
+ [SJA1105_SPEED_100MBPS] = 2,
+ [SJA1105_SPEED_1000MBPS] = 1,
+ [SJA1105_SPEED_2500MBPS] = 0, /* Not supported */
+ },
+ .supports_mii = {true, true, true, true, true},
+ .supports_rmii = {true, true, true, true, true},
+ .supports_rgmii = {true, true, true, true, true},
+ .supports_sgmii = {false, false, false, false, true},
.name = "SJA1105S",
};
+
+const struct sja1105_info sja1110a_info = {
+ .device_id = SJA1110_DEVICE_ID,
+ .part_no = SJA1110A_PART_NO,
+ .static_ops = sja1110_table_ops,
+ .dyn_ops = sja1110_dyn_ops,
+ .regs = &sja1110_regs,
+ .qinq_tpid = ETH_P_8021AD,
+ .tag_proto = DSA_TAG_PROTO_SJA1110,
+ .can_limit_mcast_flood = true,
+ .multiple_cascade_ports = true,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
+ .num_ports = SJA1110_NUM_PORTS,
+ .num_cbs_shapers = SJA1110_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1110_setup_rgmii_delay,
+ .reset_cmd = sja1110_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1110_rxtstamp,
+ .txtstamp = sja1110_txtstamp,
+ .clocking_setup = sja1110_clocking_setup,
+ .pcs_mdio_read = sja1110_pcs_mdio_read,
+ .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 4,
+ [SJA1105_SPEED_100MBPS] = 3,
+ [SJA1105_SPEED_1000MBPS] = 2,
+ [SJA1105_SPEED_2500MBPS] = 1,
+ },
+ .supports_mii = {true, true, true, true, false,
+ true, true, true, true, true, true},
+ .supports_rmii = {false, false, true, true, false,
+ false, false, false, false, false, false},
+ .supports_rgmii = {false, false, true, true, false,
+ false, false, false, false, false, false},
+ .supports_sgmii = {false, true, true, true, true,
+ false, false, false, false, false, false},
+ .supports_2500basex = {false, false, false, true, true,
+ false, false, false, false, false, false},
+ .internal_phy = {SJA1105_NO_PHY, SJA1105_PHY_BASE_TX,
+ SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1},
+ .name = "SJA1110A",
+};
+
+const struct sja1105_info sja1110b_info = {
+ .device_id = SJA1110_DEVICE_ID,
+ .part_no = SJA1110B_PART_NO,
+ .static_ops = sja1110_table_ops,
+ .dyn_ops = sja1110_dyn_ops,
+ .regs = &sja1110_regs,
+ .qinq_tpid = ETH_P_8021AD,
+ .tag_proto = DSA_TAG_PROTO_SJA1110,
+ .can_limit_mcast_flood = true,
+ .multiple_cascade_ports = true,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
+ .num_ports = SJA1110_NUM_PORTS,
+ .num_cbs_shapers = SJA1110_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1110_setup_rgmii_delay,
+ .reset_cmd = sja1110_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1110_rxtstamp,
+ .txtstamp = sja1110_txtstamp,
+ .clocking_setup = sja1110_clocking_setup,
+ .pcs_mdio_read = sja1110_pcs_mdio_read,
+ .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 4,
+ [SJA1105_SPEED_100MBPS] = 3,
+ [SJA1105_SPEED_1000MBPS] = 2,
+ [SJA1105_SPEED_2500MBPS] = 1,
+ },
+ .supports_mii = {true, true, true, true, false,
+ true, true, true, true, true, false},
+ .supports_rmii = {false, false, true, true, false,
+ false, false, false, false, false, false},
+ .supports_rgmii = {false, false, true, true, false,
+ false, false, false, false, false, false},
+ .supports_sgmii = {false, false, false, true, true,
+ false, false, false, false, false, false},
+ .supports_2500basex = {false, false, false, true, true,
+ false, false, false, false, false, false},
+ .internal_phy = {SJA1105_NO_PHY, SJA1105_PHY_BASE_TX,
+ SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
+ SJA1105_NO_PHY},
+ .name = "SJA1110B",
+};
+
+const struct sja1105_info sja1110c_info = {
+ .device_id = SJA1110_DEVICE_ID,
+ .part_no = SJA1110C_PART_NO,
+ .static_ops = sja1110_table_ops,
+ .dyn_ops = sja1110_dyn_ops,
+ .regs = &sja1110_regs,
+ .qinq_tpid = ETH_P_8021AD,
+ .tag_proto = DSA_TAG_PROTO_SJA1110,
+ .can_limit_mcast_flood = true,
+ .multiple_cascade_ports = true,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
+ .num_ports = SJA1110_NUM_PORTS,
+ .num_cbs_shapers = SJA1110_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1110_setup_rgmii_delay,
+ .reset_cmd = sja1110_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1110_rxtstamp,
+ .txtstamp = sja1110_txtstamp,
+ .clocking_setup = sja1110_clocking_setup,
+ .pcs_mdio_read = sja1110_pcs_mdio_read,
+ .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 4,
+ [SJA1105_SPEED_100MBPS] = 3,
+ [SJA1105_SPEED_1000MBPS] = 2,
+ [SJA1105_SPEED_2500MBPS] = 1,
+ },
+ .supports_mii = {true, true, true, true, false,
+ true, true, true, false, false, false},
+ .supports_rmii = {false, false, true, true, false,
+ false, false, false, false, false, false},
+ .supports_rgmii = {false, false, true, true, false,
+ false, false, false, false, false, false},
+ .supports_sgmii = {false, false, false, false, true,
+ false, false, false, false, false, false},
+ .supports_2500basex = {false, false, false, false, true,
+ false, false, false, false, false, false},
+ .internal_phy = {SJA1105_NO_PHY, SJA1105_PHY_BASE_TX,
+ SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
+ SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY},
+ .name = "SJA1110C",
+};
+
+const struct sja1105_info sja1110d_info = {
+ .device_id = SJA1110_DEVICE_ID,
+ .part_no = SJA1110D_PART_NO,
+ .static_ops = sja1110_table_ops,
+ .dyn_ops = sja1110_dyn_ops,
+ .regs = &sja1110_regs,
+ .qinq_tpid = ETH_P_8021AD,
+ .tag_proto = DSA_TAG_PROTO_SJA1110,
+ .can_limit_mcast_flood = true,
+ .multiple_cascade_ports = true,
+ .ptp_ts_bits = 32,
+ .ptpegr_ts_bytes = 8,
+ .max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
+ .num_ports = SJA1110_NUM_PORTS,
+ .num_cbs_shapers = SJA1110_MAX_CBS_COUNT,
+ .setup_rgmii_delay = sja1110_setup_rgmii_delay,
+ .reset_cmd = sja1110_reset_cmd,
+ .fdb_add_cmd = sja1105pqrs_fdb_add,
+ .fdb_del_cmd = sja1105pqrs_fdb_del,
+ .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
+ .rxtstamp = sja1110_rxtstamp,
+ .txtstamp = sja1110_txtstamp,
+ .clocking_setup = sja1110_clocking_setup,
+ .pcs_mdio_read = sja1110_pcs_mdio_read,
+ .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .port_speed = {
+ [SJA1105_SPEED_AUTO] = 0,
+ [SJA1105_SPEED_10MBPS] = 4,
+ [SJA1105_SPEED_100MBPS] = 3,
+ [SJA1105_SPEED_1000MBPS] = 2,
+ [SJA1105_SPEED_2500MBPS] = 1,
+ },
+ .supports_mii = {true, false, true, false, false,
+ true, true, true, false, false, false},
+ .supports_rmii = {false, false, true, false, false,
+ false, false, false, false, false, false},
+ .supports_rgmii = {false, false, true, false, false,
+ false, false, false, false, false, false},
+ .supports_sgmii = {false, true, true, true, true,
+ false, false, false, false, false, false},
+ .supports_2500basex = {false, false, false, true, true,
+ false, false, false, false, false, false},
+ .internal_phy = {SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY, SJA1105_PHY_BASE_T1,
+ SJA1105_PHY_BASE_T1, SJA1105_PHY_BASE_T1,
+ SJA1105_NO_PHY, SJA1105_NO_PHY,
+ SJA1105_NO_PHY},
+ .name = "SJA1110D",
+};
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
index a8efb7fac395..1491b72008f3 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -180,6 +180,43 @@ size_t sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1110_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_general_params_entry *entry = entry_ptr;
+ const size_t size = SJA1110_SIZE_GENERAL_PARAMS_ENTRY;
+
+ sja1105_packing(buf, &entry->vllupformat, 447, 447, size, op);
+ sja1105_packing(buf, &entry->mirr_ptacu, 446, 446, size, op);
+ sja1105_packing(buf, &entry->switchid, 445, 442, size, op);
+ sja1105_packing(buf, &entry->hostprio, 441, 439, size, op);
+ sja1105_packing(buf, &entry->mac_fltres1, 438, 391, size, op);
+ sja1105_packing(buf, &entry->mac_fltres0, 390, 343, size, op);
+ sja1105_packing(buf, &entry->mac_flt1, 342, 295, size, op);
+ sja1105_packing(buf, &entry->mac_flt0, 294, 247, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt1, 246, 246, size, op);
+ sja1105_packing(buf, &entry->incl_srcpt0, 245, 245, size, op);
+ sja1105_packing(buf, &entry->send_meta1, 244, 244, size, op);
+ sja1105_packing(buf, &entry->send_meta0, 243, 243, size, op);
+ sja1105_packing(buf, &entry->casc_port, 242, 232, size, op);
+ sja1105_packing(buf, &entry->host_port, 231, 228, size, op);
+ sja1105_packing(buf, &entry->mirr_port, 227, 224, size, op);
+ sja1105_packing(buf, &entry->vlmarker, 223, 192, size, op);
+ sja1105_packing(buf, &entry->vlmask, 191, 160, size, op);
+ sja1105_packing(buf, &entry->tpid2, 159, 144, size, op);
+ sja1105_packing(buf, &entry->ignore2stf, 143, 143, size, op);
+ sja1105_packing(buf, &entry->tpid, 142, 127, size, op);
+ sja1105_packing(buf, &entry->queue_ts, 126, 126, size, op);
+ sja1105_packing(buf, &entry->egrmirrvid, 125, 114, size, op);
+ sja1105_packing(buf, &entry->egrmirrpcp, 113, 111, size, op);
+ sja1105_packing(buf, &entry->egrmirrdei, 110, 110, size, op);
+ sja1105_packing(buf, &entry->replay_port, 109, 106, size, op);
+ sja1105_packing(buf, &entry->tdmaconfigidx, 70, 67, size, op);
+ sja1105_packing(buf, &entry->header_type, 64, 49, size, op);
+ sja1105_packing(buf, &entry->tte_en, 16, 16, size, op);
+ return size;
+}
+
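(Editor's sketch: every *_entry_packing() helper above is symmetric — one table of (start, end) bit positions drives both serialization and deserialization via the PACK/UNPACK ops from <linux/packing.h>. A minimal round-trip using the function just added; the example function itself is hypothetical, not part of this patch:)

	/* Pack a general-params entry into its 56-byte on-wire image,
	 * then unpack it into a second struct; after the round trip,
	 * check.switchid == entry.switchid.
	 */
	static void sja1110_general_params_roundtrip(void)
	{
		struct sja1105_general_params_entry entry = { .switchid = 3 };
		struct sja1105_general_params_entry check = {};
		u8 buf[SJA1110_SIZE_GENERAL_PARAMS_ENTRY] = {};

		sja1110_general_params_entry_packing(buf, &entry, PACK);
		sja1110_general_params_entry_packing(buf, &check, UNPACK);
	}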
static size_t
sja1105_l2_forwarding_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
@@ -195,6 +232,20 @@ sja1105_l2_forwarding_params_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1110_l2_forwarding_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_forwarding_params_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY;
+ int offset, i;
+
+ sja1105_packing(buf, &entry->max_dynp, 95, 93, size, op);
+ for (i = 0, offset = 5; i < 8; i++, offset += 11)
+ sja1105_packing(buf, &entry->part_spc[i],
+ offset + 10, offset + 0, size, op);
+ return size;
+}
+
size_t sja1105_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -211,6 +262,27 @@ size_t sja1105_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1110_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_forwarding_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_L2_FORWARDING_ENTRY;
+ int offset, i;
+
+ if (entry->type_egrpcp2outputq) {
+ for (i = 0, offset = 31; i < SJA1110_NUM_PORTS;
+ i++, offset += 3) {
+ sja1105_packing(buf, &entry->vlan_pmap[i],
+ offset + 2, offset + 0, size, op);
+ }
+ } else {
+ sja1105_packing(buf, &entry->bc_domain, 63, 53, size, op);
+ sja1105_packing(buf, &entry->reach_port, 52, 42, size, op);
+ sja1105_packing(buf, &entry->fl_domain, 41, 31, size, op);
+ }
+ return size;
+}
+
static size_t
sja1105et_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
@@ -249,6 +321,28 @@ size_t sja1105pqrs_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1110_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_lookup_params_entry *entry = entry_ptr;
+ const size_t size = SJA1110_SIZE_L2_LOOKUP_PARAMS_ENTRY;
+ int offset, i;
+
+ for (i = 0, offset = 70; i < SJA1110_NUM_PORTS; i++, offset += 11)
+ sja1105_packing(buf, &entry->maxaddrp[i],
+ offset + 10, offset + 0, size, op);
+ sja1105_packing(buf, &entry->maxage, 69, 55, size, op);
+ sja1105_packing(buf, &entry->start_dynspc, 54, 45, size, op);
+ sja1105_packing(buf, &entry->drpnolearn, 44, 34, size, op);
+ sja1105_packing(buf, &entry->shared_learn, 33, 33, size, op);
+ sja1105_packing(buf, &entry->no_enf_hostprt, 32, 32, size, op);
+ sja1105_packing(buf, &entry->no_mgmt_learn, 31, 31, size, op);
+ sja1105_packing(buf, &entry->use_static, 30, 30, size, op);
+ sja1105_packing(buf, &entry->owr_dyn, 29, 29, size, op);
+ sja1105_packing(buf, &entry->learn_once, 28, 28, size, op);
+ return size;
+}
+
size_t sja1105et_l2_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -291,6 +385,36 @@ size_t sja1105pqrs_l2_lookup_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1110_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1110_SIZE_L2_LOOKUP_ENTRY;
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+
+ if (entry->lockeds) {
+ sja1105_packing(buf, &entry->trap, 168, 168, size, op);
+ sja1105_packing(buf, &entry->mirrvlan, 167, 156, size, op);
+ sja1105_packing(buf, &entry->takets, 155, 155, size, op);
+ sja1105_packing(buf, &entry->mirr, 154, 154, size, op);
+ sja1105_packing(buf, &entry->retag, 153, 153, size, op);
+ } else {
+ sja1105_packing(buf, &entry->touched, 168, 168, size, op);
+ sja1105_packing(buf, &entry->age, 167, 153, size, op);
+ }
+ sja1105_packing(buf, &entry->mask_iotag, 152, 152, size, op);
+ sja1105_packing(buf, &entry->mask_vlanid, 151, 140, size, op);
+ sja1105_packing(buf, &entry->mask_macaddr, 139, 92, size, op);
+ sja1105_packing(buf, &entry->mask_srcport, 91, 88, size, op);
+ sja1105_packing(buf, &entry->iotag, 87, 87, size, op);
+ sja1105_packing(buf, &entry->vlanid, 86, 75, size, op);
+ sja1105_packing(buf, &entry->macaddr, 74, 27, size, op);
+ sja1105_packing(buf, &entry->srcport, 26, 23, size, op);
+ sja1105_packing(buf, &entry->destports, 22, 12, size, op);
+ sja1105_packing(buf, &entry->enfport, 11, 11, size, op);
+ sja1105_packing(buf, &entry->index, 10, 1, size, op);
+ return size;
+}
+
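(Editor's note, hedged: relative to the P/Q/R/S layout, the SJA1110 L2 lookup entry grows mask_srcport/srcport fields — see the struct additions in sja1105_static_config.h below — so a TCAM entry can also match on ingress port, and locked/static entries gain a TRAP bit alongside the deprecated TSREG.)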
static size_t sja1105_l2_policing_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -305,6 +429,20 @@ static size_t sja1105_l2_policing_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1110_l2_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_policing_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_L2_POLICING_ENTRY;
+
+ sja1105_packing(buf, &entry->sharindx, 63, 57, size, op);
+ sja1105_packing(buf, &entry->smax, 56, 39, size, op);
+ sja1105_packing(buf, &entry->rate, 38, 21, size, op);
+ sja1105_packing(buf, &entry->maxlen, 20, 10, size, op);
+ sja1105_packing(buf, &entry->partition, 9, 7, size, op);
+ return size;
+}
+
static size_t sja1105et_mac_config_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -373,6 +511,40 @@ size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1110_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY;
+ struct sja1105_mac_config_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 104; i < 8; i++, offset += 19) {
+ sja1105_packing(buf, &entry->enabled[i],
+ offset + 0, offset + 0, size, op);
+ sja1105_packing(buf, &entry->base[i],
+ offset + 9, offset + 1, size, op);
+ sja1105_packing(buf, &entry->top[i],
+ offset + 18, offset + 10, size, op);
+ }
+ sja1105_packing(buf, &entry->speed, 98, 96, size, op);
+ sja1105_packing(buf, &entry->tp_delin, 95, 80, size, op);
+ sja1105_packing(buf, &entry->tp_delout, 79, 64, size, op);
+ sja1105_packing(buf, &entry->maxage, 63, 56, size, op);
+ sja1105_packing(buf, &entry->vlanprio, 55, 53, size, op);
+ sja1105_packing(buf, &entry->vlanid, 52, 41, size, op);
+ sja1105_packing(buf, &entry->ing_mirr, 40, 40, size, op);
+ sja1105_packing(buf, &entry->egr_mirr, 39, 39, size, op);
+ sja1105_packing(buf, &entry->drpnona664, 38, 38, size, op);
+ sja1105_packing(buf, &entry->drpdtag, 37, 37, size, op);
+ sja1105_packing(buf, &entry->drpuntag, 34, 34, size, op);
+ sja1105_packing(buf, &entry->retag, 33, 33, size, op);
+ sja1105_packing(buf, &entry->dyn_learn, 32, 32, size, op);
+ sja1105_packing(buf, &entry->egress, 31, 31, size, op);
+ sja1105_packing(buf, &entry->ingress, 30, 30, size, op);
+ sja1105_packing(buf, &entry->ifg, 10, 5, size, op);
+ return size;
+}
+
static size_t
sja1105_schedule_entry_points_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
@@ -398,6 +570,19 @@ sja1105_schedule_entry_points_entry_packing(void *buf, void *entry_ptr,
return size;
}
+static size_t
+sja1110_schedule_entry_points_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_schedule_entry_points_entry *entry = entry_ptr;
+ const size_t size = SJA1110_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY;
+
+ sja1105_packing(buf, &entry->subschindx, 63, 61, size, op);
+ sja1105_packing(buf, &entry->delta, 60, 43, size, op);
+ sja1105_packing(buf, &entry->address, 42, 31, size, op);
+ return size;
+}
+
static size_t sja1105_schedule_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -411,6 +596,19 @@ static size_t sja1105_schedule_params_entry_packing(void *buf, void *entry_ptr,
return size;
}
+static size_t sja1110_schedule_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_schedule_params_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY;
+ int offset, i;
+
+ for (i = 0, offset = 0; i < 8; i++, offset += 12)
+ sja1105_packing(buf, &entry->subscheind[i],
+ offset + 11, offset + 0, size, op);
+ return size;
+}
+
static size_t sja1105_schedule_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -430,6 +628,25 @@ static size_t sja1105_schedule_entry_packing(void *buf, void *entry_ptr,
return size;
}
+static size_t sja1110_schedule_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1110_SIZE_SCHEDULE_ENTRY;
+ struct sja1105_schedule_entry *entry = entry_ptr;
+
+ sja1105_packing(buf, &entry->winstindex, 95, 84, size, op);
+ sja1105_packing(buf, &entry->winend, 83, 83, size, op);
+ sja1105_packing(buf, &entry->winst, 82, 82, size, op);
+ sja1105_packing(buf, &entry->destports, 81, 71, size, op);
+ sja1105_packing(buf, &entry->setvalid, 70, 70, size, op);
+ sja1105_packing(buf, &entry->txen, 69, 69, size, op);
+ sja1105_packing(buf, &entry->resmedia_en, 68, 68, size, op);
+ sja1105_packing(buf, &entry->resmedia, 67, 60, size, op);
+ sja1105_packing(buf, &entry->vlindex, 59, 48, size, op);
+ sja1105_packing(buf, &entry->delta, 47, 30, size, op);
+ return size;
+}
+
static size_t
sja1105_vl_forwarding_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
@@ -445,6 +662,21 @@ sja1105_vl_forwarding_params_entry_packing(void *buf, void *entry_ptr,
return size;
}
+static size_t
+sja1110_vl_forwarding_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_forwarding_params_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY;
+ int offset, i;
+
+ for (i = 0, offset = 8; i < 8; i++, offset += 11)
+ sja1105_packing(buf, &entry->partspc[i],
+ offset + 10, offset + 0, size, op);
+ sja1105_packing(buf, &entry->debugen, 7, 7, size, op);
+ return size;
+}
+
static size_t sja1105_vl_forwarding_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -458,6 +690,19 @@ static size_t sja1105_vl_forwarding_entry_packing(void *buf, void *entry_ptr,
return size;
}
+static size_t sja1110_vl_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_forwarding_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_FORWARDING_ENTRY;
+
+ sja1105_packing(buf, &entry->type, 31, 31, size, op);
+ sja1105_packing(buf, &entry->priority, 30, 28, size, op);
+ sja1105_packing(buf, &entry->partition, 27, 25, size, op);
+ sja1105_packing(buf, &entry->destports, 24, 14, size, op);
+ return size;
+}
+
size_t sja1105_vl_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -492,6 +737,40 @@ size_t sja1105_vl_lookup_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1110_vl_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_lookup_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_LOOKUP_ENTRY;
+
+ if (entry->format == SJA1105_VL_FORMAT_PSFP) {
+ /* Interpreting vllupformat as 0 */
+		sja1105_packing(buf, &entry->destports, 94, 84, size, op);
+		sja1105_packing(buf, &entry->iscritical, 83, 83, size, op);
+		sja1105_packing(buf, &entry->macaddr, 82, 35, size, op);
+		sja1105_packing(buf, &entry->vlanid, 34, 23, size, op);
+		sja1105_packing(buf, &entry->port, 22, 19, size, op);
+		sja1105_packing(buf, &entry->vlanprior, 18, 16, size, op);
+ } else {
+ /* Interpreting vllupformat as 1 */
+		sja1105_packing(buf, &entry->egrmirr, 94, 84, size, op);
+		sja1105_packing(buf, &entry->ingrmirr, 83, 83, size, op);
+		sja1105_packing(buf, &entry->vlid, 50, 35, size, op);
+		sja1105_packing(buf, &entry->port, 22, 19, size, op);
+ }
+ return size;
+}
+
static size_t sja1105_vl_policing_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -508,6 +787,22 @@ static size_t sja1105_vl_policing_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1110_vl_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vl_policing_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_VL_POLICING_ENTRY;
+
+ sja1105_packing(buf, &entry->type, 63, 63, size, op);
+ sja1105_packing(buf, &entry->maxlen, 62, 52, size, op);
+ sja1105_packing(buf, &entry->sharindx, 51, 40, size, op);
+ if (entry->type == 0) {
+ sja1105_packing(buf, &entry->bag, 41, 28, size, op);
+ sja1105_packing(buf, &entry->jitter, 27, 18, size, op);
+ }
+ return size;
+}
+
size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -523,6 +818,22 @@ size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1110_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_vlan_lookup_entry *entry = entry_ptr;
+ const size_t size = SJA1110_SIZE_VLAN_LOOKUP_ENTRY;
+
+ sja1105_packing(buf, &entry->ving_mirr, 95, 85, size, op);
+ sja1105_packing(buf, &entry->vegr_mirr, 84, 74, size, op);
+ sja1105_packing(buf, &entry->vmemb_port, 73, 63, size, op);
+ sja1105_packing(buf, &entry->vlan_bc, 62, 52, size, op);
+ sja1105_packing(buf, &entry->tag_port, 51, 41, size, op);
+ sja1105_packing(buf, &entry->type_entry, 40, 39, size, op);
+ sja1105_packing(buf, &entry->vlanid, 38, 27, size, op);
+ return size;
+}
+
static size_t sja1105_xmii_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -539,6 +850,24 @@ static size_t sja1105_xmii_params_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1110_xmii_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ const size_t size = SJA1110_SIZE_XMII_PARAMS_ENTRY;
+ struct sja1105_xmii_params_entry *entry = entry_ptr;
+ int offset, i;
+
+ for (i = 0, offset = 20; i < SJA1110_NUM_PORTS; i++, offset += 4) {
+ sja1105_packing(buf, &entry->xmii_mode[i],
+ offset + 1, offset + 0, size, op);
+ sja1105_packing(buf, &entry->phy_mac[i],
+ offset + 2, offset + 2, size, op);
+ sja1105_packing(buf, &entry->special[i],
+ offset + 3, offset + 3, size, op);
+ }
+ return size;
+}
+
size_t sja1105_retagging_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -555,6 +884,36 @@ size_t sja1105_retagging_entry_packing(void *buf, void *entry_ptr,
return size;
}
+size_t sja1110_retagging_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_retagging_entry *entry = entry_ptr;
+ const size_t size = SJA1105_SIZE_RETAGGING_ENTRY;
+
+ sja1105_packing(buf, &entry->egr_port, 63, 53, size, op);
+ sja1105_packing(buf, &entry->ing_port, 52, 42, size, op);
+ sja1105_packing(buf, &entry->vlan_ing, 41, 30, size, op);
+ sja1105_packing(buf, &entry->vlan_egr, 29, 18, size, op);
+ sja1105_packing(buf, &entry->do_not_learn, 17, 17, size, op);
+ sja1105_packing(buf, &entry->use_dest_ports, 16, 16, size, op);
+ sja1105_packing(buf, &entry->destports, 15, 5, size, op);
+ return size;
+}
+
+static size_t sja1110_pcp_remapping_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1110_pcp_remapping_entry *entry = entry_ptr;
+ const size_t size = SJA1110_SIZE_PCP_REMAPPING_ENTRY;
+ int offset, i;
+
+ for (i = 0, offset = 8; i < SJA1105_NUM_TC; i++, offset += 3)
+ sja1105_packing(buf, &entry->egrpcp[i],
+ offset + 2, offset + 0, size, op);
+
+ return size;
+}
+
size_t sja1105_table_header_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
@@ -619,6 +978,7 @@ static u64 blk_id_map[BLK_IDX_MAX] = {
[BLK_IDX_GENERAL_PARAMS] = BLKID_GENERAL_PARAMS,
[BLK_IDX_RETAGGING] = BLKID_RETAGGING,
[BLK_IDX_XMII_PARAMS] = BLKID_XMII_PARAMS,
+ [BLK_IDX_PCP_REMAPPING] = BLKID_PCP_REMAPPING,
};
const char *sja1105_static_config_error_msg[] = {
@@ -657,11 +1017,11 @@ const char *sja1105_static_config_error_msg[] = {
};
static sja1105_config_valid_t
-static_config_check_memory_size(const struct sja1105_table *tables)
+static_config_check_memory_size(const struct sja1105_table *tables, int max_mem)
{
const struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
const struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
- int i, max_mem, mem = 0;
+ int i, mem = 0;
l2_fwd_params = tables[BLK_IDX_L2_FORWARDING_PARAMS].entries;
@@ -675,9 +1035,7 @@ static_config_check_memory_size(const struct sja1105_table *tables)
}
if (tables[BLK_IDX_RETAGGING].entry_count)
- max_mem = SJA1105_MAX_FRAME_MEMORY_RETAGGING;
- else
- max_mem = SJA1105_MAX_FRAME_MEMORY;
+ max_mem -= SJA1105_FRAME_MEMORY_RETAGGING_OVERHEAD;
if (mem > max_mem)
return SJA1105_OVERCOMMITTED_FRAME_MEMORY;
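(Editor's sketch: rather than choosing between two absolute limits, the check now takes the per-chip maximum and subtracts a fixed retagging overhead. For the SJA1105 family this reproduces the removed constant exactly, as the hypothetical helper below illustrates:)

	/* 929 - 19 = 910, the value SJA1105_MAX_FRAME_MEMORY_RETAGGING
	 * used to hard-code; the SJA1110 gets 1820 - 19 = 1801 through
	 * the same expression. Illustrative helper, not part of the patch.
	 */
	static int effective_frame_mem(int max_mem, bool retagging_in_use)
	{
		if (retagging_in_use)
			max_mem -= SJA1105_FRAME_MEMORY_RETAGGING_OVERHEAD;
		return max_mem;
	}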
@@ -686,7 +1044,8 @@ static_config_check_memory_size(const struct sja1105_table *tables)
}
sja1105_config_valid_t
-sja1105_static_config_check_valid(const struct sja1105_static_config *config)
+sja1105_static_config_check_valid(const struct sja1105_static_config *config,
+ int max_mem)
{
const struct sja1105_table *tables = config->tables;
#define IS_FULL(blk_idx) \
@@ -754,7 +1113,7 @@ sja1105_static_config_check_valid(const struct sja1105_static_config *config)
if (!IS_FULL(BLK_IDX_XMII_PARAMS))
return SJA1105_MISSING_XMII_TABLE;
- return static_config_check_memory_size(tables);
+ return static_config_check_memory_size(tables, max_mem);
#undef IS_FULL
}
@@ -1401,6 +1760,130 @@ const struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX] = {
},
};
+/* SJA1110: Third generation */
+const struct sja1105_table_ops sja1110_table_ops[BLK_IDX_MAX] = {
+ [BLK_IDX_SCHEDULE] = {
+ .packing = sja1110_schedule_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry),
+ .packed_entry_size = SJA1110_SIZE_SCHEDULE_ENTRY,
+ .max_entry_count = SJA1110_MAX_SCHEDULE_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS] = {
+ .packing = sja1110_schedule_entry_points_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_entry),
+ .packed_entry_size = SJA1110_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT,
+ },
+ [BLK_IDX_VL_LOOKUP] = {
+ .packing = sja1110_vl_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_lookup_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_LOOKUP_ENTRY,
+ .max_entry_count = SJA1110_MAX_VL_LOOKUP_COUNT,
+ },
+ [BLK_IDX_VL_POLICING] = {
+ .packing = sja1110_vl_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_POLICING_ENTRY,
+ .max_entry_count = SJA1110_MAX_VL_POLICING_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING] = {
+ .packing = sja1110_vl_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_ENTRY,
+ .max_entry_count = SJA1110_MAX_VL_FORWARDING_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP] = {
+ .packing = sja1110_l2_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_entry),
+ .packed_entry_size = SJA1110_SIZE_L2_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_POLICING] = {
+ .packing = sja1110_l2_policing_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_policing_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_POLICING_ENTRY,
+ .max_entry_count = SJA1110_MAX_L2_POLICING_COUNT,
+ },
+ [BLK_IDX_VLAN_LOOKUP] = {
+ .packing = sja1110_vlan_lookup_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vlan_lookup_entry),
+ .packed_entry_size = SJA1110_SIZE_VLAN_LOOKUP_ENTRY,
+ .max_entry_count = SJA1105_MAX_VLAN_LOOKUP_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING] = {
+ .packing = sja1110_l2_forwarding_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_ENTRY,
+ .max_entry_count = SJA1110_MAX_L2_FORWARDING_COUNT,
+ },
+ [BLK_IDX_MAC_CONFIG] = {
+ .packing = sja1110_mac_config_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_mac_config_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY,
+ .max_entry_count = SJA1110_MAX_MAC_CONFIG_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_PARAMS] = {
+ .packing = sja1110_schedule_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_PARAMS_COUNT,
+ },
+ [BLK_IDX_SCHEDULE_ENTRY_POINTS_PARAMS] = {
+ .packing = sja1105_schedule_entry_points_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_schedule_entry_points_params_entry),
+ .packed_entry_size = SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT,
+ },
+ [BLK_IDX_VL_FORWARDING_PARAMS] = {
+ .packing = sja1110_vl_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_vl_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_LOOKUP_PARAMS] = {
+ .packing = sja1110_l2_lookup_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_lookup_params_entry),
+ .packed_entry_size = SJA1110_SIZE_L2_LOOKUP_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
+ },
+ [BLK_IDX_L2_FORWARDING_PARAMS] = {
+ .packing = sja1110_l2_forwarding_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_l2_forwarding_params_entry),
+ .packed_entry_size = SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
+ },
+ [BLK_IDX_AVB_PARAMS] = {
+ .packing = sja1105pqrs_avb_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_avb_params_entry),
+ .packed_entry_size = SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_AVB_PARAMS_COUNT,
+ },
+ [BLK_IDX_GENERAL_PARAMS] = {
+ .packing = sja1110_general_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_general_params_entry),
+ .packed_entry_size = SJA1110_SIZE_GENERAL_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT,
+ },
+ [BLK_IDX_RETAGGING] = {
+ .packing = sja1110_retagging_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_retagging_entry),
+ .packed_entry_size = SJA1105_SIZE_RETAGGING_ENTRY,
+ .max_entry_count = SJA1105_MAX_RETAGGING_COUNT,
+ },
+ [BLK_IDX_XMII_PARAMS] = {
+ .packing = sja1110_xmii_params_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1105_xmii_params_entry),
+ .packed_entry_size = SJA1110_SIZE_XMII_PARAMS_ENTRY,
+ .max_entry_count = SJA1105_MAX_XMII_PARAMS_COUNT,
+ },
+ [BLK_IDX_PCP_REMAPPING] = {
+ .packing = sja1110_pcp_remapping_entry_packing,
+ .unpacked_entry_size = sizeof(struct sja1110_pcp_remapping_entry),
+ .packed_entry_size = SJA1110_SIZE_PCP_REMAPPING_ENTRY,
+ .max_entry_count = SJA1110_MAX_PCP_REMAPPING_COUNT,
+ },
+};
+
int sja1105_static_config_init(struct sja1105_static_config *config,
const struct sja1105_table_ops *static_ops,
u64 device_id)
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.h b/drivers/net/dsa/sja1105/sja1105_static_config.h
index 779eb6840f05..bce0f5c03d0b 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.h
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.h
@@ -9,21 +9,30 @@
#include <linux/types.h>
#include <asm/types.h>
+#define SJA1105_NUM_PORTS 5
+#define SJA1110_NUM_PORTS 11
+#define SJA1105_MAX_NUM_PORTS SJA1110_NUM_PORTS
+#define SJA1105_NUM_TC 8
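(Editor's note: sizing shared arrays with SJA1105_MAX_NUM_PORTS — the larger of the two port counts — lets one entry struct describe either generation (see vlan_pmap, maxaddrp, phy_mac/xmii_mode and the TAS offload array below), while each chip's real port count travels at runtime in the info structure's .num_ports field.)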
+
#define SJA1105_SIZE_SPI_MSG_HEADER 4
#define SJA1105_SIZE_SPI_MSG_MAXLEN (64 * 4)
#define SJA1105_SIZE_DEVICE_ID 4
#define SJA1105_SIZE_TABLE_HEADER 12
#define SJA1105_SIZE_SCHEDULE_ENTRY 8
+#define SJA1110_SIZE_SCHEDULE_ENTRY 12
#define SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY 4
+#define SJA1110_SIZE_SCHEDULE_ENTRY_POINTS_ENTRY 8
#define SJA1105_SIZE_VL_LOOKUP_ENTRY 12
#define SJA1105_SIZE_VL_POLICING_ENTRY 8
#define SJA1105_SIZE_VL_FORWARDING_ENTRY 4
#define SJA1105_SIZE_L2_POLICING_ENTRY 8
#define SJA1105_SIZE_VLAN_LOOKUP_ENTRY 8
+#define SJA1110_SIZE_VLAN_LOOKUP_ENTRY 12
#define SJA1105_SIZE_L2_FORWARDING_ENTRY 8
#define SJA1105_SIZE_L2_FORWARDING_PARAMS_ENTRY 12
#define SJA1105_SIZE_RETAGGING_ENTRY 8
#define SJA1105_SIZE_XMII_PARAMS_ENTRY 4
+#define SJA1110_SIZE_XMII_PARAMS_ENTRY 8
#define SJA1105_SIZE_SCHEDULE_PARAMS_ENTRY 12
#define SJA1105_SIZE_SCHEDULE_ENTRY_POINTS_PARAMS_ENTRY 4
#define SJA1105_SIZE_VL_FORWARDING_PARAMS_ENTRY 12
@@ -34,11 +43,15 @@
#define SJA1105ET_SIZE_AVB_PARAMS_ENTRY 12
#define SJA1105ET_SIZE_CBS_ENTRY 16
#define SJA1105PQRS_SIZE_L2_LOOKUP_ENTRY 20
+#define SJA1110_SIZE_L2_LOOKUP_ENTRY 24
#define SJA1105PQRS_SIZE_MAC_CONFIG_ENTRY 32
#define SJA1105PQRS_SIZE_L2_LOOKUP_PARAMS_ENTRY 16
+#define SJA1110_SIZE_L2_LOOKUP_PARAMS_ENTRY 28
#define SJA1105PQRS_SIZE_GENERAL_PARAMS_ENTRY 44
+#define SJA1110_SIZE_GENERAL_PARAMS_ENTRY 56
#define SJA1105PQRS_SIZE_AVB_PARAMS_ENTRY 16
#define SJA1105PQRS_SIZE_CBS_ENTRY 20
+#define SJA1110_SIZE_PCP_REMAPPING_ENTRY 4
/* UM10944.pdf Page 11, Table 2. Configuration Blocks */
enum {
@@ -61,6 +74,7 @@ enum {
BLKID_GENERAL_PARAMS = 0x11,
BLKID_RETAGGING = 0x12,
BLKID_CBS = 0x13,
+ BLKID_PCP_REMAPPING = 0x1C,
BLKID_XMII_PARAMS = 0x4E,
};
@@ -85,6 +99,7 @@ enum sja1105_blk_idx {
BLK_IDX_RETAGGING,
BLK_IDX_CBS,
BLK_IDX_XMII_PARAMS,
+ BLK_IDX_PCP_REMAPPING,
BLK_IDX_MAX,
/* Fake block indices that are only valid for dynamic access */
BLK_IDX_MGMT_ROUTE,
@@ -93,15 +108,22 @@ enum sja1105_blk_idx {
};
#define SJA1105_MAX_SCHEDULE_COUNT 1024
+#define SJA1110_MAX_SCHEDULE_COUNT 4096
#define SJA1105_MAX_SCHEDULE_ENTRY_POINTS_COUNT 2048
#define SJA1105_MAX_VL_LOOKUP_COUNT 1024
+#define SJA1110_MAX_VL_LOOKUP_COUNT 4096
#define SJA1105_MAX_VL_POLICING_COUNT 1024
+#define SJA1110_MAX_VL_POLICING_COUNT 4096
#define SJA1105_MAX_VL_FORWARDING_COUNT 1024
+#define SJA1110_MAX_VL_FORWARDING_COUNT 4096
#define SJA1105_MAX_L2_LOOKUP_COUNT 1024
#define SJA1105_MAX_L2_POLICING_COUNT 45
+#define SJA1110_MAX_L2_POLICING_COUNT 110
#define SJA1105_MAX_VLAN_LOOKUP_COUNT 4096
#define SJA1105_MAX_L2_FORWARDING_COUNT 13
+#define SJA1110_MAX_L2_FORWARDING_COUNT 19
#define SJA1105_MAX_MAC_CONFIG_COUNT 5
+#define SJA1110_MAX_MAC_CONFIG_COUNT 11
#define SJA1105_MAX_SCHEDULE_PARAMS_COUNT 1
#define SJA1105_MAX_SCHEDULE_ENTRY_POINTS_PARAMS_COUNT 1
#define SJA1105_MAX_VL_FORWARDING_PARAMS_COUNT 1
@@ -113,21 +135,40 @@ enum sja1105_blk_idx {
#define SJA1105_MAX_AVB_PARAMS_COUNT 1
#define SJA1105ET_MAX_CBS_COUNT 10
#define SJA1105PQRS_MAX_CBS_COUNT 16
+#define SJA1110_MAX_CBS_COUNT 80
+#define SJA1110_MAX_PCP_REMAPPING_COUNT 11
#define SJA1105_MAX_FRAME_MEMORY 929
-#define SJA1105_MAX_FRAME_MEMORY_RETAGGING 910
+#define SJA1110_MAX_FRAME_MEMORY 1820
+#define SJA1105_FRAME_MEMORY_RETAGGING_OVERHEAD 19
#define SJA1105_VL_FRAME_MEMORY 100
#define SJA1105E_DEVICE_ID 0x9C00000Cull
#define SJA1105T_DEVICE_ID 0x9E00030Eull
#define SJA1105PR_DEVICE_ID 0xAF00030Eull
#define SJA1105QS_DEVICE_ID 0xAE00030Eull
+#define SJA1110_DEVICE_ID 0xB700030Full
#define SJA1105ET_PART_NO 0x9A83
#define SJA1105P_PART_NO 0x9A84
#define SJA1105Q_PART_NO 0x9A85
#define SJA1105R_PART_NO 0x9A86
#define SJA1105S_PART_NO 0x9A87
+#define SJA1110A_PART_NO 0x1110
+#define SJA1110B_PART_NO 0x1111
+#define SJA1110C_PART_NO 0x1112
+#define SJA1110D_PART_NO 0x1113
+
+#define SJA1110_ACU 0x1c4400
+#define SJA1110_RGU 0x1c6000
+#define SJA1110_CGU 0x1c6400
+
+#define SJA1110_SPI_ADDR(x) ((x) / 4)
+#define SJA1110_ACU_ADDR(x) (SJA1110_ACU + SJA1110_SPI_ADDR(x))
+#define SJA1110_CGU_ADDR(x) (SJA1110_CGU + SJA1110_SPI_ADDR(x))
+#define SJA1110_RGU_ADDR(x) (SJA1110_RGU + SJA1110_SPI_ADDR(x))
+
+#define SJA1105_RSV_ADDR 0xffffffffffffffffull
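(Editor's sketch: the SJA1110's global control blocks appear to be word-addressed over SPI, so these helpers divide a byte offset by 4 before adding the block base. For a hypothetical byte offset 0x10 inside the CGU:

	SJA1110_CGU_ADDR(0x10) == 0x1c6400 + 0x10 / 4 == 0x1c6404
)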
struct sja1105_schedule_entry {
u64 winstindex;
@@ -173,6 +214,10 @@ struct sja1105_general_params_entry {
u64 egrmirrpcp;
u64 egrmirrdei;
u64 replay_port;
+ /* SJA1110 only */
+ u64 tte_en;
+ u64 tdmaconfigidx;
+ u64 header_type;
};
struct sja1105_schedule_entry_points_entry {
@@ -193,6 +238,7 @@ struct sja1105_vlan_lookup_entry {
u64 vlan_bc;
u64 tag_port;
u64 vlanid;
+ u64 type_entry; /* SJA1110 only */
};
struct sja1105_l2_lookup_entry {
@@ -205,11 +251,17 @@ struct sja1105_l2_lookup_entry {
u64 mask_iotag;
u64 mask_vlanid;
u64 mask_macaddr;
+ u64 mask_srcport;
u64 iotag;
+ u64 srcport;
u64 lockeds;
union {
/* LOCKEDS=1: Static FDB entries */
struct {
+		/* TSREG is deprecated in SJA1110; TRAP is supported only
+ * in SJA1110.
+ */
+ u64 trap;
u64 tsreg;
u64 mirrvlan;
u64 takets;
@@ -225,7 +277,7 @@ struct sja1105_l2_lookup_entry {
};
struct sja1105_l2_lookup_params_entry {
- u64 maxaddrp[5]; /* P/Q/R/S only */
+ u64 maxaddrp[SJA1105_MAX_NUM_PORTS]; /* P/Q/R/S only */
u64 start_dynspc; /* P/Q/R/S only */
u64 drpnolearn; /* P/Q/R/S only */
u64 use_static; /* P/Q/R/S only */
@@ -243,7 +295,9 @@ struct sja1105_l2_forwarding_entry {
u64 bc_domain;
u64 reach_port;
u64 fl_domain;
- u64 vlan_pmap[8];
+ /* This is actually max(SJA1105_NUM_TC, SJA1105_MAX_NUM_PORTS) */
+ u64 vlan_pmap[SJA1105_MAX_NUM_PORTS];
+ bool type_egrpcp2outputq;
};
struct sja1105_l2_forwarding_params_entry {
@@ -298,8 +352,8 @@ struct sja1105_retagging_entry {
};
struct sja1105_cbs_entry {
- u64 port;
- u64 prio;
+ u64 port; /* Not used for SJA1110 */
+ u64 prio; /* Not used for SJA1110 */
u64 credit_hi;
u64 credit_lo;
u64 send_slope;
@@ -307,8 +361,19 @@ struct sja1105_cbs_entry {
};
struct sja1105_xmii_params_entry {
- u64 phy_mac[5];
- u64 xmii_mode[5];
+ u64 phy_mac[SJA1105_MAX_NUM_PORTS];
+ u64 xmii_mode[SJA1105_MAX_NUM_PORTS];
+	/* The SJA1110 insists on being a snowflake: it requires SGMII,
+ * 2500base-x and internal MII ports connected to the 100base-TX PHY to
+ * set this bit. We set it unconditionally from the high-level logic,
+ * and only sja1110_xmii_params_entry_packing writes it to the static
+ * config. I have no better name for it than "special".
+ */
+ u64 special[SJA1105_MAX_NUM_PORTS];
+};
+
+struct sja1110_pcp_remapping_entry {
+ u64 egrpcp[SJA1105_NUM_TC];
};
enum {
@@ -389,6 +454,7 @@ extern const struct sja1105_table_ops sja1105p_table_ops[BLK_IDX_MAX];
extern const struct sja1105_table_ops sja1105q_table_ops[BLK_IDX_MAX];
extern const struct sja1105_table_ops sja1105r_table_ops[BLK_IDX_MAX];
extern const struct sja1105_table_ops sja1105s_table_ops[BLK_IDX_MAX];
+extern const struct sja1105_table_ops sja1110_table_ops[BLK_IDX_MAX];
size_t sja1105_table_header_packing(void *buf, void *hdr, enum packing_op op);
void
@@ -414,7 +480,8 @@ typedef enum {
extern const char *sja1105_static_config_error_msg[];
sja1105_config_valid_t
-sja1105_static_config_check_valid(const struct sja1105_static_config *config);
+sja1105_static_config_check_valid(const struct sja1105_static_config *config,
+ int max_mem);
void
sja1105_static_config_pack(void *buf, struct sja1105_static_config *config);
int sja1105_static_config_init(struct sja1105_static_config *config,
@@ -435,23 +502,47 @@ void sja1105_packing(void *buf, u64 *val, int start, int end,
/* Common implementations for the static and dynamic configs */
size_t sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
+size_t sja1110_general_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
size_t sja1105pqrs_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
+size_t sja1110_l2_lookup_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
size_t sja1105_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
+size_t sja1110_l2_forwarding_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
size_t sja1105pqrs_l2_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
size_t sja1105et_l2_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
+size_t sja1110_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
size_t sja1105_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
+size_t sja1110_vlan_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
size_t sja1105_retagging_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
+size_t sja1110_retagging_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
size_t sja1105pqrs_mac_config_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
+size_t sja1110_mac_config_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
size_t sja1105pqrs_avb_params_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
size_t sja1105_vl_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op);
+size_t sja1110_vl_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_vl_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_xmii_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_l2_policing_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
+size_t sja1110_l2_forwarding_params_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op);
#endif
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.c b/drivers/net/dsa/sja1105/sja1105_tas.c
index 31d8acff1f01..e6153848a950 100644
--- a/drivers/net/dsa/sja1105/sja1105_tas.c
+++ b/drivers/net/dsa/sja1105/sja1105_tas.c
@@ -27,7 +27,7 @@ static int sja1105_tas_set_runtime_params(struct sja1105_private *priv)
tas_data->enabled = false;
- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ for (port = 0; port < ds->num_ports; port++) {
const struct tc_taprio_qopt_offload *offload;
offload = tas_data->offload[port];
@@ -164,6 +164,7 @@ int sja1105_init_scheduling(struct sja1105_private *priv)
struct sja1105_tas_data *tas_data = &priv->tas_data;
struct sja1105_gating_config *gating_cfg = &tas_data->gating_cfg;
struct sja1105_schedule_entry *schedule;
+ struct dsa_switch *ds = priv->ds;
struct sja1105_table *table;
int schedule_start_idx;
s64 entry_point_delta;
@@ -207,7 +208,7 @@ int sja1105_init_scheduling(struct sja1105_private *priv)
}
/* Figure out the dimensioning of the problem */
- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ for (port = 0; port < ds->num_ports; port++) {
if (tas_data->offload[port]) {
num_entries += tas_data->offload[port]->num_entries;
num_cycles++;
@@ -269,7 +270,7 @@ int sja1105_init_scheduling(struct sja1105_private *priv)
schedule_entry_points_params->clksrc = SJA1105_TAS_CLKSRC_PTP;
schedule_entry_points_params->actsubsch = num_cycles - 1;
- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ for (port = 0; port < ds->num_ports; port++) {
const struct tc_taprio_qopt_offload *offload;
/* Relative base time */
s64 rbt;
@@ -468,6 +469,7 @@ bool sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
size_t num_entries = gating_cfg->num_entries;
struct tc_taprio_qopt_offload *dummy;
+ struct dsa_switch *ds = priv->ds;
struct sja1105_gate_entry *e;
bool conflict;
int i = 0;
@@ -491,7 +493,7 @@ bool sja1105_gating_check_conflicts(struct sja1105_private *priv, int port,
if (port != -1) {
conflict = sja1105_tas_check_conflicts(priv, port, dummy);
} else {
- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ for (port = 0; port < ds->num_ports; port++) {
conflict = sja1105_tas_check_conflicts(priv, port,
dummy);
if (conflict)
@@ -554,7 +556,7 @@ int sja1105_setup_tc_taprio(struct dsa_switch *ds, int port,
}
}
- for (other_port = 0; other_port < SJA1105_NUM_PORTS; other_port++) {
+ for (other_port = 0; other_port < ds->num_ports; other_port++) {
if (other_port == port)
continue;
@@ -885,7 +887,7 @@ void sja1105_tas_teardown(struct dsa_switch *ds)
cancel_work_sync(&priv->tas_data.tas_work);
- for (port = 0; port < SJA1105_NUM_PORTS; port++) {
+ for (port = 0; port < ds->num_ports; port++) {
offload = priv->tas_data.offload[port];
if (!offload)
continue;
diff --git a/drivers/net/dsa/sja1105/sja1105_tas.h b/drivers/net/dsa/sja1105/sja1105_tas.h
index 0c173ff51751..c05bd07e8221 100644
--- a/drivers/net/dsa/sja1105/sja1105_tas.h
+++ b/drivers/net/dsa/sja1105/sja1105_tas.h
@@ -39,7 +39,7 @@ struct sja1105_gating_config {
};
struct sja1105_tas_data {
- struct tc_taprio_qopt_offload *offload[SJA1105_NUM_PORTS];
+ struct tc_taprio_qopt_offload *offload[SJA1105_MAX_NUM_PORTS];
struct sja1105_gating_config gating_cfg;
enum sja1105_tas_state state;
enum sja1105_ptp_op last_op;
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c
index ffc4042b4502..f6e13e6c6a18 100644
--- a/drivers/net/dsa/sja1105/sja1105_vl.c
+++ b/drivers/net/dsa/sja1105/sja1105_vl.c
@@ -386,7 +386,7 @@ static int sja1105_init_virtual_links(struct sja1105_private *priv,
if (rule->type != SJA1105_RULE_VL)
continue;
- for_each_set_bit(port, &rule->port_mask, SJA1105_NUM_PORTS) {
+ for_each_set_bit(port, &rule->port_mask, SJA1105_MAX_NUM_PORTS) {
vl_lookup[k].format = SJA1105_VL_FORMAT_PSFP;
vl_lookup[k].port = port;
vl_lookup[k].macaddr = rule->key.vl.dmac;
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
index fde6e99274b6..130abb0f1438 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.c
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -79,6 +79,9 @@ static const struct xrs700x_mib xrs700x_mibs[] = {
XRS700X_MIB(XRS_EARLY_DROP_L, "early_drop", tx_dropped),
};
+static const u8 eth_hsrsup_addr[ETH_ALEN] = {
+ 0x01, 0x15, 0x4e, 0x00, 0x01, 0x00};
+
static void xrs700x_get_strings(struct dsa_switch *ds, int port,
u32 stringset, u8 *data)
{
@@ -329,6 +332,54 @@ static int xrs700x_port_add_bpdu_ipf(struct dsa_switch *ds, int port)
return 0;
}
+/* Add an inbound policy filter which matches the HSR/PRP supervision MAC
+ * range and forwards to the CPU port without discarding duplicates.
+ * This is required to correctly populate the HSR/PRP node_table.
+ * Leave the policy disabled; it will be enabled as needed.
+ */
+static int xrs700x_port_add_hsrsup_ipf(struct dsa_switch *ds, int port,
+ int fwdport)
+{
+ struct xrs700x *priv = ds->priv;
+ unsigned int val = 0;
+ int i = 0;
+ int ret;
+
+ /* Compare 40 bits of the destination MAC address. */
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_CFG(port, 1), 40 << 2);
+ if (ret)
+ return ret;
+
+ /* match HSR/PRP supervision destination 01:15:4e:00:01:XX */
+ for (i = 0; i < sizeof(eth_hsrsup_addr); i += 2) {
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_0(port, 1) + i,
+ eth_hsrsup_addr[i] |
+ (eth_hsrsup_addr[i + 1] << 8));
+ if (ret)
+ return ret;
+ }
+
+ /* Mirror HSR/PRP supervision to CPU port */
+ for (i = 0; i < ds->num_ports; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ val |= BIT(i);
+ }
+
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_MIRROR(port, 1), val);
+ if (ret)
+ return ret;
+
+ if (fwdport >= 0)
+ val |= BIT(fwdport);
+
+	/* The allow mask must be set to prevent duplicate discard */
+ ret = regmap_write(priv->regmap, XRS_ETH_ADDR_FWD_ALLOW(port, 1), val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
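(Editor's note, hedged: programming a compare length of 40 bits wildcards the final byte of eth_hsrsup_addr, so this one policy matches the whole supervision range 01:15:4e:00:01:00 through 01:15:4e:00:01:ff. The << 2 appears to place the length field above the low bits of XRS_ETH_ADDR_CFG — the same register whose bit 0 is later toggled to enable and disable the policy.)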
static int xrs700x_port_setup(struct dsa_switch *ds, int port)
{
bool cpu_port = dsa_is_cpu_port(ds, port);
@@ -511,6 +562,7 @@ static int xrs700x_hsr_join(struct dsa_switch *ds, int port,
struct net_device *slave;
int ret, i, hsr_pair[2];
enum hsr_version ver;
+ bool fwd = false;
ret = hsr_get_version(hsr, &ver);
if (ret)
@@ -556,6 +608,7 @@ static int xrs700x_hsr_join(struct dsa_switch *ds, int port,
if (ver == HSR_V1) {
val &= ~BIT(partner->index);
val &= ~BIT(port);
+ fwd = true;
}
val &= ~BIT(dsa_upstream_port(ds, port));
regmap_write(priv->regmap, XRS_PORT_FWD_MASK(partner->index), val);
@@ -565,6 +618,23 @@ static int xrs700x_hsr_join(struct dsa_switch *ds, int port,
XRS_PORT_FORWARDING);
regmap_fields_write(priv->ps_forward, port, XRS_PORT_FORWARDING);
+	/* Enable the inbound policy, which allows HSR/PRP supervision forwarding
+ * to the CPU port without discarding duplicates. Continue to
+ * forward to redundant ports when in HSR mode while discarding
+ * duplicates.
+ */
+ ret = xrs700x_port_add_hsrsup_ipf(ds, partner->index, fwd ? port : -1);
+ if (ret)
+ return ret;
+
+ ret = xrs700x_port_add_hsrsup_ipf(ds, port, fwd ? partner->index : -1);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(priv->regmap,
+ XRS_ETH_ADDR_CFG(partner->index, 1), 1, 1);
+ regmap_update_bits(priv->regmap, XRS_ETH_ADDR_CFG(port, 1), 1, 1);
+
hsr_pair[0] = port;
hsr_pair[1] = partner->index;
for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) {
@@ -611,6 +681,14 @@ static int xrs700x_hsr_leave(struct dsa_switch *ds, int port,
XRS_PORT_FORWARDING);
regmap_fields_write(priv->ps_forward, port, XRS_PORT_FORWARDING);
+	/* Disable the inbound policy added by xrs700x_port_add_hsrsup_ipf(),
+ * which allows HSR/PRP supervision forwarding to the CPU port without
+ * discarding duplicates.
+ */
+ regmap_update_bits(priv->regmap,
+ XRS_ETH_ADDR_CFG(partner->index, 1), 1, 0);
+ regmap_update_bits(priv->regmap, XRS_ETH_ADDR_CFG(port, 1), 1, 0);
+
hsr_pair[0] = port;
hsr_pair[1] = partner->index;
for (i = 0; i < ARRAY_SIZE(hsr_pair); i++) {
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index 4164eacc5c28..f5ec35fa4c63 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -1042,8 +1042,6 @@ enum ena_admin_aenq_group {
};
enum ena_admin_aenq_notification_syndrome {
- ENA_ADMIN_SUSPEND = 0,
- ENA_ADMIN_RESUME = 1,
ENA_ADMIN_UPDATE_HINTS = 2,
};
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 764852ead1d6..ab413fc1f68e 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -1979,7 +1979,8 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
if (rc)
return rc;
- if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
+ if (get_resp.u.max_queue_ext.version !=
+ ENA_FEATURE_MAX_QUEUE_EXT_VER)
return -EINVAL;
memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index c3be751e7379..3d6f0a466a9e 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -151,11 +151,14 @@ static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
return 0;
/* bounce buffer was used, so write it and get a new one */
- if (pkt_ctrl->idx) {
+ if (likely(pkt_ctrl->idx)) {
rc = ena_com_write_bounce_buffer_to_dev(io_sq,
pkt_ctrl->curr_bounce_buf);
- if (unlikely(rc))
+ if (unlikely(rc)) {
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Failed to write bounce buffer to device\n");
return rc;
+ }
pkt_ctrl->curr_bounce_buf =
ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
@@ -185,8 +188,11 @@ static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
if (!pkt_ctrl->descs_left_in_line) {
rc = ena_com_write_bounce_buffer_to_dev(io_sq,
pkt_ctrl->curr_bounce_buf);
- if (unlikely(rc))
+ if (unlikely(rc)) {
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Failed to write bounce buffer to device\n");
return rc;
+ }
pkt_ctrl->curr_bounce_buf =
ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
@@ -406,8 +412,11 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
}
if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
- !buffer_to_push))
+ !buffer_to_push)) {
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Push header wasn't provided in LLQ mode\n");
return -EINVAL;
+ }
rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
if (unlikely(rc))
@@ -423,6 +432,9 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
/* If the caller doesn't want to send packets */
if (unlikely(!num_bufs && !header_len)) {
rc = ena_com_close_bounce_buffer(io_sq);
+ if (rc)
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Failed to write buffers to LLQ\n");
*nb_hw_desc = io_sq->tail - start_tail;
return rc;
}
@@ -482,8 +494,11 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
	/* The first desc shares the same desc as the header */
if (likely(i != 0)) {
rc = ena_com_sq_update_tail(io_sq);
- if (unlikely(rc))
+ if (unlikely(rc)) {
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Failed to update sq tail\n");
return rc;
+ }
desc = get_sq_desc(io_sq);
if (unlikely(!desc))
@@ -512,8 +527,11 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;
rc = ena_com_sq_update_tail(io_sq);
- if (unlikely(rc))
+ if (unlikely(rc)) {
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Failed to update sq tail of the last descriptor\n");
return rc;
+ }
rc = ena_com_close_bounce_buffer(io_sq);
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 2fe7ccee55b2..27dae632efcb 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -233,10 +233,13 @@ int ena_get_sset_count(struct net_device *netdev, int sset)
{
struct ena_adapter *adapter = netdev_priv(netdev);
- if (sset != ETH_SS_STATS)
- return -EOPNOTSUPP;
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ena_get_sw_stats_count(adapter) +
+ ena_get_hw_stats_count(adapter);
+ }
- return ena_get_sw_stats_count(adapter) + ena_get_hw_stats_count(adapter);
+ return -EOPNOTSUPP;
}
static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
@@ -314,10 +317,11 @@ static void ena_get_ethtool_strings(struct net_device *netdev,
{
struct ena_adapter *adapter = netdev_priv(netdev);
- if (sset != ETH_SS_STATS)
- return;
-
- ena_get_strings(adapter, data, adapter->eni_stats_supported);
+ switch (sset) {
+ case ETH_SS_STATS:
+ ena_get_strings(adapter, data, adapter->eni_stats_supported);
+ break;
+ }
}
static int ena_get_link_ksettings(struct net_device *netdev,
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 881f88754bf6..3bb0e66b2c7e 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -35,9 +35,6 @@ MODULE_LICENSE("GPL");
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | \
NETIF_MSG_TX_DONE | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR)
-static int debug = -1;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static struct ena_aenq_handlers aenq_handlers;
@@ -89,6 +86,12 @@ static void ena_increase_stat(u64 *statp, u64 cnt,
u64_stats_update_end(syncp);
}
+static void ena_ring_tx_doorbell(struct ena_ring *tx_ring)
+{
+ ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
+ ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp);
+}
+
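(Editor's note: the new helper ties the MMIO doorbell write to its statistics increment so the two can no longer drift apart; the XDP and regular TX paths below are all converted to it, e.g.:

	if (flags & XDP_XMIT_FLUSH)
		ena_ring_tx_doorbell(xdp_ring);
)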
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ena_adapter *adapter = netdev_priv(dev);
@@ -147,7 +150,7 @@ static int ena_xmit_common(struct net_device *dev,
netif_dbg(adapter, tx_queued, dev,
"llq tx max burst size of queue %d achieved, writing doorbell to send burst\n",
ring->qid);
- ena_com_write_sq_doorbell(ring->ena_com_io_sq);
+ ena_ring_tx_doorbell(ring);
}
/* prepare the packet's descriptors to dma engine */
@@ -197,7 +200,6 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
int ret;
xdp_ring = ena_napi->xdp_ring;
- xdp_ring->first_interrupt = ena_napi->first_interrupt;
xdp_budget = budget;
@@ -229,6 +231,7 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
xdp_ring->tx_stats.napi_comp += napi_comp_call;
xdp_ring->tx_stats.tx_poll++;
u64_stats_update_end(&xdp_ring->syncp);
+ xdp_ring->tx_stats.last_napi_jiffies = jiffies;
return ret;
}
@@ -316,14 +319,12 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
xdpf->len);
if (rc)
goto error_unmap_dma;
- /* trigger the dma engine. ena_com_write_sq_doorbell()
- * has a mb
+
+ /* Trigger the DMA engine. ena_ring_tx_doorbell()
+ * includes a memory barrier.
*/
- if (flags & XDP_XMIT_FLUSH) {
- ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
- ena_increase_stat(&xdp_ring->tx_stats.doorbells, 1,
- &xdp_ring->syncp);
- }
+ if (flags & XDP_XMIT_FLUSH)
+ ena_ring_tx_doorbell(xdp_ring);
return rc;
@@ -364,11 +365,8 @@ static int ena_xdp_xmit(struct net_device *dev, int n,
}
/* Ring doorbell to make device aware of the packets */
- if (flags & XDP_XMIT_FLUSH) {
- ena_com_write_sq_doorbell(xdp_ring->ena_com_io_sq);
- ena_increase_stat(&xdp_ring->tx_stats.doorbells, 1,
- &xdp_ring->syncp);
- }
+ if (flags & XDP_XMIT_FLUSH)
+ ena_ring_tx_doorbell(xdp_ring);
spin_unlock(&xdp_ring->xdp_tx_lock);
@@ -383,7 +381,6 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
u32 verdict = XDP_PASS;
struct xdp_frame *xdpf;
u64 *xdp_stat;
- int qid;
rcu_read_lock();
xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
@@ -404,8 +401,7 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
}
/* Find xmit queue */
- qid = rx_ring->qid + rx_ring->adapter->num_io_queues;
- xdp_ring = &rx_ring->adapter->tx_ring[qid];
+ xdp_ring = rx_ring->xdp_ring;
/* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
spin_lock(&xdp_ring->xdp_tx_lock);
@@ -532,7 +528,7 @@ static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
} else {
ena_xdp_unregister_rxq_info(rx_ring);
- rx_ring->rx_headroom = 0;
+ rx_ring->rx_headroom = NET_SKB_PAD;
}
}
}
@@ -681,7 +677,6 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter,
ring->ena_dev = adapter->ena_dev;
ring->per_napi_packets = 0;
ring->cpu = 0;
- ring->first_interrupt = false;
ring->no_interrupt_event_cnt = 0;
u64_stats_init(&ring->syncp);
}
@@ -724,7 +719,9 @@ static void ena_init_io_rings(struct ena_adapter *adapter,
rxr->smoothed_interval =
ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
rxr->empty_rx_queue = 0;
+ rxr->rx_headroom = NET_SKB_PAD;
adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ rxr->xdp_ring = &adapter->tx_ring[i + adapter->num_io_queues];
}
}
}
@@ -978,47 +975,66 @@ static void ena_free_all_io_rx_resources(struct ena_adapter *adapter)
ena_free_rx_resources(adapter, i);
}
-static int ena_alloc_rx_page(struct ena_ring *rx_ring,
- struct ena_rx_buffer *rx_info, gfp_t gfp)
+static struct page *ena_alloc_map_page(struct ena_ring *rx_ring,
+ dma_addr_t *dma)
{
- int headroom = rx_ring->rx_headroom;
- struct ena_com_buf *ena_buf;
struct page *page;
- dma_addr_t dma;
- /* restore page offset value in case it has been changed by device */
- rx_info->page_offset = headroom;
-
- /* if previous allocated page is not used */
- if (unlikely(rx_info->page))
- return 0;
-
- page = alloc_page(gfp);
- if (unlikely(!page)) {
+ /* This allocates the page on the same NUMA node that the
+ * executing code is running on.
+ */
+ page = dev_alloc_page();
+ if (!page) {
ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
&rx_ring->syncp);
- return -ENOMEM;
+ return ERR_PTR(-ENOSPC);
}
/* To enable NIC-side port-mirroring, AKA SPAN port,
* we make the buffer readable from the NIC as well
*/
- dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
- DMA_BIDIRECTIONAL);
- if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
+ *dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(rx_ring->dev, *dma))) {
ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1,
&rx_ring->syncp);
-
__free_page(page);
- return -EIO;
+ return ERR_PTR(-EIO);
}
+
+ return page;
+}
+
+static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
+ struct ena_rx_buffer *rx_info)
+{
+ int headroom = rx_ring->rx_headroom;
+ struct ena_com_buf *ena_buf;
+ struct page *page;
+ dma_addr_t dma;
+ int tailroom;
+
+ /* restore page offset value in case it has been changed by device */
+ rx_info->page_offset = headroom;
+
+ /* if previous allocated page is not used */
+ if (unlikely(rx_info->page))
+ return 0;
+
+ /* We handle DMA here */
+ page = ena_alloc_map_page(rx_ring, &dma);
+ if (unlikely(IS_ERR(page)))
+ return PTR_ERR(page);
+
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"Allocate page %p, rx_info %p\n", page, rx_info);
+ tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
rx_info->page = page;
ena_buf = &rx_info->ena_buf;
ena_buf->paddr = dma + headroom;
- ena_buf->len = ENA_PAGE_SIZE - headroom;
+ ena_buf->len = ENA_PAGE_SIZE - headroom - tailroom;
return 0;
}
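ena_alloc_map_page() now reports failure through the returned pointer itself via ERR_PTR()/IS_ERR()/PTR_ERR(). Outside the kernel the same trick can be reproduced by reserving the top of the address space for small negative errnos; a self-contained sketch with simplified versions of the kernel macros:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <stdbool.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline bool IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *alloc_buf(size_t len)
{
	void *p = malloc(len);

	if (!p)
		return ERR_PTR(-ENOSPC); /* mirrors ena_alloc_map_page() */
	return p;
}

int main(void)
{
	void *p = alloc_buf(64);

	if (IS_ERR(p))
		return (int)-PTR_ERR(p);
	free(p);
	return 0;
}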
@@ -1065,8 +1081,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
rx_info = &rx_ring->rx_buffer_info[req_id];
- rc = ena_alloc_rx_page(rx_ring, rx_info,
- GFP_ATOMIC | __GFP_COMP);
+ rc = ena_alloc_rx_buffer(rx_ring, rx_info);
if (unlikely(rc < 0)) {
netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
"Failed to allocate buffer for rx queue %d\n",
@@ -1384,21 +1399,23 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
return tx_pkts;
}
-static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
+static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag)
{
struct sk_buff *skb;
- if (frags)
- skb = napi_get_frags(rx_ring->napi);
- else
+ if (!first_frag)
skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
rx_ring->rx_copybreak);
+ else
+ skb = build_skb(first_frag, ENA_PAGE_SIZE);
if (unlikely(!skb)) {
ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1,
&rx_ring->syncp);
+
netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
- "Failed to allocate skb. frags: %d\n", frags);
+ "Failed to allocate skb. first_frag %s\n",
+ first_frag ? "provided" : "not provided");
return NULL;
}
@@ -1410,10 +1427,12 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
u32 descs,
u16 *next_to_clean)
{
- struct sk_buff *skb;
struct ena_rx_buffer *rx_info;
u16 len, req_id, buf = 0;
- void *va;
+ struct sk_buff *skb;
+ void *page_addr;
+ u32 page_offset;
+ void *data_addr;
len = ena_bufs[buf].len;
req_id = ena_bufs[buf].req_id;
@@ -1431,12 +1450,14 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
rx_info, rx_info->page);
/* save virt address of first buffer */
- va = page_address(rx_info->page) + rx_info->page_offset;
+ page_addr = page_address(rx_info->page);
+ page_offset = rx_info->page_offset;
+ data_addr = page_addr + page_offset;
- prefetch(va);
+ prefetch(data_addr);
if (len <= rx_ring->rx_copybreak) {
- skb = ena_alloc_skb(rx_ring, false);
+ skb = ena_alloc_skb(rx_ring, NULL);
if (unlikely(!skb))
return NULL;
@@ -1449,7 +1470,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
dma_unmap_addr(&rx_info->ena_buf, paddr),
len,
DMA_FROM_DEVICE);
- skb_copy_to_linear_data(skb, va, len);
+ skb_copy_to_linear_data(skb, data_addr, len);
dma_sync_single_for_device(rx_ring->dev,
dma_unmap_addr(&rx_info->ena_buf, paddr),
len,
@@ -1463,16 +1484,18 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
return skb;
}
- skb = ena_alloc_skb(rx_ring, true);
+ ena_unmap_rx_buff(rx_ring, rx_info);
+
+ skb = ena_alloc_skb(rx_ring, page_addr);
if (unlikely(!skb))
return NULL;
- do {
- ena_unmap_rx_buff(rx_ring, rx_info);
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
- rx_info->page_offset, len, ENA_PAGE_SIZE);
+ /* Populate skb's linear part */
+ skb_reserve(skb, page_offset);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ do {
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"RX skb updated. len %d. data_len %d\n",
skb->len, skb->data_len);
@@ -1491,6 +1514,12 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
req_id = ena_bufs[buf].req_id;
rx_info = &rx_ring->rx_buffer_info[req_id];
+
+ ena_unmap_rx_buff(rx_ring, rx_info);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
+ rx_info->page_offset, len, ENA_PAGE_SIZE);
+
} while (1);
return skb;
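build_skb() requires the buffer to keep headroom at the start and room for struct skb_shared_info at the end, which is why ena_buf->len above drops both headroom and tailroom. A sketch of the arithmetic with assumed sizes (NET_SKB_PAD, page size, and the shinfo size all vary by configuration):

#include <stdio.h>

#define PAGE_SZ		4096UL
#define NET_SKB_PAD_	64UL	/* typical value; arch dependent */
#define SHINFO_SZ	320UL	/* assumed sizeof(struct skb_shared_info) */
#define SMP_CACHE	64UL

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long headroom = NET_SKB_PAD_;
	unsigned long tailroom = ALIGN_UP(SHINFO_SZ, SMP_CACHE);
	unsigned long dma_len  = PAGE_SZ - headroom - tailroom;

	/* the DMA target starts after the headroom, as in ena_buf->paddr */
	printf("headroom %lu, tailroom %lu, usable DMA len %lu\n",
	       headroom, tailroom, dma_len);
	return 0;
}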
@@ -1703,14 +1732,12 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
skb_record_rx_queue(skb, rx_ring->qid);
- if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) {
- total_len += rx_ring->ena_bufs[0].len;
+ if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak)
rx_copybreak_pkt++;
- napi_gro_receive(napi, skb);
- } else {
- total_len += skb->len;
- napi_gro_frags(napi);
- }
+
+ total_len += skb->len;
+
+ napi_gro_receive(napi, skb);
res_budget--;
} while (likely(res_budget));
@@ -1922,9 +1949,6 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
tx_ring = ena_napi->tx_ring;
rx_ring = ena_napi->rx_ring;
- tx_ring->first_interrupt = ena_napi->first_interrupt;
- rx_ring->first_interrupt = ena_napi->first_interrupt;
-
tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
@@ -1979,6 +2003,8 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
tx_ring->tx_stats.tx_poll++;
u64_stats_update_end(&tx_ring->syncp);
+ tx_ring->tx_stats.last_napi_jiffies = jiffies;
+
return ret;
}
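Stamping last_napi_jiffies at the end of every poll turns the TX-completion watchdog from a yes/no check into a diagnosable one: when a completion is late, the log can report how long ago NAPI last ran. A userspace analog using monotonic wall-clock time (names are illustrative only):

#include <stdio.h>
#include <time.h>
#include <unistd.h>

static struct timespec last_poll;

static void napi_poll(void)
{
	/* ... clean completions ... */
	clock_gettime(CLOCK_MONOTONIC, &last_poll); /* stamp each poll */
}

static long usecs_since_last_poll(void)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - last_poll.tv_sec) * 1000000L +
	       (now.tv_nsec - last_poll.tv_nsec) / 1000L;
}

int main(void)
{
	napi_poll();
	usleep(2000);
	/* watchdog path: report staleness instead of a bare "missing tx" */
	printf("%ld usecs since last napi execution\n",
	       usecs_since_last_poll());
	return 0;
}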
@@ -2003,7 +2029,8 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data)
{
struct ena_napi *ena_napi = data;
- ena_napi->first_interrupt = true;
+ /* Used to check HW health */
+ WRITE_ONCE(ena_napi->first_interrupt, true);
WRITE_ONCE(ena_napi->interrupts_masked, true);
smp_wmb(); /* write interrupts_masked before calling napi */
@@ -3089,14 +3116,11 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- if (netif_xmit_stopped(txq) || !netdev_xmit_more()) {
- /* trigger the dma engine. ena_com_write_sq_doorbell()
- * has a mb
+ if (netif_xmit_stopped(txq) || !netdev_xmit_more())
+ /* Trigger the DMA engine. ena_ring_tx_doorbell()
+ * includes a memory barrier.
*/
- ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
- ena_increase_stat(&tx_ring->tx_stats.doorbells, 1,
- &tx_ring->syncp);
- }
+ ena_ring_tx_doorbell(tx_ring);
return NETDEV_TX_OK;
@@ -3346,7 +3370,7 @@ static int ena_set_queues_placement_policy(struct pci_dev *pdev,
llq_feature_mask = 1 << ENA_ADMIN_LLQ;
if (!(ena_dev->supported_features & llq_feature_mask)) {
- dev_err(&pdev->dev,
+ dev_warn(&pdev->dev,
"LLQ is not supported Fallback to host mode policy.\n");
ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
return 0;
@@ -3657,7 +3681,9 @@ static void ena_fw_reset_device(struct work_struct *work)
static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
struct ena_ring *rx_ring)
{
- if (likely(rx_ring->first_interrupt))
+ struct ena_napi *ena_napi = container_of(rx_ring->napi, struct ena_napi, napi);
+
+ if (likely(READ_ONCE(ena_napi->first_interrupt)))
return 0;
if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
@@ -3681,6 +3707,10 @@ static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
struct ena_ring *tx_ring)
{
+ struct ena_napi *ena_napi = container_of(tx_ring->napi, struct ena_napi, napi);
+ unsigned int time_since_last_napi;
+ unsigned int missing_tx_comp_to;
+ bool is_tx_comp_time_expired;
struct ena_tx_buffer *tx_buf;
unsigned long last_jiffies;
u32 missed_tx = 0;
@@ -3694,8 +3724,10 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
/* no pending Tx at this location */
continue;
- if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
- 2 * adapter->missing_tx_completion_to))) {
+ is_tx_comp_time_expired = time_is_before_jiffies(last_jiffies +
+ 2 * adapter->missing_tx_completion_to);
+
+ if (unlikely(!READ_ONCE(ena_napi->first_interrupt) && is_tx_comp_time_expired)) {
/* If after graceful period interrupt is still not
* received, we schedule a reset
*/
@@ -3708,12 +3740,17 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
return -EIO;
}
- if (unlikely(time_is_before_jiffies(last_jiffies +
- adapter->missing_tx_completion_to))) {
- if (!tx_buf->print_once)
+ is_tx_comp_time_expired = time_is_before_jiffies(last_jiffies +
+ adapter->missing_tx_completion_to);
+
+ if (unlikely(is_tx_comp_time_expired)) {
+ if (!tx_buf->print_once) {
+ time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies);
+ missing_tx_comp_to = jiffies_to_msecs(adapter->missing_tx_completion_to);
netif_notice(adapter, tx_err, adapter->netdev,
- "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
- tx_ring->qid, i);
+ "Found a Tx that wasn't completed on time, qid %d, index %d. %u usecs have passed since last napi execution. Missing Tx timeout value %u msecs\n",
+ tx_ring->qid, i, time_since_last_napi, missing_tx_comp_to);
+ }
tx_buf->print_once = 1;
missed_tx++;
@@ -4244,7 +4281,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->ena_dev = ena_dev;
adapter->netdev = netdev;
adapter->pdev = pdev;
- adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+ adapter->msg_enable = DEFAULT_MSG_ENABLE;
ena_dev->net_device = netdev;
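first_interrupt is written from the IRQ handler and read from the watchdog with no lock, so the patch wraps both sides in WRITE_ONCE()/READ_ONCE() to rule out torn or compiler-cached accesses. In portable C the closest analog is a relaxed C11 atomic; a sketch:

#include <stdatomic.h>
#include <stdio.h>
#include <stdbool.h>

struct napi_ctx {
	atomic_bool first_interrupt; /* stands in for the u8 flag */
};

static void irq_handler(struct napi_ctx *n)
{
	/* WRITE_ONCE() analog: one untearable store, no lock needed */
	atomic_store_explicit(&n->first_interrupt, true,
			      memory_order_relaxed);
}

static bool watchdog_saw_interrupt(struct napi_ctx *n)
{
	/* READ_ONCE() analog: always reload, never a stale register copy */
	return atomic_load_explicit(&n->first_interrupt,
				    memory_order_relaxed);
}

int main(void)
{
	struct napi_ctx n = { .first_interrupt = false };

	irq_handler(&n);
	printf("seen: %d\n", watchdog_saw_interrupt(&n));
	return 0;
}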
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 74af15d62ee1..0c39fc2fa345 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -55,12 +55,6 @@
#define ENA_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2)
#define ENA_DEFAULT_RX_COPYBREAK (256 - NET_IP_ALIGN)
-/* limit the buffer size to 600 bytes to handle MTU changes from very
- * small to very large, in which case the number of buffers per packet
- * could exceed ENA_PKT_MAX_BUFS
- */
-#define ENA_DEFAULT_MIN_RX_BUFF_ALLOC_SIZE 600
-
#define ENA_MIN_MTU 128
#define ENA_NAME_MAX_LEN 20
@@ -135,12 +129,12 @@ struct ena_irq {
};
struct ena_napi {
- struct napi_struct napi ____cacheline_aligned;
+ u8 first_interrupt ____cacheline_aligned;
+ u8 interrupts_masked;
+ struct napi_struct napi;
struct ena_ring *tx_ring;
struct ena_ring *rx_ring;
struct ena_ring *xdp_ring;
- bool first_interrupt;
- bool interrupts_masked;
u32 qid;
struct dim dim;
};
@@ -212,6 +206,7 @@ struct ena_stats_tx {
u64 llq_buffer_copy;
u64 missed_tx;
u64 unmask_interrupt;
+ u64 last_napi_jiffies;
};
struct ena_stats_rx {
@@ -259,6 +254,10 @@ struct ena_ring {
struct bpf_prog *xdp_bpf_prog;
struct xdp_rxq_info xdp_rxq;
spinlock_t xdp_tx_lock; /* synchronize XDP TX/Redirect traffic */
+ /* Used by rx queues only; points to the xdp tx ring to which
+ * traffic from this rx ring should be redirected.
+ */
+ struct ena_ring *xdp_ring;
u16 next_to_use;
u16 next_to_clean;
@@ -271,7 +270,6 @@ struct ena_ring {
/* The maximum header length the device can handle */
u8 tx_max_header_size;
- bool first_interrupt;
bool disable_meta_caching;
u16 no_interrupt_event_cnt;
@@ -414,11 +412,6 @@ enum ena_xdp_errors_t {
ENA_XDP_NO_ENOUGH_QUEUES,
};
-static inline bool ena_xdp_queues_present(struct ena_adapter *adapter)
-{
- return adapter->xdp_first_ring != 0;
-}
-
static inline bool ena_xdp_present(struct ena_adapter *adapter)
{
return !!adapter->xdp_bpf_prog;
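Moving first_interrupt and interrupts_masked to the head of struct ena_napi with ____cacheline_aligned keeps the IRQ-written flags together on one cache line instead of straddling napi_struct. The effect can be inspected with offsetof(); a sketch using C11 alignas as a stand-in for the kernel attribute:

#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>
#include <stdint.h>

#define CACHELINE 64

struct big { char pad[400]; }; /* stands in for struct napi_struct */

struct ena_napi_like {
	alignas(CACHELINE) uint8_t first_interrupt; /* IRQ-written flags */
	uint8_t interrupts_masked;		    /* share one line */
	struct big napi;
	uint32_t qid;
};

int main(void)
{
	printf("first_interrupt @ %zu, interrupts_masked @ %zu\n",
	       offsetof(struct ena_napi_like, first_interrupt),
	       offsetof(struct ena_napi_like, interrupts_masked));
	return 0;
}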
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index 9d70cb7544f1..43d821fe7a54 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -63,7 +63,7 @@
#define AT_MAX_RECEIVE_QUEUE 4
#define AT_DEF_RECEIVE_QUEUE 1
-#define AT_MAX_TRANSMIT_QUEUE 2
+#define AT_MAX_TRANSMIT_QUEUE 4
#define AT_DMA_HI_ADDR_MASK 0xffffffff00000000ULL
#define AT_DMA_LO_ADDR_MASK 0x00000000ffffffffULL
@@ -294,11 +294,6 @@ enum atl1c_nic_type {
athr_mt,
};
-enum atl1c_trans_queue {
- atl1c_trans_normal = 0,
- atl1c_trans_high = 1
-};
-
struct atl1c_hw_stats {
/* rx */
unsigned long rx_ok; /* The number of good packets received. */
@@ -475,13 +470,16 @@ struct atl1c_buffer {
/* transmit packet descriptor (tpd) ring */
struct atl1c_tpd_ring {
+ struct atl1c_adapter *adapter;
void *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
+ u16 num;
u16 size; /* descriptor ring length in bytes */
u16 count; /* number of descriptors in the ring */
u16 next_to_use;
atomic_t next_to_clean;
struct atl1c_buffer *buffer_info;
+ struct napi_struct napi;
};
/* receive free descriptor (rfd) ring */
@@ -497,27 +495,30 @@ struct atl1c_rfd_ring {
/* receive return descriptor (rrd) ring */
struct atl1c_rrd_ring {
+ struct atl1c_adapter *adapter;
void *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
+ u16 num;
u16 size; /* descriptor ring length in bytes */
u16 count; /* number of descriptors in the ring */
u16 next_to_use;
u16 next_to_clean;
+ struct napi_struct napi;
+ struct page *rx_page;
+ unsigned int rx_page_offset;
};
/* board specific private data structure */
struct atl1c_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
- struct napi_struct napi;
- struct napi_struct tx_napi;
- struct page *rx_page;
- unsigned int rx_page_offset;
unsigned int rx_frag_size;
struct atl1c_hw hw;
struct atl1c_hw_stats hw_stats;
struct mii_if_info mii; /* MII interface info */
u16 rx_buffer_len;
+ unsigned int tx_queue_count;
+ unsigned int rx_queue_count;
unsigned long flags;
#define __AT_TESTING 0x0001
@@ -543,8 +544,8 @@ struct atl1c_adapter {
/* All Descriptor memory */
struct atl1c_ring_header ring_header;
struct atl1c_tpd_ring tpd_ring[AT_MAX_TRANSMIT_QUEUE];
- struct atl1c_rfd_ring rfd_ring;
- struct atl1c_rrd_ring rrd_ring;
+ struct atl1c_rfd_ring rfd_ring[AT_MAX_RECEIVE_QUEUE];
+ struct atl1c_rrd_ring rrd_ring[AT_MAX_RECEIVE_QUEUE];
u32 bd_number; /* board number;*/
};
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
index c263b326cec5..c567c920628f 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
@@ -528,15 +528,24 @@ void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed);
#define REG_RX_BASE_ADDR_HI 0x1540
#define REG_TX_BASE_ADDR_HI 0x1544
#define REG_RFD0_HEAD_ADDR_LO 0x1550
+#define REG_RFD1_HEAD_ADDR_LO 0x1554
+#define REG_RFD2_HEAD_ADDR_LO 0x1558
+#define REG_RFD3_HEAD_ADDR_LO 0x155C
#define REG_RFD_RING_SIZE 0x1560
#define RFD_RING_SIZE_MASK 0x0FFF
#define REG_RX_BUF_SIZE 0x1564
#define RX_BUF_SIZE_MASK 0xFFFF
#define REG_RRD0_HEAD_ADDR_LO 0x1568
+#define REG_RRD1_HEAD_ADDR_LO 0x156C
+#define REG_RRD2_HEAD_ADDR_LO 0x1570
+#define REG_RRD3_HEAD_ADDR_LO 0x1574
#define REG_RRD_RING_SIZE 0x1578
#define RRD_RING_SIZE_MASK 0x0FFF
#define REG_TPD_PRI1_ADDR_LO 0x157C
#define REG_TPD_PRI0_ADDR_LO 0x1580
+#define REG_TPD_PRI2_ADDR_LO 0x1F10
+#define REG_TPD_PRI3_ADDR_LO 0x1F14
+
#define REG_TPD_RING_SIZE 0x1584
#define TPD_RING_SIZE_MASK 0xFFFF
@@ -655,15 +664,26 @@ void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed);
/* Mail box */
#define MB_RFDX_PROD_IDX_MASK 0xFFFF
#define REG_MB_RFD0_PROD_IDX 0x15E0
+#define REG_MB_RFD1_PROD_IDX 0x15E4
+#define REG_MB_RFD2_PROD_IDX 0x15E8
+#define REG_MB_RFD3_PROD_IDX 0x15EC
#define REG_TPD_PRI1_PIDX 0x15F0 /* 16bit,hi-tpd producer idx */
#define REG_TPD_PRI0_PIDX 0x15F2 /* 16bit,lo-tpd producer idx */
#define REG_TPD_PRI1_CIDX 0x15F4 /* 16bit,hi-tpd consumer idx */
#define REG_TPD_PRI0_CIDX 0x15F6 /* 16bit,lo-tpd consumer idx */
+#define REG_TPD_PRI3_PIDX 0x1F18
+#define REG_TPD_PRI2_PIDX 0x1F1A
+#define REG_TPD_PRI3_CIDX 0x1F1C
+#define REG_TPD_PRI2_CIDX 0x1F1E
+
#define REG_MB_RFD01_CONS_IDX 0x15F8
#define MB_RFD0_CONS_IDX_MASK 0x0000FFFF
#define MB_RFD1_CONS_IDX_MASK 0xFFFF0000
+#define REG_MB_RFD23_CONS_IDX 0x15FC
+#define MB_RFD2_CONS_IDX_MASK 0x0000FFFF
+#define MB_RFD3_CONS_IDX_MASK 0xFFFF0000
/* Interrupt Status Register */
#define REG_ISR 0x1600
@@ -687,7 +707,7 @@ void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed);
/* GPHY low power state interrupt */
#define ISR_GPHY_LPW 0x00002000
#define ISR_TXQ_TO_RST 0x00004000
-#define ISR_TX_PKT 0x00008000
+#define ISR_TX_PKT_0 0x00008000
#define ISR_RX_PKT_0 0x00010000
#define ISR_RX_PKT_1 0x00020000
#define ISR_RX_PKT_2 0x00040000
@@ -699,6 +719,9 @@ void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed);
#define ISR_NFERR_DETECTED 0x01000000
#define ISR_CERR_DETECTED 0x02000000
#define ISR_PHY_LINKDOWN 0x04000000
+#define ISR_TX_PKT_1 0x10000000
+#define ISR_TX_PKT_2 0x20000000
+#define ISR_TX_PKT_3 0x40000000
#define ISR_DIS_INT 0x80000000
/* Interrupt Mask Register */
@@ -713,11 +736,15 @@ void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed);
ISR_TXQ_TO_RST |\
ISR_DMAW_TO_RST |\
ISR_GPHY |\
- ISR_TX_PKT |\
- ISR_RX_PKT_0 |\
ISR_GPHY_LPW |\
ISR_PHY_LINKDOWN)
+#define ISR_TX_PKT ( \
+ ISR_TX_PKT_0 | \
+ ISR_TX_PKT_1 | \
+ ISR_TX_PKT_2 | \
+ ISR_TX_PKT_3)
+
#define ISR_RX_PKT (\
ISR_RX_PKT_0 |\
ISR_RX_PKT_1 |\
@@ -771,6 +798,7 @@ void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed);
#define REG_MT_VERSION 0x1F0C
#define MT_MAGIC 0xaabb1234
+#define MT_MODE_4Q BIT(0)
#define L1D_MPW_PHYID1 0xD01C /* V7 */
#define L1D_MPW_PHYID2 0xD01D /* V1-V6 */
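The new RFD1..3/RRD1..3 and TPD_PRI2..3 definitions pay off because atl1c_main.c (next file) indexes them through a per-queue table instead of open-coded if/else chains. A compact, runnable demo of that table-driven lookup; the offsets and ISR bits mirror the defines above, while struct and function names are illustrative:

#include <stdio.h>
#include <stdint.h>

struct qregs {
	uint16_t tpd_prod;
	uint16_t rfd_prod;
	uint32_t tx_isr;
};

static const struct qregs qregs[4] = {
	{ 0x15F2, 0x15E0, 1u << 15 },
	{ 0x15F0, 0x15E4, 1u << 28 },
	{ 0x1F1A, 0x15E8, 1u << 29 },
	{ 0x1F18, 0x15EC, 1u << 30 },
};

static void kick_queue(unsigned int q, unsigned int prod_idx)
{
	/* one code path for all queues; the table carries the variance */
	printf("write 0x%04x <- %u (isr bit 0x%08x)\n",
	       (unsigned int)qregs[q].tpd_prod, prod_idx,
	       (unsigned int)qregs[q].tx_isr);
}

int main(void)
{
	for (unsigned int q = 0; q < 4; q++)
		kick_queue(q, 1);
	return 0;
}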
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 77da1c54c49f..1c6246a5dc22 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -36,18 +36,51 @@ MODULE_AUTHOR("Qualcomm Atheros Inc.");
MODULE_DESCRIPTION("Qualcomm Atheros 100/1000M Ethernet Network Driver");
MODULE_LICENSE("GPL");
+struct atl1c_qregs {
+ u16 tpd_addr_lo;
+ u16 tpd_prod;
+ u16 tpd_cons;
+ u16 rfd_addr_lo;
+ u16 rrd_addr_lo;
+ u16 rfd_prod;
+ u32 tx_isr;
+ u32 rx_isr;
+};
+
+static struct atl1c_qregs atl1c_qregs[AT_MAX_TRANSMIT_QUEUE] = {
+ {
+ REG_TPD_PRI0_ADDR_LO, REG_TPD_PRI0_PIDX, REG_TPD_PRI0_CIDX,
+ REG_RFD0_HEAD_ADDR_LO, REG_RRD0_HEAD_ADDR_LO,
+ REG_MB_RFD0_PROD_IDX, ISR_TX_PKT_0, ISR_RX_PKT_0
+ },
+ {
+ REG_TPD_PRI1_ADDR_LO, REG_TPD_PRI1_PIDX, REG_TPD_PRI1_CIDX,
+ REG_RFD1_HEAD_ADDR_LO, REG_RRD1_HEAD_ADDR_LO,
+ REG_MB_RFD1_PROD_IDX, ISR_TX_PKT_1, ISR_RX_PKT_1
+ },
+ {
+ REG_TPD_PRI2_ADDR_LO, REG_TPD_PRI2_PIDX, REG_TPD_PRI2_CIDX,
+ REG_RFD2_HEAD_ADDR_LO, REG_RRD2_HEAD_ADDR_LO,
+ REG_MB_RFD2_PROD_IDX, ISR_TX_PKT_2, ISR_RX_PKT_2
+ },
+ {
+ REG_TPD_PRI3_ADDR_LO, REG_TPD_PRI3_PIDX, REG_TPD_PRI3_CIDX,
+ REG_RFD3_HEAD_ADDR_LO, REG_RRD3_HEAD_ADDR_LO,
+ REG_MB_RFD3_PROD_IDX, ISR_TX_PKT_3, ISR_RX_PKT_3
+ },
+};
+
static int atl1c_stop_mac(struct atl1c_hw *hw);
static void atl1c_disable_l0s_l1(struct atl1c_hw *hw);
static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed);
static void atl1c_start_mac(struct atl1c_adapter *adapter);
-static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter,
- int *work_done, int work_to_do);
static int atl1c_up(struct atl1c_adapter *adapter);
static void atl1c_down(struct atl1c_adapter *adapter);
static int atl1c_reset_mac(struct atl1c_hw *hw);
static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter);
static int atl1c_configure(struct atl1c_adapter *adapter);
-static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, bool napi_mode);
+static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
+ bool napi_mode);
static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
@@ -646,33 +679,26 @@ static int atl1c_alloc_queues(struct atl1c_adapter *adapter)
return 0;
}
-static void atl1c_set_mac_type(struct atl1c_hw *hw)
+static enum atl1c_nic_type atl1c_get_mac_type(struct pci_dev *pdev,
+ u8 __iomem *hw_addr)
{
- u32 magic;
- switch (hw->device_id) {
+ switch (pdev->device) {
case PCI_DEVICE_ID_ATTANSIC_L2C:
- hw->nic_type = athr_l2c;
- break;
+ return athr_l2c;
case PCI_DEVICE_ID_ATTANSIC_L1C:
- hw->nic_type = athr_l1c;
- break;
+ return athr_l1c;
case PCI_DEVICE_ID_ATHEROS_L2C_B:
- hw->nic_type = athr_l2c_b;
- break;
+ return athr_l2c_b;
case PCI_DEVICE_ID_ATHEROS_L2C_B2:
- hw->nic_type = athr_l2c_b2;
- break;
+ return athr_l2c_b2;
case PCI_DEVICE_ID_ATHEROS_L1D:
- hw->nic_type = athr_l1d;
- break;
+ return athr_l1d;
case PCI_DEVICE_ID_ATHEROS_L1D_2_0:
- hw->nic_type = athr_l1d_2;
- AT_READ_REG(hw, REG_MT_MAGIC, &magic);
- if (magic == MT_MAGIC)
- hw->nic_type = athr_mt;
- break;
+ if (readl(hw_addr + REG_MT_MAGIC) == MT_MAGIC)
+ return athr_mt;
+ return athr_l1d_2;
default:
- break;
+ return athr_l1c;
}
}
@@ -680,7 +706,6 @@ static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
{
u32 link_ctrl_data;
- atl1c_set_mac_type(hw);
AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE |
@@ -771,14 +796,14 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter)
struct atl1c_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
u32 revision;
-
+ int i;
adapter->wol = 0;
device_set_wakeup_enable(&pdev->dev, false);
adapter->link_speed = SPEED_0;
adapter->link_duplex = FULL_DUPLEX;
adapter->tpd_ring[0].count = 1024;
- adapter->rfd_ring.count = 512;
+ adapter->rfd_ring[0].count = 512;
hw->vendor_id = pdev->vendor;
hw->device_id = pdev->device;
@@ -796,6 +821,10 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter)
atl1c_patch_assign(hw);
hw->intr_mask = IMR_NORMAL_MASK;
+ for (i = 0; i < adapter->tx_queue_count; ++i)
+ hw->intr_mask |= atl1c_qregs[i].tx_isr;
+ for (i = 0; i < adapter->rx_queue_count; ++i)
+ hw->intr_mask |= atl1c_qregs[i].rx_isr;
hw->phy_configured = false;
hw->preamble_len = 7;
hw->max_frame_size = adapter->netdev->mtu;
@@ -855,12 +884,12 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev,
/**
* atl1c_clean_tx_ring - Free Tx-skb
* @adapter: board private structure
- * @type: type of transmit queue
+ * @queue: idx of transmit queue
*/
static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
- enum atl1c_trans_queue type)
+ u32 queue)
{
- struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
+ struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[queue];
struct atl1c_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
u16 index, ring_count;
@@ -883,11 +912,12 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
/**
* atl1c_clean_rx_ring - Free rx-reservation skbs
* @adapter: board private structure
+ * @queue: idx of receive queue
*/
-static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
+static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter, u32 queue)
{
- struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
- struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
+ struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[queue];
+ struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
struct atl1c_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
int j;
@@ -910,26 +940,28 @@ static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
{
struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
- struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
- struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
+ struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
+ struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
struct atl1c_buffer *buffer_info;
int i, j;
- for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
+ for (i = 0; i < adapter->tx_queue_count; i++) {
tpd_ring[i].next_to_use = 0;
atomic_set(&tpd_ring[i].next_to_clean, 0);
buffer_info = tpd_ring[i].buffer_info;
for (j = 0; j < tpd_ring->count; j++)
ATL1C_SET_BUFFER_STATE(&buffer_info[i],
- ATL1C_BUFFER_FREE);
- }
- rfd_ring->next_to_use = 0;
- rfd_ring->next_to_clean = 0;
- rrd_ring->next_to_use = 0;
- rrd_ring->next_to_clean = 0;
- for (j = 0; j < rfd_ring->count; j++) {
- buffer_info = &rfd_ring->buffer_info[j];
- ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
+ ATL1C_BUFFER_FREE);
+ }
+ for (i = 0; i < adapter->rx_queue_count; i++) {
+ rfd_ring[i].next_to_use = 0;
+ rfd_ring[i].next_to_clean = 0;
+ rrd_ring[i].next_to_use = 0;
+ rrd_ring[i].next_to_clean = 0;
+ for (j = 0; j < rfd_ring[i].count; j++) {
+ buffer_info = &rfd_ring[i].buffer_info[j];
+ ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
+ }
}
}
@@ -942,20 +974,24 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
+ int i;
dma_free_coherent(&pdev->dev, adapter->ring_header.size,
adapter->ring_header.desc, adapter->ring_header.dma);
adapter->ring_header.desc = NULL;
/* Note: just free tpd_ring.buffer_info,
- * it contain rfd_ring.buffer_info, do not double free */
+ * it contains rfd_ring.buffer_info; do not double free
+ */
if (adapter->tpd_ring[0].buffer_info) {
kfree(adapter->tpd_ring[0].buffer_info);
adapter->tpd_ring[0].buffer_info = NULL;
}
- if (adapter->rx_page) {
- put_page(adapter->rx_page);
- adapter->rx_page = NULL;
+ for (i = 0; i < adapter->rx_queue_count; ++i) {
+ if (adapter->rrd_ring[i].rx_page) {
+ put_page(adapter->rrd_ring[i].rx_page);
+ adapter->rrd_ring[i].rx_page = NULL;
+ }
}
}
@@ -969,37 +1005,46 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
- struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
- struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
+ struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
+ struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
struct atl1c_ring_header *ring_header = &adapter->ring_header;
+ int tqc = adapter->tx_queue_count;
+ int rqc = adapter->rx_queue_count;
int size;
int i;
int count = 0;
- int rx_desc_count = 0;
u32 offset = 0;
- rrd_ring->count = rfd_ring->count;
- for (i = 1; i < AT_MAX_TRANSMIT_QUEUE; i++)
+ /* Even though only one tpd queue is actually used, the "high"
+ * priority tpd queue also gets initialized
+ */
+ if (tqc == 1)
+ tqc = 2;
+
+ for (i = 1; i < tqc; i++)
tpd_ring[i].count = tpd_ring[0].count;
- /* 2 tpd queue, one high priority queue,
- * another normal priority queue */
- size = sizeof(struct atl1c_buffer) * (tpd_ring->count * 2 +
- rfd_ring->count);
+ size = sizeof(struct atl1c_buffer) * (tpd_ring->count * tqc +
+ rfd_ring->count * rqc);
tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
if (unlikely(!tpd_ring->buffer_info))
goto err_nomem;
- for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
- tpd_ring[i].buffer_info =
- (tpd_ring->buffer_info + count);
+ for (i = 0; i < tqc; i++) {
+ tpd_ring[i].adapter = adapter;
+ tpd_ring[i].num = i;
+ tpd_ring[i].buffer_info = (tpd_ring->buffer_info + count);
count += tpd_ring[i].count;
}
- rfd_ring->buffer_info =
- (tpd_ring->buffer_info + count);
- count += rfd_ring->count;
- rx_desc_count += rfd_ring->count;
+ for (i = 0; i < rqc; i++) {
+ rrd_ring[i].adapter = adapter;
+ rrd_ring[i].num = i;
+ rrd_ring[i].count = rfd_ring[0].count;
+ rfd_ring[i].count = rfd_ring[0].count;
+ rfd_ring[i].buffer_info = (tpd_ring->buffer_info + count);
+ count += rfd_ring->count;
+ }
/*
* real ring DMA buffer
@@ -1007,9 +1052,9 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
* additional bytes tacked onto the end.
*/
ring_header->size = size =
- sizeof(struct atl1c_tpd_desc) * tpd_ring->count * 2 +
- sizeof(struct atl1c_rx_free_desc) * rx_desc_count +
- sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
+ sizeof(struct atl1c_tpd_desc) * tpd_ring->count * tqc +
+ sizeof(struct atl1c_rx_free_desc) * rfd_ring->count * rqc +
+ sizeof(struct atl1c_recv_ret_status) * rfd_ring->count * rqc +
8 * 4;
ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size,
@@ -1022,25 +1067,28 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
tpd_ring[0].dma = roundup(ring_header->dma, 8);
offset = tpd_ring[0].dma - ring_header->dma;
- for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
+ for (i = 0; i < tqc; i++) {
tpd_ring[i].dma = ring_header->dma + offset;
- tpd_ring[i].desc = (u8 *) ring_header->desc + offset;
+ tpd_ring[i].desc = (u8 *)ring_header->desc + offset;
tpd_ring[i].size =
sizeof(struct atl1c_tpd_desc) * tpd_ring[i].count;
offset += roundup(tpd_ring[i].size, 8);
}
- /* init RFD ring */
- rfd_ring->dma = ring_header->dma + offset;
- rfd_ring->desc = (u8 *) ring_header->desc + offset;
- rfd_ring->size = sizeof(struct atl1c_rx_free_desc) * rfd_ring->count;
- offset += roundup(rfd_ring->size, 8);
+ for (i = 0; i < rqc; i++) {
+ /* init RFD ring */
+ rfd_ring[i].dma = ring_header->dma + offset;
+ rfd_ring[i].desc = (u8 *)ring_header->desc + offset;
+ rfd_ring[i].size = sizeof(struct atl1c_rx_free_desc) *
+ rfd_ring[i].count;
+ offset += roundup(rfd_ring[i].size, 8);
- /* init RRD ring */
- rrd_ring->dma = ring_header->dma + offset;
- rrd_ring->desc = (u8 *) ring_header->desc + offset;
- rrd_ring->size = sizeof(struct atl1c_recv_ret_status) *
- rrd_ring->count;
- offset += roundup(rrd_ring->size, 8);
+ /* init RRD ring */
+ rrd_ring[i].dma = ring_header->dma + offset;
+ rrd_ring[i].desc = (u8 *)ring_header->desc + offset;
+ rrd_ring[i].size = sizeof(struct atl1c_recv_ret_status) *
+ rrd_ring[i].count;
+ offset += roundup(rrd_ring[i].size, 8);
+ }
return 0;
@@ -1052,31 +1100,34 @@ err_nomem:
static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
{
struct atl1c_hw *hw = &adapter->hw;
- struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
- struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
- struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *)
- adapter->tpd_ring;
+ struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring;
+ struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring;
+ struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
+ int i;
+ int tx_queue_count = adapter->tx_queue_count;
+
+ if (tx_queue_count == 1)
+ tx_queue_count = 2;
/* TPD */
AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI,
- (u32)((tpd_ring[atl1c_trans_normal].dma &
- AT_DMA_HI_ADDR_MASK) >> 32));
+ (u32)((tpd_ring[0].dma & AT_DMA_HI_ADDR_MASK) >> 32));
/* just enable normal priority TX queue */
- AT_WRITE_REG(hw, REG_TPD_PRI0_ADDR_LO,
- (u32)(tpd_ring[atl1c_trans_normal].dma &
- AT_DMA_LO_ADDR_MASK));
- AT_WRITE_REG(hw, REG_TPD_PRI1_ADDR_LO,
- (u32)(tpd_ring[atl1c_trans_high].dma &
- AT_DMA_LO_ADDR_MASK));
+ for (i = 0; i < tx_queue_count; i++) {
+ AT_WRITE_REG(hw, atl1c_qregs[i].tpd_addr_lo,
+ (u32)(tpd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
+ }
AT_WRITE_REG(hw, REG_TPD_RING_SIZE,
(u32)(tpd_ring[0].count & TPD_RING_SIZE_MASK));
/* RFD */
AT_WRITE_REG(hw, REG_RX_BASE_ADDR_HI,
- (u32)((rfd_ring->dma & AT_DMA_HI_ADDR_MASK) >> 32));
- AT_WRITE_REG(hw, REG_RFD0_HEAD_ADDR_LO,
- (u32)(rfd_ring->dma & AT_DMA_LO_ADDR_MASK));
+ (u32)((rfd_ring->dma & AT_DMA_HI_ADDR_MASK) >> 32));
+ for (i = 0; i < adapter->rx_queue_count; i++) {
+ AT_WRITE_REG(hw, atl1c_qregs[i].rfd_addr_lo,
+ (u32)(rfd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
+ }
AT_WRITE_REG(hw, REG_RFD_RING_SIZE,
rfd_ring->count & RFD_RING_SIZE_MASK);
@@ -1084,8 +1135,10 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
adapter->rx_buffer_len & RX_BUF_SIZE_MASK);
/* RRD */
- AT_WRITE_REG(hw, REG_RRD0_HEAD_ADDR_LO,
- (u32)(rrd_ring->dma & AT_DMA_LO_ADDR_MASK));
+ for (i = 0; i < adapter->rx_queue_count; i++) {
+ AT_WRITE_REG(hw, atl1c_qregs[i].rrd_addr_lo,
+ (u32)(rrd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
+ }
AT_WRITE_REG(hw, REG_RRD_RING_SIZE,
(rrd_ring->count & RRD_RING_SIZE_MASK));
@@ -1438,14 +1491,28 @@ static int atl1c_configure(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int num;
+ int i;
+
+ if (adapter->hw.nic_type == athr_mt) {
+ u32 mode;
+
+ AT_READ_REG(&adapter->hw, REG_MT_MODE, &mode);
+ if (adapter->rx_queue_count == 4)
+ mode |= MT_MODE_4Q;
+ else
+ mode &= ~MT_MODE_4Q;
+ AT_WRITE_REG(&adapter->hw, REG_MT_MODE, mode);
+ }
atl1c_init_ring_ptrs(adapter);
atl1c_set_multi(netdev);
atl1c_restore_vlan(adapter);
- num = atl1c_alloc_rx_buffer(adapter, false);
- if (unlikely(num == 0))
- return -ENOMEM;
+ for (i = 0; i < adapter->rx_queue_count; ++i) {
+ num = atl1c_alloc_rx_buffer(adapter, i, false);
+ if (unlikely(num == 0))
+ return -ENOMEM;
+ }
if (atl1c_configure_mac(adapter))
return -EIO;
@@ -1541,9 +1608,11 @@ static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter)
static int atl1c_clean_tx(struct napi_struct *napi, int budget)
{
- struct atl1c_adapter *adapter =
- container_of(napi, struct atl1c_adapter, tx_napi);
- struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[atl1c_trans_normal];
+ struct atl1c_tpd_ring *tpd_ring =
+ container_of(napi, struct atl1c_tpd_ring, napi);
+ struct atl1c_adapter *adapter = tpd_ring->adapter;
+ struct netdev_queue *txq =
+ netdev_get_tx_queue(napi->dev, tpd_ring->num);
struct atl1c_buffer *buffer_info;
struct pci_dev *pdev = adapter->pdev;
u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
@@ -1551,7 +1620,8 @@ static int atl1c_clean_tx(struct napi_struct *napi, int budget)
unsigned int total_bytes = 0, total_packets = 0;
unsigned long flags;
- AT_READ_REGW(&adapter->hw, REG_TPD_PRI0_CIDX, &hw_next_to_clean);
+ AT_READ_REGW(&adapter->hw, atl1c_qregs[tpd_ring->num].tpd_cons,
+ &hw_next_to_clean);
while (next_to_clean != hw_next_to_clean) {
buffer_info = &tpd_ring->buffer_info[next_to_clean];
@@ -1565,17 +1635,15 @@ static int atl1c_clean_tx(struct napi_struct *napi, int budget)
atomic_set(&tpd_ring->next_to_clean, next_to_clean);
}
- netdev_completed_queue(adapter->netdev, total_packets, total_bytes);
+ netdev_tx_completed_queue(txq, total_packets, total_bytes);
- if (netif_queue_stopped(adapter->netdev) &&
- netif_carrier_ok(adapter->netdev)) {
- netif_wake_queue(adapter->netdev);
- }
+ if (netif_tx_queue_stopped(txq) && netif_carrier_ok(adapter->netdev))
+ netif_tx_wake_queue(txq);
if (total_packets < budget) {
napi_complete_done(napi, total_packets);
spin_lock_irqsave(&adapter->hw.intr_mask_lock, flags);
- adapter->hw.intr_mask |= ISR_TX_PKT;
+ adapter->hw.intr_mask |= atl1c_qregs[tpd_ring->num].tx_isr;
AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
spin_unlock_irqrestore(&adapter->hw.intr_mask_lock, flags);
return total_packets;
@@ -1583,6 +1651,38 @@ static int atl1c_clean_tx(struct napi_struct *napi, int budget)
return budget;
}
+static void atl1c_intr_rx_tx(struct atl1c_adapter *adapter, u32 status)
+{
+ struct atl1c_hw *hw = &adapter->hw;
+ u32 intr_mask;
+ int i;
+
+ spin_lock(&hw->intr_mask_lock);
+ intr_mask = hw->intr_mask;
+ for (i = 0; i < adapter->rx_queue_count; ++i) {
+ if (!(status & atl1c_qregs[i].rx_isr))
+ continue;
+ if (napi_schedule_prep(&adapter->rrd_ring[i].napi)) {
+ intr_mask &= ~atl1c_qregs[i].rx_isr;
+ __napi_schedule(&adapter->rrd_ring[i].napi);
+ }
+ }
+ for (i = 0; i < adapter->tx_queue_count; ++i) {
+ if (!(status & atl1c_qregs[i].tx_isr))
+ continue;
+ if (napi_schedule_prep(&adapter->tpd_ring[i].napi)) {
+ intr_mask &= ~atl1c_qregs[i].tx_isr;
+ __napi_schedule(&adapter->tpd_ring[i].napi);
+ }
+ }
+
+ if (hw->intr_mask != intr_mask) {
+ hw->intr_mask = intr_mask;
+ AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
+ }
+ spin_unlock(&hw->intr_mask_lock);
+}
+
/**
* atl1c_intr - Interrupt Handler
* @irq: interrupt number
@@ -1613,24 +1713,8 @@ static irqreturn_t atl1c_intr(int irq, void *data)
atl1c_clear_phy_int(adapter);
/* Ack ISR */
AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);
- if (status & ISR_RX_PKT) {
- if (likely(napi_schedule_prep(&adapter->napi))) {
- spin_lock(&hw->intr_mask_lock);
- hw->intr_mask &= ~ISR_RX_PKT;
- AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
- spin_unlock(&hw->intr_mask_lock);
- __napi_schedule(&adapter->napi);
- }
- }
- if (status & ISR_TX_PKT) {
- if (napi_schedule_prep(&adapter->tx_napi)) {
- spin_lock(&hw->intr_mask_lock);
- hw->intr_mask &= ~ISR_TX_PKT;
- AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
- spin_unlock(&hw->intr_mask_lock);
- __napi_schedule(&adapter->tx_napi);
- }
- }
+ if (status & (ISR_RX_PKT | ISR_TX_PKT))
+ atl1c_intr_rx_tx(adapter, status);
handled = IRQ_HANDLED;
/* check if PCIE PHY Link down */
@@ -1681,44 +1765,47 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
}
static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter,
- bool napi_mode)
+ u32 queue, bool napi_mode)
{
+ struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue];
struct sk_buff *skb;
struct page *page;
if (adapter->rx_frag_size > PAGE_SIZE) {
if (likely(napi_mode))
- return napi_alloc_skb(&adapter->napi,
+ return napi_alloc_skb(&rrd_ring->napi,
adapter->rx_buffer_len);
else
return netdev_alloc_skb_ip_align(adapter->netdev,
adapter->rx_buffer_len);
}
- page = adapter->rx_page;
+ page = rrd_ring->rx_page;
if (!page) {
- adapter->rx_page = page = alloc_page(GFP_ATOMIC);
+ page = alloc_page(GFP_ATOMIC);
if (unlikely(!page))
return NULL;
- adapter->rx_page_offset = 0;
+ rrd_ring->rx_page = page;
+ rrd_ring->rx_page_offset = 0;
}
- skb = build_skb(page_address(page) + adapter->rx_page_offset,
+ skb = build_skb(page_address(page) + rrd_ring->rx_page_offset,
adapter->rx_frag_size);
if (likely(skb)) {
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
- adapter->rx_page_offset += adapter->rx_frag_size;
- if (adapter->rx_page_offset >= PAGE_SIZE)
- adapter->rx_page = NULL;
+ rrd_ring->rx_page_offset += adapter->rx_frag_size;
+ if (rrd_ring->rx_page_offset >= PAGE_SIZE)
+ rrd_ring->rx_page = NULL;
else
get_page(page);
}
return skb;
}
-static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, bool napi_mode)
+static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
+ bool napi_mode)
{
- struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
+ struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[queue];
struct pci_dev *pdev = adapter->pdev;
struct atl1c_buffer *buffer_info, *next_info;
struct sk_buff *skb;
@@ -1737,7 +1824,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, bool napi_mode)
while (next_info->flags & ATL1C_BUFFER_FREE) {
rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
- skb = atl1c_alloc_skb(adapter, napi_mode);
+ skb = atl1c_alloc_skb(adapter, queue, napi_mode);
if (unlikely(!skb)) {
if (netif_msg_rx_err(adapter))
dev_warn(&pdev->dev, "alloc rx buffer failed\n");
@@ -1779,8 +1866,8 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, bool napi_mode)
/* TODO: update mailbox here */
wmb();
rfd_ring->next_to_use = rfd_next_to_use;
- AT_WRITE_REG(&adapter->hw, REG_MB_RFD0_PROD_IDX,
- rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK);
+ AT_WRITE_REG(&adapter->hw, atl1c_qregs[queue].rfd_prod,
+ rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK);
}
return num_alloc;
@@ -1818,22 +1905,33 @@ static void atl1c_clean_rfd(struct atl1c_rfd_ring *rfd_ring,
rfd_ring->next_to_clean = rfd_index;
}
-static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter,
- int *work_done, int work_to_do)
+/**
+ * atl1c_clean_rx - NAPI Rx polling callback
+ * @napi: napi info
+ * @budget: limit of packets to clean
+ */
+static int atl1c_clean_rx(struct napi_struct *napi, int budget)
{
+ struct atl1c_rrd_ring *rrd_ring =
+ container_of(napi, struct atl1c_rrd_ring, napi);
+ struct atl1c_adapter *adapter = rrd_ring->adapter;
u16 rfd_num, rfd_index;
- u16 count = 0;
u16 length;
struct pci_dev *pdev = adapter->pdev;
struct net_device *netdev = adapter->netdev;
- struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
- struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
+ struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[rrd_ring->num];
struct sk_buff *skb;
struct atl1c_recv_ret_status *rrs;
struct atl1c_buffer *buffer_info;
+ int work_done = 0;
+ unsigned long flags;
+
+ /* Keep link state information with original netdev */
+ if (!netif_carrier_ok(adapter->netdev))
+ goto quit_polling;
while (1) {
- if (*work_done >= work_to_do)
+ if (work_done >= budget)
break;
rrs = ATL1C_RRD_DESC(rrd_ring, rrd_ring->next_to_clean);
if (likely(RRS_RXD_IS_VALID(rrs->word3))) {
@@ -1887,38 +1985,18 @@ rrs_checked:
vlan = le16_to_cpu(vlan);
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
}
- napi_gro_receive(&adapter->napi, skb);
+ napi_gro_receive(napi, skb);
- (*work_done)++;
- count++;
+ work_done++;
}
- if (count)
- atl1c_alloc_rx_buffer(adapter, true);
-}
-
-/**
- * atl1c_clean - NAPI Rx polling callback
- * @napi: napi info
- * @budget: limit of packets to clean
- */
-static int atl1c_clean(struct napi_struct *napi, int budget)
-{
- struct atl1c_adapter *adapter =
- container_of(napi, struct atl1c_adapter, napi);
- int work_done = 0;
- unsigned long flags;
-
- /* Keep link state information with original netdev */
- if (!netif_carrier_ok(adapter->netdev))
- goto quit_polling;
- /* just enable one RXQ */
- atl1c_clean_rx_irq(adapter, &work_done, budget);
+ if (work_done)
+ atl1c_alloc_rx_buffer(adapter, rrd_ring->num, true);
if (work_done < budget) {
quit_polling:
napi_complete_done(napi, work_done);
spin_lock_irqsave(&adapter->hw.intr_mask_lock, flags);
- adapter->hw.intr_mask |= ISR_RX_PKT;
+ adapter->hw.intr_mask |= atl1c_qregs[rrd_ring->num].rx_isr;
AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
spin_unlock_irqrestore(&adapter->hw.intr_mask_lock, flags);
}
@@ -1942,9 +2020,9 @@ static void atl1c_netpoll(struct net_device *netdev)
}
#endif
-static inline u16 atl1c_tpd_avail(struct atl1c_adapter *adapter, enum atl1c_trans_queue type)
+static inline u16 atl1c_tpd_avail(struct atl1c_adapter *adapter, u32 queue)
{
- struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
+ struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[queue];
u16 next_to_use = 0;
u16 next_to_clean = 0;
@@ -1962,9 +2040,9 @@ static inline u16 atl1c_tpd_avail(struct atl1c_adapter *adapter, enum atl1c_tran
* there is enough tpd to use
*/
static struct atl1c_tpd_desc *atl1c_get_tpd(struct atl1c_adapter *adapter,
- enum atl1c_trans_queue type)
+ u32 queue)
{
- struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
+ struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[queue];
struct atl1c_tpd_desc *tpd_desc;
u16 next_to_use = 0;
@@ -2006,7 +2084,7 @@ static u16 atl1c_cal_tpd_req(const struct sk_buff *skb)
static int atl1c_tso_csum(struct atl1c_adapter *adapter,
struct sk_buff *skb,
struct atl1c_tpd_desc **tpd,
- enum atl1c_trans_queue type)
+ u32 queue)
{
struct pci_dev *pdev = adapter->pdev;
unsigned short offload_type;
@@ -2051,7 +2129,7 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
*(struct atl1c_tpd_ext_desc **)(tpd);
memset(etpd, 0, sizeof(struct atl1c_tpd_ext_desc));
- *tpd = atl1c_get_tpd(adapter, type);
+ *tpd = atl1c_get_tpd(adapter, queue);
ipv6_hdr(skb)->payload_len = 0;
/* check payload == 0 byte ? */
hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
@@ -2103,9 +2181,9 @@ check_sum:
static void atl1c_tx_rollback(struct atl1c_adapter *adpt,
struct atl1c_tpd_desc *first_tpd,
- enum atl1c_trans_queue type)
+ u32 queue)
{
- struct atl1c_tpd_ring *tpd_ring = &adpt->tpd_ring[type];
+ struct atl1c_tpd_ring *tpd_ring = &adpt->tpd_ring[queue];
struct atl1c_buffer *buffer_info;
struct atl1c_tpd_desc *tpd;
u16 first_index, index;
@@ -2124,8 +2202,8 @@ static void atl1c_tx_rollback(struct atl1c_adapter *adpt,
}
static int atl1c_tx_map(struct atl1c_adapter *adapter,
- struct sk_buff *skb, struct atl1c_tpd_desc *tpd,
- enum atl1c_trans_queue type)
+ struct sk_buff *skb, struct atl1c_tpd_desc *tpd,
+ u32 queue)
{
struct atl1c_tpd_desc *use_tpd = NULL;
struct atl1c_buffer *buffer_info = NULL;
@@ -2165,7 +2243,7 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter,
if (mapped_len == 0)
use_tpd = tpd;
else {
- use_tpd = atl1c_get_tpd(adapter, type);
+ use_tpd = atl1c_get_tpd(adapter, queue);
memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
}
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
@@ -2187,7 +2265,7 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter,
for (f = 0; f < nr_frags; f++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
- use_tpd = atl1c_get_tpd(adapter, type);
+ use_tpd = atl1c_get_tpd(adapter, queue);
memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
@@ -2220,23 +2298,22 @@ err_dma:
return -1;
}
-static void atl1c_tx_queue(struct atl1c_adapter *adapter,
- enum atl1c_trans_queue type)
+static void atl1c_tx_queue(struct atl1c_adapter *adapter, u32 queue)
{
- struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
- u16 reg;
+ struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[queue];
- reg = type == atl1c_trans_high ? REG_TPD_PRI1_PIDX : REG_TPD_PRI0_PIDX;
- AT_WRITE_REGW(&adapter->hw, reg, tpd_ring->next_to_use);
+ AT_WRITE_REGW(&adapter->hw, atl1c_qregs[queue].tpd_prod,
+ tpd_ring->next_to_use);
}
static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
- u16 tpd_req;
+ u32 queue = skb_get_queue_mapping(skb);
+ struct netdev_queue *txq = netdev_get_tx_queue(netdev, queue);
struct atl1c_tpd_desc *tpd;
- enum atl1c_trans_queue type = atl1c_trans_normal;
+ u16 tpd_req;
if (test_bit(__AT_DOWN, &adapter->flags)) {
dev_kfree_skb_any(skb);
@@ -2245,18 +2322,18 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
tpd_req = atl1c_cal_tpd_req(skb);
- if (atl1c_tpd_avail(adapter, type) < tpd_req) {
+ if (atl1c_tpd_avail(adapter, queue) < tpd_req) {
/* not enough descriptors, just stop the queue */
- atl1c_tx_queue(adapter, type);
- netif_stop_queue(netdev);
+ atl1c_tx_queue(adapter, queue);
+ netif_tx_stop_queue(txq);
return NETDEV_TX_BUSY;
}
- tpd = atl1c_get_tpd(adapter, type);
+ tpd = atl1c_get_tpd(adapter, queue);
/* do TSO and check sum */
- if (atl1c_tso_csum(adapter, skb, &tpd, type) != 0) {
- atl1c_tx_queue(adapter, type);
+ if (atl1c_tso_csum(adapter, skb, &tpd, queue) != 0) {
+ atl1c_tx_queue(adapter, queue);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -2274,17 +2351,17 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
if (skb_network_offset(skb) != ETH_HLEN)
tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */
- if (atl1c_tx_map(adapter, skb, tpd, type) < 0) {
+ if (atl1c_tx_map(adapter, skb, tpd, queue) < 0) {
netif_info(adapter, tx_done, adapter->netdev,
"tx-skb dropped due to dma error\n");
/* roll back tpd/buffer */
- atl1c_tx_rollback(adapter, tpd, type);
+ atl1c_tx_rollback(adapter, tpd, queue);
dev_kfree_skb_any(skb);
} else {
bool more = netdev_xmit_more();
- if (__netdev_sent_queue(adapter->netdev, skb->len, more))
- atl1c_tx_queue(adapter, type);
+ if (__netdev_tx_sent_queue(txq, skb->len, more))
+ atl1c_tx_queue(adapter, queue);
}
return NETDEV_TX_OK;
@@ -2338,16 +2415,19 @@ static int atl1c_request_irq(struct atl1c_adapter *adapter)
static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter)
{
+ int i;
/* release tx-pending skbs and reset tx/rx ring index */
- atl1c_clean_tx_ring(adapter, atl1c_trans_normal);
- atl1c_clean_tx_ring(adapter, atl1c_trans_high);
- atl1c_clean_rx_ring(adapter);
+ for (i = 0; i < adapter->tx_queue_count; ++i)
+ atl1c_clean_tx_ring(adapter, i);
+ for (i = 0; i < adapter->rx_queue_count; ++i)
+ atl1c_clean_rx_ring(adapter, i);
}
static int atl1c_up(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int err;
+ int i;
netif_carrier_off(netdev);
@@ -2361,20 +2441,24 @@ static int atl1c_up(struct atl1c_adapter *adapter)
atl1c_check_link_status(adapter);
clear_bit(__AT_DOWN, &adapter->flags);
- napi_enable(&adapter->napi);
- napi_enable(&adapter->tx_napi);
+ for (i = 0; i < adapter->tx_queue_count; ++i)
+ napi_enable(&adapter->tpd_ring[i].napi);
+ for (i = 0; i < adapter->rx_queue_count; ++i)
+ napi_enable(&adapter->rrd_ring[i].napi);
atl1c_irq_enable(adapter);
netif_start_queue(netdev);
return err;
err_up:
- atl1c_clean_rx_ring(adapter);
+ for (i = 0; i < adapter->rx_queue_count; ++i)
+ atl1c_clean_rx_ring(adapter, i);
return err;
}
static void atl1c_down(struct atl1c_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ int i;
atl1c_del_timer(adapter);
adapter->work_event = 0; /* clear all events */
@@ -2382,8 +2466,10 @@ static void atl1c_down(struct atl1c_adapter *adapter)
* reschedule our watchdog timer */
set_bit(__AT_DOWN, &adapter->flags);
netif_carrier_off(netdev);
- napi_disable(&adapter->napi);
- napi_disable(&adapter->tx_napi);
+ for (i = 0; i < adapter->tx_queue_count; ++i)
+ napi_disable(&adapter->tpd_ring[i].napi);
+ for (i = 0; i < adapter->rx_queue_count; ++i)
+ napi_disable(&adapter->rrd_ring[i].napi);
atl1c_irq_disable(adapter);
atl1c_free_irq(adapter);
/* disable ASPM if device inactive */
@@ -2568,8 +2654,11 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *netdev;
struct atl1c_adapter *adapter;
static int cards_found;
-
+ u8 __iomem *hw_addr;
+ enum atl1c_nic_type nic_type;
+ u32 queue_count = 1;
int err = 0;
+ int i;
/* enable device (incl. PCI PM wakeup and hotplug setup) */
err = pci_enable_device_mem(pdev);
@@ -2602,7 +2691,18 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- netdev = alloc_etherdev(sizeof(struct atl1c_adapter));
+ hw_addr = pci_ioremap_bar(pdev, 0);
+ if (!hw_addr) {
+ err = -EIO;
+ dev_err(&pdev->dev, "cannot map device registers\n");
+ goto err_ioremap;
+ }
+
+ nic_type = atl1c_get_mac_type(pdev, hw_addr);
+ if (nic_type == athr_mt)
+ queue_count = 4;
+
+ netdev = alloc_etherdev_mq(sizeof(struct atl1c_adapter), queue_count);
if (netdev == NULL) {
err = -ENOMEM;
goto err_alloc_etherdev;
@@ -2618,13 +2718,11 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->netdev = netdev;
adapter->pdev = pdev;
adapter->hw.adapter = adapter;
+ adapter->hw.nic_type = nic_type;
adapter->msg_enable = netif_msg_init(-1, atl1c_default_msg);
- adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
- if (!adapter->hw.hw_addr) {
- err = -EIO;
- dev_err(&pdev->dev, "cannot map device registers\n");
- goto err_ioremap;
- }
+ adapter->hw.hw_addr = hw_addr;
+ adapter->tx_queue_count = queue_count;
+ adapter->rx_queue_count = queue_count;
/* init mii data */
adapter->mii.dev = netdev;
@@ -2633,8 +2731,12 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->mii.phy_id_mask = 0x1f;
adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK;
dev_set_threaded(netdev, true);
- netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64);
- netif_napi_add(netdev, &adapter->tx_napi, atl1c_clean_tx, 64);
+ for (i = 0; i < adapter->rx_queue_count; ++i)
+ netif_napi_add(netdev, &adapter->rrd_ring[i].napi,
+ atl1c_clean_rx, 64);
+ for (i = 0; i < adapter->tx_queue_count; ++i)
+ netif_napi_add(netdev, &adapter->tpd_ring[i].napi,
+ atl1c_clean_tx, 64);
timer_setup(&adapter->phy_config_timer, atl1c_phy_config, 0);
/* setup the private structure */
err = atl1c_sw_init(adapter);
@@ -2687,11 +2789,11 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_reset:
err_register:
err_sw_init:
- iounmap(adapter->hw.hw_addr);
err_init_netdev:
-err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
+ iounmap(hw_addr);
+err_ioremap:
pci_release_regions(pdev);
err_pci_reg:
err_dma:
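With one napi_struct embedded per ring, the poll callbacks above recover their ring via container_of() instead of reaching through a single adapter-wide pointer. A self-contained sketch of the macro and the recovery step (struct names are stand-ins):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct napi { int weight; };

struct tpd_ring_like {
	int num;
	struct napi napi; /* embedded, one per ring */
};

static int clean_tx(struct napi *napi)
{
	/* same move as atl1c_clean_tx(): member pointer -> owning ring */
	struct tpd_ring_like *ring =
		container_of(napi, struct tpd_ring_like, napi);

	printf("polling tx ring %d\n", ring->num);
	return 0;
}

int main(void)
{
	struct tpd_ring_like ring = { .num = 2, .napi = { 64 } };

	return clean_tx(&ring.napi);
}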
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 9834b77cf4b6..4ab5bf64d353 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -172,7 +172,6 @@ static int bgmac_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct bgmac *bgmac;
- struct resource *regs;
int ret;
bgmac = bgmac_alloc(&pdev->dev);
@@ -206,21 +205,15 @@ static int bgmac_probe(struct platform_device *pdev)
if (IS_ERR(bgmac->plat.base))
return PTR_ERR(bgmac->plat.base);
- regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
- if (regs) {
- bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
- if (IS_ERR(bgmac->plat.idm_base))
- return PTR_ERR(bgmac->plat.idm_base);
+ bgmac->plat.idm_base = devm_platform_ioremap_resource_byname(pdev, "idm_base");
+ if (IS_ERR(bgmac->plat.idm_base))
+ return PTR_ERR(bgmac->plat.idm_base);
+ else
bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
- }
- regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base");
- if (regs) {
- bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev,
- regs);
- if (IS_ERR(bgmac->plat.nicpm_base))
- return PTR_ERR(bgmac->plat.nicpm_base);
- }
+ bgmac->plat.nicpm_base = devm_platform_ioremap_resource_byname(pdev, "nicpm_base");
+ if (IS_ERR(bgmac->plat.nicpm_base))
+ return PTR_ERR(bgmac->plat.nicpm_base);
bgmac->read = platform_bgmac_read;
bgmac->write = platform_bgmac_write;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 2c5f36befdfe..bee6cfad9fc6 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8247,9 +8247,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
BNX2_WR(bp, PCI_COMMAND, reg);
} else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
!(bp->flags & BNX2_FLAG_PCIX)) {
-
dev_err(&pdev->dev,
"5706 A1 can only be used in a PCIX bus, aborting\n");
+ rc = -EPERM;
goto err_out_unmap;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index d21f085044cd..27943b0446c2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1223,8 +1223,10 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
goto failed;
/* SR-IOV capability was enabled but there are no VFs*/
- if (iov->total == 0)
+ if (iov->total == 0) {
+ err = -EINVAL;
goto failed;
+ }
iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 3a716c015415..966d5722c5e2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -504,7 +504,6 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
/* VF side vfpf channel functions */
int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count);
int bnx2x_vfpf_release(struct bnx2x *bp);
-int bnx2x_vfpf_release(struct bnx2x *bp);
int bnx2x_vfpf_init(struct bnx2x *bp);
void bnx2x_vfpf_close_vf(struct bnx2x *bp);
int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 2985844634c8..fcc729d52b17 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -282,7 +282,8 @@ static bool bnxt_vf_pciid(enum board_idx idx)
{
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
- idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF);
+ idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
+ idx == NETXTREME_E_P5_VF_HV);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
@@ -6932,17 +6933,10 @@ ctx_err:
static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
__le64 *pg_dir)
{
- u8 pg_size = 0;
-
if (!rmem->nr_pages)
return;
- if (BNXT_PAGE_SHIFT == 13)
- pg_size = 1 << 4;
- else if (BNXT_PAGE_SIZE == 16)
- pg_size = 2 << 4;
-
- *pg_attr = pg_size;
+ BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
if (rmem->depth >= 1) {
if (rmem->depth == 2)
*pg_attr |= 2;
@@ -10785,37 +10779,125 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
return rc;
}
+static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
+ u8 **nextp)
+{
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
+ int hdr_count = 0;
+ u8 *nexthdr;
+ int start;
+
+ /* Check that there are at most 3 IPv6 extension headers, no
+ * fragment header, and each is <= 64 bytes.
+ */
+ start = nw_off + sizeof(*ip6h);
+ nexthdr = &ip6h->nexthdr;
+ while (ipv6_ext_hdr(*nexthdr)) {
+ struct ipv6_opt_hdr *hp;
+ int hdrlen;
+
+ if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
+ *nexthdr == NEXTHDR_FRAGMENT)
+ return false;
+ hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
+ skb_headlen(skb), NULL);
+ if (!hp)
+ return false;
+ if (*nexthdr == NEXTHDR_AUTH)
+ hdrlen = ipv6_authlen(hp);
+ else
+ hdrlen = ipv6_optlen(hp);
+
+ if (hdrlen > 64)
+ return false;
+ nexthdr = &hp->nexthdr;
+ start += hdrlen;
+ hdr_count++;
+ }
+ if (nextp) {
+ /* Caller will check inner protocol */
+ if (skb->encapsulation) {
+ *nextp = nexthdr;
+ return true;
+ }
+ *nextp = NULL;
+ }
+ /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
+ return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
+}
+
+/* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
+static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
+{
+ struct udphdr *uh = udp_hdr(skb);
+ __be16 udp_port = uh->dest;
+
+ if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
+ return false;
+ if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
+ struct ethhdr *eh = inner_eth_hdr(skb);
+
+ switch (eh->h_proto) {
+ case htons(ETH_P_IP):
+ return true;
+ case htons(ETH_P_IPV6):
+ return bnxt_exthdr_check(bp, skb,
+ skb_inner_network_offset(skb),
+ NULL);
+ }
+ }
+ return false;
+}
+
+static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
+{
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ return bnxt_udp_tunl_check(bp, skb);
+ case IPPROTO_IPIP:
+ return true;
+ case IPPROTO_GRE: {
+ switch (skb->inner_protocol) {
+ default:
+ return false;
+ case htons(ETH_P_IP):
+ return true;
+ case htons(ETH_P_IPV6):
+ fallthrough;
+ }
+ }
+ case IPPROTO_IPV6:
+ /* Check ext headers of inner ipv6 */
+ return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
+ NULL);
+ }
+ return false;
+}
+
static netdev_features_t bnxt_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
- struct bnxt *bp;
- __be16 udp_port;
- u8 l4_proto = 0;
+ struct bnxt *bp = netdev_priv(dev);
+ u8 *l4_proto;
features = vlan_features_check(skb, features);
- if (!skb->encapsulation)
- return features;
-
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
- l4_proto = ip_hdr(skb)->protocol;
+ if (!skb->encapsulation)
+ return features;
+ l4_proto = &ip_hdr(skb)->protocol;
+ if (bnxt_tunl_check(bp, skb, *l4_proto))
+ return features;
break;
case htons(ETH_P_IPV6):
- l4_proto = ipv6_hdr(skb)->nexthdr;
+ if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
+ &l4_proto))
+ break;
+ if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
+ return features;
break;
- default:
- return features;
}
-
- if (l4_proto != IPPROTO_UDP)
- return features;
-
- bp = netdev_priv(dev);
- /* For UDP, we can only handle 1 Vxlan port and 1 Geneve port. */
- udp_port = udp_hdr(skb)->dest;
- if (udp_port == bp->vxlan_port || udp_port == bp->nge_port)
- return features;
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
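bnxt_exthdr_check() walks the IPv6 extension-header chain with hard bounds (limited count, no fragment header, each header <= 64 bytes) before letting the NIC offload a packet. A self-contained sketch of the same walk over a raw buffer; the types and NEXTHDR_* constants are stand-ins, and the generic option-header length rule approximates the kernel's ipv6_optlen():

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct opt_hdr { uint8_t nexthdr; uint8_t hdrlen; };

#define NEXTHDR_HOP	0
#define NEXTHDR_TCP	6
#define NEXTHDR_UDP	17
#define NEXTHDR_ROUTING	43
#define NEXTHDR_FRAGMENT 44
#define NEXTHDR_DEST	60

static bool is_ext_hdr(uint8_t nh)
{
	return nh == NEXTHDR_HOP || nh == NEXTHDR_ROUTING ||
	       nh == NEXTHDR_DEST || nh == NEXTHDR_FRAGMENT;
}

/* Apply the driver's bounds while walking the chain starting at 'off'. */
static bool exthdr_ok(const uint8_t *pkt, size_t len, size_t off, uint8_t nexthdr)
{
	int count = 0;

	while (is_ext_hdr(nexthdr)) {
		const struct opt_hdr *hp;
		size_t hdrlen;

		if (count >= 3 || nexthdr == NEXTHDR_FRAGMENT)
			return false;
		if (off + sizeof(*hp) > len)
			return false;
		hp = (const struct opt_hdr *)(pkt + off);
		hdrlen = (hp->hdrlen + 1) * 8;	/* generic option-header length */
		if (hdrlen > 64 || off + hdrlen > len)
			return false;
		nexthdr = hp->nexthdr;
		off += hdrlen;
		count++;
	}
	/* Only TCP/UDP payloads remain offloadable. */
	return nexthdr == NEXTHDR_TCP || nexthdr == NEXTHDR_UDP;
}

int main(void)
{
	/* IPv6 payload starting directly with TCP: trivially acceptable. */
	uint8_t pkt[64] = {0};
	return exthdr_ok(pkt, sizeof(pkt), 0, NEXTHDR_TCP) ? 0 : 1;
}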
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 98e0cef4532c..30e47ea343f9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1457,6 +1457,16 @@ struct bnxt_ctx_pg_info {
#define BNXT_BACKING_STORE_CFG_LEGACY_LEN 256
+#define BNXT_SET_CTX_PAGE_ATTR(attr) \
+do { \
+ if (BNXT_PAGE_SIZE == 0x2000) \
+ attr = FUNC_BACKING_STORE_CFG_REQ_SRQ_PG_SIZE_PG_8K; \
+ else if (BNXT_PAGE_SIZE == 0x10000) \
+ attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_64K; \
+ else \
+ attr = FUNC_BACKING_STORE_CFG_REQ_QPC_PG_SIZE_PG_4K; \
+} while (0)
+
struct bnxt_ctx_mem_info {
u32 qp_max_entries;
u16 qp_min_qp1_entries;
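BNXT_SET_CTX_PAGE_ATTR keys purely on BNXT_PAGE_SIZE, replacing the removed test that mixed BNXT_PAGE_SHIFT and BNXT_PAGE_SIZE and so never selected the 64K encoding. A hedged sketch of the same selection as a plain function; the PG_SIZE_* values here are illustrative, not the firmware's actual encodings:

#include <stdint.h>
#include <stdio.h>

#define PG_SIZE_PG_4K	0
#define PG_SIZE_PG_8K	1
#define PG_SIZE_PG_64K	2

static uint8_t ctx_page_attr(unsigned long page_size)
{
	/* Mirrors the macro: select by page size, default to 4K. */
	if (page_size == 0x2000)
		return PG_SIZE_PG_8K;
	if (page_size == 0x10000)
		return PG_SIZE_PG_64K;
	return PG_SIZE_PG_4K;
}

int main(void)
{
	printf("64K pages -> attr %u\n", ctx_page_attr(0x10000));
	return 0;
}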
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 5335244e4577..89d16c587bb7 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -423,6 +423,10 @@ static int bcmgenet_mii_register(struct bcmgenet_priv *priv)
int id, ret;
pres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!pres) {
+ dev_err(&pdev->dev, "Invalid resource\n");
+ return -EINVAL;
+ }
memset(&res, 0, sizeof(res));
memset(&ppd, 0, sizeof(ppd));
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 6bc7d41d519b..7d2fe13a52f8 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -2867,6 +2867,9 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
struct gem_stats *hwstat = &bp->hw_stats.gem;
struct net_device_stats *nstat = &bp->dev->stats;
+ if (!netif_running(bp->dev))
+ return nstat;
+
gem_update_stats(bp);
nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
@@ -4652,8 +4655,7 @@ static int macb_probe(struct platform_device *pdev)
struct macb *bp;
int err, val;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mem = devm_ioremap_resource(&pdev->dev, regs);
+ mem = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
if (IS_ERR(mem))
return PTR_ERR(mem);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 7c5af4beedc6..591229b96257 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1153,7 +1153,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
* @lio: per-network private data
* @start_stop: whether to start or stop
*/
-static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
+static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
struct octeon_soft_command *sc;
union octnet_cmd *ncmd;
@@ -1161,15 +1161,15 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
int retval;
if (oct->props[lio->ifidx].rx_on == start_stop)
- return;
+ return 0;
sc = (struct octeon_soft_command *)
octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
16, 0);
if (!sc) {
netif_info(lio, rx_err, lio->netdev,
- "Failed to allocate octeon_soft_command\n");
- return;
+ "Failed to allocate octeon_soft_command struct\n");
+ return -ENOMEM;
}
ncmd = (union octnet_cmd *)sc->virtdptr;
@@ -1192,18 +1192,19 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
if (retval == IQ_SEND_FAILED) {
netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
octeon_free_soft_command(oct, sc);
- return;
} else {
/* Sleep on a wait queue till the cond flag indicates that the
* response arrived or timed-out.
*/
retval = wait_for_sc_completion_timeout(oct, sc, 0);
if (retval)
- return;
+ return retval;
oct->props[lio->ifidx].rx_on = start_stop;
WRITE_ONCE(sc->caller_is_done, true);
}
+
+ return retval;
}
/**
@@ -1778,6 +1779,7 @@ static int liquidio_open(struct net_device *netdev)
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
struct napi_struct *napi, *n;
+ int ret = 0;
if (oct->props[lio->ifidx].napi_enabled == 0) {
tasklet_disable(&oct_priv->droq_tasklet);
@@ -1813,7 +1815,9 @@ static int liquidio_open(struct net_device *netdev)
netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
/* tell Octeon to start forwarding packets to host */
- send_rx_ctrl_cmd(lio, 1);
+ ret = send_rx_ctrl_cmd(lio, 1);
+ if (ret)
+ return ret;
/* start periodical statistics fetch */
INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
@@ -1824,7 +1828,7 @@ static int liquidio_open(struct net_device *netdev)
dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
netdev->name);
- return 0;
+ return ret;
}
/**
@@ -1838,6 +1842,7 @@ static int liquidio_stop(struct net_device *netdev)
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
struct napi_struct *napi, *n;
+ int ret = 0;
ifstate_reset(lio, LIO_IFSTATE_RUNNING);
@@ -1854,7 +1859,9 @@ static int liquidio_stop(struct net_device *netdev)
lio->link_changes++;
/* Tell Octeon that nic interface is down. */
- send_rx_ctrl_cmd(lio, 0);
+ ret = send_rx_ctrl_cmd(lio, 0);
+ if (ret)
+ return ret;
if (OCTEON_CN23XX_PF(oct)) {
if (!oct->msix_on)
@@ -1889,7 +1896,7 @@ static int liquidio_stop(struct net_device *netdev)
dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
- return 0;
+ return ret;
}
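send_rx_ctrl_cmd() changes from void to int so liquidio_open()/liquidio_stop() can propagate allocation and transport failures instead of silently continuing. A minimal sketch of the pattern; the device state and names are illustrative and the octeon command path is elided:

#include <errno.h>

static int rx_on;	/* stand-in for oct->props[ifidx].rx_on */

/* Formerly returned void; now reports failures to the caller. */
static int send_rx_ctrl_cmd(int start_stop)
{
	if (rx_on == start_stop)
		return 0;		/* nothing to do */
	/* ... allocate command, send, wait for completion ... */
	rx_on = start_stop;
	return 0;			/* or -ENOMEM / -EIO on failure */
}

static int dev_open(void)
{
	int ret = send_rx_ctrl_cmd(1);

	if (ret)
		return ret;		/* propagate instead of ignoring */
	/* ... rest of open ... */
	return 0;
}

int main(void) { return dev_open(); }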
/**
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 516f166ceff8..ffddb3126a32 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -595,7 +595,7 @@ static void octeon_destroy_resources(struct octeon_device *oct)
* @lio: per-network private data
* @start_stop: whether to start or stop
*/
-static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
+static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
struct octeon_soft_command *sc;
@@ -603,11 +603,16 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
int retval;
if (oct->props[lio->ifidx].rx_on == start_stop)
- return;
+ return 0;
sc = (struct octeon_soft_command *)
octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
16, 0);
+ if (!sc) {
+ netif_info(lio, rx_err, lio->netdev,
+ "Failed to allocate octeon_soft_command struct\n");
+ return -ENOMEM;
+ }
ncmd = (union octnet_cmd *)sc->virtdptr;
@@ -635,11 +640,13 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
*/
retval = wait_for_sc_completion_timeout(oct, sc, 0);
if (retval)
- return;
+ return retval;
oct->props[lio->ifidx].rx_on = start_stop;
WRITE_ONCE(sc->caller_is_done, true);
}
+
+ return retval;
}
/**
@@ -906,6 +913,7 @@ static int liquidio_open(struct net_device *netdev)
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
struct napi_struct *napi, *n;
+ int ret = 0;
if (!oct->props[lio->ifidx].napi_enabled) {
tasklet_disable(&oct_priv->droq_tasklet);
@@ -932,11 +940,13 @@ static int liquidio_open(struct net_device *netdev)
(LIQUIDIO_NDEV_STATS_POLL_TIME_MS));
/* tell Octeon to start forwarding packets to host */
- send_rx_ctrl_cmd(lio, 1);
+ ret = send_rx_ctrl_cmd(lio, 1);
+ if (ret)
+ return ret;
dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name);
- return 0;
+ return ret;
}
/**
@@ -950,9 +960,12 @@ static int liquidio_stop(struct net_device *netdev)
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)oct->priv;
struct napi_struct *napi, *n;
+ int ret = 0;
/* tell Octeon to stop forwarding packets to host */
- send_rx_ctrl_cmd(lio, 0);
+ ret = send_rx_ctrl_cmd(lio, 0);
+ if (ret)
+ return ret;
netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
/* Inform that netif carrier is down */
@@ -986,7 +999,7 @@ static int liquidio_stop(struct net_device *netdev)
dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
- return 0;
+ return ret;
}
/**
diff --git a/drivers/net/ethernet/chelsio/cxgb3/adapter.h b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
index f80fbd81b609..6d682b7c7aac 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
@@ -178,7 +178,7 @@ struct sge_txq { /* state for an SGE Tx queue */
unsigned int token; /* WR token */
dma_addr_t phys_addr; /* physical address of the ring */
struct sk_buff_head sendq; /* List of backpressured offload packets */
- struct tasklet_struct qresume_tsk; /* restarts the queue */
+ struct work_struct qresume_task; /* restarts the queue */
unsigned int cntxt_id; /* SGE context id for the Tx q */
unsigned long stops; /* # of times q has been stopped */
unsigned long restarts; /* # of queue restarts */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h
index 1bd7d89666c4..b706f2fbe4f4 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/common.h
@@ -770,4 +770,6 @@ int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops);
int t3_aq100x_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops);
+
+extern struct workqueue_struct *cxgb3_wq;
#endif /* __CHELSIO_COMMON_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 84ad7261e243..57f210c53afc 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1273,14 +1273,14 @@ static int cxgb_up(struct adapter *adap)
free_irq(adap->msix_info[0].vec, adap);
goto irq_err;
}
- } else if ((err = request_irq(adap->pdev->irq,
- t3_intr_handler(adap,
- adap->sge.qs[0].rspq.
- polling),
- (adap->flags & USING_MSI) ?
- 0 : IRQF_SHARED,
- adap->name, adap)))
- goto irq_err;
+ } else {
+ err = request_irq(adap->pdev->irq,
+ t3_intr_handler(adap, adap->sge.qs[0].rspq.polling),
+ (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
+ adap->name, adap);
+ if (err)
+ goto irq_err;
+ }
enable_all_napi(adap);
t3_sge_start(adap);
@@ -3098,8 +3098,9 @@ static void set_nqsets(struct adapter *adap)
nqsets = num_cpus;
if (nqsets < 1 || hwports == 4)
nqsets = 1;
- } else
+ } else {
nqsets = 1;
+ }
for_each_port(adap, i) {
struct port_info *pi = adap2pinfo(adap, i);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 11d3b6218ed7..cb5c79c43bc9 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -1518,14 +1518,15 @@ static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
/**
* restart_ctrlq - restart a suspended control queue
- * @t: pointer to the tasklet associated with this handler
+ * @w: pointer to the work associated with this handler
*
* Resumes transmission on a suspended Tx control queue.
*/
-static void restart_ctrlq(struct tasklet_struct *t)
+static void restart_ctrlq(struct work_struct *w)
{
struct sk_buff *skb;
- struct sge_qset *qs = from_tasklet(qs, t, txq[TXQ_CTRL].qresume_tsk);
+ struct sge_qset *qs = container_of(w, struct sge_qset,
+ txq[TXQ_CTRL].qresume_task);
struct sge_txq *q = &qs->txq[TXQ_CTRL];
spin_lock(&q->lock);
@@ -1736,14 +1737,15 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
/**
* restart_offloadq - restart a suspended offload queue
- * @t: pointer to the tasklet associated with this handler
+ * @w: pointer to the work associated with this handler
*
* Resumes transmission on a suspended Tx offload queue.
*/
-static void restart_offloadq(struct tasklet_struct *t)
+static void restart_offloadq(struct work_struct *w)
{
struct sk_buff *skb;
- struct sge_qset *qs = from_tasklet(qs, t, txq[TXQ_OFLD].qresume_tsk);
+ struct sge_qset *qs = container_of(w, struct sge_qset,
+ txq[TXQ_OFLD].qresume_task);
struct sge_txq *q = &qs->txq[TXQ_OFLD];
const struct port_info *pi = netdev_priv(qs->netdev);
struct adapter *adap = pi->adapter;
@@ -1998,13 +2000,17 @@ static void restart_tx(struct sge_qset *qs)
should_restart_tx(&qs->txq[TXQ_OFLD]) &&
test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
qs->txq[TXQ_OFLD].restarts++;
- tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
+
+ /* The work can be quite lengthy, so we use the driver's own workqueue */
+ queue_work(cxgb3_wq, &qs->txq[TXQ_OFLD].qresume_task);
}
if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
should_restart_tx(&qs->txq[TXQ_CTRL]) &&
test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
qs->txq[TXQ_CTRL].restarts++;
- tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
+
+ /* The work can be quite lengthy, so we use the driver's own workqueue */
+ queue_work(cxgb3_wq, &qs->txq[TXQ_CTRL].qresume_task);
}
}
@@ -3085,8 +3091,8 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
skb_queue_head_init(&q->txq[i].sendq);
}
- tasklet_setup(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq);
- tasklet_setup(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq);
+ INIT_WORK(&q->txq[TXQ_OFLD].qresume_task, restart_offloadq);
+ INIT_WORK(&q->txq[TXQ_CTRL].qresume_task, restart_ctrlq);
q->fl[0].gen = q->fl[1].gen = 1;
q->fl[0].size = p->fl_size;
@@ -3276,11 +3282,11 @@ void t3_sge_start(struct adapter *adap)
*
* Can be invoked from interrupt context e.g. error handler.
*
- * Note that this function cannot disable the restart of tasklets as
+ * Note that this function cannot disable the restart of work items as
* it cannot wait if called from interrupt context, however the
- * tasklets will have no effect since the doorbells are disabled. The
+ * work items will have no effect since the doorbells are disabled. The
* driver will call t3_sge_stop() later from process context, at
- * which time the tasklets will be stopped if they are still running.
+ * which time the work items will be stopped if they are still running.
*/
void t3_sge_stop_dma(struct adapter *adap)
{
@@ -3292,7 +3298,7 @@ void t3_sge_stop_dma(struct adapter *adap)
* @adap: the adapter
*
* Called from process context. Disables the DMA engine and any
- * pending queue restart tasklets.
+ * pending queue restart work items.
*/
void t3_sge_stop(struct adapter *adap)
{
@@ -3303,8 +3309,8 @@ void t3_sge_stop(struct adapter *adap)
for (i = 0; i < SGE_QSETS; ++i) {
struct sge_qset *qs = &adap->sge.qs[i];
- tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
- tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
+ cancel_work_sync(&qs->txq[TXQ_OFLD].qresume_task);
+ cancel_work_sync(&qs->txq[TXQ_CTRL].qresume_task);
}
}
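The tasklet-to-workqueue conversion keeps the same ownership trick: the handler recovers its queue from the embedded work item via container_of(), just as from_tasklet() did before. A self-contained userspace sketch; struct work and struct txq stand in for work_struct and sge_txq:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { void (*func)(struct work *); };

struct txq {
	int restarts;
	struct work qresume_task;
};

/* Recover the enclosing queue from the embedded work item, the same
 * way restart_ctrlq() does after the conversion.
 */
static void restart_queue(struct work *w)
{
	struct txq *q = container_of(w, struct txq, qresume_task);

	q->restarts++;
	printf("restarted, count=%d\n", q->restarts);
}

int main(void)
{
	struct txq q = { .qresume_task = { .func = restart_queue } };

	/* queue_work() would defer this to a kworker; call directly here. */
	q.qresume_task.func(&q.qresume_task);
	return 0;
}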
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 314f8d806723..9058f09f921e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -2177,8 +2177,6 @@ int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
bool persistent, u8 *smt_idx);
int cxgb4_get_msix_idx_from_bmap(struct adapter *adap);
void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, u32 msix_idx);
-int cxgb_open(struct net_device *dev);
-int cxgb_close(struct net_device *dev);
void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
void cxgb4_quiesce_rx(struct sge_rspq *q);
int cxgb4_port_mirror_alloc(struct net_device *dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index bc581b149b11..22c9ac922eba 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -1042,7 +1042,7 @@ void clear_all_filters(struct adapter *adapter)
cxgb4_del_filter(dev, f->tid, &f->fs);
}
- sb = t4_read_reg(adapter, LE_DB_SRVR_START_INDEX_A);
+ sb = adapter->tids.stid_base;
for (i = 0; i < sb; i++) {
f = (struct filter_entry *)adapter->tids.tid_tab[i];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 6264bc66a4fc..6479ceedc352 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2834,7 +2834,7 @@ static void cxgb_down(struct adapter *adapter)
/*
* net_device operations
*/
-int cxgb_open(struct net_device *dev)
+static int cxgb_open(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -2882,7 +2882,7 @@ out_unlock:
return err;
}
-int cxgb_close(struct net_device *dev)
+static int cxgb_close(struct net_device *dev)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -3894,7 +3894,6 @@ static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
.ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan,
.ndo_set_vf_link_state = cxgb4_mgmt_set_vf_link_state,
};
-#endif
static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
@@ -3909,6 +3908,7 @@ static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
.get_drvinfo = cxgb4_mgmt_get_drvinfo,
};
+#endif
static void notify_fatal_err(struct work_struct *work)
{
@@ -6480,9 +6480,9 @@ static void cxgb4_ktls_dev_del(struct net_device *netdev,
adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
direction);
- cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
out_unlock:
+ cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
mutex_unlock(&uld_mutex);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 1b88bd1c2dbe..dd9be229819a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -997,20 +997,16 @@ int cxgb4_tc_flower_destroy(struct net_device *dev,
if (!ch_flower)
return -ENOENT;
+ rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
+ adap->flower_ht_params);
+
ret = cxgb4_flow_rule_destroy(dev, ch_flower->fs.tc_prio,
&ch_flower->fs, ch_flower->filter_id);
if (ret)
- goto err;
+ netdev_err(dev, "Flow rule destroy failed for tid: %u, ret: %d\n",
+ ch_flower->filter_id, ret);
- ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
- adap->flower_ht_params);
- if (ret) {
- netdev_err(dev, "Flow remove from rhashtable failed");
- goto err;
- }
kfree_rcu(ch_flower, rcu);
-
-err:
return ret;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
index 6c259de96f96..338b04f339b3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
@@ -589,7 +589,8 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
* down before configuring tc params.
*/
if (netif_running(dev)) {
- cxgb_close(dev);
+ netif_tx_stop_all_queues(dev);
+ netif_carrier_off(dev);
needs_bring_up = true;
}
@@ -615,8 +616,10 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
}
out:
- if (needs_bring_up)
- cxgb_open(dev);
+ if (needs_bring_up) {
+ netif_tx_start_all_queues(dev);
+ netif_carrier_on(dev);
+ }
mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
return ret;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 1e5f2edb70cf..6a099cb34b12 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2556,6 +2556,12 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
if (!eosw_txq)
return -ENOMEM;
+ if (!(adap->flags & CXGB4_FW_OK)) {
+ /* Don't stall caller when access to FW is lost */
+ complete(&eosw_txq->completion);
+ return -EIO;
+ }
+
skb = alloc_skb(len, GFP_KERNEL);
if (!skb)
return -ENOMEM;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index ae3ad99fbd06..9e3ea5f7be2e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -7782,7 +7782,6 @@ int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
int idx, bool sleep_ok)
{
struct fw_vi_mac_exact *p;
- u8 addr[] = {0, 0, 0, 0, 0, 0};
struct fw_vi_mac_cmd c;
int ret = 0;
u32 exact;
@@ -7799,7 +7798,7 @@ int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
p = c.u.exact;
p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
FW_VI_MAC_CMD_IDX_V(idx));
- memcpy(p->macaddr, addr, sizeof(p->macaddr));
+ eth_zero_addr(p->macaddr);
ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
return ret;
}
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index ef3f1e92632f..59683f79959c 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -59,6 +59,7 @@ static int chcr_get_nfrags_to_send(struct sk_buff *skb, u32 start, u32 len)
}
static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
+static void clear_conn_resources(struct chcr_ktls_info *tx_info);
/*
* chcr_ktls_save_keys: calculate and save crypto keys.
* @tx_info - driver specific tls info.
@@ -364,10 +365,14 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
chcr_get_ktls_tx_context(tls_ctx);
struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
struct ch_ktls_port_stats_debug *port_stats;
+ struct chcr_ktls_uld_ctx *u_ctx;
if (!tx_info)
return;
+ u_ctx = tx_info->adap->uld[CXGB4_ULD_KTLS].handle;
+ if (u_ctx && u_ctx->detach)
+ return;
/* clear l2t entry */
if (tx_info->l2te)
cxgb4_l2t_release(tx_info->l2te);
@@ -384,6 +389,8 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
if (tx_info->tid != -1) {
cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
tx_info->tid, tx_info->ip_family);
+
+ xa_erase(&u_ctx->tid_list, tx_info->tid);
}
port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
@@ -411,6 +418,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct ch_ktls_port_stats_debug *port_stats;
struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct chcr_ktls_uld_ctx *u_ctx;
struct chcr_ktls_info *tx_info;
struct dst_entry *dst;
struct adapter *adap;
@@ -425,6 +433,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
adap = pi->adapter;
port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id];
atomic64_inc(&port_stats->ktls_tx_connection_open);
+ u_ctx = adap->uld[CXGB4_ULD_KTLS].handle;
if (direction == TLS_OFFLOAD_CTX_DIR_RX) {
pr_err("not expecting for RX direction\n");
@@ -434,6 +443,9 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
if (tx_ctx->chcr_info)
goto out;
+ if (u_ctx && u_ctx->detach)
+ goto out;
+
tx_info = kvzalloc(sizeof(*tx_info), GFP_KERNEL);
if (!tx_info)
goto out;
@@ -569,6 +581,8 @@ free_tid:
cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
tx_info->tid, tx_info->ip_family);
+ xa_erase(&u_ctx->tid_list, tx_info->tid);
+
put_module:
/* release module refcount */
module_put(THIS_MODULE);
@@ -633,8 +647,12 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
{
const struct cpl_act_open_rpl *p = (void *)input;
struct chcr_ktls_info *tx_info = NULL;
+ struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct chcr_ktls_uld_ctx *u_ctx;
unsigned int atid, tid, status;
+ struct tls_context *tls_ctx;
struct tid_info *t;
+ int ret = 0;
tid = GET_TID(p);
status = AOPEN_STATUS_G(ntohl(p->atid_status));
@@ -666,14 +684,29 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
if (!status) {
tx_info->tid = tid;
cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);
+ /* Adding tid */
+ tls_ctx = tls_get_ctx(tx_info->sk);
+ tx_ctx = chcr_get_ktls_tx_context(tls_ctx);
+ u_ctx = adap->uld[CXGB4_ULD_KTLS].handle;
+ if (u_ctx) {
+ ret = xa_insert_bh(&u_ctx->tid_list, tid, tx_ctx,
+ GFP_NOWAIT);
+ if (ret < 0) {
+ pr_err("%s: Failed to allocate tid XA entry = %d\n",
+ __func__, tx_info->tid);
+ tx_info->open_state = CH_KTLS_OPEN_FAILURE;
+ goto out;
+ }
+ }
tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
} else {
tx_info->open_state = CH_KTLS_OPEN_FAILURE;
}
+out:
spin_unlock(&tx_info->lock);
complete(&tx_info->completion);
- return 0;
+ return ret;
}
/*
@@ -2090,6 +2123,8 @@ static void *chcr_ktls_uld_add(const struct cxgb4_lld_info *lldi)
goto out;
}
u_ctx->lldi = *lldi;
+ u_ctx->detach = false;
+ xa_init_flags(&u_ctx->tid_list, XA_FLAGS_LOCK_BH);
out:
return u_ctx;
}
@@ -2123,6 +2158,45 @@ static int chcr_ktls_uld_rx_handler(void *handle, const __be64 *rsp,
return 0;
}
+static void clear_conn_resources(struct chcr_ktls_info *tx_info)
+{
+ /* clear l2t entry */
+ if (tx_info->l2te)
+ cxgb4_l2t_release(tx_info->l2te);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ /* clear clip entry */
+ if (tx_info->ip_family == AF_INET6)
+ cxgb4_clip_release(tx_info->netdev, (const u32 *)
+ &tx_info->sk->sk_v6_rcv_saddr,
+ 1);
+#endif
+
+ /* clear tid */
+ if (tx_info->tid != -1)
+ cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
+ tx_info->tid, tx_info->ip_family);
+}
+
+static void ch_ktls_reset_all_conn(struct chcr_ktls_uld_ctx *u_ctx)
+{
+ struct ch_ktls_port_stats_debug *port_stats;
+ struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct chcr_ktls_info *tx_info;
+ unsigned long index;
+
+ xa_for_each(&u_ctx->tid_list, index, tx_ctx) {
+ tx_info = tx_ctx->chcr_info;
+ clear_conn_resources(tx_info);
+ port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
+ atomic64_inc(&port_stats->ktls_tx_connection_close);
+ kvfree(tx_info);
+ tx_ctx->chcr_info = NULL;
+ /* release module refcount */
+ module_put(THIS_MODULE);
+ }
+}
+
static int chcr_ktls_uld_state_change(void *handle, enum cxgb4_state new_state)
{
struct chcr_ktls_uld_ctx *u_ctx = handle;
@@ -2139,7 +2213,10 @@ static int chcr_ktls_uld_state_change(void *handle, enum cxgb4_state new_state)
case CXGB4_STATE_DETACH:
pr_info("%s: Down\n", pci_name(u_ctx->lldi.pdev));
mutex_lock(&dev_mutex);
+ u_ctx->detach = true;
list_del(&u_ctx->entry);
+ ch_ktls_reset_all_conn(u_ctx);
+ xa_destroy(&u_ctx->tid_list);
mutex_unlock(&dev_mutex);
break;
default:
@@ -2178,6 +2255,7 @@ static void __exit chcr_ktls_exit(void)
adap = pci_get_drvdata(u_ctx->lldi.pdev);
memset(&adap->ch_ktls_stats, 0, sizeof(adap->ch_ktls_stats));
list_del(&u_ctx->entry);
+ xa_destroy(&u_ctx->tid_list);
kfree(u_ctx);
}
mutex_unlock(&dev_mutex);
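The new tid_list xarray exists so that on CXGB4_STATE_DETACH the driver can walk every live kTLS connection and reclaim it; without the list, connections opened before the detach would leak their tids and module references. A toy sketch of the publish/walk/teardown pattern, with a fixed array standing in for the xarray:

#include <stdio.h>
#include <stdlib.h>

#define MAX_TID 8

struct conn { int tid; };

static struct conn *tid_list[MAX_TID];	/* tid -> connection object */

static int conn_open(int tid)
{
	struct conn *c = malloc(sizeof(*c));

	if (!c)
		return -1;
	c->tid = tid;
	tid_list[tid] = c;	/* xa_insert_bh() in the driver */
	return 0;
}

/* On detach, walk the list and reclaim every live connection. */
static void reset_all_conn(void)
{
	for (int tid = 0; tid < MAX_TID; tid++) {	/* xa_for_each() */
		if (!tid_list[tid])
			continue;
		printf("tearing down tid %d\n", tid_list[tid]->tid);
		free(tid_list[tid]);
		tid_list[tid] = NULL;
	}
}

int main(void)
{
	conn_open(3);
	conn_open(5);
	reset_all_conn();
	return 0;
}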
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
index 18b3b1f02415..10572dc55365 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
@@ -75,6 +75,8 @@ struct chcr_ktls_ofld_ctx_tx {
struct chcr_ktls_uld_ctx {
struct list_head entry;
struct cxgb4_lld_info lldi;
+ struct xarray tid_list;
+ bool detach;
};
static inline struct chcr_ktls_ofld_ctx_tx *
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index 188d871f6b8c..c320cc8ca68d 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
@@ -1564,8 +1564,10 @@ found_ok_skb:
cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
sizeof(thdr->type), &thdr->type);
- if (cerr && thdr->type != TLS_RECORD_TYPE_DATA)
- return -EIO;
+ if (cerr && thdr->type != TLS_RECORD_TYPE_DATA) {
+ copied = -EIO;
+ break;
+ }
/* don't send tls header, skip copy */
goto skip_copy;
}
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 8df6f081f244..c2ebb3388789 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -2356,8 +2356,6 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct gemini_ethernet *geth;
struct net_device *netdev;
- struct resource *gmacres;
- struct resource *dmares;
struct device *parent;
unsigned int id;
int irq;
@@ -2390,24 +2388,18 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
port->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
/* DMA memory */
- dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!dmares) {
- dev_err(dev, "no DMA resource\n");
- return -ENODEV;
- }
- port->dma_base = devm_ioremap_resource(dev, dmares);
- if (IS_ERR(port->dma_base))
+ port->dma_base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(port->dma_base)) {
+ dev_err(dev, "get DMA address failed\n");
return PTR_ERR(port->dma_base);
+ }
/* GMAC config memory */
- gmacres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!gmacres) {
- dev_err(dev, "no GMAC resource\n");
- return -ENODEV;
- }
- port->gmac_base = devm_ioremap_resource(dev, gmacres);
- if (IS_ERR(port->gmac_base))
+ port->gmac_base = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
+ if (IS_ERR(port->gmac_base)) {
+ dev_err(dev, "get GMAC address failed\n");
return PTR_ERR(port->gmac_base);
+ }
/* Interrupt */
irq = platform_get_irq(pdev, 0);
@@ -2502,10 +2494,6 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
if (ret)
goto unprepare;
- netdev_info(netdev,
- "irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n",
- port->irq, &dmares->start,
- &gmacres->start);
return 0;
unprepare:
@@ -2544,17 +2532,13 @@ static int gemini_ethernet_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct gemini_ethernet *geth;
unsigned int retry = 5;
- struct resource *res;
u32 val;
/* Global registers */
geth = devm_kzalloc(dev, sizeof(*geth), GFP_KERNEL);
if (!geth)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
- geth->base = devm_ioremap_resource(dev, res);
+ geth->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(geth->base))
return PTR_ERR(geth->base);
geth->dev = dev;
diff --git a/drivers/net/ethernet/dec/tulip/tulip.h b/drivers/net/ethernet/dec/tulip/tulip.h
index 815907259048..0ed598dc7569 100644
--- a/drivers/net/ethernet/dec/tulip/tulip.h
+++ b/drivers/net/ethernet/dec/tulip/tulip.h
@@ -478,7 +478,6 @@ void t21142_lnk_change(struct net_device *dev, int csr5);
void pnic2_lnk_change(struct net_device *dev, int csr5);
void pnic2_timer(struct timer_list *t);
void pnic2_start_nway(struct net_device *dev);
-void pnic2_lnk_change(struct net_device *dev, int csr5);
/* eeprom.c */
void tulip_parse_eeprom(struct net_device *dev);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index 4dfadf2b70d6..ae6d382d8735 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -1,6 +1,9 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2019 NXP */
+#include <linux/acpi.h>
+#include <linux/property.h>
+
#include "dpaa2-eth.h"
#include "dpaa2-mac.h"
@@ -34,39 +37,51 @@ static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
return 0;
}
-/* Caller must call of_node_put on the returned value */
-static struct device_node *dpaa2_mac_get_node(u16 dpmac_id)
+static struct fwnode_handle *dpaa2_mac_get_node(struct device *dev,
+ u16 dpmac_id)
{
- struct device_node *dpmacs, *dpmac = NULL;
- u32 id;
+ struct fwnode_handle *fwnode, *parent, *child = NULL;
+ struct device_node *dpmacs = NULL;
int err;
+ u32 id;
- dpmacs = of_find_node_by_name(NULL, "dpmacs");
- if (!dpmacs)
- return NULL;
+ fwnode = dev_fwnode(dev->parent);
+ if (is_of_node(fwnode)) {
+ dpmacs = of_find_node_by_name(NULL, "dpmacs");
+ if (!dpmacs)
+ return NULL;
+ parent = of_fwnode_handle(dpmacs);
+ } else if (is_acpi_node(fwnode)) {
+ parent = fwnode;
+ }
- while ((dpmac = of_get_next_child(dpmacs, dpmac)) != NULL) {
- err = of_property_read_u32(dpmac, "reg", &id);
+ fwnode_for_each_child_node(parent, child) {
+ err = -EINVAL;
+ if (is_acpi_device_node(child))
+ err = acpi_get_local_address(ACPI_HANDLE_FWNODE(child), &id);
+ else if (is_of_node(child))
+ err = of_property_read_u32(to_of_node(child), "reg", &id);
if (err)
continue;
- if (id == dpmac_id)
- break;
- }
+ if (id == dpmac_id) {
+ of_node_put(dpmacs);
+ return child;
+ }
+ }
of_node_put(dpmacs);
-
- return dpmac;
+ return NULL;
}
-static int dpaa2_mac_get_if_mode(struct device_node *node,
+static int dpaa2_mac_get_if_mode(struct fwnode_handle *dpmac_node,
struct dpmac_attr attr)
{
phy_interface_t if_mode;
int err;
- err = of_get_phy_mode(node, &if_mode);
- if (!err)
- return if_mode;
+ err = fwnode_get_phy_mode(dpmac_node);
+ if (err > 0)
+ return err;
err = phy_mode(attr.eth_if, &if_mode);
if (!err)
@@ -235,26 +250,27 @@ static const struct phylink_mac_ops dpaa2_mac_phylink_ops = {
};
static int dpaa2_pcs_create(struct dpaa2_mac *mac,
- struct device_node *dpmac_node, int id)
+ struct fwnode_handle *dpmac_node,
+ int id)
{
struct mdio_device *mdiodev;
- struct device_node *node;
+ struct fwnode_handle *node;
- node = of_parse_phandle(dpmac_node, "pcs-handle", 0);
- if (!node) {
+ node = fwnode_find_reference(dpmac_node, "pcs-handle", 0);
+ if (IS_ERR(node)) {
/* do not error out on old DTS files */
netdev_warn(mac->net_dev, "pcs-handle node not found\n");
return 0;
}
- if (!of_device_is_available(node)) {
+ if (!fwnode_device_is_available(node)) {
netdev_err(mac->net_dev, "pcs-handle node not available\n");
- of_node_put(node);
+ fwnode_handle_put(node);
return -ENODEV;
}
- mdiodev = of_mdio_find_device(node);
- of_node_put(node);
+ mdiodev = fwnode_mdio_find_device(node);
+ fwnode_handle_put(node);
if (!mdiodev)
return -EPROBE_DEFER;
@@ -283,13 +299,13 @@ static void dpaa2_pcs_destroy(struct dpaa2_mac *mac)
int dpaa2_mac_connect(struct dpaa2_mac *mac)
{
struct net_device *net_dev = mac->net_dev;
- struct device_node *dpmac_node;
+ struct fwnode_handle *dpmac_node;
struct phylink *phylink;
int err;
mac->if_link_type = mac->attr.link_type;
- dpmac_node = mac->of_node;
+ dpmac_node = mac->fw_node;
if (!dpmac_node) {
netdev_err(net_dev, "No dpmac@%d node found.\n", mac->attr.id);
return -ENODEV;
@@ -304,7 +320,7 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
* error out if the interface mode requests them and there is no PHY
* to act upon them
*/
- if (of_phy_is_fixed_link(dpmac_node) &&
+ if (of_phy_is_fixed_link(to_of_node(dpmac_node)) &&
(mac->if_mode == PHY_INTERFACE_MODE_RGMII_ID ||
mac->if_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
mac->if_mode == PHY_INTERFACE_MODE_RGMII_TXID)) {
@@ -324,7 +340,7 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
mac->phylink_config.type = PHYLINK_NETDEV;
phylink = phylink_create(&mac->phylink_config,
- of_fwnode_handle(dpmac_node), mac->if_mode,
+ dpmac_node, mac->if_mode,
&dpaa2_mac_phylink_ops);
if (IS_ERR(phylink)) {
err = PTR_ERR(phylink);
@@ -335,9 +351,9 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
if (mac->pcs)
phylink_set_pcs(mac->phylink, &mac->pcs->pcs);
- err = phylink_of_phy_connect(mac->phylink, dpmac_node, 0);
+ err = phylink_fwnode_phy_connect(mac->phylink, dpmac_node, 0);
if (err) {
- netdev_err(net_dev, "phylink_of_phy_connect() = %d\n", err);
+ netdev_err(net_dev, "phylink_fwnode_phy_connect() = %d\n", err);
goto err_phylink_destroy;
}
@@ -384,8 +400,8 @@ int dpaa2_mac_open(struct dpaa2_mac *mac)
/* Find the device node representing the MAC device and link the device
* behind the associated netdev to it.
*/
- mac->of_node = dpaa2_mac_get_node(mac->attr.id);
- net_dev->dev.of_node = mac->of_node;
+ mac->fw_node = dpaa2_mac_get_node(&mac->mc_dev->dev, mac->attr.id);
+ net_dev->dev.of_node = to_of_node(mac->fw_node);
return 0;
@@ -399,8 +415,8 @@ void dpaa2_mac_close(struct dpaa2_mac *mac)
struct fsl_mc_device *dpmac_dev = mac->mc_dev;
dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
- if (mac->of_node)
- of_node_put(mac->of_node);
+ if (mac->fw_node)
+ fwnode_handle_put(mac->fw_node);
}
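Switching from device_node to fwnode_handle lets the same lookup serve both DT descriptions ("reg" property) and ACPI ones (_ADR via acpi_get_local_address()). A hedged userspace model of that dispatch; the types and ids are illustrative:

#include <stdio.h>

enum fw_backend { FW_OF, FW_ACPI };

/* One handle type; the backend is probed at runtime instead of
 * hard-coding OF calls.
 */
struct fwnode {
	enum fw_backend backend;
	unsigned int reg;	/* stand-in for the "reg"/_ADR id */
};

static int fwnode_local_address(const struct fwnode *n, unsigned int *id)
{
	switch (n->backend) {
	case FW_OF:	/* of_property_read_u32(node, "reg", id) */
	case FW_ACPI:	/* acpi_get_local_address(handle, id) */
		*id = n->reg;
		return 0;
	}
	return -1;
}

int main(void)
{
	struct fwnode dt = { FW_OF, 2 }, acpi = { FW_ACPI, 2 };
	unsigned int id;

	/* The same lookup code serves both firmware descriptions. */
	fwnode_local_address(&dt, &id);
	printf("dt dpmac id %u\n", id);
	fwnode_local_address(&acpi, &id);
	printf("acpi dpmac id %u\n", id);
	return 0;
}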
static char dpaa2_mac_ethtool_stats[][ETH_GSTRING_LEN] = {
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
index 8ebcb3420d02..7842cbb2207a 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -24,7 +24,7 @@ struct dpaa2_mac {
phy_interface_t if_mode;
enum dpmac_link_type if_link_type;
struct lynx_pcs *pcs;
- struct device_node *of_node;
+ struct fwnode_handle *fw_node;
};
bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
index 8b356c485507..ee1468e3eaa3 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
@@ -99,15 +99,13 @@ EXPORT_SYMBOL(enetc_ierb_register_pf);
static int enetc_ierb_probe(struct platform_device *pdev)
{
struct enetc_ierb *ierb;
- struct resource *res;
void __iomem *regs;
ierb = devm_kzalloc(&pdev->dev, sizeof(*ierb), GFP_KERNEL);
if (!ierb)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(&pdev->dev, res);
+ regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 31274325159a..c84f6c226743 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */
+#include <asm/unaligned.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/fsl/enetc_mdio.h>
@@ -17,15 +18,15 @@ static void enetc_pf_get_primary_mac_addr(struct enetc_hw *hw, int si, u8 *addr)
u32 upper = __raw_readl(hw->port + ENETC_PSIPMAR0(si));
u16 lower = __raw_readw(hw->port + ENETC_PSIPMAR1(si));
- *(u32 *)addr = upper;
- *(u16 *)(addr + 4) = lower;
+ put_unaligned_le32(upper, addr);
+ put_unaligned_le16(lower, addr + 4);
}
static void enetc_pf_set_primary_mac_addr(struct enetc_hw *hw, int si,
const u8 *addr)
{
- u32 upper = *(const u32 *)addr;
- u16 lower = *(const u16 *)(addr + 4);
+ u32 upper = get_unaligned_le32(addr);
+ u16 lower = get_unaligned_le16(addr + 4);
__raw_writel(upper, hw->port + ENETC_PSIPMAR0(si));
__raw_writew(lower, hw->port + ENETC_PSIPMAR1(si));
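put_unaligned_le32/16 replace the raw pointer casts because addr + 4 has no alignment guarantee and the casts also baked in host endianness. A self-contained sketch of byte-wise little-endian stores with the same properties, assuming the usual semantics of the kernel helpers:

#include <stdint.h>
#include <stdio.h>

/* Byte-wise little-endian stores: safe for any alignment and any host
 * endianness -- the property the patch relies on for the MAC bytes.
 */
static void put_unaligned_le32(uint32_t v, uint8_t *p)
{
	p[0] = v;
	p[1] = v >> 8;
	p[2] = v >> 16;
	p[3] = v >> 24;
}

static void put_unaligned_le16(uint16_t v, uint8_t *p)
{
	p[0] = v;
	p[1] = v >> 8;
}

int main(void)
{
	uint8_t mac[6];

	/* mac + 4 is only 2-byte aligned at best; a u32 store there, like
	 * the old *(u32 *)addr code, is undefined behaviour on
	 * strict-alignment machines.
	 */
	put_unaligned_le32(0x44332211, mac);
	put_unaligned_le16(0x6655, mac + 4);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}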
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index af699f2ad095..4577226d3c6a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -465,8 +465,13 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
struct streamid_conf *si_conf;
u16 data_size;
dma_addr_t dma;
+ int port;
int err;
+ port = enetc_pf_to_port(priv->si->pdev);
+ if (port < 0)
+ return -EINVAL;
+
if (sid->index >= priv->psfp_cap.max_streamid)
return -EINVAL;
@@ -499,7 +504,7 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
si_conf = &cbd.sid_set;
/* Only one port supported for one entry, set itself */
- si_conf->iports = cpu_to_le32(1 << enetc_pf_to_port(priv->si->pdev));
+ si_conf->iports = cpu_to_le32(1 << port);
si_conf->id_type = 1;
si_conf->oui[2] = 0x0;
si_conf->oui[1] = 0x80;
@@ -524,7 +529,7 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
si_conf->en = 0x80;
si_conf->stream_handle = cpu_to_le32(sid->handle);
- si_conf->iports = cpu_to_le32(1 << enetc_pf_to_port(priv->si->pdev));
+ si_conf->iports = cpu_to_le32(1 << port);
si_conf->id_type = sid->filtertype;
si_conf->oui[2] = 0x0;
si_conf->oui[1] = 0x80;
@@ -567,6 +572,11 @@ static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv,
{
struct enetc_cbd cbd = {.cmd = 0};
struct sfi_conf *sfi_config;
+ int port;
+
+ port = enetc_pf_to_port(priv->si->pdev);
+ if (port < 0)
+ return -EINVAL;
cbd.index = cpu_to_le16(sfi->index);
cbd.cls = BDCR_CMD_STREAM_FILTER;
@@ -586,8 +596,7 @@ static int enetc_streamfilter_hw_set(struct enetc_ndev_priv *priv,
}
sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id);
- sfi_config->input_ports =
- cpu_to_le32(1 << enetc_pf_to_port(priv->si->pdev));
+ sfi_config->input_ports = cpu_to_le32(1 << port);
/* The priority value which may be matched against the
* frame's priority value to determine a match for this entry.
@@ -1548,7 +1557,7 @@ int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct flow_block_offload *f = type_data;
- int err;
+ int port, err;
err = flow_block_cb_setup_simple(f, &enetc_block_cb_list,
enetc_setup_tc_block_cb,
@@ -1558,10 +1567,18 @@ int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data)
switch (f->command) {
case FLOW_BLOCK_BIND:
- set_bit(enetc_pf_to_port(priv->si->pdev), &epsfp.dev_bitmap);
+ port = enetc_pf_to_port(priv->si->pdev);
+ if (port < 0)
+ return -EINVAL;
+
+ set_bit(port, &epsfp.dev_bitmap);
break;
case FLOW_BLOCK_UNBIND:
- clear_bit(enetc_pf_to_port(priv->si->pdev), &epsfp.dev_bitmap);
+ port = enetc_pf_to_port(priv->si->pdev);
+ if (port < 0)
+ return -EINVAL;
+
+ clear_bit(port, &epsfp.dev_bitmap);
if (!epsfp.dev_bitmap)
clean_psfp_all();
break;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f2065f9d02e6..ad82cffc6f3f 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1662,7 +1662,7 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
}
/* ------------------------------------------------------------------------- */
-static void fec_get_mac(struct net_device *ndev)
+static int fec_get_mac(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
unsigned char *iap, tmpaddr[ETH_ALEN];
@@ -1685,6 +1685,8 @@ static void fec_get_mac(struct net_device *ndev)
ret = of_get_mac_address(np, tmpaddr);
if (!ret)
iap = tmpaddr;
+ else if (ret == -EPROBE_DEFER)
+ return ret;
}
}
@@ -1723,7 +1725,7 @@ static void fec_get_mac(struct net_device *ndev)
eth_hw_addr_random(ndev);
dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
ndev->dev_addr);
- return;
+ return 0;
}
memcpy(ndev->dev_addr, iap, ETH_ALEN);
@@ -1731,6 +1733,8 @@ static void fec_get_mac(struct net_device *ndev)
/* Adjust MAC if using macaddr */
if (iap == macaddr)
ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
+
+ return 0;
}
/* ------------------------------------------------------------------------- */
@@ -3290,7 +3294,9 @@ static int fec_enet_init(struct net_device *ndev)
return ret;
}
- fec_enet_alloc_queue(ndev);
+ ret = fec_enet_alloc_queue(ndev);
+ if (ret)
+ return ret;
bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
@@ -3298,11 +3304,15 @@ static int fec_enet_init(struct net_device *ndev)
cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
GFP_KERNEL);
if (!cbd_base) {
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_queue_mem;
}
/* Get the Ethernet address */
- fec_get_mac(ndev);
+ ret = fec_get_mac(ndev);
+ if (ret)
+ goto free_queue_mem;
+
/* make sure MAC we just acquired is programmed into the hw */
fec_set_mac_address(ndev, NULL);
@@ -3376,6 +3386,10 @@ static int fec_enet_init(struct net_device *ndev)
fec_enet_update_ethtool_stats(ndev);
return 0;
+
+free_queue_mem:
+ fec_enet_free_queue(ndev);
+ return ret;
}
#ifdef CONFIG_OF
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index f2945abdb041..9646483137c4 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -274,32 +274,44 @@ static void gfar_configure_coalescing_all(struct gfar_private *priv)
gfar_configure_coalescing(priv, 0xFF, 0xFF);
}
-static struct net_device_stats *gfar_get_stats(struct net_device *dev)
+static void gfar_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct gfar_private *priv = netdev_priv(dev);
- unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
- unsigned long tx_packets = 0, tx_bytes = 0;
int i;
for (i = 0; i < priv->num_rx_queues; i++) {
- rx_packets += priv->rx_queue[i]->stats.rx_packets;
- rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
- rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
+ stats->rx_packets += priv->rx_queue[i]->stats.rx_packets;
+ stats->rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
+ stats->rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
}
- dev->stats.rx_packets = rx_packets;
- dev->stats.rx_bytes = rx_bytes;
- dev->stats.rx_dropped = rx_dropped;
-
for (i = 0; i < priv->num_tx_queues; i++) {
- tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
- tx_packets += priv->tx_queue[i]->stats.tx_packets;
+ stats->tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
+ stats->tx_packets += priv->tx_queue[i]->stats.tx_packets;
}
- dev->stats.tx_bytes = tx_bytes;
- dev->stats.tx_packets = tx_packets;
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
+ struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon;
+ unsigned long flags;
+ u32 rdrp, car, car_before;
+ u64 rdrp_offset;
+
+ spin_lock_irqsave(&priv->rmon_overflow.lock, flags);
+ car = gfar_read(&rmon->car1) & CAR1_C1RDR;
+ do {
+ car_before = car;
+ rdrp = gfar_read(&rmon->rdrp);
+ car = gfar_read(&rmon->car1) & CAR1_C1RDR;
+ } while (car != car_before);
+ if (car) {
+ priv->rmon_overflow.rdrp++;
+ gfar_write(&rmon->car1, car);
+ }
+ rdrp_offset = priv->rmon_overflow.rdrp;
+ spin_unlock_irqrestore(&priv->rmon_overflow.lock, flags);
- return &dev->stats;
+ stats->rx_missed_errors = rdrp + (rdrp_offset << 16);
+ }
}
/* Set the appropriate hash bit for the given addr */
@@ -390,7 +402,8 @@ static void gfar_ints_enable(struct gfar_private *priv)
for (i = 0; i < priv->num_grps; i++) {
struct gfar __iomem *regs = priv->gfargrp[i].regs;
/* Unmask the interrupts we look for */
- gfar_write(&regs->imask, IMASK_DEFAULT);
+ gfar_write(&regs->imask,
+ IMASK_DEFAULT | priv->rmon_overflow.imask);
}
}
@@ -2298,7 +2311,7 @@ static irqreturn_t gfar_receive(int irq, void *grp_id)
if (likely(napi_schedule_prep(&grp->napi_rx))) {
spin_lock_irqsave(&grp->grplock, flags);
imask = gfar_read(&grp->regs->imask);
- imask &= IMASK_RX_DISABLED;
+ imask &= IMASK_RX_DISABLED | grp->priv->rmon_overflow.imask;
gfar_write(&grp->regs->imask, imask);
spin_unlock_irqrestore(&grp->grplock, flags);
__napi_schedule(&grp->napi_rx);
@@ -2322,7 +2335,7 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id)
if (likely(napi_schedule_prep(&grp->napi_tx))) {
spin_lock_irqsave(&grp->grplock, flags);
imask = gfar_read(&grp->regs->imask);
- imask &= IMASK_TX_DISABLED;
+ imask &= IMASK_TX_DISABLED | grp->priv->rmon_overflow.imask;
gfar_write(&grp->regs->imask, imask);
spin_unlock_irqrestore(&grp->grplock, flags);
__napi_schedule(&grp->napi_tx);
@@ -2693,6 +2706,18 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
}
netif_dbg(priv, tx_err, dev, "Transmit Error\n");
}
+ if (events & IEVENT_MSRO) {
+ struct rmon_mib __iomem *rmon = &regs->rmon;
+ u32 car;
+
+ spin_lock(&priv->rmon_overflow.lock);
+ car = gfar_read(&rmon->car1) & CAR1_C1RDR;
+ if (car) {
+ priv->rmon_overflow.rdrp++;
+ gfar_write(&rmon->car1, car);
+ }
+ spin_unlock(&priv->rmon_overflow.lock);
+ }
if (events & IEVENT_BSY) {
dev->stats.rx_over_errors++;
atomic64_inc(&priv->extra_stats.rx_bsy);
@@ -3109,11 +3134,14 @@ static void gfar_hw_init(struct gfar_private *priv)
/* Zero out the rmon mib registers if it has them */
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
- memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
+ memset_io(&regs->rmon, 0, offsetof(struct rmon_mib, car1));
/* Mask off the CAM interrupts */
gfar_write(&regs->rmon.cam1, 0xffffffff);
gfar_write(&regs->rmon.cam2, 0xffffffff);
+ /* Clear the CAR registers (w1c style) */
+ gfar_write(&regs->rmon.car1, 0xffffffff);
+ gfar_write(&regs->rmon.car2, 0xffffffff);
}
/* Initialize ECNTRL */
@@ -3157,7 +3185,7 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_set_rx_mode = gfar_set_multi,
.ndo_tx_timeout = gfar_timeout,
.ndo_do_ioctl = gfar_ioctl,
- .ndo_get_stats = gfar_get_stats,
+ .ndo_get_stats64 = gfar_get_stats64,
.ndo_change_carrier = fixed_phy_change_carrier,
.ndo_set_mac_address = gfar_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -3267,6 +3295,14 @@ static int gfar_probe(struct platform_device *ofdev)
gfar_hw_init(priv);
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
+ struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon;
+
+ spin_lock_init(&priv->rmon_overflow.lock);
+ priv->rmon_overflow.imask = IMASK_MSRO;
+ gfar_write(&rmon->cam1, gfar_read(&rmon->cam1) & ~CAM1_M1RDR);
+ }
+
/* Carrier starts down, phylib will bring it up */
netif_carrier_off(dev);
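gfar_get_stats64() extends the 16-bit RDRP hardware counter to 64 bits: it re-reads the carry bit around the counter read to catch an in-flight rollover, folds the rollover into a software count, and reports rdrp + (overflows << 16). A minimal userspace model, with globals standing in for the RMON registers:

#include <stdint.h>
#include <stdio.h>

static uint16_t hw_rdrp;	/* 16-bit counter, rolls over at 65536 */
static int hw_carry;		/* set by "hardware" on rollover, w1c */
static uint64_t sw_overflows;	/* software count of observed rollovers */

static uint64_t read_rx_missed(void)
{
	uint16_t rdrp;
	int car, car_before;

	/* Re-read until no rollover slipped in between the two reads,
	 * mirroring the car1/rdrp/car1 loop in gfar_get_stats64().
	 */
	car = hw_carry;
	do {
		car_before = car;
		rdrp = hw_rdrp;
		car = hw_carry;
	} while (car != car_before);
	if (car) {
		sw_overflows++;
		hw_carry = 0;	/* write-1-to-clear in real hardware */
	}
	return rdrp + (sw_overflows << 16);
}

int main(void)
{
	hw_rdrp = 7;
	hw_carry = 1;		/* one rollover happened */
	printf("missed=%llu\n", (unsigned long long)read_rx_missed());
	return 0;
}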
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 5ea47df93e5e..ca5e14f908fe 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -445,6 +445,60 @@ struct ethtool_rx_list {
#define RQFPR_PER 0x00000002
#define RQFPR_EER 0x00000001
+/* CAR1 bits */
+#define CAR1_C164 0x80000000
+#define CAR1_C1127 0x40000000
+#define CAR1_C1255 0x20000000
+#define CAR1_C1511 0x10000000
+#define CAR1_C11K 0x08000000
+#define CAR1_C1MAX 0x04000000
+#define CAR1_C1MGV 0x02000000
+#define CAR1_C1REJ 0x00020000
+#define CAR1_C1RBY 0x00010000
+#define CAR1_C1RPK 0x00008000
+#define CAR1_C1RFC 0x00004000
+#define CAR1_C1RMC 0x00002000
+#define CAR1_C1RBC 0x00001000
+#define CAR1_C1RXC 0x00000800
+#define CAR1_C1RXP 0x00000400
+#define CAR1_C1RXU 0x00000200
+#define CAR1_C1RAL 0x00000100
+#define CAR1_C1RFL 0x00000080
+#define CAR1_C1RCD 0x00000040
+#define CAR1_C1RCS 0x00000020
+#define CAR1_C1RUN 0x00000010
+#define CAR1_C1ROV 0x00000008
+#define CAR1_C1RFR 0x00000004
+#define CAR1_C1RJB 0x00000002
+#define CAR1_C1RDR 0x00000001
+
+/* CAM1 bits */
+#define CAM1_M164 0x80000000
+#define CAM1_M1127 0x40000000
+#define CAM1_M1255 0x20000000
+#define CAM1_M1511 0x10000000
+#define CAM1_M11K 0x08000000
+#define CAM1_M1MAX 0x04000000
+#define CAM1_M1MGV 0x02000000
+#define CAM1_M1REJ 0x00020000
+#define CAM1_M1RBY 0x00010000
+#define CAM1_M1RPK 0x00008000
+#define CAM1_M1RFC 0x00004000
+#define CAM1_M1RMC 0x00002000
+#define CAM1_M1RBC 0x00001000
+#define CAM1_M1RXC 0x00000800
+#define CAM1_M1RXP 0x00000400
+#define CAM1_M1RXU 0x00000200
+#define CAM1_M1RAL 0x00000100
+#define CAM1_M1RFL 0x00000080
+#define CAM1_M1RCD 0x00000040
+#define CAM1_M1RCS 0x00000020
+#define CAM1_M1RUN 0x00000010
+#define CAM1_M1ROV 0x00000008
+#define CAM1_M1RFR 0x00000004
+#define CAM1_M1RJB 0x00000002
+#define CAM1_M1RDR 0x00000001
+
/* TxBD status field bits */
#define TXBD_READY 0x8000
#define TXBD_PADCRC 0x4000
@@ -609,6 +663,15 @@ struct rmon_mib
u32 cam2; /* 0x.73c - Carry Mask Register Two */
};
+struct rmon_overflow {
+	/* lock to synchronize the rdrp field of this struct and the
+	 * CAR1/CAR2 registers
+ */
+ spinlock_t lock;
+ u32 imask;
+ u64 rdrp;
+};
+
struct gfar_extra_stats {
atomic64_t rx_alloc_err;
atomic64_t rx_large;
@@ -913,8 +976,8 @@ enum {
* Per TX queue stats
*/
struct tx_q_stats {
- unsigned long tx_packets;
- unsigned long tx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
};
/**
@@ -963,9 +1026,9 @@ struct gfar_priv_tx_q {
* Per RX queue stats
*/
struct rx_q_stats {
- unsigned long rx_packets;
- unsigned long rx_bytes;
- unsigned long rx_dropped;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_dropped;
};
struct gfar_rx_buff {
@@ -1096,6 +1159,7 @@ struct gfar_private {
/* Network Statistics */
struct gfar_extra_stats extra_stats;
+ struct rmon_overflow rmon_overflow;
/* PHY stuff */
phy_interface_t interface;
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index bfa2826c5545..0b68852379da 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -2,6 +2,7 @@
* QorIQ 10G MDIO Controller
*
* Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2021 NXP
*
* Authors: Andy Fleming <afleming@freescale.com>
* Timur Tabi <timur@freescale.com>
@@ -11,15 +12,17 @@
* kind, whether express or implied.
*/
-#include <linux/kernel.h>
-#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/acpi_mdio.h>
#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/phy.h>
+#include <linux/kernel.h>
#include <linux/mdio.h>
+#include <linux/module.h>
#include <linux/of_address.h>
-#include <linux/of_platform.h>
#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/slab.h>
/* Number of microseconds to wait for a register to respond */
#define TIMEOUT 1000
@@ -243,10 +246,10 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
static int xgmac_mdio_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
- struct mii_bus *bus;
- struct resource *res;
+ struct fwnode_handle *fwnode;
struct mdio_fsl_priv *priv;
+ struct resource *res;
+ struct mii_bus *bus;
int ret;
/* In DPAA-1, MDIO is one of the many FMan sub-devices. The FMan
@@ -279,13 +282,22 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
goto err_ioremap;
}
+	/* For both the ACPI and DT cases, the endianness of the MDIO
+	 * controller needs to be specified using the "little-endian"
+	 * property.
+ */
priv->is_little_endian = device_property_read_bool(&pdev->dev,
"little-endian");
priv->has_a011043 = device_property_read_bool(&pdev->dev,
"fsl,erratum-a011043");
- ret = of_mdiobus_register(bus, np);
+ fwnode = pdev->dev.fwnode;
+ if (is_of_node(fwnode))
+ ret = of_mdiobus_register(bus, to_of_node(fwnode));
+ else if (is_acpi_node(fwnode))
+ ret = acpi_mdiobus_register(bus, fwnode);
+ else
+ ret = -EINVAL;
if (ret) {
dev_err(&pdev->dev, "cannot register MDIO bus\n");
goto err_registration;
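The registration now dispatches on the fwnode type; restated as a comment sketch:

/* Dispatch added above, restated:
 *
 *   is_of_node(fwnode)   -> of_mdiobus_register(bus, to_of_node(fwnode))
 *   is_acpi_node(fwnode) -> acpi_mdiobus_register(bus, fwnode)
 *   anything else        -> -EINVAL (no usable firmware description)
 *
 * acpi_mdiobus_register() is provided by the new <linux/acpi_mdio.h>
 * include, so the same probe path serves both DT and ACPI systems.
 */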
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 7e451e61eefd..62c0bed82ced 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -548,8 +548,8 @@ static int fmvj18x_get_hwinfo(struct pcmcia_device *link, u_char *node_id)
base = ioremap(link->resource[2]->start, resource_size(link->resource[2]));
if (!base) {
- pcmcia_release_window(link, link->resource[2]);
- return -ENOMEM;
+ pcmcia_release_window(link, link->resource[2]);
+ return -1;
}
pcmcia_map_mem_page(link, link->resource[2], 0);
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 7302498c6df3..bbc423e93122 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -180,7 +180,7 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
/* Double check we have no extra work.
* Ensure unmask synchronizes with checking for work.
*/
- dma_rmb();
+ mb();
if (block->tx)
reschedule |= gve_tx_poll(block, -1);
if (block->rx)
@@ -220,6 +220,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
int vecs_left = new_num_ntfy_blks % 2;
priv->num_ntfy_blks = new_num_ntfy_blks;
+ priv->mgmt_msix_idx = priv->num_ntfy_blks;
priv->tx_cfg.max_queues = min_t(int, priv->tx_cfg.max_queues,
vecs_per_type);
priv->rx_cfg.max_queues = min_t(int, priv->rx_cfg.max_queues,
@@ -300,20 +301,22 @@ static void gve_free_notify_blocks(struct gve_priv *priv)
{
int i;
- /* Free the irqs */
- for (i = 0; i < priv->num_ntfy_blks; i++) {
- struct gve_notify_block *block = &priv->ntfy_blocks[i];
- int msix_idx = i;
+ if (priv->msix_vectors) {
+ /* Free the irqs */
+ for (i = 0; i < priv->num_ntfy_blks; i++) {
+ struct gve_notify_block *block = &priv->ntfy_blocks[i];
+ int msix_idx = i;
- irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
- NULL);
- free_irq(priv->msix_vectors[msix_idx].vector, block);
+ irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
+ NULL);
+ free_irq(priv->msix_vectors[msix_idx].vector, block);
+ }
+ free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
}
dma_free_coherent(&priv->pdev->dev,
priv->num_ntfy_blks * sizeof(*priv->ntfy_blocks),
priv->ntfy_blocks, priv->ntfy_block_bus);
priv->ntfy_blocks = NULL;
- free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
pci_disable_msix(priv->pdev);
kvfree(priv->msix_vectors);
priv->msix_vectors = NULL;
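The guard added above matters on teardown after a failed setup; a hedged sketch of the ordering it protects against (the call chain is an assumption, not shown in this hunk):

/* Hypothetical failure ordering this guard protects against:
 *
 *   gve_alloc_notify_blocks() fails partway, frees priv->msix_vectors
 *   and leaves it NULL; a later teardown still calls
 *   gve_free_notify_blocks(), which previously dereferenced
 *   priv->msix_vectors[...] unconditionally.
 *
 * With the check, only the notify-block DMA area is released when the
 * MSI-X vectors are absent, and the management vector is freed inside
 * the same guarded branch.
 */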
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 6938f3a939d6..3e04a3973d68 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -212,10 +212,11 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
tx->dev = &priv->pdev->dev;
if (!tx->raw_addressing) {
tx->tx_fifo.qpl = gve_assign_tx_qpl(priv);
-
+ if (!tx->tx_fifo.qpl)
+ goto abort_with_desc;
/* map Tx FIFO */
if (gve_tx_fifo_init(priv, &tx->tx_fifo))
- goto abort_with_desc;
+ goto abort_with_qpl;
}
tx->q_resources =
@@ -236,6 +237,9 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
abort_with_fifo:
if (!tx->raw_addressing)
gve_tx_fifo_release(priv, &tx->tx_fifo);
+abort_with_qpl:
+ if (!tx->raw_addressing)
+ gve_unassign_qpl(priv, tx->tx_fifo.qpl->id);
abort_with_desc:
dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
tx->desc = NULL;
@@ -589,7 +593,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
struct gve_tx_ring *tx;
int nsegs;
- WARN(skb_get_queue_mapping(skb) > priv->tx_cfg.num_queues,
+ WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
"skb queue index out of range");
tx = &priv->tx[skb_get_queue_mapping(skb)];
if (unlikely(gve_maybe_stop_tx(tx, skb))) {
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 44f9279cdde1..bb062b02fb85 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -102,6 +102,7 @@ config HNS3_HCLGE
tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support"
default m
depends on PCI_MSI
+ imply PTP_1588_CLOCK
help
This selects the HNS3_HCLGE network acceleration engine & its hardware
compatibility layer. The engine would be used in Hisilicon hip08 family of
@@ -130,6 +131,7 @@ config HNS3_ENET
default m
depends on 64BIT && PCI
depends on INET
+ select DIMLIB
help
This selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08
family of SoCs. This module depends upon HNAE3 driver to access the HNAE3
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index a2c17af57fde..0a6cda309b24 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -20,7 +20,7 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_API_NEGOTIATE, /* (VF -> PF) negotiate API version */
HCLGE_MBX_GET_QINFO, /* (VF -> PF) get queue config */
HCLGE_MBX_GET_QDEPTH, /* (VF -> PF) get queue depth */
- HCLGE_MBX_GET_TCINFO, /* (VF -> PF) get TC config */
+ HCLGE_MBX_GET_BASIC_INFO, /* (VF -> PF) get basic info */
HCLGE_MBX_GET_RETA, /* (VF -> PF) get RETA */
HCLGE_MBX_GET_RSS_KEY, /* (VF -> PF) get RSS key */
HCLGE_MBX_GET_MAC_ADDR, /* (VF -> PF) get MAC addr */
@@ -69,6 +69,7 @@ enum hclge_mbx_vlan_cfg_subcode {
HCLGE_MBX_VLAN_RX_OFF_CFG, /* set rx side vlan offload */
HCLGE_MBX_PORT_BASE_VLAN_CFG, /* set port based vlan configuration */
HCLGE_MBX_GET_PORT_BASE_VLAN_STATE, /* get port based vlan state */
+ HCLGE_MBX_ENABLE_VLAN_FILTER,
};
enum hclge_mbx_tbl_cfg_subcode {
@@ -85,6 +86,13 @@ struct hclge_ring_chain_param {
u8 int_gl_index;
};
+struct hclge_basic_info {
+ u8 hw_tc_map;
+ u8 rsv;
+ u16 mbx_api_version;
+ u32 pf_caps;
+};
+
struct hclgevf_mbx_resp_status {
struct mutex mbx_mutex; /* protects against contending sync cmd resp */
u32 origin_mbx_msg;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 57fa7fc97c69..0b202f4def83 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -91,7 +91,10 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_STASH_B,
HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B,
HNAE3_DEV_SUPPORT_PAUSE_B,
+ HNAE3_DEV_SUPPORT_RAS_IMP_B,
HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
+ HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
+ HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
};
#define hnae3_dev_fd_supported(hdev) \
@@ -127,6 +130,9 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_dev_phy_imp_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, (hdev)->ae_dev->caps)
+#define hnae3_dev_ras_imp_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_RAS_IMP_B, (hdev)->ae_dev->caps)
+
#define hnae3_dev_tqp_txrx_indep_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, (hdev)->ae_dev->caps)
@@ -145,18 +151,14 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_ae_dev_rxd_adv_layout_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, (ae_dev)->caps)
+enum HNAE3_PF_CAP_BITS {
+ HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
+};
#define ring_ptr_move_fw(ring, p) \
((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
#define ring_ptr_move_bw(ring, p) \
((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)
-enum hns_desc_type {
- DESC_TYPE_UNKNOWN,
- DESC_TYPE_SKB,
- DESC_TYPE_FRAGLIST_SKB,
- DESC_TYPE_PAGE,
-};
-
struct hnae3_handle;
struct hnae3_queue {
@@ -238,7 +240,6 @@ enum hnae3_reset_type {
HNAE3_FUNC_RESET,
HNAE3_GLOBAL_RESET,
HNAE3_IMP_RESET,
- HNAE3_UNKNOWN_RESET,
HNAE3_NONE_RESET,
HNAE3_MAX_RESET,
};
@@ -268,6 +269,7 @@ enum hnae3_dbg_cmd {
HNAE3_DBG_CMD_MAC_MC,
HNAE3_DBG_CMD_MNG_TBL,
HNAE3_DBG_CMD_LOOPBACK,
+ HNAE3_DBG_CMD_PTP_INFO,
HNAE3_DBG_CMD_INTERRUPT_INFO,
HNAE3_DBG_CMD_RESET_INFO,
HNAE3_DBG_CMD_IMP_INFO,
@@ -283,6 +285,7 @@ enum hnae3_dbg_cmd {
HNAE3_DBG_CMD_REG_TQP,
HNAE3_DBG_CMD_REG_MAC,
HNAE3_DBG_CMD_REG_DCB,
+ HNAE3_DBG_CMD_VLAN_CONFIG,
HNAE3_DBG_CMD_QUEUE_MAP,
HNAE3_DBG_CMD_RX_QUEUE_INFO,
HNAE3_DBG_CMD_TX_QUEUE_INFO,
@@ -516,6 +519,12 @@ struct hnae3_ae_dev {
* Check if any cls flower rule exist
* dbg_read_cmd
* Execute debugfs read command.
+ * set_tx_hwts_info
+ * Save information for 1588 tx packet
+ * get_rx_hwts
+ * Get 1588 rx hwstamp
+ * get_ts_info
+ * Get phc info
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -631,7 +640,7 @@ struct hnae3_ae_ops {
void (*get_mdix_mode)(struct hnae3_handle *handle,
u8 *tp_mdix_ctrl, u8 *tp_mdix);
- void (*enable_vlan_filter)(struct hnae3_handle *handle, bool enable);
+ int (*enable_vlan_filter)(struct hnae3_handle *handle, bool enable);
int (*set_vlan_filter)(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill);
int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid,
@@ -701,6 +710,12 @@ struct hnae3_ae_ops {
struct ethtool_link_ksettings *cmd);
int (*set_phy_link_ksettings)(struct hnae3_handle *handle,
const struct ethtool_link_ksettings *cmd);
+ bool (*set_tx_hwts_info)(struct hnae3_handle *handle,
+ struct sk_buff *skb);
+ void (*get_rx_hwts)(struct hnae3_handle *handle, struct sk_buff *skb,
+ u32 nsec, u32 sec);
+ int (*get_ts_info)(struct hnae3_handle *handle,
+ struct ethtool_ts_info *info);
};
struct hnae3_dcb_ops {
@@ -745,6 +760,7 @@ struct hnae3_knic_private_info {
u16 rx_buf_len;
u16 num_tx_desc;
u16 num_rx_desc;
+ u32 tx_spare_buf_size;
struct hnae3_tc_info tc_info;
@@ -783,7 +799,6 @@ struct hnae3_roce_private_info {
#define HNAE3_BPE BIT(2) /* broadcast promisc enable */
#define HNAE3_OVERFLOW_UPE BIT(3) /* unicast mac vlan overflow */
#define HNAE3_OVERFLOW_MPE BIT(4) /* multicast mac vlan overflow */
-#define HNAE3_VLAN_FLTR BIT(5) /* enable vlan filter */
#define HNAE3_UPE (HNAE3_USER_UPE | HNAE3_OVERFLOW_UPE)
#define HNAE3_MPE (HNAE3_USER_MPE | HNAE3_OVERFLOW_MPE)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 57ba5a16ad73..34b6cd904a1a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -309,6 +309,20 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.buf_len = HNS3_DBG_READ_LEN,
.init = hns3_dbg_common_file_init,
},
+ {
+ .name = "vlan_config",
+ .cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "ptp_info",
+ .cmd = HNAE3_DBG_CMD_PTP_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
};
static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
@@ -343,9 +357,18 @@ static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
.name = "support imp-controlled PHY",
.cap_bit = HNAE3_DEV_SUPPORT_PHY_IMP_B,
}, {
+ .name = "support imp-controlled RAS",
+ .cap_bit = HNAE3_DEV_SUPPORT_RAS_IMP_B,
+ }, {
.name = "support rxd advanced layout",
.cap_bit = HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
- },
+ }, {
+ .name = "support port vlan bypass",
+ .cap_bit = HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
+ }, {
+ .name = "support modify vlan filter state",
+ .cap_bit = HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
+ }
};
static void hns3_dbg_fill_content(char *content, u16 len,
@@ -369,6 +392,56 @@ static void hns3_dbg_fill_content(char *content, u16 len,
*pos++ = '\0';
}
+static const struct hns3_dbg_item tx_spare_info_items[] = {
+ { "QUEUE_ID", 2 },
+ { "COPYBREAK", 2 },
+ { "LEN", 7 },
+ { "NTU", 4 },
+ { "NTC", 4 },
+ { "LTC", 4 },
+ { "DMA", 17 },
+};
+
+static void hns3_dbg_tx_spare_info(struct hns3_enet_ring *ring, char *buf,
+ int len, u32 ring_num, int *pos)
+{
+ char data_str[ARRAY_SIZE(tx_spare_info_items)][HNS3_DBG_DATA_STR_LEN];
+ struct hns3_tx_spare *tx_spare = ring->tx_spare;
+ char *result[ARRAY_SIZE(tx_spare_info_items)];
+ char content[HNS3_DBG_INFO_LEN];
+ u32 i, j;
+
+ if (!tx_spare) {
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "tx spare buffer is not enabled\n");
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tx_spare_info_items); i++)
+ result[i] = &data_str[i][0];
+
+ *pos += scnprintf(buf + *pos, len - *pos, "tx spare buffer info\n");
+ hns3_dbg_fill_content(content, sizeof(content), tx_spare_info_items,
+ NULL, ARRAY_SIZE(tx_spare_info_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+
+ for (i = 0; i < ring_num; i++) {
+ j = 0;
+ sprintf(result[j++], "%8u", i);
+ sprintf(result[j++], "%9u", ring->tx_copybreak);
+ sprintf(result[j++], "%3u", tx_spare->len);
+ sprintf(result[j++], "%3u", tx_spare->next_to_use);
+ sprintf(result[j++], "%3u", tx_spare->next_to_clean);
+ sprintf(result[j++], "%3u", tx_spare->last_to_clean);
+ sprintf(result[j++], "%pad", &tx_spare->dma);
+ hns3_dbg_fill_content(content, sizeof(content),
+ tx_spare_info_items,
+ (const char **)result,
+ ARRAY_SIZE(tx_spare_info_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ }
+}
+
static const struct hns3_dbg_item rx_queue_info_items[] = {
{ "QUEUE_ID", 2 },
{ "BD_NUM", 2 },
@@ -377,6 +450,7 @@ static const struct hns3_dbg_item rx_queue_info_items[] = {
{ "HEAD", 2 },
{ "FBDNUM", 2 },
{ "PKTNUM", 2 },
+ { "COPYBREAK", 2 },
{ "RING_EN", 2 },
{ "RX_RING_EN", 2 },
{ "BASE_ADDR", 10 },
@@ -408,6 +482,7 @@ static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring,
sprintf(result[j++], "%6u", readl_relaxed(ring->tqp->io_base +
HNS3_RING_RX_RING_PKTNUM_RECORD_REG));
+ sprintf(result[j++], "%9u", ring->rx_copybreak);
sprintf(result[j++], "%7s", readl_relaxed(ring->tqp->io_base +
HNS3_RING_EN_REG) ? "on" : "off");
@@ -570,6 +645,8 @@ static int hns3_dbg_tx_queue_info(struct hnae3_handle *h,
pos += scnprintf(buf + pos, len - pos, "%s", content);
}
+ hns3_dbg_tx_spare_info(ring, buf, len, h->kinfo.num_tqps, &pos);
+
return 0;
}
@@ -1043,8 +1120,10 @@ int hns3_dbg_init(struct hnae3_handle *handle)
handle->hnae3_dbgfs);
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) {
- if (hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES &&
- ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
+ if ((hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_TM_NODES &&
+ ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) ||
+ (hns3_dbg_cmd[i].cmd == HNAE3_DBG_CMD_PTP_INFO &&
+ !test_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps)))
continue;
if (!hns3_dbg_cmd[i].init) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index de0e2d215879..51bbf5f760c5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -53,6 +53,19 @@ static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Network interface message level setting");
+static unsigned int tx_spare_buf_size;
+module_param(tx_spare_buf_size, uint, 0400);
+MODULE_PARM_DESC(tx_spare_buf_size, "Size used to allocate tx spare buffer");
+
+static unsigned int tx_sgl = 1;
+module_param(tx_sgl, uint, 0600);
+MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to optimize the IOMMU mapping");
+
+#define HNS3_SGL_SIZE(nfrag) (sizeof(struct scatterlist) * (nfrag) + \
+ sizeof(struct sg_table))
+#define HNS3_MAX_SGL_SIZE ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM),\
+ dma_get_cache_alignment())
+
#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
@@ -368,6 +381,7 @@ static irqreturn_t hns3_irq_handle(int irq, void *vector)
struct hns3_enet_tqp_vector *tqp_vector = vector;
napi_schedule_irqoff(&tqp_vector->napi);
+ tqp_vector->event_cnt++;
return IRQ_HANDLED;
}
@@ -471,6 +485,8 @@ static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
disable_irq(tqp_vector->vector_irq);
napi_disable(&tqp_vector->napi);
+ cancel_work_sync(&tqp_vector->rx_group.dim.work);
+ cancel_work_sync(&tqp_vector->tx_group.dim.work);
}
void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
@@ -536,22 +552,17 @@ static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal;
struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal;
+ struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal;
+ struct hns3_enet_coalesce *prx_coal = &priv->rx_coal;
- /* initialize the configuration for interrupt coalescing.
- * 1. GL (Interrupt Gap Limiter)
- * 2. RL (Interrupt Rate Limiter)
- * 3. QL (Interrupt Quantity Limiter)
- *
- * Default: enable interrupt coalescing self-adaptive and GL
- */
- tx_coal->adapt_enable = 1;
- rx_coal->adapt_enable = 1;
+ tx_coal->adapt_enable = ptx_coal->adapt_enable;
+ rx_coal->adapt_enable = prx_coal->adapt_enable;
- tx_coal->int_gl = HNS3_INT_GL_50K;
- rx_coal->int_gl = HNS3_INT_GL_50K;
+ tx_coal->int_gl = ptx_coal->int_gl;
+ rx_coal->int_gl = prx_coal->int_gl;
- rx_coal->flow_level = HNS3_FLOW_LOW;
- tx_coal->flow_level = HNS3_FLOW_LOW;
+ rx_coal->flow_level = prx_coal->flow_level;
+ tx_coal->flow_level = ptx_coal->flow_level;
/* device version above V3(include V3), GL can configure 1us
* unit, so uses 1us unit.
@@ -566,8 +577,8 @@ static void hns3_vector_coalesce_init(struct hns3_enet_tqp_vector *tqp_vector,
rx_coal->ql_enable = 1;
tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max;
- tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
- rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+ tx_coal->int_ql = ptx_coal->int_ql;
+ rx_coal->int_ql = prx_coal->int_ql;
}
}
@@ -910,13 +921,10 @@ static u8 hns3_get_netdev_flags(struct net_device *netdev)
{
u8 flags = 0;
- if (netdev->flags & IFF_PROMISC) {
+ if (netdev->flags & IFF_PROMISC)
flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
- } else {
- flags |= HNAE3_VLAN_FLTR;
- if (netdev->flags & IFF_ALLMULTI)
- flags |= HNAE3_USER_MPE;
- }
+ else if (netdev->flags & IFF_ALLMULTI)
+ flags = HNAE3_USER_MPE;
return flags;
}
@@ -946,23 +954,202 @@ void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
ops->request_update_promisc_mode(handle);
}
-void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
+static u32 hns3_tx_spare_space(struct hns3_enet_ring *ring)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- bool last_state;
+ struct hns3_tx_spare *tx_spare = ring->tx_spare;
+ u32 ntc, ntu;
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 &&
- h->ae_algo->ops->enable_vlan_filter) {
- last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
- if (enable != last_state) {
- netdev_info(netdev,
- "%s vlan filter\n",
- enable ? "enable" : "disable");
- h->ae_algo->ops->enable_vlan_filter(h, enable);
+ /* This smp_load_acquire() pairs with smp_store_release() in
+	 * hns3_tx_spare_update() called in the tx desc cleaning process.
+ */
+ ntc = smp_load_acquire(&tx_spare->last_to_clean);
+ ntu = tx_spare->next_to_use;
+
+ if (ntc > ntu)
+ return ntc - ntu - 1;
+
+	/* The free tx buffer is divided into two parts, so pick the
+ * larger one.
+ */
+ return (ntc > (tx_spare->len - ntu) ? ntc :
+ (tx_spare->len - ntu)) - 1;
+}
+
+static void hns3_tx_spare_update(struct hns3_enet_ring *ring)
+{
+ struct hns3_tx_spare *tx_spare = ring->tx_spare;
+
+ if (!tx_spare ||
+ tx_spare->last_to_clean == tx_spare->next_to_clean)
+ return;
+
+ /* This smp_store_release() pairs with smp_load_acquire() in
+	 * hns3_tx_spare_space() called in the xmit process.
+ */
+ smp_store_release(&tx_spare->last_to_clean,
+ tx_spare->next_to_clean);
+}
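The two helpers above form a single-producer/single-consumer pattern; a self-contained sketch of the same ordering contract, generic rather than hns3-specific:

#include <linux/minmax.h>
#include <linux/types.h>
#include <asm/barrier.h>

struct spsc_ring {
	u32 next_to_use;	/* advanced by the producer (xmit) only */
	u32 last_to_clean;	/* published by the consumer (clean) only */
	u32 len;
};

/* Producer side: the acquire guarantees that, once a new last_to_clean
 * is seen, the consumer is completely done with the reclaimed bytes.
 */
static u32 spsc_free_space(struct spsc_ring *r)
{
	u32 ntc = smp_load_acquire(&r->last_to_clean);
	u32 ntu = r->next_to_use;

	if (ntc > ntu)
		return ntc - ntu - 1;

	/* free space is split across the wrap; report the larger piece */
	return max(ntc, r->len - ntu) - 1;
}

/* Consumer side: the release makes all prior accesses to the reclaimed
 * area visible before the new index is published.
 */
static void spsc_publish_clean(struct spsc_ring *r, u32 new_ntc)
{
	smp_store_release(&r->last_to_clean, new_ntc);
}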
+
+static bool hns3_can_use_tx_bounce(struct hns3_enet_ring *ring,
+ struct sk_buff *skb,
+ u32 space)
+{
+ u32 len = skb->len <= ring->tx_copybreak ? skb->len :
+ skb_headlen(skb);
+
+ if (len > ring->tx_copybreak)
+ return false;
+
+ if (ALIGN(len, dma_get_cache_alignment()) > space) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_spare_full++;
+ u64_stats_update_end(&ring->syncp);
+ return false;
+ }
+
+ return true;
+}
+
+static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
+ struct sk_buff *skb,
+ u32 space)
+{
+ if (skb->len <= ring->tx_copybreak || !tx_sgl ||
+ (!skb_has_frag_list(skb) &&
+ skb_shinfo(skb)->nr_frags < tx_sgl))
+ return false;
+
+ if (space < HNS3_MAX_SGL_SIZE) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_spare_full++;
+ u64_stats_update_end(&ring->syncp);
+ return false;
+ }
+
+ return true;
+}
+
+static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring)
+{
+ struct hns3_tx_spare *tx_spare;
+ struct page *page;
+ u32 alloc_size;
+ dma_addr_t dma;
+ int order;
+
+ alloc_size = tx_spare_buf_size ? tx_spare_buf_size :
+ ring->tqp->handle->kinfo.tx_spare_buf_size;
+ if (!alloc_size)
+ return;
+
+ order = get_order(alloc_size);
+ tx_spare = devm_kzalloc(ring_to_dev(ring), sizeof(*tx_spare),
+ GFP_KERNEL);
+ if (!tx_spare) {
+		/* The driver still works without the tx spare buffer */
+ dev_warn(ring_to_dev(ring), "failed to allocate hns3_tx_spare\n");
+ return;
+ }
+
+ page = alloc_pages_node(dev_to_node(ring_to_dev(ring)),
+ GFP_KERNEL, order);
+ if (!page) {
+ dev_warn(ring_to_dev(ring), "failed to allocate tx spare pages\n");
+ devm_kfree(ring_to_dev(ring), tx_spare);
+ return;
+ }
+
+ dma = dma_map_page(ring_to_dev(ring), page, 0,
+ PAGE_SIZE << order, DMA_TO_DEVICE);
+ if (dma_mapping_error(ring_to_dev(ring), dma)) {
+ dev_warn(ring_to_dev(ring), "failed to map pages for tx spare\n");
+ put_page(page);
+ devm_kfree(ring_to_dev(ring), tx_spare);
+ return;
+ }
+
+ tx_spare->dma = dma;
+ tx_spare->buf = page_address(page);
+ tx_spare->len = PAGE_SIZE << order;
+ ring->tx_spare = tx_spare;
+}
+
+/* The caller must use hns3_tx_spare_space() to ensure there is enough
+ * spare buffer space before calling this function to allocate a tx buffer.
+ */
+static void *hns3_tx_spare_alloc(struct hns3_enet_ring *ring,
+ unsigned int size, dma_addr_t *dma,
+ u32 *cb_len)
+{
+ struct hns3_tx_spare *tx_spare = ring->tx_spare;
+ u32 ntu = tx_spare->next_to_use;
+
+ size = ALIGN(size, dma_get_cache_alignment());
+ *cb_len = size;
+
+	/* The tx spare buffer wraps around here because the space left at
+	 * the end of the freed tx buffer is not enough.
+ */
+ if (ntu + size > tx_spare->len) {
+ *cb_len += (tx_spare->len - ntu);
+ ntu = 0;
+ }
+
+ tx_spare->next_to_use = ntu + size;
+ if (tx_spare->next_to_use == tx_spare->len)
+ tx_spare->next_to_use = 0;
+
+ *dma = tx_spare->dma + ntu;
+
+ return tx_spare->buf + ntu;
+}
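A worked example of the wrap handling above, with illustrative numbers:

/* Illustrative numbers for the wrap case in hns3_tx_spare_alloc():
 *
 *   len = 4096, next_to_use = 4000, aligned size = 256
 *
 *   4000 + 256 > 4096, so the 96 tail bytes are skipped and charged
 *   to the caller: *cb_len = 256 + 96 = 352; the data itself starts
 *   at offset 0, next_to_use becomes 256 and *dma points at the base
 *   of the spare buffer.
 *
 * hns3_tx_spare_rollback(ring, 352) reverses this exactly: 352 > 256,
 * so next_to_use is restored to 4096 - (352 - 256) = 4000.
 */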
+
+static void hns3_tx_spare_rollback(struct hns3_enet_ring *ring, u32 len)
+{
+ struct hns3_tx_spare *tx_spare = ring->tx_spare;
+
+ if (len > tx_spare->next_to_use) {
+ len -= tx_spare->next_to_use;
+ tx_spare->next_to_use = tx_spare->len - len;
+ } else {
+ tx_spare->next_to_use -= len;
+ }
+}
+
+static void hns3_tx_spare_reclaim_cb(struct hns3_enet_ring *ring,
+ struct hns3_desc_cb *cb)
+{
+ struct hns3_tx_spare *tx_spare = ring->tx_spare;
+ u32 ntc = tx_spare->next_to_clean;
+ u32 len = cb->length;
+
+ tx_spare->next_to_clean += len;
+
+ if (tx_spare->next_to_clean >= tx_spare->len) {
+ tx_spare->next_to_clean -= tx_spare->len;
+
+ if (tx_spare->next_to_clean) {
+ ntc = 0;
+ len = tx_spare->next_to_clean;
}
}
+
+ /* This tx spare buffer is only really reclaimed after calling
+ * hns3_tx_spare_update(), so it is still safe to use the info in
+ * the tx buffer to do the dma sync or sg unmapping after
+	 * tx_spare->next_to_clean is moved forward.
+ */
+ if (cb->type & (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL)) {
+ dma_addr_t dma = tx_spare->dma + ntc;
+
+ dma_sync_single_for_cpu(ring_to_dev(ring), dma, len,
+ DMA_TO_DEVICE);
+ } else {
+ struct sg_table *sgt = tx_spare->buf + ntc;
+
+ dma_unmap_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents,
+ DMA_TO_DEVICE);
+ }
}
static int hns3_set_tso(struct sk_buff *skb, u32 *paylen_fdop_ol4cs,
@@ -1118,8 +1305,6 @@ static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
l4.udp->dest == htons(4790))))
return false;
- skb_checksum_help(skb);
-
return true;
}
@@ -1196,8 +1381,7 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
/* the stack computes the IP header already,
* driver calculate l4 checksum when not TSO.
*/
- skb_checksum_help(skb);
- return 0;
+ return skb_checksum_help(skb);
}
hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
@@ -1242,7 +1426,7 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
break;
case IPPROTO_UDP:
if (hns3_tunnel_csum_bug(skb))
- break;
+ return skb_checksum_help(skb);
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
@@ -1267,8 +1451,7 @@ static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
/* the stack computes the IP header already,
* driver calculate l4 checksum when not TSO.
*/
- skb_checksum_help(skb);
- return 0;
+ return skb_checksum_help(skb);
}
return 0;
@@ -1440,40 +1623,14 @@ out_hw_tx_csum:
return 0;
}
-static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
- unsigned int size, enum hns_desc_type type)
+static int hns3_fill_desc(struct hns3_enet_ring *ring, dma_addr_t dma,
+ unsigned int size)
{
#define HNS3_LIKELY_BD_NUM 1
- struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
- struct device *dev = ring_to_dev(ring);
- skb_frag_t *frag;
unsigned int frag_buf_num;
int k, sizeoflast;
- dma_addr_t dma;
-
- if (type == DESC_TYPE_FRAGLIST_SKB ||
- type == DESC_TYPE_SKB) {
- struct sk_buff *skb = (struct sk_buff *)priv;
-
- dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
- } else {
- frag = (skb_frag_t *)priv;
- dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
- }
-
- if (unlikely(dma_mapping_error(dev, dma))) {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
- return -ENOMEM;
- }
-
- desc_cb->priv = priv;
- desc_cb->length = size;
- desc_cb->dma = dma;
- desc_cb->type = type;
if (likely(size <= HNS3_MAX_BD_SIZE)) {
desc->addr = cpu_to_le64(dma);
@@ -1509,6 +1666,52 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
return frag_buf_num;
}
+static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
+ unsigned int type)
+{
+ struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+ struct device *dev = ring_to_dev(ring);
+ unsigned int size;
+ dma_addr_t dma;
+
+ if (type & (DESC_TYPE_FRAGLIST_SKB | DESC_TYPE_SKB)) {
+ struct sk_buff *skb = (struct sk_buff *)priv;
+
+ size = skb_headlen(skb);
+ if (!size)
+ return 0;
+
+ dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+ } else if (type & DESC_TYPE_BOUNCE_HEAD) {
+ /* Head data has been filled in hns3_handle_tx_bounce(),
+ * just return 0 here.
+ */
+ return 0;
+ } else {
+ skb_frag_t *frag = (skb_frag_t *)priv;
+
+ size = skb_frag_size(frag);
+ if (!size)
+ return 0;
+
+ dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+ }
+
+ if (unlikely(dma_mapping_error(dev, dma))) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+ return -ENOMEM;
+ }
+
+ desc_cb->priv = priv;
+ desc_cb->length = size;
+ desc_cb->dma = dma;
+ desc_cb->type = type;
+
+ return hns3_fill_desc(ring, dma, size);
+}
+
static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
unsigned int bd_num)
{
@@ -1732,6 +1935,7 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
for (i = 0; i < ring->desc_num; i++) {
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
+ struct hns3_desc_cb *desc_cb;
memset(desc, 0, sizeof(*desc));
@@ -1742,52 +1946,44 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
/* rollback one */
ring_ptr_move_bw(ring, next_to_use);
- if (!ring->desc_cb[ring->next_to_use].dma)
+ desc_cb = &ring->desc_cb[ring->next_to_use];
+
+ if (!desc_cb->dma)
continue;
/* unmap the descriptor dma address */
- if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB ||
- ring->desc_cb[ring->next_to_use].type ==
- DESC_TYPE_FRAGLIST_SKB)
- dma_unmap_single(dev,
- ring->desc_cb[ring->next_to_use].dma,
- ring->desc_cb[ring->next_to_use].length,
- DMA_TO_DEVICE);
- else if (ring->desc_cb[ring->next_to_use].length)
- dma_unmap_page(dev,
- ring->desc_cb[ring->next_to_use].dma,
- ring->desc_cb[ring->next_to_use].length,
+ if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB))
+ dma_unmap_single(dev, desc_cb->dma, desc_cb->length,
+ DMA_TO_DEVICE);
+ else if (desc_cb->type &
+ (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL))
+ hns3_tx_spare_rollback(ring, desc_cb->length);
+ else if (desc_cb->length)
+ dma_unmap_page(dev, desc_cb->dma, desc_cb->length,
DMA_TO_DEVICE);
- ring->desc_cb[ring->next_to_use].length = 0;
- ring->desc_cb[ring->next_to_use].dma = 0;
- ring->desc_cb[ring->next_to_use].type = DESC_TYPE_UNKNOWN;
+ desc_cb->length = 0;
+ desc_cb->dma = 0;
+ desc_cb->type = DESC_TYPE_UNKNOWN;
}
}
static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
- struct sk_buff *skb, enum hns_desc_type type)
+ struct sk_buff *skb, unsigned int type)
{
- unsigned int size = skb_headlen(skb);
struct sk_buff *frag_skb;
int i, ret, bd_num = 0;
- if (size) {
- ret = hns3_fill_desc(ring, skb, size, type);
- if (unlikely(ret < 0))
- return ret;
+ ret = hns3_map_and_fill_desc(ring, skb, type);
+ if (unlikely(ret < 0))
+ return ret;
- bd_num += ret;
- }
+ bd_num += ret;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- size = skb_frag_size(frag);
- if (!size)
- continue;
-
- ret = hns3_fill_desc(ring, frag, size, DESC_TYPE_PAGE);
+ ret = hns3_map_and_fill_desc(ring, frag, DESC_TYPE_PAGE);
if (unlikely(ret < 0))
return ret;
@@ -1827,6 +2023,153 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
WRITE_ONCE(ring->last_to_use, ring->next_to_use);
}
+static void hns3_tsyn(struct net_device *netdev, struct sk_buff *skb,
+ struct hns3_desc *desc)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!(h->ae_algo->ops->set_tx_hwts_info &&
+ h->ae_algo->ops->set_tx_hwts_info(h, skb)))
+ return;
+
+ desc->tx.bdtp_fe_sc_vld_ra_ri |= cpu_to_le16(BIT(HNS3_TXD_TSYN_B));
+}
+
+static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring,
+ struct sk_buff *skb)
+{
+ struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+ unsigned int type = DESC_TYPE_BOUNCE_HEAD;
+ unsigned int size = skb_headlen(skb);
+ dma_addr_t dma;
+ int bd_num = 0;
+ u32 cb_len;
+ void *buf;
+ int ret;
+
+ if (skb->len <= ring->tx_copybreak) {
+ size = skb->len;
+ type = DESC_TYPE_BOUNCE_ALL;
+ }
+
+	/* hns3_can_use_tx_bounce() has already been called to ensure that
+	 * the allocation below can always succeed.
+ */
+ buf = hns3_tx_spare_alloc(ring, size, &dma, &cb_len);
+
+ ret = skb_copy_bits(skb, 0, buf, size);
+ if (unlikely(ret < 0)) {
+ hns3_tx_spare_rollback(ring, cb_len);
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.copy_bits_err++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ }
+
+ desc_cb->priv = skb;
+ desc_cb->length = cb_len;
+ desc_cb->dma = dma;
+ desc_cb->type = type;
+
+ bd_num += hns3_fill_desc(ring, dma, size);
+
+ if (type == DESC_TYPE_BOUNCE_HEAD) {
+ ret = hns3_fill_skb_to_desc(ring, skb,
+ DESC_TYPE_BOUNCE_HEAD);
+ if (unlikely(ret < 0))
+ return ret;
+
+ bd_num += ret;
+ }
+
+ dma_sync_single_for_device(ring_to_dev(ring), dma, size,
+ DMA_TO_DEVICE);
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_bounce++;
+ u64_stats_update_end(&ring->syncp);
+ return bd_num;
+}
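The two bounce flavours above, summarized:

/* Summary of the bounce modes chosen above:
 *
 *   DESC_TYPE_BOUNCE_ALL:  skb->len <= tx_copybreak; the whole skb is
 *       copied into the spare buffer and one contiguous pre-mapped DMA
 *       region is filled, so no per-frag mapping is needed.
 *
 *   DESC_TYPE_BOUNCE_HEAD: only the linear head is copied; the frags
 *       are still mapped individually via hns3_fill_skb_to_desc()
 *       (hns3_map_and_fill_desc() returns 0 for the already-filled
 *       head in this case).
 */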
+
+static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
+ struct sk_buff *skb)
+{
+ struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+ u32 nfrag = skb_shinfo(skb)->nr_frags + 1;
+ struct sg_table *sgt;
+ int i, bd_num = 0;
+ dma_addr_t dma;
+ u32 cb_len;
+ int nents;
+
+ if (skb_has_frag_list(skb))
+ nfrag = HNS3_MAX_TSO_BD_NUM;
+
+	/* hns3_can_use_tx_sgl() has already been called to ensure that
+	 * the allocation below can always succeed.
+ */
+ sgt = hns3_tx_spare_alloc(ring, HNS3_SGL_SIZE(nfrag),
+ &dma, &cb_len);
+
+	/* the scatterlist array immediately follows the sg table */
+ sgt->sgl = (struct scatterlist *)(sgt + 1);
+ sg_init_table(sgt->sgl, nfrag);
+ nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len);
+ if (unlikely(nents < 0)) {
+ hns3_tx_spare_rollback(ring, cb_len);
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.skb2sgl_err++;
+ u64_stats_update_end(&ring->syncp);
+ return -ENOMEM;
+ }
+
+ sgt->orig_nents = nents;
+ sgt->nents = dma_map_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents,
+ DMA_TO_DEVICE);
+ if (unlikely(!sgt->nents)) {
+ hns3_tx_spare_rollback(ring, cb_len);
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.map_sg_err++;
+ u64_stats_update_end(&ring->syncp);
+ return -ENOMEM;
+ }
+
+ desc_cb->priv = skb;
+ desc_cb->length = cb_len;
+ desc_cb->dma = dma;
+ desc_cb->type = DESC_TYPE_SGL_SKB;
+
+ for (i = 0; i < sgt->nents; i++)
+ bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i),
+ sg_dma_len(sgt->sgl + i));
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_sgl++;
+ u64_stats_update_end(&ring->syncp);
+
+ return bd_num;
+}
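The spare-buffer region allocated above holds the sg_table and its scatterlist back to back, matching HNS3_SGL_SIZE():

/* Layout of one SGL allocation inside the tx spare buffer, matching
 * HNS3_SGL_SIZE(nfrag) = sizeof(struct sg_table) +
 *                        nfrag * sizeof(struct scatterlist):
 *
 *   +-----------------+--------------------------------------+
 *   | struct sg_table | struct scatterlist sgl[nfrag]        |
 *   +-----------------+--------------------------------------+
 *   ^ sgt              ^ sgt->sgl = (struct scatterlist *)(sgt + 1)
 *
 * The whole region lives at tx_spare->buf + ntu and is reclaimed in
 * hns3_tx_spare_reclaim_cb(), which uses the same pointer to call
 * dma_unmap_sg() on the saved scatterlist.
 */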
+
+static int hns3_handle_desc_filling(struct hns3_enet_ring *ring,
+ struct sk_buff *skb)
+{
+ u32 space;
+
+ if (!ring->tx_spare)
+ goto out;
+
+ space = hns3_tx_spare_space(ring);
+
+ if (hns3_can_use_tx_sgl(ring, skb, space))
+ return hns3_handle_tx_sgl(ring, skb);
+
+ if (hns3_can_use_tx_bounce(ring, skb, space))
+ return hns3_handle_tx_bounce(ring, skb);
+
+out:
+ return hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
+}
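The strategy selection above, restated compactly:

/* hns3_handle_desc_filling() strategy selection:
 *
 *   1. no tx spare buffer           -> plain per-frag DMA mapping
 *                                      (DESC_TYPE_SKB path)
 *   2. large skb with many frags    -> hns3_handle_tx_sgl(): one
 *      or a frag list (see tx_sgl)     dma_map_sg() over a spare-
 *                                      buffer scatterlist
 *   3. small skb or small head      -> hns3_handle_tx_bounce():
 *      (see tx_copybreak)              memcpy into the pre-mapped
 *                                      spare buffer, no mapping in
 *                                      the hot path
 *   4. spare buffer too full        -> fall back to 1 and bump
 *                                      tx_spare_full
 */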
+
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -1873,16 +2216,22 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
* zero, which is unlikely, and 'ret > 0' means how many tx desc
* need to be notified to the hw.
*/
- ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
+ ret = hns3_handle_desc_filling(ring, skb);
if (unlikely(ret <= 0))
goto fill_err;
pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
(ring->desc_num - 1);
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ hns3_tsyn(netdev, skb, &ring->desc[pre_ntu]);
+
ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
cpu_to_le16(BIT(HNS3_TXD_FE_B));
trace_hns3_tx_desc(ring, pre_ntu);
+ skb_tx_timestamp(skb);
+
/* Complete translate all packets */
dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
doorbell = __netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes,
@@ -1986,6 +2335,14 @@ static int hns3_nic_set_features(struct net_device *netdev,
return -EINVAL;
}
+ if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ h->ae_algo->ops->enable_vlan_filter) {
+ enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
+ ret = h->ae_algo->ops->enable_vlan_filter(h, enable);
+ if (ret)
+ return ret;
+ }
+
netdev->features = features;
return 0;
}
@@ -2061,6 +2418,9 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
tx_drop += ring->stats.tx_tso_err;
tx_drop += ring->stats.over_max_recursion;
tx_drop += ring->stats.hw_limitation;
+ tx_drop += ring->stats.copy_bits_err;
+ tx_drop += ring->stats.skb2sgl_err;
+ tx_drop += ring->stats.map_sg_err;
tx_errors += ring->stats.sw_err_cnt;
tx_errors += ring->stats.tx_vlan_err;
tx_errors += ring->stats.tx_l4_proto_err;
@@ -2068,6 +2428,9 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
tx_errors += ring->stats.tx_tso_err;
tx_errors += ring->stats.over_max_recursion;
tx_errors += ring->stats.hw_limitation;
+ tx_errors += ring->stats.copy_bits_err;
+ tx_errors += ring->stats.skb2sgl_err;
+ tx_errors += ring->stats.map_sg_err;
} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
/* fetch the rx stats */
@@ -2831,6 +3194,9 @@ static void hns3_set_default_feature(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_TC;
netdev->features |= NETIF_F_HW_TC;
}
+
+ if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
@@ -2858,7 +3224,8 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
static void hns3_free_buffer(struct hns3_enet_ring *ring,
struct hns3_desc_cb *cb, int budget)
{
- if (cb->type == DESC_TYPE_SKB)
+ if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_HEAD |
+ DESC_TYPE_BOUNCE_ALL | DESC_TYPE_SGL_SKB))
napi_consume_skb(cb->priv, budget);
else if (!HNAE3_IS_TX_RING(ring) && cb->pagecnt_bias)
__page_frag_cache_drain(cb->priv, cb->pagecnt_bias);
@@ -2879,12 +3246,15 @@ static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
struct hns3_desc_cb *cb)
{
- if (cb->type == DESC_TYPE_SKB || cb->type == DESC_TYPE_FRAGLIST_SKB)
+ if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB))
dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
ring_to_dma_dir(ring));
- else if (cb->length)
+ else if ((cb->type & DESC_TYPE_PAGE) && cb->length)
dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
ring_to_dma_dir(ring));
+ else if (cb->type & (DESC_TYPE_BOUNCE_ALL | DESC_TYPE_BOUNCE_HEAD |
+ DESC_TYPE_SGL_SKB))
+ hns3_tx_spare_reclaim_cb(ring, cb);
}
static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
@@ -3036,7 +3406,9 @@ static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
desc_cb = &ring->desc_cb[ntc];
- if (desc_cb->type == DESC_TYPE_SKB) {
+ if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_ALL |
+ DESC_TYPE_BOUNCE_HEAD |
+ DESC_TYPE_SGL_SKB)) {
(*pkts)++;
(*bytes) += desc_cb->send_bytes;
}
@@ -3059,6 +3431,9 @@ static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
* ring_space called by hns3_nic_net_xmit.
*/
smp_store_release(&ring->next_to_clean, ntc);
+
+ hns3_tx_spare_update(ring);
+
return true;
}
@@ -3150,7 +3525,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
{
- return (page_count(cb->priv) - cb->pagecnt_bias) == 1;
+ return page_count(cb->priv) == cb->pagecnt_bias;
}
static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
@@ -3158,40 +3533,62 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
struct hns3_desc_cb *desc_cb)
{
struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
+ u32 frag_offset = desc_cb->page_offset + pull_len;
int size = le16_to_cpu(desc->rx.size);
u32 truesize = hns3_buf_size(ring);
+ u32 frag_size = size - pull_len;
- desc_cb->pagecnt_bias--;
- skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
- size - pull_len, truesize);
+	/* Avoid reusing a remote or pfmemalloc page */
+ if (unlikely(!dev_page_is_reusable(desc_cb->priv)))
+ goto out;
- /* Avoid re-using remote and pfmemalloc pages, or the stack is still
- * using the page when page_offset rollback to zero, flag default
- * unreuse
+	/* The stack is not using the page and the current page_offset is
+	 * non-zero: the buffer can be reused starting from offset zero.
*/
- if (!dev_page_is_reusable(desc_cb->priv) ||
- (!desc_cb->page_offset && !hns3_can_reuse_page(desc_cb))) {
- __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
- return;
- }
+ if (desc_cb->page_offset && hns3_can_reuse_page(desc_cb)) {
+ desc_cb->page_offset = 0;
+ desc_cb->reuse_flag = 1;
+ } else if (desc_cb->page_offset + truesize * 2 <=
+ hns3_page_size(ring)) {
+ desc_cb->page_offset += truesize;
+ desc_cb->reuse_flag = 1;
+ } else if (frag_size <= ring->rx_copybreak) {
+ void *frag = napi_alloc_frag(frag_size);
- /* Move offset up to the next cache line */
- desc_cb->page_offset += truesize;
+ if (unlikely(!frag)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.frag_alloc_err++;
+ u64_stats_update_end(&ring->syncp);
+
+ hns3_rl_err(ring_to_netdev(ring),
+ "failed to allocate rx frag\n");
+ goto out;
+ }
- if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) {
- desc_cb->reuse_flag = 1;
- } else if (hns3_can_reuse_page(desc_cb)) {
desc_cb->reuse_flag = 1;
- desc_cb->page_offset = 0;
- } else if (desc_cb->pagecnt_bias) {
- __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
+ memcpy(frag, desc_cb->buf + frag_offset, frag_size);
+ skb_add_rx_frag(skb, i, virt_to_page(frag),
+ offset_in_page(frag), frag_size, frag_size);
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.frag_alloc++;
+ u64_stats_update_end(&ring->syncp);
return;
}
+out:
+ desc_cb->pagecnt_bias--;
+
if (unlikely(!desc_cb->pagecnt_bias)) {
page_ref_add(desc_cb->priv, USHRT_MAX);
desc_cb->pagecnt_bias = USHRT_MAX;
}
+
+ skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset,
+ frag_size, truesize);
+
+ if (unlikely(!desc_cb->reuse_flag))
+ __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
}
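The reuse test above relies on the pagecnt_bias scheme; the invariant in isolation:

/* pagecnt_bias invariant behind hns3_can_reuse_page():
 *
 *   page_ref_add(page, USHRT_MAX);  bias = USHRT_MAX;  // driver refs
 *   handing a frag to the stack:    bias--;            // one ref moves
 *
 * The total page_count is the driver bias plus refs held by the stack,
 * so page_count(page) == bias exactly when the stack has returned every
 * frag and the page may be recycled. When the bias reaches zero, the
 * refs are topped up in one bulk page_ref_add() rather than one
 * page_ref per rx buffer.
 */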
static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
@@ -3602,6 +3999,15 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
ol_info = le32_to_cpu(desc->rx.ol_info);
csum = le16_to_cpu(desc->csum);
+ if (unlikely(bd_base_info & BIT(HNS3_RXD_TS_VLD_B))) {
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u32 nsec = le32_to_cpu(desc->ts_nsec);
+ u32 sec = le32_to_cpu(desc->ts_sec);
+
+ if (h->ae_algo->ops->get_rx_hwts)
+ h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec);
+ }
+
/* Based on hw strategy, the tag offloaded will be stored at
* ot_vlan_tag in two layer tag case, and stored at vlan_tag
* in one layer tag case.
@@ -3772,139 +4178,30 @@ out:
return recv_pkts;
}
-static bool hns3_get_new_flow_lvl(struct hns3_enet_ring_group *ring_group)
-{
-#define HNS3_RX_LOW_BYTE_RATE 10000
-#define HNS3_RX_MID_BYTE_RATE 20000
-#define HNS3_RX_ULTRA_PACKET_RATE 40
-
- enum hns3_flow_level_range new_flow_level;
- struct hns3_enet_tqp_vector *tqp_vector;
- int packets_per_msecs, bytes_per_msecs;
- u32 time_passed_ms;
-
- tqp_vector = ring_group->ring->tqp_vector;
- time_passed_ms =
- jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
- if (!time_passed_ms)
- return false;
-
- do_div(ring_group->total_packets, time_passed_ms);
- packets_per_msecs = ring_group->total_packets;
-
- do_div(ring_group->total_bytes, time_passed_ms);
- bytes_per_msecs = ring_group->total_bytes;
-
- new_flow_level = ring_group->coal.flow_level;
-
- /* Simple throttlerate management
- * 0-10MB/s lower (50000 ints/s)
- * 10-20MB/s middle (20000 ints/s)
- * 20-1249MB/s high (18000 ints/s)
- * > 40000pps ultra (8000 ints/s)
- */
- switch (new_flow_level) {
- case HNS3_FLOW_LOW:
- if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
- new_flow_level = HNS3_FLOW_MID;
- break;
- case HNS3_FLOW_MID:
- if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
- new_flow_level = HNS3_FLOW_HIGH;
- else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
- new_flow_level = HNS3_FLOW_LOW;
- break;
- case HNS3_FLOW_HIGH:
- case HNS3_FLOW_ULTRA:
- default:
- if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
- new_flow_level = HNS3_FLOW_MID;
- break;
- }
-
- if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
- &tqp_vector->rx_group == ring_group)
- new_flow_level = HNS3_FLOW_ULTRA;
-
- ring_group->total_bytes = 0;
- ring_group->total_packets = 0;
- ring_group->coal.flow_level = new_flow_level;
-
- return true;
-}
-
-static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
+static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
{
- struct hns3_enet_tqp_vector *tqp_vector;
- u16 new_int_gl;
-
- if (!ring_group->ring)
- return false;
-
- tqp_vector = ring_group->ring->tqp_vector;
- if (!tqp_vector->last_jiffies)
- return false;
-
- if (ring_group->total_packets == 0) {
- ring_group->coal.int_gl = HNS3_INT_GL_50K;
- ring_group->coal.flow_level = HNS3_FLOW_LOW;
- return true;
- }
-
- if (!hns3_get_new_flow_lvl(ring_group))
- return false;
+ struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
+ struct dim_sample sample = {};
- new_int_gl = ring_group->coal.int_gl;
- switch (ring_group->coal.flow_level) {
- case HNS3_FLOW_LOW:
- new_int_gl = HNS3_INT_GL_50K;
- break;
- case HNS3_FLOW_MID:
- new_int_gl = HNS3_INT_GL_20K;
- break;
- case HNS3_FLOW_HIGH:
- new_int_gl = HNS3_INT_GL_18K;
- break;
- case HNS3_FLOW_ULTRA:
- new_int_gl = HNS3_INT_GL_8K;
- break;
- default:
- break;
- }
+ if (!rx_group->coal.adapt_enable)
+ return;
- if (new_int_gl != ring_group->coal.int_gl) {
- ring_group->coal.int_gl = new_int_gl;
- return true;
- }
- return false;
+ dim_update_sample(tqp_vector->event_cnt, rx_group->total_packets,
+ rx_group->total_bytes, &sample);
+ net_dim(&rx_group->dim, sample);
}
-static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
+static void hns3_update_tx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
{
- struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
- bool rx_update, tx_update;
+ struct dim_sample sample = {};
- /* update param every 1000ms */
- if (time_before(jiffies,
- tqp_vector->last_jiffies + msecs_to_jiffies(1000)))
+ if (!tx_group->coal.adapt_enable)
return;
- if (rx_group->coal.adapt_enable) {
- rx_update = hns3_get_new_int_gl(rx_group);
- if (rx_update)
- hns3_set_vector_coalesce_rx_gl(tqp_vector,
- rx_group->coal.int_gl);
- }
-
- if (tx_group->coal.adapt_enable) {
- tx_update = hns3_get_new_int_gl(tx_group);
- if (tx_update)
- hns3_set_vector_coalesce_tx_gl(tqp_vector,
- tx_group->coal.int_gl);
- }
-
- tqp_vector->last_jiffies = jiffies;
+ dim_update_sample(tqp_vector->event_cnt, tx_group->total_packets,
+ tx_group->total_bytes, &sample);
+ net_dim(&tx_group->dim, sample);
}
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
@@ -3949,7 +4246,9 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
if (napi_complete(napi) &&
likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
- hns3_update_new_int_gl(tqp_vector);
+ hns3_update_rx_int_coalesce(tqp_vector);
+ hns3_update_tx_int_coalesce(tqp_vector);
+
hns3_mask_vector_irq(tqp_vector, 1);
}
@@ -4080,6 +4379,54 @@ static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
}
}
+static void hns3_rx_dim_work(struct work_struct *work)
+{
+ struct dim *dim = container_of(work, struct dim, work);
+ struct hns3_enet_ring_group *group = container_of(dim,
+ struct hns3_enet_ring_group, dim);
+ struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector;
+ struct dim_cq_moder cur_moder =
+ net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+
+ hns3_set_vector_coalesce_rx_gl(group->ring->tqp_vector, cur_moder.usec);
+ tqp_vector->rx_group.coal.int_gl = cur_moder.usec;
+
+ if (cur_moder.pkts < tqp_vector->rx_group.coal.int_ql_max) {
+ hns3_set_vector_coalesce_rx_ql(tqp_vector, cur_moder.pkts);
+ tqp_vector->rx_group.coal.int_ql = cur_moder.pkts;
+ }
+
+ dim->state = DIM_START_MEASURE;
+}
+
+static void hns3_tx_dim_work(struct work_struct *work)
+{
+ struct dim *dim = container_of(work, struct dim, work);
+ struct hns3_enet_ring_group *group = container_of(dim,
+ struct hns3_enet_ring_group, dim);
+ struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector;
+ struct dim_cq_moder cur_moder =
+ net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
+
+ hns3_set_vector_coalesce_tx_gl(tqp_vector, cur_moder.usec);
+ tqp_vector->tx_group.coal.int_gl = cur_moder.usec;
+
+ if (cur_moder.pkts < tqp_vector->tx_group.coal.int_ql_max) {
+ hns3_set_vector_coalesce_tx_ql(tqp_vector, cur_moder.pkts);
+ tqp_vector->tx_group.coal.int_ql = cur_moder.pkts;
+ }
+
+ dim->state = DIM_START_MEASURE;
+}
+
+static void hns3_nic_init_dim(struct hns3_enet_tqp_vector *tqp_vector)
+{
+ INIT_WORK(&tqp_vector->rx_group.dim.work, hns3_rx_dim_work);
+ tqp_vector->rx_group.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ INIT_WORK(&tqp_vector->tx_group.dim.work, hns3_tx_dim_work);
+ tqp_vector->tx_group.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+}
+
static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
@@ -4093,6 +4440,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
tqp_vector = &priv->tqp_vector[i];
hns3_vector_coalesce_init_hw(tqp_vector, priv);
tqp_vector->num_tqps = 0;
+ hns3_nic_init_dim(tqp_vector);
}
for (i = 0; i < h->kinfo.num_tqps; i++) {
@@ -4149,6 +4497,34 @@ map_ring_fail:
return ret;
}
+static void hns3_nic_init_coal_cfg(struct hns3_nic_priv *priv)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev);
+ struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
+ struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
+
+ /* initialize the configuration for interrupt coalescing.
+ * 1. GL (Interrupt Gap Limiter)
+ * 2. RL (Interrupt Rate Limiter)
+ * 3. QL (Interrupt Quantity Limiter)
+ *
+ * Default: enable interrupt coalescing self-adaptive and GL
+ */
+ tx_coal->adapt_enable = 1;
+ rx_coal->adapt_enable = 1;
+
+ tx_coal->int_gl = HNS3_INT_GL_50K;
+ rx_coal->int_gl = HNS3_INT_GL_50K;
+
+ rx_coal->flow_level = HNS3_FLOW_LOW;
+ tx_coal->flow_level = HNS3_FLOW_LOW;
+
+ if (ae_dev->dev_specs.int_ql_max) {
+ tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+ rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG;
+ }
+}
+
static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
{
struct hnae3_handle *h = priv->ae_handle;
@@ -4260,10 +4636,13 @@ static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
ring = &priv->ring[q->tqp_index];
desc_num = priv->ae_handle->kinfo.num_tx_desc;
ring->queue_index = q->tqp_index;
+ ring->tx_copybreak = priv->tx_copybreak;
+ ring->last_to_use = 0;
} else {
ring = &priv->ring[q->tqp_index + queue_num];
desc_num = priv->ae_handle->kinfo.num_rx_desc;
ring->queue_index = q->tqp_index;
+ ring->rx_copybreak = priv->rx_copybreak;
}
hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
@@ -4277,7 +4656,6 @@ static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
ring->desc_num = desc_num;
ring->next_to_use = 0;
ring->next_to_clean = 0;
- ring->last_to_use = 0;
}
static void hns3_queue_to_ring(struct hnae3_queue *tqp,
@@ -4337,6 +4715,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
ret = hns3_alloc_ring_buffers(ring);
if (ret)
goto out_with_desc;
+ } else {
+ hns3_init_tx_spare_buffer(ring);
}
return 0;
@@ -4359,9 +4739,18 @@ void hns3_fini_ring(struct hns3_enet_ring *ring)
ring->next_to_use = 0;
ring->last_to_use = 0;
ring->pending_buf = 0;
- if (ring->skb) {
+ if (!HNAE3_IS_TX_RING(ring) && ring->skb) {
dev_kfree_skb_any(ring->skb);
ring->skb = NULL;
+ } else if (HNAE3_IS_TX_RING(ring) && ring->tx_spare) {
+ struct hns3_tx_spare *tx_spare = ring->tx_spare;
+
+ dma_unmap_page(ring_to_dev(ring), tx_spare->dma, tx_spare->len,
+ DMA_TO_DEVICE);
+ free_pages((unsigned long)tx_spare->buf,
+ get_order(tx_spare->len));
+ devm_kfree(ring_to_dev(ring), tx_spare);
+ ring->tx_spare = NULL;
}
}
@@ -4600,6 +4989,8 @@ static int hns3_client_init(struct hnae3_handle *handle)
goto out_get_ring_cfg;
}
+ hns3_nic_init_coal_cfg(priv);
+
ret = hns3_nic_alloc_vector_data(priv);
if (ret) {
ret = -ENOMEM;
@@ -4622,12 +5013,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
if (ret)
goto out_init_phy;
- ret = register_netdev(netdev);
- if (ret) {
- dev_err(priv->dev, "probe register netdev fail!\n");
- goto out_reg_netdev_fail;
- }
-
/* the device can work without cpu rmap, only aRFS needs it */
ret = hns3_set_rx_cpu_rmap(netdev);
if (ret)
@@ -4668,17 +5053,23 @@ static int hns3_client_init(struct hnae3_handle *handle)
if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags);
+ ret = register_netdev(netdev);
+ if (ret) {
+ dev_err(priv->dev, "probe register netdev fail!\n");
+ goto out_reg_netdev_fail;
+ }
+
if (netif_msg_drv(handle))
hns3_info_show(priv);
return ret;
+out_reg_netdev_fail:
+ hns3_dbg_uninit(handle);
out_client_start:
hns3_free_rx_cpu_rmap(netdev);
hns3_nic_uninit_irq(priv);
out_init_irq_fail:
- unregister_netdev(netdev);
-out_reg_netdev_fail:
hns3_uninit_phy(netdev);
out_init_phy:
hns3_uninit_all_ring(priv);
@@ -4884,31 +5275,6 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
return 0;
}
-static void hns3_store_coal(struct hns3_nic_priv *priv)
-{
- /* ethtool only support setting and querying one coal
- * configuration for now, so save the vector 0' coal
- * configuration here in order to restore it.
- */
- memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
- sizeof(struct hns3_enet_coalesce));
- memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
- sizeof(struct hns3_enet_coalesce));
-}
-
-static void hns3_restore_coal(struct hns3_nic_priv *priv)
-{
- u16 vector_num = priv->vector_num;
- int i;
-
- for (i = 0; i < vector_num; i++) {
- memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
- sizeof(struct hns3_enet_coalesce));
- memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
- sizeof(struct hns3_enet_coalesce));
- }
-}
-
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
@@ -4967,8 +5333,6 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
if (ret)
goto err_put_ring;
- hns3_restore_coal(priv);
-
ret = hns3_nic_init_vector_data(priv);
if (ret)
goto err_dealloc_vector;
@@ -5034,8 +5398,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
hns3_nic_uninit_vector_data(priv);
- hns3_store_coal(priv);
-
hns3_nic_dealloc_vector_data(priv);
hns3_uninit_all_ring(priv);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 79ff2fa61d47..15af3d93857b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -4,6 +4,7 @@
#ifndef __HNS3_ENET_H
#define __HNS3_ENET_H
+#include <linux/dim.h>
#include <linux/if_vlan.h>
#include "hnae3.h"
@@ -121,8 +122,9 @@ enum hns3_nic_state {
#define HNS3_RXD_LUM_B 9
#define HNS3_RXD_CRCP_B 10
#define HNS3_RXD_L3L4P_B 11
-#define HNS3_RXD_TSIND_S 12
-#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S)
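+/* hw timestamp index (2 bits) and its valid bit in the rx descriptor */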
+#define HNS3_RXD_TSIDX_S 12
+#define HNS3_RXD_TSIDX_M (0x3 << HNS3_RXD_TSIDX_S)
+#define HNS3_RXD_TS_VLD_B 14
#define HNS3_RXD_LKBK_B 15
#define HNS3_RXD_GRO_SIZE_S 16
#define HNS3_RXD_GRO_SIZE_M (0x3fff << HNS3_RXD_GRO_SIZE_S)
@@ -239,6 +241,10 @@ struct __packed hns3_desc {
union {
__le64 addr;
__le16 csum;
+ struct {
+ __le32 ts_nsec;
+ __le32 ts_sec;
+ };
};
union {
struct {
@@ -293,6 +299,16 @@ struct __packed hns3_desc {
};
};
+enum hns3_desc_type {
+ DESC_TYPE_UNKNOWN = 0,
+ DESC_TYPE_SKB = 1 << 0,
+ DESC_TYPE_FRAGLIST_SKB = 1 << 1,
+ DESC_TYPE_PAGE = 1 << 2,
+ DESC_TYPE_BOUNCE_ALL = 1 << 3,
+ DESC_TYPE_BOUNCE_HEAD = 1 << 4,
+ DESC_TYPE_SGL_SKB = 1 << 5,
+};
+
struct hns3_desc_cb {
dma_addr_t dma; /* dma address of this desc */
void *buf; /* cpu addr for a desc */
@@ -392,6 +408,12 @@ struct ring_stats {
u64 tx_tso_err;
u64 over_max_recursion;
u64 hw_limitation;
+ u64 tx_bounce;
+ u64 tx_spare_full;
+ u64 copy_bits_err;
+ u64 tx_sgl;
+ u64 skb2sgl_err;
+ u64 map_sg_err;
};
struct {
u64 rx_pkts;
@@ -405,11 +427,22 @@ struct ring_stats {
u64 csum_complete;
u64 rx_multicast;
u64 non_reuse_pg;
+ u64 frag_alloc_err;
+ u64 frag_alloc;
};
__le16 csum;
};
};
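+/* per-tx-ring spare buffer used to bounce small or fragmented skbs
+ * (see tx_copybreak and the tx_bounce/tx_sgl stats above)
+ */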
+struct hns3_tx_spare {
+ dma_addr_t dma;
+ void *buf;
+ u32 next_to_use;
+ u32 next_to_clean;
+ u32 last_to_clean;
+ u32 len;
+};
+
struct hns3_enet_ring {
struct hns3_desc *desc; /* dma map address space */
struct hns3_desc_cb *desc_cb;
@@ -432,18 +465,29 @@ struct hns3_enet_ring {
* next_to_use
*/
int next_to_clean;
- union {
- int last_to_use; /* last idx used by xmit */
- u32 pull_len; /* memcpy len for current rx packet */
- };
- u32 frag_num;
- void *va; /* first buffer address for current packet */
-
u32 flag; /* ring attribute */
int pending_buf;
- struct sk_buff *skb;
- struct sk_buff *tail_skb;
+ union {
+ /* for Tx ring */
+ struct {
+ u32 fd_qb_tx_sample;
+ int last_to_use; /* last idx used by xmit */
+ u32 tx_copybreak;
+ struct hns3_tx_spare *tx_spare;
+ };
+
+ /* for Rx ring */
+ struct {
+ u32 pull_len; /* memcpy len for current rx packet */
+ u32 rx_copybreak;
+ u32 frag_num;
+ /* first buffer address for current packet */
+ unsigned char *va;
+ struct sk_buff *skb;
+ struct sk_buff *tail_skb;
+ };
+ };
} ____cacheline_internodealigned_in_smp;
enum hns3_flow_level_range {
@@ -482,6 +526,7 @@ struct hns3_enet_ring_group {
u64 total_packets; /* total packets processed this group */
u16 count;
struct hns3_enet_coalesce coal;
+ struct dim dim;
};
struct hns3_enet_tqp_vector {
@@ -503,7 +548,7 @@ struct hns3_enet_tqp_vector {
char name[HNAE3_INT_NAME_LEN];
- unsigned long last_jiffies;
+ u64 event_cnt;
} ____cacheline_internodealigned_in_smp;
struct hns3_nic_priv {
@@ -526,6 +571,8 @@ struct hns3_nic_priv {
struct hns3_enet_coalesce tx_coal;
struct hns3_enet_coalesce rx_coal;
+ u32 tx_copybreak;
+ u32 rx_copybreak;
};
union l3_hdr_info {
@@ -641,7 +688,6 @@ void hns3_set_vector_coalesce_rx_ql(struct hns3_enet_tqp_vector *tqp_vector,
void hns3_set_vector_coalesce_tx_ql(struct hns3_enet_tqp_vector *tqp_vector,
u32 ql_value);
-void hns3_enable_vlan_filter(struct net_device *netdev, bool enable);
void hns3_request_update_promisc_mode(struct hnae3_handle *handle);
#ifdef CONFIG_HNS3_DCB
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index b48faf769b1c..82061ab6930f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -46,6 +46,12 @@ static const struct hns3_stats hns3_txq_stats[] = {
HNS3_TQP_STAT("tso_err", tx_tso_err),
HNS3_TQP_STAT("over_max_recursion", over_max_recursion),
HNS3_TQP_STAT("hw_limitation", hw_limitation),
+ HNS3_TQP_STAT("bounce", tx_bounce),
+ HNS3_TQP_STAT("spare_full", tx_spare_full),
+ HNS3_TQP_STAT("copy_bits_err", copy_bits_err),
+ HNS3_TQP_STAT("sgl", tx_sgl),
+ HNS3_TQP_STAT("skb2sgl_err", skb2sgl_err),
+ HNS3_TQP_STAT("map_sg_err", map_sg_err),
};
#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
@@ -65,6 +71,8 @@ static const struct hns3_stats hns3_rxq_stats[] = {
HNS3_TQP_STAT("csum_complete", csum_complete),
HNS3_TQP_STAT("multicast", rx_multicast),
HNS3_TQP_STAT("non_reuse_pg", non_reuse_pg),
+ HNS3_TQP_STAT("frag_alloc_err", frag_alloc_err),
+ HNS3_TQP_STAT("frag_alloc", frag_alloc),
};
#define HNS3_PRIV_FLAGS_LEN ARRAY_SIZE(hns3_priv_flags)
@@ -88,7 +96,6 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
- bool vlan_filter_enable;
int ret;
if (!h->ae_algo->ops->set_loopback ||
@@ -110,14 +117,11 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
if (ret || ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
return ret;
- if (en) {
+ if (en)
h->ae_algo->ops->set_promisc_mode(h, true, true);
- } else {
+ else
/* recover promisc mode before loopback test */
hns3_request_update_promisc_mode(h);
- vlan_filter_enable = ndev->flags & IFF_PROMISC ? false : true;
- hns3_enable_vlan_filter(ndev, vlan_filter_enable);
- }
return ret;
}
@@ -1134,50 +1138,32 @@ static void hns3_get_channels(struct net_device *netdev,
h->ae_algo->ops->get_channels(h, ch);
}
-static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
- struct ethtool_coalesce *cmd)
+static int hns3_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
{
- struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
+ struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
struct hnae3_handle *h = priv->ae_handle;
- u16 queue_num = h->kinfo.num_tqps;
if (hns3_nic_resetting(netdev))
return -EBUSY;
- if (queue >= queue_num) {
- netdev_err(netdev,
- "Invalid queue value %u! Queue max id=%u\n",
- queue, queue_num - 1);
- return -EINVAL;
- }
-
- tx_vector = priv->ring[queue].tqp_vector;
- rx_vector = priv->ring[queue_num + queue].tqp_vector;
-
- cmd->use_adaptive_tx_coalesce =
- tx_vector->tx_group.coal.adapt_enable;
- cmd->use_adaptive_rx_coalesce =
- rx_vector->rx_group.coal.adapt_enable;
+ cmd->use_adaptive_tx_coalesce = tx_coal->adapt_enable;
+ cmd->use_adaptive_rx_coalesce = rx_coal->adapt_enable;
- cmd->tx_coalesce_usecs = tx_vector->tx_group.coal.int_gl;
- cmd->rx_coalesce_usecs = rx_vector->rx_group.coal.int_gl;
+ cmd->tx_coalesce_usecs = tx_coal->int_gl;
+ cmd->rx_coalesce_usecs = rx_coal->int_gl;
cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;
- cmd->tx_max_coalesced_frames = tx_vector->tx_group.coal.int_ql;
- cmd->rx_max_coalesced_frames = rx_vector->rx_group.coal.int_ql;
+ cmd->tx_max_coalesced_frames = tx_coal->int_ql;
+ cmd->rx_max_coalesced_frames = rx_coal->int_ql;
return 0;
}
-static int hns3_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *cmd)
-{
- return hns3_get_coalesce_per_queue(netdev, 0, cmd);
-}
-
static int hns3_check_gl_coalesce_para(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
@@ -1292,19 +1278,7 @@ static int hns3_check_coalesce_para(struct net_device *netdev,
return ret;
}
- ret = hns3_check_ql_coalesce_param(netdev, cmd);
- if (ret)
- return ret;
-
- if (cmd->use_adaptive_tx_coalesce == 1 ||
- cmd->use_adaptive_rx_coalesce == 1) {
- netdev_info(netdev,
- "adaptive-tx=%u and adaptive-rx=%u, tx_usecs or rx_usecs will changed dynamically.\n",
- cmd->use_adaptive_tx_coalesce,
- cmd->use_adaptive_rx_coalesce);
- }
-
- return 0;
+ return hns3_check_ql_coalesce_param(netdev, cmd);
}
static void hns3_set_coalesce_per_queue(struct net_device *netdev,
@@ -1350,6 +1324,9 @@ static int hns3_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *cmd)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hns3_enet_coalesce *tx_coal = &priv->tx_coal;
+ struct hns3_enet_coalesce *rx_coal = &priv->rx_coal;
u16 queue_num = h->kinfo.num_tqps;
int ret;
int i;
@@ -1364,6 +1341,15 @@ static int hns3_set_coalesce(struct net_device *netdev,
h->kinfo.int_rl_setting =
hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
+ tx_coal->adapt_enable = cmd->use_adaptive_tx_coalesce;
+ rx_coal->adapt_enable = cmd->use_adaptive_rx_coalesce;
+
+ tx_coal->int_gl = cmd->tx_coalesce_usecs;
+ rx_coal->int_gl = cmd->rx_coalesce_usecs;
+
+ tx_coal->int_ql = cmd->tx_max_coalesced_frames;
+ rx_coal->int_ql = cmd->rx_max_coalesced_frames;
+
for (i = 0; i < queue_num; i++)
hns3_set_coalesce_per_queue(netdev, cmd, i);
@@ -1614,12 +1600,77 @@ static int hns3_set_priv_flags(struct net_device *netdev, u32 pflags)
return 0;
}
+static int hns3_get_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *tuna,
+ void *data)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_TX_COPYBREAK:
+ /* all the tx rings have the same tx_copybreak */
+ *(u32 *)data = priv->tx_copybreak;
+ break;
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = priv->rx_copybreak;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static int hns3_set_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int i, ret = 0;
+
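+ /* reached via "ethtool --set-tunable <dev> tx-copybreak|rx-copybreak N" */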
+ switch (tuna->id) {
+ case ETHTOOL_TX_COPYBREAK:
+ priv->tx_copybreak = *(u32 *)data;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ priv->ring[i].tx_copybreak = priv->tx_copybreak;
+
+ break;
+ case ETHTOOL_RX_COPYBREAK:
+ priv->rx_copybreak = *(u32 *)data;
+
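+ /* rx rings are stored after the num_tqps tx rings in priv->ring */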
+ for (i = h->kinfo.num_tqps; i < h->kinfo.num_tqps * 2; i++)
+ priv->ring[i].rx_copybreak = priv->rx_copybreak;
+
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
#define HNS3_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | \
ETHTOOL_COALESCE_USE_ADAPTIVE | \
ETHTOOL_COALESCE_RX_USECS_HIGH | \
ETHTOOL_COALESCE_TX_USECS_HIGH | \
ETHTOOL_COALESCE_MAX_FRAMES)
+static int hns3_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+
+ if (handle->ae_algo->ops->get_ts_info)
+ return handle->ae_algo->ops->get_ts_info(handle, info);
+
+ return ethtool_op_get_ts_info(netdev, info);
+}
+
static const struct ethtool_ops hns3vf_ethtool_ops = {
.supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
.get_drvinfo = hns3_get_drvinfo,
@@ -1646,6 +1697,8 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
.set_msglevel = hns3_set_msglevel,
.get_priv_flags = hns3_get_priv_flags,
.set_priv_flags = hns3_set_priv_flags,
+ .get_tunable = hns3_get_tunable,
+ .set_tunable = hns3_set_tunable,
};
static const struct ethtool_ops hns3_ethtool_ops = {
@@ -1684,6 +1737,9 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.get_module_eeprom = hns3_get_module_eeprom,
.get_priv_flags = hns3_get_priv_flags,
.set_priv_flags = hns3_set_priv_flags,
+ .get_ts_info = hns3_get_ts_info,
+ .get_tunable = hns3_get_tunable,
+ .set_tunable = hns3_set_tunable,
};
void hns3_ethtool_set_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index 6c28c8f6292c..a685392dbfe9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -7,6 +7,6 @@ ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
ccflags-y += -I $(srctree)/$(src)
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
-hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o
+hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o hclge_ptp.o
hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 6aed30cc22f2..887297e37cf3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -178,7 +178,8 @@ static bool hclge_is_special_opcode(u16 opcode)
HCLGE_QUERY_CLEAR_MPF_RAS_INT,
HCLGE_QUERY_CLEAR_PF_RAS_INT,
HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
- HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT};
+ HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
+ HCLGE_QUERY_ALL_ERR_INFO};
int i;
for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
@@ -386,8 +387,14 @@ static void hclge_parse_capability(struct hclge_dev *hdev,
set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
if (hnae3_get_bit(caps, HCLGE_CAP_PHY_IMP_B))
set_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGE_CAP_RAS_IMP_B))
+ set_bit(HNAE3_DEV_SUPPORT_RAS_IMP_B, ae_dev->caps);
if (hnae3_get_bit(caps, HCLGE_CAP_RXD_ADV_LAYOUT_B))
set_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGE_CAP_PORT_VLAN_BYPASS_B)) {
+ set_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps);
+ set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
+ }
}
static __le32 hclge_build_api_caps(void)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 12558aa0fe0a..a322dfeba5cf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -130,6 +130,10 @@ enum hclge_opcode_type {
HCLGE_OPC_COMMON_LOOPBACK = 0x0315,
HCLGE_OPC_CONFIG_FEC_MODE = 0x031A,
+ /* PTP commands */
+ HCLGE_OPC_PTP_INT_EN = 0x0501,
+ HCLGE_OPC_PTP_MODE_CFG = 0x0507,
+
/* PFC/Pause commands */
HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701,
HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702,
@@ -236,6 +240,7 @@ enum hclge_opcode_type {
HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102,
+ HCLGE_OPC_PORT_VLAN_BYPASS = 0x1103,
/* Flow Director commands */
HCLGE_OPC_FD_MODE_CTRL = 0x1200,
@@ -292,6 +297,8 @@ enum hclge_opcode_type {
HCLGE_QUERY_MSIX_INT_STS_BD_NUM = 0x1513,
HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514,
HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515,
+ HCLGE_QUERY_ALL_ERR_BD_NUM = 0x1516,
+ HCLGE_QUERY_ALL_ERR_INFO = 0x1517,
HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580,
HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581,
HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584,
@@ -389,9 +396,11 @@ enum HCLGE_CAP_BITS {
HCLGE_CAP_HW_PAD_B,
HCLGE_CAP_STASH_B,
HCLGE_CAP_UDP_TUNNEL_CSUM_B,
+ HCLGE_CAP_RAS_IMP_B = 12,
HCLGE_CAP_FEC_B = 13,
HCLGE_CAP_PAUSE_B = 14,
HCLGE_CAP_RXD_ADV_LAYOUT_B = 15,
+ HCLGE_CAP_PORT_VLAN_BYPASS_B = 17,
};
enum HCLGE_API_CAP_BITS {
@@ -527,10 +536,14 @@ struct hclge_pf_res_cmd {
#define HCLGE_CFG_SPEED_ABILITY_M GENMASK(7, 0)
#define HCLGE_CFG_SPEED_ABILITY_EXT_S 10
#define HCLGE_CFG_SPEED_ABILITY_EXT_M GENMASK(15, 10)
+#define HCLGE_CFG_VLAN_FLTR_CAP_S 8
+#define HCLGE_CFG_VLAN_FLTR_CAP_M GENMASK(9, 8)
#define HCLGE_CFG_UMV_TBL_SPACE_S 16
#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16)
#define HCLGE_CFG_PF_RSS_SIZE_S 0
#define HCLGE_CFG_PF_RSS_SIZE_M GENMASK(3, 0)
+#define HCLGE_CFG_TX_SPARE_BUF_SIZE_S 4
+#define HCLGE_CFG_TX_SPARE_BUF_SIZE_M GENMASK(15, 4)
#define HCLGE_CFG_CMD_CNT 4
@@ -811,6 +824,14 @@ struct hclge_vlan_filter_vf_cfg_cmd {
u8 vf_bitmap[HCLGE_MAX_VF_BYTES];
};
+#define HCLGE_INGRESS_BYPASS_B 0
+struct hclge_port_vlan_filter_bypass_cmd {
+ u8 bypass_state;
+ u8 rsv1[3];
+ u8 vf_id;
+ u8 rsv2[19];
+};
+
#define HCLGE_SWITCH_ANTI_SPOOF_B 0U
#define HCLGE_SWITCH_ALW_LPBK_B 1U
#define HCLGE_SWITCH_ALW_LCL_LPBK_B 2U
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 0b7c6838d905..6fc50d09b9db 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -1894,6 +1894,336 @@ static void hclge_dbg_dump_mac_list(struct hclge_dev *hdev, char *buf, int len,
}
}
+static int hclge_get_vlan_rx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
+ struct hclge_dbg_vlan_cfg *vlan_cfg)
+{
+ struct hclge_vport_vtag_rx_cfg_cmd *req;
+ struct hclge_desc desc;
+ u16 bmap_index;
+ u8 rx_cfg;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, true);
+
+ req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
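+ /* address the queried function: vf_offset selects the chunk of
+ * functions per command, vf_bitmap the function within the chunk
+ */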
+ req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
+ bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
+ req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get vport%u rxvlan cfg, ret = %d\n",
+ vf_id, ret);
+ return ret;
+ }
+
+ rx_cfg = req->vport_vlan_cfg;
+ vlan_cfg->strip_tag1 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG1_EN_B);
+ vlan_cfg->strip_tag2 = hnae3_get_bit(rx_cfg, HCLGE_REM_TAG2_EN_B);
+ vlan_cfg->drop_tag1 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG1_EN_B);
+ vlan_cfg->drop_tag2 = hnae3_get_bit(rx_cfg, HCLGE_DISCARD_TAG2_EN_B);
+ vlan_cfg->pri_only1 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG1_EN_B);
+ vlan_cfg->pri_only2 = hnae3_get_bit(rx_cfg, HCLGE_SHOW_TAG2_EN_B);
+
+ return 0;
+}
+
+static int hclge_get_vlan_tx_offload_cfg(struct hclge_dev *hdev, u8 vf_id,
+ struct hclge_dbg_vlan_cfg *vlan_cfg)
+{
+ struct hclge_vport_vtag_tx_cfg_cmd *req;
+ struct hclge_desc desc;
+ u16 bmap_index;
+ u8 tx_cfg;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, true);
+ req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
+ req->vf_offset = vf_id / HCLGE_VF_NUM_PER_CMD;
+ bmap_index = vf_id % HCLGE_VF_NUM_PER_CMD / HCLGE_VF_NUM_PER_BYTE;
+ req->vf_bitmap[bmap_index] = 1U << (vf_id % HCLGE_VF_NUM_PER_BYTE);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get vport%u txvlan cfg, ret = %d\n",
+ vf_id, ret);
+ return ret;
+ }
+
+ tx_cfg = req->vport_vlan_cfg;
+ vlan_cfg->pvid = le16_to_cpu(req->def_vlan_tag1);
+
+ vlan_cfg->accept_tag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG1_B);
+ vlan_cfg->accept_tag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_TAG2_B);
+ vlan_cfg->accept_untag1 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG1_B);
+ vlan_cfg->accept_untag2 = hnae3_get_bit(tx_cfg, HCLGE_ACCEPT_UNTAG2_B);
+ vlan_cfg->insert_tag1 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG1_EN_B);
+ vlan_cfg->insert_tag2 = hnae3_get_bit(tx_cfg, HCLGE_PORT_INS_TAG2_EN_B);
+ vlan_cfg->shift_tag = hnae3_get_bit(tx_cfg, HCLGE_TAG_SHIFT_MODE_EN_B);
+
+ return 0;
+}
+
+static int hclge_get_vlan_filter_config_cmd(struct hclge_dev *hdev,
+ u8 vlan_type, u8 vf_id,
+ struct hclge_desc *desc)
+{
+ struct hclge_vlan_filter_ctrl_cmd *req;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
+ req = (struct hclge_vlan_filter_ctrl_cmd *)desc->data;
+ req->vlan_type = vlan_type;
+ req->vf_id = vf_id;
+
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to get vport%u vlan filter config, ret = %d.\n",
+ vf_id, ret);
+
+ return ret;
+}
+
+static int hclge_get_vlan_filter_state(struct hclge_dev *hdev, u8 vlan_type,
+ u8 vf_id, u8 *vlan_fe)
+{
+ struct hclge_vlan_filter_ctrl_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ ret = hclge_get_vlan_filter_config_cmd(hdev, vlan_type, vf_id, &desc);
+ if (ret)
+ return ret;
+
+ req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
+ *vlan_fe = req->vlan_fe;
+
+ return 0;
+}
+
+static int hclge_get_port_vlan_filter_bypass_state(struct hclge_dev *hdev,
+ u8 vf_id, u8 *bypass_en)
+{
+ struct hclge_port_vlan_filter_bypass_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
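+ /* leave *bypass_en untouched if the capability is absent; the
+ * debugfs caller prints "NA" for such a function
+ */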
+ if (!test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, true);
+ req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
+ req->vf_id = vf_id;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get vport%u port vlan filter bypass state, ret = %d.\n",
+ vf_id, ret);
+ return ret;
+ }
+
+ *bypass_en = hnae3_get_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B);
+
+ return 0;
+}
+
+static const struct hclge_dbg_item vlan_filter_items[] = {
+ { "FUNC_ID", 2 },
+ { "I_VF_VLAN_FILTER", 2 },
+ { "E_VF_VLAN_FILTER", 2 },
+ { "PORT_VLAN_FILTER_BYPASS", 0 }
+};
+
+static const struct hclge_dbg_item vlan_offload_items[] = {
+ { "FUNC_ID", 2 },
+ { "PVID", 4 },
+ { "ACCEPT_TAG1", 2 },
+ { "ACCEPT_TAG2", 2 },
+ { "ACCEPT_UNTAG1", 2 },
+ { "ACCEPT_UNTAG2", 2 },
+ { "INSERT_TAG1", 2 },
+ { "INSERT_TAG2", 2 },
+ { "SHIFT_TAG", 2 },
+ { "STRIP_TAG1", 2 },
+ { "STRIP_TAG2", 2 },
+ { "DROP_TAG1", 2 },
+ { "DROP_TAG2", 2 },
+ { "PRI_ONLY_TAG1", 2 },
+ { "PRI_ONLY_TAG2", 0 }
+};
+
+static int hclge_dbg_dump_vlan_filter_config(struct hclge_dev *hdev, char *buf,
+ int len, int *pos)
+{
+ char content[HCLGE_DBG_VLAN_FLTR_INFO_LEN], str_id[HCLGE_DBG_ID_LEN];
+ const char *result[ARRAY_SIZE(vlan_filter_items)];
+ u8 i, j, vlan_fe, bypass, ingress, egress;
+ u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
+ int ret;
+
+ ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_PORT, 0,
+ &vlan_fe);
+ if (ret)
+ return ret;
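+ /* the ingress enable is BIT(0), so masking already yields 0 or 1;
+ * the egress bit must be normalized before indexing state_str
+ */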
+ ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
+ egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
+
+ *pos += scnprintf(buf + *pos, len - *pos, "I_PORT_VLAN_FILTER: %s\n",
+ state_str[ingress]);
+ *pos += scnprintf(buf + *pos, len - *pos, "E_PORT_VLAN_FILTER: %s\n",
+ state_str[egress]);
+
+ hclge_dbg_fill_content(content, sizeof(content), vlan_filter_items,
+ NULL, ARRAY_SIZE(vlan_filter_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+
+ for (i = 0; i < func_num; i++) {
+ ret = hclge_get_vlan_filter_state(hdev, HCLGE_FILTER_TYPE_VF, i,
+ &vlan_fe);
+ if (ret)
+ return ret;
+
+ ingress = vlan_fe & HCLGE_FILTER_FE_NIC_INGRESS_B;
+ egress = vlan_fe & HCLGE_FILTER_FE_NIC_EGRESS_B ? 1 : 0;
+ ret = hclge_get_port_vlan_filter_bypass_state(hdev, i, &bypass);
+ if (ret)
+ return ret;
+ j = 0;
+ result[j++] = hclge_dbg_get_func_id_str(str_id, i);
+ result[j++] = state_str[ingress];
+ result[j++] = state_str[egress];
+ result[j++] =
+ test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
+ hdev->ae_dev->caps) ? state_str[bypass] : "NA";
+ hclge_dbg_fill_content(content, sizeof(content),
+ vlan_filter_items, result,
+ ARRAY_SIZE(vlan_filter_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ }
+ *pos += scnprintf(buf + *pos, len - *pos, "\n");
+
+ return 0;
+}
+
+static int hclge_dbg_dump_vlan_offload_config(struct hclge_dev *hdev, char *buf,
+ int len, int *pos)
+{
+ char str_id[HCLGE_DBG_ID_LEN], str_pvid[HCLGE_DBG_ID_LEN];
+ const char *result[ARRAY_SIZE(vlan_offload_items)];
+ char content[HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN];
+ u8 func_num = pci_num_vf(hdev->pdev) + 1; /* pf and enabled vf num */
+ struct hclge_dbg_vlan_cfg vlan_cfg;
+ int ret;
+ u8 i, j;
+
+ hclge_dbg_fill_content(content, sizeof(content), vlan_offload_items,
+ NULL, ARRAY_SIZE(vlan_offload_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+
+ for (i = 0; i < func_num; i++) {
+ ret = hclge_get_vlan_tx_offload_cfg(hdev, i, &vlan_cfg);
+ if (ret)
+ return ret;
+
+ ret = hclge_get_vlan_rx_offload_cfg(hdev, i, &vlan_cfg);
+ if (ret)
+ return ret;
+
+ sprintf(str_pvid, "%u", vlan_cfg.pvid);
+ j = 0;
+ result[j++] = hclge_dbg_get_func_id_str(str_id, i);
+ result[j++] = str_pvid;
+ result[j++] = state_str[vlan_cfg.accept_tag1];
+ result[j++] = state_str[vlan_cfg.accept_tag2];
+ result[j++] = state_str[vlan_cfg.accept_untag1];
+ result[j++] = state_str[vlan_cfg.accept_untag2];
+ result[j++] = state_str[vlan_cfg.insert_tag1];
+ result[j++] = state_str[vlan_cfg.insert_tag2];
+ result[j++] = state_str[vlan_cfg.shift_tag];
+ result[j++] = state_str[vlan_cfg.strip_tag1];
+ result[j++] = state_str[vlan_cfg.strip_tag2];
+ result[j++] = state_str[vlan_cfg.drop_tag1];
+ result[j++] = state_str[vlan_cfg.drop_tag2];
+ result[j++] = state_str[vlan_cfg.pri_only1];
+ result[j++] = state_str[vlan_cfg.pri_only2];
+
+ hclge_dbg_fill_content(content, sizeof(content),
+ vlan_offload_items, result,
+ ARRAY_SIZE(vlan_offload_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ }
+
+ return 0;
+}
+
+static int hclge_dbg_dump_vlan_config(struct hclge_dev *hdev, char *buf,
+ int len)
+{
+ int pos = 0;
+ int ret;
+
+ ret = hclge_dbg_dump_vlan_filter_config(hdev, buf, len, &pos);
+ if (ret)
+ return ret;
+
+ return hclge_dbg_dump_vlan_offload_config(hdev, buf, len, &pos);
+}
+
+static int hclge_dbg_dump_ptp_info(struct hclge_dev *hdev, char *buf, int len)
+{
+ struct hclge_ptp *ptp = hdev->ptp;
+ u32 sw_cfg = ptp->ptp_cfg;
+ unsigned int tx_start;
+ unsigned int last_rx;
+ int pos = 0;
+ u32 hw_cfg;
+ int ret;
+
+ pos += scnprintf(buf + pos, len - pos, "phc %s's debug info:\n",
+ ptp->info.name);
+ pos += scnprintf(buf + pos, len - pos, "ptp enable: %s\n",
+ test_bit(HCLGE_PTP_FLAG_EN, &ptp->flags) ?
+ "yes" : "no");
+ pos += scnprintf(buf + pos, len - pos, "ptp tx enable: %s\n",
+ test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ?
+ "yes" : "no");
+ pos += scnprintf(buf + pos, len - pos, "ptp rx enable: %s\n",
+ test_bit(HCLGE_PTP_FLAG_RX_EN, &ptp->flags) ?
+ "yes" : "no");
+
+ last_rx = jiffies_to_msecs(ptp->last_rx);
+ pos += scnprintf(buf + pos, len - pos, "last rx time: %lu.%lu\n",
+ last_rx / MSEC_PER_SEC, last_rx % MSEC_PER_SEC);
+ pos += scnprintf(buf + pos, len - pos, "rx count: %lu\n", ptp->rx_cnt);
+
+ tx_start = jiffies_to_msecs(ptp->tx_start);
+ pos += scnprintf(buf + pos, len - pos, "last tx start time: %lu.%lu\n",
+ tx_start / MSEC_PER_SEC, tx_start % MSEC_PER_SEC);
+ pos += scnprintf(buf + pos, len - pos, "tx count: %lu\n", ptp->tx_cnt);
+ pos += scnprintf(buf + pos, len - pos, "tx skipped count: %lu\n",
+ ptp->tx_skipped);
+ pos += scnprintf(buf + pos, len - pos, "tx timeout count: %lu\n",
+ ptp->tx_timeout);
+ pos += scnprintf(buf + pos, len - pos, "last tx seqid: %u\n",
+ ptp->last_tx_seqid);
+
+ ret = hclge_ptp_cfg_qry(hdev, &hw_cfg);
+ if (ret)
+ return ret;
+
+ pos += scnprintf(buf + pos, len - pos, "sw_cfg: %#x, hw_cfg: %#x\n",
+ sw_cfg, hw_cfg);
+
+ pos += scnprintf(buf + pos, len - pos, "tx type: %d, rx filter: %d\n",
+ ptp->ts_cfg.tx_type, ptp->ts_cfg.rx_filter);
+
+ return 0;
+}
+
static int hclge_dbg_dump_mac_uc(struct hclge_dev *hdev, char *buf, int len)
{
hclge_dbg_dump_mac_list(hdev, buf, len, true);
@@ -1966,6 +2296,10 @@ static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
.dbg_dump = hclge_dbg_dump_loopback,
},
{
+ .cmd = HNAE3_DBG_CMD_PTP_INFO,
+ .dbg_dump = hclge_dbg_dump_ptp_info,
+ },
+ {
.cmd = HNAE3_DBG_CMD_INTERRUPT_INFO,
.dbg_dump = hclge_dbg_dump_interrupt,
},
@@ -2037,6 +2371,10 @@ static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
.cmd = HNAE3_DBG_CMD_SERV_INFO,
.dbg_dump = hclge_dbg_dump_serv_info,
},
+ {
+ .cmd = HNAE3_DBG_CMD_VLAN_CONFIG,
+ .dbg_dump = hclge_dbg_dump_vlan_config,
+ },
};
int hclge_dbg_read_cmd(struct hnae3_handle *handle, enum hnae3_dbg_cmd cmd,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
index 642752e65a7c..c526591a7240 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -735,6 +735,8 @@ static const struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = {
};
#define HCLGE_DBG_INFO_LEN 256
+#define HCLGE_DBG_VLAN_FLTR_INFO_LEN 256
+#define HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN 512
#define HCLGE_DBG_ID_LEN 16
#define HCLGE_DBG_ITEM_NAME_LEN 32
#define HCLGE_DBG_DATA_STR_LEN 32
@@ -747,4 +749,21 @@ struct hclge_dbg_item {
u16 interval; /* blank numbers after the item */
};
+struct hclge_dbg_vlan_cfg {
+ u16 pvid;
+ u8 accept_tag1;
+ u8 accept_tag2;
+ u8 accept_untag1;
+ u8 accept_untag2;
+ u8 insert_tag1;
+ u8 insert_tag2;
+ u8 shift_tag;
+ u8 strip_tag1;
+ u8 strip_tag2;
+ u8 drop_tag1;
+ u8 drop_tag2;
+ u8 pri_only1;
+ u8 pri_only2;
+};
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 8223d699cd94..bad9fda19398 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -631,6 +631,134 @@ static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
{ /* sentinel */ }
};
+static const struct hclge_hw_module_id hclge_hw_module_id_st[] = {
+ {
+ .module_id = MODULE_NONE,
+ .msg = "MODULE_NONE"
+ }, {
+ .module_id = MODULE_BIOS_COMMON,
+ .msg = "MODULE_BIOS_COMMON"
+ }, {
+ .module_id = MODULE_GE,
+ .msg = "MODULE_GE"
+ }, {
+ .module_id = MODULE_IGU_EGU,
+ .msg = "MODULE_IGU_EGU"
+ }, {
+ .module_id = MODULE_LGE,
+ .msg = "MODULE_LGE"
+ }, {
+ .module_id = MODULE_NCSI,
+ .msg = "MODULE_NCSI"
+ }, {
+ .module_id = MODULE_PPP,
+ .msg = "MODULE_PPP"
+ }, {
+ .module_id = MODULE_QCN,
+ .msg = "MODULE_QCN"
+ }, {
+ .module_id = MODULE_RCB_RX,
+ .msg = "MODULE_RCB_RX"
+ }, {
+ .module_id = MODULE_RTC,
+ .msg = "MODULE_RTC"
+ }, {
+ .module_id = MODULE_SSU,
+ .msg = "MODULE_SSU"
+ }, {
+ .module_id = MODULE_TM,
+ .msg = "MODULE_TM"
+ }, {
+ .module_id = MODULE_RCB_TX,
+ .msg = "MODULE_RCB_TX"
+ }, {
+ .module_id = MODULE_TXDMA,
+ .msg = "MODULE_TXDMA"
+ }, {
+ .module_id = MODULE_MASTER,
+ .msg = "MODULE_MASTER"
+ }, {
+ .module_id = MODULE_ROCEE_TOP,
+ .msg = "MODULE_ROCEE_TOP"
+ }, {
+ .module_id = MODULE_ROCEE_TIMER,
+ .msg = "MODULE_ROCEE_TIMER"
+ }, {
+ .module_id = MODULE_ROCEE_MDB,
+ .msg = "MODULE_ROCEE_MDB"
+ }, {
+ .module_id = MODULE_ROCEE_TSP,
+ .msg = "MODULE_ROCEE_TSP"
+ }, {
+ .module_id = MODULE_ROCEE_TRP,
+ .msg = "MODULE_ROCEE_TRP"
+ }, {
+ .module_id = MODULE_ROCEE_SCC,
+ .msg = "MODULE_ROCEE_SCC"
+ }, {
+ .module_id = MODULE_ROCEE_CAEP,
+ .msg = "MODULE_ROCEE_CAEP"
+ }, {
+ .module_id = MODULE_ROCEE_GEN_AC,
+ .msg = "MODULE_ROCEE_GEN_AC"
+ }, {
+ .module_id = MODULE_ROCEE_QMM,
+ .msg = "MODULE_ROCEE_QMM"
+ }, {
+ .module_id = MODULE_ROCEE_LSAN,
+ .msg = "MODULE_ROCEE_LSAN"
+ }
+};
+
+static const struct hclge_hw_type_id hclge_hw_type_id_st[] = {
+ {
+ .type_id = NONE_ERROR,
+ .msg = "none_error"
+ }, {
+ .type_id = FIFO_ERROR,
+ .msg = "fifo_error"
+ }, {
+ .type_id = MEMORY_ERROR,
+ .msg = "memory_error"
+ }, {
+ .type_id = POISON_ERROR,
+ .msg = "poison_error"
+ }, {
+ .type_id = MSIX_ECC_ERROR,
+ .msg = "msix_ecc_error"
+ }, {
+ .type_id = TQP_INT_ECC_ERROR,
+ .msg = "tqp_int_ecc_error"
+ }, {
+ .type_id = PF_ABNORMAL_INT_ERROR,
+ .msg = "pf_abnormal_int_error"
+ }, {
+ .type_id = MPF_ABNORMAL_INT_ERROR,
+ .msg = "mpf_abnormal_int_error"
+ }, {
+ .type_id = COMMON_ERROR,
+ .msg = "common_error"
+ }, {
+ .type_id = PORT_ERROR,
+ .msg = "port_error"
+ }, {
+ .type_id = ETS_ERROR,
+ .msg = "ets_error"
+ }, {
+ .type_id = NCSI_ERROR,
+ .msg = "ncsi_error"
+ }, {
+ .type_id = GLB_ERROR,
+ .msg = "glb_error"
+ }, {
+ .type_id = ROCEE_NORMAL_ERR,
+ .msg = "rocee_normal_error"
+ }, {
+ .type_id = ROCEE_OVF_ERR,
+ .msg = "rocee_ovf_error"
+ }
+};
+
static void hclge_log_error(struct device *dev, char *reg,
const struct hclge_hw_error *err,
u32 err_sts, unsigned long *reset_requests)
@@ -1611,11 +1739,27 @@ static const struct hclge_hw_blk hw_blk[] = {
{ /* sentinel */ }
};
+static void hclge_config_all_msix_error(struct hclge_dev *hdev, bool enable)
+{
+ u32 reg_val;
+
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
+
+ if (enable)
+ reg_val |= BIT(HCLGE_VECTOR0_ALL_MSIX_ERR_B);
+ else
+ reg_val &= ~BIT(HCLGE_VECTOR0_ALL_MSIX_ERR_B);
+
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
+}
+
int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state)
{
const struct hclge_hw_blk *module = hw_blk;
int ret = 0;
+ hclge_config_all_msix_error(hdev, state);
+
while (module->name) {
if (module->config_err_int) {
ret = module->config_err_int(hdev, state);
@@ -1876,11 +2020,8 @@ static int hclge_handle_pf_msix_error(struct hclge_dev *hdev,
static int hclge_handle_all_hw_msix_error(struct hclge_dev *hdev,
unsigned long *reset_requests)
{
- struct hclge_mac_tnl_stats mac_tnl_stats;
- struct device *dev = &hdev->pdev->dev;
u32 mpf_bd_num, pf_bd_num, bd_num;
struct hclge_desc *desc;
- u32 status;
int ret;
/* query the number of bds for the MSIx int status */
@@ -1903,16 +2044,45 @@ static int hclge_handle_all_hw_msix_error(struct hclge_dev *hdev,
if (ret)
goto msi_error;
+ ret = hclge_handle_mac_tnl(hdev);
+
+msi_error:
+ kfree(desc);
+out:
+ return ret;
+}
+
+int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
+ unsigned long *reset_requests)
+{
+ struct device *dev = &hdev->pdev->dev;
+
+ if (!test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state)) {
+ dev_err(dev,
+ "failed to handle msix error during dev init\n");
+ return -EAGAIN;
+ }
+
+ return hclge_handle_all_hw_msix_error(hdev, reset_requests);
+}
+
+int hclge_handle_mac_tnl(struct hclge_dev *hdev)
+{
+ struct hclge_mac_tnl_stats mac_tnl_stats;
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ u32 status;
+ int ret;
+
/* query and clear mac tnl interruptions */
- hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_MAC_TNL_INT,
- true);
- ret = hclge_cmd_send(&hdev->hw, &desc[0], 1);
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_TNL_INT, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
- dev_err(dev, "query mac tnl int cmd failed (%d)\n", ret);
- goto msi_error;
+ dev_err(dev, "failed to query mac tnl int, ret = %d.\n", ret);
+ return ret;
}
- status = le32_to_cpu(desc->data[0]);
+ status = le32_to_cpu(desc.data[0]);
if (status) {
/* When mac tnl interrupt occurs, we record current time and
* register status here in a fifo, then clear the status. So
@@ -1924,33 +2094,15 @@ static int hclge_handle_all_hw_msix_error(struct hclge_dev *hdev,
kfifo_put(&hdev->mac_tnl_log, mac_tnl_stats);
ret = hclge_clear_mac_tnl_int(hdev);
if (ret)
- dev_err(dev, "clear mac tnl int failed (%d)\n", ret);
+ dev_err(dev, "failed to clear mac tnl int, ret = %d.\n",
+ ret);
}
-msi_error:
- kfree(desc);
-out:
return ret;
}
-int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
- unsigned long *reset_requests)
-{
- struct device *dev = &hdev->pdev->dev;
-
- if (!test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state)) {
- dev_err(dev,
- "Can't handle - MSIx error reported during dev init\n");
- return 0;
- }
-
- return hclge_handle_all_hw_msix_error(hdev, reset_requests);
-}
-
void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev)
{
-#define HCLGE_DESC_NO_DATA_LEN 8
-
struct hclge_dev *hdev = ae_dev->priv;
struct device *dev = &hdev->pdev->dev;
u32 mpf_bd_num, pf_bd_num, bd_num;
@@ -1999,3 +2151,205 @@ void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev)
msi_error:
kfree(desc);
}
+
+bool hclge_find_error_source(struct hclge_dev *hdev)
+{
+ u32 msix_src_flag, hw_err_src_flag;
+
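+ /* an error source is pending if either the vector0 msix status or
+ * the ras status register reports one
+ */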
+ msix_src_flag = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
+ HCLGE_VECTOR0_REG_MSIX_MASK;
+
+ hw_err_src_flag = hclge_read_dev(&hdev->hw,
+ HCLGE_RAS_PF_OTHER_INT_STS_REG) &
+ HCLGE_RAS_REG_ERR_MASK;
+
+ return msix_src_flag || hw_err_src_flag;
+}
+
+void hclge_handle_occurred_error(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+ if (hclge_find_error_source(hdev))
+ hclge_handle_error_info_log(ae_dev);
+}
+
+static void
+hclge_handle_error_type_reg_log(struct device *dev,
+ struct hclge_mod_err_info *mod_info,
+ struct hclge_type_reg_err_info *type_reg_info)
+{
+#define HCLGE_ERR_TYPE_MASK 0x7F
+#define HCLGE_ERR_TYPE_IS_RAS_OFFSET 7
+
+ u8 mod_id, total_module, type_id, total_type, i, is_ras;
+ u8 index_module = MODULE_NONE;
+ u8 index_type = NONE_ERROR;
+
+ mod_id = mod_info->mod_id;
+ type_id = type_reg_info->type_id & HCLGE_ERR_TYPE_MASK;
+ is_ras = type_reg_info->type_id >> HCLGE_ERR_TYPE_IS_RAS_OFFSET;
+
+ total_module = ARRAY_SIZE(hclge_hw_module_id_st);
+ total_type = ARRAY_SIZE(hclge_hw_type_id_st);
+
+ for (i = 0; i < total_module; i++) {
+ if (mod_id == hclge_hw_module_id_st[i].module_id) {
+ index_module = i;
+ break;
+ }
+ }
+
+ for (i = 0; i < total_type; i++) {
+ if (type_id == hclge_hw_type_id_st[i].type_id) {
+ index_type = i;
+ break;
+ }
+ }
+
+ if (index_module != MODULE_NONE && index_type != NONE_ERROR)
+ dev_err(dev,
+ "found %s %s, is %s error.\n",
+ hclge_hw_module_id_st[index_module].msg,
+ hclge_hw_type_id_st[index_type].msg,
+ is_ras ? "ras" : "msix");
+ else
+ dev_err(dev,
+ "unknown module[%u] or type[%u].\n", mod_id, type_id);
+
+ dev_err(dev, "reg_value:\n");
+ for (i = 0; i < type_reg_info->reg_num; i++)
+ dev_err(dev, "0x%08x\n", type_reg_info->hclge_reg[i]);
+}
+
+static void hclge_handle_error_module_log(struct hnae3_ae_dev *ae_dev,
+ const u32 *buf, u32 buf_size)
+{
+ struct hclge_type_reg_err_info *type_reg_info;
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_mod_err_info *mod_info;
+ struct hclge_sum_err_info *sum_info;
+ u8 mod_num, err_num, i;
+ u32 offset = 0;
+
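+ /* buf layout: one summary word, then per module a module word
+ * followed by err_num entries of (type word + reg_num registers)
+ */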
+ sum_info = (struct hclge_sum_err_info *)&buf[offset++];
+ if (sum_info->reset_type &&
+ sum_info->reset_type != HNAE3_NONE_RESET)
+ set_bit(sum_info->reset_type, &ae_dev->hw_err_reset_req);
+ mod_num = sum_info->mod_num;
+
+ while (mod_num--) {
+ if (offset >= buf_size) {
+ dev_err(dev, "The offset(%u) exceeds buf's size(%u).\n",
+ offset, buf_size);
+ return;
+ }
+ mod_info = (struct hclge_mod_err_info *)&buf[offset++];
+ err_num = mod_info->err_num;
+
+ for (i = 0; i < err_num; i++) {
+ if (offset >= buf_size) {
+ dev_err(dev,
+ "The offset(%u) exceeds buf size(%u).\n",
+ offset, buf_size);
+ return;
+ }
+
+ type_reg_info = (struct hclge_type_reg_err_info *)
+ &buf[offset++];
+ hclge_handle_error_type_reg_log(dev, mod_info,
+ type_reg_info);
+
+ offset += type_reg_info->reg_num;
+ }
+ }
+}
+
+static int hclge_query_all_err_bd_num(struct hclge_dev *hdev, u32 *bd_num)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc_bd;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_ALL_ERR_BD_NUM, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
+ if (ret) {
+ dev_err(dev, "failed to query error bd_num, ret = %d.\n", ret);
+ return ret;
+ }
+
+ *bd_num = le32_to_cpu(desc_bd.data[0]);
+ if (!(*bd_num)) {
+ dev_err(dev, "The value of bd_num is 0!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hclge_query_all_err_info(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 bd_num)
+{
+ struct device *dev = &hdev->pdev->dev;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(desc, HCLGE_QUERY_ALL_ERR_INFO, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
+ if (ret)
+ dev_err(dev, "failed to query error info, ret = %d.\n", ret);
+
+ return ret;
+}
+
+int hclge_handle_error_info_log(struct hnae3_ae_dev *ae_dev)
+{
+ u32 bd_num, desc_len, buf_len, buf_size, i;
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct hclge_desc *desc;
+ __le32 *desc_data;
+ u32 *buf;
+ int ret;
+
+ ret = hclge_query_all_err_bd_num(hdev, &bd_num);
+ if (ret)
+ goto out;
+
+ desc_len = bd_num * sizeof(struct hclge_desc);
+ desc = kzalloc(desc_len, GFP_KERNEL);
+ if (!desc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = hclge_query_all_err_info(hdev, desc, bd_num);
+ if (ret)
+ goto err_desc;
+
+ buf_len = bd_num * sizeof(struct hclge_desc) - HCLGE_DESC_NO_DATA_LEN;
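+ /* the error info starts at the data area of the first descriptor;
+ * only that descriptor's command header bytes carry no payload
+ */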
+ buf_size = buf_len / sizeof(u32);
+
+ desc_data = kzalloc(buf_len, GFP_KERNEL);
+ if (!desc_data) {
+ ret = -ENOMEM;
+ goto err_desc;
+ }
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_buf_alloc;
+ }
+
+ memcpy(desc_data, &desc[0].data[0], buf_len);
+ for (i = 0; i < buf_size; i++)
+ buf[i] = le32_to_cpu(desc_data[i]);
+
+ hclge_handle_error_module_log(ae_dev, buf, buf_size);
+ kfree(buf);
+
+err_buf_alloc:
+ kfree(desc_data);
+err_desc:
+ kfree(desc);
+out:
+ return ret;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index d647f3c84134..07987fb8332e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -15,6 +15,8 @@
#define HCLGE_RAS_PF_OTHER_INT_STS_REG 0x20B00
#define HCLGE_RAS_REG_NFE_MASK 0xFF00
#define HCLGE_RAS_REG_ROCEE_ERR_MASK 0x3000000
+#define HCLGE_RAS_REG_ERR_MASK \
+ (HCLGE_RAS_REG_NFE_MASK | HCLGE_RAS_REG_ROCEE_ERR_MASK)
#define HCLGE_VECTOR0_REG_MSIX_MASK 0x1FF00
@@ -107,6 +109,10 @@
#define HCLGE_ROCEE_OVF_ERR_INT_MASK 0x10000
#define HCLGE_ROCEE_OVF_ERR_TYPE_MASK 0x3F
+#define HCLGE_DESC_DATA_MAX 8
+#define HCLGE_REG_NUM_MAX 256
+#define HCLGE_DESC_NO_DATA_LEN 8
+
enum hclge_err_int_type {
HCLGE_ERR_INT_MSIX = 0,
HCLGE_ERR_INT_RAS_CE = 1,
@@ -114,6 +120,56 @@ enum hclge_err_int_type {
HCLGE_ERR_INT_RAS_FE = 3,
};
+enum hclge_mod_name_list {
+ MODULE_NONE = 0,
+ MODULE_BIOS_COMMON = 1,
+ MODULE_GE = 2,
+ MODULE_IGU_EGU = 3,
+ MODULE_LGE = 4,
+ MODULE_NCSI = 5,
+ MODULE_PPP = 6,
+ MODULE_QCN = 7,
+ MODULE_RCB_RX = 8,
+ MODULE_RTC = 9,
+ MODULE_SSU = 10,
+ MODULE_TM = 11,
+ MODULE_RCB_TX = 12,
+ MODULE_TXDMA = 13,
+ MODULE_MASTER = 14,
+ /* add new MODULE NAME for NIC here in order */
+ MODULE_ROCEE_TOP = 40,
+ MODULE_ROCEE_TIMER = 41,
+ MODULE_ROCEE_MDB = 42,
+ MODULE_ROCEE_TSP = 43,
+ MODULE_ROCEE_TRP = 44,
+ MODULE_ROCEE_SCC = 45,
+ MODULE_ROCEE_CAEP = 46,
+ MODULE_ROCEE_GEN_AC = 47,
+ MODULE_ROCEE_QMM = 48,
+ MODULE_ROCEE_LSAN = 49,
+ /* add new MODULE NAME for RoCEE here in order */
+};
+
+enum hclge_err_type_list {
+ NONE_ERROR = 0,
+ FIFO_ERROR = 1,
+ MEMORY_ERROR = 2,
+ POISON_ERROR = 3,
+ MSIX_ECC_ERROR = 4,
+ TQP_INT_ECC_ERROR = 5,
+ PF_ABNORMAL_INT_ERROR = 6,
+ MPF_ABNORMAL_INT_ERROR = 7,
+ COMMON_ERROR = 8,
+ PORT_ERROR = 9,
+ ETS_ERROR = 10,
+ NCSI_ERROR = 11,
+ GLB_ERROR = 12,
+ /* add new ERROR TYPE for NIC here in order */
+ ROCEE_NORMAL_ERR = 40,
+ ROCEE_OVF_ERR = 41,
+ /* add new ERROR TYPE for ROCEE here in order */
+};
+
struct hclge_hw_blk {
u32 msk;
const char *name;
@@ -126,11 +182,44 @@ struct hclge_hw_error {
enum hnae3_reset_type reset_level;
};
+struct hclge_hw_module_id {
+ enum hclge_mod_name_list module_id;
+ const char *msg;
+};
+
+struct hclge_hw_type_id {
+ enum hclge_err_type_list type_id;
+ const char *msg;
+};
+
+struct hclge_sum_err_info {
+ u8 reset_type;
+ u8 mod_num;
+ u8 rsv[2];
+};
+
+struct hclge_mod_err_info {
+ u8 mod_id;
+ u8 err_num;
+ u8 rsv[2];
+};
+
+struct hclge_type_reg_err_info {
+ u8 type_id;
+ u8 reg_num;
+ u8 rsv[2];
+ u32 hclge_reg[HCLGE_REG_NUM_MAX];
+};
+
int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en);
int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state);
int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en);
void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev);
+bool hclge_find_error_source(struct hclge_dev *hdev);
+void hclge_handle_occurred_error(struct hclge_dev *hdev);
pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev);
int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
unsigned long *reset_requests);
+int hclge_handle_error_info_log(struct hnae3_ae_dev *ae_dev);
+int hclge_handle_mac_tnl(struct hclge_dev *hdev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 3882f829fc49..f3e482ab3c71 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -1279,6 +1279,7 @@ static u32 hclge_get_max_speed(u16 speed_ability)
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
+#define HCLGE_TX_SPARE_SIZE_UNIT 4096
#define SPEED_ABILITY_EXT_SHIFT 8
struct hclge_cfg_param_cmd *req;
@@ -1334,6 +1335,10 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
HCLGE_CFG_SPEED_ABILITY_EXT_S);
cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
+ cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_VLAN_FLTR_CAP_M,
+ HCLGE_CFG_VLAN_FLTR_CAP_S);
+
cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_UMV_TBL_SPACE_M,
HCLGE_CFG_UMV_TBL_SPACE_S);
@@ -1354,6 +1359,15 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1U << cfg->pf_rss_size_max :
cfg->vf_rss_size_max;
+
+ /* The unit of the tx spare buffer size queried from the configuration
+ * file is HCLGE_TX_SPARE_SIZE_UNIT (4096) bytes, so a conversion is
+ * needed here.
+ */
+ cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
+ HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
+ HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
+ cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
}
/* hclge_get_cfg: query the static parameter from flash
@@ -1513,6 +1527,7 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
static int hclge_configure(struct hclge_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclge_cfg cfg;
unsigned int i;
int ret;
@@ -1534,6 +1549,9 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->tc_max = cfg.tc_num;
hdev->tm_info.hw_pfc_map = 0;
hdev->wanted_umv_size = cfg.umv_space;
+ hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
+ if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
+ set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
if (hnae3_dev_fd_supported(hdev)) {
hdev->fd_en = true;
@@ -1729,6 +1747,7 @@ static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
kinfo->num_rx_desc = num_rx_desc;
kinfo->rx_buf_len = hdev->rx_buf_len;
+ kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
sizeof(struct hnae3_queue *), GFP_KERNEL);
@@ -1843,6 +1862,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
vport->mps = HCLGE_MAC_DEFAULT_FRAME;
vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
vport->rxvlan_cfg.rx_vlan_offload_en = true;
+ vport->req_vlan_fltr_en = true;
INIT_LIST_HEAD(&vport->vlan_list);
INIT_LIST_HEAD(&vport->uc_mac_list);
INIT_LIST_HEAD(&vport->mc_mac_list);
@@ -2835,6 +2855,14 @@ static void hclge_reset_task_schedule(struct hclge_dev *hdev)
hclge_wq, &hdev->service_task, 0);
}
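+/* kick the shared service task to run the error handling path; the
+ * ERR_SERVICE_SCHED bit marks the pending work
+ */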
+static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
+{
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
+ !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
+ mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
+ hclge_wq, &hdev->service_task, 0);
+}
+
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
@@ -3291,11 +3319,13 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
{
- u32 cmdq_src_reg, msix_src_reg;
+ u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
/* fetch the events from their corresponding regs */
cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
+ hw_err_src_reg = hclge_read_dev(&hdev->hw,
+ HCLGE_RAS_PF_OTHER_INT_STS_REG);
/* Assumption: If by any chance reset and mailbox events are reported
* together then we will only process reset event in this go and will
@@ -3323,10 +3353,15 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
return HCLGE_VECTOR0_EVENT_RST;
}
- /* check for vector0 msix event source */
- if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
- *clearval = msix_src_reg;
+ /* check for vector0 msix event and hardware error event source */
+ if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
+ hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
return HCLGE_VECTOR0_EVENT_ERR;
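+ /* clearval stays 0 for error events; vector0 is re-enabled by the
+ * error handling task once the source has been processed
+ */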
+
+ /* check for vector0 ptp event source */
+ if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
+ *clearval = msix_src_reg;
+ return HCLGE_VECTOR0_EVENT_PTP;
}
/* check for vector0 mailbox(=CMDQ RX) event source */
@@ -3338,9 +3373,8 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
/* print other vector0 event source */
dev_info(&hdev->pdev->dev,
- "CMDQ INT status:0x%x, other INT status:0x%x\n",
- cmdq_src_reg, msix_src_reg);
- *clearval = msix_src_reg;
+ "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
+ cmdq_src_reg, hw_err_src_reg, msix_src_reg);
return HCLGE_VECTOR0_EVENT_OTHER;
}
@@ -3349,6 +3383,7 @@ static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
u32 regclr)
{
switch (event_type) {
+ case HCLGE_VECTOR0_EVENT_PTP:
case HCLGE_VECTOR0_EVENT_RST:
hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
break;
@@ -3377,6 +3412,7 @@ static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
{
struct hclge_dev *hdev = data;
+ unsigned long flags;
u32 clearval = 0;
u32 event_cause;
@@ -3386,21 +3422,16 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
/* vector 0 interrupt is shared with reset and mailbox source events.*/
switch (event_cause) {
case HCLGE_VECTOR0_EVENT_ERR:
- /* we do not know what type of reset is required now. This could
- * only be decided after we fetch the type of errors which
- * caused this event. Therefore, we will do below for now:
- * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we
- * have defered type of reset to be used.
- * 2. Schedule the reset service task.
- * 3. When service task receives HNAE3_UNKNOWN_RESET type it
- * will fetch the correct type of reset. This would be done
- * by first decoding the types of errors.
- */
- set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
- fallthrough;
+ hclge_errhand_task_schedule(hdev);
+ break;
case HCLGE_VECTOR0_EVENT_RST:
hclge_reset_task_schedule(hdev);
break;
+ case HCLGE_VECTOR0_EVENT_PTP:
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+ hclge_ptp_clean_tx_hwts(hdev);
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+ break;
case HCLGE_VECTOR0_EVENT_MBX:
/* If we are here then,
* 1. Either we are not handling any mbx task and we are not
@@ -3421,15 +3452,11 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
hclge_clear_event_cause(hdev, event_cause, clearval);
- /* Enable interrupt if it is not cause by reset. And when
- * clearval equal to 0, it means interrupt status may be
- * cleared by hardware before driver reads status register.
- * For this case, vector0 interrupt also should be enabled.
- */
- if (!clearval ||
- event_cause == HCLGE_VECTOR0_EVENT_MBX) {
+ /* Enable interrupt if it is not caused by reset event or error event */
+ if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
+ event_cause == HCLGE_VECTOR0_EVENT_MBX ||
+ event_cause == HCLGE_VECTOR0_EVENT_OTHER)
hclge_enable_vector(&hdev->misc_vector, true);
- }
return IRQ_HANDLED;
}
@@ -3786,28 +3813,6 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
struct hclge_dev *hdev = ae_dev->priv;
- /* first, resolve any unknown reset type to the known type(s) */
- if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
- u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
- HCLGE_MISC_VECTOR_INT_STS);
- /* we will intentionally ignore any errors from this function
- * as we will end up in *some* reset request in any case
- */
- if (hclge_handle_hw_msix_error(hdev, addr))
- dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
- msix_sts_reg);
-
- clear_bit(HNAE3_UNKNOWN_RESET, addr);
- /* We defered the clearing of the error event which caused
- * interrupt since it was not posssible to do that in
- * interrupt context (and this is the reason we introduced
- * new UNKNOWN reset type). Now, the errors have been
- * handled and cleared in hardware we can safely enable
- * interrupts. This is an exception to the norm.
- */
- hclge_enable_vector(&hdev->misc_vector, true);
- }
-
/* return the highest priority reset level amongst all */
if (test_bit(HNAE3_IMP_RESET, addr)) {
rst_level = HNAE3_IMP_RESET;
@@ -4256,6 +4261,68 @@ static void hclge_reset_subtask(struct hclge_dev *hdev)
hdev->reset_type = HNAE3_NONE_RESET;
}
+static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ enum hnae3_reset_type reset_type;
+
+ if (ae_dev->hw_err_reset_req) {
+ reset_type = hclge_get_reset_level(ae_dev,
+ &ae_dev->hw_err_reset_req);
+ hclge_set_def_reset_request(ae_dev, reset_type);
+ }
+
+ if (hdev->default_reset_request && ae_dev->ops->reset_event)
+ ae_dev->ops->reset_event(hdev->pdev, NULL);
+
+ /* enable interrupt after error handling complete */
+ hclge_enable_vector(&hdev->misc_vector, true);
+}
+
+static void hclge_handle_err_recovery(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+ ae_dev->hw_err_reset_req = 0;
+
+ if (hclge_find_error_source(hdev)) {
+ hclge_handle_error_info_log(ae_dev);
+ hclge_handle_mac_tnl(hdev);
+ }
+
+ hclge_handle_err_reset_request(hdev);
+}
+
+static void hclge_misc_err_recovery(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct device *dev = &hdev->pdev->dev;
+ u32 msix_sts_reg;
+
+ msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
+ if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
+ if (hclge_handle_hw_msix_error
+ (hdev, &hdev->default_reset_request))
+ dev_info(dev, "received msix interrupt 0x%x\n",
+ msix_sts_reg);
+ }
+
+ hclge_handle_hw_ras_error(ae_dev);
+
+ hclge_handle_err_reset_request(hdev);
+}
+
+static void hclge_errhand_service_task(struct hclge_dev *hdev)
+{
+ if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
+ return;
+
+ if (hnae3_dev_ras_imp_supported(hdev))
+ hclge_handle_err_recovery(hdev);
+ else
+ hclge_misc_err_recovery(hdev);
+}
+
static void hclge_reset_service_task(struct hclge_dev *hdev)
{
if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
@@ -4334,19 +4401,43 @@ out:
hclge_task_schedule(hdev, delta);
}
+static void hclge_ptp_service_task(struct hclge_dev *hdev)
+{
+ unsigned long flags;
+
+ if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
+ !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
+ !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
+ return;
+
+ /* to prevent concurrence with the irq handler */
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+
+ /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
+ * handler may handle it just before spin_lock_irqsave().
+ */
+ if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
+ hclge_ptp_clean_tx_hwts(hdev);
+
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+}
+
static void hclge_service_task(struct work_struct *work)
{
struct hclge_dev *hdev =
container_of(work, struct hclge_dev, service_task.work);
+ hclge_errhand_service_task(hdev);
hclge_reset_service_task(hdev);
+ hclge_ptp_service_task(hdev);
hclge_mailbox_service_task(hdev);
hclge_periodic_service_task(hdev);
- /* Handle reset and mbx again in case periodical task delays the
- * handling by calling hclge_task_schedule() in
+ /* Handle error recovery, reset and mbx again in case periodical task
+ * delays the handling by calling hclge_task_schedule() in
* hclge_periodic_service_task().
*/
+ hclge_errhand_service_task(hdev);
hclge_reset_service_task(hdev);
hclge_mailbox_service_task(hdev);
}
@@ -5183,9 +5274,8 @@ static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_dev *hdev = vport->back;
- set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
}
static void hclge_sync_fd_state(struct hclge_dev *hdev)
@@ -8050,6 +8140,7 @@ int hclge_vport_start(struct hclge_vport *vport)
struct hclge_dev *hdev = vport->back;
set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
vport->last_active_jiffies = jiffies;
if (test_bit(vport->vport_id, hdev->vport_config_block)) {
@@ -8791,8 +8882,7 @@ static bool hclge_sync_from_add_list(struct list_head *add_list,
kfree(mac_node);
} else if (mac_node->state == HCLGE_MAC_ACTIVE) {
mac_node->state = HCLGE_MAC_TO_DEL;
- list_del(&mac_node->node);
- list_add_tail(&mac_node->node, mac_list);
+ list_move_tail(&mac_node->node, mac_list);
} else {
list_del(&mac_node->node);
kfree(mac_node);
@@ -8821,8 +8911,7 @@ static void hclge_sync_from_del_list(struct list_head *del_list,
list_del(&mac_node->node);
kfree(mac_node);
} else {
- list_del(&mac_node->node);
- list_add_tail(&mac_node->node, mac_list);
+ list_move_tail(&mac_node->node, mac_list);
}
}
}
@@ -8866,8 +8955,7 @@ static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
list_for_each_entry_safe(mac_node, tmp, list, node) {
switch (mac_node->state) {
case HCLGE_MAC_TO_DEL:
- list_del(&mac_node->node);
- list_add_tail(&mac_node->node, &tmp_del_list);
+ list_move_tail(&mac_node->node, &tmp_del_list);
break;
case HCLGE_MAC_TO_ADD:
new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
@@ -8949,8 +9037,7 @@ static void hclge_build_del_list(struct list_head *list,
switch (mac_cfg->state) {
case HCLGE_MAC_TO_DEL:
case HCLGE_MAC_ACTIVE:
- list_del(&mac_cfg->node);
- list_add_tail(&mac_cfg->node, tmp_del_list);
+ list_move_tail(&mac_cfg->node, tmp_del_list);
break;
case HCLGE_MAC_TO_ADD:
if (is_del_list) {
@@ -9045,8 +9132,7 @@ static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
switch (mac_node->state) {
case HCLGE_MAC_TO_DEL:
case HCLGE_MAC_ACTIVE:
- list_del(&mac_node->node);
- list_add_tail(&mac_node->node, &tmp_del_list);
+ list_move_tail(&mac_node->node, &tmp_del_list);
break;
case HCLGE_MAC_TO_ADD:
list_del(&mac_node->node);
@@ -9375,12 +9461,41 @@ static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- if (!hdev->hw.mac.phydev)
- return hclge_mii_ioctl(hdev, ifr, cmd);
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return hclge_ptp_get_cfg(hdev, ifr);
+ case SIOCSHWTSTAMP:
+ return hclge_ptp_set_cfg(hdev, ifr);
+ default:
+ if (!hdev->hw.mac.phydev)
+ return hclge_mii_ioctl(hdev, ifr, cmd);
+ }
return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
}
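For reference, the SIOCSHWTSTAMP branch added above is reached from userspace through the standard hardware timestamping ioctl. A minimal userspace sketch, assuming an illustrative interface name and an already-open socket (error handling elided):

	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/ioctl.h>

	int enable_hw_timestamping(int sockfd)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
		};
		struct ifreq ifr = { 0 };

		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* illustrative */
		ifr.ifr_data = (void *)&cfg;

		/* the driver may widen cfg.rx_filter to what it actually uses */
		return ioctl(sockfd, SIOCSHWTSTAMP, &ifr);
	}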
+static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
+ bool bypass_en)
+{
+ struct hclge_port_vlan_filter_bypass_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
+ req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
+ req->vf_id = vf_id;
+ hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
+ bypass_en ? 1 : 0);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to set vport%u port vlan filter bypass state, ret = %d.\n",
+ vf_id, ret);
+
+ return ret;
+}
+
static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
u8 fe_type, bool filter_en, u8 vf_id)
{
@@ -9414,37 +9529,99 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
return ret;
}
-#define HCLGE_FILTER_TYPE_VF 0
-#define HCLGE_FILTER_TYPE_PORT 1
-#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
-#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
-#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
-#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
-#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
-#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
- | HCLGE_FILTER_FE_ROCE_EGRESS_B)
-#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
- | HCLGE_FILTER_FE_ROCE_INGRESS_B)
+static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+ int ret;
-static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS_V1_B,
+ enable, vport->vport_id);
+
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS, enable,
+ vport->vport_id);
+ if (ret)
+ return ret;
+
+ if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps))
+ ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
+ !enable);
+ else if (!vport->vport_id)
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+ HCLGE_FILTER_FE_INGRESS,
+ enable, 0);
+
+ return ret;
+}
+
+static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
{
- struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hnae3_handle *handle = &vport->nic;
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
- if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
- hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
- HCLGE_FILTER_FE_EGRESS, enable, 0);
- hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
- HCLGE_FILTER_FE_INGRESS, enable, 0);
- } else {
- hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
- HCLGE_FILTER_FE_EGRESS_V1_B, enable,
- 0);
+ if (vport->vport_id) {
+ if (vport->port_base_vlan_cfg.state !=
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ return true;
+
+ if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
+ return false;
+ } else if (handle->netdev_flags & HNAE3_USER_UPE) {
+ return false;
}
- if (enable)
- handle->netdev_flags |= HNAE3_VLAN_FLTR;
- else
- handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
+
+ if (!vport->req_vlan_fltr_en)
+ return false;
+
+ /* for compatibility with older devices, always enable the vlan filter */
+ if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
+ return true;
+
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
+ if (vlan->vlan_id != 0)
+ return true;
+
+ return false;
+}
+
+int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
+{
+ struct hclge_dev *hdev = vport->back;
+ bool need_en;
+ int ret;
+
+ mutex_lock(&hdev->vport_lock);
+
+ vport->req_vlan_fltr_en = request_en;
+
+ need_en = hclge_need_enable_vport_vlan_filter(vport);
+ if (need_en == vport->cur_vlan_fltr_en) {
+ mutex_unlock(&hdev->vport_lock);
+ return 0;
+ }
+
+ ret = hclge_set_vport_vlan_filter(vport, need_en);
+ if (ret) {
+ mutex_unlock(&hdev->vport_lock);
+ return ret;
+ }
+
+ vport->cur_vlan_fltr_en = need_en;
+
+ mutex_unlock(&hdev->vport_lock);
+
+ return 0;
+}
+
+static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_enable_vport_vlan_filter(vport, enable);
}
static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
@@ -9724,7 +9901,7 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
u16 port_base_vlan_state,
- u16 vlan_tag)
+ u16 vlan_tag, u8 qos)
{
int ret;
@@ -9738,7 +9915,8 @@ static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
vport->txvlan_cfg.accept_tag1 =
ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
vport->txvlan_cfg.insert_tag1_en = true;
- vport->txvlan_cfg.default_tag1 = vlan_tag;
+ vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
+ vlan_tag;
}
vport->txvlan_cfg.accept_untag1 = true;
@@ -9837,6 +10015,7 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
vport->vport_id);
if (ret)
return ret;
+ vport->cur_vlan_fltr_en = true;
}
ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
@@ -9852,8 +10031,6 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
return ret;
}
- handle->netdev_flags |= HNAE3_VLAN_FLTR;
-
hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
@@ -9867,13 +10044,15 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
for (i = 0; i < hdev->num_alloc_vport; i++) {
u16 vlan_tag;
+ u8 qos;
vport = &hdev->vport[i];
vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
+ qos = vport->port_base_vlan_cfg.vlan_info.qos;
ret = hclge_vlan_offload_cfg(vport,
vport->port_base_vlan_cfg.state,
- vlan_tag);
+ vlan_tag, qos);
if (ret)
return ret;
}
@@ -10048,7 +10227,6 @@ static void hclge_restore_hw_table(struct hclge_dev *hdev)
hclge_restore_mac_table_common(vport);
hclge_restore_vport_vlan_table(vport);
- set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
hclge_restore_fd_entries(handle);
}
@@ -10075,6 +10253,14 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
return hclge_set_vlan_rx_offload_cfg(vport);
}
+static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+
+ if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
+ set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
+}
+
static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
u16 port_base_vlan_state,
struct hclge_vlan_info *new_info,
@@ -10085,6 +10271,10 @@ static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
hclge_rm_vport_all_vlan_table(vport, false);
+ /* force clear VLAN 0 */
+ ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
+ if (ret)
+ return ret;
return hclge_set_vlan_filter_hw(hdev,
htons(new_info->vlan_proto),
vport->vport_id,
@@ -10092,6 +10282,11 @@ static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
false);
}
+ /* force add VLAN 0 */
+ ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
+ if (ret)
+ return ret;
+
ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
vport->vport_id, old_info->vlan_tag,
true);
@@ -10101,6 +10296,18 @@ static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
return hclge_add_vport_all_vlan_table(vport);
}
+static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
+ const struct hclge_vlan_info *old_cfg)
+{
+ if (new_cfg->vlan_tag != old_cfg->vlan_tag)
+ return true;
+
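+ /* the tags are equal past this point; for an untagged config (vlan 0),
+ * the hardware vlan 0 entry only changes when a qos-only tag is added
+ * or removed, i.e. when one of the two qos values is 0
+ */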
+ if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
+ return true;
+
+ return false;
+}
+
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
struct hclge_vlan_info *vlan_info)
{
@@ -10111,10 +10318,14 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
- ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
+ ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
+ vlan_info->qos);
if (ret)
return ret;
+ if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
+ goto out;
+
if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
/* add new VLAN tag */
ret = hclge_set_vlan_filter_hw(hdev,
@@ -10126,15 +10337,23 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
return ret;
/* remove old VLAN tag */
- ret = hclge_set_vlan_filter_hw(hdev,
- htons(old_vlan_info->vlan_proto),
- vport->vport_id,
- old_vlan_info->vlan_tag,
- true);
- if (ret)
+ if (old_vlan_info->vlan_tag == 0)
+ ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
+ true, 0);
+ else
+ ret = hclge_set_vlan_filter_hw(hdev,
+ htons(ETH_P_8021Q),
+ vport->vport_id,
+ old_vlan_info->vlan_tag,
+ true);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to clear vport%u port base vlan %u, ret = %d.\n",
+ vport->vport_id, old_vlan_info->vlan_tag, ret);
return ret;
+ }
- goto update;
+ goto out;
}
ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
@@ -10142,38 +10361,38 @@ int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
if (ret)
return ret;
- /* update state only when disable/enable port based VLAN */
+out:
vport->port_base_vlan_cfg.state = state;
if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
else
nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
-update:
- vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
- vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
- vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
+ vport->port_base_vlan_cfg.vlan_info = *vlan_info;
+ hclge_set_vport_vlan_fltr_change(vport);
return 0;
}
static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
enum hnae3_port_base_vlan_state state,
- u16 vlan)
+ u16 vlan, u8 qos)
{
if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
- if (!vlan)
+ if (!vlan && !qos)
return HNAE3_PORT_BASE_VLAN_NOCHANGE;
- else
- return HNAE3_PORT_BASE_VLAN_ENABLE;
- } else {
- if (!vlan)
- return HNAE3_PORT_BASE_VLAN_DISABLE;
- else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
- return HNAE3_PORT_BASE_VLAN_NOCHANGE;
- else
- return HNAE3_PORT_BASE_VLAN_MODIFY;
+
+ return HNAE3_PORT_BASE_VLAN_ENABLE;
}
+
+ if (!vlan && !qos)
+ return HNAE3_PORT_BASE_VLAN_DISABLE;
+
+ if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
+ vport->port_base_vlan_cfg.vlan_info.qos == qos)
+ return HNAE3_PORT_BASE_VLAN_NOCHANGE;
+
+ return HNAE3_PORT_BASE_VLAN_MODIFY;
}
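As a reading aid, the decisions of hclge_get_port_base_vlan_state() above collapse to the following table (derived directly from the code):

	/*
	 *   current state   requested (vlan, qos)    result
	 *   DISABLE         (0, 0)                   NOCHANGE
	 *   DISABLE         anything else            ENABLE
	 *   other           (0, 0)                   DISABLE
	 *   other           unchanged (vlan, qos)    NOCHANGE
	 *   other           changed (vlan, qos)      MODIFY
	 */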
static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
@@ -10201,7 +10420,7 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
state = hclge_get_port_base_vlan_state(vport,
vport->port_base_vlan_cfg.state,
- vlan);
+ vlan, qos);
if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
return 0;
@@ -10224,8 +10443,7 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
vport->vport_id, state,
- vlan, qos,
- ntohs(proto));
+ &vlan_info);
return 0;
}
@@ -10295,9 +10513,37 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
*/
set_bit(vlan_id, vport->vlan_del_fail_bmap);
}
+
+ hclge_set_vport_vlan_fltr_change(vport);
+
return ret;
}
+static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport;
+ int ret;
+ u16 i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
+ &vport->state))
+ continue;
+
+ ret = hclge_enable_vport_vlan_filter(vport,
+ vport->req_vlan_fltr_en);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to sync vlan filter state for vport%u, ret = %d\n",
+ vport->vport_id, ret);
+ set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
+ &vport->state);
+ return;
+ }
+ }
+}
+
static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
{
#define HCLGE_MAX_SYNC_COUNT 60
@@ -10320,6 +10566,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
clear_bit(vlan_id, vport->vlan_del_fail_bmap);
hclge_rm_vport_vlan_table(vport, vlan_id, false);
+ hclge_set_vport_vlan_fltr_change(vport);
sync_cnt++;
if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
@@ -10329,6 +10576,8 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
VLAN_N_VID);
}
}
+
+ hclge_sync_vlan_fltr_state(hdev);
}
static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
@@ -10822,6 +11071,8 @@ static void hclge_info_show(struct hclge_dev *hdev)
hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
dev_info(dev, "MQPRIO %s\n",
hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
+ dev_info(dev, "Default tx spare buffer size: %u\n",
+ hdev->tx_spare_buf_size);
dev_info(dev, "PF info end.\n");
}
@@ -11336,6 +11587,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
+ ret = hclge_ptp_init(hdev);
+ if (ret)
+ goto err_mdiobus_unreg;
+
INIT_KFIFO(hdev->mac_tnl_log);
hclge_dcb_ops_set(hdev);
@@ -11352,7 +11607,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_clear_resetting_state(hdev);
/* Log and clear the hw errors that have already occurred */
- hclge_handle_all_hns_hw_errors(ae_dev);
+ if (hnae3_dev_ras_imp_supported(hdev))
+ hclge_handle_occurred_error(hdev);
+ else
+ hclge_handle_all_hns_hw_errors(ae_dev);
/* request delayed reset for the error recovery because an immediate
* global reset on a PF affecting pending initialization of other PFs
@@ -11500,10 +11758,7 @@ static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
u32 new_trusted = enable ? 1 : 0;
- bool en_bc_pmc;
- int ret;
vport = hclge_get_vf_vport(hdev, vf);
if (!vport)
@@ -11512,18 +11767,9 @@ static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
if (vport->vf_info.trusted == new_trusted)
return 0;
- /* Disable promisc mode for VF if it is not trusted any more. */
- if (!enable && vport->vf_info.promisc_enable) {
- en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
- ret = hclge_set_vport_promisc_mode(vport, false, false,
- en_bc_pmc);
- if (ret)
- return ret;
- vport->vf_info.promisc_enable = 0;
- hclge_inform_vf_promisc_info(vport);
- }
-
vport->vf_info.trusted = new_trusted;
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
+ hclge_task_schedule(hdev, 0);
return 0;
}
@@ -11716,8 +11962,15 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ ret = hclge_ptp_init(hdev);
+ if (ret)
+ return ret;
+
/* Log and clear the hw errors that have already occurred */
- hclge_handle_all_hns_hw_errors(ae_dev);
+ if (hnae3_dev_ras_imp_supported(hdev))
+ hclge_handle_occurred_error(hdev);
+ else
+ hclge_handle_all_hns_hw_errors(ae_dev);
/* Re-enable the hw error interrupts because
* the interrupts get disabled on global reset.
@@ -11766,6 +12019,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_clear_vf_vlan(hdev);
hclge_misc_affinity_teardown(hdev);
hclge_state_uninit(hdev);
+ hclge_ptp_uninit(hdev);
hclge_uninit_rxd_adv_layout(hdev);
hclge_uninit_mac_table(hdev);
hclge_del_all_fd_entries(hdev);
@@ -12417,21 +12671,50 @@ static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
struct hnae3_handle *handle = &vport->nic;
u8 tmp_flags;
int ret;
+ u16 i;
if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
- set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
vport->last_promisc_flags = vport->overflow_promisc_flags;
}
- if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
+ if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
tmp_flags & HNAE3_MPE);
if (!ret) {
- clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
- hclge_enable_vlan_filter(handle,
- tmp_flags & HNAE3_VLAN_FLTR);
+ clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
+ &vport->state);
+ set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
+ &vport->state);
+ }
+ }
+
+ for (i = 1; i < hdev->num_alloc_vport; i++) {
+ bool uc_en = false;
+ bool mc_en = false;
+ bool bc_en;
+
+ vport = &hdev->vport[i];
+
+ if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
+ &vport->state))
+ continue;
+
+ if (vport->vf_info.trusted) {
+ uc_en = vport->vf_info.request_uc_en > 0;
+ mc_en = vport->vf_info.request_mc_en > 0;
+ }
+ bc_en = vport->vf_info.request_bc_en > 0;
+
+ ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
+ mc_en, bc_en);
+ if (ret) {
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
+ &vport->state);
+ return;
}
+ hclge_set_vport_vlan_fltr_change(vport);
}
}
@@ -12633,6 +12916,9 @@ static const struct hnae3_ae_ops hclge_ops = {
.cls_flower_active = hclge_is_cls_flower_active,
.get_phy_link_ksettings = hclge_get_phy_link_ksettings,
.set_phy_link_ksettings = hclge_set_phy_link_ksettings,
+ .set_tx_hwts_info = hclge_ptp_set_tx_info,
+ .get_rx_hwts = hclge_ptp_get_rx_hwts,
+ .get_ts_info = hclge_ptp_get_ts_info,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 4bdb0243a97a..3d3352491dba 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -10,6 +10,7 @@
#include <linux/kfifo.h>
#include "hclge_cmd.h"
+#include "hclge_ptp.h"
#include "hnae3.h"
#define HCLGE_MOD_VERSION "1.0"
@@ -178,6 +179,7 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_FUN_RST_ING_B 0
/* Vector0 register bits define */
+#define HCLGE_VECTOR0_REG_PTP_INT_B 0
#define HCLGE_VECTOR0_GLOBALRESET_INT_B 5
#define HCLGE_VECTOR0_CORERESET_INT_B 6
#define HCLGE_VECTOR0_IMPRESET_INT_B 7
@@ -190,6 +192,7 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_VECTOR0_IMP_RESET_INT_B 1
#define HCLGE_VECTOR0_IMP_CMDQ_ERR_B 4U
#define HCLGE_VECTOR0_IMP_RD_POISON_B 5U
+#define HCLGE_VECTOR0_ALL_MSIX_ERR_B 6U
#define HCLGE_MAC_DEFAULT_FRAME \
(ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
@@ -221,14 +224,16 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_RST_HANDLING,
HCLGE_STATE_MBX_SERVICE_SCHED,
HCLGE_STATE_MBX_HANDLING,
+ HCLGE_STATE_ERR_SERVICE_SCHED,
HCLGE_STATE_STATISTICS_UPDATING,
HCLGE_STATE_CMD_DISABLE,
HCLGE_STATE_LINK_UPDATING,
- HCLGE_STATE_PROMISC_CHANGED,
HCLGE_STATE_RST_FAIL,
HCLGE_STATE_FD_TBL_CHANGED,
HCLGE_STATE_FD_CLEAR_ALL,
HCLGE_STATE_FD_USER_DEF_CHANGED,
+ HCLGE_STATE_PTP_EN,
+ HCLGE_STATE_PTP_TX_HANDLING,
HCLGE_STATE_MAX
};
@@ -236,6 +241,7 @@ enum hclge_evt_cause {
HCLGE_VECTOR0_EVENT_RST,
HCLGE_VECTOR0_EVENT_MBX,
HCLGE_VECTOR0_EVENT_ERR,
+ HCLGE_VECTOR0_EVENT_PTP,
HCLGE_VECTOR0_EVENT_OTHER,
};
@@ -322,6 +328,22 @@ enum hclge_fc_mode {
HCLGE_FC_DEFAULT
};
+#define HCLGE_FILTER_TYPE_VF 0
+#define HCLGE_FILTER_TYPE_PORT 1
+#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
+#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
+#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
+#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
+#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
+#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
+ | HCLGE_FILTER_FE_ROCE_EGRESS_B)
+#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
+ | HCLGE_FILTER_FE_ROCE_INGRESS_B)
+
+enum hclge_vlan_fltr_cap {
+ HCLGE_VLAN_FLTR_DEF,
+ HCLGE_VLAN_FLTR_CAN_MDF,
+};
enum hclge_link_fail_code {
HCLGE_LF_NORMAL,
HCLGE_LF_REF_CLOCK_LOST,
@@ -352,6 +374,7 @@ struct hclge_tc_info {
struct hclge_cfg {
u8 tc_num;
+ u8 vlan_fliter_cap;
u16 tqp_desc_num;
u16 rx_buf_len;
u16 vf_rss_size_max;
@@ -361,6 +384,7 @@ struct hclge_cfg {
u8 mac_addr[ETH_ALEN];
u8 default_speed;
u32 numa_node_map;
+ u32 tx_spare_buf_size;
u16 speed_ability;
u16 umv_space;
};
@@ -760,9 +784,14 @@ struct hclge_mac_tnl_stats {
struct hclge_vf_vlan_cfg {
u8 mbx_cmd;
u8 subcode;
- u8 is_kill;
- u16 vlan;
- u16 proto;
+ union {
+ struct {
+ u8 is_kill;
+ u16 vlan;
+ u16 proto;
+ };
+ u8 enable;
+ };
};
#pragma pack()
@@ -820,6 +849,7 @@ struct hclge_dev {
u16 alloc_rss_size; /* Allocated RSS task queue */
u16 vf_rss_size_max; /* HW defined VF max RSS task queue */
u16 pf_rss_size_max; /* HW defined PF max RSS task queue */
+ u32 tx_spare_buf_size; /* HW defined TX spare buffer size */
u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
u16 num_alloc_vport; /* Num vports this driver supports */
@@ -912,6 +942,7 @@ struct hclge_dev {
/* affinity mask and notify for misc interrupt */
cpumask_t affinity_mask;
struct irq_affinity_notify affinity_notify;
+ struct hclge_ptp *ptp;
};
/* VPort level vlan tag configuration for TX direction */
@@ -952,6 +983,8 @@ struct hclge_rss_tuple_cfg {
enum HCLGE_VPORT_STATE {
HCLGE_VPORT_STATE_ALIVE,
HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
+ HCLGE_VPORT_STATE_PROMISC_CHANGE,
+ HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
HCLGE_VPORT_STATE_MAX
};
@@ -972,7 +1005,9 @@ struct hclge_vf_info {
u32 spoofchk;
u32 max_tx_rate;
u32 trusted;
- u16 promisc_enable;
+ u8 request_uc_en;
+ u8 request_mc_en;
+ u8 request_bc_en;
};
struct hclge_vport {
@@ -991,6 +1026,8 @@ struct hclge_vport {
u32 bw_limit; /* VSI BW Limit (0 = disabled) */
u8 dwrr;
+ bool req_vlan_fltr_en;
+ bool cur_vlan_fltr_en;
unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
struct hclge_port_base_vlan_config port_base_vlan_cfg;
struct hclge_tx_vtag_cfg txvlan_cfg;
@@ -1082,8 +1119,8 @@ void hclge_restore_vport_vlan_table(struct hclge_vport *vport);
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
struct hclge_vlan_info *vlan_info);
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
- u16 state, u16 vlan_tag, u16 qos,
- u16 vlan_proto);
+ u16 state,
+ struct hclge_vlan_info *vlan_info);
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time);
int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
struct hclge_desc *desc);
@@ -1092,4 +1129,5 @@ void hclge_report_hw_error(struct hclge_dev *hdev,
void hclge_inform_vf_promisc_info(struct hclge_vport *vport);
int hclge_dbg_dump_rst_info(struct hclge_dev *hdev, char *buf, int len);
int hclge_push_vf_link_status(struct hclge_vport *vport);
+int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 8e5f9dc8791d..e10a2c36b706 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -231,19 +231,15 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
return ret;
}
-static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
- struct hclge_mbx_vf_to_pf_cmd *req)
+static void hclge_set_vf_promisc_mode(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *req)
{
- bool en_bc = req->msg.en_bc ? true : false;
- bool en_uc = req->msg.en_uc ? true : false;
- bool en_mc = req->msg.en_mc ? true : false;
struct hnae3_handle *handle = &vport->nic;
- int ret;
+ struct hclge_dev *hdev = vport->back;
- if (!vport->vf_info.trusted) {
- en_uc = false;
- en_mc = false;
- }
+ vport->vf_info.request_uc_en = req->msg.en_uc;
+ vport->vf_info.request_mc_en = req->msg.en_mc;
+ vport->vf_info.request_bc_en = req->msg.en_bc;
if (req->msg.en_limit_promisc)
set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags);
@@ -251,22 +247,8 @@ static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
clear_bit(HNAE3_PFLAG_LIMIT_PROMISC,
&handle->priv_flags);
- ret = hclge_set_vport_promisc_mode(vport, en_uc, en_mc, en_bc);
-
- vport->vf_info.promisc_enable = (en_uc || en_mc) ? 1 : 0;
-
- return ret;
-}
-
-void hclge_inform_vf_promisc_info(struct hclge_vport *vport)
-{
- u8 dest_vfid = (u8)vport->vport_id;
- u8 msg_data[2];
-
- memcpy(&msg_data[0], &vport->vf_info.promisc_enable, sizeof(u16));
-
- hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
- HCLGE_MBX_PUSH_PROMISC_INFO, dest_vfid);
+ set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
+ hclge_task_schedule(hdev, 0);
}
static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
@@ -336,17 +318,17 @@ static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
}
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
- u16 state, u16 vlan_tag, u16 qos,
- u16 vlan_proto)
+ u16 state,
+ struct hclge_vlan_info *vlan_info)
{
#define MSG_DATA_SIZE 8
u8 msg_data[MSG_DATA_SIZE];
memcpy(&msg_data[0], &state, sizeof(u16));
- memcpy(&msg_data[2], &vlan_proto, sizeof(u16));
- memcpy(&msg_data[4], &qos, sizeof(u16));
- memcpy(&msg_data[6], &vlan_tag, sizeof(u16));
+ memcpy(&msg_data[2], &vlan_info->vlan_proto, sizeof(u16));
+ memcpy(&msg_data[4], &vlan_info->qos, sizeof(u16));
+ memcpy(&msg_data[6], &vlan_info->vlan_tag, sizeof(u16));
return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
HCLGE_MBX_PUSH_VLAN_INFO, vfid);
@@ -359,49 +341,35 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
#define HCLGE_MBX_VLAN_STATE_OFFSET 0
#define HCLGE_MBX_VLAN_INFO_OFFSET 2
+ struct hnae3_handle *handle = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
struct hclge_vf_vlan_cfg *msg_cmd;
- int status = 0;
msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
- if (msg_cmd->subcode == HCLGE_MBX_VLAN_FILTER) {
- struct hnae3_handle *handle = &vport->nic;
- u16 vlan, proto;
- bool is_kill;
-
- is_kill = !!msg_cmd->is_kill;
- vlan = msg_cmd->vlan;
- proto = msg_cmd->proto;
- status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
- vlan, is_kill);
- } else if (msg_cmd->subcode == HCLGE_MBX_VLAN_RX_OFF_CFG) {
- struct hnae3_handle *handle = &vport->nic;
- bool en = msg_cmd->is_kill ? true : false;
-
- status = hclge_en_hw_strip_rxvtag(handle, en);
- } else if (msg_cmd->subcode == HCLGE_MBX_PORT_BASE_VLAN_CFG) {
- struct hclge_vlan_info *vlan_info;
- u16 *state;
-
- state = (u16 *)&mbx_req->msg.data[HCLGE_MBX_VLAN_STATE_OFFSET];
- vlan_info = (struct hclge_vlan_info *)
- &mbx_req->msg.data[HCLGE_MBX_VLAN_INFO_OFFSET];
- status = hclge_update_port_base_vlan_cfg(vport, *state,
- vlan_info);
- } else if (msg_cmd->subcode == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) {
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
+ switch (msg_cmd->subcode) {
+ case HCLGE_MBX_VLAN_FILTER:
+ return hclge_set_vlan_filter(handle,
+ cpu_to_be16(msg_cmd->proto),
+ msg_cmd->vlan, msg_cmd->is_kill);
+ case HCLGE_MBX_VLAN_RX_OFF_CFG:
+ return hclge_en_hw_strip_rxvtag(handle, msg_cmd->enable);
+ case HCLGE_MBX_GET_PORT_BASE_VLAN_STATE:
/* vf does not need to know about the port based VLAN state on
* HNAE3_DEVICE_VERSION_V3 devices, so always report disable when
* the vf queries it.
*/
resp_msg->data[0] =
- ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3 ?
+ hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3 ?
HNAE3_PORT_BASE_VLAN_DISABLE :
vport->port_base_vlan_cfg.state;
resp_msg->len = sizeof(u8);
+ return 0;
+ case HCLGE_MBX_ENABLE_VLAN_FILTER:
+ return hclge_enable_vport_vlan_filter(vport, msg_cmd->enable);
+ default:
+ return 0;
}
-
- return status;
}
static int hclge_set_vf_alive(struct hclge_vport *vport,
@@ -418,16 +386,23 @@ static int hclge_set_vf_alive(struct hclge_vport *vport,
return ret;
}
-static void hclge_get_vf_tcinfo(struct hclge_vport *vport,
- struct hclge_respond_to_vf_msg *resp_msg)
+static void hclge_get_basic_info(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hnae3_ae_dev *ae_dev = vport->back->ae_dev;
+ struct hclge_basic_info *basic_info;
unsigned int i;
+ basic_info = (struct hclge_basic_info *)resp_msg->data;
for (i = 0; i < kinfo->tc_info.num_tc; i++)
- resp_msg->data[0] |= BIT(i);
+ basic_info->hw_tc_map |= BIT(i);
- resp_msg->len = sizeof(u8);
+ if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
+ hnae3_set_bit(basic_info->pf_caps,
+ HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, 1);
+
+ resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
}
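Both the PF side here and the VF side in hclgevf_get_basic_info() cast the raw mailbox response to struct hclge_basic_info. The definition lives in hclge_mbx.h, which is not part of this excerpt; reconstructed from the accesses on both ends, the layout is assumed to be roughly:

	struct hclge_basic_info {
		u8 hw_tc_map;
		u8 rsv;
		u16 mbx_api_version;
		u32 pf_caps;	/* HNAE3_PF_SUPPORT_* capability bits */
	};

i.e. small enough to fit in the HCLGE_MBX_MAX_RESP_DATA_SIZE response buffer used on both sides.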
static void hclge_get_vf_queue_info(struct hclge_vport *vport,
@@ -710,7 +685,6 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
unsigned int flag;
int ret = 0;
- memset(&resp_msg, 0, sizeof(resp_msg));
/* handle all the mailbox requests in the queue */
while (!hclge_cmd_crq_empty(&hdev->hw)) {
if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
@@ -738,6 +712,9 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
trace_hclge_pf_mbx_get(hdev, req);
+ /* clear the resp_msg before processing every mailbox message */
+ memset(&resp_msg, 0, sizeof(resp_msg));
+
switch (req->msg.code) {
case HCLGE_MBX_MAP_RING_TO_VECTOR:
ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
@@ -748,11 +725,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
req);
break;
case HCLGE_MBX_SET_PROMISC_MODE:
- ret = hclge_set_vf_promisc_mode(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to set VF promisc mode\n",
- ret);
+ hclge_set_vf_promisc_mode(vport, req);
break;
case HCLGE_MBX_SET_UNICAST:
ret = hclge_set_vf_uc_mac_addr(vport, req);
@@ -788,8 +761,8 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
case HCLGE_MBX_GET_QDEPTH:
hclge_get_vf_queue_depth(vport, &resp_msg);
break;
- case HCLGE_MBX_GET_TCINFO:
- hclge_get_vf_tcinfo(vport, &resp_msg);
+ case HCLGE_MBX_GET_BASIC_INFO:
+ hclge_get_basic_info(vport, &resp_msg);
break;
case HCLGE_MBX_GET_LINK_STATUS:
ret = hclge_push_vf_link_status(vport);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
new file mode 100644
index 000000000000..b3eb8f109dbb
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2021 Hisilicon Limited.
+
+#include <linux/skbuff.h>
+#include "hclge_main.h"
+#include "hnae3.h"
+
+static int hclge_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
+ u64 adj_val, adj_base, diff;
+ unsigned long flags;
+ bool is_neg = false;
+ u32 quo, numerator;
+
+ if (ppb < 0) {
+ ppb = -ppb;
+ is_neg = true;
+ }
+
+ adj_base = HCLGE_PTP_CYCLE_ADJ_BASE * HCLGE_PTP_CYCLE_ADJ_UNIT;
+ adj_val = adj_base * ppb;
+ diff = div_u64(adj_val, 1000000000ULL);
+
+ if (is_neg)
+ adj_val = adj_base - diff;
+ else
+ adj_val = adj_base + diff;
+
+ /* This clock cycle is defined by three parts: quotient, numerator
+ * and denominator. For example, for a 2.5 ns cycle the quotient is 2,
+ * the denominator is fixed to HCLGE_PTP_CYCLE_ADJ_UNIT, and the
+ * numerator is 0.5 * HCLGE_PTP_CYCLE_ADJ_UNIT.
+ */
+ quo = div_u64_rem(adj_val, HCLGE_PTP_CYCLE_ADJ_UNIT, &numerator);
+
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+ writel(quo, hdev->ptp->io_base + HCLGE_PTP_CYCLE_QUO_REG);
+ writel(numerator, hdev->ptp->io_base + HCLGE_PTP_CYCLE_NUM_REG);
+ writel(HCLGE_PTP_CYCLE_ADJ_UNIT,
+ hdev->ptp->io_base + HCLGE_PTP_CYCLE_DEN_REG);
+ writel(HCLGE_PTP_CYCLE_ADJ_EN,
+ hdev->ptp->io_base + HCLGE_PTP_CYCLE_CFG_REG);
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+
+ return 0;
+}
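A worked example of the register programming above, using the constants from hclge_ptp.h further down (HCLGE_PTP_CYCLE_ADJ_BASE = 2, HCLGE_PTP_CYCLE_ADJ_UNIT = 100000000) with ppb = +100:

	adj_base  = 2 * 100000000           = 200000000
	diff      = adj_base * 100 / 10^9   = 20
	adj_val   = adj_base + diff         = 200000020
	quo       = adj_val / ADJ_UNIT      = 2
	numerator = adj_val % ADJ_UNIT      = 20

so the programmed cycle is 2 + 20/100000000 ns = 2.0000002 ns, a 100 ppb change relative to the 2 ns base cycle.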
+
+bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_ptp *ptp = hdev->ptp;
+
+ if (!test_bit(HCLGE_PTP_FLAG_TX_EN, &ptp->flags) ||
+ test_and_set_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state)) {
+ ptp->tx_skipped++;
+ return false;
+ }
+
+ ptp->tx_start = jiffies;
+ ptp->tx_skb = skb_get(skb);
+ ptp->tx_cnt++;
+
+ return true;
+}
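hclge_ptp_set_tx_info() is exported through the set_tx_hwts_info hook added to hclge_ops at the end of this file. A plausible call site in the transmit path, sketched for illustration (the hook name and return semantics come from this patch; h is the hnae3 handle, and the surrounding code is assumed, not quoted):

	/* in the xmit path, before the skb is queued to hardware */
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
	    h->ae_algo->ops->set_tx_hwts_info(h, skb))
		/* the driver holds its own reference and will timestamp it */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;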
+
+void hclge_ptp_clean_tx_hwts(struct hclge_dev *hdev)
+{
+ struct sk_buff *skb = hdev->ptp->tx_skb;
+ struct skb_shared_hwtstamps hwts;
+ u32 hi, lo;
+ u64 ns;
+
+ ns = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_NSEC_REG) &
+ HCLGE_PTP_TX_TS_NSEC_MASK;
+ lo = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_SEC_L_REG);
+ hi = readl(hdev->ptp->io_base + HCLGE_PTP_TX_TS_SEC_H_REG) &
+ HCLGE_PTP_TX_TS_SEC_H_MASK;
+ hdev->ptp->last_tx_seqid = readl(hdev->ptp->io_base +
+ HCLGE_PTP_TX_TS_SEQID_REG);
+
+ if (skb) {
+ hdev->ptp->tx_skb = NULL;
+ hdev->ptp->tx_cleaned++;
+
+ ns += (((u64)hi) << HCLGE_PTP_SEC_H_OFFSET | lo) * NSEC_PER_SEC;
+ hwts.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &hwts);
+ dev_kfree_skb_any(skb);
+ }
+
+ clear_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state);
+}
+
+void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb,
+ u32 nsec, u32 sec)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ unsigned long flags;
+ u64 ns = nsec;
+ u32 sec_h;
+
+ if (!test_bit(HCLGE_PTP_FLAG_RX_EN, &hdev->ptp->flags))
+ return;
+
+ /* The BD does not have enough space for the upper 16 bits of the
+ * seconds field, and that part changes infrequently, so read it
+ * from the register instead.
+ */
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+ sec_h = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_H_REG);
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+
+ ns += (((u64)sec_h) << HCLGE_PTP_SEC_H_OFFSET | sec) * NSEC_PER_SEC;
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
+ hdev->ptp->last_rx = jiffies;
+ hdev->ptp->rx_cnt++;
+}
+
+static int hclge_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
+ unsigned long flags;
+ u32 hi, lo;
+ u64 ns;
+
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+ ns = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_NSEC_REG);
+ hi = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_H_REG);
+ lo = readl(hdev->ptp->io_base + HCLGE_PTP_CUR_TIME_SEC_L_REG);
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+
+ ns += (((u64)hi) << HCLGE_PTP_SEC_H_OFFSET | lo) * NSEC_PER_SEC;
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int hclge_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+ writel(ts->tv_nsec, hdev->ptp->io_base + HCLGE_PTP_TIME_NSEC_REG);
+ writel(ts->tv_sec >> HCLGE_PTP_SEC_H_OFFSET,
+ hdev->ptp->io_base + HCLGE_PTP_TIME_SEC_H_REG);
+ writel(ts->tv_sec & HCLGE_PTP_SEC_L_MASK,
+ hdev->ptp->io_base + HCLGE_PTP_TIME_SEC_L_REG);
+ /* synchronize the time of phc */
+ writel(HCLGE_PTP_TIME_SYNC_EN,
+ hdev->ptp->io_base + HCLGE_PTP_TIME_SYNC_REG);
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+
+ return 0;
+}
+
+static int hclge_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct hclge_dev *hdev = hclge_ptp_get_hdev(ptp);
+ unsigned long flags;
+ bool is_neg = false;
+ u32 adj_val = 0;
+
+ if (delta < 0) {
+ adj_val |= HCLGE_PTP_TIME_NSEC_NEG;
+ delta = -delta;
+ is_neg = true;
+ }
+
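+ /* the hardware nsec adjust field is 30 bits wide (about 1.07 s);
+ * larger offsets fall back to reading and rewriting the absolute time
+ */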
+ if (delta > HCLGE_PTP_TIME_NSEC_MASK) {
+ struct timespec64 ts;
+ s64 ns;
+
+ hclge_ptp_gettimex(ptp, &ts, NULL);
+ ns = timespec64_to_ns(&ts);
+ ns = is_neg ? ns - delta : ns + delta;
+ ts = ns_to_timespec64(ns);
+ return hclge_ptp_settime(ptp, &ts);
+ }
+
+ adj_val |= delta & HCLGE_PTP_TIME_NSEC_MASK;
+
+ spin_lock_irqsave(&hdev->ptp->lock, flags);
+ writel(adj_val, hdev->ptp->io_base + HCLGE_PTP_TIME_NSEC_REG);
+ writel(HCLGE_PTP_TIME_ADJ_EN,
+ hdev->ptp->io_base + HCLGE_PTP_TIME_ADJ_REG);
+ spin_unlock_irqrestore(&hdev->ptp->lock, flags);
+
+ return 0;
+}
+
+int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr)
+{
+ if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state))
+ return -EOPNOTSUPP;
+
+ return copy_to_user(ifr->ifr_data, &hdev->ptp->ts_cfg,
+ sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
+}
+
+static int hclge_ptp_int_en(struct hclge_dev *hdev, bool en)
+{
+ struct hclge_ptp_int_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ req = (struct hclge_ptp_int_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_INT_EN, false);
+ req->int_en = en ? 1 : 0;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to %s ptp interrupt, ret = %d\n",
+ en ? "enable" : "disable", ret);
+
+ return ret;
+}
+
+int hclge_ptp_cfg_qry(struct hclge_dev *hdev, u32 *cfg)
+{
+ struct hclge_ptp_cfg_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ req = (struct hclge_ptp_cfg_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_MODE_CFG, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to query ptp config, ret = %d\n", ret);
+ return ret;
+ }
+
+ *cfg = le32_to_cpu(req->cfg);
+
+ return 0;
+}
+
+static int hclge_ptp_cfg(struct hclge_dev *hdev, u32 cfg)
+{
+ struct hclge_ptp_cfg_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ req = (struct hclge_ptp_cfg_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PTP_MODE_CFG, false);
+ req->cfg = cpu_to_le32(cfg);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to config ptp, ret = %d\n", ret);
+
+ return ret;
+}
+
+static int hclge_ptp_set_tx_mode(struct hwtstamp_config *cfg,
+ unsigned long *flags, u32 *ptp_cfg)
+{
+ switch (cfg->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ clear_bit(HCLGE_PTP_FLAG_TX_EN, flags);
+ break;
+ case HWTSTAMP_TX_ON:
+ set_bit(HCLGE_PTP_FLAG_TX_EN, flags);
+ *ptp_cfg |= HCLGE_PTP_TX_EN_B;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int hclge_ptp_set_rx_mode(struct hwtstamp_config *cfg,
+ unsigned long *flags, u32 *ptp_cfg)
+{
+ int rx_filter = cfg->rx_filter;
+
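+ /* requests the hardware cannot match exactly are widened to the
+ * nearest supported event filter; the value actually applied is
+ * written back to cfg->rx_filter and reported to userspace
+ */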
+ switch (cfg->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ clear_bit(HCLGE_PTP_FLAG_RX_EN, flags);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ set_bit(HCLGE_PTP_FLAG_RX_EN, flags);
+ *ptp_cfg |= HCLGE_PTP_RX_EN_B;
+ *ptp_cfg |= HCLGE_PTP_UDP_FULL_TYPE << HCLGE_PTP_UDP_EN_SHIFT;
+ rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ set_bit(HCLGE_PTP_FLAG_RX_EN, flags);
+ *ptp_cfg |= HCLGE_PTP_RX_EN_B;
+ *ptp_cfg |= HCLGE_PTP_UDP_FULL_TYPE << HCLGE_PTP_UDP_EN_SHIFT;
+ *ptp_cfg |= HCLGE_PTP_MSG1_V2_DEFAULT << HCLGE_PTP_MSG1_SHIFT;
+ *ptp_cfg |= HCLGE_PTP_MSG0_V2_EVENT << HCLGE_PTP_MSG0_SHIFT;
+ *ptp_cfg |= HCLGE_PTP_MSG_TYPE_V2 << HCLGE_PTP_MSG_TYPE_SHIFT;
+ rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ default:
+ return -ERANGE;
+ }
+
+ cfg->rx_filter = rx_filter;
+
+ return 0;
+}
+
+static int hclge_ptp_set_ts_mode(struct hclge_dev *hdev,
+ struct hwtstamp_config *cfg)
+{
+ unsigned long flags = hdev->ptp->flags;
+ u32 ptp_cfg = 0;
+ int ret;
+
+ if (test_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags))
+ ptp_cfg |= HCLGE_PTP_EN_B;
+
+ ret = hclge_ptp_set_tx_mode(cfg, &flags, &ptp_cfg);
+ if (ret)
+ return ret;
+
+ ret = hclge_ptp_set_rx_mode(cfg, &flags, &ptp_cfg);
+ if (ret)
+ return ret;
+
+ ret = hclge_ptp_cfg(hdev, ptp_cfg);
+ if (ret)
+ return ret;
+
+ hdev->ptp->flags = flags;
+ hdev->ptp->ptp_cfg = ptp_cfg;
+
+ return 0;
+}
+
+int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr)
+{
+ struct hwtstamp_config cfg;
+ int ret;
+
+ if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) {
+ dev_err(&hdev->pdev->dev, "phc is unsupported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ ret = hclge_ptp_set_ts_mode(hdev, &cfg);
+ if (ret)
+ return ret;
+
+ hdev->ptp->ts_cfg = cfg;
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+int hclge_ptp_get_ts_info(struct hnae3_handle *handle,
+ struct ethtool_ts_info *info)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state)) {
+ dev_err(&hdev->pdev->dev, "phc is unsupported\n");
+ return -EOPNOTSUPP;
+ }
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (hdev->ptp->clock)
+ info->phc_index = ptp_clock_index(hdev->ptp->clock);
+ else
+ info->phc_index = -1;
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ);
+
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ);
+
+ return 0;
+}
+
+static int hclge_ptp_create_clock(struct hclge_dev *hdev)
+{
+#define HCLGE_PTP_NAME_LEN 32
+
+ struct hclge_ptp *ptp;
+
+ ptp = devm_kzalloc(&hdev->pdev->dev, sizeof(*ptp), GFP_KERNEL);
+ if (!ptp)
+ return -ENOMEM;
+
+ ptp->hdev = hdev;
+ snprintf(ptp->info.name, HCLGE_PTP_NAME_LEN, "%s",
+ HCLGE_DRIVER_NAME);
+ ptp->info.owner = THIS_MODULE;
+ ptp->info.max_adj = HCLGE_PTP_CYCLE_ADJ_MAX;
+ ptp->info.n_ext_ts = 0;
+ ptp->info.pps = 0;
+ ptp->info.adjfreq = hclge_ptp_adjfreq;
+ ptp->info.adjtime = hclge_ptp_adjtime;
+ ptp->info.gettimex64 = hclge_ptp_gettimex;
+ ptp->info.settime64 = hclge_ptp_settime;
+
+ ptp->info.n_alarm = 0;
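+ /* ptp_clock_register() returns an ERR_PTR on failure, or NULL when
+ * the kernel is built without CONFIG_PTP_1588_CLOCK, hence the two
+ * separate checks below
+ */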
+ ptp->clock = ptp_clock_register(&ptp->info, &hdev->pdev->dev);
+ if (IS_ERR(ptp->clock)) {
+ dev_err(&hdev->pdev->dev,
+ "failed to register ptp clock, ret = %ld\n",
+ PTR_ERR(ptp->clock));
+ return -ENODEV;
+ } else if (!ptp->clock) {
+ dev_err(&hdev->pdev->dev, "failed to register ptp clock\n");
+ return -ENODEV;
+ }
+
+ spin_lock_init(&ptp->lock);
+ ptp->io_base = hdev->hw.io_base + HCLGE_PTP_REG_OFFSET;
+ ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
+ hdev->ptp = ptp;
+
+ return 0;
+}
+
+static void hclge_ptp_destroy_clock(struct hclge_dev *hdev)
+{
+ ptp_clock_unregister(hdev->ptp->clock);
+ hdev->ptp->clock = NULL;
+ devm_kfree(&hdev->pdev->dev, hdev->ptp);
+ hdev->ptp = NULL;
+}
+
+int hclge_ptp_init(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct timespec64 ts;
+ int ret;
+
+ if (!test_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps))
+ return 0;
+
+ if (!hdev->ptp) {
+ ret = hclge_ptp_create_clock(hdev);
+ if (ret)
+ return ret;
+ }
+
+ ret = hclge_ptp_int_en(hdev, true);
+ if (ret)
+ goto out;
+
+ set_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags);
+ ret = hclge_ptp_adjfreq(&hdev->ptp->info, 0);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to init freq, ret = %d\n", ret);
+ goto out;
+ }
+
+ ret = hclge_ptp_set_ts_mode(hdev, &hdev->ptp->ts_cfg);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to init ts mode, ret = %d\n", ret);
+ goto out;
+ }
+
+ ktime_get_real_ts64(&ts);
+ ret = hclge_ptp_settime(&hdev->ptp->info, &ts);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to init ts time, ret = %d\n", ret);
+ goto out;
+ }
+
+ set_bit(HCLGE_STATE_PTP_EN, &hdev->state);
+ dev_info(&hdev->pdev->dev, "phc initialized ok\n");
+
+ return 0;
+
+out:
+ hclge_ptp_destroy_clock(hdev);
+
+ return ret;
+}
+
+void hclge_ptp_uninit(struct hclge_dev *hdev)
+{
+ struct hclge_ptp *ptp = hdev->ptp;
+
+ if (!ptp)
+ return;
+
+ hclge_ptp_int_en(hdev, false);
+ clear_bit(HCLGE_STATE_PTP_EN, &hdev->state);
+ clear_bit(HCLGE_PTP_FLAG_EN, &ptp->flags);
+ ptp->ts_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ ptp->ts_cfg.tx_type = HWTSTAMP_TX_OFF;
+
+ if (hclge_ptp_set_ts_mode(hdev, &ptp->ts_cfg))
+ dev_err(&hdev->pdev->dev, "failed to disable phc\n");
+
+ if (ptp->tx_skb) {
+ struct sk_buff *skb = ptp->tx_skb;
+
+ ptp->tx_skb = NULL;
+ dev_kfree_skb_any(skb);
+ }
+
+ hclge_ptp_destroy_clock(hdev);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
new file mode 100644
index 000000000000..b3ca7afdaaa6
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2021 Hisilicon Limited.
+
+#ifndef __HCLGE_PTP_H
+#define __HCLGE_PTP_H
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/types.h>
+
+#define HCLGE_PTP_REG_OFFSET 0x29000
+
+#define HCLGE_PTP_TX_TS_SEQID_REG 0x0
+#define HCLGE_PTP_TX_TS_NSEC_REG 0x4
+#define HCLGE_PTP_TX_TS_NSEC_MASK GENMASK(29, 0)
+#define HCLGE_PTP_TX_TS_SEC_L_REG 0x8
+#define HCLGE_PTP_TX_TS_SEC_H_REG 0xC
+#define HCLGE_PTP_TX_TS_SEC_H_MASK GENMASK(15, 0)
+#define HCLGE_PTP_TX_TS_CNT_REG 0x30
+
+#define HCLGE_PTP_TIME_SEC_H_REG 0x50
+#define HCLGE_PTP_TIME_SEC_H_MASK GENMASK(15, 0)
+#define HCLGE_PTP_TIME_SEC_L_REG 0x54
+#define HCLGE_PTP_TIME_NSEC_REG 0x58
+#define HCLGE_PTP_TIME_NSEC_MASK GENMASK(29, 0)
+#define HCLGE_PTP_TIME_NSEC_NEG BIT(31)
+#define HCLGE_PTP_TIME_SYNC_REG 0x5C
+#define HCLGE_PTP_TIME_SYNC_EN BIT(0)
+#define HCLGE_PTP_TIME_ADJ_REG 0x60
+#define HCLGE_PTP_TIME_ADJ_EN BIT(0)
+#define HCLGE_PTP_CYCLE_QUO_REG 0x64
+#define HCLGE_PTP_CYCLE_DEN_REG 0x68
+#define HCLGE_PTP_CYCLE_NUM_REG 0x6C
+#define HCLGE_PTP_CYCLE_CFG_REG 0x70
+#define HCLGE_PTP_CYCLE_ADJ_EN BIT(0)
+#define HCLGE_PTP_CUR_TIME_SEC_H_REG 0x74
+#define HCLGE_PTP_CUR_TIME_SEC_L_REG 0x78
+#define HCLGE_PTP_CUR_TIME_NSEC_REG 0x7C
+
+#define HCLGE_PTP_CYCLE_ADJ_BASE 2
+#define HCLGE_PTP_CYCLE_ADJ_MAX 500000000
+#define HCLGE_PTP_CYCLE_ADJ_UNIT 100000000
+#define HCLGE_PTP_SEC_H_OFFSET 32u
+#define HCLGE_PTP_SEC_L_MASK GENMASK(31, 0)
+
+#define HCLGE_PTP_FLAG_EN BIT(0)
+#define HCLGE_PTP_FLAG_TX_EN BIT(1)
+#define HCLGE_PTP_FLAG_RX_EN BIT(2)
+
+struct hclge_ptp {
+ struct hclge_dev *hdev;
+ struct ptp_clock *clock;
+ struct sk_buff *tx_skb;
+ unsigned long flags;
+ void __iomem *io_base;
+ struct ptp_clock_info info;
+ struct hwtstamp_config ts_cfg;
+ spinlock_t lock; /* protects ptp registers */
+ u32 ptp_cfg;
+ u32 last_tx_seqid;
+ unsigned long tx_start;
+ unsigned long tx_cnt;
+ unsigned long tx_skipped;
+ unsigned long tx_cleaned;
+ unsigned long last_rx;
+ unsigned long rx_cnt;
+ unsigned long tx_timeout;
+};
+
+struct hclge_ptp_int_cmd {
+#define HCLGE_PTP_INT_EN_B BIT(0)
+
+ u8 int_en;
+ u8 rsvd[23];
+};
+
+enum hclge_ptp_udp_type {
+ HCLGE_PTP_UDP_NOT_TYPE,
+ HCLGE_PTP_UDP_P13F_TYPE,
+ HCLGE_PTP_UDP_P140_TYPE,
+ HCLGE_PTP_UDP_FULL_TYPE,
+};
+
+enum hclge_ptp_msg_type {
+ HCLGE_PTP_MSG_TYPE_V2_L2,
+ HCLGE_PTP_MSG_TYPE_V2,
+ HCLGE_PTP_MSG_TYPE_V2_EVENT,
+};
+
+enum hclge_ptp_msg0_type {
+ HCLGE_PTP_MSG0_V2_DELAY_REQ = 1,
+ HCLGE_PTP_MSG0_V2_PDELAY_REQ,
+ HCLGE_PTP_MSG0_V2_DELAY_RESP,
+ HCLGE_PTP_MSG0_V2_EVENT = 0xF,
+};
+
+#define HCLGE_PTP_MSG1_V2_DEFAULT 1
+
+struct hclge_ptp_cfg_cmd {
+#define HCLGE_PTP_EN_B BIT(0)
+#define HCLGE_PTP_TX_EN_B BIT(1)
+#define HCLGE_PTP_RX_EN_B BIT(2)
+#define HCLGE_PTP_UDP_EN_SHIFT 3
+#define HCLGE_PTP_UDP_EN_MASK GENMASK(4, 3)
+#define HCLGE_PTP_MSG_TYPE_SHIFT 8
+#define HCLGE_PTP_MSG_TYPE_MASK GENMASK(9, 8)
+#define HCLGE_PTP_MSG1_SHIFT 16
+#define HCLGE_PTP_MSG1_MASK GENMASK(19, 16)
+#define HCLGE_PTP_MSG0_SHIFT 24
+#define HCLGE_PTP_MSG0_MASK GENMASK(27, 24)
+
+ __le32 cfg;
+ u8 rsvd[20];
+};
+
+static inline struct hclge_dev *hclge_ptp_get_hdev(struct ptp_clock_info *info)
+{
+ struct hclge_ptp *ptp = container_of(info, struct hclge_ptp, info);
+
+ return ptp->hdev;
+}
+
+bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb);
+void hclge_ptp_clean_tx_hwts(struct hclge_dev *dev);
+void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb,
+ u32 nsec, u32 sec);
+int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr);
+int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr);
+int hclge_ptp_init(struct hclge_dev *hdev);
+void hclge_ptp_uninit(struct hclge_dev *hdev);
+int hclge_ptp_get_ts_info(struct hnae3_handle *handle,
+ struct ethtool_ts_info *info);
+int hclge_ptp_cfg_qry(struct hclge_dev *hdev, u32 *cfg);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 7bef6b24e610..52eaf82b7cd7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -243,23 +243,31 @@ static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
}
}
-static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
+static int hclgevf_get_basic_info(struct hclgevf_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+ u8 resp_msg[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+ struct hclge_basic_info *basic_info;
struct hclge_vf_to_pf_msg send_msg;
- u8 resp_msg;
+ unsigned long caps;
int status;
- hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_TCINFO, 0);
- status = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_BASIC_INFO, 0);
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
sizeof(resp_msg));
if (status) {
dev_err(&hdev->pdev->dev,
- "VF request to get TC info from PF failed %d",
- status);
+ "failed to get basic info from pf, ret = %d", status);
return status;
}
- hdev->hw_tc_map = resp_msg;
+ basic_info = (struct hclge_basic_info *)resp_msg;
+
+ hdev->hw_tc_map = basic_info->hw_tc_map;
+ hdev->mbx_api_version = basic_info->mbx_api_version;
+ caps = basic_info->pf_caps;
+ if (test_bit(HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B, &caps))
+ set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
return 0;
}
@@ -1528,8 +1536,7 @@ static void hclgevf_sync_from_add_list(struct list_head *add_list,
kfree(mac_node);
} else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
mac_node->state = HCLGEVF_MAC_TO_DEL;
- list_del(&mac_node->node);
- list_add_tail(&mac_node->node, mac_list);
+ list_move_tail(&mac_node->node, mac_list);
} else {
list_del(&mac_node->node);
kfree(mac_node);
@@ -1554,8 +1561,7 @@ static void hclgevf_sync_from_del_list(struct list_head *del_list,
list_del(&mac_node->node);
kfree(mac_node);
} else {
- list_del(&mac_node->node);
- list_add_tail(&mac_node->node, mac_list);
+ list_move_tail(&mac_node->node, mac_list);
}
}
}
@@ -1591,8 +1597,7 @@ static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
list_for_each_entry_safe(mac_node, tmp, list, node) {
switch (mac_node->state) {
case HCLGEVF_MAC_TO_DEL:
- list_del(&mac_node->node);
- list_add_tail(&mac_node->node, &tmp_del_list);
+ list_move_tail(&mac_node->node, &tmp_del_list);
break;
case HCLGEVF_MAC_TO_ADD:
new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
@@ -1642,6 +1647,22 @@ static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
spin_unlock_bh(&hdev->mac_table.mac_list_lock);
}
+static int hclgevf_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+ struct hclge_vf_to_pf_msg send_msg;
+
+ if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
+ return -EOPNOTSUPP;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_ENABLE_VLAN_FILTER);
+ send_msg.data[0] = enable ? 1 : 0;
+
+ return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+}
+
static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
__be16 proto, u16 vlan_id,
bool is_kill)
@@ -2466,6 +2487,10 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
{
int ret;
+ ret = hclgevf_get_basic_info(hdev);
+ if (ret)
+ return ret;
+
/* get current port based vlan state from PF */
ret = hclgevf_get_port_base_vlan_filter_state(hdev);
if (ret)
@@ -2481,12 +2506,7 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
if (ret)
return ret;
- ret = hclgevf_get_pf_media_type(hdev);
- if (ret)
- return ret;
-
- /* get tc configuration from PF */
- return hclgevf_get_tc_info(hdev);
+ return hclgevf_get_pf_media_type(hdev);
}
static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
@@ -3801,6 +3821,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.get_tc_size = hclgevf_get_tc_size,
.get_fw_version = hclgevf_get_fw_version,
.set_vlan_filter = hclgevf_set_vlan_filter,
+ .enable_vlan_filter = hclgevf_enable_vlan_filter,
.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
.reset_event = hclgevf_reset_event,
.set_default_reset_request = hclgevf_set_def_reset_request,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index b146d04526de..d7d02848d674 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -285,6 +285,7 @@ struct hclgevf_dev {
struct semaphore reset_sem; /* protect reset process */
u32 fw_version;
+ u16 mbx_api_version;
u16 num_tqps; /* num task queue pairs of this VF */
u16 alloc_rss_size; /* allocated RSS task queue */
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 8fddce769c14..d5df131b183c 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -2618,10 +2618,8 @@ static int ehea_restart_qps(struct net_device *dev)
u16 dummy16 = 0;
cb0 = (void *)get_zeroed_page(GFP_KERNEL);
- if (!cb0) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!cb0)
+ return -ENOMEM;
for (i = 0; i < (port->num_def_qps); i++) {
struct ehea_port_res *pr = &port->port_res[i];
@@ -2641,6 +2639,7 @@ static int ehea_restart_qps(struct net_device *dev)
cb0);
if (hret != H_SUCCESS) {
netdev_err(dev, "query_ehea_qp failed (1)\n");
+ ret = -EFAULT;
goto out;
}
@@ -2653,6 +2652,7 @@ static int ehea_restart_qps(struct net_device *dev)
&dummy64, &dummy16, &dummy16);
if (hret != H_SUCCESS) {
netdev_err(dev, "modify_ehea_qp failed (1)\n");
+ ret = -EFAULT;
goto out;
}
@@ -2661,6 +2661,7 @@ static int ehea_restart_qps(struct net_device *dev)
cb0);
if (hret != H_SUCCESS) {
netdev_err(dev, "query_ehea_qp failed (2)\n");
+ ret = -EFAULT;
goto out;
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 4d439413f6d9..2d8804ebdf96 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -95,7 +95,7 @@ static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_query_map(struct ibmvnic_adapter *adapter);
-static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
+static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
@@ -141,6 +141,29 @@ static const struct ibmvnic_stat ibmvnic_stats[] = {
{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};
+static int send_crq_init_complete(struct ibmvnic_adapter *adapter)
+{
+ union ibmvnic_crq crq;
+
+ memset(&crq, 0, sizeof(crq));
+ crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
+ crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
+
+ return ibmvnic_send_crq(adapter, &crq);
+}
+
+static int send_version_xchg(struct ibmvnic_adapter *adapter)
+{
+ union ibmvnic_crq crq;
+
+ memset(&crq, 0, sizeof(crq));
+ crq.version_exchange.first = IBMVNIC_CRQ_CMD;
+ crq.version_exchange.cmd = VERSION_EXCHANGE;
+ crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
+
+ return ibmvnic_send_crq(adapter, &crq);
+}
+
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
unsigned long length, unsigned long *number,
unsigned long *irq)
@@ -846,6 +869,8 @@ static const char *adapter_state_to_string(enum vnic_state state)
return "REMOVING";
case VNIC_REMOVED:
return "REMOVED";
+ case VNIC_DOWN:
+ return "DOWN";
}
return "UNKNOWN";
}
@@ -1501,7 +1526,8 @@ static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
/**
* build_hdr_descs_arr - build a header descriptor array
- * @txbuff: tx buffer
+ * @skb: tx socket buffer
+ * @indir_arr: indirect array
* @num_entries: number of descriptors to be sent
* @hdr_field: bit field determining which headers will be sent
*
@@ -1945,6 +1971,8 @@ static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
return "TIMEOUT";
case VNIC_RESET_CHANGE_PARAM:
return "CHANGE_PARAM";
+ case VNIC_RESET_PASSIVE_INIT:
+ return "PASSIVE_INIT";
}
return "UNKNOWN";
}
@@ -2083,10 +2111,10 @@ static int do_reset(struct ibmvnic_adapter *adapter,
goto out;
}
- /* If the adapter was in PROBE state prior to the reset,
+ /* If the adapter was in PROBE or DOWN state prior to the reset,
* exit here.
*/
- if (reset_state == VNIC_PROBED) {
+ if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) {
rc = 0;
goto out;
}
@@ -2212,10 +2240,10 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
if (rc)
goto out;
- /* If the adapter was in PROBE state prior to the reset,
+ /* If the adapter was in PROBE or DOWN state prior to the reset,
* exit here.
*/
- if (reset_state == VNIC_PROBED)
+ if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN)
goto out;
rc = ibmvnic_login(netdev);
@@ -2268,6 +2296,76 @@ static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
return rwi;
}
+/**
+ * do_passive_init - complete probing when partner device is detected.
+ * @adapter: ibmvnic_adapter struct
+ *
+ * If the ibmvnic device does not have a partner device to communicate with at boot
+ * and that partner device comes online at a later time, this function is called
+ * to complete the initialization process of the ibmvnic device.
+ * Caller is expected to hold rtnl_lock().
+ *
+ * Returns non-zero if the sub-CRQs are not initialized properly, leaving the
+ * device in the DOWN state.
+ * Returns 0 upon success, leaving the device in the PROBED state.
+ */
+
+static int do_passive_init(struct ibmvnic_adapter *adapter)
+{
+ unsigned long timeout = msecs_to_jiffies(30000);
+ struct net_device *netdev = adapter->netdev;
+ struct device *dev = &adapter->vdev->dev;
+ int rc;
+
+ netdev_dbg(netdev, "Partner device found, probing.\n");
+
+ adapter->state = VNIC_PROBING;
+ reinit_completion(&adapter->init_done);
+ adapter->init_done_rc = 0;
+ adapter->crq.active = true;
+
+ rc = send_crq_init_complete(adapter);
+ if (rc)
+ goto out;
+
+ rc = send_version_xchg(adapter);
+ if (rc)
+ netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc);
+
+ if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
+ dev_err(dev, "Initialization sequence timed out\n");
+ rc = -ETIMEDOUT;
+ goto out;
+ }
+
+ rc = init_sub_crqs(adapter);
+ if (rc) {
+ dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc);
+ goto out;
+ }
+
+ rc = init_sub_crq_irqs(adapter);
+ if (rc) {
+ dev_err(dev, "Failed to initialize sub crq irqs\n, rc=%d", rc);
+ goto init_failed;
+ }
+
+ netdev->mtu = adapter->req_mtu - ETH_HLEN;
+ netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
+ netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
+
+ adapter->state = VNIC_PROBED;
+ netdev_dbg(netdev, "Probed successfully. Waiting for signal from partner device.\n");
+
+ return 0;
+
+init_failed:
+ release_sub_crqs(adapter, 1);
+out:
+ adapter->state = VNIC_DOWN;
+ return rc;
+}
+
static void __ibmvnic_reset(struct work_struct *work)
{
struct ibmvnic_rwi *rwi;
@@ -2304,7 +2402,13 @@ static void __ibmvnic_reset(struct work_struct *work)
}
spin_unlock_irqrestore(&adapter->state_lock, flags);
- if (adapter->force_reset_recovery) {
+ if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) {
+ rtnl_lock();
+ rc = do_passive_init(adapter);
+ rtnl_unlock();
+ if (!rc)
+ netif_carrier_on(adapter->netdev);
+ } else if (adapter->force_reset_recovery) {
/* Since we are doing a hard reset now, clear the
* failover_pending flag so we don't ignore any
* future MOBILITY or other resets.
@@ -2400,8 +2504,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
goto err;
}
- list_for_each(entry, &adapter->rwi_list) {
- tmp = list_entry(entry, struct ibmvnic_rwi, list);
+ list_for_each_entry(tmp, &adapter->rwi_list, list) {
if (tmp->reset_reason == reason) {
netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
reset_reason_to_string(reason));
@@ -3774,18 +3877,6 @@ static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
return 0;
}
-static int send_version_xchg(struct ibmvnic_adapter *adapter)
-{
- union ibmvnic_crq crq;
-
- memset(&crq, 0, sizeof(crq));
- crq.version_exchange.first = IBMVNIC_CRQ_CMD;
- crq.version_exchange.cmd = VERSION_EXCHANGE;
- crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
-
- return ibmvnic_send_crq(adapter, &crq);
-}
-
struct vnic_login_client_data {
u8 type;
__be16 len;
@@ -4299,7 +4390,7 @@ static void handle_vpd_rsp(union ibmvnic_crq *crq,
complete:
if (adapter->fw_version[0] == '\0')
- strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
+ strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version));
complete(&adapter->fw_done);
}
@@ -4905,7 +4996,12 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
complete(&adapter->init_done);
adapter->init_done_rc = -EIO;
}
- rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+
+ if (adapter->state == VNIC_DOWN)
+ rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT);
+ else
+ rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+
if (rc && rc != -EBUSY) {
/* We were unable to schedule the failover
* reset either because the adapter was still
@@ -5328,6 +5424,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
struct ibmvnic_adapter *adapter;
struct net_device *netdev;
unsigned char *mac_addr_p;
+ bool init_success;
int rc;
dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
@@ -5374,6 +5471,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
init_completion(&adapter->stats_done);
clear_bit(0, &adapter->resetting);
+ init_success = false;
do {
rc = init_crq_queue(adapter);
if (rc) {
@@ -5383,10 +5481,16 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
rc = ibmvnic_reset_init(adapter, false);
- if (rc && rc != EAGAIN)
- goto ibmvnic_init_fail;
} while (rc == EAGAIN);
+ /* Ignore any error from ibmvnic_reset_init() on the assumption that the
+ * partner device is not ready and the CRQ is therefore inactive. Once
+ * the partner becomes ready, the passive init reset completes setup.
+ */
+
+ if (!rc)
+ init_success = true;
+
rc = init_stats_buffers(adapter);
if (rc)
goto ibmvnic_init_fail;
@@ -5395,10 +5499,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
if (rc)
goto ibmvnic_stats_fail;
- netdev->mtu = adapter->req_mtu - ETH_HLEN;
- netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
- netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
-
rc = device_create_file(&dev->dev, &dev_attr_failover);
if (rc)
goto ibmvnic_dev_file_err;
@@ -5411,7 +5511,14 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
dev_info(&dev->dev, "ibmvnic registered\n");
- adapter->state = VNIC_PROBED;
+ if (init_success) {
+ adapter->state = VNIC_PROBED;
+ netdev->mtu = adapter->req_mtu - ETH_HLEN;
+ netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
+ netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
+ } else {
+ adapter->state = VNIC_DOWN;
+ }
adapter->wait_for_reset = false;
adapter->last_reset_time = jiffies;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index c1d39a748546..22df602323bc 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -851,14 +851,16 @@ enum vnic_state {VNIC_PROBING = 1,
VNIC_CLOSING,
VNIC_CLOSED,
VNIC_REMOVING,
- VNIC_REMOVED};
+ VNIC_REMOVED,
+ VNIC_DOWN};
enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
VNIC_RESET_MOBILITY,
VNIC_RESET_FATAL,
VNIC_RESET_NON_FATAL,
VNIC_RESET_TIMEOUT,
- VNIC_RESET_CHANGE_PARAM};
+ VNIC_RESET_CHANGE_PARAM,
+ VNIC_RESET_PASSIVE_INIT};
struct ibmvnic_rwi {
enum ibmvnic_reset_reason reset_reason;
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index c1d155690341..82744a7501c7 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -241,6 +241,7 @@ config I40E
tristate "Intel(R) Ethernet Controller XL710 Family support"
imply PTP_1588_CLOCK
depends on PCI
+ select AUXILIARY_BUS
help
This driver supports Intel(R) Ethernet Controller XL710 Family of
devices. For more information on how to identify your adapter, go
@@ -294,9 +295,11 @@ config ICE
tristate "Intel(R) Ethernet Connection E800 Series Support"
default n
depends on PCI_MSI
+ select AUXILIARY_BUS
select DIMLIB
select NET_DEVLINK
select PLDMFW
+ imply PTP_1588_CLOCK
help
This driver supports Intel(R) Ethernet Connection E800 Series of
devices. For more information on how to identify your adapter, go
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index f8d78af76d7d..1b0958bd24f6 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1395,7 +1395,7 @@ static int e100_phy_check_without_mii(struct nic *nic)
u8 phy_type;
int without_mii;
- phy_type = (nic->eeprom[eeprom_phy_iface] >> 8) & 0x0f;
+ phy_type = (le16_to_cpu(nic->eeprom[eeprom_phy_iface]) >> 8) & 0x0f;
switch (phy_type) {
case NoSuchPhy: /* Non-MII PHY; UNTESTED! */
@@ -1515,7 +1515,7 @@ static int e100_phy_init(struct nic *nic)
mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
(mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
- (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
+ (le16_to_cpu(nic->eeprom[eeprom_cnfg_mdix]) & eeprom_mdix_enabled))) {
/* enable/disable MDI/MDI-X auto-switching. */
mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
@@ -2269,9 +2269,9 @@ static int e100_asf(struct nic *nic)
{
/* ASF can be enabled from eeprom */
return (nic->pdev->device >= 0x1050) && (nic->pdev->device <= 0x1057) &&
- (nic->eeprom[eeprom_config_asf] & eeprom_asf) &&
- !(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
- ((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE);
+ (le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_asf) &&
+ !(le16_to_cpu(nic->eeprom[eeprom_config_asf]) & eeprom_gcl) &&
+ ((le16_to_cpu(nic->eeprom[eeprom_smbus_addr]) & 0xFF) != 0xFE);
}
static int e100_up(struct nic *nic)
@@ -2926,7 +2926,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Wol magic packet can be enabled from eeprom */
if ((nic->mac >= mac_82558_D101_A4) &&
- (nic->eeprom[eeprom_id] & eeprom_id_wol)) {
+ (le16_to_cpu(nic->eeprom[eeprom_id]) & eeprom_id_wol)) {
nic->flags |= wol_magic;
device_set_wakeup_enable(&pdev->dev, true);
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index f976e9daa3d8..3c51ee94fa00 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -513,7 +513,7 @@ static int e1000_set_eeprom(struct net_device *netdev,
memcpy(ptr, bytes, eeprom->len);
for (i = 0; i < last_word - first_word + 1; i++)
- eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+ cpu_to_le16s(&eeprom_buff[i]);
ret_val = e1000_write_eeprom(hw, first_word,
last_word - first_word + 1, eeprom_buff);
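The e100 and e1000 hunks above apply the same endianness rule: EEPROM words are stored little-endian (__le16), so values must pass through le16_to_cpu() before their bits are tested, and buffers converted in place can use the in-place swap variants, which likely also quiets sparse warnings about mixing __le16 and u16. A minimal sketch of both patterns (names are illustrative):

#include <linux/types.h>

static void eeprom_endian_sketch(__le16 eeprom_word, u16 *buf)
{
	/* Read path: convert the device's LE word to CPU order before
	 * testing bits (a no-op on little-endian hosts).
	 */
	u16 phy_type = (le16_to_cpu(eeprom_word) >> 8) & 0x0f;

	/* In-place path: cpu_to_le16s() byte-swaps the word where it
	 * sits, avoiding a cross-type (__le16 into u16) assignment.
	 */
	cpu_to_le16s(&buf[0]);

	(void)phy_type;
}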
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 9e3103fae723..dbcae92bb18d 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1370,7 +1370,6 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
struct fm10k_hw *hw = &interface->hw;
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 eicr;
- s32 err = 0;
/* unmask any set bits related to this interrupt */
eicr = fm10k_read_reg(hw, FM10K_EICR);
@@ -1386,15 +1385,16 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
/* service mailboxes */
if (fm10k_mbx_trylock(interface)) {
- err = mbx->ops.process(hw, mbx);
+ s32 err = mbx->ops.process(hw, mbx);
+
+ if (err == FM10K_ERR_RESET_REQUESTED)
+ set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
+
/* handle VFLRE events */
fm10k_iov_event(interface);
fm10k_mbx_unlock(interface);
}
- if (err == FM10K_ERR_RESET_REQUESTED)
- set_bit(FM10K_FLAG_RESET_REQUESTED, interface->flags);
-
/* if switch toggled state we should reset GLORTs */
if (eicr & FM10K_EICR_SWITCHNOTREADY) {
/* force link down for at least 4 seconds */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 85d3dd3a3339..b9417dc0007c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -870,6 +870,8 @@ struct i40e_netdev_priv {
struct i40e_vsi *vsi;
};
+extern struct ida i40e_client_ida;
+
/* struct that defines an interrupt vector */
struct i40e_q_vector {
struct i40e_vsi *vsi;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 32f3facbed1a..e07ed065d3a4 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -12,6 +12,7 @@ static const char i40e_client_interface_version_str[] = I40E_CLIENT_VERSION_STR;
static struct i40e_client *registered_client;
static LIST_HEAD(i40e_devices);
static DEFINE_MUTEX(i40e_device_mutex);
+DEFINE_IDA(i40e_client_ida);
static int i40e_client_virtchnl_send(struct i40e_info *ldev,
struct i40e_client *client,
@@ -275,6 +276,57 @@ void i40e_client_update_msix_info(struct i40e_pf *pf)
cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector];
}
+static void i40e_auxiliary_dev_release(struct device *dev)
+{
+ struct i40e_auxiliary_device *i40e_aux_dev =
+ container_of(dev, struct i40e_auxiliary_device, aux_dev.dev);
+
+ ida_free(&i40e_client_ida, i40e_aux_dev->aux_dev.id);
+ kfree(i40e_aux_dev);
+}
+
+static int i40e_register_auxiliary_dev(struct i40e_info *ldev, const char *name)
+{
+ struct i40e_auxiliary_device *i40e_aux_dev;
+ struct pci_dev *pdev = ldev->pcidev;
+ struct auxiliary_device *aux_dev;
+ int ret;
+
+ i40e_aux_dev = kzalloc(sizeof(*i40e_aux_dev), GFP_KERNEL);
+ if (!i40e_aux_dev)
+ return -ENOMEM;
+
+ i40e_aux_dev->ldev = ldev;
+
+ aux_dev = &i40e_aux_dev->aux_dev;
+ aux_dev->name = name;
+ aux_dev->dev.parent = &pdev->dev;
+ aux_dev->dev.release = i40e_auxiliary_dev_release;
+ ldev->aux_dev = aux_dev;
+
+ ret = ida_alloc(&i40e_client_ida, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(i40e_aux_dev);
+ return ret;
+ }
+ aux_dev->id = ret;
+
+ ret = auxiliary_device_init(aux_dev);
+ if (ret < 0) {
+ ida_free(&i40e_client_ida, aux_dev->id);
+ kfree(i40e_aux_dev);
+ return ret;
+ }
+
+ ret = auxiliary_device_add(aux_dev);
+ if (ret) {
+ auxiliary_device_uninit(aux_dev);
+ return ret;
+ }
+
+ return ret;
+}
+
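The function above publishes an auxiliary device named "i40e.iwarp" (KBUILD_MODNAME "." name); an RDMA driver binds to it with a matching auxiliary_driver and, in its probe, hands its client ops back via i40e_client_device_register() (added further below). A hedged sketch of that consumer side, with illustrative names rather than the actual irdma code:

#include <linux/auxiliary_bus.h>
#include <linux/module.h>

static struct i40e_client my_client;	/* .ops would be filled in */

static int my_iwarp_probe(struct auxiliary_device *aux_dev,
			  const struct auxiliary_device_id *id)
{
	struct i40e_auxiliary_device *i40e_adev =
		container_of(aux_dev, struct i40e_auxiliary_device, aux_dev);

	/* Triggers the client subtask, which eventually opens the client. */
	i40e_client_device_register(i40e_adev->ldev, &my_client);
	return 0;
}

static void my_iwarp_remove(struct auxiliary_device *aux_dev)
{
	struct i40e_auxiliary_device *i40e_adev =
		container_of(aux_dev, struct i40e_auxiliary_device, aux_dev);

	i40e_client_device_unregister(i40e_adev->ldev);
}

static const struct auxiliary_device_id my_iwarp_id_table[] = {
	{ .name = "i40e.iwarp" },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, my_iwarp_id_table);

static struct auxiliary_driver my_iwarp_driver = {
	.probe = my_iwarp_probe,
	.remove = my_iwarp_remove,
	.id_table = my_iwarp_id_table,
};
module_auxiliary_driver(my_iwarp_driver);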
/**
* i40e_client_add_instance - add a client instance struct to the instance list
* @pf: pointer to the board struct
@@ -286,9 +338,6 @@ static void i40e_client_add_instance(struct i40e_pf *pf)
struct netdev_hw_addr *mac = NULL;
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
- if (!registered_client || pf->cinst)
- return;
-
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev)
return;
@@ -308,11 +357,8 @@ static void i40e_client_add_instance(struct i40e_pf *pf)
cdev->lan_info.fw_build = pf->hw.aq.fw_build;
set_bit(__I40E_CLIENT_INSTANCE_NONE, &cdev->state);
- if (i40e_client_get_params(vsi, &cdev->lan_info.params)) {
- kfree(cdev);
- cdev = NULL;
- return;
- }
+ if (i40e_client_get_params(vsi, &cdev->lan_info.params))
+ goto free_cdev;
mac = list_first_entry(&cdev->lan_info.netdev->dev_addrs.list,
struct netdev_hw_addr, list);
@@ -324,7 +370,17 @@ static void i40e_client_add_instance(struct i40e_pf *pf)
cdev->client = registered_client;
pf->cinst = cdev;
- i40e_client_update_msix_info(pf);
+ cdev->lan_info.msix_count = pf->num_iwarp_msix;
+ cdev->lan_info.msix_entries = &pf->msix_entries[pf->iwarp_base_vector];
+
+ if (i40e_register_auxiliary_dev(&cdev->lan_info, "iwarp"))
+ goto free_cdev;
+
+ return;
+
+free_cdev:
+ kfree(cdev);
+ pf->cinst = NULL;
}
/**
@@ -345,7 +401,7 @@ void i40e_client_del_instance(struct i40e_pf *pf)
**/
void i40e_client_subtask(struct i40e_pf *pf)
{
- struct i40e_client *client = registered_client;
+ struct i40e_client *client;
struct i40e_client_instance *cdev;
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
int ret = 0;
@@ -359,9 +415,11 @@ void i40e_client_subtask(struct i40e_pf *pf)
test_bit(__I40E_CONFIG_BUSY, pf->state))
return;
- if (!client || !cdev)
+ if (!cdev || !cdev->client)
return;
+ client = cdev->client;
+
/* Here we handle client opens. If the client is down, and
* the netdev is registered, then open the client.
*/
@@ -423,16 +481,8 @@ int i40e_lan_add_device(struct i40e_pf *pf)
pf->hw.pf_id, pf->hw.bus.bus_id,
pf->hw.bus.device, pf->hw.bus.func);
- /* If a client has already been registered, we need to add an instance
- * of it to our new LAN device.
- */
- if (registered_client)
- i40e_client_add_instance(pf);
+ i40e_client_add_instance(pf);
- /* Since in some cases register may have happened before a device gets
- * added, we can schedule a subtask to go initiate the clients if
- * they can be launched at probe time.
- */
set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
i40e_service_event_schedule(pf);
@@ -449,9 +499,13 @@ out:
**/
int i40e_lan_del_device(struct i40e_pf *pf)
{
+ struct auxiliary_device *aux_dev = pf->cinst->lan_info.aux_dev;
struct i40e_device *ldev, *tmp;
int ret = -ENODEV;
+ auxiliary_device_delete(aux_dev);
+ auxiliary_device_uninit(aux_dev);
+
/* First, remove any client instance. */
i40e_client_del_instance(pf);
@@ -579,7 +633,7 @@ static int i40e_client_setup_qvlist(struct i40e_info *ldev,
u32 v_idx, i, reg_idx, reg;
ldev->qvlist_info = kzalloc(struct_size(ldev->qvlist_info, qv_info,
- qvlist_info->num_vectors - 1), GFP_KERNEL);
+ qvlist_info->num_vectors), GFP_KERNEL);
if (!ldev->qvlist_info)
return -ENOMEM;
ldev->qvlist_info->num_vectors = qvlist_info->num_vectors;
@@ -732,6 +786,42 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
return err;
}
+void i40e_client_device_register(struct i40e_info *ldev, struct i40e_client *client)
+{
+ struct i40e_pf *pf = ldev->pf;
+
+ pf->cinst->client = client;
+ set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
+ i40e_service_event_schedule(pf);
+}
+EXPORT_SYMBOL_GPL(i40e_client_device_register);
+
+void i40e_client_device_unregister(struct i40e_info *ldev)
+{
+ struct i40e_pf *pf = ldev->pf;
+ struct i40e_client_instance *cdev = pf->cinst;
+
+ if (!cdev)
+ return;
+
+ while (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
+ usleep_range(500, 1000);
+
+ if (test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ cdev->client->ops->close(&cdev->lan_info, cdev->client, false);
+ clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
+ i40e_client_release_qvlist(&cdev->lan_info);
+ }
+
+ pf->cinst->client = NULL;
+ clear_bit(__I40E_SERVICE_SCHED, pf->state);
+}
+EXPORT_SYMBOL_GPL(i40e_client_device_unregister);
+
+/* Retain these legacy global registration/unregistration calls until i40iw is
+ * removed from the kernel. The irdma unified driver does not use these
+ * exported symbols.
+ */
/**
* i40e_register_client - Register a i40e client driver with the L2 driver
* @client: pointer to the i40e_client struct
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 704e474879c5..9db1968fc491 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -16270,6 +16270,7 @@ static void __exit i40e_exit_module(void)
{
pci_unregister_driver(&i40e_driver);
destroy_workqueue(i40e_wq);
+ ida_destroy(&i40e_client_ida);
i40e_dbg_exit();
}
module_exit(i40e_exit_module);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index de70c16ef619..b883ab809df3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2313,15 +2313,20 @@ static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
case XDP_TX:
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+ if (result == I40E_XDP_CONSUMED)
+ goto out_failure;
break;
case XDP_REDIRECT:
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
+ result = I40E_XDP_REDIR;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 46d884417c63..68f177a86403 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -162,9 +162,10 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
rcu_read_unlock();
- return result;
+ return I40E_XDP_REDIR;
}
switch (act) {
@@ -173,11 +174,14 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
case XDP_TX:
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+ if (result == I40E_XDP_CONSUMED)
+ goto out_failure;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
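Both XDP hunks above route the two failure cases, a full XDP_TX ring and a failed xdp_do_redirect(), through the same out_failure label as XDP_ABORTED, so every errored frame is reported via trace_xdp_exception() before being dropped. A reduced sketch of the dispatch shape (RESULT_* and xmit_on_xdp_ring() are stand-ins, not the driver's actual names):

#include <linux/filter.h>
#include <trace/events/xdp.h>

enum { RESULT_PASS, RESULT_CONSUMED, RESULT_REDIR };	/* stand-ins for I40E_XDP_* */

static int xmit_on_xdp_ring(struct xdp_buff *xdp);	/* hypothetical helper */

static int run_xdp_sketch(struct net_device *netdev, struct bpf_prog *prog,
			  struct xdp_buff *xdp, u32 act)
{
	int result;

	switch (act) {
	case XDP_PASS:
		return RESULT_PASS;
	case XDP_TX:
		result = xmit_on_xdp_ring(xdp);
		if (result == RESULT_CONSUMED)	/* ring full */
			goto out_failure;
		return result;
	case XDP_REDIRECT:
		if (xdp_do_redirect(netdev, xdp, prog))
			goto out_failure;
		return RESULT_REDIR;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(netdev, prog, act);
		fallthrough;	/* handle aborts by dropping packet */
	case XDP_DROP:
		return RESULT_CONSUMED;
	}
}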
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 07fe857e9e3a..4f538cdf42c1 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -22,12 +22,14 @@ ice-y := ice_main.o \
ice_ethtool_fdir.o \
ice_flex_pipe.o \
ice_flow.o \
+ ice_idc.o \
ice_devlink.o \
ice_fw_update.o \
ice_lag.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_allowlist.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice_virtchnl_fdir.o
+ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index e35db3ff583b..a450343fbb92 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -34,6 +34,7 @@
#include <linux/if_bridge.h>
#include <linux/ctype.h>
#include <linux/bpf.h>
+#include <linux/auxiliary_bus.h>
#include <linux/avf/virtchnl.h>
#include <linux/cpu_rmap.h>
#include <linux/dim.h>
@@ -55,8 +56,10 @@
#include "ice_switch.h"
#include "ice_common.h"
#include "ice_sched.h"
+#include "ice_idc_int.h"
#include "ice_virtchnl_pf.h"
#include "ice_sriov.h"
+#include "ice_ptp.h"
#include "ice_fdir.h"
#include "ice_xsk.h"
#include "ice_arfs.h"
@@ -72,12 +75,15 @@
#define ICE_DFLT_TRAFFIC_CLASS BIT(0)
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
-#define ICE_AQ_LEN 64
+#define ICE_AQ_LEN 192
#define ICE_MBXSQ_LEN 64
+#define ICE_SBQ_LEN 64
#define ICE_MIN_LAN_TXRX_MSIX 1
#define ICE_MIN_LAN_OICR_MSIX 1
#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_FDIR_MSIX 2
+#define ICE_RDMA_NUM_AEQ_MSIX 4
+#define ICE_MIN_RDMA_MSIX 2
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
@@ -88,8 +94,9 @@
#define ICE_MAX_LG_RSS_QS 256
#define ICE_RES_VALID_BIT 0x8000
#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
+#define ICE_RES_RDMA_VEC_ID (ICE_RES_MISC_VEC_ID - 1)
/* All VF control VSIs share the same IRQ, so assign a unique ID for them */
-#define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_MISC_VEC_ID - 1)
+#define ICE_RES_VF_CTRL_VEC_ID (ICE_RES_RDMA_VEC_ID - 1)
#define ICE_INVAL_Q_INDEX 0xffff
#define ICE_INVAL_VFID 256
@@ -203,9 +210,9 @@ enum ice_pf_state {
ICE_NEEDS_RESTART,
ICE_PREPARED_FOR_RESET, /* set by driver when prepared */
ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */
- ICE_PFR_REQ, /* set by driver and peers */
- ICE_CORER_REQ, /* set by driver and peers */
- ICE_GLOBR_REQ, /* set by driver and peers */
+ ICE_PFR_REQ, /* set by driver */
+ ICE_CORER_REQ, /* set by driver */
+ ICE_GLOBR_REQ, /* set by driver */
ICE_CORER_RECV, /* set by OICR handler */
ICE_GLOBR_RECV, /* set by OICR handler */
ICE_EMPR_RECV, /* set by OICR handler */
@@ -222,6 +229,7 @@ enum ice_pf_state {
ICE_STATE_NOMINAL_CHECK_BITS,
ICE_ADMINQ_EVENT_PENDING,
ICE_MAILBOXQ_EVENT_PENDING,
+ ICE_SIDEBANDQ_EVENT_PENDING,
ICE_MDD_EVENT_PENDING,
ICE_VFLR_EVENT_PENDING,
ICE_FLTR_OVERFLOW_PROMISC,
@@ -332,9 +340,11 @@ struct ice_vsi {
u16 req_rxq; /* User requested Rx queues */
u16 num_rx_desc;
u16 num_tx_desc;
+ u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
struct ice_tc_cfg tc_cfg;
struct bpf_prog *xdp_prog;
struct ice_ring **xdp_rings; /* XDP ring array */
+ unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -373,17 +383,22 @@ struct ice_q_vector {
enum ice_pf_flags {
ICE_FLAG_FLTR_SYNC,
+ ICE_FLAG_RDMA_ENA,
ICE_FLAG_RSS_ENA,
ICE_FLAG_SRIOV_ENA,
ICE_FLAG_SRIOV_CAPABLE,
ICE_FLAG_DCB_CAPABLE,
ICE_FLAG_DCB_ENA,
ICE_FLAG_FD_ENA,
+ ICE_FLAG_PTP_SUPPORTED, /* PTP is supported by NVM */
+ ICE_FLAG_PTP, /* PTP is enabled by software */
+ ICE_FLAG_AUX_ENA,
ICE_FLAG_ADV_FEATURES,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
ICE_FLAG_NO_MEDIA,
ICE_FLAG_FW_LLDP_AGENT,
+ ICE_FLAG_MOD_POWER_UNSUPPORTED,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_FLAG_LEGACY_RX,
ICE_FLAG_VF_TRUE_PROMISC_ENA,
@@ -439,12 +454,17 @@ struct ice_pf {
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
struct mutex tc_mutex; /* lock to protect TC changes */
u32 msg_enable;
+ struct ice_ptp ptp;
+ u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */
+ u16 rdma_base_vector;
/* spinlock to protect the AdminQ wait list */
spinlock_t aq_wait_lock;
struct hlist_head aq_wait_list;
wait_queue_head_t aq_wait_queue;
+ wait_queue_head_t reset_wait_queue;
+
u32 hw_csum_rx_error;
u16 oicr_idx; /* Other interrupt cause MSIX vector index */
u16 num_avail_sw_msix; /* remaining MSIX SW vectors left unclaimed */
@@ -471,6 +491,8 @@ struct ice_pf {
unsigned long tx_timeout_last_recovery;
u32 tx_timeout_recovery_level;
char int_name[ICE_INT_NAME_STR_LEN];
+ struct auxiliary_device *adev;
+ int aux_idx;
u32 sw_int_count;
__le64 nvm_phy_type_lo; /* NVM PHY type low */
@@ -547,15 +569,16 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring)
*/
static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
{
+ struct ice_vsi *vsi = ring->vsi;
u16 qid = ring->q_index;
if (ice_ring_is_xdp(ring))
- qid -= ring->vsi->num_xdp_txq;
+ qid -= vsi->num_xdp_txq;
- if (!ice_is_xdp_ena_vsi(ring->vsi))
+ if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
return NULL;
- return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
+ return xsk_get_pool_from_qid(vsi->netdev, qid);
}
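With this change ice_xsk_pool() only hands out a pool for queues whose bit is set in the new af_xdp_zc_qps bitmap. A hedged sketch of the enable side that would maintain the bitmap (the helper name is illustrative; the real bookkeeping lives in ice_xsk.c):

static int xsk_pool_enable_sketch(struct ice_vsi *vsi,
				  struct xsk_buff_pool *pool, u16 qid)
{
	int err;

	err = xsk_pool_dma_map(pool, ice_pf_to_dev(vsi->back),
			       ICE_RX_DMA_ATTR);
	if (err)
		return err;

	/* Mark the queue zero-copy enabled so ice_xsk_pool() starts
	 * returning the pool for this qid.
	 */
	set_bit(qid, vsi->af_xdp_zc_qps);
	return 0;
}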
/**
@@ -636,6 +659,9 @@ int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
+int ice_plug_aux_dev(struct ice_pf *pf);
+void ice_unplug_aux_dev(struct ice_pf *pf);
+int ice_init_rdma(struct ice_pf *pf);
const char *ice_stat_str(enum ice_status stat_err);
const char *ice_aq_str(enum ice_aq_err aq_err);
bool ice_is_wol_supported(struct ice_hw *hw);
@@ -660,4 +686,25 @@ int ice_open_internal(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
void ice_service_task_schedule(struct ice_pf *pf);
+/**
+ * ice_set_rdma_cap - enable RDMA support
+ * @pf: PF struct
+ */
+static inline void ice_set_rdma_cap(struct ice_pf *pf)
+{
+ if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
+ set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ ice_plug_aux_dev(pf);
+ }
+}
+
+/**
+ * ice_clear_rdma_cap - disable RDMA support
+ * @pf: PF struct
+ */
+static inline void ice_clear_rdma_cap(struct ice_pf *pf)
+{
+ ice_unplug_aux_dev(pf);
+ clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+}
#endif /* _ICE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 5cdfe406af84..21b4c7cd6f05 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -108,6 +108,7 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_TXQS 0x0042
#define ICE_AQC_CAPS_MSIX 0x0043
#define ICE_AQC_CAPS_FD 0x0045
+#define ICE_AQC_CAPS_1588 0x0046
#define ICE_AQC_CAPS_MAX_MTU 0x0047
#define ICE_AQC_CAPS_NVM_VER 0x0048
#define ICE_AQC_CAPS_PENDING_NVM_VER 0x0049
@@ -115,6 +116,7 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_PENDING_OROM_VER 0x004B
#define ICE_AQC_CAPS_NET_VER 0x004C
#define ICE_AQC_CAPS_PENDING_NET_VER 0x004D
+#define ICE_AQC_CAPS_RDMA 0x0051
#define ICE_AQC_CAPS_NVM_MGMT 0x0080
u8 major_ver;
@@ -1122,7 +1124,9 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_TOPO_UNDRUTIL_PRT BIT(5)
#define ICE_AQ_LINK_TOPO_UNDRUTIL_MEDIA BIT(6)
#define ICE_AQ_LINK_TOPO_UNSUPP_MEDIA BIT(7)
- u8 reserved1;
+ u8 link_cfg_err;
+#define ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED BIT(5)
+#define ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT BIT(7)
u8 link_info;
#define ICE_AQ_LINK_UP BIT(0) /* Link Status */
#define ICE_AQ_LINK_FAULT BIT(1)
@@ -1165,7 +1169,7 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_CFG_PACING_TYPE_FIXED ICE_AQ_CFG_PACING_TYPE_M
/* External Device Power Ability */
u8 power_desc;
-#define ICE_AQ_PWR_CLASS_M 0x3
+#define ICE_AQ_PWR_CLASS_M 0x3F
#define ICE_AQ_LINK_PWR_BASET_LOW_HIGH 0
#define ICE_AQ_LINK_PWR_BASET_HIGH 1
#define ICE_AQ_LINK_PWR_QSFP_CLASS_1 0
@@ -1608,6 +1612,15 @@ struct ice_aqc_get_set_rss_lut {
__le32 addr_low;
};
+/* Sideband Control Interface Commands */
+/* Neighbor Device Request (indirect 0x0C00); also used for the response. */
+struct ice_aqc_neigh_dev_req {
+ __le16 sb_data_len;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
/* Add Tx LAN Queues (indirect 0x0C30) */
struct ice_aqc_add_txqs {
u8 num_qgrps;
@@ -1684,6 +1697,36 @@ struct ice_aqc_dis_txq_item {
__le16 q_id[];
} __packed;
+/* Add Tx RDMA Queue Set (indirect 0x0C33) */
+struct ice_aqc_add_rdma_qset {
+ u8 num_qset_grps;
+ u8 reserved[7];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* This is the descriptor of each Qset entry for the Add Tx RDMA Queue Set
+ * command (0x0C33). Only used within struct ice_aqc_add_rdma_qset_data.
+ */
+struct ice_aqc_add_tx_rdma_qset_entry {
+ __le16 tx_qset_id;
+ u8 rsvd[2];
+ __le32 qset_teid;
+ struct ice_aqc_txsched_elem info;
+};
+
+/* The format of the command buffer for Add Tx RDMA Queue Set (0x0C33)
+ * is an array of the following structs. Please note that the length of
+ * each struct ice_aqc_add_rdma_qset_data is variable due to the variable
+ * number of queues in each group!
+ */
+struct ice_aqc_add_rdma_qset_data {
+ __le32 parent_teid;
+ __le16 num_qsets;
+ u8 rsvd[2];
+ struct ice_aqc_add_tx_rdma_qset_entry rdma_qsets[];
+};
+
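Since each group embeds a flexible rdma_qsets[] array, a multi-group buffer cannot be sized with a plain sizeof(); ice_aq_add_rdma_qsets() further below validates the caller's buf_size by summing struct_size() per group. A minimal sketch of sizing and allocating one group:

#include <linux/overflow.h>
#include <linux/slab.h>

static struct ice_aqc_add_rdma_qset_data *
alloc_qset_group_sketch(u16 num_qsets, u16 *buf_size)
{
	struct ice_aqc_add_rdma_qset_data *buf;

	/* struct_size() = sizeof(*buf) +
	 *		   num_qsets * sizeof(buf->rdma_qsets[0]),
	 * with integer-overflow checking built in.
	 */
	*buf_size = struct_size(buf, rdma_qsets, num_qsets);
	buf = kzalloc(*buf_size, GFP_KERNEL);
	if (buf)
		buf->num_qsets = cpu_to_le16(num_qsets);
	return buf;
}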
/* Configure Firmware Logging Command (indirect 0xFF09)
* Logging Information Read Response (indirect 0xFF10)
* Note: The 0xFF10 command has no input parameters.
@@ -1810,6 +1853,30 @@ struct ice_aqc_get_pkg_info_resp {
struct ice_aqc_get_pkg_info pkg_info[];
};
+/* Driver Shared Parameters (direct, 0x0C90) */
+struct ice_aqc_driver_shared_params {
+ u8 set_or_get_op;
+#define ICE_AQC_DRIVER_PARAM_OP_MASK BIT(0)
+#define ICE_AQC_DRIVER_PARAM_SET 0
+#define ICE_AQC_DRIVER_PARAM_GET 1
+ u8 param_indx;
+#define ICE_AQC_DRIVER_PARAM_MAX_IDX 15
+ u8 rsvd[2];
+ __le32 param_val;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+enum ice_aqc_driver_params {
+ /* OS clock index for PTP timer Domain 0 */
+ ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0 = 0,
+ /* OS clock index for PTP timer Domain 1 */
+ ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1,
+
+ /* Add new parameters above */
+ ICE_AQC_DRIVER_PARAM_MAX = 16,
+};
+
/* Lan Queue Overflow Event (direct, 0x1001) */
struct ice_aqc_event_lan_overflow {
__le32 prtdcb_ruptq;
@@ -1878,13 +1945,16 @@ struct ice_aq_desc {
struct ice_aqc_lldp_filter_ctrl lldp_filter_ctrl;
struct ice_aqc_get_set_rss_lut get_set_rss_lut;
struct ice_aqc_get_set_rss_key get_set_rss_key;
+ struct ice_aqc_neigh_dev_req neigh_dev;
struct ice_aqc_add_txqs add_txqs;
struct ice_aqc_dis_txqs dis_txqs;
+ struct ice_aqc_add_rdma_qset add_rdma_qset;
struct ice_aqc_add_get_update_free_vsi vsi_cmd;
struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
struct ice_aqc_fw_logging fw_logging;
struct ice_aqc_get_clear_fw_log get_clear_fw_log;
struct ice_aqc_download_pkg download_pkg;
+ struct ice_aqc_driver_shared_params drv_shared_params;
struct ice_aqc_set_mac_lb set_mac_lb;
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
struct ice_aqc_set_mac_cfg set_mac_cfg;
@@ -2025,15 +2095,21 @@ enum ice_adminq_opc {
ice_aqc_opc_get_rss_key = 0x0B04,
ice_aqc_opc_get_rss_lut = 0x0B05,
+ /* Sideband Control Interface commands */
+ ice_aqc_opc_neighbour_device_request = 0x0C00,
+
/* Tx queue handling commands/events */
ice_aqc_opc_add_txqs = 0x0C30,
ice_aqc_opc_dis_txqs = 0x0C31,
+ ice_aqc_opc_add_rdma_qset = 0x0C33,
/* package commands */
ice_aqc_opc_download_pkg = 0x0C40,
ice_aqc_opc_update_pkg = 0x0C42,
ice_aqc_opc_get_pkg_info_list = 0x0C43,
+ ice_aqc_opc_driver_shared_params = 0x0C90,
+
/* Standalone Commands/Events */
ice_aqc_opc_event_lan_overflow = 0x1001,
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.h b/drivers/net/ethernet/intel/ice/ice_arfs.h
index f39cd16403ed..80ed76f0cace 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.h
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.h
@@ -52,12 +52,12 @@ bool
ice_is_arfs_using_perfect_flow(struct ice_hw *hw,
enum ice_fltr_ptype flow_type);
#else
-#define ice_sync_arfs_fltrs(pf) do {} while (0)
-#define ice_init_arfs(vsi) do {} while (0)
-#define ice_clear_arfs(vsi) do {} while (0)
-#define ice_remove_arfs(pf) do {} while (0)
-#define ice_free_cpu_rx_rmap(vsi) do {} while (0)
-#define ice_rebuild_arfs(pf) do {} while (0)
+static inline void ice_clear_arfs(struct ice_vsi *vsi) { }
+static inline void ice_free_cpu_rx_rmap(struct ice_vsi *vsi) { }
+static inline void ice_init_arfs(struct ice_vsi *vsi) { }
+static inline void ice_sync_arfs_fltrs(struct ice_pf *pf) { }
+static inline void ice_remove_arfs(struct ice_pf *pf) { }
+static inline void ice_rebuild_arfs(struct ice_pf *pf) { }
static inline int ice_set_cpu_rx_rmap(struct ice_vsi __always_unused *vsi)
{
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 5985a7e5ca8a..c36057efc7ae 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -287,6 +287,15 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
/* make sure the context is associated with the right VSI */
tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
+ /* Restrict Tx timestamps to the PF VSI */
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ tlan_ctx->tsyn_ena = 1;
+ break;
+ default:
+ break;
+ }
+
tlan_ctx->tso_ena = ICE_TX_LEGACY;
tlan_ctx->tso_qnum = pf_q;
@@ -319,11 +328,9 @@ static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
*
* Configure the Rx descriptor ring in RLAN context.
*/
-int ice_setup_rx_ctx(struct ice_ring *ring)
+static int ice_setup_rx_ctx(struct ice_ring *ring)
{
- struct device *dev = ice_pf_to_dev(ring->vsi->back);
int chain_len = ICE_MAX_CHAINED_RX_BUFS;
- u16 num_bufs = ICE_DESC_UNUSED(ring);
struct ice_vsi *vsi = ring->vsi;
u32 rxdid = ICE_RXDID_FLEX_NIC;
struct ice_rlan_ctx rlan_ctx;
@@ -339,48 +346,6 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
/* clear the context structure first */
memset(&rlan_ctx, 0, sizeof(rlan_ctx));
- ring->rx_buf_len = vsi->rx_buf_len;
-
- if (ring->vsi->type == ICE_VSI_PF) {
- if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
- /* coverity[check_return] */
- xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
- ring->q_index, ring->q_vector->napi.napi_id);
-
- ring->xsk_pool = ice_xsk_pool(ring);
- if (ring->xsk_pool) {
- xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
-
- ring->rx_buf_len =
- xsk_pool_get_rx_frame_size(ring->xsk_pool);
- /* For AF_XDP ZC, we disallow packets to span on
- * multiple buffers, thus letting us skip that
- * handling in the fast-path.
- */
- chain_len = 1;
- err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_XSK_BUFF_POOL,
- NULL);
- if (err)
- return err;
- xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
-
- dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
- ring->q_index);
- } else {
- if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
- /* coverity[check_return] */
- xdp_rxq_info_reg(&ring->xdp_rxq,
- ring->netdev,
- ring->q_index, ring->q_vector->napi.napi_id);
-
- err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
- MEM_TYPE_PAGE_SHARED,
- NULL);
- if (err)
- return err;
- }
- }
/* Receive Queue Base Address.
* Indicates the starting address of the descriptor queue defined in
* 128 Byte units.
@@ -415,6 +380,12 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
*/
rlan_ctx.showiv = 0;
+ /* For AF_XDP ZC, we disallow packets to span
+ * multiple buffers, thus letting us skip that
+ * handling in the fast-path.
+ */
+ if (ring->xsk_pool)
+ chain_len = 1;
/* Max packet size for this queue - must not be set to a larger value
* than 5 x DBUF
*/
@@ -431,14 +402,15 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
* of same priority
*/
if (vsi->type != ICE_VSI_VF)
- ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3);
+ ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
else
- ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3);
+ ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3,
+ false);
/* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
if (err) {
- dev_err(dev, "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
pf_q, err);
return -EIO;
}
@@ -458,6 +430,66 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
writel(0, ring->tail);
+ return 0;
+}
+
+/**
+ * ice_vsi_cfg_rxq - Configure an Rx queue
+ * @ring: the ring being configured
+ *
+ * Return 0 on success and a negative value on error.
+ */
+int ice_vsi_cfg_rxq(struct ice_ring *ring)
+{
+ struct device *dev = ice_pf_to_dev(ring->vsi->back);
+ u16 num_bufs = ICE_DESC_UNUSED(ring);
+ int err;
+
+ ring->rx_buf_len = ring->vsi->rx_buf_len;
+
+ if (ring->vsi->type == ICE_VSI_PF) {
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
+ /* coverity[check_return] */
+ xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->q_index, ring->q_vector->napi.napi_id);
+
+ ring->xsk_pool = ice_xsk_pool(ring);
+ if (ring->xsk_pool) {
+ xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+
+ ring->rx_buf_len =
+ xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
+ if (err)
+ return err;
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
+
+ dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
+ ring->q_index);
+ } else {
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
+ /* coverity[check_return] */
+ xdp_rxq_info_reg(&ring->xdp_rxq,
+ ring->netdev,
+ ring->q_index, ring->q_vector->napi.napi_id);
+
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err)
+ return err;
+ }
+ }
+
+ err = ice_setup_rx_ctx(ring);
+ if (err) {
+ dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
+ ring->q_index, err);
+ return err;
+ }
+
if (ring->xsk_pool) {
bool ok;
@@ -470,9 +502,13 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
}
ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
- if (!ok)
+ if (!ok) {
+ u16 pf_q = ring->vsi->rxq_map[ring->q_index];
+
dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
ring->q_index, pf_q);
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
index 44efdb627043..20e1c29aa68a 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.h
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -6,7 +6,7 @@
#include "ice.h"
-int ice_setup_rx_ctx(struct ice_ring *ring);
+int ice_vsi_cfg_rxq(struct ice_ring *ring);
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
int
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait);
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index e93b1e40f627..2fb81e359cdf 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2018, Intel Corporation. */
#include "ice_common.h"
+#include "ice_lib.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
@@ -58,6 +59,17 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
}
/**
+ * ice_is_e810
+ * @hw: pointer to the hardware structure
+ *
+ * Returns true if the device is E810 based, false if not.
+ */
+bool ice_is_e810(struct ice_hw *hw)
+{
+ return hw->mac_type == ICE_MAC_E810;
+}
+
+/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -424,6 +436,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
*hw_media_type = ice_get_media_type(pi);
li->link_info = link_data.link_info;
+ li->link_cfg_err = link_data.link_cfg_err;
li->an_info = link_data.an_info;
li->ext_info = link_data.ext_info;
li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
@@ -454,6 +467,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
(unsigned long long)li->phy_type_high);
ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
+ ice_debug(hw, ICE_DBG_LINK, " link_cfg_err = 0x%x\n", li->link_cfg_err);
ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
@@ -1062,7 +1076,8 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
GLNVM_ULD_POR_DONE_1_M |\
GLNVM_ULD_PCIER_DONE_2_M)
- uld_mask = ICE_RESET_DONE_MASK;
+ uld_mask = ICE_RESET_DONE_MASK | (hw->func_caps.common_cap.rdma ?
+ GLNVM_ULD_PE_DONE_M : 0);
/* Device is Active; check Global Reset processes are done */
for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
@@ -1289,6 +1304,64 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
{ 0 }
};
+/* Sideband Queue command wrappers */
+
+/**
+ * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ * @cd: pointer to command details structure
+ */
+static int
+ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
+ void *buf, u16 buf_size, struct ice_sq_cd *cd)
+{
+ return ice_status_to_errno(ice_sq_send_cmd(hw, ice_get_sbq(hw),
+ (struct ice_aq_desc *)desc,
+ buf, buf_size, cd));
+}
+
+/**
+ * ice_sbq_rw_reg - Fill Sideband Queue command
+ * @hw: pointer to the HW struct
+ * @in: message info to be filled in descriptor
+ */
+int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
+{
+ struct ice_sbq_cmd_desc desc = {0};
+ struct ice_sbq_msg_req msg = {0};
+ u16 msg_len;
+ int status;
+
+ msg_len = sizeof(msg);
+
+ msg.dest_dev = in->dest_dev;
+ msg.opcode = in->opcode;
+ msg.flags = ICE_SBQ_MSG_FLAGS;
+ msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
+ msg.msg_addr_low = cpu_to_le16(in->msg_addr_low);
+ msg.msg_addr_high = cpu_to_le32(in->msg_addr_high);
+
+ if (in->opcode)
+ msg.data = cpu_to_le32(in->data);
+ else
+ /* data read comes back in completion, so shorten the struct by
+ * sizeof(msg.data)
+ */
+ msg_len -= sizeof(msg.data);
+
+ desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
+ desc.param0.cmd_len = cpu_to_le16(msg_len);
+ status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
+ if (!status && !in->opcode)
+ in->data = le32_to_cpu(((struct ice_sbq_msg_cmpl *)&msg)->data);
+ return status;
+}
+
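A hedged sketch of driving the wrapper for a register read; per the code above, a zero opcode selects the read form and the result is copied back from the completion (the destination device and field widths are illustrative):

static int sbq_read_sketch(struct ice_hw *hw, u8 dest, u32 addr, u32 *val)
{
	struct ice_sbq_msg_input msg = {0};
	int err;

	msg.dest_dev = dest;		/* target sideband device */
	msg.opcode = 0;			/* read: wrapper drops msg.data */
	msg.msg_addr_low = lower_16_bits(addr);
	msg.msg_addr_high = upper_16_bits(addr);

	err = ice_sbq_rw_reg(hw, &msg);
	if (err)
		return err;

	*val = msg.data;		/* filled in from the completion */
	return 0;
}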
/* FW Admin Queue command wrappers */
/* Software lock/mutex that is meant to be held while the Global Config Lock
@@ -1938,6 +2011,10 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
caps->nvm_unified_update);
break;
+ case ICE_AQC_CAPS_RDMA:
+ caps->rdma = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT, "%s: rdma = %d\n", prefix, caps->rdma);
+ break;
case ICE_AQC_CAPS_MAX_MTU:
caps->max_mtu = number;
ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
@@ -1971,6 +2048,16 @@ ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
caps->maxtc = 4;
ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
caps->maxtc);
+ if (caps->rdma) {
+ ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
+ caps->rdma = 0;
+ }
+
+ /* print message only when processing device capabilities
+ * during initialization.
+ */
+ if (caps == &hw->dev_caps.common_cap)
+ dev_info(ice_hw_to_dev(hw), "RDMA functionality is not available with the current device configuration.\n");
}
}
@@ -2017,6 +2104,48 @@ ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
}
/**
+ * ice_parse_1588_func_caps - Parse ICE_AQC_CAPS_1588 function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for ICE_AQC_CAPS_1588.
+ */
+static void
+ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ struct ice_ts_func_info *info = &func_p->ts_func_info;
+ u32 number = le32_to_cpu(cap->number);
+
+ info->ena = ((number & ICE_TS_FUNC_ENA_M) != 0);
+ func_p->common_cap.ieee_1588 = info->ena;
+
+ info->src_tmr_owned = ((number & ICE_TS_SRC_TMR_OWND_M) != 0);
+ info->tmr_ena = ((number & ICE_TS_TMR_ENA_M) != 0);
+ info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
+ info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
+
+ info->clk_freq = (number & ICE_TS_CLK_FREQ_M) >> ICE_TS_CLK_FREQ_S;
+ info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
+
+ ice_debug(hw, ICE_DBG_INIT, "func caps: ieee_1588 = %u\n",
+ func_p->common_cap.ieee_1588);
+ ice_debug(hw, ICE_DBG_INIT, "func caps: src_tmr_owned = %u\n",
+ info->src_tmr_owned);
+ ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_ena = %u\n",
+ info->tmr_ena);
+ ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_owned = %u\n",
+ info->tmr_index_owned);
+ ice_debug(hw, ICE_DBG_INIT, "func caps: tmr_index_assoc = %u\n",
+ info->tmr_index_assoc);
+ ice_debug(hw, ICE_DBG_INIT, "func caps: clk_freq = %u\n",
+ info->clk_freq);
+ ice_debug(hw, ICE_DBG_INIT, "func caps: clk_src = %u\n",
+ info->clk_src);
+}
+
+/**
* ice_parse_fdir_func_caps - Parse ICE_AQC_CAPS_FD function caps
* @hw: pointer to the HW struct
* @func_p: pointer to function capabilities structure
@@ -2082,6 +2211,9 @@ ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
case ICE_AQC_CAPS_VSI:
ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
break;
+ case ICE_AQC_CAPS_1588:
+ ice_parse_1588_func_caps(hw, func_p, &cap_resp[i]);
+ break;
case ICE_AQC_CAPS_FD:
ice_parse_fdir_func_caps(hw, func_p);
break;
@@ -2155,6 +2287,57 @@ ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
+ * ice_parse_1588_dev_caps - Parse ICE_AQC_CAPS_1588 device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_1588 for device capabilities.
+ */
+static void
+ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ struct ice_ts_dev_info *info = &dev_p->ts_dev_info;
+ u32 logical_id = le32_to_cpu(cap->logical_id);
+ u32 phys_id = le32_to_cpu(cap->phys_id);
+ u32 number = le32_to_cpu(cap->number);
+
+ info->ena = ((number & ICE_TS_DEV_ENA_M) != 0);
+ dev_p->common_cap.ieee_1588 = info->ena;
+
+ info->tmr0_owner = number & ICE_TS_TMR0_OWNR_M;
+ info->tmr0_owned = ((number & ICE_TS_TMR0_OWND_M) != 0);
+ info->tmr0_ena = ((number & ICE_TS_TMR0_ENA_M) != 0);
+
+ info->tmr1_owner = (number & ICE_TS_TMR1_OWNR_M) >> ICE_TS_TMR1_OWNR_S;
+ info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
+ info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
+
+ info->ena_ports = logical_id;
+ info->tmr_own_map = phys_id;
+
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 = %u\n",
+ dev_p->common_cap.ieee_1588);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owner = %u\n",
+ info->tmr0_owner);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_owned = %u\n",
+ info->tmr0_owned);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr0_ena = %u\n",
+ info->tmr0_ena);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owner = %u\n",
+ info->tmr1_owner);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_owned = %u\n",
+ info->tmr1_owned);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
+ info->tmr1_ena);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
+ info->ena_ports);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
+ info->tmr_own_map);
+}
+
+/**
* ice_parse_fdir_dev_caps - Parse ICE_AQC_CAPS_FD device caps
* @hw: pointer to the HW struct
* @dev_p: pointer to device capabilities structure
@@ -2215,6 +2398,9 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
case ICE_AQC_CAPS_VSI:
ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
break;
+ case ICE_AQC_CAPS_1588:
+ ice_parse_1588_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
case ICE_AQC_CAPS_FD:
ice_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
break;
@@ -3635,6 +3821,52 @@ do_aq:
return status;
}
+/**
+ * ice_aq_add_rdma_qsets
+ * @hw: pointer to the hardware structure
+ * @num_qset_grps: Number of RDMA Qset groups
+ * @qset_list: list of Qset groups to be added
+ * @buf_size: size of buffer for indirect command
+ * @cd: pointer to command details structure or NULL
+ *
+ * Add Tx RDMA Qsets (0x0C33)
+ */
+static int
+ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
+ struct ice_aqc_add_rdma_qset_data *qset_list,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_add_rdma_qset_data *list;
+ struct ice_aqc_add_rdma_qset *cmd;
+ struct ice_aq_desc desc;
+ u16 i, sum_size = 0;
+
+ cmd = &desc.params.add_rdma_qset;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_rdma_qset);
+
+ if (num_qset_grps > ICE_LAN_TXQ_MAX_QGRPS)
+ return -EINVAL;
+
+ for (i = 0, list = qset_list; i < num_qset_grps; i++) {
+ u16 num_qsets = le16_to_cpu(list->num_qsets);
+
+ sum_size += struct_size(list, rdma_qsets, num_qsets);
+ list = (struct ice_aqc_add_rdma_qset_data *)(list->rdma_qsets +
+ num_qsets);
+ }
+
+ if (buf_size != sum_size)
+ return -EINVAL;
+
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ cmd->num_qset_grps = num_qset_grps;
+
+ return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, qset_list,
+ buf_size, cd));
+}
+
/* End of FW Admin Queue command wrappers */
/**
@@ -4133,6 +4365,162 @@ ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
}
/**
+ * ice_cfg_vsi_rdma - configure the VSI RDMA queues
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc_bitmap: TC bitmap
+ * @max_rdmaqs: max RDMA queues array per TC
+ *
+ * This function adds/updates the VSI RDMA queues per TC.
+ */
+int
+ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
+ u16 *max_rdmaqs)
+{
+ return ice_status_to_errno(ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap,
+ max_rdmaqs,
+ ICE_SCHED_NODE_OWNER_RDMA));
+}
+
+/**
+ * ice_ena_vsi_rdma_qset
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: TC number
+ * @rdma_qset: pointer to RDMA Qset
+ * @num_qsets: number of RDMA Qsets
+ * @qset_teid: pointer to Qset node TEIDs
+ *
+ * This function adds RDMA Qset
+ */
+int
+ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
+{
+ struct ice_aqc_txsched_elem_data node = { 0 };
+ struct ice_aqc_add_rdma_qset_data *buf;
+ struct ice_sched_node *parent;
+ enum ice_status status;
+ struct ice_hw *hw;
+ u16 i, buf_size;
+ int ret;
+
+ if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
+ return -EIO;
+ hw = pi->hw;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return -EINVAL;
+
+ buf_size = struct_size(buf, rdma_qsets, num_qsets);
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ mutex_lock(&pi->sched_lock);
+
+ parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
+ ICE_SCHED_NODE_OWNER_RDMA);
+ if (!parent) {
+ ret = -EINVAL;
+ goto rdma_error_exit;
+ }
+ buf->parent_teid = parent->info.node_teid;
+ node.parent_teid = parent->info.node_teid;
+
+ buf->num_qsets = cpu_to_le16(num_qsets);
+ for (i = 0; i < num_qsets; i++) {
+ buf->rdma_qsets[i].tx_qset_id = cpu_to_le16(rdma_qset[i]);
+ buf->rdma_qsets[i].info.valid_sections =
+ ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
+ ICE_AQC_ELEM_VALID_EIR;
+ buf->rdma_qsets[i].info.generic = 0;
+ buf->rdma_qsets[i].info.cir_bw.bw_profile_idx =
+ cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
+ buf->rdma_qsets[i].info.cir_bw.bw_alloc =
+ cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
+ buf->rdma_qsets[i].info.eir_bw.bw_profile_idx =
+ cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
+ buf->rdma_qsets[i].info.eir_bw.bw_alloc =
+ cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
+ }
+ ret = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
+ if (ret) {
+ ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
+ goto rdma_error_exit;
+ }
+ node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
+ for (i = 0; i < num_qsets; i++) {
+ node.node_teid = buf->rdma_qsets[i].qset_teid;
+ status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1,
+ &node);
+ if (status) {
+ ret = ice_status_to_errno(status);
+ break;
+ }
+ qset_teid[i] = le32_to_cpu(node.node_teid);
+ }
+rdma_error_exit:
+ mutex_unlock(&pi->sched_lock);
+ kfree(buf);
+ return ret;
+}
+
+/**
+ * ice_dis_vsi_rdma_qset - free RDMA resources
+ * @pi: port_info struct
+ * @count: number of RDMA Qsets to free
+ * @qset_teid: list of Qset node TEIDs
+ * @q_id: list of queue IDs being disabled
+ */
+int
+ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
+ u16 *q_id)
+{
+ struct ice_aqc_dis_txq_item *qg_list;
+ enum ice_status status = 0;
+ struct ice_hw *hw;
+ u16 qg_size;
+ int i;
+
+ if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
+ return -EIO;
+
+ hw = pi->hw;
+
+ qg_size = struct_size(qg_list, q_id, 1);
+ qg_list = kzalloc(qg_size, GFP_KERNEL);
+ if (!qg_list)
+ return -ENOMEM;
+
+ mutex_lock(&pi->sched_lock);
+
+ for (i = 0; i < count; i++) {
+ struct ice_sched_node *node;
+
+ node = ice_sched_find_node_by_teid(pi->root, qset_teid[i]);
+ if (!node)
+ continue;
+
+ qg_list->parent_teid = node->info.parent_teid;
+ qg_list->num_qs = 1;
+ qg_list->q_id[0] =
+ cpu_to_le16(q_id[i] |
+ ICE_AQC_Q_DIS_BUF_ELEM_TYPE_RDMA_QSET);
+
+ status = ice_aq_dis_lan_txq(hw, 1, qg_list, qg_size,
+ ICE_NO_RESET, 0, NULL);
+ if (status)
+ break;
+
+ ice_free_sched_node(pi, node);
+ }
+
+ mutex_unlock(&pi->sched_lock);
+ kfree(qg_list);
+ return ice_status_to_errno(status);
+}
+
+/**
* ice_replay_pre_init - replay pre initialization
* @hw: pointer to the HW struct
*
@@ -4304,6 +4692,81 @@ ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
}
/**
+ * ice_aq_set_driver_param - Set driver parameter to share via firmware
+ * @hw: pointer to the HW struct
+ * @idx: parameter index to set
+ * @value: the value to set the parameter to
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set the value of one of the software defined parameters. All PFs connected
+ * to this device can read the value using ice_aq_get_driver_param.
+ *
+ * Note that firmware provides no synchronization or locking, and will not
+ * save the parameter value during a device reset. It is expected that
+ * a single PF will write the parameter value, while all other PFs will only
+ * read it.
+ */
+int
+ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
+ u32 value, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_driver_shared_params *cmd;
+ struct ice_aq_desc desc;
+
+ if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
+ return -EIO;
+
+ cmd = &desc.params.drv_shared_params;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
+
+ cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET;
+ cmd->param_indx = idx;
+ cmd->param_val = cpu_to_le32(value);
+
+ return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd));
+}
+
+/**
+ * ice_aq_get_driver_param - Get driver parameter shared via firmware
+ * @hw: pointer to the HW struct
+ * @idx: parameter index to retrieve
+ * @value: storage to return the shared parameter
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get the value of one of the software defined parameters.
+ *
+ * Note that firmware provides no synchronization or locking. It is expected
+ * that only a single PF will write a given parameter.
+ */
+int
+ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
+ u32 *value, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_driver_shared_params *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
+ return -EIO;
+
+ cmd = &desc.params.drv_shared_params;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
+
+ cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET;
+ cmd->param_indx = idx;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ if (status)
+ return ice_status_to_errno(status);
+
+ *value = le32_to_cpu(cmd->param_val);
+
+ return 0;
+}
+
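+/* Illustrative usage (not part of the patch): firmware provides no locking,
+ * so by convention a single PF writes a parameter and the other PFs only
+ * read it. The index and value here are hypothetical:
+ *
+ *	u32 readback;
+ *
+ *	if (!ice_aq_set_driver_param(hw, idx, val, NULL) &&
+ *	    !ice_aq_get_driver_param(hw, idx, &readback, NULL))
+ *		WARN_ON(readback != val);
+ */
+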
+/**
* ice_fw_supports_link_override
* @hw: pointer to the hardware structure
*
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 7a9d2dfb21a2..fb16070f02e2 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -40,6 +40,8 @@ enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
enum ice_adminq_opc opc, struct ice_sq_cd *cd);
+bool ice_is_sbq_supported(struct ice_hw *hw);
+struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw);
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
@@ -97,6 +99,7 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
enum ice_status
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
+bool ice_is_e810(struct ice_hw *hw);
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
@@ -147,6 +150,15 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
bool write, struct ice_sq_cd *cd);
+int
+ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
+ u16 *max_rdmaqs);
+int
+ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ u16 *rdma_qset, u16 num_qsets, u32 *qset_teid);
+int
+ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
+ u16 *q_id);
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
u16 *q_handle, u16 *q_ids, u32 *q_teids,
@@ -164,6 +176,7 @@ void ice_replay_post(struct ice_hw *hw);
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
+int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in);
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
@@ -173,6 +186,12 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
+int
+ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
+ u32 value, struct ice_sq_cd *cd);
+int
+ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
+ u32 *value, struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index 87b33bdd4960..03bdb125be36 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -52,6 +52,19 @@ static void ice_mailbox_init_regs(struct ice_hw *hw)
}
/**
+ * ice_sb_init_regs - Initialize Sideband registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_sq and alloc_rq functions have already been called
+ */
+static void ice_sb_init_regs(struct ice_hw *hw)
+{
+ struct ice_ctl_q_info *cq = &hw->sbq;
+
+ ICE_CQ_INIT_REGS(cq, PF_SB);
+}
+
+/**
* ice_check_sq_alive
* @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
@@ -609,6 +622,10 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
ice_adminq_init_regs(hw);
cq = &hw->adminq;
break;
+ case ICE_CTL_Q_SB:
+ ice_sb_init_regs(hw);
+ cq = &hw->sbq;
+ break;
case ICE_CTL_Q_MAILBOX:
ice_mailbox_init_regs(hw);
cq = &hw->mailboxq;
@@ -646,6 +663,32 @@ init_ctrlq_free_sq:
}
/**
+ * ice_is_sbq_supported - is the sideband queue supported
+ * @hw: pointer to the hardware structure
+ *
+ * Returns true if the sideband control queue interface is
+ * supported for the device, false otherwise
+ */
+bool ice_is_sbq_supported(struct ice_hw *hw)
+{
+ /* The device sideband queue is only supported on devices with the
+ * generic MAC type.
+ */
+ return hw->mac_type == ICE_MAC_GENERIC;
+}
+
+/**
+ * ice_get_sbq - returns the right control queue to use for sideband
+ * @hw: pointer to the hardware structure
+ */
+struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
+{
+ if (ice_is_sbq_supported(hw))
+ return &hw->sbq;
+ return &hw->adminq;
+}
+
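+/* Illustrative sketch (not part of the patch): callers can stay
+ * queue-agnostic by routing sideband messages through this helper:
+ *
+ *	struct ice_ctl_q_info *cq = ice_get_sbq(hw);
+ *
+ *	status = ice_sq_send_cmd(hw, cq, &desc, msg, msg_len, NULL);
+ *
+ * On devices without a sideband queue this transparently falls back to
+ * the admin queue.
+ */
+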
+/**
* ice_shutdown_ctrlq - shutdown routine for any control queue
* @hw: pointer to the hardware structure
* @q_type: specific Control queue type
@@ -662,6 +705,9 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
if (ice_check_sq_alive(hw, cq))
ice_aq_q_shutdown(hw, true);
break;
+ case ICE_CTL_Q_SB:
+ cq = &hw->sbq;
+ break;
case ICE_CTL_Q_MAILBOX:
cq = &hw->mailboxq;
break;
@@ -685,6 +731,9 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
/* Shutdown FW admin queue */
ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ /* Shutdown PHY Sideband */
+ if (ice_is_sbq_supported(hw))
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB);
/* Shutdown PF-VF Mailbox */
ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
@@ -724,6 +773,15 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
if (status)
return status;
+ /* sideband control queue (SBQ) interface is not supported on some
+ * devices. Initialize if supported, else fallback to the admin queue
+ * interface
+ */
+ if (ice_is_sbq_supported(hw)) {
+ status = ice_init_ctrlq(hw, ICE_CTL_Q_SB);
+ if (status)
+ return status;
+ }
/* Init Mailbox queue */
return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
@@ -759,6 +817,8 @@ static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
ice_init_ctrlq_locks(&hw->adminq);
+ if (ice_is_sbq_supported(hw))
+ ice_init_ctrlq_locks(&hw->sbq);
ice_init_ctrlq_locks(&hw->mailboxq);
return ice_init_all_ctrlq(hw);
@@ -791,6 +851,8 @@ void ice_destroy_all_ctrlq(struct ice_hw *hw)
ice_shutdown_all_ctrlq(hw);
ice_destroy_ctrlq_locks(&hw->adminq);
+ if (ice_is_sbq_supported(hw))
+ ice_destroy_ctrlq_locks(&hw->sbq);
ice_destroy_ctrlq_locks(&hw->mailboxq);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index fe75871e48ca..c07e9cc9fc6e 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -9,6 +9,7 @@
/* Maximum buffer lengths for all control queue types */
#define ICE_AQ_MAX_BUF_LEN 4096
#define ICE_MBXQ_MAX_BUF_LEN 4096
+#define ICE_SBQ_MAX_BUF_LEN 512
#define ICE_CTL_Q_DESC(R, i) \
(&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
@@ -29,6 +30,7 @@ enum ice_ctl_q {
ICE_CTL_Q_UNKNOWN = 0,
ICE_CTL_Q_ADMIN,
ICE_CTL_Q_MAILBOX,
+ ICE_CTL_Q_SB,
};
/* Control Queue timeout settings - max delay 1s */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index df02cffdf209..857dc62da7a8 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -275,6 +275,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
struct ice_dcbx_cfg *old_cfg, *curr_cfg;
struct device *dev = ice_pf_to_dev(pf);
int ret = ICE_DCB_NO_HW_CHG;
+ struct iidc_event *event;
struct ice_vsi *pf_vsi;
curr_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
@@ -313,6 +314,15 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
goto free_cfg;
}
+ /* Notify AUX drivers about impending change to TCs */
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return -ENOMEM;
+
+ set_bit(IIDC_EVENT_BEFORE_TC_CHANGE, event->type);
+ ice_send_event_to_aux(pf, event);
+ kfree(event);
+
/* avoid race conditions by holding the lock while disabling and
* re-enabling the VSI
*/
@@ -640,6 +650,7 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
void ice_pf_dcb_recfg(struct ice_pf *pf)
{
struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ struct iidc_event *event;
u8 tc_map = 0;
int v, ret;
@@ -675,6 +686,14 @@ void ice_pf_dcb_recfg(struct ice_pf *pf)
if (vsi->type == ICE_VSI_PF)
ice_dcbnl_set_all(vsi);
}
+ /* Notify the AUX drivers that TC change is finished */
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return;
+
+ set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
+ ice_send_event_to_aux(pf, event);
+ kfree(event);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 35c21d9ae009..261b6e2ed7bc 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -60,7 +60,7 @@ static inline bool ice_is_dcb_active(struct ice_pf *pf)
test_bit(ICE_FLAG_DCB_ENA, pf->flags));
}
#else
-#define ice_dcb_rebuild(pf) do {} while (0)
+static inline void ice_dcb_rebuild(struct ice_pf *pf) { }
static inline u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
{
@@ -113,11 +113,12 @@ ice_is_pfc_causing_hung_q(struct ice_pf __always_unused *pf,
return false;
}
-#define ice_update_dcb_stats(pf) do {} while (0)
-#define ice_pf_dcb_recfg(pf) do {} while (0)
-#define ice_vsi_cfg_dcb_rings(vsi) do {} while (0)
-#define ice_dcb_process_lldp_set_mib_change(pf, event) do {} while (0)
-#define ice_set_cgd_num(tlan_ctx, ring) do {} while (0)
-#define ice_vsi_cfg_netdev_tc(vsi, ena_tc) do {} while (0)
+static inline void ice_pf_dcb_recfg(struct ice_pf *pf) { }
+static inline void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) { }
+static inline void ice_update_dcb_stats(struct ice_pf *pf) { }
+static inline void
+ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event) { }
+static inline void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) { }
+static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring) { }
#endif /* CONFIG_DCB */
#endif /* _ICE_DCB_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.h b/drivers/net/ethernet/intel/ice/ice_dcb_nl.h
index 6c630a362293..eac2f34bdcdd 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.h
@@ -11,9 +11,10 @@ void
ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
struct ice_dcbx_cfg *new_cfg);
#else
-#define ice_dcbnl_setup(vsi) do {} while (0)
-#define ice_dcbnl_set_all(vsi) do {} while (0)
-#define ice_dcbnl_flush_apps(pf, old_cfg, new_cfg) do {} while (0)
+static inline void ice_dcbnl_setup(struct ice_vsi *vsi) { }
+static inline void ice_dcbnl_set_all(struct ice_vsi *vsi) { }
+static inline void
+ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
+ struct ice_dcbx_cfg *new_cfg) { }
#endif /* CONFIG_DCB */
-
#endif /* _ICE_DCB_NL_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index cf685eeea198..91b545ab8b8f 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -276,6 +276,12 @@ static int ice_devlink_info_get(struct devlink *devlink,
size_t i;
int err;
+ err = ice_wait_for_reset(pf, 10 * HZ);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Device is busy resetting");
+ return err;
+ }
+
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
@@ -283,6 +289,9 @@ static int ice_devlink_info_get(struct devlink *devlink,
/* discover capabilities first */
status = ice_discover_dev_caps(hw, &ctx->dev_caps);
if (status) {
+ dev_dbg(dev, "Failed to discover device capabilities, status %s aq_err %s\n",
+ ice_stat_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ NL_SET_ERR_MSG_MOD(extack, "Unable to discover device capabilities");
err = -EIO;
goto out_free_ctx;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index d9ddd0bcf65f..d95a5daca114 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -1773,49 +1773,6 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
100000baseKR4_Full);
}
-
- /* Autoneg PHY types */
- if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX ||
- phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX ||
- phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX ||
- phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR ||
- phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S ||
- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1 ||
- phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- Autoneg);
- }
- if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 ||
- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 ||
- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP ||
- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- Autoneg);
- }
- if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 ||
- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) {
- ethtool_link_ksettings_add_link_mode(ks, supported,
- Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- Autoneg);
- }
}
#define TEST_SET_BITS_TIMEOUT 50
@@ -1972,9 +1929,7 @@ ice_get_link_ksettings(struct net_device *netdev,
ks->base.port = PORT_TP;
break;
case ICE_MEDIA_BACKPLANE:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
ethtool_link_ksettings_add_link_mode(ks, advertising,
Backplane);
ks->base.port = PORT_NONE;
@@ -2049,6 +2004,12 @@ ice_get_link_ksettings(struct net_device *netdev,
if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
+ /* Set supported and advertised autoneg */
+ if (ice_is_phy_caps_an_enabled(caps)) {
+ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
+ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
+ }
+
done:
kfree(caps);
return err;
@@ -3234,6 +3195,31 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
return 0;
}
+static int
+ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(dev);
+
+ /* only report timestamping if PTP is enabled */
+ if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ return ethtool_op_get_ts_info(dev, info);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = ice_get_ptp_clock_index(pf);
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
/**
* ice_get_max_txq - return the maximum number of Tx queues in a PF
* @pf: PF structure
@@ -3501,13 +3487,9 @@ static int
ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
struct ice_ring_container *rc)
{
- struct ice_pf *pf;
-
if (!rc->ring)
return -EINVAL;
- pf = rc->ring->vsi->back;
-
switch (c_type) {
case ICE_RX_CONTAINER:
ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc);
@@ -3519,7 +3501,7 @@ ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
ec->tx_coalesce_usecs = rc->itr_setting;
break;
default:
- dev_dbg(ice_pf_to_dev(pf), "Invalid c_type %d\n", c_type);
+ dev_dbg(ice_pf_to_dev(rc->ring->vsi->back), "Invalid c_type %d\n", c_type);
return -EINVAL;
}
@@ -4029,7 +4011,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.set_rxfh = ice_set_rxfh,
.get_channels = ice_get_channels,
.set_channels = ice_set_channels,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = ice_get_ts_info,
.get_per_queue_coalesce = ice_get_per_q_coalesce,
.set_per_queue_coalesce = ice_set_per_q_coalesce,
.get_fecparam = ice_get_fecparam,
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index dcec0360ce55..f8601d5b0b19 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -702,6 +702,16 @@ int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
}
err = pldmfw_flash_image(&priv.context, fw);
+ if (err == -ENOENT) {
+ dev_err(dev, "Firmware image has no record matching this device\n");
+ NL_SET_ERR_MSG_MOD(extack, "Firmware image has no record matching this device");
+ } else if (err) {
+ /* Do not set a generic extended ACK message here. A more
+ * specific message may already have been set by one of our
+ * ops.
+ */
+ dev_err(dev, "Failed to flash PLDM image, err %d", err);
+ }
ice_release_nvm(hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index de38a0fc9665..6989a76c42a7 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -31,6 +31,7 @@
#define PF_FW_ATQLEN_ATQOVFL_M BIT(29)
#define PF_FW_ATQLEN_ATQCRIT_M BIT(30)
#define VF_MBX_ARQLEN(_VF) (0x0022BC00 + ((_VF) * 4))
+#define VF_MBX_ATQLEN(_VF) (0x0022A800 + ((_VF) * 4))
#define PF_FW_ATQLEN_ATQENABLE_M BIT(31)
#define PF_FW_ATQT 0x00080400
#define PF_MBX_ARQBAH 0x0022E400
@@ -51,6 +52,54 @@
#define PF_MBX_ATQLEN_ATQCRIT_M BIT(30)
#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31)
#define PF_MBX_ATQT 0x0022E300
+#define PF_SB_ARQBAH 0x0022FF00
+#define PF_SB_ARQBAH_ARQBAH_S 0
+#define PF_SB_ARQBAH_ARQBAH_M ICE_M(0xFFFFFFFF, 0)
+#define PF_SB_ARQBAL 0x0022FE80
+#define PF_SB_ARQBAL_ARQBAL_LSB_S 0
+#define PF_SB_ARQBAL_ARQBAL_LSB_M ICE_M(0x3F, 0)
+#define PF_SB_ARQBAL_ARQBAL_S 6
+#define PF_SB_ARQBAL_ARQBAL_M ICE_M(0x3FFFFFF, 6)
+#define PF_SB_ARQH 0x00230000
+#define PF_SB_ARQH_ARQH_S 0
+#define PF_SB_ARQH_ARQH_M ICE_M(0x3FF, 0)
+#define PF_SB_ARQLEN 0x0022FF80
+#define PF_SB_ARQLEN_ARQLEN_S 0
+#define PF_SB_ARQLEN_ARQLEN_M ICE_M(0x3FF, 0)
+#define PF_SB_ARQLEN_ARQVFE_S 28
+#define PF_SB_ARQLEN_ARQVFE_M BIT(28)
+#define PF_SB_ARQLEN_ARQOVFL_S 29
+#define PF_SB_ARQLEN_ARQOVFL_M BIT(29)
+#define PF_SB_ARQLEN_ARQCRIT_S 30
+#define PF_SB_ARQLEN_ARQCRIT_M BIT(30)
+#define PF_SB_ARQLEN_ARQENABLE_S 31
+#define PF_SB_ARQLEN_ARQENABLE_M BIT(31)
+#define PF_SB_ARQT 0x00230080
+#define PF_SB_ARQT_ARQT_S 0
+#define PF_SB_ARQT_ARQT_M ICE_M(0x3FF, 0)
+#define PF_SB_ATQBAH 0x0022FC80
+#define PF_SB_ATQBAH_ATQBAH_S 0
+#define PF_SB_ATQBAH_ATQBAH_M ICE_M(0xFFFFFFFF, 0)
+#define PF_SB_ATQBAL 0x0022FC00
+#define PF_SB_ATQBAL_ATQBAL_S 6
+#define PF_SB_ATQBAL_ATQBAL_M ICE_M(0x3FFFFFF, 6)
+#define PF_SB_ATQH 0x0022FD80
+#define PF_SB_ATQH_ATQH_S 0
+#define PF_SB_ATQH_ATQH_M ICE_M(0x3FF, 0)
+#define PF_SB_ATQLEN 0x0022FD00
+#define PF_SB_ATQLEN_ATQLEN_S 0
+#define PF_SB_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0)
+#define PF_SB_ATQLEN_ATQVFE_S 28
+#define PF_SB_ATQLEN_ATQVFE_M BIT(28)
+#define PF_SB_ATQLEN_ATQOVFL_S 29
+#define PF_SB_ATQLEN_ATQOVFL_M BIT(29)
+#define PF_SB_ATQLEN_ATQCRIT_S 30
+#define PF_SB_ATQLEN_ATQCRIT_M BIT(30)
+#define PF_SB_ATQLEN_ATQENABLE_S 31
+#define PF_SB_ATQLEN_ATQENABLE_M BIT(31)
+#define PF_SB_ATQT 0x0022FE00
+#define PF_SB_ATQT_ATQT_S 0
+#define PF_SB_ATQT_ATQT_M ICE_M(0x3FF, 0)
#define PRTDCB_GENC 0x00083000
#define PRTDCB_GENC_PFCLDA_S 16
#define PRTDCB_GENC_PFCLDA_M ICE_M(0xFFFF, 16)
@@ -110,8 +159,6 @@
#define VPGEN_VFRSTAT_VFRD_M BIT(0)
#define VPGEN_VFRTRIG(_VF) (0x00090000 + ((_VF) * 4))
#define VPGEN_VFRTRIG_VFSWR_M BIT(0)
-#define PFHMC_ERRORDATA 0x00520500
-#define PFHMC_ERRORINFO 0x00520400
#define GLINT_CTL 0x0016CC54
#define GLINT_CTL_DIS_AUTOMASK_M BIT(0)
#define GLINT_CTL_ITR_GRAN_200_S 16
@@ -155,11 +202,13 @@
#define PFINT_MBX_CTL_ITR_INDX_M ICE_M(0x3, 11)
#define PFINT_MBX_CTL_CAUSE_ENA_M BIT(30)
#define PFINT_OICR 0x0016CA00
+#define PFINT_OICR_TSYN_TX_M BIT(11)
#define PFINT_OICR_ECC_ERR_M BIT(16)
#define PFINT_OICR_MAL_DETECT_M BIT(19)
#define PFINT_OICR_GRST_M BIT(20)
#define PFINT_OICR_PCI_EXCEPTION_M BIT(21)
#define PFINT_OICR_HMC_ERR_M BIT(26)
+#define PFINT_OICR_PE_PUSH_M BIT(27)
#define PFINT_OICR_PE_CRITERR_M BIT(28)
#define PFINT_OICR_VFLR_M BIT(29)
#define PFINT_OICR_SWINT_M BIT(31)
@@ -169,6 +218,9 @@
#define PFINT_OICR_CTL_ITR_INDX_M ICE_M(0x3, 11)
#define PFINT_OICR_CTL_CAUSE_ENA_M BIT(30)
#define PFINT_OICR_ENA 0x0016C900
+#define PFINT_SB_CTL 0x0016B600
+#define PFINT_SB_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
+#define PFINT_SB_CTL_CAUSE_ENA_M BIT(30)
#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4))
#define QINT_RQCTL_MSIX_INDX_S 0
#define QINT_RQCTL_MSIX_INDX_M ICE_M(0x7FF, 0)
@@ -382,6 +434,23 @@
#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
#define PRTRPB_RDPC 0x000AC260
+#define GLTSYN_CMD 0x00088810
+#define GLTSYN_CMD_SYNC 0x00088814
+#define GLTSYN_ENA(_i) (0x00088808 + ((_i) * 4))
+#define GLTSYN_ENA_TSYN_ENA_M BIT(0)
+#define GLTSYN_INCVAL_H(_i) (0x00088920 + ((_i) * 4))
+#define GLTSYN_INCVAL_L(_i) (0x00088918 + ((_i) * 4))
+#define GLTSYN_SHADJ_H(_i) (0x00088910 + ((_i) * 4))
+#define GLTSYN_SHADJ_L(_i) (0x00088908 + ((_i) * 4))
+#define GLTSYN_SHTIME_0(_i) (0x000888E0 + ((_i) * 4))
+#define GLTSYN_SHTIME_H(_i) (0x000888F0 + ((_i) * 4))
+#define GLTSYN_SHTIME_L(_i) (0x000888E8 + ((_i) * 4))
+#define GLTSYN_STAT(_i) (0x000888C0 + ((_i) * 4))
+#define GLTSYN_SYNC_DLAY 0x00088818
+#define GLTSYN_TIME_H(_i) (0x000888D8 + ((_i) * 4))
+#define GLTSYN_TIME_L(_i) (0x000888D0 + ((_i) * 4))
+#define PFTSYN_SEM 0x00088880
+#define PFTSYN_SEM_BUSY_M BIT(0)
#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4))
#define VSIQF_FD_CNT_FD_GCNT_S 0
#define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0)
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
new file mode 100644
index 000000000000..1f2afdf6cd48
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021, Intel Corporation. */
+
+/* Inter-Driver Communication */
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_dcb_lib.h"
+
+/**
+ * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct
+ * @pf: pointer to PF struct
+ *
+ * This function has to be called with a device_lock on the
+ * pf->adev.dev to avoid race conditions.
+ */
+static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
+{
+ struct auxiliary_device *adev;
+
+ adev = pf->adev;
+ if (!adev || !adev->dev.driver)
+ return NULL;
+
+ return container_of(adev->dev.driver, struct iidc_auxiliary_drv,
+ adrv.driver);
+}
+
+/**
+ * ice_send_event_to_aux - send event to RDMA AUX driver
+ * @pf: pointer to PF struct
+ * @event: event struct
+ */
+void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
+{
+ struct iidc_auxiliary_drv *iadrv;
+
+ if (!pf->adev)
+ return;
+
+ device_lock(&pf->adev->dev);
+ iadrv = ice_get_auxiliary_drv(pf);
+ if (iadrv && iadrv->event_handler)
+ iadrv->event_handler(pf, event);
+ device_unlock(&pf->adev->dev);
+}
+
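+/* Illustrative usage (not part of the patch), mirroring the DCB callers
+ * elsewhere in this series:
+ *
+ *	struct iidc_event *event;
+ *
+ *	event = kzalloc(sizeof(*event), GFP_KERNEL);
+ *	if (event) {
+ *		set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
+ *		ice_send_event_to_aux(pf, event);
+ *		kfree(event);
+ *	}
+ */
+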
+/**
+ * ice_find_vsi - Find the VSI from VSI ID
+ * @pf: The PF pointer to search in
+ * @vsi_num: The VSI ID to search for
+ */
+static struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
+{
+ int i;
+
+ ice_for_each_vsi(pf, i)
+ if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
+ return pf->vsi[i];
+ return NULL;
+}
+
+/**
+ * ice_add_rdma_qset - Add Leaf Node for RDMA Qset
+ * @pf: PF struct
+ * @qset: Resource to be allocated
+ */
+int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
+{
+ u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS];
+ struct ice_vsi *vsi;
+ struct device *dev;
+ u32 qset_teid;
+ u16 qs_handle;
+ int status;
+ int i;
+
+ if (WARN_ON(!pf || !qset))
+ return -EINVAL;
+
+ dev = ice_pf_to_dev(pf);
+
+ if (!test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ return -EINVAL;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi) {
+ dev_err(dev, "RDMA QSet invalid VSI\n");
+ return -EINVAL;
+ }
+
+ ice_for_each_traffic_class(i)
+ max_rdmaqs[i] = 0;
+
+ max_rdmaqs[qset->tc]++;
+ qs_handle = qset->qs_handle;
+
+ status = ice_cfg_vsi_rdma(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_rdmaqs);
+ if (status) {
+ dev_err(dev, "Failed VSI RDMA Qset config\n");
+ return status;
+ }
+
+ status = ice_ena_vsi_rdma_qset(vsi->port_info, vsi->idx, qset->tc,
+ &qs_handle, 1, &qset_teid);
+ if (status) {
+ dev_err(dev, "Failed VSI RDMA Qset enable\n");
+ return status;
+ }
+ vsi->qset_handle[qset->tc] = qset->qs_handle;
+ qset->teid = qset_teid;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ice_add_rdma_qset);
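+
+/* Illustrative sketch (not part of the patch): an RDMA auxiliary driver
+ * fills in iidc_rdma_qset_params and gets the scheduler TEID back, e.g.:
+ *
+ *	struct iidc_rdma_qset_params qset = { 0 };
+ *
+ *	qset.qs_handle = my_qs_handle;	(hypothetical handle)
+ *	qset.tc = 0;
+ *	err = ice_add_rdma_qset(pf, &qset);
+ *	if (!err)
+ *		... program hardware with qset.teid ...
+ */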
+
+/**
+ * ice_del_rdma_qset - Delete leaf node for RDMA Qset
+ * @pf: PF struct
+ * @qset: Resource to be freed
+ */
+int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
+{
+ struct ice_vsi *vsi;
+ u32 teid;
+ u16 q_id;
+
+ if (WARN_ON(!pf || !qset))
+ return -EINVAL;
+
+ vsi = ice_find_vsi(pf, qset->vport_id);
+ if (!vsi) {
+ dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n");
+ return -EINVAL;
+ }
+
+ q_id = qset->qs_handle;
+ teid = qset->teid;
+
+ vsi->qset_handle[qset->tc] = 0;
+
+ return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id);
+}
+EXPORT_SYMBOL_GPL(ice_del_rdma_qset);
+
+/**
+ * ice_rdma_request_reset - accept request from RDMA to perform a reset
+ * @pf: struct for PF
+ * @reset_type: type of reset
+ */
+int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type)
+{
+ enum ice_reset_req reset;
+
+ if (WARN_ON(!pf))
+ return -EINVAL;
+
+ switch (reset_type) {
+ case IIDC_PFR:
+ reset = ICE_RESET_PFR;
+ break;
+ case IIDC_CORER:
+ reset = ICE_RESET_CORER;
+ break;
+ case IIDC_GLOBR:
+ reset = ICE_RESET_GLOBR;
+ break;
+ default:
+ dev_err(ice_pf_to_dev(pf), "incorrect reset request\n");
+ return -EINVAL;
+ }
+
+ return ice_schedule_reset(pf, reset);
+}
+EXPORT_SYMBOL_GPL(ice_rdma_request_reset);
+
+/**
+ * ice_rdma_update_vsi_filter - update main VSI filters for RDMA
+ * @pf: pointer to struct for PF
+ * @vsi_id: VSI HW idx to update filter on
+ * @enable: bool whether to enable or disable filters
+ */
+int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
+{
+ struct ice_vsi *vsi;
+ int status;
+
+ if (WARN_ON(!pf))
+ return -EINVAL;
+
+ vsi = ice_find_vsi(pf, vsi_id);
+ if (!vsi)
+ return -EINVAL;
+
+ status = ice_cfg_rdma_fltr(&pf->hw, vsi->idx, enable);
+ if (status) {
+ dev_err(ice_pf_to_dev(pf), "Failed to %sable RDMA filtering\n",
+ enable ? "en" : "dis");
+ } else {
+ if (enable)
+ vsi->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
+ else
+ vsi->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
+ }
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(ice_rdma_update_vsi_filter);
+
+/**
+ * ice_get_qos_params - parse QoS params for RDMA consumption
+ * @pf: pointer to PF struct
+ * @qos: set of QoS values
+ */
+void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
+{
+ struct ice_dcbx_cfg *dcbx_cfg;
+ unsigned int i;
+ u32 up2tc;
+
+ dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);
+
+ qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
+ for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
+ qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];
+}
+EXPORT_SYMBOL_GPL(ice_get_qos_params);
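+
+/* Worked example (illustrative): PRTDCB_TUP2TC packs one 3-bit TC index
+ * per user priority, so (up2tc >> (1 * 3)) & 0x7 reads bits 5:3; a value
+ * of 0b011 there maps user priority 1 to TC 3.
+ */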
+
+/**
+ * ice_reserve_rdma_qvector - Reserve vector resources for RDMA driver
+ * @pf: board private structure to initialize
+ */
+static int ice_reserve_rdma_qvector(struct ice_pf *pf)
+{
+ if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
+ int index;
+
+ index = ice_get_res(pf, pf->irq_tracker, pf->num_rdma_msix,
+ ICE_RES_RDMA_VEC_ID);
+ if (index < 0)
+ return index;
+ pf->num_avail_sw_msix -= pf->num_rdma_msix;
+ pf->rdma_base_vector = (u16)index;
+ }
+ return 0;
+}
+
+/**
+ * ice_adev_release - function to be mapped to AUX dev's release op
+ * @dev: pointer to device to free
+ */
+static void ice_adev_release(struct device *dev)
+{
+ struct iidc_auxiliary_dev *iadev;
+
+ iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev);
+ kfree(iadev);
+}
+
+/**
+ * ice_plug_aux_dev - allocate and register AUX device
+ * @pf: pointer to PF struct
+ */
+int ice_plug_aux_dev(struct ice_pf *pf)
+{
+ struct iidc_auxiliary_dev *iadev;
+ struct auxiliary_device *adev;
+ int ret;
+
+ iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
+ if (!iadev)
+ return -ENOMEM;
+
+ adev = &iadev->adev;
+ pf->adev = adev;
+ iadev->pf = pf;
+
+ adev->id = pf->aux_idx;
+ adev->dev.release = ice_adev_release;
+ adev->dev.parent = &pf->pdev->dev;
+ adev->name = IIDC_RDMA_ROCE_NAME;
+
+ ret = auxiliary_device_init(adev);
+ if (ret) {
+ pf->adev = NULL;
+ kfree(iadev);
+ return ret;
+ }
+
+ ret = auxiliary_device_add(adev);
+ if (ret) {
+ pf->adev = NULL;
+ auxiliary_device_uninit(adev);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_unplug_aux_dev - unregister and free AUX device
+ * @pf: pointer to PF struct
+ */
+void ice_unplug_aux_dev(struct ice_pf *pf)
+{
+ if (!pf->adev)
+ return;
+
+ auxiliary_device_delete(pf->adev);
+ auxiliary_device_uninit(pf->adev);
+ pf->adev = NULL;
+}
+
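+/* Illustrative note (not part of the patch): the plug/unplug pair brackets
+ * device resets elsewhere in this series, e.g.:
+ *
+ *	ice_unplug_aux_dev(pf);		(in ice_prepare_for_reset())
+ *	...
+ *	ice_plug_aux_dev(pf);		(at the end of ice_rebuild())
+ */
+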
+/**
+ * ice_init_rdma - initializes PF for RDMA use
+ * @pf: pointer to the PF struct
+ */
+int ice_init_rdma(struct ice_pf *pf)
+{
+ struct device *dev = &pf->pdev->dev;
+ int ret;
+
+ /* Reserve vector resources */
+ ret = ice_reserve_rdma_qvector(pf);
+ if (ret < 0) {
+ dev_err(dev, "failed to reserve vectors for RDMA\n");
+ return ret;
+ }
+
+ return ice_plug_aux_dev(pf);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_idc_int.h b/drivers/net/ethernet/intel/ice/ice_idc_int.h
new file mode 100644
index 000000000000..b7796b8aecbd
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_idc_int.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021, Intel Corporation. */
+
+#ifndef _ICE_IDC_INT_H_
+#define _ICE_IDC_INT_H_
+
+#include <linux/net/intel/iidc.h>
+#include "ice.h"
+
+struct ice_pf;
+
+void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event);
+
+#endif /* !_ICE_IDC_INT_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 4599fc3b4ed8..37c18c66b5c7 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -172,6 +172,7 @@ ice_lag_link(struct ice_lag *lag, struct netdev_notifier_changeupper_info *info)
}
ice_clear_sriov_cap(pf);
+ ice_clear_rdma_cap(pf);
lag->bonded = true;
lag->role = ICE_LAG_UNSET;
@@ -222,6 +223,7 @@ ice_lag_unlink(struct ice_lag *lag,
}
ice_set_sriov_cap(pf);
+ ice_set_rdma_cap(pf);
lag->bonded = false;
lag->role = ICE_LAG_NONE;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 82e2ce23df3d..a46aba5e9c12 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -105,8 +105,14 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
if (!vsi->q_vectors)
goto err_vectors;
+ vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
+ if (!vsi->af_xdp_zc_qps)
+ goto err_zc_qps;
+
return 0;
+err_zc_qps:
+ devm_kfree(dev, vsi->q_vectors);
err_vectors:
devm_kfree(dev, vsi->rxq_map);
err_rxq_map:
@@ -163,12 +169,13 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
switch (vsi->type) {
case ICE_VSI_PF:
- vsi->alloc_txq = min3(pf->num_lan_msix,
- ice_get_avail_txq_count(pf),
- (u16)num_online_cpus());
if (vsi->req_txq) {
vsi->alloc_txq = vsi->req_txq;
vsi->num_txq = vsi->req_txq;
+ } else {
+ vsi->alloc_txq = min3(pf->num_lan_msix,
+ ice_get_avail_txq_count(pf),
+ (u16)num_online_cpus());
}
pf->num_lan_tx = vsi->alloc_txq;
@@ -177,12 +184,13 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
vsi->alloc_rxq = 1;
} else {
- vsi->alloc_rxq = min3(pf->num_lan_msix,
- ice_get_avail_rxq_count(pf),
- (u16)num_online_cpus());
if (vsi->req_rxq) {
vsi->alloc_rxq = vsi->req_rxq;
vsi->num_rxq = vsi->req_rxq;
+ } else {
+ vsi->alloc_rxq = min3(pf->num_lan_msix,
+ ice_get_avail_rxq_count(pf),
+ (u16)num_online_cpus());
}
}
@@ -194,6 +202,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
break;
case ICE_VSI_VF:
vf = &pf->vf[vsi->vf_id];
+ if (vf->num_req_qs)
+ vf->num_vf_qs = vf->num_req_qs;
vsi->alloc_txq = vf->num_vf_qs;
vsi->alloc_rxq = vf->num_vf_qs;
/* pf->num_msix_per_vf includes (VF miscellaneous vector +
@@ -288,6 +298,10 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
dev = ice_pf_to_dev(pf);
+ if (vsi->af_xdp_zc_qps) {
+ bitmap_free(vsi->af_xdp_zc_qps);
+ vsi->af_xdp_zc_qps = NULL;
+ }
/* free the ring and vector containers */
if (vsi->q_vectors) {
devm_kfree(dev, vsi->q_vectors);
@@ -617,6 +631,17 @@ bool ice_is_safe_mode(struct ice_pf *pf)
}
/**
+ * ice_is_aux_ena - check if auxiliary devices are supported
+ * @pf: pointer to the PF struct
+ *
+ * returns true if AUX devices/drivers are supported, false otherwise
+ */
+bool ice_is_aux_ena(struct ice_pf *pf)
+{
+ return test_bit(ICE_FLAG_AUX_ENA, pf->flags);
+}
+
+/**
* ice_vsi_clean_rss_flow_fld - Delete RSS configuration
* @vsi: the VSI being cleaned up
*
@@ -1273,6 +1298,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->reg_idx = vsi->txq_map[i];
ring->ring_active = false;
ring->vsi = vsi;
+ ring->tx_tstamps = &pf->ptp.port.tx;
ring->dev = dev;
ring->count = vsi->num_tx_desc;
WRITE_ONCE(vsi->tx_rings[i], ring);
@@ -1650,9 +1676,11 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
* @pf_q: index of the Rx queue in the PF's queue space
* @rxdid: flexible descriptor RXDID
* @prio: priority for the RXDID for this queue
+ * @ena_ts: true to enable timestamping for this queue, false to disable it
*/
void
-ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio)
+ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
+ bool ena_ts)
{
int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
@@ -1667,9 +1695,40 @@ ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio)
regval |= (prio << QRXFLXP_CNTXT_RXDID_PRIO_S) &
QRXFLXP_CNTXT_RXDID_PRIO_M;
+ if (ena_ts)
+ /* Enable TimeSync on this queue */
+ regval |= QRXFLXP_CNTXT_TS_M;
+
wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
}
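+
+/**
+ * ice_vsi_cfg_single_rxq - configure a single Rx queue of a VSI
+ * @vsi: the VSI that the queue belongs to
+ * @q_idx: index of the Rx queue within the VSI
+ */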
+int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
+{
+ if (q_idx >= vsi->num_rxq)
+ return -EINVAL;
+
+ return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
+}
+
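+/**
+ * ice_vsi_cfg_single_txq - configure a single Tx queue of a VSI
+ * @vsi: the VSI that the queue belongs to
+ * @tx_rings: Tx ring array to pull the ring from
+ * @q_idx: index of the Tx queue within the VSI
+ */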
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_ring **tx_rings, u16 q_idx)
+{
+ struct ice_aqc_add_tx_qgrp *qg_buf;
+ int err;
+
+ if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
+ return -EINVAL;
+
+ qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
+ if (!qg_buf)
+ return -ENOMEM;
+
+ qg_buf->num_txqs = 1;
+
+ err = ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
+ kfree(qg_buf);
+ return err;
+}
+
/**
* ice_vsi_cfg_rxqs - Configure the VSI for Rx
* @vsi: the VSI being configured
@@ -1687,15 +1746,11 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
ice_vsi_cfg_frame_size(vsi);
setup_rings:
/* set up individual rings */
- for (i = 0; i < vsi->num_rxq; i++) {
- int err;
+ ice_for_each_rxq(vsi, i) {
+ int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
- err = ice_setup_rx_ctx(vsi->rx_rings[i]);
- if (err) {
- dev_err(ice_pf_to_dev(vsi->back), "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
- i, err);
+ if (err)
return err;
- }
}
return 0;
@@ -2203,7 +2258,7 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
}
if (status)
- dev_err(dev, "Fail %s %s LLDP rule on VSI %i error: %s\n",
+ dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %s\n",
create ? "adding" : "removing", tx ? "TX" : "RX",
vsi->vsi_num, ice_stat_str(status));
}
@@ -3182,6 +3237,34 @@ bool ice_is_reset_in_progress(unsigned long *state)
test_bit(ICE_GLOBR_REQ, state);
}
+/**
+ * ice_wait_for_reset - Wait for driver to finish reset and rebuild
+ * @pf: pointer to the PF structure
+ * @timeout: length of time to wait, in jiffies
+ *
+ * Wait (sleep) for a short time until the driver finishes cleaning up from
+ * a device reset. The caller must be able to sleep. Use this to delay
+ * operations that could fail while the driver is cleaning up after a device
+ * reset.
+ *
+ * Returns 0 on success, -EBUSY if the reset is not finished within the
+ * timeout, and -ERESTARTSYS if the thread was interrupted.
+ */
+int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout)
+{
+ long ret;
+
+ ret = wait_event_interruptible_timeout(pf->reset_wait_queue,
+ !ice_is_reset_in_progress(pf->state),
+ timeout);
+ if (ret < 0)
+ return ret;
+ else if (!ret)
+ return -EBUSY;
+ else
+ return 0;
+}
+
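+/* Illustrative usage (not part of the patch), as in the devlink info
+ * handler added by this series:
+ *
+ *	err = ice_wait_for_reset(pf, 10 * HZ);
+ *	if (err) {
+ *		NL_SET_ERR_MSG_MOD(extack, "Device is busy resetting");
+ *		return err;
+ *	}
+ */
+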
#ifdef CONFIG_DCB
/**
* ice_vsi_update_q_map - update our copy of the VSI info with new queue map
@@ -3316,13 +3399,22 @@ int ice_status_to_errno(enum ice_status err)
case ICE_ERR_DOES_NOT_EXIST:
return -ENOENT;
case ICE_ERR_OUT_OF_RANGE:
- return -ENOTTY;
+ case ICE_ERR_AQ_ERROR:
+ case ICE_ERR_AQ_TIMEOUT:
+ case ICE_ERR_AQ_EMPTY:
+ case ICE_ERR_AQ_FW_CRITICAL:
+ return -EIO;
case ICE_ERR_PARAM:
+ case ICE_ERR_INVAL_SIZE:
return -EINVAL;
case ICE_ERR_NO_MEMORY:
return -ENOMEM;
case ICE_ERR_MAX_LIMIT:
return -EAGAIN;
+ case ICE_ERR_RESET_ONGOING:
+ return -EBUSY;
+ case ICE_ERR_AQ_FULL:
+ return -ENOSPC;
default:
return -EINVAL;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 511c2316c40c..d5a28bf0fc2c 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -12,6 +12,10 @@ bool ice_pf_state_is_nominal(struct ice_pf *pf);
void ice_update_eth_stats(struct ice_vsi *vsi);
+int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
+
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_ring **tx_rings, u16 q_idx);
+
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
@@ -73,9 +77,11 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id);
int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi);
bool ice_is_reset_in_progress(unsigned long *state);
+int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout);
void
-ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio);
+ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
+ bool ena_ts);
void ice_vsi_dis_irq(struct ice_vsi *vsi);
@@ -102,7 +108,7 @@ enum ice_status
ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
bool ice_is_safe_mode(struct ice_pf *pf);
-
+bool ice_is_aux_ena(struct ice_pf *pf);
bool ice_is_dflt_vsi_in_use(struct ice_sw *sw);
bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 4ee85a217c6f..96276533822e 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -35,6 +35,8 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXX
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */
+static DEFINE_IDA(ice_aux_ida);
+
static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
@@ -454,6 +456,8 @@ ice_prepare_for_reset(struct ice_pf *pf)
if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
return;
+ ice_unplug_aux_dev(pf);
+
/* Notify VFs of impending reset */
if (ice_check_sq_alive(hw, &hw->mailboxq))
ice_vc_notify_reset(pf);
@@ -467,6 +471,9 @@ ice_prepare_for_reset(struct ice_pf *pf)
/* disable the VSIs and their queues that are not already DOWN */
ice_pf_dis_all_vsi(pf, false);
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+ ice_ptp_release(pf);
+
if (hw->port_info)
ice_sched_clear_port(hw->port_info);
@@ -499,6 +506,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
clear_bit(ICE_PFR_REQ, pf->state);
clear_bit(ICE_CORER_REQ, pf->state);
clear_bit(ICE_GLOBR_REQ, pf->state);
+ wake_up(&pf->reset_wait_queue);
return;
}
@@ -511,6 +519,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_rebuild(pf, reset_type);
clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
clear_bit(ICE_PFR_REQ, pf->state);
+ wake_up(&pf->reset_wait_queue);
ice_reset_all_vfs(pf, true);
}
}
@@ -561,6 +570,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
clear_bit(ICE_PFR_REQ, pf->state);
clear_bit(ICE_CORER_REQ, pf->state);
clear_bit(ICE_GLOBR_REQ, pf->state);
+ wake_up(&pf->reset_wait_queue);
ice_reset_all_vfs(pf, true);
}
@@ -858,6 +868,38 @@ static void ice_set_dflt_mib(struct ice_pf *pf)
}
/**
+ * ice_check_module_power - check module power level
+ * @pf: pointer to PF struct
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * Check the module power level returned by a previous call to aq_get_link_info
+ * and print an error message if the module power level is not supported
+ */
+static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
+{
+ /* if module power level is supported, clear the flag */
+ if (!(link_cfg_err & (ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT |
+ ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED))) {
+ clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
+ return;
+ }
+
+ /* if ICE_FLAG_MOD_POWER_UNSUPPORTED was previously set and the
+ * above block didn't clear this bit, there's nothing to do
+ */
+ if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))
+ return;
+
+ if (link_cfg_err & ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT) {
+ dev_err(ice_pf_to_dev(pf), "The installed module is incompatible with the device's NVM image. Cannot start link\n");
+ set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
+ } else if (link_cfg_err & ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED) {
+ dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cannot start link\n");
+ set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);
+ }
+}
+
+/**
* ice_link_event - process the link event
* @pf: PF that the link event is associated with
* @pi: port_info for the port that the link event is associated with
@@ -892,6 +934,8 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
pi->lport, ice_stat_str(status),
ice_aq_str(pi->hw->adminq.sq_last_status));
+ ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
+
/* Check if the link state is up after updating link info, and treat
* this event as an UP event since the link is actually UP now.
*/
@@ -1190,6 +1234,10 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
cq = &hw->adminq;
qtype = "Admin";
break;
+ case ICE_CTL_Q_SB:
+ cq = &hw->sbq;
+ qtype = "Sideband";
+ break;
case ICE_CTL_Q_MAILBOX:
cq = &hw->mailboxq;
qtype = "Mailbox";
@@ -1364,6 +1412,34 @@ static void ice_clean_mailboxq_subtask(struct ice_pf *pf)
}
/**
+ * ice_clean_sbq_subtask - clean the Sideband Queue rings
+ * @pf: board private structure
+ */
+static void ice_clean_sbq_subtask(struct ice_pf *pf)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ /* Nothing to do here if sideband queue is not supported */
+ if (!ice_is_sbq_supported(hw)) {
+ clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
+ return;
+ }
+
+ if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))
+ return;
+
+ if (__ice_clean_ctrlq(pf, ICE_CTL_Q_SB))
+ return;
+
+ clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
+
+ if (ice_ctrlq_pending(hw, &hw->sbq))
+ __ice_clean_ctrlq(pf, ICE_CTL_Q_SB);
+
+ ice_flush(hw);
+}
+
+/**
* ice_service_task_schedule - schedule the service task to wake up
* @pf: board private structure
*
@@ -2006,6 +2082,8 @@ static void ice_check_media_subtask(struct ice_pf *pf)
if (err)
return;
+ ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
+
if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
ice_init_phy_user_cfg(pi);
@@ -2063,6 +2141,7 @@ static void ice_service_task(struct work_struct *work)
ice_process_vflr_event(pf);
ice_clean_mailboxq_subtask(pf);
+ ice_clean_sbq_subtask(pf);
ice_sync_arfs_fltrs(pf);
ice_flush_fdir_ctx(pf);
@@ -2078,6 +2157,7 @@ static void ice_service_task(struct work_struct *work)
test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
+ test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
mod_timer(&pf->serv_tmr, jiffies);
}
@@ -2096,6 +2176,10 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
+ hw->sbq.num_rq_entries = ICE_SBQ_LEN;
+ hw->sbq.num_sq_entries = ICE_SBQ_LEN;
+ hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
+ hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
}
/**
@@ -2118,6 +2202,8 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset)
return -EBUSY;
}
+ ice_unplug_aux_dev(pf);
+
switch (reset) {
case ICE_RESET_PFR:
set_bit(ICE_PFR_REQ, pf->state);
@@ -2608,6 +2694,7 @@ static void ice_ena_misc_vector(struct ice_pf *pf)
PFINT_OICR_PCI_EXCEPTION_M |
PFINT_OICR_VFLR_M |
PFINT_OICR_HMC_ERR_M |
+ PFINT_OICR_PE_PUSH_M |
PFINT_OICR_PE_CRITERR_M);
wr32(hw, PFINT_OICR_ENA, val);
@@ -2633,6 +2720,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
dev = ice_pf_to_dev(pf);
set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
+ set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);
oicr = rd32(hw, PFINT_OICR);
ena_mask = rd32(hw, PFINT_OICR_ENA);
@@ -2678,8 +2766,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
/* If a reset cycle isn't already in progress, we set a bit in
* pf->state so that the service task can start a reset/rebuild.
- * We also make note of which reset happened so that peer
- * devices/drivers can be informed.
*/
if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {
if (reset == ICE_RESET_CORER)
@@ -2706,11 +2792,24 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
}
}
- if (oicr & PFINT_OICR_HMC_ERR_M) {
- ena_mask &= ~PFINT_OICR_HMC_ERR_M;
- dev_dbg(dev, "HMC Error interrupt - info 0x%x, data 0x%x\n",
- rd32(hw, PFHMC_ERRORINFO),
- rd32(hw, PFHMC_ERRORDATA));
+ if (oicr & PFINT_OICR_TSYN_TX_M) {
+ ena_mask &= ~PFINT_OICR_TSYN_TX_M;
+ ice_ptp_process_ts(pf);
+ }
+
+#define ICE_AUX_CRIT_ERR (PFINT_OICR_PE_CRITERR_M | PFINT_OICR_HMC_ERR_M | PFINT_OICR_PE_PUSH_M)
+ if (oicr & ICE_AUX_CRIT_ERR) {
+ struct iidc_event *event;
+
+ ena_mask &= ~ICE_AUX_CRIT_ERR;
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (event) {
+ set_bit(IIDC_EVENT_CRIT_ERR, event->type);
+ /* report the entire OICR value to AUX driver */
+ event->reg = oicr;
+ ice_send_event_to_aux(pf, event);
+ kfree(event);
+ }
}
/* Report any remaining unexpected interrupts */
@@ -2720,8 +2819,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
/* If a critical error is pending there is no choice but to
* reset the device.
*/
- if (oicr & (PFINT_OICR_PE_CRITERR_M |
- PFINT_OICR_PCI_EXCEPTION_M |
+ if (oicr & (PFINT_OICR_PCI_EXCEPTION_M |
PFINT_OICR_ECC_ERR_M)) {
set_bit(ICE_PFR_REQ, pf->state);
ice_service_task_schedule(pf);
@@ -2749,6 +2847,9 @@ static void ice_dis_ctrlq_interrupts(struct ice_hw *hw)
wr32(hw, PFINT_MBX_CTL,
rd32(hw, PFINT_MBX_CTL) & ~PFINT_MBX_CTL_CAUSE_ENA_M);
+ wr32(hw, PFINT_SB_CTL,
+ rd32(hw, PFINT_SB_CTL) & ~PFINT_SB_CTL_CAUSE_ENA_M);
+
/* disable Control queue Interrupt causes */
wr32(hw, PFINT_OICR_CTL,
rd32(hw, PFINT_OICR_CTL) & ~PFINT_OICR_CTL_CAUSE_ENA_M);
@@ -2803,6 +2904,11 @@ static void ice_ena_ctrlq_interrupts(struct ice_hw *hw, u16 reg_idx)
PFINT_MBX_CTL_CAUSE_ENA_M);
wr32(hw, PFINT_MBX_CTL, val);
+ /* This enables Sideband queue Interrupt causes */
+ val = ((reg_idx & PFINT_SB_CTL_MSIX_INDX_M) |
+ PFINT_SB_CTL_CAUSE_ENA_M);
+ wr32(hw, PFINT_SB_CTL, val);
+
ice_flush(hw);
}
@@ -3266,6 +3372,9 @@ static void ice_deinit_pf(struct ice_pf *pf)
bitmap_free(pf->avail_rxqs);
pf->avail_rxqs = NULL;
}
+
+ if (pf->ptp.clock)
+ ptp_clock_unregister(pf->ptp.clock);
}
/**
@@ -3276,6 +3385,12 @@ static void ice_set_pf_caps(struct ice_pf *pf)
{
struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
+ clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ clear_bit(ICE_FLAG_AUX_ENA, pf->flags);
+ if (func_caps->common_cap.rdma) {
+ set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+ set_bit(ICE_FLAG_AUX_ENA, pf->flags);
+ }
clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
if (func_caps->common_cap.dcb)
set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
@@ -3306,6 +3421,10 @@ static void ice_set_pf_caps(struct ice_pf *pf)
func_caps->fd_fltr_best_effort);
}
+ clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
+ if (func_caps->common_cap.ieee_1588)
+ set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
+
pf->max_pf_txqs = func_caps->common_cap.num_txq;
pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
}
@@ -3325,6 +3444,8 @@ static int ice_init_pf(struct ice_pf *pf)
spin_lock_init(&pf->aq_wait_lock);
init_waitqueue_head(&pf->aq_wait_queue);
+ init_waitqueue_head(&pf->reset_wait_queue);
+
/* setup service timer and periodic service task */
timer_setup(&pf->serv_tmr, ice_service_timer, 0);
pf->serv_tmr_period = HZ;
@@ -3355,11 +3476,12 @@ static int ice_init_pf(struct ice_pf *pf)
*/
static int ice_ena_msix_range(struct ice_pf *pf)
{
- int v_left, v_actual, v_other, v_budget = 0;
+ int num_cpus, v_left, v_actual, v_other, v_budget = 0;
struct device *dev = ice_pf_to_dev(pf);
int needed, err, i;
v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
+ num_cpus = num_online_cpus();
/* reserve for LAN miscellaneous handler */
needed = ICE_MIN_LAN_OICR_MSIX;
@@ -3381,13 +3503,23 @@ static int ice_ena_msix_range(struct ice_pf *pf)
v_other = v_budget;
/* reserve vectors for LAN traffic */
- needed = min_t(int, num_online_cpus(), v_left);
+ needed = num_cpus;
if (v_left < needed)
goto no_hw_vecs_left_err;
pf->num_lan_msix = needed;
v_budget += needed;
v_left -= needed;
+ /* reserve vectors for RDMA auxiliary driver */
+ if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
+ needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
+ if (v_left < needed)
+ goto no_hw_vecs_left_err;
+ pf->num_rdma_msix = needed;
+ v_budget += needed;
+ v_left -= needed;
+ }
+
pf->msix_entries = devm_kcalloc(dev, v_budget,
sizeof(*pf->msix_entries), GFP_KERNEL);
if (!pf->msix_entries) {
@@ -3417,16 +3549,46 @@ static int ice_ena_msix_range(struct ice_pf *pf)
err = -ERANGE;
goto msix_err;
} else {
- int v_traffic = v_actual - v_other;
+ int v_remain = v_actual - v_other;
+ int v_rdma = 0, v_min_rdma = 0;
+
+ if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags)) {
+ /* Need at least 1 interrupt in addition to
+ * AEQ MSIX
+ */
+ v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
+ v_min_rdma = ICE_MIN_RDMA_MSIX;
+ }
if (v_actual == ICE_MIN_MSIX ||
- v_traffic < ICE_MIN_LAN_TXRX_MSIX)
+ v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
+ dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
+ clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+
+ pf->num_rdma_msix = 0;
pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
- else
- pf->num_lan_msix = v_traffic;
+ } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
+ (v_remain - v_rdma < v_rdma)) {
+ /* Support minimum RDMA and give remaining
+ * vectors to LAN MSIX
+ */
+ pf->num_rdma_msix = v_min_rdma;
+ pf->num_lan_msix = v_remain - v_min_rdma;
+ } else {
+ /* Split remaining MSIX with RDMA after
+ * accounting for AEQ MSIX
+ */
+ pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
+ ICE_RDMA_NUM_AEQ_MSIX;
+ pf->num_lan_msix = v_remain - pf->num_rdma_msix;
+ }
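+ /* Worked example (illustrative): with v_remain = 24 and an assumed
+ * ICE_RDMA_NUM_AEQ_MSIX of 4, RDMA gets (24 - 4) / 2 + 4 = 14 vectors
+ * and LAN gets the remaining 10.
+ */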
dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
pf->num_lan_msix);
+
+ if (test_bit(ICE_FLAG_RDMA_ENA, pf->flags))
+ dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
+ pf->num_rdma_msix);
}
}
@@ -3441,6 +3603,7 @@ no_hw_vecs_left_err:
needed, v_left);
err = -ERANGE;
exit_err:
+ pf->num_rdma_msix = 0;
pf->num_lan_msix = 0;
return err;
}
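
The three-way branch above is easiest to verify with concrete numbers. Below is a minimal sketch of the final "split remaining vectors" case; RDMA_AEQ_MSIX is a hypothetical stand-in for ICE_RDMA_NUM_AEQ_MSIX, whose real value lives in ice.h.

#define RDMA_AEQ_MSIX 4	/* assumed value, for illustration only */

/* Mirror of the final else branch in ice_ena_msix_range() */
static void split_example(int v_remain)
{
	/* RDMA gets half of the non-AEQ vectors, plus all AEQ vectors */
	int num_rdma = (v_remain - RDMA_AEQ_MSIX) / 2 + RDMA_AEQ_MSIX;
	int num_lan = v_remain - num_rdma;

	/* e.g. v_remain = 20 gives num_rdma = 12 and num_lan = 8 */
}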
@@ -4204,6 +4367,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_init_link_dflt_override(pf->hw.port_info);
+ ice_check_module_power(pf, pf->hw.port_info->phy.link_info.link_cfg_err);
+
/* if media available, initialize PHY settings */
if (pf->hw.port_info->phy.link_info.link_info &
ICE_AQ_MEDIA_AVAILABLE) {
@@ -4242,6 +4407,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
}
/* initialize DDP driven features */
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+ ice_ptp_init(pf);
/* Note: Flow director init failure is non-fatal to load */
if (ice_init_fdir(pf))
@@ -4268,8 +4435,29 @@ probe_done:
/* ready to go, so clear down state bit */
clear_bit(ICE_DOWN, pf->state);
+ if (ice_is_aux_ena(pf)) {
+ pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
+ if (pf->aux_idx < 0) {
+ dev_err(dev, "Failed to allocate device ID for AUX driver\n");
+ err = -ENOMEM;
+ goto err_netdev_reg;
+ }
+
+ err = ice_init_rdma(pf);
+ if (err) {
+ dev_err(dev, "Failed to initialize RDMA: %d\n", err);
+ err = -EIO;
+ goto err_init_aux_unroll;
+ }
+ } else {
+ dev_warn(dev, "RDMA is not supported on this device\n");
+ }
+
return 0;
+err_init_aux_unroll:
+ pf->adev = NULL;
+ ida_free(&ice_aux_ida, pf->aux_idx);
err_netdev_reg:
err_send_version_unroll:
ice_vsi_release_all(pf);
@@ -4379,13 +4567,17 @@ static void ice_remove(struct pci_dev *pdev)
ice_free_vfs(pf);
}
- set_bit(ICE_DOWN, pf->state);
ice_service_task_stop(pf);
ice_aq_cancel_waiting_tasks(pf);
+ ice_unplug_aux_dev(pf);
+ ida_free(&ice_aux_ida, pf->aux_idx);
+ set_bit(ICE_DOWN, pf->state);
mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
ice_deinit_lag(pf);
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+ ice_ptp_release(pf);
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
ice_setup_mc_magic_wake(pf);
@@ -4538,6 +4730,8 @@ static int __maybe_unused ice_suspend(struct device *dev)
*/
disabled = ice_service_task_stop(pf);
+ ice_unplug_aux_dev(pf);
+
/* Already suspended?, then there is nothing to do */
if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
if (!disabled)
@@ -6128,6 +6322,12 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_clear_pxe_mode(hw);
+ ret = ice_init_nvm(hw);
+ if (ret) {
+ dev_err(dev, "ice_init_nvm failed %s\n", ice_stat_str(ret));
+ goto err_init_ctrlq;
+ }
+
ret = ice_get_caps(hw);
if (ret) {
dev_err(dev, "ice_get_caps failed %s\n", ice_stat_str(ret));
@@ -6169,6 +6369,13 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
ice_dcb_rebuild(pf);
+ /* If the PF previously had PTP enabled, PTP init needs to happen
+ * before the VSI rebuild. Otherwise, PTP link status events will
+ * fail.
+ */
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+ ice_ptp_init(pf);
+
/* rebuild PF VSI */
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
if (err) {
@@ -6208,6 +6415,8 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
/* if we get here, reset flow is successful */
clear_bit(ICE_RESET_FAILED, pf->state);
+
+ ice_plug_aux_dev(pf);
return;
err_vsi_rebuild:
@@ -6246,7 +6455,9 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
+ struct iidc_event *event;
u8 count = 0;
+ int err = 0;
if (new_mtu == (int)netdev->mtu) {
netdev_warn(netdev, "MTU is already %u\n", netdev->mtu);
@@ -6279,27 +6490,59 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return -EBUSY;
}
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return -ENOMEM;
+
+ set_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
+ ice_send_event_to_aux(pf, event);
+ clear_bit(IIDC_EVENT_BEFORE_MTU_CHANGE, event->type);
+
netdev->mtu = (unsigned int)new_mtu;
/* if VSI is up, bring it down and then back up */
if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
- int err;
-
err = ice_down(vsi);
if (err) {
netdev_err(netdev, "change MTU if_down err %d\n", err);
- return err;
+ goto event_after;
}
err = ice_up(vsi);
if (err) {
netdev_err(netdev, "change MTU if_up err %d\n", err);
- return err;
+ goto event_after;
}
}
netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
- return 0;
+event_after:
+ set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
+ ice_send_event_to_aux(pf, event);
+ kfree(event);
+
+ return err;
+}
+
+/**
+ * ice_do_ioctl - Access the hwtstamp interface
+ * @netdev: network interface device structure
+ * @ifr: interface request data
+ * @cmd: ioctl command
+ */
+static int ice_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return ice_ptp_get_ts_config(pf, ifr);
+ case SIOCSHWTSTAMP:
+ return ice_ptp_set_ts_config(pf, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
}
/**
@@ -6818,6 +7061,8 @@ int ice_open_internal(struct net_device *netdev)
return -EIO;
}
+ ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
+
/* Set PHY if there is media, otherwise, turn off PHY */
if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);
@@ -6950,6 +7195,7 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
.ndo_set_tx_maxrate = ice_set_tx_maxrate,
+ .ndo_do_ioctl = ice_do_ioctl,
.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
.ndo_set_vf_mac = ice_set_vf_mac,
.ndo_get_vf_config = ice_get_vf_cfg,
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
new file mode 100644
index 000000000000..e14f81321768
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -0,0 +1,1269 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+
+/**
+ * ice_set_tx_tstamp - Enable or disable Tx timestamping
+ * @pf: The PF pointer to search in
+ * @on: bool value for whether timestamps are enabled or disabled
+ */
+static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
+{
+ struct ice_vsi *vsi;
+ u32 val;
+ u16 i;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return;
+
+ /* Set the timestamp enable flag for all the Tx rings */
+ ice_for_each_txq(vsi, i) {
+ if (!vsi->tx_rings[i])
+ continue;
+ vsi->tx_rings[i]->ptp_tx = on;
+ }
+
+ /* Configure the Tx timestamp interrupt */
+ val = rd32(&pf->hw, PFINT_OICR_ENA);
+ if (on)
+ val |= PFINT_OICR_TSYN_TX_M;
+ else
+ val &= ~PFINT_OICR_TSYN_TX_M;
+ wr32(&pf->hw, PFINT_OICR_ENA, val);
+}
+
+/**
+ * ice_set_rx_tstamp - Enable or disable Rx timestamping
+ * @pf: The PF pointer to search in
+ * @on: bool value for whether timestamps are enabled or disabled
+ */
+static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
+{
+ struct ice_vsi *vsi;
+ u16 i;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return;
+
+ /* Set the timestamp flag for all the Rx rings */
+ ice_for_each_rxq(vsi, i) {
+ if (!vsi->rx_rings[i])
+ continue;
+ vsi->rx_rings[i]->ptp_rx = on;
+ }
+}
+
+/**
+ * ice_ptp_cfg_timestamp - Configure timestamp for init/deinit
+ * @pf: Board private structure
+ * @ena: bool value to enable or disable timestamping
+ *
+ * This function will configure timestamping during PTP initialization
+ * and deinitialization
+ */
+static void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
+{
+ ice_set_tx_tstamp(pf, ena);
+ ice_set_rx_tstamp(pf, ena);
+
+ if (ena) {
+ pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
+ pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
+ } else {
+ pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+ }
+}
+
+/**
+ * ice_get_ptp_clock_index - Get the PTP clock index
+ * @pf: the PF pointer
+ *
+ * Determine the clock index of the PTP clock associated with this device. If
+ * this is the PF controlling the clock, just use the local access to the
+ * clock device pointer.
+ *
+ * Otherwise, read from the driver shared parameters to determine the clock
+ * index value.
+ *
+ * Returns: the index of the PTP clock associated with this device, or -1 if
+ * there is no associated clock.
+ */
+int ice_get_ptp_clock_index(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ enum ice_aqc_driver_params param_idx;
+ struct ice_hw *hw = &pf->hw;
+ u8 tmr_idx;
+ u32 value;
+ int err;
+
+ /* Use the ptp_clock structure if we're the main PF */
+ if (pf->ptp.clock)
+ return ptp_clock_index(pf->ptp.clock);
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
+ if (!tmr_idx)
+ param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
+ else
+ param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;
+
+ err = ice_aq_get_driver_param(hw, param_idx, &value, NULL);
+ if (err) {
+ dev_err(dev, "Failed to read PTP clock index parameter, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ return -1;
+ }
+
+ /* The PTP clock index is an integer, and will be between 0 and
+ * INT_MAX. The highest bit of the driver shared parameter is used to
+ * indicate whether or not the currently stored clock index is valid.
+ */
+ if (!(value & PTP_SHARED_CLK_IDX_VALID))
+ return -1;
+
+ return value & ~PTP_SHARED_CLK_IDX_VALID;
+}
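
A minimal sketch of the encoding used by the shared parameter, assuming only what the code above shows (bit 31 flags validity, bits 30:0 carry the clock index):

#define CLK_IDX_VALID (1U << 31)	/* stand-in for PTP_SHARED_CLK_IDX_VALID */

static u32 encode_clk_idx(u32 idx)
{
	return idx | CLK_IDX_VALID;
}

static int decode_clk_idx(u32 value)
{
	if (!(value & CLK_IDX_VALID))
		return -1;	/* no clock currently registered */

	return value & ~CLK_IDX_VALID;
}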
+
+/**
+ * ice_set_ptp_clock_index - Set the PTP clock index
+ * @pf: the PF pointer
+ *
+ * Set the PTP clock index for this device into the shared driver parameters,
+ * so that other PFs associated with this device can read it.
+ *
+ * If the PF is unable to store the clock index, it will log an error, but
+ * will continue operating PTP.
+ */
+static void ice_set_ptp_clock_index(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ enum ice_aqc_driver_params param_idx;
+ struct ice_hw *hw = &pf->hw;
+ u8 tmr_idx;
+ u32 value;
+ int err;
+
+ if (!pf->ptp.clock)
+ return;
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
+ if (!tmr_idx)
+ param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
+ else
+ param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;
+
+ value = (u32)ptp_clock_index(pf->ptp.clock);
+ if (value > INT_MAX) {
+ dev_err(dev, "PTP Clock index is too large to store\n");
+ return;
+ }
+ value |= PTP_SHARED_CLK_IDX_VALID;
+
+ err = ice_aq_set_driver_param(hw, param_idx, value, NULL);
+ if (err) {
+ dev_err(dev, "Failed to set PTP clock index parameter, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ }
+}
+
+/**
+ * ice_clear_ptp_clock_index - Clear the PTP clock index
+ * @pf: the PF pointer
+ *
+ * Clear the PTP clock index for this device. Must be called when
+ * unregistering the PTP clock, in order to ensure other PFs stop reporting
+ * a clock object that no longer exists.
+ */
+static void ice_clear_ptp_clock_index(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ enum ice_aqc_driver_params param_idx;
+ struct ice_hw *hw = &pf->hw;
+ u8 tmr_idx;
+ int err;
+
+ /* Do not clear the index if we don't own the timer */
+ if (!hw->func_caps.ts_func_info.src_tmr_owned)
+ return;
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
+ if (!tmr_idx)
+ param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
+ else
+ param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;
+
+ err = ice_aq_set_driver_param(hw, param_idx, 0, NULL);
+ if (err) {
+ dev_dbg(dev, "Failed to clear PTP clock index parameter, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+ }
+}
+
+/**
+ * ice_ptp_read_src_clk_reg - Read the source clock register
+ * @pf: Board private structure
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored if NULL is given.
+ */
+static u64
+ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
+{
+ struct ice_hw *hw = &pf->hw;
+ u32 hi, lo, lo2;
+ u8 tmr_idx;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+ /* Read the system timestamp pre PHC read */
+ if (sts)
+ ptp_read_system_prets(sts);
+
+ lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
+
+ /* Read the system timestamp post PHC read */
+ if (sts)
+ ptp_read_system_postts(sts);
+
+ hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
+ lo2 = rd32(hw, GLTSYN_TIME_L(tmr_idx));
+
+ if (lo2 < lo) {
+ /* if TIME_L rolled over read TIME_L again and update
+ * system timestamps
+ */
+ if (sts)
+ ptp_read_system_prets(sts);
+ lo = rd32(hw, GLTSYN_TIME_L(tmr_idx));
+ if (sts)
+ ptp_read_system_postts(sts);
+ hi = rd32(hw, GLTSYN_TIME_H(tmr_idx));
+ }
+
+ return ((u64)hi << 32) | lo;
+}
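
The lo/hi/lo2 sequence above is the usual rollover-safe pattern for reading a 64-bit counter split across two 32-bit registers. A generic sketch, with the register reads abstracted as hypothetical callbacks:

static u64 read_split_counter(u32 (*rd_lo)(void), u32 (*rd_hi)(void))
{
	u32 lo = rd_lo();
	u32 hi = rd_hi();
	u32 lo2 = rd_lo();

	if (lo2 < lo) {
		/* The low word wrapped between the two reads, so the
		 * high word may be stale; pair a fresh high word with
		 * the post-wrap low word.
		 */
		lo = lo2;
		hi = rd_hi();
	}

	return ((u64)hi << 32) | lo;
}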
+
+/**
+ * ice_ptp_update_cached_phctime - Update the cached PHC time values
+ * @pf: Board specific private structure
+ *
+ * This function updates the system time values which are cached in the PF
+ * structure and the Rx rings.
+ *
+ * This function must be called periodically to ensure that the cached value
+ * is never more than 2 seconds old. It must also be called whenever the PHC
+ * time has been changed.
+ */
+static void ice_ptp_update_cached_phctime(struct ice_pf *pf)
+{
+ u64 systime;
+ int i;
+
+ /* Read the current PHC time */
+ systime = ice_ptp_read_src_clk_reg(pf, NULL);
+
+ /* Update the cached PHC time stored in the PF structure */
+ WRITE_ONCE(pf->ptp.cached_phc_time, systime);
+
+ ice_for_each_vsi(pf, i) {
+ struct ice_vsi *vsi = pf->vsi[i];
+ int j;
+
+ if (!vsi)
+ continue;
+
+ if (vsi->type != ICE_VSI_PF)
+ continue;
+
+ ice_for_each_rxq(vsi, j) {
+ if (!vsi->rx_rings[j])
+ continue;
+ WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
+ }
+ }
+}
+
+/**
+ * ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
+ * @cached_phc_time: recently cached copy of PHC time
+ * @in_tstamp: Ingress/egress 32b nanoseconds timestamp value
+ *
+ * Hardware captures timestamps which contain only 32 bits of nominal
+ * nanoseconds, as opposed to the 64bit timestamps that the stack expects.
+ * Note that the captured timestamp values may be 40 bits, but the lower
+ * 8 bits (a valid bit plus 7 bits of sub-nanoseconds) are generally
+ * discarded.
+ *
+ * Extend the 32bit nanosecond timestamp using the following algorithm and
+ * assumptions:
+ *
+ * 1) have a recently cached copy of the PHC time
+ * 2) assume that the in_tstamp was captured 2^31 nanoseconds (~2.1
+ * seconds) before or after the PHC time was captured.
+ * 3) calculate the delta between the cached time and the timestamp
+ * 4) if the delta is smaller than 2^31 nanoseconds, then the timestamp was
+ * captured after the PHC time. In this case, the full timestamp is just
+ * the cached PHC time plus the delta.
+ * 5) otherwise, if the delta is larger than 2^31 nanoseconds, then the
+ * timestamp was captured *before* the PHC time, i.e. because the PHC
+ * cache was updated after the timestamp was captured by hardware. In this
+ * case, the full timestamp is the cached time minus the inverse delta.
+ *
+ * This algorithm works even if the PHC time was updated after a Tx timestamp
+ * was requested, but before the Tx timestamp event was reported from
+ * hardware.
+ *
+ * This calculation primarily relies on keeping the cached PHC time up to
+ * date. If the timestamp was captured more than 2^31 nanoseconds after the
+ * PHC time, it is possible that the lower 32bits of PHC time have
+ * overflowed more than once, and we might generate an incorrect timestamp.
+ *
+ * This is prevented by (a) periodically updating the cached PHC time once
+ * a second, and (b) discarding any Tx timestamp packet if it has waited for
+ * a timestamp for more than one second.
+ */
+static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
+{
+ u32 delta, phc_time_lo;
+ u64 ns;
+
+ /* Extract the lower 32 bits of the PHC time */
+ phc_time_lo = (u32)cached_phc_time;
+
+ /* Calculate the delta between the lower 32bits of the cached PHC
+ * time and the in_tstamp value
+ */
+ delta = (in_tstamp - phc_time_lo);
+
+ /* Do not assume that the in_tstamp is always more recent than the
+ * cached PHC time. If the delta is large, it indicates that the
+ * in_tstamp was taken in the past, and should be converted
+ * forward.
+ */
+ if (delta > (U32_MAX / 2)) {
+ /* reverse the delta calculation here */
+ delta = (phc_time_lo - in_tstamp);
+ ns = cached_phc_time - delta;
+ } else {
+ ns = cached_phc_time + delta;
+ }
+
+ return ns;
+}
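
A worked example covering both branches (all values illustrative):

/* Forward case: cached_phc_time = 0xAF0000000, in_tstamp = 0x10000000
 *   delta = 0x10000000 - 0xF0000000 = 0x20000000 (< U32_MAX / 2)
 *   ns    = 0xAF0000000 + 0x20000000 = 0xB10000000
 *
 * Backward case: cached_phc_time = 0xB10000000, in_tstamp = 0xF0000000
 *   delta = 0xF0000000 - 0x10000000 = 0xE0000000 (> U32_MAX / 2)
 *   reversed: delta = 0x10000000 - 0xF0000000 = 0x20000000
 *   ns    = 0xB10000000 - 0x20000000 = 0xAF0000000
 */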
+
+/**
+ * ice_ptp_extend_40b_ts - Convert a 40b timestamp to 64b nanoseconds
+ * @pf: Board private structure
+ * @in_tstamp: Ingress/egress 40b timestamp value
+ *
+ * The Tx and Rx timestamps are 40 bits wide, including 32 bits of nominal
+ * nanoseconds, 7 bits of sub-nanoseconds, and a valid bit.
+ *
+ * *--------------------------------------------------------------*
+ * | 32 bits of nanoseconds | 7 high bits of sub ns underflow | v |
+ * *--------------------------------------------------------------*
+ *
+ * The low bit is an indicator of whether the timestamp is valid. The next
+ * 7 bits are a capture of the upper 7 bits of the sub-nanosecond underflow,
+ * and the remaining 32 bits are the lower 32 bits of the PHC timer.
+ *
+ * It is assumed that the caller verifies the timestamp is valid prior to
+ * calling this function.
+ *
+ * Extract the 32bit nominal nanoseconds and extend them. Use the cached PHC
+ * time stored in the device private PTP structure as the basis for timestamp
+ * extension.
+ *
+ * See ice_ptp_extend_32b_ts for a detailed explanation of the extension
+ * algorithm.
+ */
+static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
+{
+ const u64 mask = GENMASK_ULL(31, 0);
+
+ return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
+ (in_tstamp >> 8) & mask);
+}
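
For reference, the 40-bit layout decodes as below. These helpers are a sketch of the masking performed above, not driver API:

static bool ts_valid(u64 raw)
{
	return raw & 0x1;		/* bit 0: valid indicator */
}

static u8 ts_subns(u64 raw)
{
	return (raw >> 1) & 0x7f;	/* bits 7:1: sub-ns underflow */
}

static u32 ts_nsec(u64 raw)
{
	return (raw >> 8) & 0xffffffff;	/* bits 39:8: nominal ns */
}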
+
+/**
+ * ice_ptp_read_time - Read the time from the device
+ * @pf: Board private structure
+ * @ts: timespec structure to hold the current time value
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored if NULL is given.
+ *
+ * This function reads the source clock registers, which hold 64 bits of
+ * nanoseconds, and converts the result into a timespec before returning.
+ */
+static void
+ice_ptp_read_time(struct ice_pf *pf, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ u64 time_ns = ice_ptp_read_src_clk_reg(pf, sts);
+
+ *ts = ns_to_timespec64(time_ns);
+}
+
+/**
+ * ice_ptp_write_init - Set PHC time to provided value
+ * @pf: Board private structure
+ * @ts: timespec structure that holds the new time value
+ *
+ * Set the PHC time to the specified time provided in the timespec.
+ */
+static int ice_ptp_write_init(struct ice_pf *pf, struct timespec64 *ts)
+{
+ u64 ns = timespec64_to_ns(ts);
+ struct ice_hw *hw = &pf->hw;
+
+ return ice_ptp_init_time(hw, ns);
+}
+
+/**
+ * ice_ptp_write_adj - Adjust PHC clock time atomically
+ * @pf: Board private structure
+ * @adj: Adjustment in nanoseconds
+ *
+ * Perform an atomic adjustment of the PHC time by the specified number of
+ * nanoseconds.
+ */
+static int ice_ptp_write_adj(struct ice_pf *pf, s32 adj)
+{
+ struct ice_hw *hw = &pf->hw;
+
+ return ice_ptp_adj_clock(hw, adj);
+}
+
+/**
+ * ice_ptp_adjfine - Adjust clock increment rate
+ * @info: the driver's PTP info structure
+ * @scaled_ppm: Parts per million with 16-bit fractional field
+ *
+ * Adjust the frequency of the clock by the indicated scaled ppm from the
+ * base frequency.
+ */
+static int ice_ptp_adjfine(struct ptp_clock_info *info, long scaled_ppm)
+{
+ struct ice_pf *pf = ptp_info_to_pf(info);
+ u64 freq, divisor = 1000000ULL;
+ struct ice_hw *hw = &pf->hw;
+ s64 incval, diff;
+ int neg_adj = 0;
+ int err;
+
+ incval = ICE_PTP_NOMINAL_INCVAL_E810;
+
+ if (scaled_ppm < 0) {
+ neg_adj = 1;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ while ((u64)scaled_ppm > div_u64(U64_MAX, incval)) {
+ /* handle overflow by scaling down the scaled_ppm and
+ * the divisor, losing some precision
+ */
+ scaled_ppm >>= 2;
+ divisor >>= 2;
+ }
+
+ freq = (incval * (u64)scaled_ppm) >> 16;
+ diff = div_u64(freq, divisor);
+
+ if (neg_adj)
+ incval -= diff;
+ else
+ incval += diff;
+
+ err = ice_ptp_write_incval_locked(hw, incval);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "PTP failed to set incval, err %d\n",
+ err);
+ return -EIO;
+ }
+
+ return 0;
+}
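
As a sanity check on the arithmetic, consider a 1 ppm adjustment, for which scaled_ppm is 1 << 16:

/* freq = (incval * 65536) >> 16 = incval
 * diff = div_u64(incval, 1000000)
 *
 * i.e. the increment value moves by exactly one millionth of its
 * nominal value, which is the definition of 1 ppm.
 */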
+
+/**
+ * ice_ptp_gettimex64 - Get the time of the clock
+ * @info: the driver's PTP info structure
+ * @ts: timespec64 structure to hold the current time value
+ * @sts: Optional parameter for holding a pair of system timestamps from
+ * the system clock. Will be ignored if NULL is given.
+ *
+ * Read the device clock and return the correct value in ns, after converting it
+ * into a timespec struct.
+ */
+static int
+ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct ice_pf *pf = ptp_info_to_pf(info);
+ struct ice_hw *hw = &pf->hw;
+
+ if (!ice_ptp_lock(hw)) {
+ dev_err(ice_pf_to_dev(pf), "PTP failed to get time\n");
+ return -EBUSY;
+ }
+
+ ice_ptp_read_time(pf, ts, sts);
+ ice_ptp_unlock(hw);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_settime64 - Set the time of the clock
+ * @info: the driver's PTP info structure
+ * @ts: timespec64 structure that holds the new time value
+ *
+ * Set the device clock to the user input value. The conversion from timespec
+ * to ns happens in the write function.
+ */
+static int
+ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
+{
+ struct ice_pf *pf = ptp_info_to_pf(info);
+ struct timespec64 ts64 = *ts;
+ struct ice_hw *hw = &pf->hw;
+ int err;
+
+ if (!ice_ptp_lock(hw)) {
+ err = -EBUSY;
+ goto exit;
+ }
+
+ err = ice_ptp_write_init(pf, &ts64);
+ ice_ptp_unlock(hw);
+
+ if (!err)
+ ice_ptp_update_cached_phctime(pf);
+
+exit:
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_adjtime_nonatomic - Do a non-atomic clock adjustment
+ * @info: the driver's PTP info structure
+ * @delta: Offset in nanoseconds to adjust the time by
+ */
+static int ice_ptp_adjtime_nonatomic(struct ptp_clock_info *info, s64 delta)
+{
+ struct timespec64 now, then;
+
+ then = ns_to_timespec64(delta);
+ ice_ptp_gettimex64(info, &now, NULL);
+ now = timespec64_add(now, then);
+
+ return ice_ptp_settime64(info, (const struct timespec64 *)&now);
+}
+
+/**
+ * ice_ptp_adjtime - Adjust the time of the clock by the indicated delta
+ * @info: the driver's PTP info structure
+ * @delta: Offset in nanoseconds to adjust the time by
+ */
+static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+ struct ice_pf *pf = ptp_info_to_pf(info);
+ struct ice_hw *hw = &pf->hw;
+ struct device *dev;
+ int err;
+
+ dev = ice_pf_to_dev(pf);
+
+ /* Hardware only supports atomic adjustments using signed 32-bit
+ * integers. For any adjustment outside this range, perform
+ * a non-atomic get->adjust->set flow.
+ */
+ if (delta > S32_MAX || delta < S32_MIN) {
+ dev_dbg(dev, "delta = %lld, adjtime non-atomic\n", delta);
+ return ice_ptp_adjtime_nonatomic(info, delta);
+ }
+
+ if (!ice_ptp_lock(hw)) {
+ dev_err(dev, "PTP failed to acquire semaphore in adjtime\n");
+ return -EBUSY;
+ }
+
+ err = ice_ptp_write_adj(pf, delta);
+
+ ice_ptp_unlock(hw);
+
+ if (err) {
+ dev_err(dev, "PTP failed to adjust time, err %d\n", err);
+ return err;
+ }
+
+ ice_ptp_update_cached_phctime(pf);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_get_ts_config - ioctl interface to read the timestamping config
+ * @pf: Board private structure
+ * @ifr: ioctl data
+ *
+ * Copy the timestamping config to user buffer
+ */
+int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+{
+ struct hwtstamp_config *config;
+
+ if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ return -EIO;
+
+ config = &pf->ptp.tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
+
+/**
+ * ice_ptp_set_timestamp_mode - Setup driver for requested timestamp mode
+ * @pf: Board private structure
+ * @config: hwtstamp settings requested or saved
+ */
+static int
+ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
+{
+ /* Reserved for future extensions. */
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ ice_set_tx_tstamp(pf, false);
+ break;
+ case HWTSTAMP_TX_ON:
+ ice_set_tx_tstamp(pf, true);
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ ice_set_rx_tstamp(pf, false);
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ case HWTSTAMP_FILTER_ALL:
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ ice_set_rx_tstamp(pf, true);
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_set_ts_config - ioctl interface to control the timestamping
+ * @pf: Board private structure
+ * @ifr: ioctl data
+ *
+ * Get the user config and store it
+ */
+int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int err;
+
+ if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ return -EAGAIN;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = ice_ptp_set_timestamp_mode(pf, &config);
+ if (err)
+ return err;
+
+ /* Save these settings for future reference */
+ pf->ptp.tstamp_config = config;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
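
For completeness, the userspace side of this path uses the standard hwtstamp ioctl interface; nothing below is ice-specific, and the interface name is the caller's choice:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Enable Tx and Rx hardware timestamps on an interface */
static int enable_hw_tstamps(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	/* the kernel may downgrade rx_filter; cfg holds the result */
	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}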
+
+/**
+ * ice_ptp_rx_hwtstamp - Check for an Rx timestamp
+ * @rx_ring: Ring to get the VSI info
+ * @rx_desc: Receive descriptor
+ * @skb: Particular skb to send timestamp with
+ *
+ * The driver receives a notification of a timestamp in the receive descriptor.
+ * The timestamp is in ns, so it must be extended to 64 bits before being
+ * reported to the stack.
+ */
+void
+ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
+{
+ u32 ts_high;
+ u64 ts_ns;
+
+ /* Populate timesync data into skb */
+ if (rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID) {
+ struct skb_shared_hwtstamps *hwtstamps;
+
+ /* Use ice_ptp_extend_32b_ts directly, using the ring-specific
+ * cached PHC value, rather than accessing the PF. This also
+ * allows us to simply pass the upper 32bits of nanoseconds
+ * directly. Calling ice_ptp_extend_40b_ts is unnecessary as
+ * it would just discard these bits itself.
+ */
+ ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
+ ts_ns = ice_ptp_extend_32b_ts(rx_ring->cached_phctime, ts_high);
+
+ hwtstamps = skb_hwtstamps(skb);
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
+ }
+}
+
+/**
+ * ice_ptp_set_caps - Set PTP capabilities
+ * @pf: Board private structure
+ */
+static void ice_ptp_set_caps(struct ice_pf *pf)
+{
+ struct ptp_clock_info *info = &pf->ptp.info;
+ struct device *dev = ice_pf_to_dev(pf);
+
+ snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
+ dev_driver_string(dev), dev_name(dev));
+ info->owner = THIS_MODULE;
+ info->max_adj = 999999999;
+ info->adjtime = ice_ptp_adjtime;
+ info->adjfine = ice_ptp_adjfine;
+ info->gettimex64 = ice_ptp_gettimex64;
+ info->settime64 = ice_ptp_settime64;
+}
+
+/**
+ * ice_ptp_create_clock - Create PTP clock device for userspace
+ * @pf: Board private structure
+ *
+ * This function creates a new PTP clock device, but only if one does not
+ * already exist. It returns an error if it cannot create one, and success if
+ * a device already exists. ice_ptp_init uses it to create the clock initially
+ * and to prevent global resets from creating duplicate clock devices.
+ */
+static long ice_ptp_create_clock(struct ice_pf *pf)
+{
+ struct ptp_clock_info *info;
+ struct ptp_clock *clock;
+ struct device *dev;
+
+ /* No need to create a clock device if we already have one */
+ if (pf->ptp.clock)
+ return 0;
+
+ ice_ptp_set_caps(pf);
+
+ info = &pf->ptp.info;
+ dev = ice_pf_to_dev(pf);
+
+ /* Attempt to register the clock before enabling the hardware. */
+ clock = ptp_clock_register(info, dev);
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
+
+ pf->ptp.clock = clock;
+
+ return 0;
+}
+
+/**
+ * ice_ptp_tx_tstamp_work - Process Tx timestamps for a port
+ * @work: pointer to the kthread_work struct
+ *
+ * Process timestamps captured by the PHY associated with this port. To do
+ * this, loop over each index with a waiting skb.
+ *
+ * If a given index has a valid timestamp, perform the following steps:
+ *
+ * 1) copy the timestamp out of the PHY register
+ * 4) clear the timestamp valid bit in the PHY register
+ * 5) unlock the index by clearing the associated in_use bit.
+ * 2) extend the 40b timestamp value to get a 64bit timestamp
+ * 3) send that timestamp to the stack
+ *
+ * After looping, if we still have waiting SKBs, then re-queue the work. This
+ * may cause us to effectively poll even when not strictly necessary. We do this
+ * because it's possible a new timestamp was requested around the same time as
+ * the interrupt. In some cases hardware might not interrupt us again when the
+ * timestamp is captured.
+ *
+ * Note that we only take the tracking lock when clearing the bit and when
+ * checking if we need to re-queue this task. The only place where bits can be
+ * set is the hard xmit routine where an SKB has a request flag set. The only
+ * places where we clear bits are this work function, or the periodic cleanup
+ * thread. If the cleanup thread clears a bit we're processing we catch it
+ * when we lock to clear the bit and then grab the SKB pointer. If a Tx thread
+ * starts a new timestamp, we might not begin processing it right away but we
+ * will notice it at the end when we re-queue the work item. If a Tx thread
+ * starts a new timestamp just after this function exits without re-queuing,
+ * the interrupt when the timestamp finishes should trigger. Avoiding holding
+ * the lock for the entire function is important in order to ensure that Tx
+ * threads do not get blocked while waiting for the lock.
+ */
+static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
+{
+ struct ice_ptp_port *ptp_port;
+ struct ice_ptp_tx *tx;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ u8 idx;
+
+ tx = container_of(work, struct ice_ptp_tx, work);
+ if (!tx->init)
+ return;
+
+ ptp_port = container_of(tx, struct ice_ptp_port, tx);
+ pf = ptp_port_to_pf(ptp_port);
+ hw = &pf->hw;
+
+ for_each_set_bit(idx, tx->in_use, tx->len) {
+ struct skb_shared_hwtstamps shhwtstamps = {};
+ u8 phy_idx = idx + tx->quad_offset;
+ u64 raw_tstamp, tstamp;
+ struct sk_buff *skb;
+ int err;
+
+ err = ice_read_phy_tstamp(hw, tx->quad, phy_idx,
+ &raw_tstamp);
+ if (err)
+ continue;
+
+ /* Check if the timestamp is valid */
+ if (!(raw_tstamp & ICE_PTP_TS_VALID))
+ continue;
+
+ /* clear the timestamp register, so that it won't show valid
+ * again when re-used.
+ */
+ ice_clear_phy_tstamp(hw, tx->quad, phy_idx);
+
+ /* The timestamp is valid, so we'll go ahead and clear this
+ * index and then send the timestamp up to the stack.
+ */
+ spin_lock(&tx->lock);
+ clear_bit(idx, tx->in_use);
+ skb = tx->tstamps[idx].skb;
+ tx->tstamps[idx].skb = NULL;
+ spin_unlock(&tx->lock);
+
+ /* it's (unlikely but) possible we raced with the cleanup
+ * thread for discarding old timestamp requests.
+ */
+ if (!skb)
+ continue;
+
+ /* Extend the timestamp using cached PHC time */
+ tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
+ shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
+
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
+ }
+
+ /* Check if we still have work to do. If so, re-queue this task to
+ * poll for remaining timestamps.
+ */
+ spin_lock(&tx->lock);
+ if (!bitmap_empty(tx->in_use, tx->len))
+ kthread_queue_work(pf->ptp.kworker, &tx->work);
+ spin_unlock(&tx->lock);
+}
+
+/**
+ * ice_ptp_request_ts - Request an available Tx timestamp index
+ * @tx: the PTP Tx timestamp tracker to request from
+ * @skb: the SKB to associate with this timestamp request
+ */
+s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
+{
+ u8 idx;
+
+ /* Check if this tracker is initialized */
+ if (!tx->init)
+ return -1;
+
+ spin_lock(&tx->lock);
+ /* Find and set the first available index */
+ idx = find_first_zero_bit(tx->in_use, tx->len);
+ if (idx < tx->len) {
+ /* We got a valid index that no other thread could have set. Store
+ * a reference to the skb and the start time to allow discarding old
+ * requests.
+ */
+ set_bit(idx, tx->in_use);
+ tx->tstamps[idx].start = jiffies;
+ tx->tstamps[idx].skb = skb_get(skb);
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
+
+ spin_unlock(&tx->lock);
+
+ /* return the appropriate PHY timestamp register index, -1 if no
+ * indexes were available.
+ */
+ if (idx >= tx->len)
+ return -1;
+ else
+ return idx + tx->quad_offset;
+}
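
A hedged sketch of how a transmit path would consume this helper; the real hook lives in the driver's hard xmit routine, and programming the returned index into the Tx descriptor is only described in a comment here:

static s8 tx_request_tstamp(struct ice_pf *pf, struct sk_buff *skb)
{
	s8 idx = -1;

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		idx = ice_ptp_request_ts(&pf->ptp.port.tx, skb);

	/* idx >= 0: program it into the Tx descriptor timestamp field;
	 * idx < 0: no free slot, transmit without a hardware timestamp
	 */
	return idx;
}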
+
+/**
+ * ice_ptp_process_ts - Spawn kthread work to handle timestamps
+ * @pf: Board private structure
+ *
+ * Queue work required to process the PTP Tx timestamps outside of interrupt
+ * context.
+ */
+void ice_ptp_process_ts(struct ice_pf *pf)
+{
+ if (pf->ptp.port.tx.init)
+ kthread_queue_work(pf->ptp.kworker, &pf->ptp.port.tx.work);
+}
+
+/**
+ * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
+ * @tx: Tx tracking structure to initialize
+ *
+ * Assumes that the length has already been initialized. Do not call directly,
+ * use ice_ptp_init_tx_e822 or ice_ptp_init_tx_e810 instead.
+ */
+static int
+ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
+{
+ tx->tstamps = kcalloc(tx->len, sizeof(*tx->tstamps), GFP_KERNEL);
+ if (!tx->tstamps)
+ return -ENOMEM;
+
+ tx->in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
+ if (!tx->in_use) {
+ kfree(tx->tstamps);
+ tx->tstamps = NULL;
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&tx->lock);
+ kthread_init_work(&tx->work, ice_ptp_tx_tstamp_work);
+
+ tx->init = 1;
+
+ return 0;
+}
+
+/**
+ * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
+ * @pf: Board private structure
+ * @tx: the tracker to flush
+ */
+static void
+ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
+{
+ u8 idx;
+
+ for (idx = 0; idx < tx->len; idx++) {
+ u8 phy_idx = idx + tx->quad_offset;
+
+ /* Clear any potential residual timestamp in the PHY block */
+ if (!pf->hw.reset_ongoing)
+ ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
+
+ if (tx->tstamps[idx].skb) {
+ dev_kfree_skb_any(tx->tstamps[idx].skb);
+ tx->tstamps[idx].skb = NULL;
+ }
+ }
+}
+
+/**
+ * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
+ * @pf: Board private structure
+ * @tx: Tx tracking structure to release
+ *
+ * Free memory associated with the Tx timestamp tracker.
+ */
+static void
+ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
+{
+ tx->init = 0;
+
+ kthread_cancel_work_sync(&tx->work);
+
+ ice_ptp_flush_tx_tracker(pf, tx);
+
+ kfree(tx->tstamps);
+ tx->tstamps = NULL;
+
+ bitmap_free(tx->in_use);
+ tx->in_use = NULL;
+
+ tx->len = 0;
+}
+
+/**
+ * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
+ * @pf: Board private structure
+ * @tx: the Tx tracking structure to initialize
+ *
+ * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
+ * port has its own block of timestamps, independent of the other ports.
+ */
+static int
+ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
+{
+ tx->quad = pf->hw.port_info->lport;
+ tx->quad_offset = 0;
+ tx->len = INDEX_PER_QUAD;
+
+ return ice_ptp_alloc_tx_tracker(tx);
+}
+
+/**
+ * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
+ * @tx: PTP Tx tracker to clean up
+ *
+ * Loop through the Tx timestamp requests and see if any of them have been
+ * waiting for a long time. Discard any SKBs that have been waiting for more
+ * than 2 seconds. This is long enough to be reasonably sure that the
+ * timestamp will never be captured. This might happen if the packet gets
+ * discarded before it reaches the PHY timestamping block.
+ */
+static void ice_ptp_tx_tstamp_cleanup(struct ice_ptp_tx *tx)
+{
+ u8 idx;
+
+ if (!tx->init)
+ return;
+
+ for_each_set_bit(idx, tx->in_use, tx->len) {
+ struct sk_buff *skb;
+
+ /* Check if this SKB has been waiting for too long */
+ if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
+ continue;
+
+ spin_lock(&tx->lock);
+ skb = tx->tstamps[idx].skb;
+ tx->tstamps[idx].skb = NULL;
+ clear_bit(idx, tx->in_use);
+ spin_unlock(&tx->lock);
+
+ /* Free the SKB after we've cleared the bit */
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void ice_ptp_periodic_work(struct kthread_work *work)
+{
+ struct ice_ptp *ptp = container_of(work, struct ice_ptp, work.work);
+ struct ice_pf *pf = container_of(ptp, struct ice_pf, ptp);
+
+ if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ return;
+
+ ice_ptp_update_cached_phctime(pf);
+
+ ice_ptp_tx_tstamp_cleanup(&pf->ptp.port.tx);
+
+ /* Run twice a second */
+ kthread_queue_delayed_work(ptp->kworker, &ptp->work,
+ msecs_to_jiffies(500));
+}
+
+/**
+ * ice_ptp_init_owner - Initialize PTP_1588_CLOCK device
+ * @pf: Board private structure
+ *
+ * Setup and initialize a PTP clock device that represents the device hardware
+ * clock. Save the clock index for other functions connected to the same
+ * hardware resource.
+ */
+static int ice_ptp_init_owner(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ struct timespec64 ts;
+ u8 src_idx;
+ int err;
+
+ wr32(hw, GLTSYN_SYNC_DLAY, 0);
+
+ /* Clear some HW residue and enable source clock */
+ src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ /* Enable source clocks */
+ wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);
+
+ /* Enable PHY time sync */
+ err = ice_ptp_init_phy_e810(hw);
+ if (err)
+ goto err_exit;
+
+ /* Clear event status indications for auxiliary pins */
+ (void)rd32(hw, GLTSYN_STAT(src_idx));
+
+ /* Acquire the global hardware lock */
+ if (!ice_ptp_lock(hw)) {
+ err = -EBUSY;
+ goto err_exit;
+ }
+
+ /* Write the increment time value to PHY and LAN */
+ err = ice_ptp_write_incval(hw, ICE_PTP_NOMINAL_INCVAL_E810);
+ if (err) {
+ ice_ptp_unlock(hw);
+ goto err_exit;
+ }
+
+ ts = ktime_to_timespec64(ktime_get_real());
+ /* Write the initial Time value to PHY and LAN */
+ err = ice_ptp_write_init(pf, &ts);
+ if (err) {
+ ice_ptp_unlock(hw);
+ goto err_exit;
+ }
+
+ /* Release the global hardware lock */
+ ice_ptp_unlock(hw);
+
+ /* Ensure we have a clock device */
+ err = ice_ptp_create_clock(pf);
+ if (err)
+ goto err_clk;
+
+ /* Store the PTP clock index for other PFs */
+ ice_set_ptp_clock_index(pf);
+
+ return 0;
+
+err_clk:
+ pf->ptp.clock = NULL;
+err_exit:
+ dev_err(dev, "PTP failed to register clock, err %d\n", err);
+
+ return err;
+}
+
+/**
+ * ice_ptp_init - Initialize the PTP support after device probe or reset
+ * @pf: Board private structure
+ *
+ * This function sets device up for PTP support. The first time it is run, it
+ * will create a clock device. It does not create a clock device if one
+ * already exists. It also reconfigures the device after a reset.
+ */
+void ice_ptp_init(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct kthread_worker *kworker;
+ struct ice_hw *hw = &pf->hw;
+ int err;
+
+ /* PTP is currently only supported on E810 devices */
+ if (!ice_is_e810(hw))
+ return;
+
+ /* Check if this PF owns the source timer */
+ if (hw->func_caps.ts_func_info.src_tmr_owned) {
+ err = ice_ptp_init_owner(pf);
+ if (err)
+ return;
+ }
+
+ /* Disable timestamping for both Tx and Rx */
+ ice_ptp_cfg_timestamp(pf, false);
+
+ /* Initialize the PTP port Tx timestamp tracker */
+ ice_ptp_init_tx_e810(pf, &pf->ptp.port.tx);
+
+ /* Initialize work functions */
+ kthread_init_delayed_work(&pf->ptp.work, ice_ptp_periodic_work);
+
+ /* Allocate a kworker for handling work required for the ports
+ * connected to the PTP hardware clock.
+ */
+ kworker = kthread_create_worker(0, "ice-ptp-%s", dev_name(dev));
+ if (IS_ERR(kworker)) {
+ err = PTR_ERR(kworker);
+ goto err_kworker;
+ }
+ pf->ptp.kworker = kworker;
+
+ set_bit(ICE_FLAG_PTP, pf->flags);
+
+ /* Start periodic work going */
+ kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 0);
+
+ dev_info(dev, "PTP init successful\n");
+ return;
+
+err_kworker:
+ /* If we registered a PTP clock, release it */
+ if (pf->ptp.clock) {
+ ptp_clock_unregister(pf->ptp.clock);
+ pf->ptp.clock = NULL;
+ }
+ dev_err(dev, "PTP failed %d\n", err);
+}
+
+/**
+ * ice_ptp_release - Disable the driver/HW support and unregister the clock
+ * @pf: Board private structure
+ *
+ * This function handles the cleanup work required from the initialization by
+ * clearing out the important information and unregistering the clock
+ */
+void ice_ptp_release(struct ice_pf *pf)
+{
+ /* Disable timestamping for both Tx and Rx */
+ ice_ptp_cfg_timestamp(pf, false);
+
+ ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
+
+ clear_bit(ICE_FLAG_PTP, pf->flags);
+
+ kthread_cancel_delayed_work_sync(&pf->ptp.work);
+
+ if (pf->ptp.kworker) {
+ kthread_destroy_worker(pf->ptp.kworker);
+ pf->ptp.kworker = NULL;
+ }
+
+ if (!pf->ptp.clock)
+ return;
+
+ ice_clear_ptp_clock_index(pf);
+ ptp_clock_unregister(pf->ptp.clock);
+ pf->ptp.clock = NULL;
+
+ dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
new file mode 100644
index 000000000000..41e14f98f0e6
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021, Intel Corporation. */
+
+#ifndef _ICE_PTP_H_
+#define _ICE_PTP_H_
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/kthread.h>
+
+#include "ice_ptp_hw.h"
+
+/* The ice hardware captures Tx hardware timestamps in the PHY. The timestamp
+ * is stored in a buffer of registers. Depending on the specific hardware,
+ * this buffer might be shared across multiple PHY ports.
+ *
+ * On transmit of a packet to be timestamped, software is responsible for
+ * selecting an open index. Hardware makes no attempt to lock or prevent
+ * re-use of an index for multiple packets.
+ *
+ * To handle this, timestamp indexes must be tracked by software to ensure
+ * that an index is not re-used for multiple transmitted packets. The
+ * structures and functions declared in this file track the available Tx
+ * register indexes, as well as provide storage for the SKB pointers.
+ *
+ * To allow multiple ports to access the shared register block independently,
+ * the blocks are split up so that indexes are assigned to each port based on
+ * hardware logical port number.
+ */
+
+/**
+ * struct ice_tx_tstamp - Tracking for a single Tx timestamp
+ * @skb: pointer to the SKB for this timestamp request
+ * @start: jiffies when the timestamp was first requested
+ *
+ * This structure tracks a single timestamp request. The SKB pointer is
+ * provided when initiating a request. The start time is used to ensure that
+ * we discard old requests that were not fulfilled within a 2 second time
+ * window.
+ */
+struct ice_tx_tstamp {
+ struct sk_buff *skb;
+ unsigned long start;
+};
+
+/**
+ * struct ice_ptp_tx - Tracking structure for all Tx timestamp requests on a port
+ * @work: work function to handle processing of Tx timestamps
+ * @lock: lock to prevent concurrent write to in_use bitmap
+ * @tstamps: array of len elements storing the outstanding requests
+ * @in_use: bitmap of len bits indicating which slots are in use
+ * @quad: which quad the timestamps are captured in
+ * @quad_offset: offset into timestamp block of the quad to get the real index
+ * @len: length of the tstamps and in_use fields
+ * @init: if true, the tracker is initialized
+ */
+struct ice_ptp_tx {
+ struct kthread_work work;
+ spinlock_t lock; /* lock protecting in_use bitmap */
+ struct ice_tx_tstamp *tstamps;
+ unsigned long *in_use;
+ u8 quad;
+ u8 quad_offset;
+ u8 len;
+ u8 init;
+};
+
+/* Quad and port information for initializing timestamp blocks */
+#define INDEX_PER_QUAD 64
+#define INDEX_PER_PORT (INDEX_PER_QUAD / ICE_PORTS_PER_QUAD)
+
+/**
+ * struct ice_ptp_port - data used to initialize an external port for PTP
+ *
+ * This structure contains PTP data related to the external ports. Currently
+ * it is used for tracking the Tx timestamps of a port. In the future this
+ * structure will also hold information for the E822 port initialization
+ * logic.
+ *
+ * @tx: Tx timestamp tracking for this port
+ */
+struct ice_ptp_port {
+ struct ice_ptp_tx tx;
+};
+
+/**
+ * struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK
+ * @port: data for the PHY port initialization procedure
+ * @work: delayed work function for periodic tasks
+ * @cached_phc_time: a cached copy of the PHC time for timestamp extension
+ * @kworker: kthread worker for handling periodic work
+ * @info: structure defining PTP hardware capabilities
+ * @clock: pointer to registered PTP clock device
+ * @tstamp_config: hardware timestamping configuration
+ */
+struct ice_ptp {
+ struct ice_ptp_port port;
+ struct kthread_delayed_work work;
+ u64 cached_phc_time;
+ struct kthread_worker *kworker;
+ struct ptp_clock_info info;
+ struct ptp_clock *clock;
+ struct hwtstamp_config tstamp_config;
+};
+
+#define __ptp_port_to_ptp(p) \
+ container_of((p), struct ice_ptp, port)
+#define ptp_port_to_pf(p) \
+ container_of(__ptp_port_to_ptp((p)), struct ice_pf, ptp)
+
+#define __ptp_info_to_ptp(i) \
+ container_of((i), struct ice_ptp, info)
+#define ptp_info_to_pf(i) \
+ container_of(__ptp_info_to_ptp((i)), struct ice_pf, ptp)
+
+#define PTP_SHARED_CLK_IDX_VALID BIT(31)
+#define ICE_PTP_TS_VALID BIT(0)
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+struct ice_pf;
+int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr);
+int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr);
+int ice_get_ptp_clock_index(struct ice_pf *pf);
+
+s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
+void ice_ptp_process_ts(struct ice_pf *pf);
+
+void
+ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb);
+void ice_ptp_init(struct ice_pf *pf);
+void ice_ptp_release(struct ice_pf *pf);
+#else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+static inline int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ice_get_ptp_clock_index(struct ice_pf *pf)
+{
+ return -1;
+}
+
+static inline s8
+ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
+{
+ return -1;
+}
+
+static inline void ice_ptp_process_ts(struct ice_pf *pf) { }
+static inline void
+ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { }
+static inline void ice_ptp_init(struct ice_pf *pf) { }
+static inline void ice_ptp_release(struct ice_pf *pf) { }
+#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
+#endif /* _ICE_PTP_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
new file mode 100644
index 000000000000..267312fad59a
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -0,0 +1,653 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2021, Intel Corporation. */
+
+#include "ice_common.h"
+#include "ice_ptp_hw.h"
+
+/* Low level functions for interacting with and managing the device clock used
+ * for the Precision Time Protocol.
+ *
+ * The ice hardware represents the current time using three registers:
+ *
+ * GLTSYN_TIME_H GLTSYN_TIME_L GLTSYN_TIME_R
+ * +---------------+ +---------------+ +---------------+
+ * | 32 bits | | 32 bits | | 32 bits |
+ * +---------------+ +---------------+ +---------------+
+ *
+ * The registers are incremented every clock tick using a 40bit increment
+ * value defined over two registers:
+ *
+ * GLTSYN_INCVAL_H GLTSYN_INCVAL_L
+ * +---------------+ +---------------+
+ * | 8 bits | | 32 bits |
+ * +---------------+ +---------------+
+ *
+ * The increment value is added to the GLTSYN_TIME_R and GLTSYN_TIME_L
+ * registers every clock source tick. Depending on the specific device
+ * configuration, the clock source frequency could be one of a number of
+ * values.
+ *
+ * For E810 devices, the increment frequency is 812.5 MHz
+ *
+ * The hardware captures timestamps in the PHY for incoming packets, and for
+ * outgoing packets on request. To support this, the PHY maintains a timer
+ * that matches the lower 64 bits of the global source timer.
+ *
+ * In order to ensure that the PHY timers and the source timer are equivalent,
+ * shadow registers are used to prepare the desired initial values. A special
+ * sync command is issued to trigger copying from the shadow registers into
+ * the appropriate source and PHY registers simultaneously.
+ */
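
Given the 812.5 MHz figure above, the nominal increment follows directly. Treating the 40-bit increment value as nanoseconds in 8.32 fixed point is an assumption of this sketch, based on the register widths shown:

/* period = 1e9 / 812.5e6 ns = 1.230769... ns
 * incval = period * 2^32  ~= 0x13B13B13B
 *
 * i.e. each source clock tick adds ~1.23 ns to the time registers.
 */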
+
+/**
+ * ice_get_ptp_src_clock_index - determine source clock index
+ * @hw: pointer to HW struct
+ *
+ * Determine the source clock index currently in use, based on device
+ * capabilities reported during initialization.
+ */
+u8 ice_get_ptp_src_clock_index(struct ice_hw *hw)
+{
+ return hw->func_caps.ts_func_info.tmr_index_assoc;
+}
+
+/* E810 functions
+ *
+ * The following functions operate on the E810 series devices which use
+ * a separate external PHY.
+ */
+
+/**
+ * ice_read_phy_reg_e810 - Read register from external PHY on E810
+ * @hw: pointer to the HW struct
+ * @addr: the address to read from
+ * @val: On return, the value read from the PHY
+ *
+ * Read a register from the external PHY on the E810 device.
+ */
+static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
+{
+ struct ice_sbq_msg_input msg = {0};
+ int status;
+
+ msg.msg_addr_low = lower_16_bits(addr);
+ msg.msg_addr_high = upper_16_bits(addr);
+ msg.opcode = ice_sbq_msg_rd;
+ msg.dest_dev = rmn_0;
+
+ status = ice_sbq_rw_reg(hw, &msg);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, status %d\n",
+ status);
+ return status;
+ }
+
+ *val = msg.data;
+
+ return 0;
+}
+
+/**
+ * ice_write_phy_reg_e810 - Write register on external PHY on E810
+ * @hw: pointer to the HW struct
+ * @addr: the address to write to
+ * @val: the value to write to the PHY
+ *
+ * Write a value to a register of the external PHY on the E810 device.
+ */
+static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
+{
+ struct ice_sbq_msg_input msg = {0};
+ int status;
+
+ msg.msg_addr_low = lower_16_bits(addr);
+ msg.msg_addr_high = upper_16_bits(addr);
+ msg.opcode = ice_sbq_msg_wr;
+ msg.dest_dev = rmn_0;
+ msg.data = val;
+
+ status = ice_sbq_rw_reg(hw, &msg);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, status %d\n",
+ status);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY
+ * @hw: pointer to the HW struct
+ * @lport: the lport to read from
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the timestamp block of the external PHY
+ * on the E810 device.
+ */
+static int
+ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
+{
+ u32 lo_addr, hi_addr, lo, hi;
+ int status;
+
+ lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
+ hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
+
+ status = ice_read_phy_reg_e810(hw, lo_addr, &lo);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, status %d\n",
+ status);
+ return status;
+ }
+
+ status = ice_read_phy_reg_e810(hw, hi_addr, &hi);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, status %d\n",
+ status);
+ return status;
+ }
+
+ /* For E810 devices, the timestamp is reported with the lower 32 bits
+ * in the low register, and the upper 8 bits in the high register.
+ */
+ *tstamp = ((u64)hi) << TS_HIGH_S | ((u64)lo & TS_LOW_M);
+
+ return 0;
+}
+
+/**
+ * ice_clear_phy_tstamp_e810 - Clear a timestamp from the external PHY
+ * @hw: pointer to the HW struct
+ * @lport: the lport to read from
+ * @idx: the timestamp index to reset
+ *
+ * Clear a timestamp, resetting its valid bit, from the timestamp block of the
+ * external PHY on the E810 device.
+ */
+static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
+{
+ u32 lo_addr, hi_addr;
+ int status;
+
+ lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
+ hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
+
+ status = ice_write_phy_reg_e810(hw, lo_addr, 0);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, status %d\n",
+ status);
+ return status;
+ }
+
+ status = ice_write_phy_reg_e810(hw, hi_addr, 0);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, status %d\n",
+ status);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_init_phy_e810 - Enable PTP function on the external PHY
+ * @hw: pointer to HW struct
+ *
+ * Enable the timesync PTP functionality for the external PHY connected to
+ * this function.
+ */
+int ice_ptp_init_phy_e810(struct ice_hw *hw)
+{
+ int status;
+ u8 tmr_idx;
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
+ GLTSYN_ENA_TSYN_ENA_M);
+ if (status)
+ ice_debug(hw, ICE_DBG_PTP, "PTP failed in ena_phy_time_syn %d\n",
+ status);
+
+ return status;
+}
+
+/**
+ * ice_ptp_prep_phy_time_e810 - Prepare PHY port with initial time
+ * @hw: Board private structure
+ * @time: Time to initialize the PHY port clock to
+ *
+ * Program the PHY port ETH_GLTSYN_SHTIME registers in preparation for setting the
+ * initial clock time. The time will not actually be programmed until the
+ * driver issues an INIT_TIME command.
+ *
+ * The time value is the upper 32 bits of the PHY timer, usually in units of
+ * nominal nanoseconds.
+ */
+static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
+{
+ int status;
+ u8 tmr_idx;
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_0(tmr_idx), 0);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_0, status %d\n",
+ status);
+ return status;
+ }
+
+ status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHTIME_L(tmr_idx), time);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write SHTIME_L, status %d\n",
+ status);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_prep_phy_adj_e810 - Prep PHY port for a time adjustment
+ * @hw: pointer to HW struct
+ * @adj: adjustment value to program
+ *
+ * Prepare the PHY port for an atomic adjustment by programming the PHY
+ * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual adjustment
+ * is completed by issuing an ADJ_TIME sync command.
+ *
+ * The adjustment value only contains the portion used for the upper 32bits of
+ * the PHY timer, usually in units of nominal nanoseconds. Negative
+ * adjustments are supported using 2s complement arithmetic.
+ */
+static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
+{
+ int status;
+ u8 tmr_idx;
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ /* Adjustments are represented as signed 2's complement values in
+ * nanoseconds. Sub-nanosecond adjustment is not supported.
+ */
+ status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), 0);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_L, status %d\n",
+ status);
+ return status;
+ }
+
+ status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), adj);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write adj to PHY SHADJ_H, status %d\n",
+ status);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_prep_phy_incval_e810 - Prep PHY port increment value change
+ * @hw: pointer to HW struct
+ * @incval: The new 40bit increment value to prepare
+ *
+ * Prepare the PHY port for a new increment value by programming the PHY
+ * ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual change is
+ * completed by issuing an INIT_INCVAL command.
+ */
+static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
+{
+ u32 high, low;
+ int status;
+ u8 tmr_idx;
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ low = lower_32_bits(incval);
+ high = upper_32_bits(incval);
+
+ status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_L(tmr_idx), low);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write incval to PHY SHADJ_L, status %d\n",
+ status);
+ return status;
+ }
+
+ status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_SHADJ_H(tmr_idx), high);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write incval PHY SHADJ_H, status %d\n",
+ status);
+ return status;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_port_cmd_e810 - Prepare all external PHYs for a timer command
+ * @hw: pointer to HW struct
+ * @cmd: Command to be sent to the port
+ *
+ * Prepare the external PHYs connected to this device for a timer sync
+ * command.
+ */
+static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 cmd_val, val;
+ int status;
+
+ switch (cmd) {
+ case INIT_TIME:
+ cmd_val = GLTSYN_CMD_INIT_TIME;
+ break;
+ case INIT_INCVAL:
+ cmd_val = GLTSYN_CMD_INIT_INCVAL;
+ break;
+ case ADJ_TIME:
+ cmd_val = GLTSYN_CMD_ADJ_TIME;
+ break;
+ case READ_TIME:
+ cmd_val = GLTSYN_CMD_READ_TIME;
+ break;
+ case ADJ_TIME_AT_TIME:
+ cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
+ break;
+ }
+
+ /* Read, modify, write */
+ status = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, status %d\n", status);
+ return status;
+ }
+
+ /* Modify necessary bits only and perform write */
+ val &= ~TS_CMD_MASK_E810;
+ val |= cmd_val;
+
+ status = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, status %d\n", status);
+ return status;
+ }
+
+ return 0;
+}
+
+/* Device-agnostic functions
+ *
+ * The following functions implement useful behavior to hide the differences
+ * between E810 and other devices. They call the device-specific
+ * implementations where necessary.
+ *
+ * Currently, the driver only supports E810, but future work will enable
+ * support for E822-based devices.
+ */
+
+/**
+ * ice_ptp_lock - Acquire PTP global semaphore register lock
+ * @hw: pointer to the HW struct
+ *
+ * Acquire the global PTP hardware semaphore lock. Returns true if the lock
+ * was acquired, false otherwise.
+ *
+ * The PFTSYN_SEM register sets the busy bit on read, returning the previous
+ * value. If software sees the busy bit cleared, this means that this function
+ * acquired the lock (and the busy bit is now set). If software sees the busy
+ * bit set, it means that another function acquired the lock.
+ *
+ * Software must clear the busy bit with a write to release the lock for other
+ * functions when done.
+ */
+bool ice_ptp_lock(struct ice_hw *hw)
+{
+ u32 hw_lock;
+ int i;
+
+#define MAX_TRIES 5
+
+ for (i = 0; i < MAX_TRIES; i++) {
+ hw_lock = rd32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
+ hw_lock = hw_lock & PFTSYN_SEM_BUSY_M;
+ if (hw_lock) {
+ /* Somebody is holding the lock */
+ usleep_range(10000, 20000);
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ return !hw_lock;
+}
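+
+/* The acquisition loop above makes MAX_TRIES (5) attempts with a 10-20 ms
+ * sleep between them, so a contended semaphore costs at most roughly
+ * 50-100 ms before this returns false. A minimal caller sketch:
+ *
+ *	if (!ice_ptp_lock(hw))
+ *		return -EBUSY;
+ *	err = ice_ptp_init_time(hw, ns);
+ *	ice_ptp_unlock(hw);
+ */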
+
+/**
+ * ice_ptp_unlock - Release PTP global semaphore register lock
+ * @hw: pointer to the HW struct
+ *
+ * Release the global PTP hardware semaphore lock. This is done by writing to
+ * the PFTSYN_SEM register.
+ */
+void ice_ptp_unlock(struct ice_hw *hw)
+{
+ wr32(hw, PFTSYN_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), 0);
+}
+
+/**
+ * ice_ptp_src_cmd - Prepare source timer for a timer command
+ * @hw: pointer to HW structure
+ * @cmd: Timer command
+ *
+ * Prepare the source timer for an upcoming timer sync command.
+ */
+static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 cmd_val;
+ u8 tmr_idx;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+ cmd_val = tmr_idx << SEL_CPK_SRC;
+
+ switch (cmd) {
+ case INIT_TIME:
+ cmd_val |= GLTSYN_CMD_INIT_TIME;
+ break;
+ case INIT_INCVAL:
+ cmd_val |= GLTSYN_CMD_INIT_INCVAL;
+ break;
+ case ADJ_TIME:
+ cmd_val |= GLTSYN_CMD_ADJ_TIME;
+ break;
+ case ADJ_TIME_AT_TIME:
+ cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
+ break;
+ case READ_TIME:
+ cmd_val |= GLTSYN_CMD_READ_TIME;
+ break;
+ }
+
+ wr32(hw, GLTSYN_CMD, cmd_val);
+}
+
+/**
+ * ice_ptp_tmr_cmd - Prepare and trigger a timer sync command
+ * @hw: pointer to HW struct
+ * @cmd: the command to issue
+ *
+ * Prepare the source timer and PHY timers and then trigger the requested
+ * command. This causes the shadow registers previously written in preparation
+ * for the command to be synchronously applied to both the source and PHY
+ * timers.
+ */
+static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ int status;
+
+ /* First, prepare the source timer */
+ ice_ptp_src_cmd(hw, cmd);
+
+ /* Next, prepare the ports */
+ status = ice_ptp_port_cmd_e810(hw, cmd);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, status %d\n",
+ cmd, status);
+ return status;
+ }
+
+ /* Write the sync command register to drive both source and PHY timer commands
+ * synchronously
+ */
+ wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_init_time - Initialize device time to provided value
+ * @hw: pointer to HW struct
+ * @time: 64bits of time (GLTSYN_TIME_L and GLTSYN_TIME_H)
+ *
+ * Initialize the device to the specified time provided. This requires a three
+ * step process:
+ *
+ * 1) write the new init time to the source timer shadow registers
+ * 2) write the new init time to the PHY timer shadow registers
+ * 3) issue an init_time timer command to synchronously switch both the source
+ * and port timers to the new init time value at the next clock cycle.
+ */
+int ice_ptp_init_time(struct ice_hw *hw, u64 time)
+{
+ int status;
+ u8 tmr_idx;
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ /* Source timers */
+ wr32(hw, GLTSYN_SHTIME_L(tmr_idx), lower_32_bits(time));
+ wr32(hw, GLTSYN_SHTIME_H(tmr_idx), upper_32_bits(time));
+ wr32(hw, GLTSYN_SHTIME_0(tmr_idx), 0);
+
+ /* PHY timers */
+ /* Fill Rx and Tx ports and send msg to PHY */
+ status = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
+ if (status)
+ return status;
+
+ return ice_ptp_tmr_cmd(hw, INIT_TIME);
+}
+
+/**
+ * ice_ptp_write_incval - Program PHC with new increment value
+ * @hw: pointer to HW struct
+ * @incval: Source timer increment value per clock cycle
+ *
+ * Program the PHC with a new increment value. This requires a three-step
+ * process:
+ *
+ * 1) Write the increment value to the source timer shadow registers
+ * 2) Write the increment value to the PHY timer shadow registers
+ * 3) Issue an INIT_INCVAL timer command to synchronously switch both the
+ * source and port timers to the new increment value at the next clock
+ * cycle.
+ */
+int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
+{
+ int status;
+ u8 tmr_idx;
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ /* Shadow Adjust */
+ wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
+ wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
+
+ status = ice_ptp_prep_phy_incval_e810(hw, incval);
+ if (status)
+ return status;
+
+ return ice_ptp_tmr_cmd(hw, INIT_INCVAL);
+}
+
+/**
+ * ice_ptp_write_incval_locked - Program new incval while holding semaphore
+ * @hw: pointer to HW struct
+ * @incval: Source timer increment value per clock cycle
+ *
+ * Program a new PHC incval while holding the PTP semaphore.
+ */
+int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
+{
+ int status;
+
+ if (!ice_ptp_lock(hw))
+ return -EBUSY;
+
+ status = ice_ptp_write_incval(hw, incval);
+
+ ice_ptp_unlock(hw);
+
+ return status;
+}
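+
+/* Minimal sketch of a hypothetical frequency-adjustment caller (an
+ * .adjfine-style path, shown for illustration only): scale the nominal
+ * increment by ppb parts per billion, then program it under the semaphore.
+ *
+ *	u64 incval = ICE_PTP_NOMINAL_INCVAL_E810;
+ *	u64 diff = mul_u64_u64_div_u64(incval, abs(ppb), 1000000000ULL);
+ *
+ *	incval = (ppb < 0) ? incval - diff : incval + diff;
+ *	err = ice_ptp_write_incval_locked(hw, incval);
+ */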
+
+/**
+ * ice_ptp_adj_clock - Adjust PHC clock time atomically
+ * @hw: pointer to HW struct
+ * @adj: Adjustment in nanoseconds
+ *
+ * Perform an atomic adjustment of the PHC time by the specified number of
+ * nanoseconds. This requires a three-step process:
+ *
+ * 1) Write the adjustment to the source timer shadow registers
+ * 2) Write the adjustment to the PHY timer shadow registers
+ * 3) Issue an ADJ_TIME timer command to synchronously apply the adjustment to
+ * both the source and port timers at the next clock cycle.
+ */
+int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
+{
+ int status;
+ u8 tmr_idx;
+
+ tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+
+ /* Write the desired clock adjustment into the GLTSYN_SHADJ register.
+ * For an ADJ_TIME command, this set of registers represents the value
+ * to add to the clock time. It supports subtraction by interpreting
+ * the value as a 2's complement integer.
+ */
+ wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
+ wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
+
+ status = ice_ptp_prep_phy_adj_e810(hw, adj);
+ if (status)
+ return status;
+
+ return ice_ptp_tmr_cmd(hw, ADJ_TIME);
+}
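+
+/* For example, stepping the clock back by 5 ns means calling this with
+ * adj = -5; the value lands in GLTSYN_SHADJ_H as the two's complement
+ * pattern 0xFFFFFFFB, and the hardware adds it to the timer when the
+ * ADJ_TIME command executes, effectively subtracting 5 ns.
+ */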
+
+/**
+ * ice_read_phy_tstamp - Read a PHY timestamp from the timestamp block
+ * @hw: pointer to the HW struct
+ * @block: the block to read from
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40-bit timestamp value
+ *
+ * Read a 40-bit timestamp value out of the timestamp block.
+ */
+int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
+{
+ return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
+}
+
+/**
+ * ice_clear_phy_tstamp - Clear a timestamp from the timestamp block
+ * @hw: pointer to the HW struct
+ * @block: the block to clear the timestamp from
+ * @idx: the timestamp index to reset
+ *
+ * Clear a timestamp from the timestamp block, resetting its valid bit.
+ */
+int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
+{
+ return ice_clear_phy_tstamp_e810(hw, block, idx);
+}
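+
+/* Minimal sketch of how a Tx timestamp consumer might pair the two helpers
+ * above; process_ts() is a hypothetical handler, shown for illustration:
+ *
+ *	u64 raw;
+ *
+ *	if (!ice_read_phy_tstamp(hw, block, idx, &raw)) {
+ *		process_ts(raw);
+ *		ice_clear_phy_tstamp(hw, block, idx);
+ *	}
+ */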
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
new file mode 100644
index 000000000000..55a414e87018
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021, Intel Corporation. */
+
+#ifndef _ICE_PTP_HW_H_
+#define _ICE_PTP_HW_H_
+
+enum ice_ptp_tmr_cmd {
+ INIT_TIME,
+ INIT_INCVAL,
+ ADJ_TIME,
+ ADJ_TIME_AT_TIME,
+ READ_TIME
+};
+
+/* Increment value to generate nanoseconds in the GLTSYN_TIME_L register for
+ * the E810 devices. Based on a PLL with an 812.5 MHz frequency.
+ */
+#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL
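+/* The constant follows from the clock period expressed in 32.32 fixed
+ * point: (10^9 ns / 812.5e6 Hz) * 2^32 = (16/13) * 2^32 ~= 0x13b13b13b,
+ * i.e. about 1.2308 ns is added to the timer on every PLL cycle.
+ */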
+
+/* Device-agnostic functions */
+u8 ice_get_ptp_src_clock_index(struct ice_hw *hw);
+bool ice_ptp_lock(struct ice_hw *hw);
+void ice_ptp_unlock(struct ice_hw *hw);
+int ice_ptp_init_time(struct ice_hw *hw, u64 time);
+int ice_ptp_write_incval(struct ice_hw *hw, u64 incval);
+int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval);
+int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj);
+int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp);
+int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx);
+
+/* E810 family functions */
+int ice_ptp_init_phy_e810(struct ice_hw *hw);
+
+#define PFTSYN_SEM_BYTES 4
+
+/* PHY timer commands */
+#define SEL_CPK_SRC 8
+
+/* Time Sync command Definitions */
+#define GLTSYN_CMD_INIT_TIME BIT(0)
+#define GLTSYN_CMD_INIT_INCVAL BIT(1)
+#define GLTSYN_CMD_ADJ_TIME BIT(2)
+#define GLTSYN_CMD_ADJ_INIT_TIME (BIT(2) | BIT(3))
+#define GLTSYN_CMD_READ_TIME BIT(7)
+
+#define TS_CMD_MASK_E810 0xFF
+#define SYNC_EXEC_CMD 0x3
+
+/* E810 timesync enable register */
+#define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4))
+
+/* E810 shadow init time registers */
+#define ETH_GLTSYN_SHTIME_0(i) (0x03000368 + ((i) * 32))
+#define ETH_GLTSYN_SHTIME_L(i) (0x0300036C + ((i) * 32))
+
+/* E810 shadow time adjust registers */
+#define ETH_GLTSYN_SHADJ_L(_i) (0x03000378 + ((_i) * 32))
+#define ETH_GLTSYN_SHADJ_H(_i) (0x0300037C + ((_i) * 32))
+
+/* E810 timer command register */
+#define ETH_GLTSYN_CMD 0x03000344
+
+/* Source timer incval macros */
+#define INCVAL_HIGH_M 0xFF
+
+/* Timestamp block macros */
+#define TS_LOW_M 0xFFFFFFFF
+#define TS_HIGH_S 32
+
+#define BYTES_PER_IDX_ADDR_L_U 8
+
+/* External PHY timestamp address */
+#define TS_EXT(a, port, idx) ((a) + (0x1000 * (port)) + \
+ ((idx) * BYTES_PER_IDX_ADDR_L_U))
+
+#define LOW_TX_MEMORY_BANK_START 0x03090000
+#define HIGH_TX_MEMORY_BANK_START 0x03090004
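+/* For example, the low timestamp word for port 1, index 2 of the Tx bank
+ * sits at TS_EXT(LOW_TX_MEMORY_BANK_START, 1, 2)
+ * = 0x03090000 + 0x1000 + 2 * 8 = 0x03091010.
+ */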
+
+#endif /* _ICE_PTP_HW_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
new file mode 100644
index 000000000000..ead75fe2bcda
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021, Intel Corporation. */
+
+#ifndef _ICE_SBQ_CMD_H_
+#define _ICE_SBQ_CMD_H_
+
+/* This header file defines the Sideband Queue commands, error codes and
+ * descriptor format. It is shared between Firmware and Software.
+ */
+
+/* Sideband Queue command structure and opcodes */
+enum ice_sbq_opc {
+ /* Sideband Queue commands */
+ ice_sbq_opc_neigh_dev_req = 0x0C00,
+ ice_sbq_opc_neigh_dev_ev = 0x0C01
+};
+
+/* Sideband Queue descriptor. Indirect command
+ * and non-posted
+ */
+struct ice_sbq_cmd_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 cmd_retval;
+
+ /* Opaque message data */
+ __le32 cookie_high;
+ __le32 cookie_low;
+
+ union {
+ __le16 cmd_len;
+ __le16 cmpl_len;
+ } param0;
+
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_sbq_evt_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 cmd_retval;
+ u8 data[24];
+};
+
+enum ice_sbq_msg_dev {
+ rmn_0 = 0x02,
+ rmn_1 = 0x03,
+ rmn_2 = 0x04,
+ cgu = 0x06
+};
+
+enum ice_sbq_msg_opcode {
+ ice_sbq_msg_rd = 0x00,
+ ice_sbq_msg_wr = 0x01
+};
+
+#define ICE_SBQ_MSG_FLAGS 0x40
+#define ICE_SBQ_MSG_SBE_FBE 0x0F
+
+struct ice_sbq_msg_req {
+ u8 dest_dev;
+ u8 src_dev;
+ u8 opcode;
+ u8 flags;
+ u8 sbe_fbe;
+ u8 func_id;
+ __le16 msg_addr_low;
+ __le32 msg_addr_high;
+ __le32 data;
+};
+
+struct ice_sbq_msg_cmpl {
+ u8 dest_dev;
+ u8 src_dev;
+ u8 opcode;
+ u8 flags;
+ __le32 data;
+};
+
+/* Internal struct */
+struct ice_sbq_msg_input {
+ u8 dest_dev;
+ u8 opcode;
+ u16 msg_addr_low;
+ u32 msg_addr_high;
+ u32 data;
+};
+#endif /* _ICE_SBQ_CMD_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 2f097637e405..a17e24e54cf3 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -596,6 +596,50 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
}
/**
+ * ice_alloc_rdma_q_ctx - allocate RDMA queue contexts for the given VSI and TC
+ * @hw: pointer to the HW struct
+ * @vsi_handle: VSI handle
+ * @tc: TC number
+ * @new_numqs: number of queues
+ */
+static enum ice_status
+ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
+{
+ struct ice_vsi_ctx *vsi_ctx;
+ struct ice_q_ctx *q_ctx;
+
+ vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!vsi_ctx)
+ return ICE_ERR_PARAM;
+ /* allocate RDMA queue contexts */
+ if (!vsi_ctx->rdma_q_ctx[tc]) {
+ vsi_ctx->rdma_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
+ new_numqs,
+ sizeof(*q_ctx),
+ GFP_KERNEL);
+ if (!vsi_ctx->rdma_q_ctx[tc])
+ return ICE_ERR_NO_MEMORY;
+ vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
+ return 0;
+ }
+ /* the number of queues has increased; update the queue contexts */
+ if (new_numqs > vsi_ctx->num_rdma_q_entries[tc]) {
+ u16 prev_num = vsi_ctx->num_rdma_q_entries[tc];
+
+ q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
+ sizeof(*q_ctx), GFP_KERNEL);
+ if (!q_ctx)
+ return ICE_ERR_NO_MEMORY;
+ memcpy(q_ctx, vsi_ctx->rdma_q_ctx[tc],
+ prev_num * sizeof(*q_ctx));
+ devm_kfree(ice_hw_to_dev(hw), vsi_ctx->rdma_q_ctx[tc]);
+ vsi_ctx->rdma_q_ctx[tc] = q_ctx;
+ vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
+ }
+ return 0;
+}
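+
+/* Note the grow-only reallocation above: when a TC needs more RDMA queues,
+ * a larger array is allocated, the prev_num existing contexts are copied
+ * over, and the old array is freed. The context array is never shrunk here;
+ * it is released wholesale in ice_clear_vsi_q_ctx().
+ */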
+
+/**
* ice_aq_rl_profile - performs a rate limiting task
* @hw: pointer to the HW struct
* @opcode: opcode for add, query, or remove profile(s)
@@ -1774,13 +1818,22 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
if (!vsi_ctx)
return ICE_ERR_PARAM;
- prev_numqs = vsi_ctx->sched.max_lanq[tc];
+ if (owner == ICE_SCHED_NODE_OWNER_LAN)
+ prev_numqs = vsi_ctx->sched.max_lanq[tc];
+ else
+ prev_numqs = vsi_ctx->sched.max_rdmaq[tc];
/* num queues are not changed or less than the previous number */
if (new_numqs <= prev_numqs)
return status;
- status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
- if (status)
- return status;
+ if (owner == ICE_SCHED_NODE_OWNER_LAN) {
+ status = ice_alloc_lan_q_ctx(hw, vsi_handle, tc, new_numqs);
+ if (status)
+ return status;
+ } else {
+ status = ice_alloc_rdma_q_ctx(hw, vsi_handle, tc, new_numqs);
+ if (status)
+ return status;
+ }
if (new_numqs)
ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
@@ -1795,7 +1848,10 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
new_num_nodes, owner);
if (status)
return status;
- vsi_ctx->sched.max_lanq[tc] = new_numqs;
+ if (owner == ICE_SCHED_NODE_OWNER_LAN)
+ vsi_ctx->sched.max_lanq[tc] = new_numqs;
+ else
+ vsi_ctx->sched.max_rdmaq[tc] = new_numqs;
return 0;
}
@@ -1861,6 +1917,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
* recreate the child nodes all the time in these cases.
*/
vsi_ctx->sched.max_lanq[tc] = 0;
+ vsi_ctx->sched.max_rdmaq[tc] = 0;
}
/* update the VSI child nodes */
@@ -1990,6 +2047,8 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
}
if (owner == ICE_SCHED_NODE_OWNER_LAN)
vsi_ctx->sched.max_lanq[i] = 0;
+ else
+ vsi_ctx->sched.max_rdmaq[i] = 0;
}
status = 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 357d3073d814..3b6c1420aa7b 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */
+#include "ice_lib.h"
#include "ice_switch.h"
#define ICE_ETH_DA_OFFSET 0
@@ -302,6 +303,10 @@ static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
vsi->lan_q_ctx[i] = NULL;
}
+ if (vsi->rdma_q_ctx[i]) {
+ devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
+ vsi->rdma_q_ctx[i] = NULL;
+ }
}
}
@@ -423,6 +428,29 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
}
/**
+ * ice_cfg_rdma_fltr - enable/disable RDMA filtering on VSI
+ * @hw: pointer to HW struct
+ * @vsi_handle: VSI SW index
+ * @enable: boolean for enable/disable
+ */
+int
+ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
+{
+ struct ice_vsi_ctx *ctx;
+
+ ctx = ice_get_vsi_ctx(hw, vsi_handle);
+ if (!ctx)
+ return -EIO;
+
+ if (enable)
+ ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
+ else
+ ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
+
+ return ice_status_to_errno(ice_update_vsi(hw, vsi_handle, ctx, NULL));
+}
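+
+/* Minimal sketch of a hypothetical call site: a PF would typically flip the
+ * filter on before handing a VSI to the RDMA auxiliary driver, e.g.:
+ *
+ *	err = ice_cfg_rdma_fltr(&pf->hw, vsi->idx, true);
+ *	if (err)
+ *		dev_err(dev, "Failed to enable RDMA filtering\n");
+ */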
+
+/**
* ice_aq_alloc_free_vsi_list
* @hw: pointer to the HW struct
* @vsi_list_id: VSI list ID returned or used for lookup
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index 8b4f9d35c860..6bb7358ff67b 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -26,6 +26,8 @@ struct ice_vsi_ctx {
u8 vf_num;
u16 num_lan_q_entries[ICE_MAX_TRAFFIC_CLASS];
struct ice_q_ctx *lan_q_ctx[ICE_MAX_TRAFFIC_CLASS];
+ u16 num_rdma_q_entries[ICE_MAX_TRAFFIC_CLASS];
+ struct ice_q_ctx *rdma_q_ctx[ICE_MAX_TRAFFIC_CLASS];
};
enum ice_sw_fwd_act_type {
@@ -223,6 +225,8 @@ enum ice_status
ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list);
enum ice_status
ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list);
+int
+ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index e2b4b29ea207..917eba7fdd0c 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -523,7 +523,7 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog)
{
struct ice_ring *xdp_ring;
- int err;
+ int err, result;
u32 act;
act = bpf_prog_run_xdp(xdp_prog, xdp);
@@ -532,14 +532,20 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
return ICE_XDP_PASS;
case XDP_TX:
xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
- return ice_xmit_xdp_buff(xdp, xdp_ring);
+ result = ice_xmit_xdp_buff(xdp, xdp_ring);
+ if (result == ICE_XDP_CONSUMED)
+ goto out_failure;
+ return result;
case XDP_REDIRECT:
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- return !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
+ return ICE_XDP_REDIR;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_DROP:
@@ -2131,6 +2137,41 @@ static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
}
/**
+ * ice_tstamp - set up context descriptor for hardware timestamp
+ * @tx_ring: pointer to the Tx ring to send buffer on
+ * @skb: pointer to the SKB we're sending
+ * @first: Tx buffer
+ * @off: Tx offload parameters
+ */
+static void
+ice_tstamp(struct ice_ring *tx_ring, struct sk_buff *skb,
+ struct ice_tx_buf *first, struct ice_tx_offload_params *off)
+{
+ s8 idx;
+
+ /* only timestamp the outbound packet if the user has requested it */
+ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+ return;
+
+ if (!tx_ring->ptp_tx)
+ return;
+
+ /* Tx timestamps cannot be sampled when doing TSO */
+ if (first->tx_flags & ICE_TX_FLAGS_TSO)
+ return;
+
+ /* Grab an open timestamp slot */
+ idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
+ if (idx < 0)
+ return;
+
+ off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
+ (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
+ ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S));
+ first->tx_flags |= ICE_TX_FLAGS_TSYN;
+}
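+
+/* The context descriptor built above carries the timestamp request: the
+ * TSYN command bit arms Tx timestamping for this packet, and the reserved
+ * slot index rides in the TSO_LEN field of quad word 1 so the completed
+ * timestamp can later be matched back to the right latch register.
+ */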
+
+/**
* ice_xmit_frame_ring - Sends buffer on Tx ring
* @skb: send buffer
* @tx_ring: ring to send buffer on
@@ -2143,6 +2184,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
struct ice_tx_offload_params offload = { 0 };
struct ice_vsi *vsi = tx_ring->vsi;
struct ice_tx_buf *first;
+ struct ethhdr *eth;
unsigned int count;
int tso, csum;
@@ -2189,13 +2231,17 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
goto out_drop;
/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
- if (unlikely(skb->priority == TC_PRIO_CONTROL &&
+ eth = (struct ethhdr *)skb_mac_header(skb);
+ if (unlikely((skb->priority == TC_PRIO_CONTROL ||
+ eth->h_proto == htons(ETH_P_LLDP)) &&
vsi->type == ICE_VSI_PF &&
vsi->port_info->qos_cfg.is_sw_lldp))
offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
ICE_TX_CTX_DESC_SWTCH_UPLINK <<
ICE_TXD_CTX_QW1_CMD_S);
+ ice_tstamp(tx_ring, skb, first, &offload);
+
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
struct ice_tx_ctx_desc *cdesc;
u16 i = tx_ring->next_to_use;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index c5a92ac787d6..1e46e80f3d6f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -118,6 +118,7 @@ static inline int ice_skb_pad(void)
* freed instead of returned like skb packets.
*/
#define ICE_TX_FLAGS_DUMMY_PKT BIT(3)
+#define ICE_TX_FLAGS_TSYN BIT(4)
#define ICE_TX_FLAGS_IPV4 BIT(5)
#define ICE_TX_FLAGS_IPV6 BIT(6)
#define ICE_TX_FLAGS_TUNNEL BIT(7)
@@ -311,6 +312,10 @@ struct ice_ring {
u32 txq_teid; /* Added Tx queue TEID */
u16 rx_buf_len;
u8 dcb_tc; /* Traffic class of ring */
+ struct ice_ptp_tx *tx_tstamps;
+ u64 cached_phctime;
+ u8 ptp_rx:1;
+ u8 ptp_tx:1;
} ____cacheline_internodealigned_in_smp;
static inline bool ice_ring_uses_build_skb(struct ice_ring *ring)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 207f6ee3a7f6..166cf25d1139 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -175,6 +175,9 @@ ice_process_skb_fields(struct ice_ring *rx_ring,
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
ice_rx_csum(rx_ring, skb, rx_desc, ptype);
+
+ if (rx_ring->ptp_rx)
+ ice_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 4474dd6a7ba1..d33d1906103c 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -14,6 +14,7 @@
#include "ice_lan_tx_rx.h"
#include "ice_flex_type.h"
#include "ice_protocol_type.h"
+#include "ice_sbq_cmd.h"
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
@@ -45,8 +46,10 @@ static inline u32 ice_round_to_num(u32 N, u32 R)
#define ICE_DBG_FLOW BIT_ULL(9)
#define ICE_DBG_SW BIT_ULL(13)
#define ICE_DBG_SCHED BIT_ULL(14)
+#define ICE_DBG_RDMA BIT_ULL(15)
#define ICE_DBG_PKG BIT_ULL(16)
#define ICE_DBG_RES BIT_ULL(17)
+#define ICE_DBG_PTP BIT_ULL(19)
#define ICE_DBG_AQ_MSG BIT_ULL(24)
#define ICE_DBG_AQ_DESC BIT_ULL(25)
#define ICE_DBG_AQ_DESC_BUF BIT_ULL(26)
@@ -63,7 +66,7 @@ enum ice_aq_res_ids {
/* FW update timeout definitions are in milliseconds */
#define ICE_NVM_TIMEOUT 180000
#define ICE_CHANGE_LOCK_TIMEOUT 1000
-#define ICE_GLOBAL_CFG_LOCK_TIMEOUT 3000
+#define ICE_GLOBAL_CFG_LOCK_TIMEOUT 5000
enum ice_aq_res_access_type {
ICE_RES_READ = 1,
@@ -146,6 +149,7 @@ struct ice_link_status {
u16 max_frame_size;
u16 link_speed;
u16 req_speeds;
+ u8 link_cfg_err;
u8 lse_ena; /* Link Status Event notification */
u8 link_info;
u8 an_info;
@@ -262,6 +266,8 @@ struct ice_hw_common_caps {
u8 rss_table_entry_width; /* RSS Entry width in bits */
u8 dcb;
+ u8 ieee_1588;
+ u8 rdma;
bool nvm_update_pending_nvm;
bool nvm_update_pending_orom;
@@ -273,6 +279,54 @@ struct ice_hw_common_caps {
#define ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
};
+/* IEEE 1588 TIME_SYNC specific info */
+/* Function specific definitions */
+#define ICE_TS_FUNC_ENA_M BIT(0)
+#define ICE_TS_SRC_TMR_OWND_M BIT(1)
+#define ICE_TS_TMR_ENA_M BIT(2)
+#define ICE_TS_TMR_IDX_OWND_S 4
+#define ICE_TS_TMR_IDX_OWND_M BIT(4)
+#define ICE_TS_CLK_FREQ_S 16
+#define ICE_TS_CLK_FREQ_M ICE_M(0x7, ICE_TS_CLK_FREQ_S)
+#define ICE_TS_CLK_SRC_S 20
+#define ICE_TS_CLK_SRC_M BIT(20)
+#define ICE_TS_TMR_IDX_ASSOC_S 24
+#define ICE_TS_TMR_IDX_ASSOC_M BIT(24)
+
+struct ice_ts_func_info {
+ /* Function specific info */
+ u32 clk_freq;
+ u8 clk_src;
+ u8 tmr_index_assoc;
+ u8 ena;
+ u8 tmr_index_owned;
+ u8 src_tmr_owned;
+ u8 tmr_ena;
+};
+
+/* Device specific definitions */
+#define ICE_TS_TMR0_OWNR_M 0x7
+#define ICE_TS_TMR0_OWND_M BIT(3)
+#define ICE_TS_TMR1_OWNR_S 4
+#define ICE_TS_TMR1_OWNR_M ICE_M(0x7, ICE_TS_TMR1_OWNR_S)
+#define ICE_TS_TMR1_OWND_M BIT(7)
+#define ICE_TS_DEV_ENA_M BIT(24)
+#define ICE_TS_TMR0_ENA_M BIT(25)
+#define ICE_TS_TMR1_ENA_M BIT(26)
+
+struct ice_ts_dev_info {
+ /* Device specific info */
+ u32 ena_ports;
+ u32 tmr_own_map;
+ u32 tmr0_owner;
+ u32 tmr1_owner;
+ u8 tmr0_owned;
+ u8 tmr1_owned;
+ u8 ena;
+ u8 tmr0_ena;
+ u8 tmr1_ena;
+};
+
/* Function specific capabilities */
struct ice_hw_func_caps {
struct ice_hw_common_caps common_cap;
@@ -281,6 +335,7 @@ struct ice_hw_func_caps {
u32 guar_num_vsi;
u32 fd_fltr_guar; /* Number of filters guaranteed */
u32 fd_fltr_best_effort; /* Number of best effort filters */
+ struct ice_ts_func_info ts_func_info;
};
/* Device wide capabilities */
@@ -289,6 +344,7 @@ struct ice_hw_dev_caps {
u32 num_vfs_exposed; /* Total number of VFs exposed */
u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
u32 num_flow_director_fltr; /* Number of FD filters available */
+ struct ice_ts_dev_info ts_dev_info;
u32 num_funcs;
};
@@ -440,6 +496,7 @@ struct ice_sched_node {
u8 tc_num;
u8 owner;
#define ICE_SCHED_NODE_OWNER_LAN 0
+#define ICE_SCHED_NODE_OWNER_RDMA 2
};
/* Access Macros for Tx Sched Elements data */
@@ -511,6 +568,7 @@ struct ice_sched_vsi_info {
struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS];
struct list_head list_entry;
u16 max_lanq[ICE_MAX_TRAFFIC_CLASS];
+ u16 max_rdmaq[ICE_MAX_TRAFFIC_CLASS];
};
/* driver defines the policy */
@@ -749,6 +807,7 @@ struct ice_hw {
/* Control Queue info */
struct ice_ctl_q_info adminq;
+ struct ice_ctl_q_info sbq;
struct ice_ctl_q_info mailboxq;
u8 api_branch; /* API branch version */
@@ -784,6 +843,14 @@ struct ice_hw {
u8 ucast_shared; /* true if VSIs can share unicast addr */
+#define ICE_PHY_PER_NAC 1
+#define ICE_MAX_QUAD 2
+#define ICE_NUM_QUAD_TYPE 2
+#define ICE_PORTS_PER_QUAD 4
+#define ICE_PHY_0_LAST_QUAD 1
+#define ICE_PORTS_PER_PHY 8
+#define ICE_NUM_EXTERNAL_PORTS ICE_PORTS_PER_PHY
+
/* Active package version (currently active) */
struct ice_pkg_ver active_pkg_ver;
u32 active_track_id;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index a1d22d2aa0bd..6392e0b31b90 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -713,13 +713,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
*/
clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
- /* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it
- * in the case of VFR. If this is done for PFR, it can mess up VF
- * resets because the VF driver may already have started cleanup
- * by the time we get here.
+ /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver
+ * needs to clear them in the case of VFR/VFLR. If this is done for
+ * PFR, it can mess up VF resets because the VF driver may already
+ * have started cleanup by the time we get here.
*/
- if (!is_pfr)
+ if (!is_pfr) {
wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0);
+ wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0);
+ }
/* In the case of a VFLR, the HW has already reset the VF and we
* just need to clean up, so don't hit the VFRTRIG register.
@@ -937,16 +939,18 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
vf->num_mac++;
- if (is_valid_ether_addr(vf->dflt_lan_addr.addr)) {
- status = ice_fltr_add_mac(vsi, vf->dflt_lan_addr.addr,
+ if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
+ status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr,
ICE_FWD_TO_VSI);
if (status) {
dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %s\n",
- &vf->dflt_lan_addr.addr[0], vf->vf_id,
+ &vf->hw_lan_addr.addr[0], vf->vf_id,
ice_stat_str(status));
return ice_status_to_errno(status);
}
vf->num_mac++;
+
+ ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
}
return 0;
@@ -1698,7 +1702,12 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
ice_vf_ctrl_vsi_release(vf);
ice_vf_pre_vsi_rebuild(vf);
- ice_vf_rebuild_vsi_with_release(vf);
+
+ if (ice_vf_rebuild_vsi_with_release(vf)) {
+ dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id);
+ return false;
+ }
+
ice_vf_post_vsi_rebuild(vf);
/* if the VF has been reset allow it to come up again */
@@ -2379,7 +2388,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
- vf->dflt_lan_addr.addr);
+ vf->hw_lan_addr.addr);
/* match guest capabilities */
vf->driver_caps = vfres->vf_cap_flags;
@@ -3535,10 +3544,9 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
struct virtchnl_vsi_queue_config_info *qci =
(struct virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_queue_pair_info *qpi;
- u16 num_rxq = 0, num_txq = 0;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
- int i;
+ int i, q_idx;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -3576,18 +3584,31 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
+
+ q_idx = qpi->rxq.queue_id;
+
+ /* make sure the selected "q_idx" is in the valid range of queues
+ * for the selected "vsi"
+ */
+ if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
/* copy Tx queue info from VF into VSI */
if (qpi->txq.ring_len > 0) {
- num_txq++;
vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
vsi->tx_rings[i]->count = qpi->txq.ring_len;
+ if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
}
/* copy Rx queue info from VF into VSI */
if (qpi->rxq.ring_len > 0) {
u16 max_frame_size = ice_vc_get_max_frame_size(vf);
- num_rxq++;
vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
vsi->rx_rings[i]->count = qpi->rxq.ring_len;
@@ -3604,27 +3625,20 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- }
-
- vsi->max_frame = qpi->rxq.max_pkt_size;
- /* add space for the port VLAN since the VF driver is not
- * expected to account for it in the MTU calculation
- */
- if (vf->port_vlan_info)
- vsi->max_frame += VLAN_HLEN;
- }
- /* VF can request to configure less than allocated queues or default
- * allocated queues. So update the VSI with new number
- */
- vsi->num_txq = num_txq;
- vsi->num_rxq = num_rxq;
- /* All queues of VF VSI are in TC 0 */
- vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
- vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;
+ vsi->max_frame = qpi->rxq.max_pkt_size;
+ /* add space for the port VLAN since the VF driver is not
+ * expected to account for it in the MTU calculation
+ */
+ if (vf->port_vlan_info)
+ vsi->max_frame += VLAN_HLEN;
- if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
- v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+ if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+ }
+ }
error_param:
/* send the response to the VF */
@@ -3660,19 +3674,95 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf)
}
/**
+ * ice_vc_ether_addr_type - get type of virtchnl_ether_addr
+ * @vc_ether_addr: used to extract the type
+ */
+static u8
+ice_vc_ether_addr_type(struct virtchnl_ether_addr *vc_ether_addr)
+{
+ return (vc_ether_addr->type & VIRTCHNL_ETHER_ADDR_TYPE_MASK);
+}
+
+/**
+ * ice_is_vc_addr_legacy - check if the MAC address is from an older VF
+ * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
+ */
+static bool
+ice_is_vc_addr_legacy(struct virtchnl_ether_addr *vc_ether_addr)
+{
+ u8 type = ice_vc_ether_addr_type(vc_ether_addr);
+
+ return (type == VIRTCHNL_ETHER_ADDR_LEGACY);
+}
+
+/**
+ * ice_is_vc_addr_primary - check if the MAC address is the VF's primary MAC
+ * @vc_ether_addr: VIRTCHNL structure that contains MAC and type
+ *
+ * This function should only be called when the MAC address in
+ * virtchnl_ether_addr is a valid unicast MAC
+ */
+static bool
+ice_is_vc_addr_primary(struct virtchnl_ether_addr __maybe_unused *vc_ether_addr)
+{
+ u8 type = ice_vc_ether_addr_type(vc_ether_addr);
+
+ return (type == VIRTCHNL_ETHER_ADDR_PRIMARY);
+}
+
+/**
+ * ice_vfhw_mac_add - update the VF's cached hardware MAC if allowed
+ * @vf: VF to update
+ * @vc_ether_addr: structure from VIRTCHNL with MAC to add
+ */
+static void
+ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
+{
+ u8 *mac_addr = vc_ether_addr->addr;
+
+ if (!is_valid_ether_addr(mac_addr))
+ return;
+
+ /* only allow legacy VF drivers to set the device and hardware MAC if it
+ * is zero and allow new VF drivers to set the hardware MAC if the type
+ * was correctly specified over VIRTCHNL
+ */
+ if ((ice_is_vc_addr_legacy(vc_ether_addr) &&
+ is_zero_ether_addr(vf->hw_lan_addr.addr)) ||
+ ice_is_vc_addr_primary(vc_ether_addr)) {
+ ether_addr_copy(vf->dev_lan_addr.addr, mac_addr);
+ ether_addr_copy(vf->hw_lan_addr.addr, mac_addr);
+ }
+
+ /* hardware and device MACs are already set, but it's possible that the
+ * VF driver sent the VIRTCHNL_OP_ADD_ETH_ADDR message before the
+ * VIRTCHNL_OP_DEL_ETH_ADDR when trying to update its MAC, so save it
+ * away for the legacy VF driver case, where it is restored in the
+ * delete flow
+ */
+ if (ice_is_vc_addr_legacy(vc_ether_addr)) {
+ ether_addr_copy(vf->legacy_last_added_umac.addr,
+ mac_addr);
+ vf->legacy_last_added_umac.time_modified = jiffies;
+ }
+}
+
+/**
* ice_vc_add_mac_addr - attempt to add the MAC address passed in
* @vf: pointer to the VF info
* @vsi: pointer to the VF's VSI
- * @mac_addr: MAC address to add
+ * @vc_ether_addr: VIRTCHNL MAC address structure used to add MAC
*/
static int
-ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
+ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct virtchnl_ether_addr *vc_ether_addr)
{
struct device *dev = ice_pf_to_dev(vf->pf);
+ u8 *mac_addr = vc_ether_addr->addr;
enum ice_status status;
- /* default unicast MAC already added */
- if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
+ /* device MAC already added */
+ if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr))
return 0;
if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
@@ -3691,12 +3781,7 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
return -EIO;
}
- /* Set the default LAN address to the latest unicast MAC address added
- * by the VF. The default LAN address is reported by the PF via
- * ndo_get_vf_config.
- */
- if (is_unicast_ether_addr(mac_addr))
- ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
+ ice_vfhw_mac_add(vf, vc_ether_addr);
vf->num_mac++;
@@ -3704,19 +3789,65 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
}
/**
+ * ice_is_legacy_umac_expired - check if last added legacy unicast MAC expired
+ * @last_added_umac: structure used to check expiration
+ */
+static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac)
+{
+#define ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME msecs_to_jiffies(3000)
+ return time_is_before_jiffies(last_added_umac->time_modified +
+ ICE_LEGACY_VF_MAC_CHANGE_EXPIRE_TIME);
+}
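+
+/* Example: if a legacy VF driver last added a unicast MAC at jiffies T,
+ * this returns false for any delete arriving within 3000 ms of T (the MAC
+ * is restored in the delete flow) and true afterwards, at which point the
+ * cached address is simply dropped.
+ */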
+
+/**
+ * ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed
+ * @vf: VF to update
+ * @vc_ether_addr: structure from VIRTCHNL with MAC to delete
+ */
+static void
+ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
+{
+ u8 *mac_addr = vc_ether_addr->addr;
+
+ if (!is_valid_ether_addr(mac_addr) ||
+ !ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
+ return;
+
+ /* allow the device MAC to be repopulated in the add flow and don't
+ * clear the hardware MAC (i.e. hw_lan_addr.addr) here as that is meant
+ * to be persistent on VM reboot and across driver unload/load, which
+ * won't work if we clear the hardware MAC here
+ */
+ eth_zero_addr(vf->dev_lan_addr.addr);
+
+ /* only update cached hardware MAC for legacy VF drivers on delete
+ * because we cannot guarantee order/type of MAC from the VF driver
+ */
+ if (ice_is_vc_addr_legacy(vc_ether_addr) &&
+ !ice_is_legacy_umac_expired(&vf->legacy_last_added_umac)) {
+ ether_addr_copy(vf->dev_lan_addr.addr,
+ vf->legacy_last_added_umac.addr);
+ ether_addr_copy(vf->hw_lan_addr.addr,
+ vf->legacy_last_added_umac.addr);
+ }
+}
+
+/**
* ice_vc_del_mac_addr - attempt to delete the MAC address passed in
* @vf: pointer to the VF info
* @vsi: pointer to the VF's VSI
- * @mac_addr: MAC address to delete
+ * @vc_ether_addr: VIRTCHNL MAC address structure used to delete MAC
*/
static int
-ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
+ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
+ struct virtchnl_ether_addr *vc_ether_addr)
{
struct device *dev = ice_pf_to_dev(vf->pf);
+ u8 *mac_addr = vc_ether_addr->addr;
enum ice_status status;
if (!ice_can_vf_change_mac(vf) &&
- ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
+ ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
return 0;
status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
@@ -3730,8 +3861,7 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
return -EIO;
}
- if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
- eth_zero_addr(vf->dflt_lan_addr.addr);
+ ice_vfhw_mac_del(vf, vc_ether_addr);
vf->num_mac--;
@@ -3750,7 +3880,8 @@ static int
ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
{
int (*ice_vc_cfg_mac)
- (struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr);
+ (struct ice_vf *vf, struct ice_vsi *vsi,
+ struct virtchnl_ether_addr *virtchnl_ether_addr);
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_ether_addr_list *al =
(struct virtchnl_ether_addr_list *)msg;
@@ -3799,7 +3930,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
is_zero_ether_addr(mac_addr))
continue;
- result = ice_vc_cfg_mac(vf, vsi, mac_addr);
+ result = ice_vc_cfg_mac(vf, vsi, &al->list[i]);
if (result == -EEXIST || result == -ENOENT) {
continue;
} else if (result) {
@@ -4437,7 +4568,7 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
return -EBUSY;
ivi->vf = vf_id;
- ether_addr_copy(ivi->mac, vf->dflt_lan_addr.addr);
+ ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr);
/* VF configuration for VLAN and applicable QoS */
ivi->vlan = vf->port_vlan_info & VLAN_VID_MASK;
@@ -4513,7 +4644,8 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
vf = &pf->vf[vf_id];
/* nothing left to do, unicast MAC already set */
- if (ether_addr_equal(vf->dflt_lan_addr.addr, mac))
+ if (ether_addr_equal(vf->dev_lan_addr.addr, mac) &&
+ ether_addr_equal(vf->hw_lan_addr.addr, mac))
return 0;
ret = ice_check_vf_ready_for_cfg(vf);
@@ -4529,7 +4661,8 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
/* VF is notified of its new MAC via the PF's response to the
* VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
*/
- ether_addr_copy(vf->dflt_lan_addr.addr, mac);
+ ether_addr_copy(vf->dev_lan_addr.addr, mac);
+ ether_addr_copy(vf->hw_lan_addr.addr, mac);
if (is_zero_ether_addr(mac)) {
/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
vf->pf_set_mac = false;
@@ -4682,7 +4815,7 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
- vf->dflt_lan_addr.addr,
+ vf->dev_lan_addr.addr,
test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
? "on" : "off");
}
@@ -4726,7 +4859,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf)
dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
vf->mdd_tx_events.count, hw->pf_id, i,
- vf->dflt_lan_addr.addr);
+ vf->dev_lan_addr.addr);
}
}
}
@@ -4816,7 +4949,7 @@ ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
if (pf_vsi)
dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
- &vf->dflt_lan_addr.addr[0],
+ &vf->dev_lan_addr.addr[0],
pf_vsi->netdev->dev_addr);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index d800ed83d6c3..842cb077df86 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -58,6 +58,11 @@ enum ice_virtchnl_cap {
ICE_VIRTCHNL_VF_CAP_PRIVILEGE,
};
+struct ice_time_mac {
+ unsigned long time_modified;
+ u8 addr[ETH_ALEN];
+};
+
/* VF MDD events print structure */
struct ice_mdd_vf_events {
u16 count; /* total count of Rx|Tx events */
@@ -78,7 +83,9 @@ struct ice_vf {
struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
struct virtchnl_version_info vf_ver;
u32 driver_caps; /* reported by VF driver */
- struct virtchnl_ether_addr dflt_lan_addr;
+ struct virtchnl_ether_addr dev_lan_addr;
+ struct virtchnl_ether_addr hw_lan_addr;
+ struct ice_time_mac legacy_last_added_umac;
DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF);
DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF);
u16 port_vlan_info; /* Port VLAN ID and QoS */
@@ -151,16 +158,18 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
enum virtchnl_status_code v_retval, u8 *msg, u16 msglen);
bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id);
#else /* CONFIG_PCI_IOV */
-#define ice_process_vflr_event(pf) do {} while (0)
-#define ice_free_vfs(pf) do {} while (0)
-#define ice_vc_process_vf_msg(pf, event) do {} while (0)
-#define ice_vc_notify_link_state(pf) do {} while (0)
-#define ice_vc_notify_reset(pf) do {} while (0)
-#define ice_set_vf_state_qs_dis(vf) do {} while (0)
-#define ice_vf_lan_overflow_event(pf, event) do {} while (0)
-#define ice_print_vfs_mdd_events(pf) do {} while (0)
-#define ice_print_vf_rx_mdd_event(vf) do {} while (0)
-#define ice_restore_all_vfs_msi_state(pdev) do {} while (0)
+static inline void ice_process_vflr_event(struct ice_pf *pf) { }
+static inline void ice_free_vfs(struct ice_pf *pf) { }
+static inline
+void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) { }
+static inline void ice_vc_notify_link_state(struct ice_pf *pf) { }
+static inline void ice_vc_notify_reset(struct ice_pf *pf) { }
+static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf) { }
+static inline
+void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { }
+static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { }
+static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { }
+static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { }
static inline bool
ice_is_malicious_vf(struct ice_pf __always_unused *pf,
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index faa7b8d96adb..239b9bf10794 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -236,7 +236,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
}
- err = ice_setup_rx_ctx(rx_ring);
+ err = ice_vsi_cfg_rxq(rx_ring);
if (err)
goto free_buf;
@@ -270,6 +270,7 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
if (!pool)
return -EINVAL;
+ clear_bit(qid, vsi->af_xdp_zc_qps);
xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
return 0;
@@ -300,6 +301,8 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
if (err)
return err;
+ set_bit(qid, vsi->af_xdp_zc_qps);
+
return 0;
}
@@ -473,9 +476,10 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
rcu_read_unlock();
- return result;
+ return ICE_XDP_REDIR;
}
switch (act) {
@@ -484,11 +488,14 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
case XDP_TX:
xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
result = ice_xmit_xdp_buff(xdp, xdp_ring);
+ if (result == ICE_XDP_CONSUMED)
+ goto out_failure;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_DROP:
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index fad783690134..ea208808623a 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -60,7 +60,7 @@ ice_xsk_wakeup(struct net_device __always_unused *netdev,
return -EOPNOTSUPP;
}
-#define ice_xsk_clean_rx_ring(rx_ring) do {} while (0)
-#define ice_xsk_clean_xdp_ring(xdp_ring) do {} while (0)
+static inline void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring) { }
+static inline void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring) { }
#endif /* CONFIG_XDP_SOCKETS */
#endif /* !_ICE_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 50863fd87d53..cbe92fd23a70 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -2756,6 +2756,7 @@ out:
return ret_val;
}
+#ifdef CONFIG_IGB_HWMON
static const u8 e1000_emc_temp_data[4] = {
E1000_EMC_INTERNAL_DATA,
E1000_EMC_DIODE1_DATA,
@@ -2769,7 +2770,6 @@ static const u8 e1000_emc_therm_limit[4] = {
E1000_EMC_DIODE3_THERM_LIMIT
};
-#ifdef CONFIG_IGB_HWMON
/**
* igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
* @hw: pointer to hardware structure
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 7bda8c5edea5..2d3daf022651 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -749,7 +749,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter);
void igb_ptp_tx_hang(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
- struct sk_buff *skb);
+ ktime_t *timestamp);
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 7545da216d8b..636a1b1fb7e1 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -831,7 +831,7 @@ static int igb_set_eeprom(struct net_device *netdev,
memcpy(ptr, bytes, eeprom->len);
for (i = 0; i < last_word - first_word + 1; i++)
- eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+ cpu_to_le16s(&eeprom_buff[i]);
ret_val = hw->nvm.ops.write(hw, first_word,
last_word - first_word + 1, eeprom_buff);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 038a9fd1af44..5db303d64d14 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -356,7 +356,7 @@ static void igb_dump(struct igb_adapter *adapter)
struct igb_reg_info *reginfo;
struct igb_ring *tx_ring;
union e1000_adv_tx_desc *tx_desc;
- struct my_u0 { u64 a; u64 b; } *u0;
+ struct my_u0 { __le64 a; __le64 b; } *u0;
struct igb_ring *rx_ring;
union e1000_adv_rx_desc *rx_desc;
u32 staterr;
@@ -2643,7 +2643,8 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter,
}
input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
- input->filter.vlan_tci = match.key->vlan_priority;
+ input->filter.vlan_tci =
+ (__force __be16)match.key->vlan_priority;
}
}
@@ -6275,12 +6276,12 @@ int igb_xmit_xdp_ring(struct igb_adapter *adapter,
cmd_type |= len | IGB_TXD_DCMD;
tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
- olinfo_status = cpu_to_le32(len << E1000_ADVTXD_PAYLEN_SHIFT);
+ olinfo_status = len << E1000_ADVTXD_PAYLEN_SHIFT;
/* 82575 requires a unique index per ring */
if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
olinfo_status |= tx_ring->reg_idx << 4;
- tx_desc->read.olinfo_status = olinfo_status;
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer->bytecount);
@@ -8280,7 +8281,7 @@ static void igb_add_rx_frag(struct igb_ring *rx_ring,
static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer,
struct xdp_buff *xdp,
- union e1000_adv_rx_desc *rx_desc)
+ ktime_t timestamp)
{
#if (PAGE_SIZE < 8192)
unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
@@ -8300,12 +8301,8 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
if (unlikely(!skb))
return NULL;
- if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
- if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) {
- xdp->data += IGB_TS_HDR_LEN;
- size -= IGB_TS_HDR_LEN;
- }
- }
+ if (timestamp)
+ skb_hwtstamps(skb)->hwtstamp = timestamp;
/* Determine available headroom for copy */
headlen = size;
@@ -8336,7 +8333,7 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer,
struct xdp_buff *xdp,
- union e1000_adv_rx_desc *rx_desc)
+ ktime_t timestamp)
{
#if (PAGE_SIZE < 8192)
unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
@@ -8363,11 +8360,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
if (metasize)
skb_metadata_set(skb, metasize);
- /* pull timestamp out of packet data */
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb))
- __skb_pull(skb, IGB_TS_HDR_LEN);
- }
+ if (timestamp)
+ skb_hwtstamps(skb)->hwtstamp = timestamp;
/* update buffer offset */
#if (PAGE_SIZE < 8192)
@@ -8401,18 +8395,20 @@ static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
break;
case XDP_TX:
result = igb_xdp_xmit_back(adapter, xdp);
+ if (result == IGB_XDP_CONSUMED)
+ goto out_failure;
break;
case XDP_REDIRECT:
err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
- if (!err)
- result = IGB_XDP_REDIR;
- else
- result = IGB_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
+ result = IGB_XDP_REDIR;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_DROP:
@@ -8597,7 +8593,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
- vid = be16_to_cpu(rx_desc->wb.upper.vlan);
+ vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
else
vid = le16_to_cpu(rx_desc->wb.upper.vlan);
@@ -8682,7 +8678,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
while (likely(total_packets < budget)) {
union e1000_adv_rx_desc *rx_desc;
struct igb_rx_buffer *rx_buffer;
+ ktime_t timestamp = 0;
+ int pkt_offset = 0;
unsigned int size;
+ void *pktbuf;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
@@ -8702,14 +8701,24 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
dma_rmb();
rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
+ pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset;
+
+ /* pull rx packet timestamp if available and valid */
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ int ts_hdr_len;
+
+ ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector,
+ pktbuf, &timestamp);
+
+ pkt_offset += ts_hdr_len;
+ size -= ts_hdr_len;
+ }
/* retrieve a buffer from the ring */
if (!skb) {
- unsigned int offset = igb_rx_offset(rx_ring);
- unsigned char *hard_start;
+ unsigned char *hard_start = pktbuf - igb_rx_offset(rx_ring);
+ unsigned int offset = pkt_offset + igb_rx_offset(rx_ring);
- hard_start = page_address(rx_buffer->page) +
- rx_buffer->page_offset - offset;
xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */
@@ -8732,10 +8741,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
} else if (skb)
igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
else if (ring_uses_build_skb(rx_ring))
- skb = igb_build_skb(rx_ring, rx_buffer, &xdp, rx_desc);
+ skb = igb_build_skb(rx_ring, rx_buffer, &xdp,
+ timestamp);
else
skb = igb_construct_skb(rx_ring, rx_buffer,
- &xdp, rx_desc);
+ &xdp, timestamp);
/* exit if we failed to retrieve a buffer */
if (!skb) {
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index ba61fe9bfaf4..0011b15e678c 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -856,30 +856,28 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
dev_kfree_skb_any(skb);
}
-#define IGB_RET_PTP_DISABLED 1
-#define IGB_RET_PTP_INVALID 2
-
/**
* igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
* @q_vector: Pointer to interrupt specific structure
* @va: Pointer to address containing Rx buffer
- * @skb: Buffer containing timestamp and packet
+ * @timestamp: Pointer where timestamp will be stored
*
* This function is meant to retrieve a timestamp from the first buffer of an
* incoming frame. The value is stored in little endian format starting on
* byte 8
*
- * Returns: 0 if success, nonzero if failure
+ * Returns: The timestamp header length or 0 if not available
**/
int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
- struct sk_buff *skb)
+ ktime_t *timestamp)
{
struct igb_adapter *adapter = q_vector->adapter;
+ struct skb_shared_hwtstamps ts;
__le64 *regval = (__le64 *)va;
int adjust = 0;
if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
- return IGB_RET_PTP_DISABLED;
+ return 0;
/* The timestamp is recorded in little endian format.
* DWORD: 0 1 2 3
@@ -888,10 +886,9 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
/* check reserved dwords are zero, be/le doesn't matter for zero */
if (regval[0])
- return IGB_RET_PTP_INVALID;
+ return 0;
- igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
- le64_to_cpu(regval[1]));
+ igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1]));
/* adjust timestamp for the RX latency based on link speed */
if (adapter->hw.mac.type == e1000_i210) {
@@ -907,10 +904,10 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
break;
}
}
- skb_hwtstamps(skb)->hwtstamp =
- ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
- return 0;
+ *timestamp = ktime_sub_ns(ts.hwtstamp, adjust);
+
+ return IGB_TS_HDR_LEN;
}
/**
@@ -1134,12 +1131,12 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
| E1000_FTQF_MASK); /* mask all inputs */
ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
- wr32(E1000_IMIR(3), htons(PTP_EV_PORT));
+ wr32(E1000_IMIR(3), (__force unsigned int)htons(PTP_EV_PORT));
wr32(E1000_IMIREXT(3),
(E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
if (hw->mac.type == e1000_82576) {
/* enable source port check */
- wr32(E1000_SPQF(3), htons(PTP_EV_PORT));
+ wr32(E1000_SPQF(3), (__force unsigned int)htons(PTP_EV_PORT));
ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
}
wr32(E1000_FTQF(3), ftqf);
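The (__force __be16) and (__force unsigned int) casts sprinkled through the igb hunks above (and the igbvf ones below) are sparse annotations only: __le16/__be16 are "bitwise" types the checker refuses to mix, and __force overrides that check without changing the generated code. A rough standalone illustration; the le16/be16 typedefs and be16_to_host() are stand-ins for the kernel's types and be16_to_cpu(), and a little-endian host is assumed:

#include <stdint.h>

#ifdef __CHECKER__			/* sparse defines __CHECKER__ */
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else					/* plain compilers see ordinary u16s */
#define __bitwise
#define __force
#endif

typedef uint16_t __bitwise le16;	/* stand-ins for __le16 / __be16 */
typedef uint16_t __bitwise be16;

static uint16_t be16_to_host(be16 v)	/* stand-in for be16_to_cpu() */
{
	uint16_t raw = (__force uint16_t)v;

	return (uint16_t)((raw >> 8) | (raw << 8));
}

/* The VLAN field is declared __le16, but in loopback the hardware writes it
 * big endian, so the driver reinterprets the same bits with a __force cast.
 */
static uint16_t vlan_from_desc(le16 vlan, int loopback)
{
	if (loopback)
		return be16_to_host((__force be16)vlan);
	return (__force uint16_t)vlan;	/* little-endian host assumed */
}

int main(void)
{
	le16 v = (__force le16)0x1234;

	return vlan_from_desc(v, 1) == 0x3412 ? 0 : 1;	/* exits 0 */
}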
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index fb3fbcb13331..1bbe9862a758 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -83,14 +83,14 @@ static int igbvf_desc_unused(struct igbvf_ring *ring)
static void igbvf_receive_skb(struct igbvf_adapter *adapter,
struct net_device *netdev,
struct sk_buff *skb,
- u32 status, u16 vlan)
+ u32 status, __le16 vlan)
{
u16 vid;
if (status & E1000_RXD_STAT_VP) {
if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) &&
(status & E1000_RXDEXT_STATERR_LB))
- vid = be16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
+ vid = be16_to_cpu((__force __be16)vlan) & E1000_RXD_SPC_VLAN_MASK;
else
vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
if (test_bit(vid, adapter->active_vlans))
@@ -2056,7 +2056,7 @@ static int igbvf_tso(struct igbvf_ring *tx_ring,
/* remove payload length from inner checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+ csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
/* MSS L4LEN IDX */
mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
index c71b0d7dbcee..ba9bb3132d5d 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.h
+++ b/drivers/net/ethernet/intel/igbvf/vf.h
@@ -35,31 +35,31 @@ struct e1000_hw;
/* Receive Descriptor - Advanced */
union e1000_adv_rx_desc {
struct {
- u64 pkt_addr; /* Packet buffer address */
- u64 hdr_addr; /* Header buffer address */
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
} read;
struct {
struct {
union {
- u32 data;
+ __le32 data;
struct {
- u16 pkt_info; /* RSS/Packet type */
+ __le16 pkt_info; /* RSS/Packet type */
/* Split Header, hdr buffer length */
- u16 hdr_info;
+ __le16 hdr_info;
} hs_rss;
} lo_dword;
union {
- u32 rss; /* RSS Hash */
+ __le32 rss; /* RSS Hash */
struct {
- u16 ip_id; /* IP id */
- u16 csum; /* Packet Checksum */
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
} csum_ip;
} hi_dword;
} lower;
struct {
- u32 status_error; /* ext status/error */
- u16 length; /* Packet length */
- u16 vlan; /* VLAN tag */
+ __le32 status_error; /* ext status/error */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
} upper;
} wb; /* writeback */
};
@@ -70,14 +70,14 @@ union e1000_adv_rx_desc {
/* Transmit Descriptor - Advanced */
union e1000_adv_tx_desc {
struct {
- u64 buffer_addr; /* Address of descriptor's data buf */
- u32 cmd_type_len;
- u32 olinfo_status;
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
} read;
struct {
- u64 rsvd; /* Reserved */
- u32 nxtseq_seed;
- u32 status;
+ __le64 rsvd; /* Reserved */
+ __le32 nxtseq_seed;
+ __le32 status;
} wb;
};
@@ -94,10 +94,10 @@ union e1000_adv_tx_desc {
/* Context descriptors */
struct e1000_adv_tx_context_desc {
- u32 vlan_macip_lens;
- u32 seqnum_seed;
- u32 type_tucmd_mlhl;
- u32 mss_l4len_idx;
+ __le32 vlan_macip_lens;
+ __le32 seqnum_seed;
+ __le32 type_tucmd_mlhl;
+ __le32 mss_l4len_idx;
};
#define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index b6d3277c6f52..9e0bbb2e55e3 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -372,6 +372,7 @@ extern char igc_driver_name[];
/* VLAN info */
#define IGC_TX_FLAGS_VLAN_MASK 0xffff0000
+#define IGC_TX_FLAGS_VLAN_SHIFT 16
/* igc_test_staterr - tests bits within Rx descriptor status and error fields */
static inline __le32 igc_test_staterr(union igc_adv_rx_desc *rx_desc,
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 0103dda32f39..c3a5a5518790 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -94,12 +94,13 @@
#define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
#define IGC_CTRL_FRCSPD 0x00000800 /* Force Speed */
#define IGC_CTRL_FRCDPX 0x00001000 /* Force Duplex */
+#define IGC_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
#define IGC_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
#define IGC_CTRL_TFCE 0x10000000 /* Transmit flow control enable */
-#define IGC_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
-#define IGC_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
+#define IGC_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
+#define IGC_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */
#define MAX_JUMBO_FRAME_SIZE 0x2600
@@ -128,7 +129,6 @@
#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
/* 1000BASE-T Control Register */
-#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */
#define CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
@@ -323,6 +323,9 @@
#define IGC_RXD_STAT_IXSM 0x04 /* Ignore checksum */
#define IGC_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
#define IGC_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */
+#define IGC_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+
+#define IGC_RXDEXT_STATERR_LB 0x00040000
/* Advanced Receive Descriptor bit definitions */
#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
diff --git a/drivers/net/ethernet/intel/igc/igc_dump.c b/drivers/net/ethernet/intel/igc/igc_dump.c
index 495bed47ed0a..c09c95cc5f70 100644
--- a/drivers/net/ethernet/intel/igc/igc_dump.c
+++ b/drivers/net/ethernet/intel/igc/igc_dump.c
@@ -112,7 +112,7 @@ static void igc_regdump(struct igc_hw *hw, struct igc_reg_info *reginfo)
void igc_rings_dump(struct igc_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- struct my_u0 { u64 a; u64 b; } *u0;
+ struct my_u0 { __le64 a; __le64 b; } *u0;
union igc_adv_tx_desc *tx_desc;
union igc_adv_rx_desc *rx_desc;
struct igc_ring *tx_ring;
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 9722449d7633..fa4171860623 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -554,7 +554,7 @@ static int igc_ethtool_set_eeprom(struct net_device *netdev,
memcpy(ptr, bytes, eeprom->len);
for (i = 0; i < last_word - first_word + 1; i++)
- eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+ cpu_to_le16s(&eeprom_buff[i]);
ret_val = hw->nvm.ops.write(hw, first_word,
last_word - first_word + 1, eeprom_buff);
@@ -765,35 +765,22 @@ static void igc_ethtool_get_strings(struct net_device *netdev, u32 stringset,
IGC_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
- for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) {
- memcpy(p, igc_gstrings_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < IGC_NETDEV_STATS_LEN; i++) {
- memcpy(p, igc_gstrings_net_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++)
+ ethtool_sprintf(&p, igc_gstrings_stats[i].stat_string);
+ for (i = 0; i < IGC_NETDEV_STATS_LEN; i++)
+ ethtool_sprintf(&p,
+ igc_gstrings_net_stats[i].stat_string);
for (i = 0; i < adapter->num_tx_queues; i++) {
- sprintf(p, "tx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_queue_%u_restart", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "tx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
+ ethtool_sprintf(&p, "tx_queue_%u_restart", i);
}
for (i = 0; i < adapter->num_rx_queues; i++) {
- sprintf(p, "rx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_drops", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_csum_err", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_queue_%u_alloc_failed", i);
- p += ETH_GSTRING_LEN;
+ ethtool_sprintf(&p, "rx_queue_%u_packets", i);
+ ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
+ ethtool_sprintf(&p, "rx_queue_%u_drops", i);
+ ethtool_sprintf(&p, "rx_queue_%u_csum_err", i);
+ ethtool_sprintf(&p, "rx_queue_%u_alloc_failed", i);
}
/* BUG_ON(p - data != IGC_STATS_LEN * ETH_GSTRING_LEN); */
break;
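The igc_ethtool.c hunk above swaps the sprintf()-then-advance-by-ETH_GSTRING_LEN boilerplate for ethtool_sprintf(), which formats into the current fixed-size slot and bumps the cursor itself. A sketch of the idea, not the kernel implementation; GSTRING_LEN and stats_sprintf() are stand-ins:

#include <stdarg.h>
#include <stdio.h>

#define GSTRING_LEN 32	/* stand-in for ETH_GSTRING_LEN */

/* Format into the current fixed-size slot, then advance the cursor,
 * mirroring what ethtool_sprintf() does for the caller.
 */
static void stats_sprintf(char **slot, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(*slot, GSTRING_LEN, fmt, args);
	va_end(args);
	*slot += GSTRING_LEN;
}

int main(void)
{
	char strings[4 * GSTRING_LEN];
	char *p = strings;
	unsigned int i;

	for (i = 0; i < 2; i++) {
		stats_sprintf(&p, "tx_queue_%u_packets", i);
		stats_sprintf(&p, "tx_queue_%u_bytes", i);
	}
	for (i = 0; i < 4; i++)
		printf("%s\n", strings + i * GSTRING_LEN);
	return 0;
}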
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index ea998d2defa4..3f6b6d4543a8 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -111,6 +111,9 @@ void igc_reset(struct igc_adapter *adapter)
if (!netif_running(adapter->netdev))
igc_power_down_phy_copper_base(&adapter->hw);
+ /* Enable HW to recognize an 802.1Q VLAN Ethernet packet */
+ wr32(IGC_VET, ETH_P_8021Q);
+
/* Re-enable PTP, where applicable. */
igc_ptp_reset(adapter);
@@ -1122,13 +1125,17 @@ static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size)
((u32)((_input) & (_flag)) * ((_result) / (_flag))) : \
((u32)((_input) & (_flag)) / ((_flag) / (_result))))
-static u32 igc_tx_cmd_type(u32 tx_flags)
+static u32 igc_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
/* set type for advanced descriptor with frame checksum insertion */
u32 cmd_type = IGC_ADVTXD_DTYP_DATA |
IGC_ADVTXD_DCMD_DEXT |
IGC_ADVTXD_DCMD_IFCS;
+ /* set HW vlan bit if vlan is present */
+ cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_VLAN,
+ IGC_ADVTXD_DCMD_VLE);
+
/* set segmentation bits for TSO */
cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSO,
(IGC_ADVTXD_DCMD_TSE));
@@ -1137,6 +1144,9 @@ static u32 igc_tx_cmd_type(u32 tx_flags)
cmd_type |= IGC_SET_FLAG(tx_flags, IGC_TX_FLAGS_TSTAMP,
(IGC_ADVTXD_MAC_TSTAMP));
+ /* insert frame checksum */
+ cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS);
+
return cmd_type;
}
@@ -1171,8 +1181,9 @@ static int igc_tx_map(struct igc_ring *tx_ring,
u16 i = tx_ring->next_to_use;
unsigned int data_len, size;
dma_addr_t dma;
- u32 cmd_type = igc_tx_cmd_type(tx_flags);
+ u32 cmd_type;
+ cmd_type = igc_tx_cmd_type(skb, tx_flags);
tx_desc = IGC_TX_DESC(tx_ring, i);
igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
@@ -1443,6 +1454,11 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
}
}
+ if (skb_vlan_tag_present(skb)) {
+ tx_flags |= IGC_TX_FLAGS_VLAN;
+ tx_flags |= (skb_vlan_tag_get(skb) << IGC_TX_FLAGS_VLAN_SHIFT);
+ }
+
/* record initial flags and protocol */
first->tx_flags = tx_flags;
first->protocol = protocol;
@@ -1542,6 +1558,25 @@ static inline void igc_rx_hash(struct igc_ring *ring,
PKT_HASH_TYPE_L3);
}
+static void igc_rx_vlan(struct igc_ring *rx_ring,
+ union igc_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct net_device *dev = rx_ring->netdev;
+ u16 vid;
+
+ if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ igc_test_staterr(rx_desc, IGC_RXD_STAT_VP)) {
+ if (igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_LB) &&
+ test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
+ vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan);
+ else
+ vid = le16_to_cpu(rx_desc->wb.upper.vlan);
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+ }
+}
+
/**
* igc_process_skb_fields - Populate skb header fields from Rx descriptor
* @rx_ring: rx descriptor ring packet is being transacted on
@@ -1560,11 +1595,37 @@ static void igc_process_skb_fields(struct igc_ring *rx_ring,
igc_rx_checksum(rx_ring, rx_desc, skb);
+ igc_rx_vlan(rx_ring, rx_desc, skb);
+
skb_record_rx_queue(skb, rx_ring->queue_index);
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
+static void igc_vlan_mode(struct net_device *netdev, netdev_features_t features)
+{
+ bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+ u32 ctrl;
+
+ ctrl = rd32(IGC_CTRL);
+
+ if (enable) {
+ /* enable VLAN tag insert/strip */
+ ctrl |= IGC_CTRL_VME;
+ } else {
+ /* disable VLAN tag insert/strip */
+ ctrl &= ~IGC_CTRL_VME;
+ }
+ wr32(IGC_CTRL, ctrl);
+}
+
+static void igc_restore_vlan(struct igc_adapter *adapter)
+{
+ igc_vlan_mode(adapter->netdev, adapter->netdev->features);
+}
+
static struct igc_rx_buffer *igc_get_rx_buffer(struct igc_ring *rx_ring,
const unsigned int size,
int *rx_buffer_pgcnt)
@@ -2153,15 +2214,18 @@ static int __igc_xdp_run_prog(struct igc_adapter *adapter,
case XDP_PASS:
return IGC_XDP_PASS;
case XDP_TX:
- return igc_xdp_xmit_back(adapter, xdp) < 0 ?
- IGC_XDP_CONSUMED : IGC_XDP_TX;
+ if (igc_xdp_xmit_back(adapter, xdp) < 0)
+ goto out_failure;
+ return IGC_XDP_TX;
case XDP_REDIRECT:
- return xdp_do_redirect(adapter->netdev, xdp, prog) < 0 ?
- IGC_XDP_CONSUMED : IGC_XDP_REDIRECT;
+ if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0)
+ goto out_failure;
+ return IGC_XDP_REDIRECT;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(adapter->netdev, prog, act);
fallthrough;
case XDP_DROP:
@@ -3248,6 +3313,8 @@ static void igc_configure(struct igc_adapter *adapter)
igc_get_hw_control(adapter);
igc_set_rx_mode(netdev);
+ igc_restore_vlan(adapter);
+
igc_setup_tctl(adapter);
igc_setup_mrqc(adapter);
igc_setup_rctl(adapter);
@@ -4547,6 +4614,9 @@ static int igc_set_features(struct net_device *netdev,
netdev_features_t changed = netdev->features ^ features;
struct igc_adapter *adapter = netdev_priv(netdev);
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+ igc_vlan_mode(netdev, features);
+
/* Add VLAN support */
if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
return 0;
@@ -5873,11 +5943,15 @@ static int igc_probe(struct pci_dev *pdev,
/* copy netdev features into list of user selectable features */
netdev->hw_features |= NETIF_F_NTUPLE;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->hw_features |= netdev->features;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= netdev->features;
+
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
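igc_vlan_mode() above is the classic read-modify-write on a device control register: one netdev feature flag maps to one hardware bit (IGC_CTRL_VME), set on enable, cleared on disable, and replayed after every reset through igc_restore_vlan(). A generic sketch of the pattern; the register, bit value, and MMIO helpers here are hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CTRL_VME 0x40000000u	/* hypothetical "VLAN mode enable" bit */

static uint32_t ctrl_reg;	/* fake device register for the sketch */

static uint32_t mmio_read32(void) { return ctrl_reg; }
static void mmio_write32(uint32_t val) { ctrl_reg = val; }

/* Enable or disable VLAN tag stripping with a read-modify-write cycle,
 * leaving every other bit in the control register untouched.
 */
static void set_vlan_strip(bool enable)
{
	uint32_t ctrl = mmio_read32();

	if (enable)
		ctrl |= CTRL_VME;
	else
		ctrl &= ~CTRL_VME;
	mmio_write32(ctrl);
}

int main(void)
{
	ctrl_reg = 0x08000040u;		/* unrelated bits already set */
	set_vlan_strip(true);
	printf("ctrl=%#x\n", ctrl_reg);	/* 0x48000040: VME added, rest kept */
	set_vlan_strip(false);
	printf("ctrl=%#x\n", ctrl_reg);	/* back to 0x8000040 */
	return 0;
}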
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index cc174853554b..0f82990567d9 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -10,8 +10,8 @@
#define IGC_EECD 0x00010 /* EEPROM/Flash Control - RW */
#define IGC_CTRL_EXT 0x00018 /* Extended Device Control - RW */
#define IGC_MDIC 0x00020 /* MDI Control - RW */
-#define IGC_MDICNFG 0x00E04 /* MDC/MDIO Configuration - RW */
#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
+#define IGC_VET 0x00038 /* VLAN Ether Type - RW */
#define IGC_I225_PHPM 0x00E14 /* I225 PHY Power Management */
#define IGC_GPHY_VERSION 0x0001E /* I225 gPHY Firmware Version */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index e324e42fab2d..58ea959a4482 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1514,8 +1514,7 @@ static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
#define IXGBE_WRITE_REG_BE32(a, reg, value) \
IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))
-#define IXGBE_STORE_AS_BE16(_value) \
- ntohs(((u16)(_value) >> 8) | ((u16)(_value) << 8))
+#define IXGBE_STORE_AS_BE16(_value) __swab16(ntohs((_value)))
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
union ixgbe_atr_input *input_mask)
@@ -1651,13 +1650,13 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
/* record source and destination port (little-endian)*/
- fdirport = ntohs(input->formatted.dst_port);
+ fdirport = be16_to_cpu(input->formatted.dst_port);
fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
- fdirport |= ntohs(input->formatted.src_port);
+ fdirport |= be16_to_cpu(input->formatted.src_port);
IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
/* record vlan (little-endian) and flex_bytes(big-endian) */
- fdirvlan = IXGBE_STORE_AS_BE16((__force u16)input->formatted.flex_bytes);
+ fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
fdirvlan |= ntohs(input->formatted.vlan_id);
IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 03ccbe6b66d2..e90b5047e695 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -3678,10 +3678,8 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
bool return_data)
{
u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
- union {
- struct ixgbe_hic_hdr hdr;
- u32 u32arr[1];
- } *bp = buffer;
+ struct ixgbe_hic_hdr *hdr = buffer;
+ u32 *u32arr = buffer;
u16 buf_len, dword_len;
s32 status;
u32 bi;
@@ -3707,12 +3705,12 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
/* first pull in the header so we know the buffer length */
for (bi = 0; bi < dword_len; bi++) {
- bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
- le32_to_cpus(&bp->u32arr[bi]);
+ u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+ le32_to_cpus(&u32arr[bi]);
}
/* If there is any thing in data position pull it in */
- buf_len = bp->hdr.buf_len;
+ buf_len = hdr->buf_len;
if (!buf_len)
goto rel_out;
@@ -3727,8 +3725,8 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
/* Pull in the rest of the buffer (bi is where we left off) */
for (; bi <= dword_len; bi++) {
- bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
- le32_to_cpus(&bp->u32arr[bi]);
+ u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+ le32_to_cpus(&u32arr[bi]);
}
rel_out:
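The ixgbe_host_interface_command() hunk above replaces the on-stack union with two pointers into the caller's buffer: one typed as the response header, one as a raw u32 array that the FLEX_MNG registers are copied through. A standalone sketch of reading a length-prefixed response through such a dword view; struct hic_hdr, the canned word values, and read_mng_word() are stand-ins, and a little-endian host is assumed:

#include <stdint.h>
#include <stdio.h>

struct hic_hdr {		/* stand-in for struct ixgbe_hic_hdr */
	uint8_t cmd;
	uint8_t buf_len;	/* payload bytes that follow the header */
	uint8_t cmd_or_resp;
	uint8_t checksum;
};

/* Canned "register file" standing in for IXGBE_FLEX_MNG reads. */
static const uint32_t device_words[] = { 0x00000801u, 0xdeadbeefu, 0x0badf00du };

static uint32_t read_mng_word(unsigned int i)
{
	return device_words[i];
}

int main(void)
{
	uint32_t buffer[8];
	struct hic_hdr *hdr = (struct hic_hdr *)buffer;	/* header view */
	uint32_t *u32arr = buffer;			/* dword view */
	unsigned int bi, dword_len;

	/* Pull the header first so we learn the payload length. */
	dword_len = sizeof(*hdr) / sizeof(uint32_t);
	for (bi = 0; bi < dword_len; bi++)
		u32arr[bi] = read_mng_word(bi);	/* le32_to_cpus() on BE hosts */

	/* Then pull the rest, rounding the byte count up to whole dwords. */
	dword_len += (hdr->buf_len + 3) / 4;
	for (; bi < dword_len; bi++)
		u32arr[bi] = read_mng_word(bi);

	printf("cmd=%#x payload=%u bytes\n", hdr->cmd, hdr->buf_len);
	return 0;
}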
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 54d47265a7ac..e596e1a9fc75 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -511,14 +511,14 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
continue;
reg = IXGBE_READ_REG(hw, MIPAF_ARR(3, i));
- if (reg == xs->id.daddr.a4)
+ if (reg == (__force u32)xs->id.daddr.a4)
return 1;
}
}
if ((bmcipval & BMCIP_MASK) == BMCIP_V4) {
reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(3));
- if (reg == xs->id.daddr.a4)
+ if (reg == (__force u32)xs->id.daddr.a4)
return 1;
}
@@ -533,7 +533,7 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
for (j = 0; j < 4; j++) {
reg = IXGBE_READ_REG(hw, MIPAF_ARR(i, j));
- if (reg != xs->id.daddr.a6[j])
+ if (reg != (__force u32)xs->id.daddr.a6[j])
break;
}
if (j == 4) /* did we match all 4 words? */
@@ -543,7 +543,7 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
if ((bmcipval & BMCIP_MASK) == BMCIP_V6) {
for (j = 0; j < 4; j++) {
reg = IXGBE_READ_REG(hw, IXGBE_BMCIP(j));
- if (reg != xs->id.daddr.a6[j])
+ if (reg != (__force u32)xs->id.daddr.a6[j])
break;
}
if (j == 4) /* did we match all 4 words? */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index c5ec17d19c59..2ac5b82676f3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2213,23 +2213,23 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
break;
case XDP_TX:
xdpf = xdp_convert_buff_to_frame(xdp);
- if (unlikely(!xdpf)) {
- result = IXGBE_XDP_CONSUMED;
- break;
- }
+ if (unlikely(!xdpf))
+ goto out_failure;
result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+ if (result == IXGBE_XDP_CONSUMED)
+ goto out_failure;
break;
case XDP_REDIRECT:
err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
- if (!err)
- result = IXGBE_XDP_REDIR;
- else
- result = IXGBE_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
+ result = IXGBE_XDP_REDIR;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
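The XDP hunks in igc_main.c above and in the ixgbe/ixgbevf files around this point all converge on one control flow: every verdict that ends in a drop, whether a failed XDP_TX, a failed redirect, or an unknown action, funnels through a single out_failure label so trace_xdp_exception() fires exactly once before the frame is consumed. A condensed standalone sketch of that shape; the verdict codes and helpers are hypothetical stubs:

#include <stdio.h>

enum verdict { V_PASS, V_TX, V_REDIR, V_CONSUMED };
enum action { A_PASS, A_TX, A_REDIRECT, A_ABORTED, A_DROP };

static int failures;	/* stands in for the trace_xdp_exception tracepoint */

static int do_tx(int ok)       { return ok ? 0 : -1; }
static int do_redirect(int ok) { return ok ? 0 : -1; }
static void trace_exception(int act) { failures++; (void)act; }

static enum verdict run_xdp(enum action act, int ok)
{
	switch (act) {
	case A_PASS:
		return V_PASS;
	case A_TX:
		if (do_tx(ok) < 0)
			goto out_failure;
		return V_TX;
	case A_REDIRECT:
		if (do_redirect(ok) < 0)
			goto out_failure;
		return V_REDIR;
	default:		/* unknown verdicts are treated as aborts */
	case A_ABORTED:
out_failure:
		trace_exception(act);
		/* fall through: every failure ends in a drop */
	case A_DROP:
		return V_CONSUMED;
	}
}

int main(void)
{
	run_xdp(A_TX, 0);	/* failed XDP_TX: traced, then dropped */
	run_xdp(A_REDIRECT, 0);	/* failed redirect: traced, then dropped */
	run_xdp(A_DROP, 1);	/* plain drop: not traced */
	printf("exceptions traced: %d\n", failures);	/* prints 2 */
	return 0;
}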
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 988db46bff0e..214a38de3f41 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -467,12 +467,16 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
return err;
}
-static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
+static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
- int max_frame = msgbuf[1];
u32 max_frs;
+ if (max_frame < ETH_MIN_MTU || max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
+ e_err(drv, "VF max_frame %d out of range\n", max_frame);
+ return -EINVAL;
+ }
+
/*
* For 82599EB we have to keep all PFs and VFs operating with
* the same max_frame value in order to avoid sending an oversize
@@ -533,12 +537,6 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
}
}
- /* MTU < 68 is an error and causes problems on some kernels */
- if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
- e_err(drv, "VF max_frame %d out of range\n", max_frame);
- return -EINVAL;
- }
-
/* pull current max frame size from hardware */
max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
max_frs &= IXGBE_MHADD_MFS_MASK;
@@ -1249,7 +1247,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
break;
case IXGBE_VF_SET_LPE:
- retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
+ retval = ixgbe_set_vf_lpe(adapter, msgbuf[1], vf);
break;
case IXGBE_VF_SET_MACVLAN:
retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
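The ixgbe_set_vf_lpe() rework above also moves the sanity check first: msgbuf[1] arrives from an untrusted VF, so it is now range-checked against ETH_MIN_MTU and the jumbo maximum before any hardware or PF state is touched, rather than after the 82599-specific handling. A tiny validate-before-use sketch; the bounds are stand-ins:

#include <errno.h>
#include <stdint.h>

#define MIN_FRAME 68	/* stand-in for ETH_MIN_MTU */
#define MAX_FRAME 9728	/* stand-in for IXGBE_MAX_JUMBO_FRAME_SIZE */

/* Reject out-of-range guest-supplied values before touching hardware. */
static int set_vf_max_frame(uint32_t max_frame)
{
	if (max_frame < MIN_FRAME || max_frame > MAX_FRAME)
		return -EINVAL;

	/* ... program the device only after validation ... */
	return 0;
}

int main(void)
{
	return set_vf_max_frame(70000) == -EINVAL ? 0 : 1;	/* rejected */
}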
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 91ad5b902673..f72d2978263b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -106,9 +106,10 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED;
+ if (err)
+ goto out_failure;
rcu_read_unlock();
- return result;
+ return IXGBE_XDP_REDIR;
}
switch (act) {
@@ -116,16 +117,17 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
break;
case XDP_TX:
xdpf = xdp_convert_buff_to_frame(xdp);
- if (unlikely(!xdpf)) {
- result = IXGBE_XDP_CONSUMED;
- break;
- }
+ if (unlikely(!xdpf))
+ goto out_failure;
result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+ if (result == IXGBE_XDP_CONSUMED)
+ goto out_failure;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index ba2ed8a43d2d..dc56931fc1dc 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1067,11 +1067,14 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter,
case XDP_TX:
xdp_ring = adapter->xdp_ring[rx_ring->queue_index];
result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp);
+ if (result == IXGBEVF_XDP_CONSUMED)
+ goto out_failure;
break;
default:
bpf_warn_invalid_xdp_action(act);
fallthrough;
case XDP_ABORTED:
+out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough; /* handle aborts by dropping packet */
case XDP_DROP:
@@ -3814,7 +3817,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
/* remove payload length from inner checksum */
paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+ csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
/* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 6f987a7ffcb3..b30a45725374 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1315,23 +1315,23 @@ static int korina_probe(struct platform_device *pdev)
lp->tx_irq = platform_get_irq_byname(pdev, "tx");
p = devm_platform_ioremap_resource_byname(pdev, "emac");
- if (!p) {
+ if (IS_ERR(p)) {
printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
- return -ENOMEM;
+ return PTR_ERR(p);
}
lp->eth_regs = p;
p = devm_platform_ioremap_resource_byname(pdev, "dma_rx");
- if (!p) {
+ if (IS_ERR(p)) {
printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
- return -ENOMEM;
+ return PTR_ERR(p);
}
lp->rx_dma_regs = p;
p = devm_platform_ioremap_resource_byname(pdev, "dma_tx");
- if (!p) {
+ if (IS_ERR(p)) {
printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
- return -ENOMEM;
+ return PTR_ERR(p);
}
lp->tx_dma_regs = p;
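The korina fix above matters because devm_platform_ioremap_resource_byname() never returns NULL on failure: it returns an ERR_PTR()-encoded error, so the old !p test let a poisoned pointer through. A standalone sketch of the ERR_PTR convention; err_ptr()/is_err()/ptr_err() are simplified stand-ins for the kernel helpers:

#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Simplified stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR():
 * the top 4095 addresses of the pointer space encode negative errnos.
 */
static void *err_ptr(long error)
{
	return (void *)(uintptr_t)error;
}

static int is_err(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static long ptr_err(const void *ptr)
{
	return (long)(intptr_t)ptr;
}

int main(void)
{
	void *p = err_ptr(-12);		/* pretend ioremap failed: -ENOMEM */

	if (!p)
		printf("caught by NULL check\n");	/* never happens */
	else
		printf("NULL check misses the failure\n");
	if (is_err(p))
		printf("IS_ERR() catches it: error %ld\n", ptr_err(p));
	return 0;
}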
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 41c2ad210bc9..27df06ed355e 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -154,6 +154,7 @@ static int xrx200_close(struct net_device *net_dev)
static int xrx200_alloc_skb(struct xrx200_chan *ch)
{
+ dma_addr_t mapping;
int ret = 0;
ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
@@ -163,16 +164,17 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
goto skip;
}
- ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(ch->priv->dev,
- ch->skb[ch->dma.desc]->data, XRX200_DMA_DATA_LEN,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(ch->priv->dev,
- ch->dma.desc_base[ch->dma.desc].addr))) {
+ mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
+ XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
dev_kfree_skb_any(ch->skb[ch->dma.desc]);
ret = -ENOMEM;
goto skip;
}
+ ch->dma.desc_base[ch->dma.desc].addr = mapping;
+ /* Make sure the address is written before we give it to HW */
+ wmb();
skip:
ch->dma.desc_base[ch->dma.desc].ctl =
LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
@@ -196,6 +198,8 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
ch->dma.desc %= LTQ_DESC_NUM;
if (ret) {
+ ch->skb[ch->dma.desc] = skb;
+ net_dev->stats.rx_dropped++;
netdev_err(net_dev, "failed to allocate new rx buffer\n");
return ret;
}
@@ -432,7 +436,6 @@ static int xrx200_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct resource *res;
struct xrx200_priv *priv;
struct net_device *net_dev;
int err;
@@ -452,13 +455,7 @@ static int xrx200_probe(struct platform_device *pdev)
net_dev->max_mtu = XRX200_DMA_DATA_LEN;
/* load the memory ranges */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to get resources\n");
- return -ENOENT;
- }
-
- priv->pmac_reg = devm_ioremap_resource(dev, res);
+ priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(priv->pmac_reg))
return PTR_ERR(priv->pmac_reg);
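The xrx200_alloc_skb() change above is an ordering fix: the DMA address is mapped into a local, checked, and only then written into the descriptor, with wmb() guaranteeing the address is visible before the later ctl write sets LTQ_DMA_OWN and hands the descriptor to the hardware. A sketch of the publish-after-barrier pattern; the descriptor layout and helpers are simplified stand-ins:

#include <stdint.h>

struct dma_desc {	/* simplified descriptor shared with the device */
	uint32_t addr;	/* buffer address, read by the device */
	uint32_t ctl;	/* OWN bit hands the descriptor to the device */
};

#define DESC_OWN 0x80000000u

static uint32_t dma_map(void *buf)	/* identity "mapping" for the sketch */
{
	return (uint32_t)(uintptr_t)buf;
}

static void wmb_sketch(void)	/* stands in for the kernel's wmb() */
{
	__atomic_thread_fence(__ATOMIC_RELEASE);
}

static int give_buffer_to_hw(struct dma_desc *d, void *buf)
{
	uint32_t mapping = dma_map(buf);	/* map into a local first */

	if (!mapping)
		return -1;	/* failure leaves the descriptor untouched */

	d->addr = mapping;	/* publish the address ... */
	wmb_sketch();		/* ... strictly before ... */
	d->ctl = DESC_OWN;	/* ... transferring ownership */
	return 0;
}

int main(void)
{
	static uint32_t buffer[16];
	struct dma_desc desc = { 0, 0 };

	return give_buffer_to_hw(&desc, buffer);
}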
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 7d5cd9bc6c99..c15ce06427d0 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2320,7 +2320,7 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
}
static struct sk_buff *
-mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
+mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool,
struct xdp_buff *xdp, u32 desc_status)
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
@@ -2331,7 +2331,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
if (!skb)
return ERR_PTR(-ENOMEM);
- page_pool_release_page(rxq->page_pool, virt_to_page(xdp->data));
+ skb_mark_for_recycle(skb, virt_to_page(xdp->data), pool);
skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb_put(skb, xdp->data_end - xdp->data);
@@ -2343,7 +2343,10 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb_frag_page(frag), skb_frag_off(frag),
skb_frag_size(frag), PAGE_SIZE);
- page_pool_release_page(rxq->page_pool, skb_frag_page(frag));
+ /* We don't need to reset pp_recycle here. It's already set, so
+ * just mark fragments for recycling.
+ */
+ page_pool_store_mem_info(skb_frag_page(frag), pool);
}
return skb;
@@ -2425,7 +2428,7 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, frame_sz, &ps))
goto next;
- skb = mvneta_swbm_build_skb(pp, rxq, &xdp_buf, desc_status);
+ skb = mvneta_swbm_build_skb(pp, rxq->page_pool, &xdp_buf, desc_status);
if (IS_ERR(skb)) {
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 8edba5ea90f0..4a61c90003b5 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -993,6 +993,14 @@ enum mvpp22_ptp_packet_format {
#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40)
+/* Buffer header info bits */
+#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff
+#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
+#define MVPP2_B_HDR_INFO_LAST_OFFS 12
+#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12)
+#define MVPP2_B_HDR_INFO_IS_LAST(info) \
+ (((info) & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
+
struct mvpp2_tai;
/* Definitions */
@@ -1002,6 +1010,20 @@ struct mvpp2_rss_table {
u32 indir[MVPP22_RSS_TABLE_ENTRIES];
};
+struct mvpp2_buff_hdr {
+ __le32 next_phys_addr;
+ __le32 next_dma_addr;
+ __le16 byte_count;
+ __le16 info;
+ __le16 reserved1; /* bm_qset (for future use, BM) */
+ u8 next_phys_addr_high;
+ u8 next_dma_addr_high;
+ __le16 reserved2;
+ __le16 reserved3;
+ __le16 reserved4;
+ __le16 reserved5;
+};
+
/* Shared Packet Processor resources */
struct mvpp2 {
/* Shared registers' base addresses */
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index b2259bf1d299..9bca8c8f9f8d 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3839,6 +3839,35 @@ mvpp2_run_xdp(struct mvpp2_port *port, struct bpf_prog *prog,
return ret;
}
+static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc,
+ int pool, u32 rx_status)
+{
+ phys_addr_t phys_addr, phys_addr_next;
+ dma_addr_t dma_addr, dma_addr_next;
+ struct mvpp2_buff_hdr *buff_hdr;
+
+ phys_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
+ dma_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
+
+ do {
+ buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(phys_addr);
+
+ phys_addr_next = le32_to_cpu(buff_hdr->next_phys_addr);
+ dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr);
+
+ if (port->priv->hw_version >= MVPP22) {
+ phys_addr_next |= ((u64)buff_hdr->next_phys_addr_high << 32);
+ dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32);
+ }
+
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
+
+ phys_addr = phys_addr_next;
+ dma_addr = dma_addr_next;
+
+ } while (!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info)));
+}
+
/* Main rx processing */
static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
int rx_todo, struct mvpp2_rx_queue *rxq)
@@ -3871,28 +3900,24 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
phys_addr_t phys_addr;
u32 rx_status, timestamp;
int pool, rx_bytes, err, ret;
+ struct page *page;
void *data;
+ phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
+ data = (void *)phys_to_virt(phys_addr);
+ page = virt_to_page(data);
+ prefetch(page);
+
rx_done++;
rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
rx_bytes -= MVPP2_MH_SIZE;
dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
- phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
- data = (void *)phys_to_virt(phys_addr);
pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
MVPP2_RXD_BM_POOL_ID_OFFS;
bm_pool = &port->priv->bm_pools[pool];
- /* In case of an error, release the requested buffer pointer
- * to the Buffer Manager. This request process is controlled
- * by the hardware, and the information about the buffer is
- * comprised by the RX descriptor.
- */
- if (rx_status & MVPP2_RXD_ERR_SUMMARY)
- goto err_drop_frame;
-
if (port->priv->percpu_pools) {
pp = port->priv->page_pool[pool];
dma_dir = page_pool_get_dma_dir(pp);
@@ -3904,8 +3929,20 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
rx_bytes + MVPP2_MH_SIZE,
dma_dir);
+ /* Buffer header not supported */
+ if (rx_status & MVPP2_RXD_BUF_HDR)
+ goto err_drop_frame;
+
+ /* In case of an error, release the requested buffer pointer
+ * to the Buffer Manager. This request process is controlled
+ * by the hardware, and the information about the buffer is
+ * comprised by the RX descriptor.
+ */
+ if (rx_status & MVPP2_RXD_ERR_SUMMARY)
+ goto err_drop_frame;
+
/* Prefetch header */
- prefetch(data);
+ prefetch(data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
if (bm_pool->frag_size > PAGE_SIZE)
frag_size = 0;
@@ -3964,7 +4001,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
}
if (pp)
- page_pool_release_page(pp, virt_to_page(data));
+ skb_mark_for_recycle(skb, page, pp);
else
dma_unmap_single_attrs(dev->dev.parent, dma_addr,
bm_pool->buf_size, DMA_FROM_DEVICE,
@@ -3975,8 +4012,8 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
skb_put(skb, rx_bytes);
- skb->protocol = eth_type_trans(skb, dev);
mvpp2_rx_csum(port, rx_status, skb);
+ skb->protocol = eth_type_trans(skb, dev);
napi_gro_receive(napi, skb);
continue;
@@ -3985,7 +4022,10 @@ err_drop_frame:
dev->stats.rx_errors++;
mvpp2_rx_error(port, rx_desc);
/* Return the buffer to the pool */
- mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
+ if (rx_status & MVPP2_RXD_BUF_HDR)
+ mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status);
+ else
+ mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
}
rcu_read_unlock();
@@ -7341,6 +7381,10 @@ static int mvpp2_probe(struct platform_device *pdev)
return PTR_ERR(priv->lms_base);
} else {
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "Invalid resource\n");
+ return -EINVAL;
+ }
if (has_acpi_companion(&pdev->dev)) {
/* In case the MDIO memory region is declared in
* the ACPI, it can already appear as 'in-use'
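mvpp2_buff_hdr_pool_put() above returns a whole chain of buffers at once: a jumbo frame split across pool buffers carries a struct mvpp2_buff_hdr at the start of each buffer, whose next_phys_addr/next_dma_addr fields link to the next one, and the walk ends when the info field's LAST bit is set. A self-contained sketch of that walk using host pointers instead of DMA addresses; the header layout is simplified, and the link and flags are read before the buffer is recycled:

#include <stdint.h>
#include <stdio.h>

#define HDR_INFO_LAST 0x1000u	/* stand-in for MVPP2_B_HDR_INFO_LAST_MASK */

struct buff_hdr {		/* simplified chained-buffer header */
	struct buff_hdr *next;	/* link to the next buffer of the frame */
	uint16_t info;		/* LAST bit terminates the walk */
};

/* Return every buffer of a chained frame to its pool, reading the link and
 * flags before recycling the buffer they live in.
 */
static void chain_pool_put(struct buff_hdr *hdr,
			   void (*pool_put)(struct buff_hdr *))
{
	struct buff_hdr *next;
	uint16_t info;

	do {
		next = hdr->next;
		info = hdr->info;
		pool_put(hdr);
		hdr = next;
	} while (!(info & HDR_INFO_LAST));
}

static void print_put(struct buff_hdr *b)
{
	printf("put %p\n", (void *)b);
}

int main(void)
{
	struct buff_hdr c = { NULL, HDR_INFO_LAST };
	struct buff_hdr b = { &c, 0 };
	struct buff_hdr a = { &b, 0 };

	chain_pool_put(&a, print_put);	/* puts a, b and c */
	return 0;
}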
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index e66109367487..47f5ed006a93 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -197,6 +197,11 @@ enum nix_scheduler {
#define SDP_CHANNELS 256
+/* Mask to extract the lower 10 bits of the channel number,
+ * which CPT will pass to X2P.
+ */
+#define NIX_CHAN_CPT_X2P_MASK (0x3ffull)
+
/* NIX LSO format indices.
* As of now TSO is the only one using, so statically assigning indices.
*/
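NIX_CHAN_CPT_X2P_MASK above keeps only the low 10 bits of a channel number, the slice CPT passes on to X2P. The masking itself is trivial; the values below are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define CHAN_CPT_X2P_MASK 0x3ffull	/* low 10 bits survive */

int main(void)
{
	uint64_t chan = 0x8000 | 0x2a5;	/* upper bits: link/routing info */

	/* Only the low 10 bits reach X2P. */
	printf("0x%llx -> 0x%llx\n",
	       (unsigned long long)chan,
	       (unsigned long long)(chan & CHAN_CPT_X2P_MASK));
	return 0;
}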
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index cedb2616c509..7d7dfa8d8a3f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -134,6 +134,7 @@ M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
+M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \
@@ -259,7 +260,11 @@ M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \
M(NIX_CN10K_AQ_ENQ, 0x8019, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \
nix_cn10k_aq_enq_rsp) \
-M(NIX_GET_HW_INFO, 0x801a, nix_get_hw_info, msg_req, nix_hw_info)
+M(NIX_GET_HW_INFO, 0x801c, nix_get_hw_info, msg_req, nix_hw_info) \
+M(NIX_BANDPROF_ALLOC, 0x801d, nix_bandprof_alloc, nix_bandprof_alloc_req, \
+ nix_bandprof_alloc_rsp) \
+M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
+ msg_rsp)
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
#define MBOX_UP_CGX_MESSAGES \
@@ -611,7 +616,12 @@ enum nix_af_status {
NIX_AF_INVAL_SSO_PF_FUNC = -420,
NIX_AF_ERR_TX_VTAG_NOSPC = -421,
NIX_AF_ERR_RX_VTAG_INUSE = -422,
- NIX_AF_ERR_NPC_KEY_NOT_SUPP = -423,
+ NIX_AF_ERR_PTP_CONFIG_FAIL = -423,
+ NIX_AF_ERR_NPC_KEY_NOT_SUPP = -424,
+ NIX_AF_ERR_INVALID_NIXBLK = -425,
+ NIX_AF_ERR_INVALID_BANDPROF = -426,
+ NIX_AF_ERR_IPOLICER_NOTSUPP = -427,
+ NIX_AF_ERR_BANDPROF_INVAL_REQ = -428,
};
/* For NIX RX vtag action */
@@ -680,6 +690,7 @@ struct nix_cn10k_aq_enq_req {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
};
union {
struct nix_cn10k_rq_ctx_s rq_mask;
@@ -687,6 +698,7 @@ struct nix_cn10k_aq_enq_req {
struct nix_cq_ctx_s cq_mask;
struct nix_rsse_s rss_mask;
struct nix_rx_mce_s mce_mask;
+ struct nix_bandprof_s prof_mask;
};
};
@@ -698,6 +710,7 @@ struct nix_cn10k_aq_enq_rsp {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
+ struct nix_bandprof_s prof;
};
};
@@ -713,6 +726,7 @@ struct nix_aq_enq_req {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
+ u64 prof;
};
union {
struct nix_rq_ctx_s rq_mask;
@@ -720,6 +734,7 @@ struct nix_aq_enq_req {
struct nix_cq_ctx_s cq_mask;
struct nix_rsse_s rss_mask;
struct nix_rx_mce_s mce_mask;
+ u64 prof_mask;
};
};
@@ -731,6 +746,7 @@ struct nix_aq_enq_rsp {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
+ u64 prof;
};
};
@@ -913,6 +929,7 @@ struct nix_rx_mode {
#define NIX_RX_MODE_UCAST BIT(0)
#define NIX_RX_MODE_PROMISC BIT(1)
#define NIX_RX_MODE_ALLMULTI BIT(2)
+#define NIX_RX_MODE_USE_MCE BIT(3)
u16 mode;
};
@@ -971,6 +988,31 @@ struct nix_hw_info {
u16 min_mtu;
};
+struct nix_bandprof_alloc_req {
+ struct mbox_msghdr hdr;
+ /* Count of profiles needed per layer */
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+};
+
+struct nix_bandprof_alloc_rsp {
+ struct mbox_msghdr hdr;
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+
+	/* There is no need to allocate more than one bandwidth profile
+	 * per RQ of a PF_FUNC's NIXLF, so limit the maximum number of
+	 * profiles to 64 per PF_FUNC.
+ */
+#define MAX_BANDPROF_PER_PFFUNC 64
+ u16 prof_idx[BAND_PROF_NUM_LAYERS][MAX_BANDPROF_PER_PFFUNC];
+};
+
+struct nix_bandprof_free_req {
+ struct mbox_msghdr hdr;
+ u8 free_all;
+ u16 prof_count[BAND_PROF_NUM_LAYERS];
+ u16 prof_idx[BAND_PROF_NUM_LAYERS][MAX_BANDPROF_PER_PFFUNC];
+};
+
/* NPC mbox message structs */
#define NPC_MCAM_ENTRY_INVALID 0xFFFF
@@ -1228,6 +1270,14 @@ struct ptp_rsp {
u64 clk;
};
+struct set_vf_perm {
+ struct mbox_msghdr hdr;
+ u16 vf;
+#define RESET_VF_PERM BIT_ULL(0)
+#define VF_TRUSTED BIT_ULL(1)
+ u64 flags;
+};
+
/* CPT mailbox error codes
* Range 901 - 1000.
*/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 1e012e787260..19bad9a59c8f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -33,6 +33,10 @@ enum npc_kpu_la_ltype {
NPC_LT_LA_IH_2_ETHER,
NPC_LT_LA_HIGIG2_ETHER,
NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
+ NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_LT_LA_CH_LEN_90B_ETHER,
+ NPC_LT_LA_CPT_HDR,
+ NPC_LT_LA_CUSTOM_L2_24B_ETHER,
NPC_LT_LA_CUSTOM0 = 0xE,
NPC_LT_LA_CUSTOM1 = 0xF,
};
@@ -42,7 +46,7 @@ enum npc_kpu_lb_ltype {
NPC_LT_LB_CTAG,
NPC_LT_LB_STAG_QINQ,
NPC_LT_LB_BTAG,
- NPC_LT_LB_ITAG,
+ NPC_LT_LB_PPPOE,
NPC_LT_LB_DSA,
NPC_LT_LB_DSA_VLAN,
NPC_LT_LB_EDSA,
@@ -50,6 +54,7 @@ enum npc_kpu_lb_ltype {
NPC_LT_LB_EXDSA,
NPC_LT_LB_EXDSA_VLAN,
NPC_LT_LB_FDSA,
+ NPC_LT_LB_VLAN_EXDSA,
NPC_LT_LB_CUSTOM0 = 0xE,
NPC_LT_LB_CUSTOM1 = 0xF,
};
@@ -65,6 +70,7 @@ enum npc_kpu_lc_ltype {
NPC_LT_LC_NSH,
NPC_LT_LC_PTP,
NPC_LT_LC_FCOE,
+ NPC_LT_LC_NGIO,
NPC_LT_LC_CUSTOM0 = 0xE,
NPC_LT_LC_CUSTOM1 = 0xF,
};
@@ -146,7 +152,14 @@ enum npc_kpu_lh_ltype {
* Ethernet interfaces, LBK interfaces, etc.
*/
enum npc_pkind_type {
- NPC_TX_DEF_PKIND = 63ULL, /* NIX-TX PKIND */
+ NPC_RX_VLAN_EXDSA_PKIND = 56ULL,
+ NPC_RX_CHLEN24B_PKIND = 57ULL,
+ NPC_RX_CPT_HDR_PKIND,
+ NPC_RX_CHLEN90B_PKIND,
+ NPC_TX_HIGIG_PKIND,
+ NPC_RX_HIGIG_PKIND,
+ NPC_RX_EDSA_PKIND,
+ NPC_TX_DEF_PKIND, /* NIX-TX PKIND */
};
/* list of known and supported fields in packet header and
@@ -213,7 +226,7 @@ struct npc_kpu_profile_cam {
u16 dp1_mask;
u16 dp2;
u16 dp2_mask;
-};
+} __packed;
struct npc_kpu_profile_action {
u8 errlev;
@@ -233,13 +246,13 @@ struct npc_kpu_profile_action {
u8 mask;
u8 right;
u8 shift;
-};
+} __packed;
struct npc_kpu_profile {
int cam_entries;
int action_entries;
- const struct npc_kpu_profile_cam *cam;
- const struct npc_kpu_profile_action *action;
+ struct npc_kpu_profile_cam *cam;
+ struct npc_kpu_profile_action *action;
};
/* NPC KPU register formats */
@@ -425,7 +438,19 @@ struct nix_tx_action {
/* NPC MCAM reserved entry index per nixlf */
#define NIXLF_UCAST_ENTRY 0
#define NIXLF_BCAST_ENTRY 1
-#define NIXLF_PROMISC_ENTRY 2
+#define NIXLF_ALLMULTI_ENTRY 2
+#define NIXLF_PROMISC_ENTRY 3
+
+struct npc_coalesced_kpu_prfl {
+#define NPC_SIGN 0x00666f727063706e
+#define NPC_PRFL_NAME "npc_prfls_array"
+#define NPC_NAME_LEN 32
+ __le64 signature; /* "npcprof\0" (8 bytes/ASCII characters) */
+ u8 name[NPC_NAME_LEN]; /* KPU Profile name */
+ u64 version; /* KPU firmware/profile version */
+	u8 num_prfl; /* Number of NPC profiles. */
+ u16 prfl_sz[0];
+};
struct npc_mcam_kex {
/* MKEX Profle Header */
@@ -445,6 +470,15 @@ struct npc_mcam_kex {
u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
} __packed;
+struct npc_kpu_fwdata {
+ int entries;
+ /* What follows is:
+ * struct npc_kpu_profile_cam[entries];
+ * struct npc_kpu_profile_action[entries];
+ */
+ u8 data[0];
+} __packed;
+
struct npc_lt_def {
u8 ltype_mask;
u8 ltype_match;
@@ -459,6 +493,29 @@ struct npc_lt_def_ipsec {
u8 spi_nz;
};
+struct npc_lt_def_apad {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 valid;
+} __packed;
+
+struct npc_lt_def_color {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 noffset;
+ u8 offset;
+} __packed;
+
+struct npc_lt_def_et {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 valid;
+ u8 offset;
+} __packed;
+
struct npc_lt_def_cfg {
struct npc_lt_def rx_ol2;
struct npc_lt_def rx_oip4;
@@ -476,7 +533,41 @@ struct npc_lt_def_cfg {
struct npc_lt_def pck_oip4;
struct npc_lt_def pck_oip6;
struct npc_lt_def pck_iip4;
-};
+ struct npc_lt_def_apad rx_apad0;
+ struct npc_lt_def_apad rx_apad1;
+ struct npc_lt_def_color ovlan;
+ struct npc_lt_def_color ivlan;
+ struct npc_lt_def_color rx_gen0_color;
+ struct npc_lt_def_color rx_gen1_color;
+ struct npc_lt_def_et rx_et[2];
+} __packed;
+
+/* Loadable KPU profile firmware data */
+struct npc_kpu_profile_fwdata {
+#define KPU_SIGN 0x00666f727075706b
+#define KPU_NAME_LEN 32
+/** Maximum number of custom KPU entries supported by the built-in profile. */
+#define KPU_MAX_CST_ENT 2
+	/* KPU Profile Header */
+ __le64 signature; /* "kpuprof\0" (8 bytes/ASCII characters) */
+ u8 name[KPU_NAME_LEN]; /* KPU Profile name */
+ __le64 version; /* KPU profile version */
+ u8 kpus;
+ u8 reserved[7];
+
+	/* Default MKEX profile to be used with this KPU profile. May be
+	 * overridden with the mkex_profile module parameter. The format is
+	 * the same as for a standalone MKEX profile, to streamline processing.
+ */
+ struct npc_mcam_kex mkex;
+ /* LTYPE values for specific HW offloaded protocols. */
+ struct npc_lt_def_cfg lt_def;
+ /* Dynamically sized data:
+ * Custom KPU CAM and ACTION configuration entries.
+ * struct npc_kpu_fwdata kpu[kpus];
+ */
+ u8 data[0];
+} __packed;
struct rvu_npc_mcam_rule {
struct flow_msg packet;
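npc_coalesced_kpu_prfl and npc_kpu_profile_fwdata above both describe self-identifying firmware blobs: a fixed header with a 64-bit ASCII signature ("npcprof\0" / "kpuprof\0"), a name, and a version, followed by dynamically sized entries, so a loader can validate the magic before trusting any embedded lengths. A standalone sketch of that validation step; the struct layout is simplified and a little-endian host is assumed:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FW_SIGN 0x00666f727075706bull	/* "kpuprof\0" read as a LE u64 */

struct fw_hdr {			/* simplified blob header */
	uint64_t signature;	/* must match FW_SIGN */
	char name[32];
	uint64_t version;
	/* dynamically sized entries follow the header */
};

/* Accept the blob only if it is big enough and carries the right magic. */
static int fw_blob_valid(const void *blob, size_t len)
{
	struct fw_hdr hdr;

	if (len < sizeof(hdr))
		return 0;
	memcpy(&hdr, blob, sizeof(hdr));	/* avoid alignment assumptions */
	return hdr.signature == FW_SIGN;	/* LE host assumed here */
}

int main(void)
{
	struct fw_hdr good = { FW_SIGN, "default", 0x0000000100060000ull };

	printf("valid: %d\n", fw_blob_valid(&good, sizeof(good)));
	return 0;
}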
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index 5c372d2c24a1..fee655cc7523 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -11,7 +11,10 @@
#ifndef NPC_PROFILE_H
#define NPC_PROFILE_H
-#define NPC_KPU_PROFILE_VER 0x0000000100050000
+#define NPC_KPU_PROFILE_VER 0x0000000100060000
+#define NPC_KPU_VER_MAJ(ver) ((u16)(((ver) >> 32) & 0xFFFF))
+#define NPC_KPU_VER_MIN(ver) ((u16)(((ver) >> 16) & 0xFFFF))
+#define NPC_KPU_VER_PATCH(ver) ((u16)((ver) & 0xFFFF))
#define NPC_IH_W 0x8000
#define NPC_IH_UTAG 0x2000
@@ -20,6 +23,7 @@
#define NPC_ETYPE_IP6 0x86dd
#define NPC_ETYPE_ARP 0x0806
#define NPC_ETYPE_RARP 0x8035
+#define NPC_ETYPE_NGIO 0x8842
#define NPC_ETYPE_MPLSU 0x8847
#define NPC_ETYPE_MPLSM 0x8848
#define NPC_ETYPE_ETAG 0x893f
@@ -33,6 +37,10 @@
#define NPC_ETYPE_PPP 0x880b
#define NPC_ETYPE_NSH 0x894f
#define NPC_ETYPE_DSA 0xdada
+#define NPC_ETYPE_PPPOE 0x8864
+
+#define NPC_PPP_IP 0x0021
+#define NPC_PPP_IP6 0x0057
#define NPC_IPNH_HOP 0
#define NPC_IPNH_ICMP 1
@@ -142,14 +150,15 @@
#define NPC_DSA_EDSA 0x8000
#define NPC_DSA_FDSA 0xc000
-#define NPC_KEXOF_DMAC 8
-#define MKEX_SIGN 0x19bbfdbd15f /* strtoull of "mkexprof" with base:36 */
+#define NPC_KEXOF_DMAC 9
+#define MKEX_SIGN 0x19bbfdbd15f
#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
(((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
((flags_ena) << 6) | ((key_ofs) & 0x3F))
/* Rx parse key extract nibble enable */
#define NPC_PARSE_NIBBLE_INTF_RX (NPC_PARSE_NIBBLE_CHAN | \
+ NPC_PARSE_NIBBLE_ERRCODE | \
NPC_PARSE_NIBBLE_LA_LTYPE | \
NPC_PARSE_NIBBLE_LB_LTYPE | \
NPC_PARSE_NIBBLE_LC_LTYPE | \
@@ -170,25 +179,31 @@ enum npc_kpu_parser_state {
NPC_S_KPU1_EXDSA,
NPC_S_KPU1_HIGIG2,
NPC_S_KPU1_IH_NIX_HIGIG2,
+ NPC_S_KPU1_CUSTOM_L2_90B,
+ NPC_S_KPU1_CPT_HDR,
+ NPC_S_KPU1_CUSTOM_L2_24B,
+ NPC_S_KPU1_VLAN_EXDSA,
NPC_S_KPU2_CTAG,
NPC_S_KPU2_CTAG2,
NPC_S_KPU2_SBTAG,
NPC_S_KPU2_QINQ,
NPC_S_KPU2_ETAG,
- NPC_S_KPU2_ITAG,
NPC_S_KPU2_PREHEADER,
NPC_S_KPU2_EXDSA,
+ NPC_S_KPU2_NGIO,
NPC_S_KPU3_CTAG,
NPC_S_KPU3_STAG,
NPC_S_KPU3_QINQ,
- NPC_S_KPU3_ITAG,
NPC_S_KPU3_CTAG_C,
NPC_S_KPU3_STAG_C,
NPC_S_KPU3_QINQ_C,
NPC_S_KPU3_DSA,
+ NPC_S_KPU3_VLAN_EXDSA,
NPC_S_KPU4_MPLS,
NPC_S_KPU4_NSH,
NPC_S_KPU4_FDSA,
+ NPC_S_KPU4_VLAN_EXDSA,
+ NPC_S_KPU4_PPPOE,
NPC_S_KPU5_IP,
NPC_S_KPU5_IP6,
NPC_S_KPU5_ARP,
@@ -198,13 +213,19 @@ enum npc_kpu_parser_state {
NPC_S_KPU5_MPLS,
NPC_S_KPU5_MPLS_PL,
NPC_S_KPU5_NSH,
+ NPC_S_KPU5_CPT_IP,
+ NPC_S_KPU5_CPT_IP6,
NPC_S_KPU6_IP6_EXT,
NPC_S_KPU6_IP6_HOP_DEST,
NPC_S_KPU6_IP6_ROUT,
NPC_S_KPU6_IP6_FRAG,
+ NPC_S_KPU6_IP6_CPT_FRAG,
+ NPC_S_KPU6_IP6_CPT_HOP_DEST,
+ NPC_S_KPU6_IP6_CPT_ROUT,
NPC_S_KPU7_IP6_EXT,
NPC_S_KPU7_IP6_ROUT,
NPC_S_KPU7_IP6_FRAG,
+ NPC_S_KPU7_CPT_IP6_FRAG,
NPC_S_KPU8_TCP,
NPC_S_KPU8_UDP,
NPC_S_KPU8_SCTP,
@@ -265,7 +286,6 @@ enum npc_kpu_la_lflag {
NPC_F_LA_L_UNK_ETYPE = 1,
NPC_F_LA_L_WITH_VLAN,
NPC_F_LA_L_WITH_ETAG,
- NPC_F_LA_L_WITH_ITAG,
NPC_F_LA_L_WITH_MPLS,
NPC_F_LA_L_WITH_NSH,
};
@@ -442,7 +462,28 @@ enum NPC_ERRLEV_E {
NPC_ERRLEV_ENUM_LAST = 16,
};
-static const struct npc_kpu_profile_action ikpu_action_entries[] = {
+#define NPC_KPU_NOP_CAM \
+ { \
+ NPC_S_NA, 0xff, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ 0x0000, \
+ }
+
+#define NPC_KPU_NOP_ACTION \
+ { \
+ NPC_ERRLEV_RE, NPC_EC_NOERR, \
+ 0, 0, 0, 0, 0, \
+ NPC_S_NA, 0, 0, \
+ NPC_LID_LA, NPC_LT_NA, \
+ 0, \
+ 0, 0, 0, 0, \
+ }
+
+static struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
@@ -950,7 +991,7 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
+ NPC_S_KPU1_VLAN_EXDSA, 0, 0,
NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -958,8 +999,8 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
+ 36, 40, 44, 0, 0,
+ NPC_S_KPU1_CUSTOM_L2_24B, 0, 0,
NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -967,8 +1008,8 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
+ 40, 54, 58, 0, 0,
+ NPC_S_KPU1_CPT_HDR, 0, 0,
NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -976,8 +1017,8 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
+ 102, 106, 110, 0, 0,
+ NPC_S_KPU1_CUSTOM_L2_90B, 0, 0,
NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -1021,7 +1062,9 @@ static const struct npc_kpu_profile_action ikpu_action_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU1_ETHER, 0xff,
NPC_ETYPE_IP,
@@ -1080,6 +1123,15 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
NPC_S_KPU1_ETHER, 0xff,
NPC_ETYPE_CTAG,
0xffff,
+ NPC_ETYPE_NGIO,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_ETHER, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
NPC_ETYPE_CTAG,
0xffff,
0x0000,
@@ -1123,7 +1175,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_ITAG,
+ NPC_ETYPE_MPLSU,
0xffff,
0x0000,
0x0000,
@@ -1132,7 +1184,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_MPLSU,
+ NPC_ETYPE_MPLSM,
0xffff,
0x0000,
0x0000,
@@ -1141,7 +1193,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_MPLSM,
+ NPC_ETYPE_NSH,
0xffff,
0x0000,
0x0000,
@@ -1150,7 +1202,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_NSH,
+ NPC_ETYPE_DSA,
0xffff,
0x0000,
0x0000,
@@ -1159,7 +1211,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_DSA,
+ NPC_ETYPE_PPPOE,
0xffff,
0x0000,
0x0000,
@@ -1294,15 +1346,6 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH_NIX, 0xff,
- NPC_ETYPE_ITAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_IH_NIX, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
0x0000,
@@ -1339,8 +1382,8 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH, 0xff,
- NPC_IH_W|NPC_IH_UTAG,
- NPC_IH_W|NPC_IH_UTAG,
+ NPC_IH_W | NPC_IH_UTAG,
+ NPC_IH_W | NPC_IH_UTAG,
0x0000,
0x0000,
0x0000,
@@ -1349,7 +1392,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
{
NPC_S_KPU1_IH, 0xff,
NPC_IH_W,
- NPC_IH_W|NPC_IH_UTAG,
+ NPC_IH_W | NPC_IH_UTAG,
0x0000,
0x0000,
0x0000,
@@ -1358,7 +1401,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
{
NPC_S_KPU1_IH, 0xff,
0x0000,
- NPC_IH_W|NPC_IH_UTAG,
+ NPC_IH_W | NPC_IH_UTAG,
0x0000,
0x0000,
0x0000,
@@ -1501,15 +1544,6 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_HIGIG2, 0xff,
- NPC_ETYPE_ITAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_HIGIG2, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
0x0000,
@@ -1645,7 +1679,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
- NPC_ETYPE_ITAG,
+ NPC_ETYPE_MPLSU,
0xffff,
0x0000,
0x0000,
@@ -1654,7 +1688,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
- NPC_ETYPE_MPLSU,
+ NPC_ETYPE_MPLSM,
0xffff,
0x0000,
0x0000,
@@ -1663,7 +1697,7 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
- NPC_ETYPE_MPLSM,
+ NPC_ETYPE_NSH,
0xffff,
0x0000,
0x0000,
@@ -1672,6 +1706,132 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
NPC_ETYPE_NSH,
0xffff,
0x0000,
@@ -1680,7 +1840,88 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_IH_NIX_HIGIG2, 0xff,
+ NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ 0x0000,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ 0x0000,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ 0x0000,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ 0x0000,
+ 0xffff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
+ 0x0000,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU1_CPT_HDR, 0xff,
0x0000,
0x0000,
0x0000,
@@ -1689,6 +1930,150 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_SBTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_QINQ,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_ETAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_MPLSU,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_MPLSM,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_ETYPE_NSH,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU1_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0x00,
0x0000,
0x0000,
@@ -1699,7 +2084,9 @@ static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU2_CTAG, 0xff,
NPC_ETYPE_IP,
@@ -1783,6 +2170,24 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
},
{
NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
0x0000,
0x0000,
0x0000,
@@ -2226,15 +2631,6 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
NPC_S_KPU2_ETAG, 0xff,
NPC_ETYPE_SBTAG,
0xffff,
- NPC_ETYPE_ITAG,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ETAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
0x0000,
0x0000,
0x0000,
@@ -2313,159 +2709,6 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_RARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_ITAG, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
NPC_S_KPU2_CTAG2, 0xff,
NPC_ETYPE_IP,
0xffff,
@@ -2817,6 +3060,15 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU2_NGIO, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0x00,
0x0000,
0x0000,
@@ -2827,7 +3079,9 @@ static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu3_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU3_CTAG, 0xff,
NPC_ETYPE_IP,
@@ -3243,159 +3497,6 @@ static const struct npc_kpu_profile_cam kpu3_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_RARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU3_ITAG, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
NPC_S_KPU3_CTAG_C, 0xff,
NPC_ETYPE_IP,
0xffff,
@@ -3936,6 +4037,15 @@ static const struct npc_kpu_profile_cam kpu3_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU3_VLAN_EXDSA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0x00,
0x0000,
0x0000,
@@ -3946,7 +4056,9 @@ static const struct npc_kpu_profile_cam kpu3_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu4_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU4_MPLS, 0xff,
NPC_MPLS_S,
@@ -4084,6 +4196,78 @@ static const struct npc_kpu_profile_cam kpu4_cam_entries[] = {
},
{
NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_PPPOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ NPC_PPP_IP6,
+ 0xffff,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
0x0000,
NPC_DSA_FDSA,
0x0000,
@@ -4092,6 +4276,87 @@ static const struct npc_kpu_profile_cam kpu4_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_VLAN_EXDSA, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_PPPOE, 0xff,
+ NPC_PPP_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_PPPOE, 0xff,
+ NPC_PPP_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0x00,
0x0000,
0x0000,
@@ -4102,7 +4367,9 @@ static const struct npc_kpu_profile_cam kpu4_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU5_IP, 0xff,
0x0000,
@@ -4125,116 +4392,116 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_TCP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_UDP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_SCTP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_ICMP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_IGMP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_ESP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_AH,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_GRE,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_IP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_IP6,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
NPC_IPNH_MPLS,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
0x0000,
0x0000,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
0x0000,
0x0000,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -4245,7 +4512,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4254,7 +4521,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4263,7 +4530,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4272,7 +4539,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4281,7 +4548,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4290,7 +4557,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4299,7 +4566,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4308,7 +4575,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4317,7 +4584,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4326,7 +4593,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4335,7 +4602,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4344,7 +4611,7 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_IP_VER_4,
NPC_IP_VER_MASK,
0x0000,
- NPC_IP_HDR_MF|NPC_IP_HDR_FRAGOFF,
+ NPC_IP_HDR_MF | NPC_IP_HDR_FRAGOFF,
},
{
NPC_S_KPU5_IP, 0xff,
@@ -4662,6 +4929,429 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ NPC_IP_TTL_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0001,
+ NPC_IP_HDR_FRAGOFF,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_GRE,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP6,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_MPLS,
+ 0x00ff,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_TCP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_UDP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_SCTP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ICMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IGMP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_ESP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_AH,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_GRE,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_IP6,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ NPC_IPNH_MPLS,
+ 0x00ff,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_4,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ 0x0000,
+ NPC_IP6_HOP_MASK,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_HOP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_DEST << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_MOBILITY << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_HOSTID << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ NPC_IPNH_SHIM6 << 8,
+ 0xff00,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_IP_VER_6,
+ NPC_IP_VER_MASK,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU5_CPT_IP6, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0x00,
0x0000,
0x0000,
@@ -4672,7 +5362,9 @@ static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu6_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU6_IP6_EXT, 0xff,
0x0000,
@@ -5007,6 +5699,330 @@ static const struct npc_kpu_profile_cam kpu6_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_FRAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_ROUT << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ NPC_IPNH_FRAG << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU6_IP6_CPT_ROUT, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0x00,
0x0000,
0x0000,
@@ -5017,7 +6033,9 @@ static const struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu7_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU7_IP6_EXT, 0xff,
0x0000,
@@ -5226,6 +6244,105 @@ static const struct npc_kpu_profile_cam kpu7_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_TCP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_UDP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_SCTP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_ICMP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_ESP << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_AH << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_GRE << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_IP6 << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ NPC_IPNH_MPLS << 8,
+ 0xff00,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU7_CPT_IP6_FRAG, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0x00,
0x0000,
0x0000,
@@ -5236,7 +6353,9 @@ static const struct npc_kpu_profile_cam kpu7_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU8_TCP, 0xff,
0x0000,
@@ -5259,8 +6378,8 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_TCP, 0xff,
0x0000,
0x0000,
- NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN,
- NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN,
0x0000,
0x0000,
},
@@ -5268,8 +6387,8 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_TCP, 0xff,
0x0000,
0x0000,
- NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN,
- NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN,
0x0000,
0x0000,
},
@@ -5277,8 +6396,8 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_TCP, 0xff,
0x0000,
0x0000,
- NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN,
- NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN,
0x0000,
0x0000,
},
@@ -5286,8 +6405,8 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_TCP, 0xff,
0x0000,
0x0000,
- NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN,
- NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN,
0x0000,
0x0000,
},
@@ -5565,7 +6684,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
0xffff,
0x0000,
0x0000,
@@ -5574,7 +6693,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5583,7 +6702,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5592,7 +6711,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5637,7 +6756,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_MPLSM,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
0xffff,
0x0000,
0x0000,
@@ -5646,7 +6765,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_MPLSM,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5655,7 +6774,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_MPLSM,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5664,7 +6783,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_MPLSM,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5709,7 +6828,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_NSH,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
0xffff,
0x0000,
0x0000,
@@ -5718,7 +6837,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_NSH,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5727,7 +6846,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_NSH,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5736,7 +6855,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_NSH,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5781,7 +6900,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_IP,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
0xffff,
0x0000,
0x0000,
@@ -5790,7 +6909,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_IP,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5799,7 +6918,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_IP,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5808,7 +6927,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_IP,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5853,7 +6972,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_IP6,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY,
0xffff,
0x0000,
0x0000,
@@ -5862,7 +6981,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_IP6,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5871,7 +6990,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_IP6,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5880,7 +6999,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_IP6,
0xffff,
- NPC_GRE_F_CSUM|NPC_GRE_F_KEY|NPC_GRE_F_SEQ,
+ NPC_GRE_F_CSUM | NPC_GRE_F_KEY | NPC_GRE_F_SEQ,
0xffff,
0x0000,
0x0000,
@@ -5916,7 +7035,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_PPP,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_VER_1,
+ NPC_GRE_F_KEY | NPC_GRE_VER_1,
0xffff,
0x0000,
0x0000,
@@ -5925,7 +7044,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_PPP,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_F_SEQ|NPC_GRE_VER_1,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_VER_1,
0xffff,
0x0000,
0x0000,
@@ -5934,7 +7053,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_PPP,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_F_ACK|NPC_GRE_VER_1,
+ NPC_GRE_F_KEY | NPC_GRE_F_ACK | NPC_GRE_VER_1,
0xffff,
0x0000,
0x0000,
@@ -5943,7 +7062,7 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_S_KPU8_GRE, 0xff,
NPC_ETYPE_PPP,
0xffff,
- NPC_GRE_F_KEY|NPC_GRE_F_SEQ|NPC_GRE_F_ACK|NPC_GRE_VER_1,
+ NPC_GRE_F_KEY | NPC_GRE_F_SEQ | NPC_GRE_F_ACK | NPC_GRE_VER_1,
0xffff,
0x0000,
0x0000,
@@ -5977,7 +7096,9 @@ static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu9_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
NPC_MPLS_S,
@@ -6387,8 +7508,8 @@ static const struct npc_kpu_profile_cam kpu9_cam_entries[] = {
NPC_S_KPU9_GTPU, 0xff,
0x0000,
0x0000,
- 0x0000,
- 0x0000,
+ NPC_GTP_PT_GTP | NPC_GTP_VER1,
+ NPC_GTP_PT_MASK | NPC_GTP_VER_MASK,
0x0000,
0x0000,
},
@@ -6448,7 +7569,9 @@ static const struct npc_kpu_profile_cam kpu9_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu10_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU10_TU_MPLS, 0xff,
NPC_MPLS_S,
@@ -6613,7 +7736,9 @@ static const struct npc_kpu_profile_cam kpu10_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu11_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU11_TU_ETHER, 0xff,
NPC_ETYPE_IP,
@@ -6922,13 +8047,15 @@ static const struct npc_kpu_profile_cam kpu11_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_TCP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -6936,8 +8063,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_UDP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -6945,8 +8072,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_SCTP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -6954,8 +8081,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_ICMP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -6963,8 +8090,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_IGMP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -6972,8 +8099,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_ESP,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -6981,8 +8108,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_AH,
0x00ff,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -6990,8 +8117,8 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
NPC_S_KPU12_TU_IP, 0xff,
0x0000,
0x0000,
- NPC_IP_VER_4|NPC_IP_HDR_LEN_5,
- NPC_IP_VER_MASK|NPC_IP_HDR_LEN_MASK,
+ NPC_IP_VER_4 | NPC_IP_HDR_LEN_5,
+ NPC_IP_VER_MASK | NPC_IP_HDR_LEN_MASK,
0x0000,
0x0000,
},
@@ -7177,7 +8304,9 @@ static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu13_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU13_TU_IP6_EXT, 0xff,
0x0000,
@@ -7189,7 +8318,9 @@ static const struct npc_kpu_profile_cam kpu13_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu14_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU14_TU_IP6_EXT, 0xff,
0x0000,
@@ -7201,7 +8332,9 @@ static const struct npc_kpu_profile_cam kpu14_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu15_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU15_TU_TCP, 0xff,
0x0000,
@@ -7224,8 +8357,8 @@ static const struct npc_kpu_profile_cam kpu15_cam_entries[] = {
NPC_S_KPU15_TU_TCP, 0xff,
0x0000,
0x0000,
- NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN,
- NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_FIN,
0x0000,
0x0000,
},
@@ -7233,8 +8366,8 @@ static const struct npc_kpu_profile_cam kpu15_cam_entries[] = {
NPC_S_KPU15_TU_TCP, 0xff,
0x0000,
0x0000,
- NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN,
- NPC_TCP_FLAGS_URG|NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_URG | NPC_TCP_FLAGS_SYN,
0x0000,
0x0000,
},
@@ -7242,8 +8375,8 @@ static const struct npc_kpu_profile_cam kpu15_cam_entries[] = {
NPC_S_KPU15_TU_TCP, 0xff,
0x0000,
0x0000,
- NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN,
- NPC_TCP_FLAGS_RST|NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN,
+ NPC_TCP_FLAGS_RST | NPC_TCP_FLAGS_SYN,
0x0000,
0x0000,
},
@@ -7251,8 +8384,8 @@ static const struct npc_kpu_profile_cam kpu15_cam_entries[] = {
NPC_S_KPU15_TU_TCP, 0xff,
0x0000,
0x0000,
- NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN,
- NPC_TCP_FLAGS_SYN|NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN,
+ NPC_TCP_FLAGS_SYN | NPC_TCP_FLAGS_FIN,
0x0000,
0x0000,
},
@@ -7402,7 +8535,9 @@ static const struct npc_kpu_profile_cam kpu15_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_cam kpu16_cam_entries[] = {
+static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU16_TCP_DATA, 0xff,
0x0000,
@@ -7459,7 +8594,9 @@ static const struct npc_kpu_profile_cam kpu16_cam_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu1_action_entries[] = {
+static struct npc_kpu_profile_action kpu1_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 3, 0,
@@ -7511,6 +8648,14 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 12, 0, 0, 0,
+ NPC_S_KPU2_NGIO, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
NPC_S_KPU2_CTAG2, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
@@ -7518,7 +8663,7 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 0, 0, 0,
+ 4, 8, 12, 0, 0,
NPC_S_KPU2_CTAG, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
@@ -7550,14 +8695,6 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 18, 22, 26, 0, 0,
- NPC_S_KPU2_ITAG, 12, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ITAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 2, 0,
NPC_S_KPU4_MPLS, 14, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
@@ -7590,6 +8727,14 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 2, 0,
+ NPC_S_KPU4_PPPOE, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LA, NPC_LT_LA_8023,
@@ -7707,15 +8852,6 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 18, 22, 26, 0, 0,
- NPC_S_KPU2_ITAG, 20, 1,
- NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
- NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_TAG
- | NPC_F_LA_L_WITH_ITAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 2, 0,
NPC_S_KPU4_MPLS, 22, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_ETHER,
@@ -7788,7 +8924,7 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 16, 2, 0,
+ 4, 8, 12, 2, 0,
NPC_S_KPU4_FDSA, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
0,
@@ -7897,15 +9033,6 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 18, 22, 26, 0, 0,
- NPC_S_KPU2_ITAG, 28, 1,
- NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
- NPC_F_LA_U_HAS_HIGIG2 | NPC_F_LA_U_HAS_TAG
- | NPC_F_LA_L_WITH_ITAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 2, 0,
NPC_S_KPU4_MPLS, 30, 1,
NPC_LID_LA, NPC_LT_LA_HIGIG2_ETHER,
@@ -8031,15 +9158,6 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 18, 22, 26, 0, 0,
- NPC_S_KPU2_ITAG, 36, 1,
- NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
- NPC_F_LA_U_HAS_IH_NIX | NPC_F_LA_U_HAS_HIGIG2
- | NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ITAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 2, 0,
NPC_S_KPU4_MPLS, 38, 1,
NPC_LID_LA, NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
@@ -8075,6 +9193,326 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 102, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 102, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 102, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 102, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 102, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 104, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_CPT_IP, 56, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_CPT_IP6, 56, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 54, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 54, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_CPT_IP, 60, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_CPT_IP6, 60, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 58, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 58, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 3, 0,
+ NPC_S_KPU5_IP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 3, 0,
+ NPC_S_KPU5_IP6, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_ARP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_RARP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_PTP, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU5_FCOE, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 0, 0, 0,
+ NPC_S_KPU2_CTAG2, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_CTAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 22, 0, 0,
+ NPC_S_KPU2_SBTAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 0, 0, 0,
+ NPC_S_KPU2_QINQ, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 12, 26, 0, 0,
+ NPC_S_KPU2_ETAG, 36, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU4_MPLS, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_L_WITH_MPLS,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU4_NSH, 38, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_L_WITH_NSH,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_F_LA_L_UNK_ETYPE,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 12, 0, 0, 1, 0,
+ NPC_S_KPU3_VLAN_EXDSA, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LA, NPC_EC_L2_K1,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -8084,7 +9522,9 @@ static const struct npc_kpu_profile_action kpu1_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu2_action_entries[] = {
+static struct npc_kpu_profile_action kpu2_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
@@ -8159,6 +9599,22 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
@@ -8170,7 +9626,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
8, 0, 6, 2, 0,
NPC_S_KPU5_IP, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8178,7 +9634,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
6, 0, 0, 2, 0,
NPC_S_KPU5_IP6, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8186,7 +9642,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_ARP, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8194,7 +9650,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_RARP, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8202,7 +9658,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_PTP, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8210,7 +9666,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_FCOE, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8218,7 +9674,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 6, 10, 1, 0,
NPC_S_KPU4_MPLS, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8226,7 +9682,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 6, 10, 1, 0,
NPC_S_KPU4_MPLS, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8234,7 +9690,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 0, 0, 1, 0,
NPC_S_KPU4_NSH, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8242,7 +9698,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG_UNK,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG_UNK,
0, 0, 0, 0,
},
{
@@ -8250,7 +9706,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 6, 0, 0, 0,
NPC_S_KPU3_CTAG, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_STAG_CTAG,
0, 0, 0, 0,
},
{
@@ -8258,7 +9714,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 6, 0, 0, 0,
NPC_S_KPU3_STAG, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_STAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_STAG_STAG,
0, 0, 0, 0,
},
{
@@ -8266,7 +9722,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
8, 0, 6, 2, 0,
NPC_S_KPU5_IP, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8274,7 +9730,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
6, 0, 0, 2, 0,
NPC_S_KPU5_IP6, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8282,7 +9738,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_ARP, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8290,7 +9746,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_RARP, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8298,7 +9754,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_PTP, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8306,7 +9762,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_FCOE, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8314,7 +9770,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 6, 10, 1, 0,
NPC_S_KPU4_MPLS, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8322,7 +9778,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 6, 10, 1, 0,
NPC_S_KPU4_MPLS, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8330,7 +9786,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 0, 0, 1, 0,
NPC_S_KPU4_NSH, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8338,7 +9794,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 0, 0, 0, 0,
NPC_S_KPU3_STAG, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_STAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_STAG,
0, 0, 0, 0,
},
{
@@ -8346,7 +9802,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 0, 0, 0, 0,
NPC_S_KPU3_CTAG, 24, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_CTAG,
0, 0, 0, 0,
},
{
@@ -8354,7 +9810,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LB, NPC_LT_LB_BTAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_UNK,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_UNK,
0, 0, 0, 0,
},
{
@@ -8546,15 +10002,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 0, 0, 0, 0,
NPC_S_KPU3_CTAG, 10, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 16, 20, 24, 0, 0,
- NPC_S_KPU3_ITAG, 14, 1,
- NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_BTAG_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
0, 0, 0, 0,
},
{
@@ -8562,7 +10010,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 6, 0, 0, 0,
NPC_S_KPU3_STAG, 10, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_STAG,
0, 0, 0, 0,
},
{
@@ -8570,7 +10018,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 6, 0, 0, 0,
NPC_S_KPU3_QINQ, 10, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_QINQ,
0, 0, 0, 0,
},
{
@@ -8578,7 +10026,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
8, 0, 6, 2, 0,
NPC_S_KPU5_IP, 28, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8586,7 +10034,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
6, 0, 0, 2, 0,
NPC_S_KPU5_IP6, 28, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8594,7 +10042,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 2, 0,
NPC_S_KPU5_ARP, 28, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG,
0, 0, 0, 0,
},
{
@@ -8602,7 +10050,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 0, 0, 0, 0,
NPC_S_KPU3_STAG, 28, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_STAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_STAG,
0, 0, 0, 0,
},
{
@@ -8610,7 +10058,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
2, 0, 0, 0, 0,
NPC_S_KPU3_CTAG, 28, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_CTAG,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_CTAG,
0, 0, 0, 0,
},
{
@@ -8618,7 +10066,7 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LB, NPC_LT_LB_ETAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_ITAG_UNK,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_ITAG_UNK,
0, 0, 0, 0,
},
{
@@ -8632,142 +10080,6 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 20, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 20, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 20, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_RARP, 20, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 28, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 28, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 28, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_STAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 24, 1,
- NPC_LID_LB, NPC_LT_LB_ITAG,
- NPC_F_LB_U_MORE_TAG|NPC_F_LB_L_WITH_CTAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
NPC_S_KPU5_IP, 10, 1,
NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
0,
@@ -9078,6 +10390,14 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NGIO,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K3,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -9087,11 +10407,13 @@ static const struct npc_kpu_profile_action kpu2_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu3_action_entries[] = {
+static struct npc_kpu_profile_action kpu3_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
- NPC_S_KPU5_IP, 4, 0,
+ NPC_S_KPU5_IP, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
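Each kpuN_action_entries table in this patch drops its const qualifier and gains two NPC_KPU_NOP_ACTION entries at indices 0 and 1. Dropping const presumably lets the driver adjust the built-in tables in place when a custom profile is loaded (this diff also adds a kpu_profile module parameter and a custom flag to npc_kpu_profile_adapter), and the NOP slots appear to reserve the first two action indices so action numbering stays aligned with the corresponding CAM tables. A plausible expansion of the macro, modeled on the catch-all entries above (an assumption; the real definition lives in npc_profile.h):

	#define NPC_KPU_NOP_ACTION			\
	{						\
		NPC_ERRLEV_RE, NPC_EC_NOERR,		\
		0, 0, 0, 0, 0,				\
		NPC_S_NA, 0, 0,				\
		NPC_LID_LA, NPC_LT_NA,			\
		0,					\
		0, 0, 0, 0,				\
	}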
@@ -9099,7 +10421,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
6, 0, 0, 1, 0,
- NPC_S_KPU5_IP6, 4, 0,
+ NPC_S_KPU5_IP6, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9107,7 +10429,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_ARP, 4, 0,
+ NPC_S_KPU5_ARP, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9115,7 +10437,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_RARP, 4, 0,
+ NPC_S_KPU5_RARP, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9123,7 +10445,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_PTP, 4, 0,
+ NPC_S_KPU5_PTP, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9131,7 +10453,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 1, 0,
- NPC_S_KPU5_FCOE, 4, 0,
+ NPC_S_KPU5_FCOE, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9139,7 +10461,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 0, 0,
- NPC_S_KPU4_MPLS, 4, 0,
+ NPC_S_KPU4_MPLS, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9147,7 +10469,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 0, 0,
- NPC_S_KPU4_MPLS, 4, 0,
+ NPC_S_KPU4_MPLS, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9155,7 +10477,7 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 0, 0, 0, 0,
- NPC_S_KPU4_NSH, 4, 0,
+ NPC_S_KPU4_NSH, 6, 0,
NPC_LID_LB, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -9458,142 +10780,6 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 18, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 18, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 18, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_RARP, 18, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 1, 0,
- NPC_S_KPU5_IP, 26, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
- NPC_S_KPU5_IP6, 26, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU5_ARP, 26, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 1, 0,
- NPC_S_KPU5_IP, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
- NPC_S_KPU5_IP6, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU5_ARP, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 1, 0,
- NPC_S_KPU5_IP, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 1, 0,
- NPC_S_KPU5_IP6, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU5_ARP, 22, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_LB, NPC_EC_L2_K3_ETYPE_UNK,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
NPC_S_KPU5_IP, 4, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
@@ -10073,6 +11259,14 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU4_VLAN_EXDSA, 12, 1,
+ NPC_LID_LB, NPC_LT_LB_VLAN_EXDSA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K3,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -10082,7 +11276,9 @@ static const struct npc_kpu_profile_action kpu3_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu4_action_entries[] = {
+static struct npc_kpu_profile_action kpu4_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
@@ -10205,6 +11401,70 @@ static const struct npc_kpu_profile_action kpu4_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_ARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_RARP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_PTP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_FCOE, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 14, 1,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
NPC_LID_LB, NPC_LT_LB_FDSA,
@@ -10212,6 +11472,78 @@ static const struct npc_kpu_profile_action kpu4_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_ARP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_RARP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_PTP, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_FCOE, 2, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 10, 0,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 10, 0,
+ NPC_LID_LB, NPC_LT_LB_PPPOE,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K4,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -10221,7 +11553,9 @@ static const struct npc_kpu_profile_action kpu4_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu5_action_entries[] = {
+static struct npc_kpu_profile_action kpu5_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_LC, NPC_EC_IP_TTL_0,
0, 0, 0, 0, 1,
@@ -10719,6 +12053,382 @@ static const struct npc_kpu_profile_action kpu5_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_LC, NPC_EC_IP_TTL_0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP_FRAG_OFFSET_1,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_IP_FRAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_UDP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_IGMP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_IP_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_6TO4,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_L_MPLS_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 2, 0,
+ NPC_S_KPU8_UDP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_IGMP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ 0,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 6, 0,
+ NPC_S_KPU12_TU_IP, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_IP_IN_IP,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_6TO4,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 20, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_L_MPLS_IN_IP,
+ 0, 0xf, 0, 2,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP_OPT,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP6_HOP_0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 2, 0,
+ NPC_S_KPU8_TCP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 0, 0, 2, 0,
+ NPC_S_KPU8_UDP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_SCTP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_ICMP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_GRE, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 6, 0,
+ NPC_S_KPU12_TU_IP6, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_L_IP6_TUN_IP6,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 3, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_L_IP6_MPLS_IN_IP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_HOP,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_HOP_DEST, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_DEST,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_ROUT, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_ROUT,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU6_IP6_CPT_FRAG, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_U_IP6_FRAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU8_AH, 40, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_MOBILITY,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_HOSTID,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6_EXT,
+ NPC_F_LC_L_EXT_SHIM6,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ NPC_F_LC_U_UNK_PROTO,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_LC, NPC_EC_IP6_VER,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_IP6,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LC, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -10728,7 +12438,9 @@ static const struct npc_kpu_profile_action kpu5_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu6_action_entries[] = {
+static struct npc_kpu_profile_action kpu6_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -11026,6 +12738,294 @@ static const struct npc_kpu_profile_action kpu6_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU7_IP6_ROUT, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU7_CPT_IP6_FRAG, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 1, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 1, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 5, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 2, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 2, 0, 0, 0,
+ NPC_S_KPU7_CPT_IP6_FRAG, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 1, 0xff, 0, 3,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LC, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -11035,7 +13035,9 @@ static const struct npc_kpu_profile_action kpu6_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu7_action_entries[] = {
+static struct npc_kpu_profile_action kpu7_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -11221,6 +13223,94 @@ static const struct npc_kpu_profile_action kpu7_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 12, 0, 0, 0,
+ NPC_S_KPU8_TCP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 8, 10, 0, 0,
+ NPC_S_KPU8_UDP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_SCTP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_ICMP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU9_ESP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_AH, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU8_GRE, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 4, 0,
+ NPC_S_KPU12_TU_IP6, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 2, 6, 10, 1, 0,
+ NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LC, NPC_LT_NA,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LC, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -11230,7 +13320,9 @@ static const struct npc_kpu_profile_action kpu7_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu8_action_entries[] = {
+static struct npc_kpu_profile_action kpu8_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_FIN_ONLY,
0, 0, 0, 0, 1,
@@ -11889,7 +13981,9 @@ static const struct npc_kpu_profile_action kpu8_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu9_action_entries[] = {
+static struct npc_kpu_profile_action kpu9_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
@@ -12252,10 +14346,10 @@ static const struct npc_kpu_profile_action kpu9_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU12_TU_IP, 8, 1,
NPC_LID_LE, NPC_LT_LE_GTPU,
- NPC_F_LE_L_GTPU_UNK,
+ 0,
0, 0, 0, 0,
},
{
@@ -12308,7 +14402,9 @@ static const struct npc_kpu_profile_action kpu9_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu10_action_entries[] = {
+static struct npc_kpu_profile_action kpu10_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
@@ -12455,7 +14551,9 @@ static const struct npc_kpu_profile_action kpu10_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu11_action_entries[] = {
+static struct npc_kpu_profile_action kpu11_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 0, 0,
@@ -12730,7 +14828,9 @@ static const struct npc_kpu_profile_action kpu11_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu12_action_entries[] = {
+static struct npc_kpu_profile_action kpu12_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 12, 0, 2, 0,
@@ -12957,7 +15057,9 @@ static const struct npc_kpu_profile_action kpu12_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu13_action_entries[] = {
+static struct npc_kpu_profile_action kpu13_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -12968,7 +15070,9 @@ static const struct npc_kpu_profile_action kpu13_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu14_action_entries[] = {
+static struct npc_kpu_profile_action kpu14_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -12979,7 +15083,9 @@ static const struct npc_kpu_profile_action kpu14_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu15_action_entries[] = {
+static struct npc_kpu_profile_action kpu15_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_FIN_ONLY,
0, 0, 0, 0, 1,
@@ -13158,7 +15264,9 @@ static const struct npc_kpu_profile_action kpu15_action_entries[] = {
},
};
-static const struct npc_kpu_profile_action kpu16_action_entries[] = {
+static struct npc_kpu_profile_action kpu16_action_entries[] = {
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -13209,7 +15317,7 @@ static const struct npc_kpu_profile_action kpu16_action_entries[] = {
},
};
-static const struct npc_kpu_profile npc_kpu_profiles[] = {
+static struct npc_kpu_profile npc_kpu_profiles[] = {
{
ARRAY_SIZE(kpu1_cam_entries),
ARRAY_SIZE(kpu1_action_entries),
@@ -13308,12 +15416,22 @@ static const struct npc_kpu_profile npc_kpu_profiles[] = {
},
};
-static const struct npc_lt_def_cfg npc_lt_defaults = {
+static struct npc_lt_def_cfg npc_lt_defaults = {
.rx_ol2 = {
.lid = NPC_LID_LA,
.ltype_match = NPC_LT_LA_ETHER,
.ltype_mask = 0x0F,
},
+ .ovlan = {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_LB_CTAG,
+ .ltype_mask = 0x0F,
+ },
+ .ivlan = {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_LB_STAG_QINQ,
+ .ltype_mask = 0x0F,
+ },
.rx_oip4 = {
.lid = NPC_LID_LC,
.ltype_match = NPC_LT_LC_IP,
@@ -13392,6 +15510,30 @@ static const struct npc_lt_def_cfg npc_lt_defaults = {
.ltype_match = NPC_LT_LG_TU_IP,
.ltype_mask = 0x0F,
},
+ .rx_apad0 = {
+ .valid = 0,
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP6,
+ .ltype_mask = 0x0F,
+ },
+ .rx_apad1 = {
+ .valid = 0,
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP6,
+ .ltype_mask = 0x0F,
+ },
+ .rx_et = {
+ {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_NA,
+ .ltype_mask = 0x0,
+ },
+ {
+ .lid = NPC_LID_LB,
+ .ltype_match = NPC_LT_NA,
+ .ltype_mask = 0x0,
+ },
+ },
};
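The new .ovlan and .ivlan entries teach the default layer-type table where outer and inner VLAN tags live: the outer tag is a layer-B CTAG, the inner tag a layer-B STAG/QinQ. A default entry matches when the parsed layer's type equals ltype_match under ltype_mask; a minimal sketch of that comparison (names illustrative, not the driver's):

	static bool lt_def_matches(u8 parsed_ltype, u8 ltype_match, u8 ltype_mask)
	{
		return (parsed_ltype & ltype_mask) == (ltype_match & ltype_mask);
	}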
static struct npc_mcam_kex npc_mkex_default = {
@@ -13399,7 +15541,7 @@ static struct npc_mcam_kex npc_mkex_default = {
.name = "default",
.kpu_version = NPC_KPU_PROFILE_VER,
.keyx_cfg = {
- /* nibble: LA..LE (ltype only) + channel */
+ /* nibble: LA..LE (ltype only) + error code + channel */
[NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_RX,
/* nibble: LA..LE (ltype only) */
[NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | NPC_PARSE_NIBBLE_INTF_TX,
@@ -13410,30 +15552,40 @@ static struct npc_mcam_kex npc_mkex_default = {
[NPC_LID_LA] = {
/* Layer A: Ethernet: */
[NPC_LT_LA_ETHER] = {
- /* DMAC: 6 bytes, KW1[47:0] */
+ /* DMAC: 6 bytes, KW1[55:8] */
KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_KEXOF_DMAC),
- /* Ethertype: 2 bytes, KW0[47:32] */
- KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x5),
+ },
+ /* Layer A: HiGig2: */
+ [NPC_LT_LA_HIGIG2_ETHER] = {
+ /* Classification: 2 bytes, KW1[23:8] */
+ KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, NPC_KEXOF_DMAC),
+ /* VID: 2 bytes, KW1[39:24] */
+ KEX_LD_CFG(0x01, 0xc, 0x1, 0x0,
+ NPC_KEXOF_DMAC + 2),
},
},
[NPC_LID_LB] = {
/* Layer B: Single VLAN (CTAG) */
- /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
[NPC_LT_LB_CTAG] = {
- KEX_LD_CFG(0x03, 0x2, 0x1, 0x0, 0x4),
+ /* CTAG VLAN: 2 bytes, KW1[7:0], KW0[63:56] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x7),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x5),
},
/* Layer B: Stacked VLAN (STAG|QinQ) */
[NPC_LT_LB_STAG_QINQ] = {
- /* Outer VLAN: 2 bytes, KW0[63:48] */
- KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x6),
- /* Ethertype: 2 bytes, KW0[47:32] */
- KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x4),
+ /* Outer VLAN: 2 bytes, KW1[7:0], KW0[63:56] */
+ KEX_LD_CFG(0x01, 0x2, 0x1, 0x0, 0x7),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0x8, 0x1, 0x0, 0x5),
},
[NPC_LT_LB_FDSA] = {
- /* SWITCH PORT: 1 byte, KW0[63:48] */
- KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0x6),
- /* Ethertype: 2 bytes, KW0[47:32] */
- KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x4),
+ /* SWITCH PORT: 1 byte, KW0[63:56] */
+ KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0x7),
+ /* Ethertype: 2 bytes, KW0[55:40] */
+ KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x5),
},
},
[NPC_LID_LC] = {
@@ -13477,6 +15629,13 @@ static struct npc_mcam_kex npc_mkex_default = {
/* DMAC: 6 bytes, KW1[63:16] */
KEX_LD_CFG(0x05, 0x8, 0x1, 0x0, 0xa),
},
+ /* Layer A: HiGig2: */
+ [NPC_LT_LA_IH_NIX_HIGIG2_ETHER] = {
+ /* PF_FUNC: 2 bytes, KW0[47:32] */
+ KEX_LD_CFG(0x01, 0x0, 0x1, 0x0, 0x4),
+ /* VID: 2 bytes, KW1[31:16] */
+ KEX_LD_CFG(0x01, 0x10, 0x1, 0x0, 0xa),
+ },
},
[NPC_LID_LB] = {
/* Layer B: Single VLAN (CTAG) */
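Throughout this MKEX rework the key offsets shift by one byte (0x4 becomes 0x5, 0x6 becomes 0x7) to make room for the error-code nibble now included in the RX parse key, per the keyx_cfg comment above. For reference, KEX_LD_CFG packs one extract descriptor; its definition in the driver's npc.h is, to the best of recollection (treat the exact bit positions as an assumption):

	#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs)	\
		(((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) |	\
		 ((flags_ena) << 6) | ((key_ofs) & 0x3F))

so the DMAC line reads: extract bytesm1 + 1 = 6 bytes from header offset 0x0 into key offset NPC_KEXOF_DMAC, extraction enabled.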
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index ab24a5e8ee8a..0b092949d7ac 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -57,6 +57,10 @@ static char *mkex_profile; /* MKEX profile name */
module_param(mkex_profile, charp, 0000);
MODULE_PARM_DESC(mkex_profile, "MKEX profile name string");
+static char *kpu_profile; /* KPU profile name */
+module_param(kpu_profile, charp, 0000);
+MODULE_PARM_DESC(kpu_profile, "KPU profile name string");
+
static void rvu_setup_hw_capabilities(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -180,6 +184,14 @@ int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
return (rsrc->max - used);
}
+bool is_rsrc_free(struct rsrc_bmap *rsrc, int id)
+{
+ if (!rsrc->bmap)
+ return false;
+
+ return !test_bit(id, rsrc->bmap);
+}
+
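is_rsrc_free() is the read-only complement of rvu_alloc_rsrc()/rvu_free_rsrc(): a clear bmap bit means the id is not allocated. The bandwidth-profile debugfs code later in this patch uses it to skip unused slots, roughly:

	for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
		if (is_rsrc_free(&ipolicer->band_prof, idx))
			continue;
		/* idx is allocated; safe to read its context */
	}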
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
{
rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
@@ -1754,6 +1766,48 @@ int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
+ int blkaddr, nixlf;
+ u16 target;
+
+ /* Only PF can add VF permissions */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) || is_afvf(pcifunc))
+ return -EOPNOTSUPP;
+
+ target = (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1);
+ pfvf = rvu_get_pfvf(rvu, target);
+
+ if (req->flags & RESET_VF_PERM) {
+ pfvf->flags &= RVU_CLEAR_VF_PERM;
+ } else if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) ^
+ (req->flags & VF_TRUSTED)) {
+ change_bit(PF_SET_VF_TRUSTED, &pfvf->flags);
+ /* disable multicast and promisc entries */
+ if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags)) {
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, target);
+ if (blkaddr < 0)
+ return 0;
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr],
+ target, 0);
+ if (nixlf < 0)
+ return 0;
+ npc_enadis_default_mce_entry(rvu, target, nixlf,
+ NIXLF_ALLMULTI_ENTRY,
+ false);
+ npc_enadis_default_mce_entry(rvu, target, nixlf,
+ NIXLF_PROMISC_ENTRY,
+ false);
+ }
+ }
+
+ return 0;
+}
+
static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
struct mbox_msghdr *req)
{
@@ -2842,6 +2896,8 @@ static void rvu_update_module_params(struct rvu *rvu)
strscpy(rvu->mkex_pfl_name,
mkex_profile ? mkex_profile : default_pfl_name, MKEX_NAME_LEN);
+ strscpy(rvu->kpu_pfl_name,
+ kpu_profile ? kpu_profile : default_pfl_name, KPU_NAME_LEN);
}
static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index c2cc4806d13c..9e5d9ba6f01e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -223,13 +223,17 @@ struct rvu_pfvf {
u16 maxlen;
u16 minlen;
- u8 pf_set_vf_cfg;
u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
u8 default_mac[ETH_ALEN]; /* MAC address from FWdata */
- /* Broadcast pkt replication info */
+ /* Broadcast/Multicast/Promisc pkt replication info */
u16 bcast_mce_idx;
+ u16 mcast_mce_idx;
+ u16 promisc_mce_idx;
struct nix_mce_list bcast_mce_list;
+ struct nix_mce_list mcast_mce_list;
+ struct nix_mce_list promisc_mce_list;
+ bool use_mce_list;
struct rvu_npc_mcam_rule *def_ucast_rule;
@@ -239,8 +243,18 @@ struct rvu_pfvf {
u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */
u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */
u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */
+ unsigned long flags;
};
+enum rvu_pfvf_flags {
+ NIXLF_INITIALIZED = 0,
+ PF_SET_VF_MAC,
+ PF_SET_VF_CFG,
+ PF_SET_VF_TRUSTED,
+};
+
+#define RVU_CLEAR_VF_PERM ~GENMASK(PF_SET_VF_TRUSTED, PF_SET_VF_MAC)
+
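RVU_CLEAR_VF_PERM clears the three PF_SET_VF_* bits (1 through 3) while preserving NIXLF_INITIALIZED (bit 0). A standalone demonstration, using a userspace stand-in for the kernel's GENMASK (the stand-in assumes a 64-bit unsigned long; the kernel macro behaves the same on such a build):

	#include <stdio.h>

	#define GENMASK(h, l) \
		((~0UL - (1UL << (l)) + 1) & (~0UL >> (63 - (h))))

	enum { NIXLF_INITIALIZED, PF_SET_VF_MAC, PF_SET_VF_CFG,
	       PF_SET_VF_TRUSTED };

	int main(void)
	{
		unsigned long flags = 0xFUL;	/* all four bits set */

		/* ~GENMASK(3, 1) = ~0b1110, so only bit 0 survives */
		flags &= ~GENMASK(PF_SET_VF_TRUSTED, PF_SET_VF_MAC);
		printf("flags = 0x%lx\n", flags);	/* prints 0x1 */
		return 0;
	}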
struct nix_txsch {
struct rsrc_bmap schq;
u8 lvl;
@@ -282,6 +296,13 @@ struct nix_txvlan {
struct mutex rsrc_lock; /* Serialize resource alloc/free */
};
+struct nix_ipolicer {
+ struct rsrc_bmap band_prof;
+ u16 *pfvf_map;
+ u16 *match_id;
+ u16 *ref_count;
+};
+
struct nix_hw {
int blkaddr;
struct rvu *rvu;
@@ -291,6 +312,7 @@ struct nix_hw {
struct nix_mark_format mark_format;
struct nix_lso lso;
struct nix_txvlan txvlan;
+ struct nix_ipolicer *ipolicer;
};
/* RVU block's capabilities or functionality,
@@ -308,6 +330,7 @@ struct hw_cap {
bool nix_rx_multicast; /* Rx packet replication support */
bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */
bool programmable_chans; /* Channels programmable ? */
+ bool ipolicer;
};
struct rvu_hwinfo {
@@ -386,6 +409,7 @@ struct npc_kpu_profile_adapter {
const struct npc_kpu_profile_action *ikpu; /* array[pkinds] */
const struct npc_kpu_profile *kpu; /* array[kpus] */
struct npc_mcam_kex *mkex;
+ bool custom;
size_t pkinds;
size_t kpus;
};
@@ -435,9 +459,13 @@ struct rvu {
struct mutex cgx_cfg_lock; /* serialize cgx configuration */
char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */
+ char kpu_pfl_name[KPU_NAME_LEN]; /* Configured KPU profile name */
/* Firmware data */
struct rvu_fwdata *fwdata;
+ void *kpu_fwdata;
+ size_t kpu_fwdata_sz;
+ void __iomem *kpu_prfl_addr;
/* NPC KPU data */
struct npc_kpu_profile_adapter kpu;
@@ -543,11 +571,16 @@ static inline u16 rvu_nix_chan_cpt(struct rvu *rvu, u8 chan)
/* Function Prototypes
* RVU
*/
-static inline int is_afvf(u16 pcifunc)
+static inline bool is_afvf(u16 pcifunc)
{
return !(pcifunc & ~RVU_PFVF_FUNC_MASK);
}
+static inline bool is_vf(u16 pcifunc)
+{
+ return !!(pcifunc & RVU_PFVF_FUNC_MASK);
+}
+
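is_vf() reads the function field of an RVU PF_FUNC: PF number in the upper bits, function number (0 for the PF itself, VF number + 1) in the low RVU_PFVF_FUNC_MASK bits. That encoding is also why set_vf_perm above builds its target as (pcifunc & ~RVU_PFVF_FUNC_MASK) | (req->vf + 1), and why is_cgx_vf() below combines the function test with the PF's CGX mapping. Assuming the usual 10-bit function field (an assumption; the mask is defined in rvu_struct.h):

	/* RVU_PFVF_FUNC_MASK assumed to be 0x3FF:
	 *   pcifunc 0x0401 -> PF 1, FUNC 1 (i.e. VF0): is_vf() is true
	 *   pcifunc 0x0001 -> PF 0, FUNC 1:            is_afvf() is true
	 *   pcifunc 0x0400 -> PF 1, FUNC 0 (the PF):   both are false
	 */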
/* check if PF_FUNC is AF */
static inline bool is_pffunc_af(u16 pcifunc)
{
@@ -563,6 +596,7 @@ static inline bool is_rvu_fwdata_valid(struct rvu *rvu)
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
+bool is_rsrc_free(struct rsrc_bmap *rsrc, int id);
int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
@@ -603,6 +637,12 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
*lmac_id = (map & 0xF);
}
+static inline bool is_cgx_vf(struct rvu *rvu, u16 pcifunc)
+{
+ return ((pcifunc & RVU_PFVF_FUNC_MASK) &&
+ is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)));
+}
+
#define M(_name, _id, fn_name, req, rsp) \
int rvu_mbox_handler_ ## fn_name(struct rvu *, struct req *, struct rsp *);
MBOX_MESSAGES
@@ -632,10 +672,22 @@ void rvu_nix_freemem(struct rvu *rvu);
int rvu_get_nixlf_count(struct rvu *rvu);
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf);
int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr);
-int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add);
+int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
+ struct nix_mce_list *mce_list,
+ int mce_idx, int mcam_index, bool add);
+void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
+ struct nix_mce_list **mce_list, int *mce_idx);
struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr);
int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr);
void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc);
+int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
+ struct nix_hw **nix_hw, int *blkaddr);
+int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
+ u16 rq_idx, u16 match_id);
+int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_cn10k_aq_enq_req *aq_req,
+ struct nix_cn10k_aq_enq_rsp *aq_rsp,
+ u16 pcifunc, u8 ctype, u32 qidx);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
@@ -646,13 +698,19 @@ int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool en);
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, u8 *mac_addr);
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
- int nixlf, u64 chan, u8 chan_cnt,
- bool allmulti);
-void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
-void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf);
+ int nixlf, u64 chan, u8 chan_cnt);
+void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable);
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan);
-void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable);
+void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable);
+void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ u64 chan);
+void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable);
+void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, int type, bool enable);
void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_free_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 9bf8eaabf9ab..3cc3c6fd1d84 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -1632,6 +1632,165 @@ static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
+static void print_band_prof_ctx(struct seq_file *m,
+ struct nix_bandprof_s *prof)
+{
+ char *str;
+
+ switch (prof->pc_mode) {
+ case NIX_RX_PC_MODE_VLAN:
+ str = "VLAN";
+ break;
+ case NIX_RX_PC_MODE_DSCP:
+ str = "DSCP";
+ break;
+ case NIX_RX_PC_MODE_GEN:
+ str = "Generic";
+ break;
+ case NIX_RX_PC_MODE_RSVD:
+ str = "Reserved";
+ break;
+ }
+ seq_printf(m, "W0: pc_mode\t\t%s\n", str);
+ str = (prof->icolor == 3) ? "Color blind" :
+ (prof->icolor == 0) ? "Green" :
+ (prof->icolor == 1) ? "Yellow" : "Red";
+ seq_printf(m, "W0: icolor\t\t%s\n", str);
+ seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
+ seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
+ seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
+ seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
+ seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
+ seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
+ seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
+ seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
+
+ seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
+ str = (prof->lmode == 0) ? "byte" : "packet";
+ seq_printf(m, "W1: lmode\t\t%s\n", str);
+ seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
+ seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
+ seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
+ seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
+ str = (prof->gc_action == 0) ? "PASS" :
+ (prof->gc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: gc_action\t\t%s\n", str);
+ str = (prof->yc_action == 0) ? "PASS" :
+ (prof->yc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: yc_action\t\t%s\n", str);
+ str = (prof->rc_action == 0) ? "PASS" :
+ (prof->rc_action == 1) ? "DROP" : "RED";
+ seq_printf(m, "W1: rc_action\t\t%s\n", str);
+ seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
+ seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
+ seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
+
+ seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
+ seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
+ seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
+ seq_printf(m, "W4: green_pkt_pass\t%lld\n",
+ (u64)prof->green_pkt_pass);
+ seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
+ (u64)prof->yellow_pkt_pass);
+ seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
+ seq_printf(m, "W7: green_octs_pass\t%lld\n",
+ (u64)prof->green_octs_pass);
+ seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
+ (u64)prof->yellow_octs_pass);
+ seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
+ seq_printf(m, "W10: green_pkt_drop\t%lld\n",
+ (u64)prof->green_pkt_drop);
+ seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
+ (u64)prof->yellow_pkt_drop);
+ seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
+ seq_printf(m, "W13: green_octs_drop\t%lld\n",
+ (u64)prof->green_octs_drop);
+ seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
+ (u64)prof->yellow_octs_drop);
+ seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
+ seq_puts(m, "==============================\n");
+}
+
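One nit on the pc_mode switch in print_band_prof_ctx() above: it covers the four enumerated modes, but if hardware ever reported anything else, str would be read uninitialized (some compilers warn about exactly this). A defensive default is cheap, if wanted (a suggestion, not part of this patch):

	default:
		str = "Unknown";
		break;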
+static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
+{
+ struct nix_hw *nix_hw = m->private;
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ struct rvu *rvu = nix_hw->rvu;
+ struct nix_ipolicer *ipolicer;
+ int layer, prof_idx, idx, rc;
+ u16 pcifunc;
+ char *str;
+
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
+ (layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";
+
+ seq_printf(m, "\n%s bandwidth profiles\n", str);
+ seq_puts(m, "=======================\n");
+
+ ipolicer = &nix_hw->ipolicer[layer];
+
+ for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
+ if (is_rsrc_free(&ipolicer->band_prof, idx))
+ continue;
+
+ prof_idx = (idx & 0x3FFF) | (layer << 14);
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
+ 0x00, NIX_AQ_CTYPE_BANDPROF,
+ prof_idx);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of %s profile %d, err %d\n",
+ __func__, str, idx, rc);
+ return 0;
+ }
+ seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
+ pcifunc = ipolicer->pfvf_map[idx];
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ seq_printf(m, "Allocated to :: PF %d\n",
+ rvu_get_pf(pcifunc));
+ else
+ seq_printf(m, "Allocated to :: PF %d VF %d\n",
+ rvu_get_pf(pcifunc),
+ (pcifunc & RVU_PFVF_FUNC_MASK) - 1);
+ print_band_prof_ctx(m, &aq_rsp.prof);
+ }
+ }
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
+
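The AQ read above addresses a profile with a composite index: bits [13:0] select the slot within a layer and bits [15:14] select the layer, hence prof_idx = (idx & 0x3FFF) | (layer << 14). A worked example:

	/* slot idx = 5 in layer 1:
	 *   prof_idx = (5 & 0x3FFF) | (1 << 14) = 0x4005
	 */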
+static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
+{
+ struct nix_hw *nix_hw = m->private;
+ struct nix_ipolicer *ipolicer;
+ int layer;
+ char *str;
+
+ seq_puts(m, "\nBandwidth profile resource free count\n");
+ seq_puts(m, "=====================================\n");
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
+ (layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ seq_printf(m, "%s :: Max: %4d Free: %4d\n", str,
+ ipolicer->band_prof.max,
+ rvu_rsrc_free_count(&ipolicer->band_prof));
+ }
+ seq_puts(m, "=====================================\n");
+
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
+
static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
{
struct nix_hw *nix_hw;
@@ -1664,6 +1823,10 @@ static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
&rvu_dbg_nix_ndc_rx_hits_miss_fops);
debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
&rvu_dbg_nix_qsize_fops);
+ debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_band_prof_ctx_fops);
+ debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_band_prof_rsrc_fops);
}
static void rvu_dbg_npa_init(struct rvu *rvu)
@@ -2132,6 +2295,7 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
struct rvu *rvu = s->private;
struct npc_mcam *mcam;
int pf, vf = -1;
+ bool enabled;
int blkaddr;
u16 target;
u64 hits;
@@ -2173,7 +2337,9 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
}
rvu_dbg_npc_mcam_show_action(s, iter);
- seq_printf(s, "\tenabled: %s\n", iter->enable ? "yes" : "no");
+
+ enabled = is_mcam_entry_enabled(rvu, mcam, blkaddr, iter->entry);
+ seq_printf(s, "\tenabled: %s\n", enabled ? "yes" : "no");
if (!iter->has_cntr)
continue;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 0a8bd667cb11..d6f8210652c5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -21,6 +21,16 @@
static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc);
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id);
+static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
+ int type, bool add);
+static int nix_setup_ipolicers(struct rvu *rvu,
+ struct nix_hw *nix_hw, int blkaddr);
+static void nix_ipolicer_freemem(struct nix_hw *nix_hw);
+static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
+ struct nix_hw *nix_hw, u16 pcifunc);
+static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
+static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
+ u32 leaf_prof);
enum mc_tbl_sz {
MC_TBL_SZ_256,
@@ -132,6 +142,22 @@ int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf, int *nix_blkaddr)
return 0;
}
+int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
+ struct nix_hw **nix_hw, int *blkaddr)
+{
+ struct rvu_pfvf *pfvf;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ *blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (!pfvf->nixlf || *blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ *nix_hw = get_nix_hw(rvu->hw, *blkaddr);
+ if (!*nix_hw)
+ return NIX_AF_ERR_INVALID_NIXBLK;
+ return 0;
+}
+
static void nix_mce_list_init(struct nix_mce_list *list, int max)
{
INIT_HLIST_HEAD(&list->head);
@@ -274,7 +300,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
pfvf->tx_chan_cnt = 1;
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base,
- pfvf->rx_chan_cnt, false);
+ pfvf->rx_chan_cnt);
break;
}
@@ -285,16 +311,17 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf)
pfvf->rx_chan_base, pfvf->mac_addr);
/* Add this PF_FUNC to bcast pkt replication list */
- err = nix_update_bcast_mce_list(rvu, pcifunc, true);
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, true);
if (err) {
dev_err(rvu->dev,
"Bcast list, failed to enable PF_FUNC 0x%x\n",
pcifunc);
return err;
}
-
+ /* Install MCAM rule matching the Ethernet broadcast MAC address */
rvu_npc_install_bcast_match_entry(rvu, pcifunc,
nixlf, pfvf->rx_chan_base);
+
pfvf->maxlen = NIC_HW_MIN_FRS;
pfvf->minlen = NIC_HW_MIN_FRS;
@@ -310,7 +337,7 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
pfvf->minlen = 0;
/* Remove this PF_FUNC from bcast pkt replication list */
- err = nix_update_bcast_mce_list(rvu, pcifunc, false);
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_BCAST_ENTRY, false);
if (err) {
dev_err(rvu->dev,
"Bcast list, failed to disable PF_FUNC 0x%x\n",
@@ -680,8 +707,11 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
pfvf = rvu_get_pfvf(rvu, pcifunc);
nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
- /* Skip NIXLF check for broadcast MCE entry init */
- if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) {
+ /* Skip NIXLF check for broadcast MCE entry and bandwidth profile
+ * operations done by AF itself.
+ */
+ if (!((!rsp && req->ctype == NIX_AQ_CTYPE_MCE) ||
+ (req->ctype == NIX_AQ_CTYPE_BANDPROF && !pcifunc))) {
if (!pfvf->nixlf || nixlf < 0)
return NIX_AF_ERR_AF_LF_INVALID;
}
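The widened condition reads more easily when split into the two AF-initiated cases it exempts from the NIXLF check; this restates the same predicate, not new behavior:

	bool af_mce_init = !rsp && req->ctype == NIX_AQ_CTYPE_MCE;
	bool af_bandprof = req->ctype == NIX_AQ_CTYPE_BANDPROF &&
			   !pcifunc;

	if (!(af_mce_init || af_bandprof)) {
		if (!pfvf->nixlf || nixlf < 0)
			return NIX_AF_ERR_AF_LF_INVALID;
	}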
@@ -721,6 +751,11 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
if (rsp)
rc = NIX_AF_ERR_AQ_ENQUEUE;
break;
+ case NIX_AQ_CTYPE_BANDPROF:
+ if (nix_verify_bandprof((struct nix_cn10k_aq_enq_req *)req,
+ nix_hw, pcifunc))
+ rc = NIX_AF_ERR_INVALID_BANDPROF;
+ break;
default:
rc = NIX_AF_ERR_AQ_ENQUEUE;
}
@@ -777,6 +812,9 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(mask, &req->mce_mask,
sizeof(struct nix_rx_mce_s));
+ else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
+ memcpy(mask, &req->prof_mask,
+ sizeof(struct nix_bandprof_s));
fallthrough;
case NIX_AQ_INSTOP_INIT:
if (req->ctype == NIX_AQ_CTYPE_RQ)
@@ -789,6 +827,8 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
memcpy(ctx, &req->rss, sizeof(struct nix_rsse_s));
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(ctx, &req->mce, sizeof(struct nix_rx_mce_s));
+ else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
+ memcpy(ctx, &req->prof, sizeof(struct nix_bandprof_s));
break;
case NIX_AQ_INSTOP_NOP:
case NIX_AQ_INSTOP_READ:
@@ -866,6 +906,9 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
else if (req->ctype == NIX_AQ_CTYPE_MCE)
memcpy(&rsp->mce, ctx,
sizeof(struct nix_rx_mce_s));
+ else if (req->ctype == NIX_AQ_CTYPE_BANDPROF)
+ memcpy(&rsp->prof, ctx,
+ sizeof(struct nix_bandprof_s));
}
}
@@ -2203,8 +2246,8 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
aq_req.op = op;
aq_req.qidx = mce;
- /* Forward bcast pkts to RQ0, RSS not needed */
- aq_req.mce.op = 0;
+ /* Use RSS with RSS index 0 */
+ aq_req.mce.op = 1;
aq_req.mce.index = 0;
aq_req.mce.eol = eol;
aq_req.mce.pf_func = pcifunc;
@@ -2222,8 +2265,8 @@ static int nix_blk_setup_mce(struct rvu *rvu, struct nix_hw *nix_hw,
return 0;
}
-static int nix_update_mce_list(struct nix_mce_list *mce_list,
- u16 pcifunc, bool add)
+static int nix_update_mce_list_entry(struct nix_mce_list *mce_list,
+ u16 pcifunc, bool add)
{
struct mce *mce, *tail = NULL;
bool delete = false;
@@ -2234,6 +2277,9 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
if (mce->pcifunc == pcifunc && !add) {
delete = true;
break;
+ } else if (mce->pcifunc == pcifunc && add) {
+ /* entry already exists */
+ return 0;
}
tail = mce;
}
@@ -2261,36 +2307,23 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
return 0;
}
-int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
+int nix_update_mce_list(struct rvu *rvu, u16 pcifunc,
+ struct nix_mce_list *mce_list,
+ int mce_idx, int mcam_index, bool add)
{
- int err = 0, idx, next_idx, last_idx;
- struct nix_mce_list *mce_list;
+ int err = 0, idx, next_idx, last_idx, blkaddr, npc_blkaddr;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
struct nix_mcast *mcast;
struct nix_hw *nix_hw;
- struct rvu_pfvf *pfvf;
struct mce *mce;
- int blkaddr;
-
- /* Broadcast pkt replication is not needed for AF's VFs, hence skip */
- if (is_afvf(pcifunc))
- return 0;
-
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
- if (blkaddr < 0)
- return 0;
- nix_hw = get_nix_hw(rvu->hw, blkaddr);
- if (!nix_hw)
- return 0;
-
- mcast = &nix_hw->mcast;
+ if (!mce_list)
+ return -EINVAL;
/* Get this PF/VF func's MCE index */
- pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
- idx = pfvf->bcast_mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
+ idx = mce_idx + (pcifunc & RVU_PFVF_FUNC_MASK);
- mce_list = &pfvf->bcast_mce_list;
- if (idx > (pfvf->bcast_mce_idx + mce_list->max)) {
+ if (idx > (mce_idx + mce_list->max)) {
dev_err(rvu->dev,
"%s: Idx %d > max MCE idx %d, for PF%d bcast list\n",
__func__, idx, mce_list->max,
@@ -2298,20 +2331,26 @@ int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
return -EINVAL;
}
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mcast = &nix_hw->mcast;
mutex_lock(&mcast->mce_lock);
- err = nix_update_mce_list(mce_list, pcifunc, add);
+ err = nix_update_mce_list_entry(mce_list, pcifunc, add);
if (err)
goto end;
/* Disable MCAM entry in NPC */
if (!mce_list->count) {
- rvu_npc_enable_bcast_entry(rvu, pcifunc, false);
+ npc_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ npc_enable_mcam_entry(rvu, mcam, npc_blkaddr, mcam_index, false);
goto end;
}
/* Dump the updated list to HW */
- idx = pfvf->bcast_mce_idx;
+ idx = mce_idx;
last_idx = idx + mce_list->count - 1;
hlist_for_each_entry(mce, &mce_list->head, node) {
if (idx > last_idx)
@@ -2332,7 +2371,71 @@ end:
return err;
}
-static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
+void nix_get_mce_list(struct rvu *rvu, u16 pcifunc, int type,
+ struct nix_mce_list **mce_list, int *mce_idx)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_pfvf *pfvf;
+
+ if (!hw->cap.nix_rx_multicast ||
+ !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc & ~RVU_PFVF_FUNC_MASK))) {
+ *mce_list = NULL;
+ *mce_idx = 0;
+ return;
+ }
+
+ /* Get this PF/VF func's MCE index */
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+
+ if (type == NIXLF_BCAST_ENTRY) {
+ *mce_list = &pfvf->bcast_mce_list;
+ *mce_idx = pfvf->bcast_mce_idx;
+ } else if (type == NIXLF_ALLMULTI_ENTRY) {
+ *mce_list = &pfvf->mcast_mce_list;
+ *mce_idx = pfvf->mcast_mce_idx;
+ } else if (type == NIXLF_PROMISC_ENTRY) {
+ *mce_list = &pfvf->promisc_mce_list;
+ *mce_idx = pfvf->promisc_mce_idx;
+ } else {
+ *mce_list = NULL;
+ *mce_idx = 0;
+ }
+}
+
+static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
+ int type, bool add)
+{
+ int err = 0, nixlf, blkaddr, mcam_index, mce_idx;
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_mce_list *mce_list;
+
+ /* skip multicast pkt replication for AF's VFs */
+ if (is_afvf(pcifunc))
+ return 0;
+
+ if (!hw->cap.nix_rx_multicast)
+ return 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return -EINVAL;
+
+ nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0);
+ if (nixlf < 0)
+ return -EINVAL;
+
+ nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
+
+ mcam_index = npc_get_nixlf_mcam_index(mcam,
+ pcifunc & ~RVU_PFVF_FUNC_MASK,
+ nixlf, type);
+ err = nix_update_mce_list(rvu, pcifunc, mce_list,
+ mce_idx, mcam_index, add);
+ return err;
+}
+
+static int nix_setup_mce_tables(struct rvu *rvu, struct nix_hw *nix_hw)
{
struct nix_mcast *mcast = &nix_hw->mcast;
int err, pf, numvfs, idx;
@@ -2355,11 +2458,18 @@ static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
if (pfvf->nix_blkaddr != nix_hw->blkaddr)
continue;
- /* Save the start MCE */
+ /* save start idx of broadcast mce list */
pfvf->bcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
-
nix_mce_list_init(&pfvf->bcast_mce_list, numvfs + 1);
+ /* save start idx of multicast mce list */
+ pfvf->mcast_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ nix_mce_list_init(&pfvf->mcast_mce_list, numvfs + 1);
+
+ /* save the start idx of promisc mce list */
+ pfvf->promisc_mce_idx = nix_alloc_mce_list(mcast, numvfs + 1);
+ nix_mce_list_init(&pfvf->promisc_mce_list, numvfs + 1);
+
for (idx = 0; idx < (numvfs + 1); idx++) {
/* idx-0 is for PF, followed by VFs */
pcifunc = (pf << RVU_PFVF_PF_SHIFT);
@@ -2375,6 +2485,22 @@ static int nix_setup_bcast_tables(struct rvu *rvu, struct nix_hw *nix_hw)
pcifunc, 0, true);
if (err)
return err;
+
+ /* add dummy entries to multicast mce list */
+ err = nix_blk_setup_mce(rvu, nix_hw,
+ pfvf->mcast_mce_idx + idx,
+ NIX_AQ_INSTOP_INIT,
+ pcifunc, 0, true);
+ if (err)
+ return err;
+
+ /* add dummy entries to promisc mce list */
+ err = nix_blk_setup_mce(rvu, nix_hw,
+ pfvf->promisc_mce_idx + idx,
+ NIX_AQ_INSTOP_INIT,
+ pcifunc, 0, true);
+ if (err)
+ return err;
}
}
return 0;
@@ -2421,7 +2547,7 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
mutex_init(&mcast->mce_lock);
- return nix_setup_bcast_tables(rvu, nix_hw);
+ return nix_setup_mce_tables(rvu, nix_hw);
}
static int nix_setup_txvlan(struct rvu *rvu, struct nix_hw *nix_hw)
@@ -3035,15 +3161,22 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu,
pfvf = rvu_get_pfvf(rvu, pcifunc);
- /* VF can't overwrite admin(PF) changes */
- if (from_vf && pfvf->pf_set_vf_cfg)
+ /* untrusted VF can't overwrite admin(PF) changes */
+ if (!test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
+ (from_vf && test_bit(PF_SET_VF_MAC, &pfvf->flags))) {
+ dev_warn(rvu->dev,
+ "MAC address set by admin(PF) cannot be overwritten by untrusted VF");
return -EPERM;
+ }
ether_addr_copy(pfvf->mac_addr, req->mac_addr);
rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base, req->mac_addr);
+ if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf)
+ ether_addr_copy(pfvf->default_mac, req->mac_addr);
+
return 0;
}
@@ -3067,30 +3200,75 @@ int rvu_mbox_handler_nix_get_mac_addr(struct rvu *rvu,
int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req,
struct msg_rsp *rsp)
{
- bool allmulti = false, disable_promisc = false;
+ bool allmulti, promisc, nix_rx_multicast;
u16 pcifunc = req->hdr.pcifunc;
- int blkaddr, nixlf, err;
struct rvu_pfvf *pfvf;
+ int nixlf, err;
- err = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ promisc = req->mode & NIX_RX_MODE_PROMISC ? true : false;
+ allmulti = req->mode & NIX_RX_MODE_ALLMULTI ? true : false;
+ pfvf->use_mce_list = req->mode & NIX_RX_MODE_USE_MCE ? true : false;
+
+ nix_rx_multicast = rvu->hw->cap.nix_rx_multicast & pfvf->use_mce_list;
+
+ if (is_vf(pcifunc) && !nix_rx_multicast &&
+ (promisc || allmulti)) {
+ dev_warn_ratelimited(rvu->dev,
+ "VF promisc/multicast not supported\n");
+ return 0;
+ }
+
+ /* untrusted VF can't configure promisc/allmulti */
+ if (is_vf(pcifunc) && !test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) &&
+ (promisc || allmulti))
+ return 0;
+
+ err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
if (err)
return err;
- pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (nix_rx_multicast) {
+ /* add/del this PF_FUNC to/from mcast pkt replication list */
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_ALLMULTI_ENTRY,
+ allmulti);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update pcifunc 0x%x to multicast list\n",
+ pcifunc);
+ return err;
+ }
- if (req->mode & NIX_RX_MODE_PROMISC)
- allmulti = false;
- else if (req->mode & NIX_RX_MODE_ALLMULTI)
- allmulti = true;
- else
- disable_promisc = true;
+ /* add/del this PF_FUNC to/from promisc pkt replication list */
+ err = nix_update_mce_rule(rvu, pcifunc, NIXLF_PROMISC_ENTRY,
+ promisc);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update pcifunc 0x%x to promisc list\n",
+ pcifunc);
+ return err;
+ }
+ }
- if (disable_promisc)
- rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
- else
+ /* install/uninstall allmulti entry */
+ if (allmulti) {
+ rvu_npc_install_allmulti_entry(rvu, pcifunc, nixlf,
+ pfvf->rx_chan_base);
+ } else {
+ if (!nix_rx_multicast)
+ rvu_npc_enable_allmulti_entry(rvu, pcifunc, nixlf, false);
+ }
+
+ /* install/uninstall promisc entry */
+ if (promisc) {
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base,
- pfvf->rx_chan_cnt, allmulti);
+ pfvf->rx_chan_cnt);
+ } else {
+ if (!nix_rx_multicast)
+ rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf, false);
+ }
+
return 0;
}
@@ -3470,6 +3648,10 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
if (err)
return err;
+ err = nix_setup_ipolicers(rvu, nix_hw, blkaddr);
+ if (err)
+ return err;
+
err = nix_af_mark_format_setup(rvu, nix_hw, blkaddr);
if (err)
return err;
@@ -3523,6 +3705,40 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
(ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
ltdefs->rx_isctp.ltype_mask);
+ if (!is_rvu_otx2(rvu)) {
+ /* Enable APAD calculation for other protocols
+ * matching APAD0 and APAD1 lt def registers.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD0,
+ (ltdefs->rx_apad0.valid << 11) |
+ (ltdefs->rx_apad0.lid << 8) |
+ (ltdefs->rx_apad0.ltype_match << 4) |
+ ltdefs->rx_apad0.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_CST_APAD1,
+ (ltdefs->rx_apad1.valid << 11) |
+ (ltdefs->rx_apad1.lid << 8) |
+ (ltdefs->rx_apad1.ltype_match << 4) |
+ ltdefs->rx_apad1.ltype_mask);
+
+ /* Receive ethertype definition register defines layer
+ * information in NPC_RESULT_S to identify the Ethertype
+ * location in L2 header. Used for Ethertype overwriting
+ * in inline IPsec flow.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(0),
+ (ltdefs->rx_et[0].offset << 12) |
+ (ltdefs->rx_et[0].valid << 11) |
+ (ltdefs->rx_et[0].lid << 8) |
+ (ltdefs->rx_et[0].ltype_match << 4) |
+ ltdefs->rx_et[0].ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ET(1),
+ (ltdefs->rx_et[1].offset << 12) |
+ (ltdefs->rx_et[1].valid << 11) |
+ (ltdefs->rx_et[1].lid << 8) |
+ (ltdefs->rx_et[1].ltype_match << 4) |
+ ltdefs->rx_et[1].ltype_mask);
+ }
+
err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
if (err)
return err;
@@ -3584,6 +3800,8 @@ static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
kfree(txsch->schq.bmap);
}
+ nix_ipolicer_freemem(nix_hw);
+
vlan = &nix_hw->txvlan;
kfree(vlan->rsrc.bmap);
mutex_destroy(&vlan->rsrc_lock);
@@ -3614,6 +3832,7 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
int nixlf, err;
err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
@@ -3624,6 +3843,9 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req,
npc_mcam_enable_flows(rvu, pcifunc);
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ set_bit(NIXLF_INITIALIZED, &pfvf->flags);
+
return rvu_cgx_start_stop_io(rvu, pcifunc, true);
}
@@ -3631,6 +3853,7 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_pfvf *pfvf;
int nixlf, err;
err = nix_get_nixlf(rvu, pcifunc, &nixlf, NULL);
@@ -3639,6 +3862,9 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
+
return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}
@@ -3657,6 +3883,8 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
nix_rx_sync(rvu, blkaddr);
nix_txschq_free(rvu, pcifunc);
+ clear_bit(NIXLF_INITIALIZED, &pfvf->flags);
+
rvu_cgx_start_stop_io(rvu, pcifunc, false);
if (pfvf->sq_ctx) {
@@ -3681,6 +3909,8 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
}
nix_ctx_free(rvu, pfvf);
+
+ nix_free_all_bandprof(rvu, pcifunc);
}
#define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
@@ -3789,3 +4019,586 @@ void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
if (from_vf)
ether_addr_copy(pfvf->mac_addr, pfvf->default_mac);
}
+
+/* NIX ingress policers or bandwidth profiles APIs */
+static void nix_config_rx_pkt_policer_precolor(struct rvu *rvu, int blkaddr)
+{
+ struct npc_lt_def_cfg defs, *ltdefs;
+
+ ltdefs = &defs;
+ memcpy(ltdefs, rvu->kpu.lt_def, sizeof(struct npc_lt_def_cfg));
+
+ /* Extract the PCP and DEI fields from the outer VLAN at byte
+ * offset 2 from the start of LB_PTR (i.e., the TAG).
+ * VLAN0 is the outer VLAN and VLAN1 is the inner VLAN. Inner VLAN
+ * fields are considered when 'Tunnel enable' is set in the profile.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN0_PCP_DEI,
+ (2UL << 12) | (ltdefs->ovlan.lid << 8) |
+ (ltdefs->ovlan.ltype_match << 4) |
+ ltdefs->ovlan.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_VLAN1_PCP_DEI,
+ (2UL << 12) | (ltdefs->ivlan.lid << 8) |
+ (ltdefs->ivlan.ltype_match << 4) |
+ ltdefs->ivlan.ltype_mask);
+
+ /* DSCP field in outer and tunneled IPv4 packets */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4_DSCP,
+ (1UL << 12) | (ltdefs->rx_oip4.lid << 8) |
+ (ltdefs->rx_oip4.ltype_match << 4) |
+ ltdefs->rx_oip4.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4_DSCP,
+ (1UL << 12) | (ltdefs->rx_iip4.lid << 8) |
+ (ltdefs->rx_iip4.ltype_match << 4) |
+ ltdefs->rx_iip4.ltype_mask);
+
+ /* DSCP field (traffic class) in outer and tunneled IPv6 packets */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6_DSCP,
+ (1UL << 11) | (ltdefs->rx_oip6.lid << 8) |
+ (ltdefs->rx_oip6.ltype_match << 4) |
+ ltdefs->rx_oip6.ltype_mask);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6_DSCP,
+ (1UL << 11) | (ltdefs->rx_iip6.lid << 8) |
+ (ltdefs->rx_iip6.ltype_match << 4) |
+ ltdefs->rx_iip6.ltype_mask);
+}
+
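Every rvu_write64() above packs the same three layer-type fields into the register's low bits; only the byte-offset field in the upper bits varies per register. A standalone sketch of the shared layout (an illustrative helper, not part of the driver; bit positions are taken from the shifts above):

#include <stdint.h>

/* Assumed from the code above: lid at bits [10:8], ltype_match at
 * bits [7:4], ltype_mask at bits [3:0].
 */
static inline uint64_t ltdef_pack(uint8_t lid, uint8_t ltype_match,
				  uint8_t ltype_mask)
{
	return ((uint64_t)lid << 8) | ((uint64_t)ltype_match << 4) |
	       ltype_mask;
}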
+static int nix_init_policer_context(struct rvu *rvu, struct nix_hw *nix_hw,
+ int layer, int prof_idx)
+{
+ struct nix_cn10k_aq_enq_req aq_req;
+ int rc;
+
+ memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+
+ aq_req.qidx = (prof_idx & 0x3FFF) | (layer << 14);
+ aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq_req.op = NIX_AQ_INSTOP_INIT;
+
+ /* Context is all zeros, submit to AQ */
+ rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)&aq_req, NULL);
+ if (rc)
+ dev_err(rvu->dev, "Failed to INIT bandwidth profile layer %d profile %d\n",
+ layer, prof_idx);
+ return rc;
+}
+
+static int nix_setup_ipolicers(struct rvu *rvu,
+ struct nix_hw *nix_hw, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_ipolicer *ipolicer;
+ int err, layer, prof_idx;
+ u64 cfg;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
+ if (!(cfg & BIT_ULL(61))) {
+ hw->cap.ipolicer = false;
+ return 0;
+ }
+
+ hw->cap.ipolicer = true;
+ nix_hw->ipolicer = devm_kcalloc(rvu->dev, BAND_PROF_NUM_LAYERS,
+ sizeof(*ipolicer), GFP_KERNEL);
+ if (!nix_hw->ipolicer)
+ return -ENOMEM;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_PL_CONST);
+
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ ipolicer = &nix_hw->ipolicer[layer];
+ switch (layer) {
+ case BAND_PROF_LEAF_LAYER:
+ ipolicer->band_prof.max = cfg & 0xFFFF;
+ break;
+ case BAND_PROF_MID_LAYER:
+ ipolicer->band_prof.max = (cfg >> 16) & 0xFFFF;
+ break;
+ case BAND_PROF_TOP_LAYER:
+ ipolicer->band_prof.max = (cfg >> 32) & 0xFFFF;
+ break;
+ }
+
+ if (!ipolicer->band_prof.max)
+ continue;
+
+ err = rvu_alloc_bitmap(&ipolicer->band_prof);
+ if (err)
+ return err;
+
+ ipolicer->pfvf_map = devm_kcalloc(rvu->dev,
+ ipolicer->band_prof.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!ipolicer->pfvf_map)
+ return -ENOMEM;
+
+ ipolicer->match_id = devm_kcalloc(rvu->dev,
+ ipolicer->band_prof.max,
+ sizeof(u16), GFP_KERNEL);
+ if (!ipolicer->match_id)
+ return -ENOMEM;
+
+ for (prof_idx = 0;
+ prof_idx < ipolicer->band_prof.max; prof_idx++) {
+ /* Set AF as current owner for INIT ops to succeed */
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+
+ /* There is no enable bit in the profile context, hence no
+ * context disable either. INIT all contexts here so that a
+ * PF/VF later only has to do a WRITE to set up policer rates
+ * and config.
+ */
+ err = nix_init_policer_context(rvu, nix_hw,
+ layer, prof_idx);
+ if (err)
+ return err;
+ }
+
+ /* Allocate memory for maintaining ref_counts of MID level
+ * profiles; this is needed for aggregating leaf layer
+ * profiles.
+ */
+ if (layer != BAND_PROF_MID_LAYER)
+ continue;
+
+ ipolicer->ref_count = devm_kcalloc(rvu->dev,
+ ipolicer->band_prof.max,
+ sizeof(u16), GFP_KERNEL);
+ }
+
+ /* Set policer timeunit to 2us, i.e. (19 + 1) * 100 nsec = 2us */
+ rvu_write64(rvu, blkaddr, NIX_AF_PL_TS, 19);
+
+ nix_config_rx_pkt_policer_precolor(rvu, blkaddr);
+
+ return 0;
+}
+
+static void nix_ipolicer_freemem(struct nix_hw *nix_hw)
+{
+ struct nix_ipolicer *ipolicer;
+ int layer;
+
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ ipolicer = &nix_hw->ipolicer[layer];
+
+ if (!ipolicer->band_prof.max)
+ continue;
+
+ kfree(ipolicer->band_prof.bmap);
+ }
+}
+
+static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
+ struct nix_hw *nix_hw, u16 pcifunc)
+{
+ struct nix_ipolicer *ipolicer;
+ int layer, hi_layer, prof_idx;
+
+ /* Bits [15:14] in profile index represent layer */
+ layer = (req->qidx >> 14) & 0x03;
+ prof_idx = req->qidx & 0x3FFF;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ if (prof_idx >= ipolicer->band_prof.max)
+ return -EINVAL;
+
+ /* Check whether the profile is allocated to the requesting PCIFUNC,
+ * with the exception of AF, which is allowed to read and update any
+ * context.
+ */
+ if (pcifunc && ipolicer->pfvf_map[prof_idx] != pcifunc)
+ return -EINVAL;
+
+ /* If this profile is linked to a higher layer profile, check
+ * that the linked profile is also allocated to the requesting
+ * PCIFUNC.
+ */
+ if (!req->prof.hl_en)
+ return 0;
+
+ /* Leaf layer profile can link only to mid layer and
+ * mid layer to top layer.
+ */
+ if (layer == BAND_PROF_LEAF_LAYER)
+ hi_layer = BAND_PROF_MID_LAYER;
+ else if (layer == BAND_PROF_MID_LAYER)
+ hi_layer = BAND_PROF_TOP_LAYER;
+ else
+ return -EINVAL;
+
+ ipolicer = &nix_hw->ipolicer[hi_layer];
+ prof_idx = req->prof.band_prof_id;
+ if (prof_idx >= ipolicer->band_prof.max ||
+ ipolicer->pfvf_map[prof_idx] != pcifunc)
+ return -EINVAL;
+
+ return 0;
+}
+
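The AQ addresses a bandwidth profile through a single 16-bit qidx: bits [13:0] carry the profile index and bits [15:14] the layer, matching both the encode in nix_init_policer_context() and the decode above. A standalone sketch of that packing (illustrative helpers, not driver code):

#include <stdint.h>

static inline uint16_t bandprof_qidx(uint16_t prof_idx, uint8_t layer)
{
	/* bits [13:0] = profile index, bits [15:14] = layer */
	return (prof_idx & 0x3FFF) | ((uint16_t)(layer & 0x03) << 14);
}

static inline void bandprof_qidx_decode(uint16_t qidx,
					uint16_t *prof_idx, uint8_t *layer)
{
	*layer = (qidx >> 14) & 0x03;
	*prof_idx = qidx & 0x3FFF;
}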
+int rvu_mbox_handler_nix_bandprof_alloc(struct rvu *rvu,
+ struct nix_bandprof_alloc_req *req,
+ struct nix_bandprof_alloc_rsp *rsp)
+{
+ int blkaddr, layer, prof, idx, err;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mutex_lock(&rvu->rsrc_lock);
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ if (!req->prof_count[layer])
+ continue;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ for (idx = 0; idx < req->prof_count[layer]; idx++) {
+ /* Allocate a max of 'MAX_BANDPROF_PER_PFFUNC' profiles */
+ if (idx == MAX_BANDPROF_PER_PFFUNC)
+ break;
+
+ prof = rvu_alloc_rsrc(&ipolicer->band_prof);
+ if (prof < 0)
+ break;
+ rsp->prof_count[layer]++;
+ rsp->prof_idx[layer][idx] = prof;
+ ipolicer->pfvf_map[prof] = pcifunc;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc)
+{
+ int blkaddr, layer, prof_idx, err;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mutex_lock(&rvu->rsrc_lock);
+ /* Free all the profiles allocated to the PCIFUNC */
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ ipolicer = &nix_hw->ipolicer[layer];
+
+ for (prof_idx = 0; prof_idx < ipolicer->band_prof.max; prof_idx++) {
+ if (ipolicer->pfvf_map[prof_idx] != pcifunc)
+ continue;
+
+ /* Clear ratelimit aggregation, if any */
+ if (layer == BAND_PROF_LEAF_LAYER &&
+ ipolicer->match_id[prof_idx])
+ nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
+
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+ ipolicer->match_id[prof_idx] = 0;
+ rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+int rvu_mbox_handler_nix_bandprof_free(struct rvu *rvu,
+ struct nix_bandprof_free_req *req,
+ struct msg_rsp *rsp)
+{
+ int blkaddr, layer, prof_idx, idx, err;
+ u16 pcifunc = req->hdr.pcifunc;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+
+ if (req->free_all)
+ return nix_free_all_bandprof(rvu, pcifunc);
+
+ if (!rvu->hw->cap.ipolicer)
+ return NIX_AF_ERR_IPOLICER_NOTSUPP;
+
+ err = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (err)
+ return err;
+
+ mutex_lock(&rvu->rsrc_lock);
+ /* Free the requested profile indices */
+ for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
+ if (layer == BAND_PROF_INVAL_LAYER)
+ continue;
+ if (!req->prof_count[layer])
+ continue;
+
+ ipolicer = &nix_hw->ipolicer[layer];
+ for (idx = 0; idx < req->prof_count[layer]; idx++) {
+ prof_idx = req->prof_idx[layer][idx];
+ if (prof_idx >= ipolicer->band_prof.max ||
+ ipolicer->pfvf_map[prof_idx] != pcifunc)
+ continue;
+
+ /* Clear ratelimit aggregation, if any */
+ if (layer == BAND_PROF_LEAF_LAYER &&
+ ipolicer->match_id[prof_idx])
+ nix_clear_ratelimit_aggr(rvu, nix_hw, prof_idx);
+
+ ipolicer->pfvf_map[prof_idx] = 0x00;
+ ipolicer->match_id[prof_idx] = 0;
+ rvu_free_rsrc(&ipolicer->band_prof, prof_idx);
+ if (idx == MAX_BANDPROF_PER_PFFUNC)
+ break;
+ }
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_cn10k_aq_enq_req *aq_req,
+ struct nix_cn10k_aq_enq_rsp *aq_rsp,
+ u16 pcifunc, u8 ctype, u32 qidx)
+{
+ memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+ aq_req->hdr.pcifunc = pcifunc;
+ aq_req->ctype = ctype;
+ aq_req->op = NIX_AQ_INSTOP_READ;
+ aq_req->qidx = qidx;
+
+ return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)aq_req,
+ (struct nix_aq_enq_rsp *)aq_rsp);
+}
+
+static int nix_ipolicer_map_leaf_midprofs(struct rvu *rvu,
+ struct nix_hw *nix_hw,
+ struct nix_cn10k_aq_enq_req *aq_req,
+ struct nix_cn10k_aq_enq_rsp *aq_rsp,
+ u32 leaf_prof, u16 mid_prof)
+{
+ memset(aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+ aq_req->hdr.pcifunc = 0x00;
+ aq_req->ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq_req->op = NIX_AQ_INSTOP_WRITE;
+ aq_req->qidx = leaf_prof;
+
+ aq_req->prof.band_prof_id = mid_prof;
+ aq_req->prof_mask.band_prof_id = GENMASK(6, 0);
+ aq_req->prof.hl_en = 1;
+ aq_req->prof_mask.hl_en = 1;
+
+ return rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)aq_req,
+ (struct nix_aq_enq_rsp *)aq_rsp);
+}
+
+int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
+ u16 rq_idx, u16 match_id)
+{
+ int leaf_prof, mid_prof, leaf_match;
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ struct nix_ipolicer *ipolicer;
+ struct nix_hw *nix_hw;
+ int blkaddr, idx, rc;
+
+ if (!rvu->hw->cap.ipolicer)
+ return 0;
+
+ rc = nix_get_struct_ptrs(rvu, pcifunc, &nix_hw, &blkaddr);
+ if (rc)
+ return rc;
+
+ /* Fetch the RQ's context to see if policing is enabled */
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, pcifunc,
+ NIX_AQ_CTYPE_RQ, rq_idx);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch RQ%d context of PFFUNC 0x%x\n",
+ __func__, rq_idx, pcifunc);
+ return rc;
+ }
+
+ if (!aq_rsp.rq.policer_ena)
+ return 0;
+
+ /* Get the bandwidth profile ID mapped to this RQ */
+ leaf_prof = aq_rsp.rq.band_prof_id;
+
+ ipolicer = &nix_hw->ipolicer[BAND_PROF_LEAF_LAYER];
+ ipolicer->match_id[leaf_prof] = match_id;
+
+ /* Check if any other leaf profile is marked with the same match_id */
+ for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
+ if (idx == leaf_prof)
+ continue;
+ if (ipolicer->match_id[idx] != match_id)
+ continue;
+
+ leaf_match = idx;
+ break;
+ }
+
+ if (idx == ipolicer->band_prof.max)
+ return 0;
+
+ /* Fetch the matching profile's context to check if it's already
+ * mapped to a mid level profile.
+ */
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
+ NIX_AQ_CTYPE_BANDPROF, leaf_match);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of leaf profile %d\n",
+ __func__, leaf_match);
+ return rc;
+ }
+
+ ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
+ if (aq_rsp.prof.hl_en) {
+ /* Get the mid layer profile index and also map the
+ * leaf_prof index to it, so that flows steered to
+ * different RQs but marked with the same match_id
+ * are rate limited in an aggregate fashion.
+ */
+ mid_prof = aq_rsp.prof.band_prof_id;
+ rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
+ &aq_req, &aq_rsp,
+ leaf_prof, mid_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
+ __func__, leaf_prof, mid_prof);
+ goto exit;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ ipolicer->ref_count[mid_prof]++;
+ mutex_unlock(&rvu->rsrc_lock);
+ goto exit;
+ }
+
+ /* Allocate a mid layer profile and
+ * map both 'leaf_prof' and 'leaf_match' profiles to it.
+ */
+ mutex_lock(&rvu->rsrc_lock);
+ mid_prof = rvu_alloc_rsrc(&ipolicer->band_prof);
+ if (mid_prof < 0) {
+ dev_err(rvu->dev,
+ "%s: Unable to allocate mid layer profile\n", __func__);
+ mutex_unlock(&rvu->rsrc_lock);
+ goto exit;
+ }
+ mutex_unlock(&rvu->rsrc_lock);
+ ipolicer->pfvf_map[mid_prof] = 0x00;
+ ipolicer->ref_count[mid_prof] = 0;
+
+ /* Initialize mid layer profile same as 'leaf_prof' */
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
+ NIX_AQ_CTYPE_BANDPROF, leaf_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of leaf profile %d\n",
+ __func__, leaf_prof);
+ goto exit;
+ }
+
+ memset(&aq_req, 0, sizeof(struct nix_cn10k_aq_enq_req));
+ aq_req.hdr.pcifunc = 0x00;
+ aq_req.qidx = (mid_prof & 0x3FFF) | (BAND_PROF_MID_LAYER << 14);
+ aq_req.ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq_req.op = NIX_AQ_INSTOP_WRITE;
+ memcpy(&aq_req.prof, &aq_rsp.prof, sizeof(struct nix_bandprof_s));
+ /* Clear higher layer enable bit in the mid profile, just in case */
+ aq_req.prof.hl_en = 0;
+ aq_req.prof_mask.hl_en = 1;
+
+ rc = rvu_nix_blk_aq_enq_inst(rvu, nix_hw,
+ (struct nix_aq_enq_req *)&aq_req, NULL);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to INIT context of mid layer profile %d\n",
+ __func__, mid_prof);
+ goto exit;
+ }
+
+ /* Map both leaf profiles to this mid layer profile */
+ rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
+ &aq_req, &aq_rsp,
+ leaf_prof, mid_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
+ __func__, leaf_prof, mid_prof);
+ goto exit;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ ipolicer->ref_count[mid_prof]++;
+ mutex_unlock(&rvu->rsrc_lock);
+
+ rc = nix_ipolicer_map_leaf_midprofs(rvu, nix_hw,
+ &aq_req, &aq_rsp,
+ leaf_match, mid_prof);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to map leaf(%d) and mid(%d) profiles\n",
+ __func__, leaf_match, mid_prof);
+ ipolicer->ref_count[mid_prof]--;
+ goto exit;
+ }
+
+ mutex_lock(&rvu->rsrc_lock);
+ ipolicer->ref_count[mid_prof]++;
+ mutex_unlock(&rvu->rsrc_lock);
+
+exit:
+ return rc;
+}
+
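The net effect of the function above: leaf profiles carrying the same match_id are chained to one shared mid-layer profile, so flows steered to different RQs are policed against a single aggregate rate. Schematically (an illustration, not driver code):

/*
 *   RQ0 -> leaf profile A (match_id M) \
 *                                       -> shared mid profile
 *   RQ1 -> leaf profile B (match_id M) /
 *
 * The mid profile's ref_count tracks how many leaves point at it;
 * nix_clear_ratelimit_aggr() frees it once the count drops to zero.
 */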
+/* Called with mutex rsrc_lock */
+static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
+ u32 leaf_prof)
+{
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ struct nix_ipolicer *ipolicer;
+ u16 mid_prof;
+ int rc;
+
+ mutex_unlock(&rvu->rsrc_lock);
+
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp, 0x00,
+ NIX_AQ_CTYPE_BANDPROF, leaf_prof);
+
+ mutex_lock(&rvu->rsrc_lock);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch context of leaf profile %d\n",
+ __func__, leaf_prof);
+ return;
+ }
+
+ if (!aq_rsp.prof.hl_en)
+ return;
+
+ mid_prof = aq_rsp.prof.band_prof_id;
+ ipolicer = &nix_hw->ipolicer[BAND_PROF_MID_LAYER];
+ ipolicer->ref_count[mid_prof]--;
+ /* If ref_count is zero, free mid layer profile */
+ if (!ipolicer->ref_count[mid_prof]) {
+ ipolicer->pfvf_map[mid_prof] = 0x00;
+ rvu_free_rsrc(&ipolicer->band_prof, mid_prof);
+ }
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 0bc4529691ec..3612e0a2cab3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -19,7 +19,7 @@
#include "cgx.h"
#include "npc_profile.h"
-#define RSVD_MCAM_ENTRIES_PER_PF 2 /* Bcast & Promisc */
+#define RSVD_MCAM_ENTRIES_PER_PF 3 /* Broadcast, Promisc and AllMulticast */
#define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */
#define NPC_PARSE_RESULT_DMAC_OFFSET 8
@@ -27,6 +27,8 @@
#define NPC_KEX_CHAN_MASK 0xFFFULL
#define NPC_KEX_PF_FUNC_MASK 0xFFFFULL
+#define ALIGN_8B_CEIL(__a) (((__a) + 7) & (-8))
+
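ALIGN_8B_CEIL() is the usual add-then-mask round-up; (-8) equals ~7 in two's complement. A quick standalone sanity check (not driver code):

#include <assert.h>

#define ALIGN_8B_CEIL(__a) (((__a) + 7) & (-8))

int main(void)
{
	assert(ALIGN_8B_CEIL(0) == 0);
	assert(ALIGN_8B_CEIL(1) == 8);
	assert(ALIGN_8B_CEIL(13) == 16);
	assert(ALIGN_8B_CEIL(16) == 16);
	return 0;
}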
static const char def_pfl_name[] = "default";
static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
@@ -212,8 +214,10 @@ int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
*/
if (type == NIXLF_BCAST_ENTRY)
return index;
- else if (type == NIXLF_PROMISC_ENTRY)
+ else if (type == NIXLF_ALLMULTI_ENTRY)
return index + 1;
+ else if (type == NIXLF_PROMISC_ENTRY)
+ return index + 2;
}
return npc_get_ucast_mcam_index(mcam, pcifunc, nixlf);
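With RSVD_MCAM_ENTRIES_PER_PF raised to 3, the per-PF reserved entries line up as below (offsets from the PF's first reserved index, mirroring the returns above):

/*
 *   index + 0  broadcast match entry
 *   index + 1  all-multicast match entry
 *   index + 2  promiscuous match entry
 */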
@@ -411,37 +415,49 @@ static void npc_fill_entryword(struct mcam_entry *entry, int idx,
}
}
-static void npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam,
- int blkaddr, int index,
- struct mcam_entry *entry)
+static u64 npc_get_default_entry_action(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, u16 pf_func)
+{
+ int bank, nixlf, index;
+
+ /* get ucast entry rule entry index */
+ nix_get_nixlf(rvu, pf_func, &nixlf, NULL);
+ index = npc_get_nixlf_mcam_index(mcam, pf_func, nixlf,
+ NIXLF_UCAST_ENTRY);
+ bank = npc_get_bank(mcam, index);
+ index &= (mcam->banksize - 1);
+
+ return rvu_read64(rvu, blkaddr,
+ NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+}
+
+static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
+ int blkaddr, int index, struct mcam_entry *entry,
+ bool *enable)
{
u16 owner, target_func;
struct rvu_pfvf *pfvf;
- int bank, nixlf;
u64 rx_action;
owner = mcam->entry2pfvf_map[index];
target_func = (entry->action >> 4) & 0xffff;
- /* return incase target is PF or LBK or rule owner is not PF */
+ /* do nothing when target is LBK/PF or owner is not PF */
if (is_afvf(target_func) || (owner & RVU_PFVF_FUNC_MASK) ||
!(target_func & RVU_PFVF_FUNC_MASK))
return;
+ /* save entry2target_pffunc */
pfvf = rvu_get_pfvf(rvu, target_func);
mcam->entry2target_pffunc[index] = target_func;
- /* return if nixlf is not attached or initialized */
- if (!is_nixlf_attached(rvu, target_func) || !pfvf->def_ucast_rule)
- return;
- /* get VF ucast entry rule */
- nix_get_nixlf(rvu, target_func, &nixlf, NULL);
- index = npc_get_nixlf_mcam_index(mcam, target_func,
- nixlf, NIXLF_UCAST_ENTRY);
- bank = npc_get_bank(mcam, index);
- index &= (mcam->banksize - 1);
+ /* don't enable rule when nixlf not attached or initialized */
+ if (!(is_nixlf_attached(rvu, target_func) &&
+ test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
+ *enable = false;
- rx_action = rvu_read64(rvu, blkaddr,
- NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
+ /* copy VF default entry action to the VF mcam entry */
+ rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
+ target_func);
if (rx_action)
entry->action = rx_action;
}
@@ -493,10 +509,9 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
NPC_AF_MCAMEX_BANKX_CAMX_W1(index, bank, 0), cam0);
}
- /* copy VF default entry action to the VF mcam entry */
+ /* PF installing VF rule */
if (intf == NIX_INTF_RX && actindex < mcam->bmap_entries)
- npc_get_default_entry_action(rvu, mcam, blkaddr, actindex,
- entry);
+ npc_fixup_vf_rule(rvu, mcam, blkaddr, index, entry, &enable);
/* Set 'action' */
rvu_write64(rvu, blkaddr,
@@ -647,30 +662,32 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
}
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
- int nixlf, u64 chan, u8 chan_cnt,
- bool allmulti)
+ int nixlf, u64 chan, u8 chan_cnt)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct npc_install_flow_req req = { 0 };
struct npc_install_flow_rsp rsp = { 0 };
struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
int blkaddr, ucast_idx, index;
- u8 mac_addr[ETH_ALEN] = { 0 };
struct nix_rx_action action;
u64 relaxed_mask;
- /* Only PF or AF VF can add a promiscuous entry */
- if ((pcifunc & RVU_PFVF_FUNC_MASK) && !is_afvf(pcifunc))
+ if (!hw->cap.nix_rx_multicast && is_cgx_vf(rvu, pcifunc))
return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
return;
- *(u64 *)&action = 0x00;
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_PROMISC_ENTRY);
+ if (is_cgx_vf(rvu, pcifunc))
+ index = npc_get_nixlf_mcam_index(mcam,
+ pcifunc & ~RVU_PFVF_FUNC_MASK,
+ nixlf, NIXLF_PROMISC_ENTRY);
+
/* If the corresponding PF's ucast action is RSS,
* use the same action for promisc also
*/
@@ -678,19 +695,20 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
nixlf, NIXLF_UCAST_ENTRY);
if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
*(u64 *)&action = npc_get_mcam_action(rvu, mcam,
- blkaddr, ucast_idx);
+ blkaddr, ucast_idx);
if (action.op != NIX_RX_ACTIONOP_RSS) {
*(u64 *)&action = 0x00;
action.op = NIX_RX_ACTIONOP_UCAST;
- action.pf_func = pcifunc;
}
- if (allmulti) {
- mac_addr[0] = 0x01; /* LSB bit of 1st byte in DMAC */
- ether_addr_copy(req.packet.dmac, mac_addr);
- ether_addr_copy(req.mask.dmac, mac_addr);
- req.features = BIT_ULL(NPC_DMAC);
+ /* RX_ACTION set to MCAST for CGX PFs */
+ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list &&
+ is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
+ *(u64 *)&action = 0x00;
+ action.op = NIX_RX_ACTIONOP_MCAST;
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+ action.index = pfvf->promisc_mce_idx;
}
req.chan_mask = 0xFFFU;
@@ -718,8 +736,8 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
-static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc,
- int nixlf, bool enable)
+void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, bool enable)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
int blkaddr, index;
@@ -728,25 +746,14 @@ static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc,
if (blkaddr < 0)
return;
- /* Only PF's have a promiscuous entry */
- if (pcifunc & RVU_PFVF_FUNC_MASK)
- return;
+ /* Get 'pcifunc' of PF device */
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
index = npc_get_nixlf_mcam_index(mcam, pcifunc,
nixlf, NIXLF_PROMISC_ENTRY);
npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}
-void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
-{
- npc_enadis_promisc_entry(rvu, pcifunc, nixlf, false);
-}
-
-void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf)
-{
- npc_enadis_promisc_entry(rvu, pcifunc, nixlf, true);
-}
-
void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan)
{
@@ -756,8 +763,6 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
struct npc_mcam *mcam = &rvu->hw->mcam;
struct rvu_hwinfo *hw = rvu->hw;
int blkaddr, index;
- u32 req_index = 0;
- u8 op;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
@@ -770,7 +775,7 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
/* If pkt replication is not supported,
* then only PF is allowed to add a bcast match entry.
*/
- if (!hw->cap.nix_rx_multicast && pcifunc & RVU_PFVF_FUNC_MASK)
+ if (!hw->cap.nix_rx_multicast && is_vf(pcifunc))
return;
/* Get 'pcifunc' of PF device */
@@ -784,10 +789,10 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
* so install entry with UCAST action, so that PF
* receives all broadcast packets.
*/
- op = NIX_RX_ACTIONOP_UCAST;
+ req.op = NIX_RX_ACTIONOP_UCAST;
} else {
- op = NIX_RX_ACTIONOP_MCAST;
- req_index = pfvf->bcast_mce_idx;
+ req.op = NIX_RX_ACTIONOP_MCAST;
+ req.index = pfvf->bcast_mce_idx;
}
eth_broadcast_addr((u8 *)&req.packet.dmac);
@@ -796,15 +801,14 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc,
req.channel = chan;
req.intf = pfvf->nix_rx_intf;
req.entry = index;
- req.op = op;
req.hdr.pcifunc = 0; /* AF is requester */
req.vf = pcifunc;
- req.index = req_index;
rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
}
-void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable)
+void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
int blkaddr, index;
@@ -816,7 +820,104 @@ void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable)
/* Get 'pcifunc' of PF device */
pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
- index = npc_get_nixlf_mcam_index(mcam, pcifunc, 0, NIXLF_BCAST_ENTRY);
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
+ NIXLF_BCAST_ENTRY);
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+}
+
+void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ u64 chan)
+{
+ struct npc_install_flow_req req = { 0 };
+ struct npc_install_flow_rsp rsp = { 0 };
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ int blkaddr, ucast_idx, index;
+ u8 mac_addr[ETH_ALEN] = { 0 };
+ struct nix_rx_action action;
+ struct rvu_pfvf *pfvf;
+ u16 vf_func;
+
+ /* Only CGX PF/VF can add allmulticast entry */
+ if (is_afvf(pcifunc))
+ return;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ vf_func = pcifunc & RVU_PFVF_FUNC_MASK;
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_ALLMULTI_ENTRY);
+
+ /* If the corresponding PF's ucast action is RSS,
+ * use the same action for multicast entry also
+ */
+ ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc,
+ nixlf, NIXLF_UCAST_ENTRY);
+ if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx))
+ *(u64 *)&action = npc_get_mcam_action(rvu, mcam,
+ blkaddr, ucast_idx);
+
+ if (action.op != NIX_RX_ACTIONOP_RSS) {
+ *(u64 *)&action = 0x00;
+ action.op = NIX_RX_ACTIONOP_UCAST;
+ action.pf_func = pcifunc;
+ }
+
+ /* RX_ACTION set to MCAST for CGX PFs */
+ if (hw->cap.nix_rx_multicast && pfvf->use_mce_list) {
+ *(u64 *)&action = 0x00;
+ action.op = NIX_RX_ACTIONOP_MCAST;
+ action.index = pfvf->mcast_mce_idx;
+ }
+
+ mac_addr[0] = 0x01; /* LSB of 1st byte in DMAC (multicast bit) */
+ ether_addr_copy(req.packet.dmac, mac_addr);
+ ether_addr_copy(req.mask.dmac, mac_addr);
+ req.features = BIT_ULL(NPC_DMAC);
+
+ /* For CN10K the upper two bits of the channel number are the
+ * CPT channel number. By masking out these bits in the MCAM
+ * entry, the same entry used for NIX will also match packets
+ * received from CPT for parsing.
+ */
+ if (!is_rvu_otx2(rvu))
+ req.chan_mask = NIX_CHAN_CPT_X2P_MASK;
+ else
+ req.chan_mask = 0xFFFU;
+
+ req.channel = chan;
+ req.intf = pfvf->nix_rx_intf;
+ req.entry = index;
+ req.op = action.op;
+ req.hdr.pcifunc = 0; /* AF is requester */
+ req.vf = pcifunc | vf_func;
+ req.index = action.index;
+ req.match_id = action.match_id;
+ req.flow_key_alg = action.flow_key_alg;
+
+ rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp);
+}
+
+void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
+ bool enable)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ int blkaddr, index;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ /* Get 'pcifunc' of PF device */
+ pcifunc = pcifunc & ~RVU_PFVF_FUNC_MASK;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf,
+ NIXLF_ALLMULTI_ENTRY);
npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
}
@@ -858,6 +959,7 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
int group, int alg_idx, int mcam_index)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
struct nix_rx_action action;
int blkaddr, index, bank;
struct rvu_pfvf *pfvf;
@@ -913,7 +1015,8 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
/* If PF's promiscuous entry is enabled,
* Set RSS action for that entry as well
*/
- if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
+ if ((!hw->cap.nix_rx_multicast || !pfvf->use_mce_list) &&
+ is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) {
bank = npc_get_bank(mcam, index);
index &= (mcam->banksize - 1);
@@ -923,12 +1026,47 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf,
}
}
+void npc_enadis_default_mce_entry(struct rvu *rvu, u16 pcifunc,
+ int nixlf, int type, bool enable)
+{
+ struct npc_mcam *mcam = &rvu->hw->mcam;
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct nix_mce_list *mce_list;
+ int index, blkaddr, mce_idx;
+ struct rvu_pfvf *pfvf;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
+ if (blkaddr < 0)
+ return;
+
+ index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK,
+ nixlf, type);
+
+ /* Toggle the PF's MCAM entry directly when packet replication is not supported by hw */
+ if (!hw->cap.nix_rx_multicast && !is_vf(pcifunc)) {
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+ return;
+ }
+
+ /* return in case the mce list is not enabled */
+ pfvf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
+ if (hw->cap.nix_rx_multicast && is_vf(pcifunc) &&
+ type != NIXLF_BCAST_ENTRY && !pfvf->use_mce_list)
+ return;
+
+ nix_get_mce_list(rvu, pcifunc, type, &mce_list, &mce_idx);
+
+ nix_update_mce_list(rvu, pcifunc, mce_list,
+ mce_idx, index, enable);
+ if (enable)
+ npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
+}
+
static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
int nixlf, bool enable)
{
struct npc_mcam *mcam = &rvu->hw->mcam;
- struct nix_rx_action action;
- int index, bank, blkaddr;
+ int index, blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0)
@@ -939,48 +1077,33 @@ static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc,
nixlf, NIXLF_UCAST_ENTRY);
npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable);
- /* For PF, ena/dis promisc and bcast MCAM match entries.
- * For VFs add/delete from bcast list when RX multicast
- * feature is present.
+ /* Nothing to do for VFs on platforms where pkt replication
+ * is not supported
*/
- if (pcifunc & RVU_PFVF_FUNC_MASK && !rvu->hw->cap.nix_rx_multicast)
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) && !rvu->hw->cap.nix_rx_multicast)
return;
- /* For bcast, enable/disable only if it's action is not
- * packet replication, incase if action is replication
- * then this PF/VF's nixlf is removed from bcast replication
- * list.
- */
- index = npc_get_nixlf_mcam_index(mcam, pcifunc & ~RVU_PFVF_FUNC_MASK,
- nixlf, NIXLF_BCAST_ENTRY);
- bank = npc_get_bank(mcam, index);
- *(u64 *)&action = rvu_read64(rvu, blkaddr,
- NPC_AF_MCAMEX_BANKX_ACTION(index & (mcam->banksize - 1), bank));
-
- /* VFs will not have BCAST entry */
- if (action.op != NIX_RX_ACTIONOP_MCAST &&
- !(pcifunc & RVU_PFVF_FUNC_MASK)) {
- npc_enable_mcam_entry(rvu, mcam,
- blkaddr, index, enable);
- } else {
- nix_update_bcast_mce_list(rvu, pcifunc, enable);
- /* Enable PF's BCAST entry for packet replication */
- rvu_npc_enable_bcast_entry(rvu, pcifunc, enable);
- }
-
- if (enable)
- rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf);
- else
- rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf);
+ /* add/delete pf_func to broadcast MCE list */
+ npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
+ NIXLF_BCAST_ENTRY, enable);
}
void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
npc_enadis_default_entries(rvu, pcifunc, nixlf, false);
+
+ /* Delete multicast and promisc MCAM entries */
+ npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
+ NIXLF_ALLMULTI_ENTRY, false);
+ npc_enadis_default_mce_entry(rvu, pcifunc, nixlf,
+ NIXLF_PROMISC_ENTRY, false);
}
void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
{
+ /* Enable only the broadcast match entry; promisc/allmulti entries
+ * are enabled by the set_rx_mode mbox handler.
+ */
npc_enadis_default_entries(rvu, pcifunc, nixlf, true);
}
@@ -1000,7 +1123,8 @@ void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
/* Disable MCAM entries directing traffic to this 'pcifunc' */
list_for_each_entry_safe(rule, tmp, &mcam->mcam_rules, list) {
if (is_npc_intf_rx(rule->intf) &&
- rule->rx_action.pf_func == pcifunc) {
+ rule->rx_action.pf_func == pcifunc &&
+ rule->rx_action.op != NIX_RX_ACTIONOP_MCAST) {
npc_enable_mcam_entry(rvu, mcam, blkaddr,
rule->entry, false);
rule->enable = false;
@@ -1134,6 +1258,30 @@ static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
}
}
+static int npc_fwdb_prfl_img_map(struct rvu *rvu, void __iomem **prfl_img_addr,
+ u64 *size)
+{
+ u64 prfl_addr, prfl_sz;
+
+ if (!rvu->fwdata)
+ return -EINVAL;
+
+ prfl_addr = rvu->fwdata->mcam_addr;
+ prfl_sz = rvu->fwdata->mcam_sz;
+
+ if (!prfl_addr || !prfl_sz)
+ return -EINVAL;
+
+ *prfl_img_addr = ioremap_wc(prfl_addr, prfl_sz);
+ if (!(*prfl_img_addr))
+ return -ENOMEM;
+
+ *size = prfl_sz;
+
+ return 0;
+}
+
+/* strtoull of "mkexprof" with base:36 */
#define MKEX_END_SIGN 0xdeadbeef
static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr,
@@ -1141,26 +1289,21 @@ static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr,
{
struct device *dev = &rvu->pdev->dev;
struct npc_mcam_kex *mcam_kex;
- void *mkex_prfl_addr = NULL;
- u64 prfl_addr, prfl_sz;
+ void __iomem *mkex_prfl_addr = NULL;
+ u64 prfl_sz;
+ int ret;
/* If user not selected mkex profile */
- if (!strncmp(mkex_profile, def_pfl_name, MKEX_NAME_LEN))
- goto program_mkex;
-
- if (!rvu->fwdata)
- goto program_mkex;
- prfl_addr = rvu->fwdata->mcam_addr;
- prfl_sz = rvu->fwdata->mcam_sz;
-
- if (!prfl_addr || !prfl_sz)
+ if (rvu->kpu_fwdata_sz ||
+ !strncmp(mkex_profile, def_pfl_name, MKEX_NAME_LEN))
goto program_mkex;
- mkex_prfl_addr = memremap(prfl_addr, prfl_sz, MEMREMAP_WC);
- if (!mkex_prfl_addr)
+ /* Setting up the mapping for mkex profile image */
+ ret = npc_fwdb_prfl_img_map(rvu, &mkex_prfl_addr, &prfl_sz);
+ if (ret < 0)
goto program_mkex;
- mcam_kex = (struct npc_mcam_kex *)mkex_prfl_addr;
+ mcam_kex = (struct npc_mcam_kex __force *)mkex_prfl_addr;
while (((s64)prfl_sz > 0) && (mcam_kex->mkex_sign != MKEX_END_SIGN)) {
/* Compare with mkex mod_param name string */
@@ -1186,7 +1329,7 @@ program_mkex:
/* Program selected mkex profile */
npc_program_mkex_profile(rvu, blkaddr, rvu->kpu.mkex);
if (mkex_prfl_addr)
- memunmap(mkex_prfl_addr);
+ iounmap(mkex_prfl_addr);
}
static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
@@ -1263,6 +1406,7 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
const struct npc_kpu_profile *profile)
{
int entry, num_entries, max_entries;
+ u64 entry_mask;
if (profile->cam_entries != profile->action_entries) {
dev_err(rvu->dev,
@@ -1286,8 +1430,12 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
/* Enable all programmed entries */
num_entries = min_t(int, profile->action_entries, profile->cam_entries);
+ entry_mask = enable_mask(num_entries);
+ /* Disable first KPU_MAX_CST_ENT entries for built-in profile */
+ if (!rvu->kpu.custom)
+ entry_mask |= GENMASK_ULL(KPU_MAX_CST_ENT - 1, 0);
rvu_write64(rvu, blkaddr,
- NPC_AF_KPUX_ENTRY_DISX(kpu, 0), enable_mask(num_entries));
+ NPC_AF_KPUX_ENTRY_DISX(kpu, 0), entry_mask);
if (num_entries > 64) {
rvu_write64(rvu, blkaddr,
NPC_AF_KPUX_ENTRY_DISX(kpu, 1),
@@ -1300,6 +1448,7 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile)
{
+ profile->custom = 0;
profile->name = def_pfl_name;
profile->version = NPC_KPU_PROFILE_VER;
profile->ikpu = ikpu_action_entries;
@@ -1312,10 +1461,245 @@ static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile)
return 0;
}
+static int npc_apply_custom_kpu(struct rvu *rvu,
+ struct npc_kpu_profile_adapter *profile)
+{
+ size_t hdr_sz = sizeof(struct npc_kpu_profile_fwdata), offset = 0;
+ struct npc_kpu_profile_fwdata *fw = rvu->kpu_fwdata;
+ struct npc_kpu_profile_action *action;
+ struct npc_kpu_profile_cam *cam;
+ struct npc_kpu_fwdata *fw_kpu;
+ int entries;
+ u16 kpu, entry;
+
+ if (rvu->kpu_fwdata_sz < hdr_sz) {
+ dev_warn(rvu->dev, "Invalid KPU profile size\n");
+ return -EINVAL;
+ }
+ if (le64_to_cpu(fw->signature) != KPU_SIGN) {
+ dev_warn(rvu->dev, "Invalid KPU profile signature %llx\n",
+ fw->signature);
+ return -EINVAL;
+ }
+ /* Verify that a known profile structure is being used */
+ if (NPC_KPU_VER_MAJ(profile->version) >
+ NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER)) {
+ dev_warn(rvu->dev, "Not supported Major version: %d > %d\n",
+ NPC_KPU_VER_MAJ(profile->version),
+ NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER));
+ return -EINVAL;
+ }
+ /* Verify if profile is aligned with the required kernel changes */
+ if (NPC_KPU_VER_MIN(profile->version) <
+ NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER)) {
+ dev_warn(rvu->dev,
+ "Invalid KPU profile version: %d.%d.%d expected version <= %d.%d.%d\n",
+ NPC_KPU_VER_MAJ(profile->version),
+ NPC_KPU_VER_MIN(profile->version),
+ NPC_KPU_VER_PATCH(profile->version),
+ NPC_KPU_VER_MAJ(NPC_KPU_PROFILE_VER),
+ NPC_KPU_VER_MIN(NPC_KPU_PROFILE_VER),
+ NPC_KPU_VER_PATCH(NPC_KPU_PROFILE_VER));
+ return -EINVAL;
+ }
+ /* Verify if profile fits the HW */
+ if (fw->kpus > profile->kpus) {
+ dev_warn(rvu->dev, "Not enough KPUs: %d > %ld\n", fw->kpus,
+ profile->kpus);
+ return -EINVAL;
+ }
+
+ profile->custom = 1;
+ profile->name = fw->name;
+ profile->version = le64_to_cpu(fw->version);
+ profile->mkex = &fw->mkex;
+ profile->lt_def = &fw->lt_def;
+
+ for (kpu = 0; kpu < fw->kpus; kpu++) {
+ fw_kpu = (struct npc_kpu_fwdata *)(fw->data + offset);
+ if (fw_kpu->entries > KPU_MAX_CST_ENT)
+ dev_warn(rvu->dev,
+ "Too many custom entries on KPU%d: %d > %d\n",
+ kpu, fw_kpu->entries, KPU_MAX_CST_ENT);
+ entries = min(fw_kpu->entries, KPU_MAX_CST_ENT);
+ cam = (struct npc_kpu_profile_cam *)fw_kpu->data;
+ offset += sizeof(*fw_kpu) + fw_kpu->entries * sizeof(*cam);
+ action = (struct npc_kpu_profile_action *)(fw->data + offset);
+ offset += fw_kpu->entries * sizeof(*action);
+ if (rvu->kpu_fwdata_sz < hdr_sz + offset) {
+ dev_warn(rvu->dev,
+ "Profile size mismatch on KPU%i parsing.\n",
+ kpu + 1);
+ return -EINVAL;
+ }
+ for (entry = 0; entry < entries; entry++) {
+ profile->kpu[kpu].cam[entry] = cam[entry];
+ profile->kpu[kpu].action[entry] = action[entry];
+ }
+ }
+
+ return 0;
+}
+
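The parse loop above implies the following firmware blob layout (an illustration derived from the offset arithmetic; field names follow the structs used above):

/*
 *   struct npc_kpu_profile_fwdata    header: signature, name, version,
 *                                    mkex, lt_def, kpus
 *   data[]:
 *     struct npc_kpu_fwdata (KPU 1)  per-KPU entry count
 *     cam[entries]
 *     action[entries]
 *     struct npc_kpu_fwdata (KPU 2)
 *     ...
 */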
+static int npc_load_kpu_prfl_img(struct rvu *rvu, void __iomem *prfl_addr,
+ u64 prfl_sz, const char *kpu_profile)
+{
+ struct npc_kpu_profile_fwdata *kpu_data = NULL;
+ int rc = -EINVAL;
+
+ kpu_data = (struct npc_kpu_profile_fwdata __force *)prfl_addr;
+ if (le64_to_cpu(kpu_data->signature) == KPU_SIGN &&
+ !strncmp(kpu_data->name, kpu_profile, KPU_NAME_LEN)) {
+ dev_info(rvu->dev, "Loading KPU profile from firmware db: %s\n",
+ kpu_profile);
+ rvu->kpu_fwdata = kpu_data;
+ rvu->kpu_fwdata_sz = prfl_sz;
+ rvu->kpu_prfl_addr = prfl_addr;
+ rc = 0;
+ }
+
+ return rc;
+}
+
+static int npc_fwdb_detect_load_prfl_img(struct rvu *rvu, uint64_t prfl_sz,
+ const char *kpu_profile)
+{
+ struct npc_coalesced_kpu_prfl *img_data = NULL;
+ int i = 0, rc = -EINVAL;
+ void __iomem *kpu_prfl_addr;
+ u16 offset;
+
+ img_data = (struct npc_coalesced_kpu_prfl __force *)rvu->kpu_prfl_addr;
+ if (le64_to_cpu(img_data->signature) == KPU_SIGN &&
+ !strncmp(img_data->name, kpu_profile, KPU_NAME_LEN)) {
+ /* Loaded profile is a single KPU profile. */
+ rc = npc_load_kpu_prfl_img(rvu, rvu->kpu_prfl_addr,
+ prfl_sz, kpu_profile);
+ goto done;
+ }
+
+ /* Coalesced image: compute the offset of the first KPU profile. */
+ offset = offsetof(struct npc_coalesced_kpu_prfl, prfl_sz) +
+ (img_data->num_prfl * sizeof(uint16_t));
+ /* Scan the coalesced image for a profile with a matching name. */
+ while (i < img_data->num_prfl) {
+ /* Profile image offsets are rounded up to the next multiple of 8. */
+ offset = ALIGN_8B_CEIL(offset);
+ kpu_prfl_addr = (void __iomem *)((uintptr_t)rvu->kpu_prfl_addr +
+ offset);
+ rc = npc_load_kpu_prfl_img(rvu, kpu_prfl_addr,
+ img_data->prfl_sz[i], kpu_profile);
+ if (!rc)
+ break;
+ /* Advance to the next profile image based on its size. */
+ offset += img_data->prfl_sz[i];
+ i++;
+ }
+done:
+ return rc;
+}
+
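A coalesced image packs several KPU profiles behind one header; each profile image starts on an 8-byte boundary, which is what the ALIGN_8B_CEIL() call above accounts for. Schematically (illustration only; field names follow struct npc_coalesced_kpu_prfl):

/*
 *   signature | name | version | num_prfl
 *   prfl_sz[0] ... prfl_sz[num_prfl - 1]
 *   <pad to 8-byte boundary>
 *   KPU profile image 0
 *   <pad to 8-byte boundary>
 *   KPU profile image 1
 *   ...
 */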
+static int npc_load_kpu_profile_fwdb(struct rvu *rvu, const char *kpu_profile)
+{
+ int ret = -EINVAL;
+ u64 prfl_sz;
+
+ /* Setting up the mapping for NPC profile image */
+ ret = npc_fwdb_prfl_img_map(rvu, &rvu->kpu_prfl_addr, &prfl_sz);
+ if (ret < 0)
+ goto done;
+
+ /* Detect if profile is coalesced or single KPU profile and load */
+ ret = npc_fwdb_detect_load_prfl_img(rvu, prfl_sz, kpu_profile);
+ if (ret == 0)
+ goto done;
+
+ /* Clean up if the KPU profile image from fwdata is not valid. */
+ if (rvu->kpu_prfl_addr) {
+ iounmap(rvu->kpu_prfl_addr);
+ rvu->kpu_prfl_addr = NULL;
+ rvu->kpu_fwdata_sz = 0;
+ rvu->kpu_fwdata = NULL;
+ }
+
+done:
+ return ret;
+}
+
static void npc_load_kpu_profile(struct rvu *rvu)
{
struct npc_kpu_profile_adapter *profile = &rvu->kpu;
+ const char *kpu_profile = rvu->kpu_pfl_name;
+ const struct firmware *fw = NULL;
+ bool retry_fwdb = false;
+
+ /* If the user has not specified a custom profile */
+ if (!strncmp(kpu_profile, def_pfl_name, KPU_NAME_LEN))
+ goto revert_to_default;
+ /* First prepare default KPU, then we'll customize top entries. */
+ npc_prepare_default_kpu(profile);
+ /* Order of precedence for loading the NPC profile (high to low):
+ * 1. Firmware binary in the filesystem.
+ * 2. Firmware database method.
+ * 3. Default KPU profile.
+ */
+ if (!request_firmware(&fw, kpu_profile, rvu->dev)) {
+ dev_info(rvu->dev, "Loading KPU profile from firmware: %s\n",
+ kpu_profile);
+ rvu->kpu_fwdata = kzalloc(fw->size, GFP_KERNEL);
+ if (rvu->kpu_fwdata) {
+ memcpy(rvu->kpu_fwdata, fw->data, fw->size);
+ rvu->kpu_fwdata_sz = fw->size;
+ }
+ release_firmware(fw);
+ retry_fwdb = true;
+ goto program_kpu;
+ }
+
+load_image_fwdb:
+ /* Loading the KPU profile using firmware database */
+ if (npc_load_kpu_profile_fwdb(rvu, kpu_profile))
+ goto revert_to_default;
+
+program_kpu:
+ /* Apply profile customization if firmware was loaded. */
+ if (!rvu->kpu_fwdata_sz || npc_apply_custom_kpu(rvu, profile)) {
+ /* If the image from the firmware filesystem fails to load or
+ * is invalid, retry with the firmware database method.
+ */
+ if (rvu->kpu_fwdata || rvu->kpu_fwdata_sz) {
+ /* Loading image from firmware database failed. */
+ if (rvu->kpu_prfl_addr) {
+ iounmap(rvu->kpu_prfl_addr);
+ rvu->kpu_prfl_addr = NULL;
+ } else {
+ kfree(rvu->kpu_fwdata);
+ }
+ rvu->kpu_fwdata = NULL;
+ rvu->kpu_fwdata_sz = 0;
+ if (retry_fwdb) {
+ retry_fwdb = false;
+ goto load_image_fwdb;
+ }
+ }
+
+ dev_warn(rvu->dev,
+ "Can't load KPU profile %s. Using default.\n",
+ kpu_profile);
+ kfree(rvu->kpu_fwdata);
+ rvu->kpu_fwdata = NULL;
+ goto revert_to_default;
+ }
+
+ dev_info(rvu->dev, "Using custom profile '%s', version %d.%d.%d\n",
+ profile->name, NPC_KPU_VER_MAJ(profile->version),
+ NPC_KPU_VER_MIN(profile->version),
+ NPC_KPU_VER_PATCH(profile->version));
+
+ return;
+
+revert_to_default:
npc_prepare_default_kpu(profile);
}
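
In short, the precedence implemented above plays out as: try the firmware file from the filesystem first; if that image fails to apply, retry once via the firmware database (retry_fwdb); if that also fails, fall back to the built-in default KPU profile.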
@@ -1654,6 +2038,10 @@ void rvu_npc_freemem(struct rvu *rvu)
kfree(pkind->rsrc.bmap);
kfree(mcam->counters.bmap);
+ if (rvu->kpu_prfl_addr)
+ iounmap(rvu->kpu_prfl_addr);
+ else
+ kfree(rvu->kpu_fwdata);
mutex_destroy(&mcam->lock);
}
@@ -2149,8 +2537,11 @@ int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
rsp->free_count = 0;
/* Check if ref_entry is within range */
- if (req->priority && req->ref_entry >= mcam->bmap_entries)
+ if (req->priority && req->ref_entry >= mcam->bmap_entries) {
+ dev_err(rvu->dev, "%s: reference entry %d is out of range\n",
+ __func__, req->ref_entry);
return NPC_MCAM_INVALID_REQ;
+ }
/* ref_entry can't be '0' if requested priority is high.
* Can't be last entry if requested priority is low.
@@ -2163,8 +2554,12 @@ int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu,
/* Since list of allocated indices needs to be sent to requester,
* max number of non-contiguous entries per mbox msg is limited.
*/
- if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES)
+ if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES) {
+ dev_err(rvu->dev,
+ "%s: %d Non-contiguous MCAM entries requested is more than max (%d) allowed\n",
+ __func__, req->count, NPC_MAX_NONCONTIG_ENTRIES);
return NPC_MCAM_INVALID_REQ;
+ }
/* Alloc request from PFFUNC with no NIXLF attached should be denied */
if (!is_nixlf_attached(rvu, pcifunc))
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 7f35b62eea13..87d7c6ab047f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -1103,11 +1103,18 @@ find_rule:
if (pf_set_vfs_mac) {
ether_addr_copy(pfvf->default_mac, req->packet.dmac);
ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
+ set_bit(PF_SET_VF_MAC, &pfvf->flags);
}
- if (pfvf->pf_set_vf_cfg && req->vtag0_type == NIX_AF_LFX_RX_VTAG_TYPE7)
+ if (test_bit(PF_SET_VF_CFG, &pfvf->flags) &&
+ req->vtag0_type == NIX_AF_LFX_RX_VTAG_TYPE7)
rule->vfvlan_cfg = true;
+ if (is_npc_intf_rx(req->intf) && req->match_id &&
+ (req->op == NIX_RX_ACTIONOP_UCAST || req->op == NIX_RX_ACTIONOP_RSS))
+ return rvu_nix_setup_ratelimit_aggr(rvu, req->hdr.pcifunc,
+ req->index, req->match_id);
+
return 0;
}
@@ -1167,7 +1174,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
/* PF installing for its VF */
if (req->hdr.pcifunc && !from_vf && req->vf)
- pfvf->pf_set_vf_cfg = 1;
+ set_bit(PF_SET_VF_CFG, &pfvf->flags);
/* update req destination mac addr */
if ((req->features & BIT_ULL(NPC_DMAC)) && is_npc_intf_rx(req->intf) &&
@@ -1177,9 +1184,12 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
}
err = nix_get_nixlf(rvu, target, &nixlf, NULL);
+ if (err && is_npc_intf_rx(req->intf) && !pf_set_vfs_mac)
+ return -EINVAL;
- /* If interface is uninitialized then do not enable entry */
- if (err || (!req->default_rule && !pfvf->def_ucast_rule))
+ /* don't enable rule when nixlf not attached or initialized */
+ if (!(is_nixlf_attached(rvu, target) &&
+ test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
enable = false;
/* Packets reaching NPC in Tx path implies that a
@@ -1193,6 +1203,14 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu,
if (from_vf && !enable)
return -EINVAL;
+ /* PF sets VF mac & VF NIXLF is not attached, update the mac addr */
+ if (pf_set_vfs_mac && !enable) {
+ ether_addr_copy(pfvf->default_mac, req->packet.dmac);
+ ether_addr_copy(pfvf->mac_addr, req->packet.dmac);
+ set_bit(PF_SET_VF_MAC, &pfvf->flags);
+ return 0;
+ }
+
/* If message is from VF then its flow should not overlap with
* reserved unicast flow.
*/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index ac71c0f2f960..76837d5e19c6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -171,6 +171,7 @@
#define NIX_AF_SQ_CONST (0x0040)
#define NIX_AF_CQ_CONST (0x0048)
#define NIX_AF_RQ_CONST (0x0050)
+#define NIX_AF_PL_CONST (0x0058)
#define NIX_AF_PSE_CONST (0x0060)
#define NIX_AF_TL1_CONST (0x0070)
#define NIX_AF_TL2_CONST (0x0078)
@@ -181,6 +182,7 @@
#define NIX_AF_LSO_CFG (0x00A8)
#define NIX_AF_BLK_RST (0x00B0)
#define NIX_AF_TX_TSTMP_CFG (0x00C0)
+#define NIX_AF_PL_TS (0x00C8)
#define NIX_AF_RX_CFG (0x00D0)
#define NIX_AF_AVG_DELAY (0x00E0)
#define NIX_AF_CINT_DELAY (0x00F0)
@@ -208,19 +210,27 @@
#define NIX_AF_RVU_INT_ENA_W1S (0x01D0)
#define NIX_AF_RVU_INT_ENA_W1C (0x01D8)
#define NIX_AF_TCP_TIMER (0x01E0)
-#define NIX_AF_RX_WQE_TAG_CTL (0x01F0)
+#define NIX_AF_RX_DEF_ET(a) (0x01F0ull | (uint64_t)(a) << 3)
#define NIX_AF_RX_DEF_OL2 (0x0200)
#define NIX_AF_RX_DEF_OIP4 (0x0210)
#define NIX_AF_RX_DEF_IIP4 (0x0220)
+#define NIX_AF_RX_DEF_VLAN0_PCP_DEI (0x0228)
#define NIX_AF_RX_DEF_OIP6 (0x0230)
+#define NIX_AF_RX_DEF_VLAN1_PCP_DEI (0x0238)
#define NIX_AF_RX_DEF_IIP6 (0x0240)
#define NIX_AF_RX_DEF_OTCP (0x0250)
#define NIX_AF_RX_DEF_ITCP (0x0260)
#define NIX_AF_RX_DEF_OUDP (0x0270)
#define NIX_AF_RX_DEF_IUDP (0x0280)
#define NIX_AF_RX_DEF_OSCTP (0x0290)
+#define NIX_AF_RX_DEF_CST_APAD0 (0x0298)
#define NIX_AF_RX_DEF_ISCTP (0x02A0)
#define NIX_AF_RX_DEF_IPSECX (0x02B0)
+#define NIX_AF_RX_DEF_CST_APAD1 (0x02A8)
+#define NIX_AF_RX_DEF_IIP4_DSCP (0x02E0)
+#define NIX_AF_RX_DEF_OIP4_DSCP (0x02E8)
+#define NIX_AF_RX_DEF_IIP6_DSCP (0x02F0)
+#define NIX_AF_RX_DEF_OIP6_DSCP (0x02F8)
#define NIX_AF_RX_IPSEC_GEN_CFG (0x0300)
#define NIX_AF_RX_CPTX_INST_ADDR (0x0310)
#define NIX_AF_NDC_TX_SYNC (0x03F0)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index 5e5f45c7eab0..14aa8e37ea41 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -286,7 +286,7 @@ enum nix_aq_ctype {
NIX_AQ_CTYPE_MCE = 0x3,
NIX_AQ_CTYPE_RSS = 0x4,
NIX_AQ_CTYPE_DYNO = 0x5,
- NIX_AQ_CTYPE_BAND_PROF = 0x6,
+ NIX_AQ_CTYPE_BANDPROF = 0x6,
};
/* NIX admin queue instruction opcodes */
@@ -665,6 +665,89 @@ struct nix_rx_mce_s {
uint64_t next : 16;
};
+enum nix_band_prof_layers {
+ BAND_PROF_LEAF_LAYER = 0,
+ BAND_PROF_INVAL_LAYER = 1,
+ BAND_PROF_MID_LAYER = 2,
+ BAND_PROF_TOP_LAYER = 3,
+ BAND_PROF_NUM_LAYERS = 4,
+};
+
+enum NIX_RX_BAND_PROF_ACTIONRESULT_E {
+ NIX_RX_BAND_PROF_ACTIONRESULT_PASS = 0x0,
+ NIX_RX_BAND_PROF_ACTIONRESULT_DROP = 0x1,
+ NIX_RX_BAND_PROF_ACTIONRESULT_RED = 0x2,
+};
+
+enum nix_band_prof_pc_mode {
+ NIX_RX_PC_MODE_VLAN = 0,
+ NIX_RX_PC_MODE_DSCP = 1,
+ NIX_RX_PC_MODE_GEN = 2,
+ NIX_RX_PC_MODE_RSVD = 3,
+};
+
+/* NIX ingress policer bandwidth profile structure */
+struct nix_bandprof_s {
+ uint64_t pc_mode : 2; /* W0 */
+ uint64_t icolor : 2;
+ uint64_t tnl_ena : 1;
+ uint64_t reserved_5_7 : 3;
+ uint64_t peir_exponent : 5;
+ uint64_t reserved_13_15 : 3;
+ uint64_t pebs_exponent : 5;
+ uint64_t reserved_21_23 : 3;
+ uint64_t cir_exponent : 5;
+ uint64_t reserved_29_31 : 3;
+ uint64_t cbs_exponent : 5;
+ uint64_t reserved_37_39 : 3;
+ uint64_t peir_mantissa : 8;
+ uint64_t pebs_mantissa : 8;
+ uint64_t cir_mantissa : 8;
+ uint64_t cbs_mantissa : 8; /* W1 */
+ uint64_t lmode : 1;
+ uint64_t l_sellect : 3;
+ uint64_t rdiv : 4;
+ uint64_t adjust_exponent : 5;
+ uint64_t reserved_85_86 : 2;
+ uint64_t adjust_mantissa : 9;
+ uint64_t gc_action : 2;
+ uint64_t yc_action : 2;
+ uint64_t rc_action : 2;
+ uint64_t meter_algo : 2;
+ uint64_t band_prof_id : 7;
+ uint64_t reserved_111_118 : 8;
+ uint64_t hl_en : 1;
+ uint64_t reserved_120_127 : 8;
+ uint64_t ts : 48; /* W2 */
+ uint64_t reserved_176_191 : 16;
+ uint64_t pe_accum : 32; /* W3 */
+ uint64_t c_accum : 32;
+ uint64_t green_pkt_pass : 48; /* W4 */
+ uint64_t reserved_304_319 : 16;
+ uint64_t yellow_pkt_pass : 48; /* W5 */
+ uint64_t reserved_368_383 : 16;
+ uint64_t red_pkt_pass : 48; /* W6 */
+ uint64_t reserved_432_447 : 16;
+ uint64_t green_octs_pass : 48; /* W7 */
+ uint64_t reserved_496_511 : 16;
+ uint64_t yellow_octs_pass : 48; /* W8 */
+ uint64_t reserved_560_575 : 16;
+ uint64_t red_octs_pass : 48; /* W9 */
+ uint64_t reserved_624_639 : 16;
+ uint64_t green_pkt_drop : 48; /* W10 */
+ uint64_t reserved_688_703 : 16;
+ uint64_t yellow_pkt_drop : 48; /* W11 */
+ uint64_t reserved_752_767 : 16;
+ uint64_t red_pkt_drop : 48; /* W12 */
+ uint64_t reserved_816_831 : 16;
+ uint64_t green_octs_drop : 48; /* W13 */
+ uint64_t reserved_880_895 : 16;
+ uint64_t yellow_octs_drop : 48; /* W14 */
+ uint64_t reserved_944_959 : 16;
+ uint64_t red_octs_drop : 48; /* W15 */
+ uint64_t reserved_1008_1023 : 16;
+};
+
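The bit-fields above pack exactly sixteen 64-bit words (W0-W15, bits 0 through 1023), i.e. a 128-byte context. A compile-time guard along these lines (a sketch, not part of the patch) would catch accidental layout drift:

_Static_assert(sizeof(struct nix_bandprof_s) == 128,
	       "NIX bandwidth profile context must be 16 x 64-bit words");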
enum nix_lsoalg {
NIX_LSOALG_NOP,
NIX_LSOALG_ADD_SEGNUM,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index 9ec0313f13fc..1b08896b46d2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -179,3 +179,326 @@ void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
sq->head++;
sq->head &= (sq->sqe_cnt - 1);
}
+
+int cn10k_free_all_ipolicers(struct otx2_nic *pfvf)
+{
+ struct nix_bandprof_free_req *req;
+ int rc;
+
+ if (is_dev_otx2(pfvf->pdev))
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_bandprof_free(&pfvf->mbox);
+ if (!req) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Free all bandwidth profiles allocated */
+ req->free_all = true;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+out:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
+int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf)
+{
+ struct nix_bandprof_alloc_req *req;
+ struct nix_bandprof_alloc_rsp *rsp;
+ int rc;
+
+ req = otx2_mbox_alloc_msg_nix_bandprof_alloc(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->prof_count[BAND_PROF_LEAF_LAYER] = 1;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (rc)
+ goto out;
+
+ rsp = (struct nix_bandprof_alloc_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
+ if (!rsp->prof_count[BAND_PROF_LEAF_LAYER]) {
+ rc = -EIO;
+ goto out;
+ }
+
+ *leaf = rsp->prof_idx[BAND_PROF_LEAF_LAYER][0];
+out:
+ if (rc) {
+ dev_warn(pfvf->dev,
+ "Failed to allocate ingress bandwidth policer\n");
+ }
+
+ return rc;
+}
+
+int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int ret;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ ret = cn10k_alloc_leaf_profile(pfvf, &hw->matchall_ipolicer);
+
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return ret;
+}
+
+#define POLICER_TIMESTAMP 1 /* 1 second */
+#define MAX_RATE_EXP 22 /* Valid rate exponent range: 0 - 22 */
+
+static void cn10k_get_ingress_burst_cfg(u32 burst, u32 *burst_exp,
+ u32 *burst_mantissa)
+{
+ int tmp;
+
+ /* Burst is calculated as
+ * (1+[BURST_MANTISSA]/256)*2^[BURST_EXPONENT]
+	 * This is the upper limit on the number of tokens (bytes) that
+ * can be accumulated in the bucket.
+ */
+ *burst_exp = ilog2(burst);
+ if (burst < 256) {
+ /* No float: can't express mantissa in this case */
+ *burst_mantissa = 0;
+ return;
+ }
+
+ if (*burst_exp > MAX_RATE_EXP)
+ *burst_exp = MAX_RATE_EXP;
+
+	/* Calculate the mantissa:
+	 * find the remaining bytes 'burst - 2^burst_exp', then
+	 * mantissa = (remaining bytes) / 2^(burst_exp - 8)
+ */
+ tmp = burst - rounddown_pow_of_two(burst);
+ *burst_mantissa = tmp / (1UL << (*burst_exp - 8));
+}
+
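As a sanity check on the encoding above, a standalone sketch that decodes an exponent/mantissa pair back to bytes using the (1 + mantissa/256) * 2^exponent formula from the comment (exact for exponents of 8 or more):

#include <stdint.h>
#include <stdio.h>

/* Decode a burst value from exponent/mantissa: (1 + m/256) * 2^e bytes. */
static uint64_t burst_decode(uint32_t exp, uint32_t mantissa)
{
	return (1ULL << exp) + (((uint64_t)mantissa << exp) >> 8);
}

int main(void)
{
	/* burst = 6000: exp = ilog2(6000) = 12, remainder = 6000 - 4096 = 1904,
	 * mantissa = 1904 / 2^(12 - 8) = 119; decoding yields
	 * 4096 + 119 * 16 = 6000 bytes, an exact round trip.
	 */
	printf("%llu\n", (unsigned long long)burst_decode(12, 119));
	return 0;
}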
+static void cn10k_get_ingress_rate_cfg(u64 rate, u32 *rate_exp,
+ u32 *rate_mantissa, u32 *rdiv)
+{
+ u32 div = 0;
+ u32 exp = 0;
+ u64 tmp;
+
+ /* Figure out mantissa, exponent and divider from given max pkt rate
+ *
+ * To achieve desired rate HW adds
+ * (1+[RATE_MANTISSA]/256)*2^[RATE_EXPONENT] tokens (bytes) at every
+	 * policer timeunit * 2^rdiv, i.e. 2 * 2^rdiv usecs, to the token bucket.
+	 * Here the policer timeunit is 2 usecs and the rate is in bits per sec.
+	 * Since floating point cannot be used, the algorithm below uses a
+	 * scale factor of 1000000 to support rates up to 100Gbps.
+ */
+ tmp = rate * 32 * 2;
+ if (tmp < 256000000) {
+ while (tmp < 256000000) {
+ tmp = tmp * 2;
+ div++;
+ }
+ } else {
+ for (exp = 0; tmp >= 512000000 && exp <= MAX_RATE_EXP; exp++)
+ tmp = tmp / 2;
+
+ if (exp > MAX_RATE_EXP)
+ exp = MAX_RATE_EXP;
+ }
+
+ *rate_mantissa = (tmp - 256000000) / 1000000;
+ *rate_exp = exp;
+ *rdiv = div;
+}
+
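Worked example of the scaled search above: for rate = 10 Mbit/s, tmp = 10000000 * 64 = 640000000, which is >= 256000000, so the else branch halves it once to 320000000 with exp = 1. That gives rate_mantissa = (320000000 - 256000000) / 1000000 = 64 and rdiv = 0, so the hardware adds (1 + 64/256) * 2^1 = 2.5 bytes of tokens every 2 usecs, i.e. 1.25 MB/s = 10 Mbit/s, reproducing the requested rate exactly.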
+int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx,
+ u16 policer, bool map)
+{
+ struct nix_cn10k_aq_enq_req *aq;
+
+ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ /* Enable policing and set the bandwidth profile (policer) index */
+ if (map)
+ aq->rq.policer_ena = 1;
+ else
+ aq->rq.policer_ena = 0;
+ aq->rq_mask.policer_ena = 1;
+
+ aq->rq.band_prof_id = policer;
+ aq->rq_mask.band_prof_id = GENMASK(9, 0);
+
+ /* Fill AQ info */
+ aq->qidx = rq_idx;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf)
+{
+ struct nix_bandprof_free_req *req;
+
+ req = otx2_mbox_alloc_msg_nix_bandprof_free(&pfvf->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->prof_count[BAND_PROF_LEAF_LAYER] = 1;
+ req->prof_idx[BAND_PROF_LEAF_LAYER][0] = leaf;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
+int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int qidx, rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ /* Remove RQ's policer mapping */
+ for (qidx = 0; qidx < hw->rx_queues; qidx++)
+ cn10k_map_unmap_rq_policer(pfvf, qidx,
+ hw->matchall_ipolicer, false);
+
+ rc = cn10k_free_leaf_profile(pfvf, hw->matchall_ipolicer);
+
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+
+int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
+ u32 burst, u64 rate, bool pps)
+{
+ struct nix_cn10k_aq_enq_req *aq;
+ u32 burst_exp, burst_mantissa;
+ u32 rate_exp, rate_mantissa;
+ u32 rdiv;
+
+ /* Get exponent and mantissa values for the desired rate */
+ cn10k_get_ingress_burst_cfg(burst, &burst_exp, &burst_mantissa);
+ cn10k_get_ingress_rate_cfg(rate, &rate_exp, &rate_mantissa, &rdiv);
+
+ /* Init bandwidth profile */
+ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
+ if (!aq)
+ return -ENOMEM;
+
+ /* Set initial color mode to blind */
+ aq->prof.icolor = 0x03;
+ aq->prof_mask.icolor = 0x03;
+
+ /* Set rate and burst values */
+ aq->prof.cir_exponent = rate_exp;
+ aq->prof_mask.cir_exponent = 0x1F;
+
+ aq->prof.cir_mantissa = rate_mantissa;
+ aq->prof_mask.cir_mantissa = 0xFF;
+
+ aq->prof.cbs_exponent = burst_exp;
+ aq->prof_mask.cbs_exponent = 0x1F;
+
+ aq->prof.cbs_mantissa = burst_mantissa;
+ aq->prof_mask.cbs_mantissa = 0xFF;
+
+ aq->prof.rdiv = rdiv;
+ aq->prof_mask.rdiv = 0xF;
+
+ if (pps) {
+		/* The number of tokens decremented per packet is given by:
+		 * max([LMODE ? 0 : (packet_length - LXPTR)] +
+		 *	([ADJUST_MANTISSA]/256 - 1) * 2^[ADJUST_EXPONENT],
+		 *	1/256)
+		 * If LMODE is 1 then rate limiting is based on PPS,
+		 * otherwise bps.
+		 * The ADJUST value specifies a token cost per packet, in
+		 * contrast to the packet length, which specifies a cost per
+		 * byte. To rate limit based on PPS the adjust mantissa is
+		 * set to 384 and the exponent to 1 so that the number of
+		 * tokens decremented becomes exactly 1, i.e. 1 token per
+		 * packet.
+		 */
+ aq->prof.adjust_exponent = 1;
+ aq->prof_mask.adjust_exponent = 0x1F;
+
+ aq->prof.adjust_mantissa = 384;
+ aq->prof_mask.adjust_mantissa = 0x1FF;
+
+ aq->prof.lmode = 0x1;
+ aq->prof_mask.lmode = 0x1;
+ }
+
+ /* Two rate three color marker
+ * With PEIR/EIR set to zero, color will be either green or red
+ */
+ aq->prof.meter_algo = 2;
+ aq->prof_mask.meter_algo = 0x3;
+
+ aq->prof.rc_action = NIX_RX_BAND_PROF_ACTIONRESULT_DROP;
+ aq->prof_mask.rc_action = 0x3;
+
+ aq->prof.yc_action = NIX_RX_BAND_PROF_ACTIONRESULT_PASS;
+ aq->prof_mask.yc_action = 0x3;
+
+ aq->prof.gc_action = NIX_RX_BAND_PROF_ACTIONRESULT_PASS;
+ aq->prof_mask.gc_action = 0x3;
+
+	/* Setting the exponent to 24 and the mantissa to 0 configures
+	 * the bucket with zero values, making it unused. The peak
+	 * information rate and excess information rate buckets are
+	 * unused here.
+	 */
+ aq->prof.peir_exponent = 24;
+ aq->prof_mask.peir_exponent = 0x1F;
+
+ aq->prof.peir_mantissa = 0;
+ aq->prof_mask.peir_mantissa = 0xFF;
+
+ aq->prof.pebs_exponent = 24;
+ aq->prof_mask.pebs_exponent = 0x1F;
+
+ aq->prof.pebs_mantissa = 0;
+ aq->prof_mask.pebs_mantissa = 0xFF;
+
+ /* Fill AQ info */
+ aq->qidx = profile;
+ aq->ctype = NIX_AQ_CTYPE_BANDPROF;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ return otx2_sync_mbox_msg(&pfvf->mbox);
+}
+
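A quick check of the PPS constants programmed above: with ADJUST_MANTISSA = 384 and ADJUST_EXPONENT = 1 the adjust term is (384/256 - 1) * 2^1 = 0.5 * 2 = 1, and LMODE = 1 zeroes the packet-length term, so exactly one token is charged per packet regardless of its size; this is what turns the byte-based policer into a packet-based one.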
+int cn10k_set_matchall_ipolicer_rate(struct otx2_nic *pfvf,
+ u32 burst, u64 rate)
+{
+ struct otx2_hw *hw = &pfvf->hw;
+ int qidx, rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ rc = cn10k_set_ipolicer_rate(pfvf, hw->matchall_ipolicer, burst,
+ rate, false);
+ if (rc)
+ goto out;
+
+ for (qidx = 0; qidx < hw->rx_queues; qidx++) {
+ rc = cn10k_map_unmap_rq_policer(pfvf, qidx,
+ hw->matchall_ipolicer, true);
+ if (rc)
+ break;
+ }
+
+out:
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
index e0bc595cbb78..71292a4cf1f3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
@@ -14,4 +14,15 @@ void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int cn10k_pf_lmtst_init(struct otx2_nic *pf);
int cn10k_vf_lmtst_init(struct otx2_nic *vf);
+int cn10k_free_all_ipolicers(struct otx2_nic *pfvf);
+int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf);
+int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf);
+int cn10k_set_matchall_ipolicer_rate(struct otx2_nic *pfvf,
+ u32 burst, u64 rate);
+int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx,
+ u16 policer, bool map);
+int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf);
+int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
+ u32 burst, u64 rate, bool pps);
+int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf);
#endif /* CN10K_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 45730d0d92f2..234b330f3183 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -180,6 +180,7 @@ struct otx2_hw {
/* NIX */
u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
+ u16 matchall_ipolicer;
/* HW settings, coalescing etc */
u16 rx_chan_base;
@@ -223,6 +224,11 @@ struct otx2_hw {
u64 *nix_lmt_base;
};
+enum vfperm {
+ OTX2_RESET_VF_PERM,
+ OTX2_TRUSTED_VF,
+};
+
struct otx2_vf_config {
struct otx2_nic *pf;
struct delayed_work link_event_work;
@@ -230,6 +236,7 @@ struct otx2_vf_config {
u8 mac[ETH_ALEN];
u16 vlan;
int tx_vtag_idx;
+ bool trusted;
};
struct flr_work {
@@ -261,24 +268,26 @@ struct otx2_mac_table {
struct otx2_flow_config {
u16 entry[NPC_MAX_NONCONTIG_ENTRIES];
- u32 nr_flows;
-#define OTX2_MAX_NTUPLE_FLOWS 32
-#define OTX2_MAX_UNICAST_FLOWS 8
-#define OTX2_MAX_VLAN_FLOWS 1
-#define OTX2_MAX_TC_FLOWS OTX2_MAX_NTUPLE_FLOWS
-#define OTX2_MCAM_COUNT (OTX2_MAX_NTUPLE_FLOWS + \
+ u16 *flow_ent;
+ u16 *def_ent;
+ u16 nr_flows;
+#define OTX2_DEFAULT_FLOWCOUNT 16
+#define OTX2_MAX_UNICAST_FLOWS 8
+#define OTX2_MAX_VLAN_FLOWS 1
+#define OTX2_MAX_TC_FLOWS OTX2_DEFAULT_FLOWCOUNT
+#define OTX2_MCAM_COUNT (OTX2_DEFAULT_FLOWCOUNT + \
OTX2_MAX_UNICAST_FLOWS + \
OTX2_MAX_VLAN_FLOWS)
- u32 ntuple_offset;
- u32 unicast_offset;
- u32 rx_vlan_offset;
- u32 vf_vlan_offset;
-#define OTX2_PER_VF_VLAN_FLOWS 2 /* rx+tx per VF */
+ u16 ntuple_offset;
+ u16 unicast_offset;
+ u16 rx_vlan_offset;
+ u16 vf_vlan_offset;
+#define OTX2_PER_VF_VLAN_FLOWS 2 /* Rx + Tx per VF */
#define OTX2_VF_VLAN_RX_INDEX 0
#define OTX2_VF_VLAN_TX_INDEX 1
- u32 tc_flower_offset;
- u32 ntuple_max_flows;
- u32 tc_max_flows;
+ u16 tc_flower_offset;
+ u16 ntuple_max_flows;
+ u16 tc_max_flows;
struct list_head flow_list;
};
@@ -319,6 +328,7 @@ struct otx2_nic {
#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
#define OTX2_FLAG_TC_FLOWER_SUPPORT BIT_ULL(11)
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12)
+#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
u64 flags;
struct otx2_qset qset;
@@ -362,6 +372,7 @@ struct otx2_nic {
struct otx2_flow_config *flow_cfg;
struct otx2_tc_info tc_info;
+ unsigned long rq_bmap;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index f4962a97a075..8df748e0677b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -286,6 +286,12 @@ static int otx2_set_channels(struct net_device *dev,
if (!channel->rx_count || !channel->tx_count)
return -EINVAL;
+ if (bitmap_weight(&pfvf->rq_bmap, pfvf->hw.rx_queues) > 1) {
+ netdev_err(dev,
+ "Receive queues are in use by TC police action\n");
+ return -EINVAL;
+ }
+
if (if_up)
dev->netdev_ops->ndo_stop(dev);
@@ -786,6 +792,10 @@ static int otx2_set_rxfh_context(struct net_device *dev, const u32 *indir,
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
+ if (*rss_context != ETH_RXFH_CONTEXT_ALLOC &&
+ *rss_context >= MAX_RSS_GROUPS)
+ return -EINVAL;
+
rss = &pfvf->hw.rss_info;
if (!rss->enable) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 0b4fa92ba821..8c97106bdd1c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -20,13 +20,125 @@ struct otx2_flow {
int vf;
};
+static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
+{
+ devm_kfree(pfvf->dev, flow_cfg->flow_ent);
+ flow_cfg->flow_ent = NULL;
+ flow_cfg->ntuple_max_flows = 0;
+ flow_cfg->tc_max_flows = 0;
+}
+
+static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_mcam_free_entry_req *req;
+ int ent, err;
+
+ if (!flow_cfg->ntuple_max_flows)
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ for (ent = 0; ent < flow_cfg->ntuple_max_flows; ent++) {
+ req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
+ if (!req)
+ break;
+
+ req->entry = flow_cfg->flow_ent[ent];
+
+ /* Send message to AF to free MCAM entries */
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ break;
+ }
+ mutex_unlock(&pfvf->mbox.lock);
+ otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
+ return 0;
+}
+
+static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count)
+{
+ struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
+ struct npc_mcam_alloc_entry_req *req;
+ struct npc_mcam_alloc_entry_rsp *rsp;
+ int ent, allocated = 0;
+
+ /* Free current ones and allocate new ones with requested count */
+ otx2_free_ntuple_mcam_entries(pfvf);
+
+ if (!count)
+ return 0;
+
+ flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
+ sizeof(u16), GFP_KERNEL);
+ if (!flow_cfg->flow_ent)
+ return -ENOMEM;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+	/* A single request can allocate at most NPC_MAX_NONCONTIG_ENTRIES
+	 * MCAM entries.
+	 */
+ while (allocated < count) {
+ req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
+ if (!req)
+ goto exit;
+
+ req->contig = false;
+ req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
+ NPC_MAX_NONCONTIG_ENTRIES : count - allocated;
+ req->priority = NPC_MCAM_HIGHER_PRIO;
+ req->ref_entry = flow_cfg->def_ent[0];
+
+ /* Send message to AF */
+ if (otx2_sync_mbox_msg(&pfvf->mbox))
+ goto exit;
+
+ rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
+ (&pfvf->mbox.mbox, 0, &req->hdr);
+
+ for (ent = 0; ent < rsp->count; ent++)
+ flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];
+
+ allocated += rsp->count;
+
+ /* If this request is not fulfilled, no need to send
+ * further requests.
+ */
+ if (rsp->count != req->count)
+ break;
+ }
+
+exit:
+ mutex_unlock(&pfvf->mbox.lock);
+
+ flow_cfg->ntuple_offset = 0;
+ flow_cfg->ntuple_max_flows = allocated;
+ flow_cfg->tc_max_flows = allocated;
+
+ if (allocated != count)
+ netdev_info(pfvf->netdev,
+ "Unable to allocate %d MCAM entries for ntuple, got %d\n",
+ count, allocated);
+
+ return allocated;
+}
+
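The chunking above in isolation (a hedged sketch; the value of NPC_MAX_NONCONTIG_ENTRIES is not visible in this patch, so 64 below is a placeholder):

#include <stdio.h>

#define NPC_MAX_NONCONTIG_ENTRIES	64 /* placeholder, not the real value */

int main(void)
{
	int count = 150, allocated = 0, reqs = 0;

	while (allocated < count) {
		int chunk = count - allocated;

		if (chunk > NPC_MAX_NONCONTIG_ENTRIES)
			chunk = NPC_MAX_NONCONTIG_ENTRIES;
		allocated += chunk;	/* assumes AF grants the full chunk */
		reqs++;
	}
	printf("%d entries across %d mailbox requests\n", allocated, reqs);
	return 0;
}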
int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct npc_mcam_alloc_entry_req *req;
struct npc_mcam_alloc_entry_rsp *rsp;
int vf_vlan_max_flows;
- int i;
+ int ent, count;
+
+ vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
+ count = OTX2_MAX_UNICAST_FLOWS +
+ OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;
+
+ flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
+ sizeof(u16), GFP_KERNEL);
+ if (!flow_cfg->def_ent)
+ return -ENOMEM;
mutex_lock(&pfvf->mbox.lock);
@@ -36,9 +148,8 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
return -ENOMEM;
}
- vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
req->contig = false;
- req->count = OTX2_MCAM_COUNT + vf_vlan_max_flows;
+ req->count = count;
/* Send message to AF */
if (otx2_sync_mbox_msg(&pfvf->mbox)) {
@@ -51,37 +162,36 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
if (rsp->count != req->count) {
netdev_info(pfvf->netdev,
- "Unable to allocate %d MCAM entries, got %d\n",
- req->count, rsp->count);
- /* support only ntuples here */
- flow_cfg->ntuple_max_flows = rsp->count;
- flow_cfg->ntuple_offset = 0;
- pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
- flow_cfg->tc_max_flows = flow_cfg->ntuple_max_flows;
- pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
- } else {
- flow_cfg->vf_vlan_offset = 0;
- flow_cfg->ntuple_offset = flow_cfg->vf_vlan_offset +
- vf_vlan_max_flows;
- flow_cfg->tc_flower_offset = flow_cfg->ntuple_offset;
- flow_cfg->unicast_offset = flow_cfg->ntuple_offset +
- OTX2_MAX_NTUPLE_FLOWS;
- flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
- OTX2_MAX_UNICAST_FLOWS;
- pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
- pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
- pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
- pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
- pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
- }
-
- for (i = 0; i < rsp->count; i++)
- flow_cfg->entry[i] = rsp->entry_list[i];
+ "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n");
+ mutex_unlock(&pfvf->mbox.lock);
+ devm_kfree(pfvf->dev, flow_cfg->def_ent);
+ return 0;
+ }
- pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+ for (ent = 0; ent < rsp->count; ent++)
+ flow_cfg->def_ent[ent] = rsp->entry_list[ent];
+
+ flow_cfg->vf_vlan_offset = 0;
+ flow_cfg->unicast_offset = vf_vlan_max_flows;
+ flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
+ OTX2_MAX_UNICAST_FLOWS;
+ pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
mutex_unlock(&pfvf->mbox.lock);
+ /* Allocate entries for Ntuple filters */
+ count = otx2_alloc_ntuple_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
+ if (count <= 0) {
+ otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
+ return 0;
+ }
+
+ pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
+ pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
+
return 0;
}
@@ -96,13 +206,14 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
- pf->flow_cfg->ntuple_max_flows = OTX2_MAX_NTUPLE_FLOWS;
- pf->flow_cfg->tc_max_flows = pf->flow_cfg->ntuple_max_flows;
-
err = otx2_alloc_mcam_entries(pf);
if (err)
return err;
+	/* Check if MCAM entries were allocated */
+ if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
+ return 0;
+
pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
* OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
if (!pf->mac_table)
@@ -146,7 +257,7 @@ static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
ether_addr_copy(pf->mac_table[i].addr, mac);
pf->mac_table[i].inuse = true;
pf->mac_table[i].mcam_entry =
- flow_cfg->entry[i + flow_cfg->unicast_offset];
+ flow_cfg->def_ent[i + flow_cfg->unicast_offset];
req->entry = pf->mac_table[i].mcam_entry;
break;
}
@@ -551,6 +662,7 @@ static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
req->features |= BIT_ULL(NPC_IPPROTO_AH);
else
req->features |= BIT_ULL(NPC_IPPROTO_ESP);
+ break;
default:
break;
}
@@ -731,8 +843,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
if (!flow)
return -ENOMEM;
flow->location = fsp->location;
- flow->entry = flow_cfg->entry[flow_cfg->ntuple_offset +
- flow->location];
+ flow->entry = flow_cfg->flow_ent[flow->location];
new = true;
}
/* struct copy */
@@ -836,9 +947,8 @@ int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
return -ENOMEM;
}
- req->start = flow_cfg->entry[flow_cfg->ntuple_offset];
- req->end = flow_cfg->entry[flow_cfg->ntuple_offset +
- flow_cfg->ntuple_max_flows - 1];
+ req->start = flow_cfg->flow_ent[0];
+ req->end = flow_cfg->flow_ent[flow_cfg->ntuple_max_flows - 1];
err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
@@ -905,7 +1015,7 @@ int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
return -ENOMEM;
}
- req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
+ req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
req->intf = NIX_INTF_RX;
ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
eth_broadcast_addr((u8 *)&req->mask.dmac);
@@ -934,7 +1044,7 @@ static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
return -ENOMEM;
}
- req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
+ req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
/* Send message to AF */
err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 03004fdac0c6..59912f73417b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -39,6 +39,8 @@ MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);
+static void otx2_vf_link_event_task(struct work_struct *work);
+
enum {
TYPE_PFAF,
TYPE_PFVF,
@@ -1459,6 +1461,9 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
otx2_free_cq_res(pf);
+ /* Free all ingress bandwidth profiles allocated */
+ cn10k_free_all_ipolicers(pf);
+
mutex_lock(&mbox->lock);
/* Reset NIX LF */
free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
@@ -1820,9 +1825,11 @@ static void otx2_do_set_rx_mode(struct work_struct *work)
if (promisc)
req->mode |= NIX_RX_MODE_PROMISC;
- else if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
+ if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
req->mode |= NIX_RX_MODE_ALLMULTI;
+ req->mode |= NIX_RX_MODE_USE_MCE;
+
otx2_sync_mbox_msg(&pf->mbox);
mutex_unlock(&pf->mbox.lock);
}
@@ -2044,7 +2051,7 @@ static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (!netif_running(netdev))
return -EAGAIN;
- if (vf >= pci_num_vf(pdev))
+ if (vf >= pf->total_vfs)
return -EINVAL;
if (!is_valid_ether_addr(mac))
@@ -2055,7 +2062,8 @@ static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
ret = otx2_do_set_vf_mac(pf, vf, mac);
if (ret == 0)
- dev_info(&pdev->dev, "Reload VF driver to apply the changes\n");
+		dev_info(&pdev->dev,
+			 "Load/Reload the VF driver to apply the changes\n");
return ret;
}
@@ -2104,7 +2112,7 @@ static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
}
idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
del_req->entry =
- flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
+ flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
err = otx2_sync_mbox_msg(&pf->mbox);
if (err)
goto out;
@@ -2117,7 +2125,7 @@ static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
}
idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
del_req->entry =
- flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
+ flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
err = otx2_sync_mbox_msg(&pf->mbox);
goto out;
@@ -2131,7 +2139,7 @@ static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
}
idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
- req->entry = flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
+ req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
req->packet.vlan_tci = htons(vlan);
req->mask.vlan_tci = htons(VLAN_VID_MASK);
/* af fills the destination mac addr */
@@ -2182,7 +2190,7 @@ static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
eth_zero_addr((u8 *)&req->mask.dmac);
idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
- req->entry = flow_cfg->entry[flow_cfg->vf_vlan_offset + idx];
+ req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
req->features = BIT_ULL(NPC_DMAC);
req->channel = pf->hw.tx_chan_base;
req->intf = NIX_INTF_TX;
@@ -2241,10 +2249,63 @@ static int otx2_get_vf_config(struct net_device *netdev, int vf,
ivi->vf = vf;
ether_addr_copy(ivi->mac, config->mac);
ivi->vlan = config->vlan;
+ ivi->trusted = config->trusted;
return 0;
}
+static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf,
+ int req_perm)
+{
+ struct set_vf_perm *req;
+ int rc;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_set_vf_perm(&pf->mbox);
+ if (!req) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+	/* Let AF reset the VF permissions as SR-IOV is being disabled */
+ if (req_perm == OTX2_RESET_VF_PERM) {
+ req->flags |= RESET_VF_PERM;
+ } else if (req_perm == OTX2_TRUSTED_VF) {
+ if (pf->vf_configs[vf].trusted)
+ req->flags |= VF_TRUSTED;
+ }
+
+ req->vf = vf;
+ rc = otx2_sync_mbox_msg(&pf->mbox);
+out:
+ mutex_unlock(&pf->mbox.lock);
+ return rc;
+}
+
+static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf,
+ bool enable)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ struct pci_dev *pdev = pf->pdev;
+ int rc;
+
+ if (vf >= pci_num_vf(pdev))
+ return -EINVAL;
+
+ if (pf->vf_configs[vf].trusted == enable)
+ return 0;
+
+ pf->vf_configs[vf].trusted = enable;
+ rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF);
+
+ if (rc)
+ pf->vf_configs[vf].trusted = !enable;
+ else
+ netdev_info(pf->netdev, "VF %d is %strusted\n",
+ vf, enable ? "" : "not ");
+ return rc;
+}
+
static const struct net_device_ops otx2_netdev_ops = {
.ndo_open = otx2_open,
.ndo_stop = otx2_stop,
@@ -2261,6 +2322,7 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_set_vf_vlan = otx2_set_vf_vlan,
.ndo_get_vf_config = otx2_get_vf_config,
.ndo_setup_tc = otx2_setup_tc,
+ .ndo_set_vf_trust = otx2_ndo_set_vf_trust,
};
static int otx2_wq_init(struct otx2_nic *pf)
@@ -2315,6 +2377,40 @@ static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
return otx2_register_mbox_intr(pf, false);
}
+static int otx2_sriov_vfcfg_init(struct otx2_nic *pf)
+{
+ int i;
+
+ pf->vf_configs = devm_kcalloc(pf->dev, pf->total_vfs,
+ sizeof(struct otx2_vf_config),
+ GFP_KERNEL);
+ if (!pf->vf_configs)
+ return -ENOMEM;
+
+ for (i = 0; i < pf->total_vfs; i++) {
+ pf->vf_configs[i].pf = pf;
+ pf->vf_configs[i].intf_down = true;
+ pf->vf_configs[i].trusted = false;
+ INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
+ otx2_vf_link_event_task);
+ }
+
+ return 0;
+}
+
+static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf)
+{
+ int i;
+
+ if (!pf->vf_configs)
+ return;
+
+ for (i = 0; i < pf->total_vfs; i++) {
+ cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
+ otx2_set_vf_permissions(pf, i, OTX2_RESET_VF_PERM);
+ }
+}
+
static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
@@ -2509,6 +2605,11 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_mcam_flow_del;
+ /* Initialize SR-IOV resources */
+ err = otx2_sriov_vfcfg_init(pf);
+ if (err)
+ goto err_pf_sriov_init;
+
/* Enable link notifications */
otx2_cgx_config_linkevents(pf, true);
@@ -2518,6 +2619,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
+err_pf_sriov_init:
+ otx2_shutdown_tc(pf);
err_mcam_flow_del:
otx2_mcam_flow_del(pf);
err_unreg_netdev:
@@ -2576,7 +2679,7 @@ static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct otx2_nic *pf = netdev_priv(netdev);
- int ret, i;
+ int ret;
/* Init PF <=> VF mailbox stuff */
ret = otx2_pfvf_mbox_init(pf, numvfs);
@@ -2587,23 +2690,9 @@ static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
if (ret)
goto free_mbox;
- pf->vf_configs = kcalloc(numvfs, sizeof(struct otx2_vf_config),
- GFP_KERNEL);
- if (!pf->vf_configs) {
- ret = -ENOMEM;
- goto free_intr;
- }
-
- for (i = 0; i < numvfs; i++) {
- pf->vf_configs[i].pf = pf;
- pf->vf_configs[i].intf_down = true;
- INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
- otx2_vf_link_event_task);
- }
-
ret = otx2_pf_flr_init(pf, numvfs);
if (ret)
- goto free_configs;
+ goto free_intr;
ret = otx2_register_flr_me_intr(pf, numvfs);
if (ret)
@@ -2618,8 +2707,6 @@ free_flr_intr:
otx2_disable_flr_me_intr(pf);
free_flr:
otx2_flr_wq_destroy(pf);
-free_configs:
- kfree(pf->vf_configs);
free_intr:
otx2_disable_pfvf_mbox_intr(pf, numvfs);
free_mbox:
@@ -2632,17 +2719,12 @@ static int otx2_sriov_disable(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct otx2_nic *pf = netdev_priv(netdev);
int numvfs = pci_num_vf(pdev);
- int i;
if (!numvfs)
return 0;
pci_disable_sriov(pdev);
- for (i = 0; i < pci_num_vf(pdev); i++)
- cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
- kfree(pf->vf_configs);
-
otx2_disable_flr_me_intr(pf);
otx2_flr_wq_destroy(pf);
otx2_disable_pfvf_mbox_intr(pf, numvfs);
@@ -2682,6 +2764,7 @@ static void otx2_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
otx2_sriov_disable(pf->pdev);
+ otx2_sriov_vfcfg_cleanup(pf);
if (pf->otx2_wq)
destroy_workqueue(pf->otx2_wq);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 51157b283f6f..905fc02a7dfe 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -15,6 +15,7 @@
#include <net/tc_act/tc_vlan.h>
#include <net/ipv6.h>
+#include "cn10k.h"
#include "otx2_common.h"
/* Egress rate limiting definitions */
@@ -41,11 +42,14 @@ struct otx2_tc_flow_stats {
struct otx2_tc_flow {
struct rhash_head node;
unsigned long cookie;
- u16 entry;
unsigned int bitpos;
struct rcu_head rcu;
struct otx2_tc_flow_stats stats;
spinlock_t lock; /* lock for stats */
+ u16 rq;
+ u16 entry;
+ u16 leaf_profile;
+ bool is_act_police;
};
static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp,
@@ -220,17 +224,76 @@ static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
return err;
}
+static int otx2_tc_act_set_police(struct otx2_nic *nic,
+ struct otx2_tc_flow *node,
+ struct flow_cls_offload *f,
+ u64 rate, u32 burst, u32 mark,
+ struct npc_install_flow_req *req, bool pps)
+{
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct otx2_hw *hw = &nic->hw;
+ int rq_idx, rc;
+
+ rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
+ if (rq_idx >= hw->rx_queues) {
+ NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
+ return -EINVAL;
+ }
+
+ mutex_lock(&nic->mbox.lock);
+
+ rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
+ if (rc) {
+ mutex_unlock(&nic->mbox.lock);
+ return rc;
+ }
+
+ rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile, burst, rate, pps);
+ if (rc)
+ goto free_leaf;
+
+ rc = cn10k_map_unmap_rq_policer(nic, rq_idx, node->leaf_profile, true);
+ if (rc)
+ goto free_leaf;
+
+ mutex_unlock(&nic->mbox.lock);
+
+ req->match_id = mark & 0xFFFFULL;
+ req->index = rq_idx;
+ req->op = NIX_RX_ACTIONOP_UCAST;
+ set_bit(rq_idx, &nic->rq_bmap);
+ node->is_act_police = true;
+ node->rq = rq_idx;
+
+ return 0;
+
+free_leaf:
+ if (cn10k_free_leaf_profile(nic, node->leaf_profile))
+ netdev_err(nic->netdev,
+ "Unable to free leaf bandwidth profile(%d)\n",
+ node->leaf_profile);
+ mutex_unlock(&nic->mbox.lock);
+ return rc;
+}
+
static int otx2_tc_parse_actions(struct otx2_nic *nic,
struct flow_action *flow_action,
- struct npc_install_flow_req *req)
+ struct npc_install_flow_req *req,
+ struct flow_cls_offload *f,
+ struct otx2_tc_flow *node)
{
+ struct netlink_ext_ack *extack = f->common.extack;
struct flow_action_entry *act;
struct net_device *target;
struct otx2_nic *priv;
+ u32 burst, mark = 0;
+ u8 nr_police = 0;
+	bool pps = false; /* initialized so the bps-only path passes a defined value */
+ u64 rate;
int i;
if (!flow_action_has_entries(flow_action)) {
- netdev_info(nic->netdev, "no tc actions specified");
+ NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
return -EINVAL;
}
@@ -247,8 +310,8 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
priv = netdev_priv(target);
/* npc_install_flow_req doesn't support passing a target pcifunc */
if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
- netdev_info(nic->netdev,
- "can't redirect to other pf/vf\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "can't redirect to other pf/vf");
return -EOPNOTSUPP;
}
req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;
@@ -259,18 +322,55 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
/* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
break;
+ case FLOW_ACTION_POLICE:
+ /* Ingress ratelimiting is not supported on OcteonTx2 */
+ if (is_dev_otx2(nic->pdev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress policing not supported on this platform");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_bytes_ps > 0) {
+ rate = act->police.rate_bytes_ps * 8;
+ burst = act->police.burst;
+ } else if (act->police.rate_pkt_ps > 0) {
+ /* The algorithm used to calculate rate
+ * mantissa, exponent values for a given token
+ * rate (token can be byte or packet) requires
+				 * the token rate to be multiplied by 8.
+ */
+ rate = act->police.rate_pkt_ps * 8;
+ burst = act->police.burst_pkt;
+ pps = true;
+ }
+ nr_police++;
+ break;
+ case FLOW_ACTION_MARK:
+ mark = act->mark;
+ break;
default:
return -EOPNOTSUPP;
}
}
+ if (nr_police > 1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "rate limit police offload requires a single action");
+ return -EOPNOTSUPP;
+ }
+
+ if (nr_police)
+ return otx2_tc_act_set_police(nic, node, f, rate, burst,
+ mark, req, pps);
+
return 0;
}
-static int otx2_tc_prepare_flow(struct otx2_nic *nic,
+static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
struct flow_cls_offload *f,
struct npc_install_flow_req *req)
{
+ struct netlink_ext_ack *extack = f->common.extack;
struct flow_msg *flow_spec = &req->packet;
struct flow_msg *flow_mask = &req->mask;
struct flow_dissector *dissector;
@@ -335,7 +435,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic,
flow_rule_match_eth_addrs(rule, &match);
if (!is_zero_ether_addr(match.mask->src)) {
- netdev_err(nic->netdev, "src mac match not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
return -EOPNOTSUPP;
}
@@ -353,11 +453,11 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic,
flow_rule_match_ip(rule, &match);
if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
match.mask->tos) {
- netdev_err(nic->netdev, "tos not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "tos not supported");
return -EOPNOTSUPP;
}
if (match.mask->ttl) {
- netdev_err(nic->netdev, "ttl not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
return -EOPNOTSUPP;
}
flow_spec->tos = match.key->tos;
@@ -413,8 +513,8 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic,
if (ipv6_addr_loopback(&match.key->dst) ||
ipv6_addr_loopback(&match.key->src)) {
- netdev_err(nic->netdev,
- "Flow matching on IPv6 loopback addr is not supported\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Flow matching IPv6 loopback addr not supported");
return -EOPNOTSUPP;
}
@@ -463,7 +563,7 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic,
req->features |= BIT_ULL(NPC_SPORT_SCTP);
}
- return otx2_tc_parse_actions(nic, &rule->action, req);
+ return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}
static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry)
@@ -498,6 +598,7 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
{
struct otx2_tc_info *tc_info = &nic->tc_info;
struct otx2_tc_flow *flow_node;
+ int err;
flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
&tc_flow_cmd->cookie,
@@ -508,6 +609,27 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
return -EINVAL;
}
+ if (flow_node->is_act_police) {
+ mutex_lock(&nic->mbox.lock);
+
+ err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
+ flow_node->leaf_profile, false);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unmapping RQ %d & profile %d failed\n",
+ flow_node->rq, flow_node->leaf_profile);
+
+ err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unable to free leaf bandwidth profile(%d)\n",
+ flow_node->leaf_profile);
+
+ __clear_bit(flow_node->rq, &nic->rq_bmap);
+
+ mutex_unlock(&nic->mbox.lock);
+ }
+
otx2_del_mcam_flow_entry(nic, flow_node->entry);
WARN_ON(rhashtable_remove_fast(&nic->tc_info.flow_table,
@@ -524,14 +646,21 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
static int otx2_tc_add_flow(struct otx2_nic *nic,
struct flow_cls_offload *tc_flow_cmd)
{
+ struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
struct otx2_tc_info *tc_info = &nic->tc_info;
struct otx2_tc_flow *new_node, *old_node;
- struct npc_install_flow_req *req;
- int rc;
+ struct npc_install_flow_req *req, dummy;
+ int rc, err;
if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
return -ENOMEM;
+ if (bitmap_full(tc_info->tc_entries_bitmap, nic->flow_cfg->tc_max_flows)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Not enough MCAM space to add the flow");
+ return -ENOMEM;
+ }
+
/* allocate memory for the new flow and it's node */
new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
if (!new_node)
@@ -539,17 +668,11 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
spin_lock_init(&new_node->lock);
new_node->cookie = tc_flow_cmd->cookie;
- mutex_lock(&nic->mbox.lock);
- req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
- if (!req) {
- mutex_unlock(&nic->mbox.lock);
- return -ENOMEM;
- }
+ memset(&dummy, 0, sizeof(struct npc_install_flow_req));
- rc = otx2_tc_prepare_flow(nic, tc_flow_cmd, req);
+ rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy);
if (rc) {
- otx2_mbox_reset(&nic->mbox.mbox, 0);
- mutex_unlock(&nic->mbox.lock);
+ kfree_rcu(new_node, rcu);
return rc;
}
@@ -560,18 +683,22 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
if (old_node)
otx2_tc_del_flow(nic, tc_flow_cmd);
- if (bitmap_full(tc_info->tc_entries_bitmap, nic->flow_cfg->tc_max_flows)) {
- netdev_err(nic->netdev, "Not enough MCAM space to add the flow\n");
- otx2_mbox_reset(&nic->mbox.mbox, 0);
+ mutex_lock(&nic->mbox.lock);
+ req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
+ if (!req) {
mutex_unlock(&nic->mbox.lock);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto free_leaf;
}
+ memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
+ memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
+
new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap,
nic->flow_cfg->tc_max_flows);
req->channel = nic->hw.rx_chan_base;
- req->entry = nic->flow_cfg->entry[nic->flow_cfg->tc_flower_offset +
- nic->flow_cfg->tc_max_flows - new_node->bitpos];
+ req->entry = nic->flow_cfg->flow_ent[nic->flow_cfg->tc_flower_offset +
+ nic->flow_cfg->tc_max_flows - new_node->bitpos];
req->intf = NIX_INTF_RX;
req->set_cntr = 1;
new_node->entry = req->entry;
@@ -579,9 +706,10 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
/* Send message to AF */
rc = otx2_sync_mbox_msg(&nic->mbox);
if (rc) {
- netdev_err(nic->netdev, "Failed to install MCAM flow entry\n");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
mutex_unlock(&nic->mbox.lock);
- goto out;
+ kfree_rcu(new_node, rcu);
+ goto free_leaf;
}
mutex_unlock(&nic->mbox.lock);
@@ -591,12 +719,35 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
if (rc) {
otx2_del_mcam_flow_entry(nic, req->entry);
kfree_rcu(new_node, rcu);
- goto out;
+ goto free_leaf;
}
set_bit(new_node->bitpos, tc_info->tc_entries_bitmap);
tc_info->num_entries++;
-out:
+
+ return 0;
+
+free_leaf:
+ if (new_node->is_act_police) {
+ mutex_lock(&nic->mbox.lock);
+
+ err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
+ new_node->leaf_profile, false);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unmapping RQ %d & profile %d failed\n",
+ new_node->rq, new_node->leaf_profile);
+ err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
+ if (err)
+ netdev_err(nic->netdev,
+ "Unable to free leaf bandwidth profile(%d)\n",
+ new_node->leaf_profile);
+
+ __clear_bit(new_node->rq, &nic->rq_bmap);
+
+ mutex_unlock(&nic->mbox.lock);
+ }
+
return rc;
}
@@ -675,6 +826,87 @@ static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
}
}
+static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ struct flow_action *actions = &cls->rule->action;
+ struct flow_action_entry *entry;
+ u64 rate;
+ int err;
+
+ err = otx2_tc_validate_flow(nic, actions, extack);
+ if (err)
+ return err;
+
+ if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one ingress MATCHALL ratelimitter can be offloaded");
+ return -ENOMEM;
+ }
+
+ entry = &cls->rule->action.entries[0];
+ switch (entry->id) {
+ case FLOW_ACTION_POLICE:
+ /* Ingress ratelimiting is not supported on OcteonTx2 */
+ if (is_dev_otx2(nic->pdev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress policing not supported on this platform");
+ return -EOPNOTSUPP;
+ }
+
+ err = cn10k_alloc_matchall_ipolicer(nic);
+ if (err)
+ return err;
+
+		/* Convert bytes per second to bits per second */
+ rate = entry->police.rate_bytes_ps * 8;
+ err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
+ if (err)
+ return err;
+ nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only police action supported with Ingress MATCHALL offload");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct netlink_ext_ack *extack = cls->common.extack;
+ int err;
+
+ if (nic->flags & OTX2_FLAG_INTF_DOWN) {
+ NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
+ return -EINVAL;
+ }
+
+ err = cn10k_free_matchall_ipolicer(nic);
+ nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
+ return err;
+}
+
+static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
+ struct tc_cls_matchall_offload *cls_matchall)
+{
+ switch (cls_matchall->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return otx2_tc_ingress_matchall_install(nic, cls_matchall);
+ case TC_CLSMATCHALL_DESTROY:
+ return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
+ case TC_CLSMATCHALL_STATS:
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
@@ -686,6 +918,8 @@ static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
switch (type) {
case TC_SETUP_CLSFLOWER:
return otx2_setup_tc_cls_flower(nic, type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return otx2_setup_tc_ingress_matchall(nic, type_data);
default:
break;
}
@@ -775,6 +1009,9 @@ int otx2_init_tc(struct otx2_nic *nic)
{
struct otx2_tc_info *tc = &nic->tc_info;
+	/* Exclude receive queue 0 from being used for police actions */
+ set_bit(0, &nic->rq_bmap);
+
tc->flow_ht_params = tc_flow_ht_params;
return rhashtable_init(&tc->flow_table, &tc->flow_ht_params);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 085be90a03eb..13a908f75ba0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -395,6 +395,42 @@ static netdev_tx_t otx2vf_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
+static void otx2vf_set_rx_mode(struct net_device *netdev)
+{
+ struct otx2_nic *vf = netdev_priv(netdev);
+
+ queue_work(vf->otx2_wq, &vf->rx_mode_work);
+}
+
+static void otx2vf_do_set_rx_mode(struct work_struct *work)
+{
+ struct otx2_nic *vf = container_of(work, struct otx2_nic, rx_mode_work);
+ struct net_device *netdev = vf->netdev;
+ unsigned int flags = netdev->flags;
+ struct nix_rx_mode *req;
+
+ mutex_lock(&vf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_set_rx_mode(&vf->mbox);
+ if (!req) {
+ mutex_unlock(&vf->mbox.lock);
+ return;
+ }
+
+ req->mode = NIX_RX_MODE_UCAST;
+
+ if (flags & IFF_PROMISC)
+ req->mode |= NIX_RX_MODE_PROMISC;
+ if (flags & (IFF_ALLMULTI | IFF_MULTICAST))
+ req->mode |= NIX_RX_MODE_ALLMULTI;
+
+ req->mode |= NIX_RX_MODE_USE_MCE;
+
+ otx2_sync_mbox_msg(&vf->mbox);
+
+ mutex_unlock(&vf->mbox.lock);
+}
+
static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
{
bool if_up = netif_running(netdev);
@@ -432,12 +468,24 @@ static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_open = otx2vf_open,
.ndo_stop = otx2vf_stop,
.ndo_start_xmit = otx2vf_xmit,
+ .ndo_set_rx_mode = otx2vf_set_rx_mode,
.ndo_set_mac_address = otx2_set_mac_address,
.ndo_change_mtu = otx2vf_change_mtu,
.ndo_get_stats64 = otx2_get_stats64,
.ndo_tx_timeout = otx2_tx_timeout,
};
+static int otx2_wq_init(struct otx2_nic *vf)
+{
+ vf->otx2_wq = create_singlethread_workqueue("otx2vf_wq");
+ if (!vf->otx2_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&vf->rx_mode_work, otx2vf_do_set_rx_mode);
+ INIT_WORK(&vf->reset_task, otx2vf_reset_task);
+ return 0;
+}
+
static int otx2vf_realloc_msix_vectors(struct otx2_nic *vf)
{
struct otx2_hw *hw = &vf->hw;
@@ -588,8 +636,6 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->min_mtu = OTX2_MIN_MTU;
netdev->max_mtu = otx2_get_max_mtu(vf);
- INIT_WORK(&vf->reset_task, otx2vf_reset_task);
-
/* To distinguish, for LBK VFs set netdev name explicitly */
if (is_otx2_lbkvf(vf->pdev)) {
int n;
@@ -606,6 +652,10 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_detach_rsrc;
}
+ err = otx2_wq_init(vf);
+ if (err)
+ goto err_unreg_netdev;
+
otx2vf_set_ethtool_ops(netdev);
/* Enable pause frames by default */
@@ -614,6 +664,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
+err_unreg_netdev:
+ unregister_netdev(netdev);
err_detach_rsrc:
if (hw->lmt_base)
iounmap(hw->lmt_base);
@@ -644,6 +696,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
cancel_work_sync(&vf->reset_task);
unregister_netdev(netdev);
+ if (vf->otx2_wq)
+ destroy_workqueue(vf->otx2_wq);
otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
diff --git a/drivers/net/ethernet/marvell/prestera/Makefile b/drivers/net/ethernet/marvell/prestera/Makefile
index 93129e32ebc5..0609df8b913d 100644
--- a/drivers/net/ethernet/marvell/prestera/Makefile
+++ b/drivers/net/ethernet/marvell/prestera/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_PRESTERA) += prestera.o
prestera-objs := prestera_main.o prestera_hw.o prestera_dsa.o \
prestera_rxtx.o prestera_devlink.o prestera_ethtool.o \
- prestera_switchdev.o
+ prestera_switchdev.o prestera_acl.o prestera_flow.o \
+ prestera_flower.o prestera_span.o
obj-$(CONFIG_PRESTERA_PCI) += prestera_pci.o
diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h
index 55aa4bf8a27c..f18fe664b373 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera.h
@@ -60,10 +60,22 @@ struct prestera_port_caps {
u8 transceiver;
};
+struct prestera_lag {
+ struct net_device *dev;
+ struct list_head members;
+ u16 member_count;
+ u16 lag_id;
+};
+
+struct prestera_flow_block;
+
struct prestera_port {
struct net_device *dev;
struct prestera_switch *sw;
+ struct prestera_flow_block *flow_block;
struct devlink_port dl_port;
+ struct list_head lag_member;
+ struct prestera_lag *lag;
u32 id;
u32 hw_id;
u32 dev_id;
@@ -127,6 +139,12 @@ struct prestera_port_event {
} data;
};
+enum prestera_fdb_entry_type {
+ PRESTERA_FDB_ENTRY_TYPE_REG_PORT,
+ PRESTERA_FDB_ENTRY_TYPE_LAG,
+ PRESTERA_FDB_ENTRY_TYPE_MAX
+};
+
enum prestera_fdb_event_id {
PRESTERA_FDB_EVENT_UNSPEC,
PRESTERA_FDB_EVENT_LEARNED,
@@ -134,7 +152,11 @@ enum prestera_fdb_event_id {
};
struct prestera_fdb_event {
- u32 port_id;
+ enum prestera_fdb_entry_type type;
+ union {
+ u32 port_id;
+ u16 lag_id;
+ } dest;
u32 vid;
union {
u8 mac[ETH_ALEN];
@@ -150,14 +172,20 @@ struct prestera_event {
};
struct prestera_switchdev;
+struct prestera_span;
struct prestera_rxtx;
+struct prestera_trap_data;
+struct prestera_acl;
struct prestera_switch {
struct prestera_device *dev;
struct prestera_switchdev *swdev;
struct prestera_rxtx *rxtx;
+ struct prestera_acl *acl;
+ struct prestera_span *span;
struct list_head event_handlers;
struct notifier_block netdev_nb;
+ struct prestera_trap_data *trap_data;
char base_mac[ETH_ALEN];
struct list_head port_list;
rwlock_t port_list_lock;
@@ -165,6 +193,9 @@ struct prestera_switch {
u32 mtu_min;
u32 mtu_max;
u8 id;
+ struct prestera_lag *lags;
+ u8 lag_member_max;
+ u8 lag_max;
};
struct prestera_rxtx_params {
@@ -203,4 +234,10 @@ int prestera_port_pvid_set(struct prestera_port *port, u16 vid);
bool prestera_netdev_check(const struct net_device *dev);
+bool prestera_port_is_lag_member(const struct prestera_port *port);
+
+struct prestera_lag *prestera_lag_by_id(struct prestera_switch *sw, u16 id);
+
+u16 prestera_port_lag_id(const struct prestera_port *port);
+
#endif /* _PRESTERA_H_ */
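With the dest union in place, FDB event consumers are expected to dispatch on the new type field before touching port_id or lag_id. A hedged sketch of such a consumer (the handler name and lookups are illustrative, not part of the patch):

static void example_fdb_event_handler(struct prestera_switch *sw,
				      struct prestera_event *evt)
{
	switch (evt->fdb_evt.type) {
	case PRESTERA_FDB_ENTRY_TYPE_REG_PORT:
		/* resolve the front port from evt->fdb_evt.dest.port_id */
		break;
	case PRESTERA_FDB_ENTRY_TYPE_LAG:
		/* resolve the bond via
		 * prestera_lag_by_id(sw, evt->fdb_evt.dest.lag_id)
		 */
		break;
	default:
		break;
	}
}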
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.c b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
new file mode 100644
index 000000000000..83c75ffb1a1c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/rhashtable.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_acl.h"
+#include "prestera_span.h"
+
+struct prestera_acl {
+ struct prestera_switch *sw;
+ struct list_head rules;
+};
+
+struct prestera_acl_ruleset {
+ struct rhashtable rule_ht;
+ struct prestera_switch *sw;
+ u16 id;
+};
+
+struct prestera_acl_rule {
+ struct rhash_head ht_node;
+ struct list_head list;
+ struct list_head match_list;
+ struct list_head action_list;
+ struct prestera_flow_block *block;
+ unsigned long cookie;
+ u32 priority;
+ u8 n_actions;
+ u8 n_matches;
+ u32 id;
+};
+
+static const struct rhashtable_params prestera_acl_rule_ht_params = {
+ .key_len = sizeof(unsigned long),
+ .key_offset = offsetof(struct prestera_acl_rule, cookie),
+ .head_offset = offsetof(struct prestera_acl_rule, ht_node),
+ .automatic_shrinking = true,
+};
+
+static struct prestera_acl_ruleset *
+prestera_acl_ruleset_create(struct prestera_switch *sw)
+{
+ struct prestera_acl_ruleset *ruleset;
+ int err;
+
+ ruleset = kzalloc(sizeof(*ruleset), GFP_KERNEL);
+ if (!ruleset)
+ return ERR_PTR(-ENOMEM);
+
+ err = rhashtable_init(&ruleset->rule_ht, &prestera_acl_rule_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+
+ err = prestera_hw_acl_ruleset_create(sw, &ruleset->id);
+ if (err)
+ goto err_ruleset_create;
+
+ ruleset->sw = sw;
+
+ return ruleset;
+
+err_ruleset_create:
+ rhashtable_destroy(&ruleset->rule_ht);
+err_rhashtable_init:
+ kfree(ruleset);
+ return ERR_PTR(err);
+}
+
+static void prestera_acl_ruleset_destroy(struct prestera_acl_ruleset *ruleset)
+{
+ prestera_hw_acl_ruleset_del(ruleset->sw, ruleset->id);
+ rhashtable_destroy(&ruleset->rule_ht);
+ kfree(ruleset);
+}
+
+struct prestera_flow_block *
+prestera_acl_block_create(struct prestera_switch *sw, struct net *net)
+{
+ struct prestera_flow_block *block;
+
+ block = kzalloc(sizeof(*block), GFP_KERNEL);
+ if (!block)
+ return NULL;
+ INIT_LIST_HEAD(&block->binding_list);
+ block->net = net;
+ block->sw = sw;
+
+ block->ruleset = prestera_acl_ruleset_create(sw);
+ if (IS_ERR(block->ruleset)) {
+ kfree(block);
+ return NULL;
+ }
+
+ return block;
+}
+
+void prestera_acl_block_destroy(struct prestera_flow_block *block)
+{
+ prestera_acl_ruleset_destroy(block->ruleset);
+ WARN_ON(!list_empty(&block->binding_list));
+ kfree(block);
+}
+
+static struct prestera_flow_block_binding *
+prestera_acl_block_lookup(struct prestera_flow_block *block,
+ struct prestera_port *port)
+{
+ struct prestera_flow_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ if (binding->port == port)
+ return binding;
+
+ return NULL;
+}
+
+int prestera_acl_block_bind(struct prestera_flow_block *block,
+ struct prestera_port *port)
+{
+ struct prestera_flow_block_binding *binding;
+ int err;
+
+ if (WARN_ON(prestera_acl_block_lookup(block, port)))
+ return -EEXIST;
+
+ binding = kzalloc(sizeof(*binding), GFP_KERNEL);
+ if (!binding)
+ return -ENOMEM;
+ binding->span_id = PRESTERA_SPAN_INVALID_ID;
+ binding->port = port;
+
+ err = prestera_hw_acl_port_bind(port, block->ruleset->id);
+ if (err)
+ goto err_rules_bind;
+
+ list_add(&binding->list, &block->binding_list);
+ return 0;
+
+err_rules_bind:
+ kfree(binding);
+ return err;
+}
+
+int prestera_acl_block_unbind(struct prestera_flow_block *block,
+ struct prestera_port *port)
+{
+ struct prestera_flow_block_binding *binding;
+
+ binding = prestera_acl_block_lookup(block, port);
+ if (!binding)
+ return -ENOENT;
+
+ list_del(&binding->list);
+
+ prestera_hw_acl_port_unbind(port, block->ruleset->id);
+
+ kfree(binding);
+ return 0;
+}
+
+struct prestera_acl_ruleset *
+prestera_acl_block_ruleset_get(struct prestera_flow_block *block)
+{
+ return block->ruleset;
+}
+
+u16 prestera_acl_rule_ruleset_id_get(const struct prestera_acl_rule *rule)
+{
+ return rule->block->ruleset->id;
+}
+
+struct net *prestera_acl_block_net(struct prestera_flow_block *block)
+{
+ return block->net;
+}
+
+struct prestera_switch *prestera_acl_block_sw(struct prestera_flow_block *block)
+{
+ return block->sw;
+}
+
+struct prestera_acl_rule *
+prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset,
+ unsigned long cookie)
+{
+ return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
+ prestera_acl_rule_ht_params);
+}
+
+struct prestera_acl_rule *
+prestera_acl_rule_create(struct prestera_flow_block *block,
+ unsigned long cookie)
+{
+ struct prestera_acl_rule *rule;
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&rule->match_list);
+ INIT_LIST_HEAD(&rule->action_list);
+ rule->cookie = cookie;
+ rule->block = block;
+
+ return rule;
+}
+
+struct list_head *
+prestera_acl_rule_match_list_get(struct prestera_acl_rule *rule)
+{
+ return &rule->match_list;
+}
+
+struct list_head *
+prestera_acl_rule_action_list_get(struct prestera_acl_rule *rule)
+{
+ return &rule->action_list;
+}
+
+int prestera_acl_rule_action_add(struct prestera_acl_rule *rule,
+ struct prestera_acl_rule_action_entry *entry)
+{
+ struct prestera_acl_rule_action_entry *a_entry;
+
+ a_entry = kmalloc(sizeof(*a_entry), GFP_KERNEL);
+ if (!a_entry)
+ return -ENOMEM;
+
+ memcpy(a_entry, entry, sizeof(*entry));
+ list_add(&a_entry->list, &rule->action_list);
+
+ rule->n_actions++;
+ return 0;
+}
+
+u8 prestera_acl_rule_action_len(struct prestera_acl_rule *rule)
+{
+ return rule->n_actions;
+}
+
+u32 prestera_acl_rule_priority_get(struct prestera_acl_rule *rule)
+{
+ return rule->priority;
+}
+
+void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule,
+ u32 priority)
+{
+ rule->priority = priority;
+}
+
+int prestera_acl_rule_match_add(struct prestera_acl_rule *rule,
+ struct prestera_acl_rule_match_entry *entry)
+{
+ struct prestera_acl_rule_match_entry *m_entry;
+
+ m_entry = kmalloc(sizeof(*m_entry), GFP_KERNEL);
+ if (!m_entry)
+ return -ENOMEM;
+
+ memcpy(m_entry, entry, sizeof(*entry));
+ list_add(&m_entry->list, &rule->match_list);
+
+ rule->n_matches++;
+ return 0;
+}
+
+u8 prestera_acl_rule_match_len(struct prestera_acl_rule *rule)
+{
+ return rule->n_matches;
+}
+
+void prestera_acl_rule_destroy(struct prestera_acl_rule *rule)
+{
+ struct prestera_acl_rule_action_entry *a_entry;
+ struct prestera_acl_rule_match_entry *m_entry;
+ struct list_head *pos, *n;
+
+ list_for_each_safe(pos, n, &rule->match_list) {
+ m_entry = list_entry(pos, typeof(*m_entry), list);
+ list_del(pos);
+ kfree(m_entry);
+ }
+
+ list_for_each_safe(pos, n, &rule->action_list) {
+ a_entry = list_entry(pos, typeof(*a_entry), list);
+ list_del(pos);
+ kfree(a_entry);
+ }
+
+ kfree(rule);
+}
+
+int prestera_acl_rule_add(struct prestera_switch *sw,
+ struct prestera_acl_rule *rule)
+{
+ u32 rule_id;
+ int err;
+
+ /* try to add rule to hash table first */
+ err = rhashtable_insert_fast(&rule->block->ruleset->rule_ht,
+ &rule->ht_node,
+ prestera_acl_rule_ht_params);
+ if (err)
+ return err;
+
+ /* add rule to hw */
+ err = prestera_hw_acl_rule_add(sw, rule, &rule_id);
+ if (err)
+ goto err_rule_add;
+
+ rule->id = rule_id;
+
+ list_add_tail(&rule->list, &sw->acl->rules);
+
+ return 0;
+
+err_rule_add:
+ rhashtable_remove_fast(&rule->block->ruleset->rule_ht, &rule->ht_node,
+ prestera_acl_rule_ht_params);
+ return err;
+}
+
+void prestera_acl_rule_del(struct prestera_switch *sw,
+ struct prestera_acl_rule *rule)
+{
+ rhashtable_remove_fast(&rule->block->ruleset->rule_ht, &rule->ht_node,
+ prestera_acl_rule_ht_params);
+ list_del(&rule->list);
+ prestera_hw_acl_rule_del(sw, rule->id);
+}
+
+int prestera_acl_rule_get_stats(struct prestera_switch *sw,
+ struct prestera_acl_rule *rule,
+ u64 *packets, u64 *bytes, u64 *last_use)
+{
+ u64 current_packets;
+ u64 current_bytes;
+ int err;
+
+ err = prestera_hw_acl_rule_stats_get(sw, rule->id, &current_packets,
+ &current_bytes);
+ if (err)
+ return err;
+
+ *packets = current_packets;
+ *bytes = current_bytes;
+ *last_use = jiffies;
+
+ return 0;
+}
+
+int prestera_acl_init(struct prestera_switch *sw)
+{
+ struct prestera_acl *acl;
+
+ acl = kzalloc(sizeof(*acl), GFP_KERNEL);
+ if (!acl)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&acl->rules);
+ sw->acl = acl;
+ acl->sw = sw;
+
+ return 0;
+}
+
+void prestera_acl_fini(struct prestera_switch *sw)
+{
+ struct prestera_acl *acl = sw->acl;
+
+ WARN_ON(!list_empty(&acl->rules));
+ kfree(acl);
+}
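Taken together, this file implements a simple lifecycle for each offloaded rule. A condensed sketch of the sequence the flower glue added later in this patch drives, using only the API above (error handling trimmed, match/action population elided):

static int example_rule_lifecycle(struct prestera_switch *sw,
				  struct prestera_flow_block *block,
				  unsigned long cookie)
{
	struct prestera_acl_rule *rule;
	int err;

	rule = prestera_acl_rule_create(block, cookie);
	if (IS_ERR(rule))
		return PTR_ERR(rule);

	/* prestera_acl_rule_match_add()/_action_add() calls go here */

	err = prestera_acl_rule_add(sw, rule);	/* ht insert + hw add */
	if (err) {
		prestera_acl_rule_destroy(rule);
		return err;
	}

	prestera_acl_rule_del(sw, rule);	/* ht remove + hw del */
	prestera_acl_rule_destroy(rule);	/* free match/action lists */
	return 0;
}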
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_acl.h b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
new file mode 100644
index 000000000000..39b7869be659
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_acl.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_ACL_H_
+#define _PRESTERA_ACL_H_
+
+enum prestera_acl_rule_match_entry_type {
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE = 1,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_DMAC,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_SMAC,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_PROTO,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_SRC,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_SRC,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_SRC,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_DST,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE,
+ PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE
+};
+
+enum prestera_acl_rule_action {
+ PRESTERA_ACL_RULE_ACTION_ACCEPT,
+ PRESTERA_ACL_RULE_ACTION_DROP,
+ PRESTERA_ACL_RULE_ACTION_TRAP
+};
+
+struct prestera_switch;
+struct prestera_port;
+struct prestera_acl_rule;
+struct prestera_acl_ruleset;
+
+struct prestera_flow_block_binding {
+ struct list_head list;
+ struct prestera_port *port;
+ int span_id;
+};
+
+struct prestera_flow_block {
+ struct list_head binding_list;
+ struct prestera_switch *sw;
+ struct net *net;
+ struct prestera_acl_ruleset *ruleset;
+ struct flow_block_cb *block_cb;
+};
+
+struct prestera_acl_rule_action_entry {
+ struct list_head list;
+ enum prestera_acl_rule_action id;
+};
+
+struct prestera_acl_rule_match_entry {
+ struct list_head list;
+ enum prestera_acl_rule_match_entry_type type;
+ union {
+ struct {
+ u8 key;
+ u8 mask;
+ } u8;
+ struct {
+ u16 key;
+ u16 mask;
+ } u16;
+ struct {
+ u32 key;
+ u32 mask;
+ } u32;
+ struct {
+ u64 key;
+ u64 mask;
+ } u64;
+ struct {
+ u8 key[ETH_ALEN];
+ u8 mask[ETH_ALEN];
+ } mac;
+ } keymask;
+};
+
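For illustration, an exact match on VLAN 100 would populate the u16 leg of the keymask union as below; the 0xfff mask assumes the usual 12-bit VLAN ID field:

	struct prestera_acl_rule_match_entry m_entry = {};

	m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID;
	m_entry.keymask.u16.key  = 100;		/* VLAN ID to match */
	m_entry.keymask.u16.mask = 0xfff;	/* all 12 VLAN ID bits */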
+int prestera_acl_init(struct prestera_switch *sw);
+void prestera_acl_fini(struct prestera_switch *sw);
+struct prestera_flow_block *
+prestera_acl_block_create(struct prestera_switch *sw, struct net *net);
+void prestera_acl_block_destroy(struct prestera_flow_block *block);
+struct net *prestera_acl_block_net(struct prestera_flow_block *block);
+struct prestera_switch *prestera_acl_block_sw(struct prestera_flow_block *block);
+int prestera_acl_block_bind(struct prestera_flow_block *block,
+ struct prestera_port *port);
+int prestera_acl_block_unbind(struct prestera_flow_block *block,
+ struct prestera_port *port);
+struct prestera_acl_ruleset *
+prestera_acl_block_ruleset_get(struct prestera_flow_block *block);
+struct prestera_acl_rule *
+prestera_acl_rule_create(struct prestera_flow_block *block,
+ unsigned long cookie);
+u32 prestera_acl_rule_priority_get(struct prestera_acl_rule *rule);
+void prestera_acl_rule_priority_set(struct prestera_acl_rule *rule,
+ u32 priority);
+u16 prestera_acl_rule_ruleset_id_get(const struct prestera_acl_rule *rule);
+struct list_head *
+prestera_acl_rule_action_list_get(struct prestera_acl_rule *rule);
+u8 prestera_acl_rule_action_len(struct prestera_acl_rule *rule);
+u8 prestera_acl_rule_match_len(struct prestera_acl_rule *rule);
+int prestera_acl_rule_action_add(struct prestera_acl_rule *rule,
+ struct prestera_acl_rule_action_entry *entry);
+struct list_head *
+prestera_acl_rule_match_list_get(struct prestera_acl_rule *rule);
+int prestera_acl_rule_match_add(struct prestera_acl_rule *rule,
+ struct prestera_acl_rule_match_entry *entry);
+void prestera_acl_rule_destroy(struct prestera_acl_rule *rule);
+struct prestera_acl_rule *
+prestera_acl_rule_lookup(struct prestera_acl_ruleset *ruleset,
+ unsigned long cookie);
+int prestera_acl_rule_add(struct prestera_switch *sw,
+ struct prestera_acl_rule *rule);
+void prestera_acl_rule_del(struct prestera_switch *sw,
+ struct prestera_acl_rule *rule);
+int prestera_acl_rule_get_stats(struct prestera_switch *sw,
+ struct prestera_acl_rule *rule,
+ u64 *packets, u64 *bytes, u64 *last_use);
+
+#endif /* _PRESTERA_ACL_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
index 94c185a0e2b8..d12e21db9fd6 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
@@ -4,6 +4,352 @@
#include <net/devlink.h>
#include "prestera_devlink.h"
+#include "prestera_hw.h"
+
+/* All driver-specific traps must be documented in
+ * Documentation/networking/devlink/prestera.rst
+ */
+enum {
+ DEVLINK_PRESTERA_TRAP_ID_BASE = DEVLINK_TRAP_GENERIC_ID_MAX,
+ DEVLINK_PRESTERA_TRAP_ID_ARP_BC,
+ DEVLINK_PRESTERA_TRAP_ID_IS_IS,
+ DEVLINK_PRESTERA_TRAP_ID_OSPF,
+ DEVLINK_PRESTERA_TRAP_ID_IP_BC_MAC,
+ DEVLINK_PRESTERA_TRAP_ID_ROUTER_MC,
+ DEVLINK_PRESTERA_TRAP_ID_VRRP,
+ DEVLINK_PRESTERA_TRAP_ID_DHCP,
+ DEVLINK_PRESTERA_TRAP_ID_MAC_TO_ME,
+ DEVLINK_PRESTERA_TRAP_ID_IPV4_OPTIONS,
+ DEVLINK_PRESTERA_TRAP_ID_IP_DEFAULT_ROUTE,
+ DEVLINK_PRESTERA_TRAP_ID_IP_TO_ME,
+ DEVLINK_PRESTERA_TRAP_ID_IPV4_ICMP_REDIRECT,
+ DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_0,
+ DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_1,
+ DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_2,
+ DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_3,
+ DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_4,
+ DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_5,
+ DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_6,
+ DEVLINK_PRESTERA_TRAP_ID_ACL_CODE_7,
+ DEVLINK_PRESTERA_TRAP_ID_BGP,
+ DEVLINK_PRESTERA_TRAP_ID_SSH,
+ DEVLINK_PRESTERA_TRAP_ID_TELNET,
+ DEVLINK_PRESTERA_TRAP_ID_ICMP,
+ DEVLINK_PRESTERA_TRAP_ID_MET_RED,
+ DEVLINK_PRESTERA_TRAP_ID_IP_SIP_IS_ZERO,
+ DEVLINK_PRESTERA_TRAP_ID_IP_UC_DIP_DA_MISMATCH,
+ DEVLINK_PRESTERA_TRAP_ID_ILLEGAL_IPV4_HDR,
+ DEVLINK_PRESTERA_TRAP_ID_ILLEGAL_IP_ADDR,
+ DEVLINK_PRESTERA_TRAP_ID_INVALID_SA,
+ DEVLINK_PRESTERA_TRAP_ID_LOCAL_PORT,
+ DEVLINK_PRESTERA_TRAP_ID_PORT_NO_VLAN,
+ DEVLINK_PRESTERA_TRAP_ID_RXDMA_DROP,
+};
+
+#define DEVLINK_PRESTERA_TRAP_NAME_ARP_BC \
+ "arp_bc"
+#define DEVLINK_PRESTERA_TRAP_NAME_IS_IS \
+ "is_is"
+#define DEVLINK_PRESTERA_TRAP_NAME_OSPF \
+ "ospf"
+#define DEVLINK_PRESTERA_TRAP_NAME_IP_BC_MAC \
+ "ip_bc_mac"
+#define DEVLINK_PRESTERA_TRAP_NAME_ROUTER_MC \
+ "router_mc"
+#define DEVLINK_PRESTERA_TRAP_NAME_VRRP \
+ "vrrp"
+#define DEVLINK_PRESTERA_TRAP_NAME_DHCP \
+ "dhcp"
+#define DEVLINK_PRESTERA_TRAP_NAME_MAC_TO_ME \
+ "mac_to_me"
+#define DEVLINK_PRESTERA_TRAP_NAME_IPV4_OPTIONS \
+ "ipv4_options"
+#define DEVLINK_PRESTERA_TRAP_NAME_IP_DEFAULT_ROUTE \
+ "ip_default_route"
+#define DEVLINK_PRESTERA_TRAP_NAME_IP_TO_ME \
+ "ip_to_me"
+#define DEVLINK_PRESTERA_TRAP_NAME_IPV4_ICMP_REDIRECT \
+ "ipv4_icmp_redirect"
+#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_0 \
+ "acl_code_0"
+#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_1 \
+ "acl_code_1"
+#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_2 \
+ "acl_code_2"
+#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_3 \
+ "acl_code_3"
+#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_4 \
+ "acl_code_4"
+#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_5 \
+ "acl_code_5"
+#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_6 \
+ "acl_code_6"
+#define DEVLINK_PRESTERA_TRAP_NAME_ACL_CODE_7 \
+ "acl_code_7"
+#define DEVLINK_PRESTERA_TRAP_NAME_BGP \
+ "bgp"
+#define DEVLINK_PRESTERA_TRAP_NAME_SSH \
+ "ssh"
+#define DEVLINK_PRESTERA_TRAP_NAME_TELNET \
+ "telnet"
+#define DEVLINK_PRESTERA_TRAP_NAME_ICMP \
+ "icmp"
+#define DEVLINK_PRESTERA_TRAP_NAME_RXDMA_DROP \
+ "rxdma_drop"
+#define DEVLINK_PRESTERA_TRAP_NAME_PORT_NO_VLAN \
+ "port_no_vlan"
+#define DEVLINK_PRESTERA_TRAP_NAME_LOCAL_PORT \
+ "local_port"
+#define DEVLINK_PRESTERA_TRAP_NAME_INVALID_SA \
+ "invalid_sa"
+#define DEVLINK_PRESTERA_TRAP_NAME_ILLEGAL_IP_ADDR \
+ "illegal_ip_addr"
+#define DEVLINK_PRESTERA_TRAP_NAME_ILLEGAL_IPV4_HDR \
+ "illegal_ipv4_hdr"
+#define DEVLINK_PRESTERA_TRAP_NAME_IP_UC_DIP_DA_MISMATCH \
+ "ip_uc_dip_da_mismatch"
+#define DEVLINK_PRESTERA_TRAP_NAME_IP_SIP_IS_ZERO \
+ "ip_sip_is_zero"
+#define DEVLINK_PRESTERA_TRAP_NAME_MET_RED \
+ "met_red"
+
+struct prestera_trap {
+ struct devlink_trap trap;
+ u8 cpu_code;
+};
+
+struct prestera_trap_item {
+ enum devlink_trap_action action;
+ void *trap_ctx;
+};
+
+struct prestera_trap_data {
+ struct prestera_switch *sw;
+ struct prestera_trap_item *trap_items_arr;
+ u32 traps_count;
+};
+
+#define PRESTERA_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT
+
+#define PRESTERA_TRAP_CONTROL(_id, _group_id, _action) \
+ DEVLINK_TRAP_GENERIC(CONTROL, _action, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ PRESTERA_TRAP_METADATA)
+
+#define PRESTERA_TRAP_DRIVER_CONTROL(_id, _group_id) \
+ DEVLINK_TRAP_DRIVER(CONTROL, TRAP, DEVLINK_PRESTERA_TRAP_ID_##_id, \
+ DEVLINK_PRESTERA_TRAP_NAME_##_id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ PRESTERA_TRAP_METADATA)
+
+#define PRESTERA_TRAP_EXCEPTION(_id, _group_id) \
+ DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ PRESTERA_TRAP_METADATA)
+
+#define PRESTERA_TRAP_DRIVER_EXCEPTION(_id, _group_id) \
+ DEVLINK_TRAP_DRIVER(EXCEPTION, TRAP, DEVLINK_PRESTERA_TRAP_ID_##_id, \
+ DEVLINK_PRESTERA_TRAP_NAME_##_id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ PRESTERA_TRAP_METADATA)
+
+#define PRESTERA_TRAP_DRIVER_DROP(_id, _group_id) \
+ DEVLINK_TRAP_DRIVER(DROP, DROP, DEVLINK_PRESTERA_TRAP_ID_##_id, \
+ DEVLINK_PRESTERA_TRAP_NAME_##_id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ PRESTERA_TRAP_METADATA)
+
+static const struct devlink_trap_group prestera_trap_groups_arr[] = {
+ /* No policer is associated with the following groups (policer id == 0). */
+ DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(L3_DROPS, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(L3_EXCEPTIONS, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(NEIGH_DISCOVERY, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(ACL_TRAP, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(ACL_DROPS, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(ACL_SAMPLE, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(OSPF, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(STP, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(LACP, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(LLDP, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(VRRP, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(DHCP, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(BGP, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(LOCAL_DELIVERY, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(BUFFER_DROPS, 0),
+};
+
+/* Initialize the trap list and associate each trap with its hardware CPU code. */
+static struct prestera_trap prestera_trap_items_arr[] = {
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ARP_BC, NEIGH_DISCOVERY),
+ .cpu_code = 5,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(IS_IS, LOCAL_DELIVERY),
+ .cpu_code = 13,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(OSPF, OSPF),
+ .cpu_code = 16,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(IP_BC_MAC, LOCAL_DELIVERY),
+ .cpu_code = 19,
+ },
+ {
+ .trap = PRESTERA_TRAP_CONTROL(STP, STP, TRAP),
+ .cpu_code = 26,
+ },
+ {
+ .trap = PRESTERA_TRAP_CONTROL(LACP, LACP, TRAP),
+ .cpu_code = 27,
+ },
+ {
+ .trap = PRESTERA_TRAP_CONTROL(LLDP, LLDP, TRAP),
+ .cpu_code = 28,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ROUTER_MC, LOCAL_DELIVERY),
+ .cpu_code = 29,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(VRRP, VRRP),
+ .cpu_code = 30,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(DHCP, DHCP),
+ .cpu_code = 33,
+ },
+ {
+ .trap = PRESTERA_TRAP_EXCEPTION(MTU_ERROR, L3_EXCEPTIONS),
+ .cpu_code = 63,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(MAC_TO_ME, LOCAL_DELIVERY),
+ .cpu_code = 65,
+ },
+ {
+ .trap = PRESTERA_TRAP_EXCEPTION(TTL_ERROR, L3_EXCEPTIONS),
+ .cpu_code = 133,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_EXCEPTION(IPV4_OPTIONS,
+ L3_EXCEPTIONS),
+ .cpu_code = 141,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(IP_DEFAULT_ROUTE,
+ LOCAL_DELIVERY),
+ .cpu_code = 160,
+ },
+ {
+ .trap = PRESTERA_TRAP_CONTROL(LOCAL_ROUTE, LOCAL_DELIVERY,
+ TRAP),
+ .cpu_code = 161,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_EXCEPTION(IPV4_ICMP_REDIRECT,
+ L3_EXCEPTIONS),
+ .cpu_code = 180,
+ },
+ {
+ .trap = PRESTERA_TRAP_CONTROL(ARP_RESPONSE, NEIGH_DISCOVERY,
+ TRAP),
+ .cpu_code = 188,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_0, ACL_TRAP),
+ .cpu_code = 192,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_1, ACL_TRAP),
+ .cpu_code = 193,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_2, ACL_TRAP),
+ .cpu_code = 194,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_3, ACL_TRAP),
+ .cpu_code = 195,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_4, ACL_TRAP),
+ .cpu_code = 196,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_5, ACL_TRAP),
+ .cpu_code = 197,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_6, ACL_TRAP),
+ .cpu_code = 198,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ACL_CODE_7, ACL_TRAP),
+ .cpu_code = 199,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(BGP, BGP),
+ .cpu_code = 206,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(SSH, LOCAL_DELIVERY),
+ .cpu_code = 207,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(TELNET, LOCAL_DELIVERY),
+ .cpu_code = 208,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_CONTROL(ICMP, LOCAL_DELIVERY),
+ .cpu_code = 209,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(RXDMA_DROP, BUFFER_DROPS),
+ .cpu_code = 37,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(PORT_NO_VLAN, L2_DROPS),
+ .cpu_code = 39,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(LOCAL_PORT, L2_DROPS),
+ .cpu_code = 56,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(INVALID_SA, L2_DROPS),
+ .cpu_code = 60,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(ILLEGAL_IP_ADDR, L3_DROPS),
+ .cpu_code = 136,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(ILLEGAL_IPV4_HDR, L3_DROPS),
+ .cpu_code = 137,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(IP_UC_DIP_DA_MISMATCH,
+ L3_DROPS),
+ .cpu_code = 138,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(IP_SIP_IS_ZERO, L3_DROPS),
+ .cpu_code = 145,
+ },
+ {
+ .trap = PRESTERA_TRAP_DRIVER_DROP(MET_RED, BUFFER_DROPS),
+ .cpu_code = 185,
+ },
+};
+
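The three macro flavors used in this table differ only in which devlink initializer they wrap: generic traps reuse devlink's own IDs and names, while the DRIVER variants pair a driver-private ID with one of the name strings defined above. Expanding one entry by hand (purely for reference, following the macro definitions earlier in this file):

/* PRESTERA_TRAP_DRIVER_CONTROL(BGP, BGP) expands to: */
DEVLINK_TRAP_DRIVER(CONTROL, TRAP,
		    DEVLINK_PRESTERA_TRAP_ID_BGP,	/* driver trap ID */
		    DEVLINK_PRESTERA_TRAP_NAME_BGP,	/* "bgp"          */
		    DEVLINK_TRAP_GROUP_GENERIC_ID_BGP,	/* trap group     */
		    PRESTERA_TRAP_METADATA)		/* in-port meta   */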
+static void prestera_devlink_traps_fini(struct prestera_switch *sw);
+
+static int prestera_drop_counter_get(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ u64 *p_drops);
static int prestera_dl_info_get(struct devlink *dl,
struct devlink_info_req *req,
@@ -27,8 +373,21 @@ static int prestera_dl_info_get(struct devlink *dl,
buf);
}
+static int prestera_trap_init(struct devlink *devlink,
+ const struct devlink_trap *trap, void *trap_ctx);
+
+static int prestera_trap_action_set(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack);
+
+static int prestera_devlink_traps_register(struct prestera_switch *sw);
+
static const struct devlink_ops prestera_dl_ops = {
.info_get = prestera_dl_info_get,
+ .trap_init = prestera_trap_init,
+ .trap_action_set = prestera_trap_action_set,
+ .trap_drop_counter_get = prestera_drop_counter_get,
};
struct prestera_switch *prestera_devlink_alloc(void)
@@ -53,17 +412,32 @@ int prestera_devlink_register(struct prestera_switch *sw)
int err;
err = devlink_register(dl, sw->dev->dev);
- if (err)
+ if (err) {
dev_err(prestera_dev(sw), "devlink_register failed: %d\n", err);
+ return err;
+ }
- return err;
+ err = prestera_devlink_traps_register(sw);
+ if (err) {
+ devlink_unregister(dl);
+ dev_err(sw->dev->dev, "devlink_traps_register failed: %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
}
void prestera_devlink_unregister(struct prestera_switch *sw)
{
+ struct prestera_trap_data *trap_data = sw->trap_data;
struct devlink *dl = priv_to_devlink(sw);
+ prestera_devlink_traps_fini(sw);
devlink_unregister(dl);
+
+ kfree(trap_data->trap_items_arr);
+ kfree(trap_data);
}
int prestera_devlink_port_register(struct prestera_port *port)
@@ -110,3 +484,155 @@ struct devlink_port *prestera_devlink_get_port(struct net_device *dev)
return &port->dl_port;
}
+
+static int prestera_devlink_traps_register(struct prestera_switch *sw)
+{
+ const u32 groups_count = ARRAY_SIZE(prestera_trap_groups_arr);
+ const u32 traps_count = ARRAY_SIZE(prestera_trap_items_arr);
+ struct devlink *devlink = priv_to_devlink(sw);
+ struct prestera_trap_data *trap_data;
+ struct prestera_trap *prestera_trap;
+ int err, i;
+
+ trap_data = kzalloc(sizeof(*trap_data), GFP_KERNEL);
+ if (!trap_data)
+ return -ENOMEM;
+
+ trap_data->trap_items_arr = kcalloc(traps_count,
+ sizeof(struct prestera_trap_item),
+ GFP_KERNEL);
+ if (!trap_data->trap_items_arr) {
+ err = -ENOMEM;
+ goto err_trap_items_alloc;
+ }
+
+ trap_data->sw = sw;
+ trap_data->traps_count = traps_count;
+ sw->trap_data = trap_data;
+
+ err = devlink_trap_groups_register(devlink, prestera_trap_groups_arr,
+ groups_count);
+ if (err)
+ goto err_groups_register;
+
+ for (i = 0; i < traps_count; i++) {
+ prestera_trap = &prestera_trap_items_arr[i];
+ err = devlink_traps_register(devlink, &prestera_trap->trap, 1,
+ sw);
+ if (err)
+ goto err_trap_register;
+ }
+
+ return 0;
+
+err_trap_register:
+ for (i--; i >= 0; i--) {
+ prestera_trap = &prestera_trap_items_arr[i];
+ devlink_traps_unregister(devlink, &prestera_trap->trap, 1);
+ }
+err_groups_register:
+ kfree(trap_data->trap_items_arr);
+err_trap_items_alloc:
+ kfree(trap_data);
+ return err;
+}
+
+static struct prestera_trap_item *
+prestera_get_trap_item_by_cpu_code(struct prestera_switch *sw, u8 cpu_code)
+{
+ struct prestera_trap_data *trap_data = sw->trap_data;
+ struct prestera_trap *prestera_trap;
+ int i;
+
+ for (i = 0; i < trap_data->traps_count; i++) {
+ prestera_trap = &prestera_trap_items_arr[i];
+ if (cpu_code == prestera_trap->cpu_code)
+ return &trap_data->trap_items_arr[i];
+ }
+
+ return NULL;
+}
+
+void prestera_devlink_trap_report(struct prestera_port *port,
+ struct sk_buff *skb, u8 cpu_code)
+{
+ struct prestera_trap_item *trap_item;
+ struct devlink *devlink;
+
+ devlink = port->dl_port.devlink;
+
+ trap_item = prestera_get_trap_item_by_cpu_code(port->sw, cpu_code);
+ if (unlikely(!trap_item))
+ return;
+
+ devlink_trap_report(devlink, skb, trap_item->trap_ctx,
+ &port->dl_port, NULL);
+}
+
+static struct prestera_trap_item *
+prestera_devlink_trap_item_lookup(struct prestera_switch *sw, u16 trap_id)
+{
+ struct prestera_trap_data *trap_data = sw->trap_data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(prestera_trap_items_arr); i++) {
+ if (prestera_trap_items_arr[i].trap.id == trap_id)
+ return &trap_data->trap_items_arr[i];
+ }
+
+ return NULL;
+}
+
+static int prestera_trap_init(struct devlink *devlink,
+ const struct devlink_trap *trap, void *trap_ctx)
+{
+ struct prestera_switch *sw = devlink_priv(devlink);
+ struct prestera_trap_item *trap_item;
+
+ trap_item = prestera_devlink_trap_item_lookup(sw, trap->id);
+ if (WARN_ON(!trap_item))
+ return -EINVAL;
+
+ trap_item->trap_ctx = trap_ctx;
+ trap_item->action = trap->init_action;
+
+ return 0;
+}
+
+static int prestera_trap_action_set(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack)
+{
+ /* Currently, the driver does not support altering trap actions */
+ return -EOPNOTSUPP;
+}
+
+static int prestera_drop_counter_get(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ u64 *p_drops)
+{
+ struct prestera_switch *sw = devlink_priv(devlink);
+ enum prestera_hw_cpu_code_cnt_t cpu_code_type =
+ PRESTERA_HW_CPU_CODE_CNT_TYPE_DROP;
+ struct prestera_trap *prestera_trap =
+ container_of(trap, struct prestera_trap, trap);
+
+ return prestera_hw_cpu_code_counters_get(sw, prestera_trap->cpu_code,
+ cpu_code_type, p_drops);
+}
+
+static void prestera_devlink_traps_fini(struct prestera_switch *sw)
+{
+ struct devlink *dl = priv_to_devlink(sw);
+ const struct devlink_trap *trap;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(prestera_trap_items_arr); ++i) {
+ trap = &prestera_trap_items_arr[i].trap;
+ devlink_traps_unregister(dl, trap, 1);
+ }
+
+ devlink_trap_groups_unregister(dl, prestera_trap_groups_arr,
+ ARRAY_SIZE(prestera_trap_groups_arr));
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
index 51bee9f75415..5d73aa9db897 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
@@ -20,4 +20,7 @@ void prestera_devlink_port_clear(struct prestera_port *port);
struct devlink_port *prestera_devlink_get_port(struct net_device *dev);
+void prestera_devlink_trap_report(struct prestera_port *port,
+ struct sk_buff *skb, u8 cpu_code);
+
#endif /* _PRESTERA_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_dsa.c b/drivers/net/ethernet/marvell/prestera/prestera_dsa.c
index a5e01c7a307b..b7e89c0ca5c0 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_dsa.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_dsa.c
@@ -19,6 +19,7 @@
#define PRESTERA_DSA_W1_EXT_BIT BIT(31)
#define PRESTERA_DSA_W1_CFI_BIT BIT(30)
#define PRESTERA_DSA_W1_PORT_NUM GENMASK(11, 10)
+#define PRESTERA_DSA_W1_MASK_CPU_CODE GENMASK(7, 0)
#define PRESTERA_DSA_W2_EXT_BIT BIT(31)
#define PRESTERA_DSA_W2_PORT_NUM BIT(20)
@@ -74,6 +75,8 @@ int prestera_dsa_parse(struct prestera_dsa *dsa, const u8 *dsa_buf)
(FIELD_GET(PRESTERA_DSA_W1_PORT_NUM, words[1]) << 5) |
(FIELD_GET(PRESTERA_DSA_W2_PORT_NUM, words[2]) << 7);
+ dsa->cpu_code = FIELD_GET(PRESTERA_DSA_W1_MASK_CPU_CODE, words[1]);
+
return 0;
}
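The rx hunk that consumes this field is not part of this section, but the intended flow is that the receive handler parses the DSA tag and feeds the extracted code straight into the new devlink trap reporting. A hedged sketch of that call site (function and parameter names are illustrative):

static void example_rx_trap(struct prestera_port *port, struct sk_buff *skb,
			    const u8 *dsa_buf)
{
	struct prestera_dsa dsa;

	/* dsa_buf points at the DSA tag within the received frame */
	if (prestera_dsa_parse(&dsa, dsa_buf) == 0)
		prestera_devlink_trap_report(port, skb, dsa.cpu_code);
}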
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_dsa.h b/drivers/net/ethernet/marvell/prestera/prestera_dsa.h
index 67018629bdd2..c99342f475cf 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_dsa.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_dsa.h
@@ -27,6 +27,7 @@ struct prestera_dsa {
struct prestera_dsa_vlan vlan;
u32 hw_dev_num;
u32 port_num;
+ u8 cpu_code;
};
int prestera_dsa_parse(struct prestera_dsa *dsa, const u8 *dsa_buf);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.c b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
new file mode 100644
index 000000000000..c9891e968259
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include "prestera.h"
+#include "prestera_acl.h"
+#include "prestera_flow.h"
+#include "prestera_span.h"
+#include "prestera_flower.h"
+
+static LIST_HEAD(prestera_block_cb_list);
+
+static int prestera_flow_block_mall_cb(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ switch (f->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return prestera_span_replace(block, f);
+ case TC_CLSMATCHALL_DESTROY:
+ prestera_span_destroy(block);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int prestera_flow_block_flower_cb(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ if (f->common.chain_index != 0)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return prestera_flower_replace(block, f);
+ case FLOW_CLS_DESTROY:
+ prestera_flower_destroy(block, f);
+ return 0;
+ case FLOW_CLS_STATS:
+ return prestera_flower_stats(block, f);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int prestera_flow_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct prestera_flow_block *block = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return prestera_flow_block_flower_cb(block, type_data);
+ case TC_SETUP_CLSMATCHALL:
+ return prestera_flow_block_mall_cb(block, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void prestera_flow_block_release(void *cb_priv)
+{
+ struct prestera_flow_block *block = cb_priv;
+
+ prestera_acl_block_destroy(block);
+}
+
+static struct prestera_flow_block *
+prestera_flow_block_get(struct prestera_switch *sw,
+ struct flow_block_offload *f,
+ bool *register_block)
+{
+ struct prestera_flow_block *block;
+ struct flow_block_cb *block_cb;
+
+ block_cb = flow_block_cb_lookup(f->block,
+ prestera_flow_block_cb, sw);
+ if (!block_cb) {
+ block = prestera_acl_block_create(sw, f->net);
+ if (!block)
+ return ERR_PTR(-ENOMEM);
+
+ block_cb = flow_block_cb_alloc(prestera_flow_block_cb,
+ sw, block,
+ prestera_flow_block_release);
+ if (IS_ERR(block_cb)) {
+ prestera_acl_block_destroy(block);
+ return ERR_CAST(block_cb);
+ }
+
+ block->block_cb = block_cb;
+ *register_block = true;
+ } else {
+ block = flow_block_cb_priv(block_cb);
+ *register_block = false;
+ }
+
+ flow_block_cb_incref(block_cb);
+
+ return block;
+}
+
+static void prestera_flow_block_put(struct prestera_flow_block *block)
+{
+ struct flow_block_cb *block_cb = block->block_cb;
+
+ if (flow_block_cb_decref(block_cb))
+ return;
+
+ flow_block_cb_free(block_cb);
+ prestera_acl_block_destroy(block);
+}
+
+static int prestera_setup_flow_block_bind(struct prestera_port *port,
+ struct flow_block_offload *f)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_flow_block *block;
+ struct flow_block_cb *block_cb;
+ bool register_block;
+ int err;
+
+ block = prestera_flow_block_get(sw, f, &register_block);
+ if (IS_ERR(block))
+ return PTR_ERR(block);
+
+ block_cb = block->block_cb;
+
+ err = prestera_acl_block_bind(block, port);
+ if (err)
+ goto err_block_bind;
+
+ if (register_block) {
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &prestera_block_cb_list);
+ }
+
+ port->flow_block = block;
+ return 0;
+
+err_block_bind:
+ prestera_flow_block_put(block);
+
+ return err;
+}
+
+static void prestera_setup_flow_block_unbind(struct prestera_port *port,
+ struct flow_block_offload *f)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_flow_block *block;
+ struct flow_block_cb *block_cb;
+ int err;
+
+ block_cb = flow_block_cb_lookup(f->block, prestera_flow_block_cb, sw);
+ if (!block_cb)
+ return;
+
+ block = flow_block_cb_priv(block_cb);
+
+ prestera_span_destroy(block);
+
+ err = prestera_acl_block_unbind(block, port);
+ if (err)
+ goto error;
+
+ if (!flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ }
+error:
+ port->flow_block = NULL;
+}
+
+int prestera_flow_block_setup(struct prestera_port *port,
+ struct flow_block_offload *f)
+{
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ f->driver_block_list = &prestera_block_cb_list;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ return prestera_setup_flow_block_bind(port, f);
+ case FLOW_BLOCK_UNBIND:
+ prestera_setup_flow_block_unbind(port, f);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
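The per-port hook that funnels TC offload requests into this file lives in prestera_main.c and is outside this section; a minimal sketch of what that glue is expected to look like:

static int prestera_port_setup_tc(struct net_device *dev,
				  enum tc_setup_type type, void *type_data)
{
	struct prestera_port *port = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return prestera_flow_block_setup(port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}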
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flow.h b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
new file mode 100644
index 000000000000..467c7038cace
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flow.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_FLOW_H_
+#define _PRESTERA_FLOW_H_
+
+#include <net/flow_offload.h>
+
+struct prestera_port;
+
+int prestera_flow_block_setup(struct prestera_port *port,
+ struct flow_block_offload *f);
+
+#endif /* _PRESTERA_FLOW_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
new file mode 100644
index 000000000000..e571ba09ec08
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */
+
+#include "prestera.h"
+#include "prestera_acl.h"
+#include "prestera_flower.h"
+
+static int prestera_flower_parse_actions(struct prestera_flow_block *block,
+ struct prestera_acl_rule *rule,
+ struct flow_action *flow_action,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_acl_rule_action_entry a_entry;
+ const struct flow_action_entry *act;
+ int err, i;
+
+ if (!flow_action_has_entries(flow_action))
+ return 0;
+
+ flow_action_for_each(i, act, flow_action) {
+ memset(&a_entry, 0, sizeof(a_entry));
+
+ switch (act->id) {
+ case FLOW_ACTION_ACCEPT:
+ a_entry.id = PRESTERA_ACL_RULE_ACTION_ACCEPT;
+ break;
+ case FLOW_ACTION_DROP:
+ a_entry.id = PRESTERA_ACL_RULE_ACTION_DROP;
+ break;
+ case FLOW_ACTION_TRAP:
+ a_entry.id = PRESTERA_ACL_RULE_ACTION_TRAP;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
+ pr_err("Unsupported action\n");
+ return -EOPNOTSUPP;
+ }
+
+ err = prestera_acl_rule_action_add(rule, &a_entry);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
+ struct flow_cls_offload *f,
+ struct prestera_flow_block *block)
+{
+ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
+ struct prestera_acl_rule_match_entry m_entry = {0};
+ struct net_device *ingress_dev;
+ struct flow_match_meta match;
+ struct prestera_port *port;
+
+ flow_rule_match_meta(f_rule, &match);
+ if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Unsupported ingress ifindex mask");
+ return -EINVAL;
+ }
+
+ ingress_dev = __dev_get_by_index(prestera_acl_block_net(block),
+ match.key->ingress_ifindex);
+ if (!ingress_dev) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Can't find specified ingress port to match on");
+ return -EINVAL;
+ }
+
+ if (!prestera_netdev_check(ingress_dev)) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Can't match on switchdev ingress port");
+ return -EINVAL;
+ }
+ port = netdev_priv(ingress_dev);
+
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT;
+ m_entry.keymask.u64.key = port->hw_id | ((u64)port->dev_id << 32);
+ m_entry.keymask.u64.mask = ~(u64)0;
+
+ return prestera_acl_rule_match_add(rule, &m_entry);
+}
+
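The PORT match above packs both hardware identifiers into a single 64-bit key/mask pair, so the all-ones mask pins the rule to exactly one front port:

	u64 key  = port->hw_id | ((u64)port->dev_id << 32);	/* low | high */
	u64 mask = ~(u64)0;			/* exact single-port match */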
+static int prestera_flower_parse(struct prestera_flow_block *block,
+ struct prestera_acl_rule *rule,
+ struct flow_cls_offload *f)
+{
+ struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
+ struct flow_dissector *dissector = f_rule->match.dissector;
+ struct prestera_acl_rule_match_entry m_entry;
+ u16 n_proto_mask = 0;
+ u16 n_proto_key = 0;
+ u16 addr_type = 0;
+ u8 ip_proto = 0;
+ int err;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_META) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_ICMP) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN))) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
+ return -EOPNOTSUPP;
+ }
+
+ prestera_acl_rule_priority_set(rule, f->common.prio);
+
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_META)) {
+ err = prestera_flower_parse_meta(rule, f, block);
+ if (err)
+ return err;
+ }
+
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(f_rule, &match);
+ addr_type = match.key->addr_type;
+ }
+
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(f_rule, &match);
+ n_proto_key = ntohs(match.key->n_proto);
+ n_proto_mask = ntohs(match.mask->n_proto);
+
+ if (n_proto_key == ETH_P_ALL) {
+ n_proto_key = 0;
+ n_proto_mask = 0;
+ }
+
+ /* add eth type key,mask */
+ memset(&m_entry, 0, sizeof(m_entry));
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE;
+ m_entry.keymask.u16.key = n_proto_key;
+ m_entry.keymask.u16.mask = n_proto_mask;
+ err = prestera_acl_rule_match_add(rule, &m_entry);
+ if (err)
+ return err;
+
+ /* add ip proto key,mask */
+ memset(&m_entry, 0, sizeof(m_entry));
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_PROTO;
+ m_entry.keymask.u8.key = match.key->ip_proto;
+ m_entry.keymask.u8.mask = match.mask->ip_proto;
+ err = prestera_acl_rule_match_add(rule, &m_entry);
+ if (err)
+ return err;
+
+ ip_proto = match.key->ip_proto;
+ }
+
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(f_rule, &match);
+
+ /* add ethernet dst key,mask */
+ memset(&m_entry, 0, sizeof(m_entry));
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_DMAC;
+ memcpy(&m_entry.keymask.mac.key,
+ &match.key->dst, sizeof(match.key->dst));
+ memcpy(&m_entry.keymask.mac.mask,
+ &match.mask->dst, sizeof(match.mask->dst));
+ err = prestera_acl_rule_match_add(rule, &m_entry);
+ if (err)
+ return err;
+
+ /* add ethernet src key,mask */
+ memset(&m_entry, 0, sizeof(m_entry));
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_SMAC;
+ memcpy(&m_entry.keymask.mac.key,
+ &match.key->src, sizeof(match.key->src));
+ memcpy(&m_entry.keymask.mac.mask,
+ &match.mask->src, sizeof(match.mask->src));
+ err = prestera_acl_rule_match_add(rule, &m_entry);
+ if (err)
+ return err;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(f_rule, &match);
+
+ memset(&m_entry, 0, sizeof(m_entry));
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_SRC;
+ memcpy(&m_entry.keymask.u32.key,
+ &match.key->src, sizeof(match.key->src));
+ memcpy(&m_entry.keymask.u32.mask,
+ &match.mask->src, sizeof(match.mask->src));
+ err = prestera_acl_rule_match_add(rule, &m_entry);
+ if (err)
+ return err;
+
+ memset(&m_entry, 0, sizeof(m_entry));
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST;
+ memcpy(&m_entry.keymask.u32.key,
+ &match.key->dst, sizeof(match.key->dst));
+ memcpy(&m_entry.keymask.u32.mask,
+ &match.mask->dst, sizeof(match.mask->dst));
+ err = prestera_acl_rule_match_add(rule, &m_entry);
+ if (err)
+ return err;
+ }
+
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
+ NL_SET_ERR_MSG_MOD(f->common.extack,
+ "Only UDP and TCP keys are supported");
+ return -EINVAL;
+ }
+
+ flow_rule_match_ports(f_rule, &match);
+
+ memset(&m_entry, 0, sizeof(m_entry));
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_SRC;
+ m_entry.keymask.u16.key = ntohs(match.key->src);
+ m_entry.keymask.u16.mask = ntohs(match.mask->src);
+ err = prestera_acl_rule_match_add(rule, &m_entry);
+ if (err)
+ return err;
+
+ memset(&m_entry, 0, sizeof(m_entry));
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST;
+ m_entry.keymask.u16.key = ntohs(match.key->dst);
+ m_entry.keymask.u16.mask = ntohs(match.mask->dst);
+ err = prestera_acl_rule_match_add(rule, &m_entry);
+ if (err)
+ return err;
+ }
+
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(f_rule, &match);
+
+ if (match.mask->vlan_id != 0) {
+ memset(&m_entry, 0, sizeof(m_entry));
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID;
+ m_entry.keymask.u16.key = match.key->vlan_id;
+ m_entry.keymask.u16.mask = match.mask->vlan_id;
+ err = prestera_acl_rule_match_add(rule, &m_entry);
+ if (err)
+ return err;
+ }
+
+ memset(&m_entry, 0, sizeof(m_entry));
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID;
+ m_entry.keymask.u16.key = ntohs(match.key->vlan_tpid);
+ m_entry.keymask.u16.mask = ntohs(match.mask->vlan_tpid);
+ err = prestera_acl_rule_match_add(rule, &m_entry);
+ if (err)
+ return err;
+ }
+
+ if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ICMP)) {
+ struct flow_match_icmp match;
+
+ flow_rule_match_icmp(f_rule, &match);
+
+ memset(&m_entry, 0, sizeof(m_entry));
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE;
+ m_entry.keymask.u8.key = match.key->type;
+ m_entry.keymask.u8.mask = match.mask->type;
+ err = prestera_acl_rule_match_add(rule, &m_entry);
+ if (err)
+ return err;
+
+ memset(&m_entry, 0, sizeof(m_entry));
+ m_entry.type = PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE;
+ m_entry.keymask.u8.key = match.key->code;
+ m_entry.keymask.u8.mask = match.mask->code;
+ err = prestera_acl_rule_match_add(rule, &m_entry);
+ if (err)
+ return err;
+ }
+
+ return prestera_flower_parse_actions(block, rule,
+ &f->rule->action,
+ f->common.extack);
+}
+
+int prestera_flower_replace(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ struct prestera_switch *sw = prestera_acl_block_sw(block);
+ struct prestera_acl_rule *rule;
+ int err;
+
+ rule = prestera_acl_rule_create(block, f->cookie);
+ if (IS_ERR(rule))
+ return PTR_ERR(rule);
+
+ err = prestera_flower_parse(block, rule, f);
+ if (err)
+ goto err_flower_parse;
+
+ err = prestera_acl_rule_add(sw, rule);
+ if (err)
+ goto err_rule_add;
+
+ return 0;
+
+err_rule_add:
+err_flower_parse:
+ prestera_acl_rule_destroy(rule);
+ return err;
+}
+
+void prestera_flower_destroy(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ struct prestera_acl_rule *rule;
+ struct prestera_switch *sw;
+
+ rule = prestera_acl_rule_lookup(prestera_acl_block_ruleset_get(block),
+ f->cookie);
+ if (rule) {
+ sw = prestera_acl_block_sw(block);
+ prestera_acl_rule_del(sw, rule);
+ prestera_acl_rule_destroy(rule);
+ }
+}
+
+int prestera_flower_stats(struct prestera_flow_block *block,
+ struct flow_cls_offload *f)
+{
+ struct prestera_switch *sw = prestera_acl_block_sw(block);
+ struct prestera_acl_rule *rule;
+ u64 packets;
+ u64 lastuse;
+ u64 bytes;
+ int err;
+
+ rule = prestera_acl_rule_lookup(prestera_acl_block_ruleset_get(block),
+ f->cookie);
+ if (!rule)
+ return -EINVAL;
+
+ err = prestera_acl_rule_get_stats(sw, rule, &packets, &bytes, &lastuse);
+ if (err)
+ return err;
+
+ flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.h b/drivers/net/ethernet/marvell/prestera/prestera_flower.h
new file mode 100644
index 000000000000..91e045eec58b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_FLOWER_H_
+#define _PRESTERA_FLOWER_H_
+
+#include <net/pkt_cls.h>
+
+struct prestera_flow_block;
+
+int prestera_flower_replace(struct prestera_flow_block *block,
+ struct flow_cls_offload *f);
+void prestera_flower_destroy(struct prestera_flow_block *block,
+ struct flow_cls_offload *f);
+int prestera_flower_stats(struct prestera_flow_block *block,
+ struct flow_cls_offload *f);
+
+#endif /* _PRESTERA_FLOWER_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
index 0424718d5998..c1297859e471 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -2,11 +2,13 @@
/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
#include <linux/ethtool.h>
#include <linux/list.h>
#include "prestera.h"
#include "prestera_hw.h"
+#include "prestera_acl.h"
#define PRESTERA_SWITCH_INIT_TIMEOUT_MS (30 * 1000)
@@ -36,11 +38,31 @@ enum prestera_cmd_type_t {
PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD = 0x402,
PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE = 0x403,
+ PRESTERA_CMD_TYPE_ACL_RULE_ADD = 0x500,
+ PRESTERA_CMD_TYPE_ACL_RULE_DELETE = 0x501,
+ PRESTERA_CMD_TYPE_ACL_RULE_STATS_GET = 0x510,
+ PRESTERA_CMD_TYPE_ACL_RULESET_CREATE = 0x520,
+ PRESTERA_CMD_TYPE_ACL_RULESET_DELETE = 0x521,
+ PRESTERA_CMD_TYPE_ACL_PORT_BIND = 0x530,
+ PRESTERA_CMD_TYPE_ACL_PORT_UNBIND = 0x531,
+
PRESTERA_CMD_TYPE_RXTX_INIT = 0x800,
PRESTERA_CMD_TYPE_RXTX_PORT_INIT = 0x801,
+ PRESTERA_CMD_TYPE_LAG_MEMBER_ADD = 0x900,
+ PRESTERA_CMD_TYPE_LAG_MEMBER_DELETE = 0x901,
+ PRESTERA_CMD_TYPE_LAG_MEMBER_ENABLE = 0x902,
+ PRESTERA_CMD_TYPE_LAG_MEMBER_DISABLE = 0x903,
+
PRESTERA_CMD_TYPE_STP_PORT_SET = 0x1000,
+ PRESTERA_CMD_TYPE_SPAN_GET = 0x1100,
+ PRESTERA_CMD_TYPE_SPAN_BIND = 0x1101,
+ PRESTERA_CMD_TYPE_SPAN_UNBIND = 0x1102,
+ PRESTERA_CMD_TYPE_SPAN_RELEASE = 0x1103,
+
+ PRESTERA_CMD_TYPE_CPU_CODE_COUNTERS_GET = 0x2000,
+
PRESTERA_CMD_TYPE_ACK = 0x10000,
PRESTERA_CMD_TYPE_MAX
};
@@ -86,6 +108,11 @@ enum {
};
enum {
+ PRESTERA_PORT_FLOOD_TYPE_UC = 0,
+ PRESTERA_PORT_FLOOD_TYPE_MC = 1,
+};
+
+enum {
PRESTERA_PORT_GOOD_OCTETS_RCV_CNT,
PRESTERA_PORT_BAD_OCTETS_RCV_CNT,
PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT,
@@ -127,6 +154,12 @@ enum {
PRESTERA_FC_SYMM_ASYMM,
};
+enum {
+ PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT = 0,
+ PRESTERA_HW_FDB_ENTRY_TYPE_LAG = 1,
+ PRESTERA_HW_FDB_ENTRY_TYPE_MAX = 2,
+};
+
struct prestera_fw_event_handler {
struct list_head list;
struct rcu_head rcu;
@@ -168,6 +201,8 @@ struct prestera_msg_switch_init_resp {
u32 port_count;
u32 mtu_max;
u8 switch_id;
+ u8 lag_max;
+ u8 lag_member_max;
};
struct prestera_msg_port_autoneg_param {
@@ -188,6 +223,11 @@ struct prestera_msg_port_mdix_param {
u8 admin_mode;
};
+struct prestera_msg_port_flood_param {
+ u8 type;
+ u8 enable;
+};
+
union prestera_msg_port_param {
u8 admin_state;
u8 oper_state;
@@ -205,6 +245,7 @@ union prestera_msg_port_param {
struct prestera_msg_port_mdix_param mdix;
struct prestera_msg_port_autoneg_param autoneg;
struct prestera_msg_port_cap_param cap;
+ struct prestera_msg_port_flood_param flood_ext;
};
struct prestera_msg_port_attr_req {
@@ -249,8 +290,13 @@ struct prestera_msg_vlan_req {
struct prestera_msg_fdb_req {
struct prestera_msg_cmd cmd;
u8 dest_type;
- u32 port;
- u32 dev;
+ union {
+ struct {
+ u32 port;
+ u32 dev;
+ };
+ u16 lag_id;
+ } dest;
u8 mac[ETH_ALEN];
u16 vid;
u8 dynamic;
@@ -269,6 +315,85 @@ struct prestera_msg_bridge_resp {
u16 bridge;
};
+struct prestera_msg_acl_action {
+ u32 id;
+};
+
+struct prestera_msg_acl_match {
+ u32 type;
+ union {
+ struct {
+ u8 key;
+ u8 mask;
+ } u8;
+ struct {
+ u16 key;
+ u16 mask;
+ } u16;
+ struct {
+ u32 key;
+ u32 mask;
+ } u32;
+ struct {
+ u64 key;
+ u64 mask;
+ } u64;
+ struct {
+ u8 key[ETH_ALEN];
+ u8 mask[ETH_ALEN];
+ } mac;
+ } __packed keymask;
+};
+
+struct prestera_msg_acl_rule_req {
+ struct prestera_msg_cmd cmd;
+ u32 id;
+ u32 priority;
+ u16 ruleset_id;
+ u8 n_actions;
+ u8 n_matches;
+};
+
+struct prestera_msg_acl_rule_resp {
+ struct prestera_msg_ret ret;
+ u32 id;
+};
+
+struct prestera_msg_acl_rule_stats_resp {
+ struct prestera_msg_ret ret;
+ u64 packets;
+ u64 bytes;
+};
+
+struct prestera_msg_acl_ruleset_bind_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+ u32 dev;
+ u16 ruleset_id;
+};
+
+struct prestera_msg_acl_ruleset_req {
+ struct prestera_msg_cmd cmd;
+ u16 id;
+};
+
+struct prestera_msg_acl_ruleset_resp {
+ struct prestera_msg_ret ret;
+ u16 id;
+};
+
+struct prestera_msg_span_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+ u32 dev;
+ u8 id;
+} __packed __aligned(4);
+
+struct prestera_msg_span_resp {
+ struct prestera_msg_ret ret;
+ u8 id;
+} __packed __aligned(4);
+
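These two descriptors cross the host/firmware boundary, so their layout must not depend on compiler padding: __packed pins the byte layout and __aligned(4) keeps the structures 32-bit aligned for cheap access. The resulting size could be pinned with a build-time check like this (illustrative only, and assuming struct prestera_msg_cmd is a single u32):

static_assert(sizeof(struct prestera_msg_span_req) == 16,
	      "span request layout is part of the firmware ABI");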
struct prestera_msg_stp_req {
struct prestera_msg_cmd cmd;
u32 port;
@@ -293,6 +418,24 @@ struct prestera_msg_rxtx_port_req {
u32 dev;
};
+struct prestera_msg_lag_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+ u32 dev;
+ u16 lag_id;
+};
+
+struct prestera_msg_cpu_code_counter_req {
+ struct prestera_msg_cmd cmd;
+ u8 counter_type;
+ u8 code;
+};
+
+struct mvsw_msg_cpu_code_counter_ret {
+ struct prestera_msg_ret ret;
+ u64 packet_count;
+};
+
struct prestera_msg_event {
u16 type;
u16 id;
@@ -315,7 +458,10 @@ union prestera_msg_event_fdb_param {
struct prestera_msg_event_fdb {
struct prestera_msg_event id;
u8 dest_type;
- u32 port_id;
+ union {
+ u32 port_id;
+ u16 lag_id;
+ } dest;
u32 vid;
union prestera_msg_event_fdb_param param;
};
@@ -386,7 +532,19 @@ static int prestera_fw_parse_fdb_evt(void *msg, struct prestera_event *evt)
{
struct prestera_msg_event_fdb *hw_evt = msg;
- evt->fdb_evt.port_id = hw_evt->port_id;
+ switch (hw_evt->dest_type) {
+ case PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT:
+ evt->fdb_evt.type = PRESTERA_FDB_ENTRY_TYPE_REG_PORT;
+ evt->fdb_evt.dest.port_id = hw_evt->dest.port_id;
+ break;
+ case PRESTERA_HW_FDB_ENTRY_TYPE_LAG:
+ evt->fdb_evt.type = PRESTERA_FDB_ENTRY_TYPE_LAG;
+ evt->fdb_evt.dest.lag_id = hw_evt->dest.lag_id;
+ break;
+ default:
+ return -EINVAL;
+ }
+
evt->fdb_evt.vid = hw_evt->vid;
ether_addr_copy(evt->fdb_evt.data.mac, hw_evt->param.mac);
@@ -531,6 +689,8 @@ int prestera_hw_switch_init(struct prestera_switch *sw)
sw->mtu_min = PRESTERA_MIN_MTU;
sw->mtu_max = resp.mtu_max;
sw->id = resp.switch_id;
+ sw->lag_member_max = resp.lag_member_max;
+ sw->lag_max = resp.lag_max;
return 0;
}
@@ -696,6 +856,274 @@ int prestera_hw_port_remote_fc_get(const struct prestera_port *port,
return 0;
}
+int prestera_hw_acl_ruleset_create(struct prestera_switch *sw, u16 *ruleset_id)
+{
+ struct prestera_msg_acl_ruleset_resp resp;
+ struct prestera_msg_acl_ruleset_req req;
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ACL_RULESET_CREATE,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *ruleset_id = resp.id;
+
+ return 0;
+}
+
+int prestera_hw_acl_ruleset_del(struct prestera_switch *sw, u16 ruleset_id)
+{
+ struct prestera_msg_acl_ruleset_req req = {
+ .id = ruleset_id,
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ACL_RULESET_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+static int prestera_hw_acl_actions_put(struct prestera_msg_acl_action *action,
+ struct prestera_acl_rule *rule)
+{
+ struct list_head *a_list = prestera_acl_rule_action_list_get(rule);
+ struct prestera_acl_rule_action_entry *a_entry;
+ int i = 0;
+
+ list_for_each_entry(a_entry, a_list, list) {
+ action[i].id = a_entry->id;
+
+ switch (a_entry->id) {
+ case PRESTERA_ACL_RULE_ACTION_ACCEPT:
+ case PRESTERA_ACL_RULE_ACTION_DROP:
+ case PRESTERA_ACL_RULE_ACTION_TRAP:
+ /* just rule action id, no specific data */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
+static int prestera_hw_acl_matches_put(struct prestera_msg_acl_match *match,
+ struct prestera_acl_rule *rule)
+{
+ struct list_head *m_list = prestera_acl_rule_match_list_get(rule);
+ struct prestera_acl_rule_match_entry *m_entry;
+ int i = 0;
+
+ list_for_each_entry(m_entry, m_list, list) {
+ match[i].type = m_entry->type;
+
+ switch (m_entry->type) {
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE:
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_SRC:
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST:
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID:
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID:
+ match[i].keymask.u16.key = m_entry->keymask.u16.key;
+ match[i].keymask.u16.mask = m_entry->keymask.u16.mask;
+ break;
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE:
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE:
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_PROTO:
+ match[i].keymask.u8.key = m_entry->keymask.u8.key;
+ match[i].keymask.u8.mask = m_entry->keymask.u8.mask;
+ break;
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_SMAC:
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_DMAC:
+ memcpy(match[i].keymask.mac.key,
+ m_entry->keymask.mac.key,
+ sizeof(match[i].keymask.mac.key));
+ memcpy(match[i].keymask.mac.mask,
+ m_entry->keymask.mac.mask,
+ sizeof(match[i].keymask.mac.mask));
+ break;
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_SRC:
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST:
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_SRC:
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_DST:
+ match[i].keymask.u32.key = m_entry->keymask.u32.key;
+ match[i].keymask.u32.mask = m_entry->keymask.u32.mask;
+ break;
+ case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT:
+ match[i].keymask.u64.key = m_entry->keymask.u64.key;
+ match[i].keymask.u64.mask = m_entry->keymask.u64.mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ i++;
+ }
+
+ return 0;
+}
+
+int prestera_hw_acl_rule_add(struct prestera_switch *sw,
+ struct prestera_acl_rule *rule,
+ u32 *rule_id)
+{
+ struct prestera_msg_acl_action *actions;
+ struct prestera_msg_acl_match *matches;
+ struct prestera_msg_acl_rule_resp resp;
+ struct prestera_msg_acl_rule_req *req;
+ u8 n_actions;
+ u8 n_matches;
+ void *buff;
+ u32 size;
+ int err;
+
+ n_actions = prestera_acl_rule_action_len(rule);
+ n_matches = prestera_acl_rule_match_len(rule);
+
+ size = sizeof(*req) + sizeof(*actions) * n_actions +
+ sizeof(*matches) * n_matches;
+
+ buff = kzalloc(size, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ req = buff;
+ actions = buff + sizeof(*req);
+ matches = buff + sizeof(*req) + sizeof(*actions) * n_actions;
+
+ /* put acl actions into the message */
+ err = prestera_hw_acl_actions_put(actions, rule);
+ if (err)
+ goto free_buff;
+
+ /* put acl matches into the message */
+ err = prestera_hw_acl_matches_put(matches, rule);
+ if (err)
+ goto free_buff;
+
+ req->ruleset_id = prestera_acl_rule_ruleset_id_get(rule);
+ req->priority = prestera_acl_rule_priority_get(rule);
+ req->n_actions = n_actions;
+ req->n_matches = n_matches;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ACL_RULE_ADD,
+ &req->cmd, size, &resp.ret, sizeof(resp));
+ if (err)
+ goto free_buff;
+
+ *rule_id = resp.id;
+free_buff:
+ kfree(buff);
+ return err;
+}
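
/* A sketch of the variable-length PRESTERA_CMD_TYPE_ACL_RULE_ADD message
 * assembled above (names as used in this file; offsets follow directly
 * from the kzalloc() size computation):
 *
 *   buff
 *   +---------------------------------------------+
 *   | struct prestera_msg_acl_rule_req            |
 *   +---------------------------------------------+  buff + sizeof(*req)
 *   | struct prestera_msg_acl_action [n_actions]  |
 *   +---------------------------------------------+  ... + sizeof(*actions) * n_actions
 *   | struct prestera_msg_acl_match [n_matches]   |
 *   +---------------------------------------------+
 */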
+
+int prestera_hw_acl_rule_del(struct prestera_switch *sw, u32 rule_id)
+{
+ struct prestera_msg_acl_rule_req req = {
+ .id = rule_id
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_ACL_RULE_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_acl_rule_stats_get(struct prestera_switch *sw, u32 rule_id,
+ u64 *packets, u64 *bytes)
+{
+ struct prestera_msg_acl_rule_stats_resp resp;
+ struct prestera_msg_acl_rule_req req = {
+ .id = rule_id
+ };
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ACL_RULE_STATS_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *packets = resp.packets;
+ *bytes = resp.bytes;
+
+ return 0;
+}
+
+int prestera_hw_acl_port_bind(const struct prestera_port *port, u16 ruleset_id)
+{
+ struct prestera_msg_acl_ruleset_bind_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .ruleset_id = ruleset_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_ACL_PORT_BIND,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_acl_port_unbind(const struct prestera_port *port,
+ u16 ruleset_id)
+{
+ struct prestera_msg_acl_ruleset_bind_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .ruleset_id = ruleset_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_ACL_PORT_UNBIND,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id)
+{
+ struct prestera_msg_span_resp resp;
+ struct prestera_msg_span_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_SPAN_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *span_id = resp.id;
+
+ return 0;
+}
+
+int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id)
+{
+ struct prestera_msg_span_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .id = span_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_SPAN_BIND,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_span_unbind(const struct prestera_port *port)
+{
+ struct prestera_msg_span_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_SPAN_UNBIND,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id)
+{
+ struct prestera_msg_span_req req = {
+ .id = span_id
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_SPAN_RELEASE,
+ &req.cmd, sizeof(req));
+}
+
int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type)
{
struct prestera_msg_port_attr_req req = {
@@ -988,7 +1416,43 @@ int prestera_hw_port_learning_set(struct prestera_port *port, bool enable)
&req.cmd, sizeof(req));
}
-int prestera_hw_port_flood_set(struct prestera_port *port, bool flood)
+static int prestera_hw_port_uc_flood_set(struct prestera_port *port, bool flood)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_FLOOD,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .flood_ext = {
+ .type = PRESTERA_PORT_FLOOD_TYPE_UC,
+ .enable = flood,
+ }
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+static int prestera_hw_port_mc_flood_set(struct prestera_port *port, bool flood)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_FLOOD,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .flood_ext = {
+ .type = PRESTERA_PORT_FLOOD_TYPE_MC,
+ .enable = flood,
+ }
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+static int prestera_hw_port_flood_set_v2(struct prestera_port *port, bool flood)
{
struct prestera_msg_port_attr_req req = {
.attr = PRESTERA_CMD_PORT_ATTR_FLOOD,
@@ -1003,6 +1467,41 @@ int prestera_hw_port_flood_set(struct prestera_port *port, bool flood)
&req.cmd, sizeof(req));
}
+int prestera_hw_port_flood_set(struct prestera_port *port, unsigned long mask,
+ unsigned long val)
+{
+ int err;
+
+ if (port->sw->dev->fw_rev.maj <= 2) {
+ if (!(mask & BR_FLOOD))
+ return 0;
+
+ return prestera_hw_port_flood_set_v2(port, val & BR_FLOOD);
+ }
+
+ if (mask & BR_FLOOD) {
+ err = prestera_hw_port_uc_flood_set(port, val & BR_FLOOD);
+ if (err)
+ return err;
+ }
+
+ if (mask & BR_MCAST_FLOOD) {
+ err = prestera_hw_port_mc_flood_set(port, val & BR_MCAST_FLOOD);
+ if (err)
+ goto err_mc_flood;
+ }
+
+ return 0;
+
+err_mc_flood:
+ if (mask & BR_FLOOD)
+ prestera_hw_port_uc_flood_set(port, 0);
+
+ return err;
+}
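
/* Hypothetical caller's view of the new mask/val interface: only the
 * flood types selected in the mask are touched, mirroring struct
 * switchdev_brport_flags. E.g. to enable unicast flooding while
 * disabling multicast flooding in a single call:
 *
 *	err = prestera_hw_port_flood_set(port,
 *					 BR_FLOOD | BR_MCAST_FLOOD,
 *					 BR_FLOOD);
 */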
+
int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid)
{
struct prestera_msg_vlan_req req = {
@@ -1067,8 +1566,10 @@ int prestera_hw_fdb_add(struct prestera_port *port, const unsigned char *mac,
u16 vid, bool dynamic)
{
struct prestera_msg_fdb_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
+ .dest = {
+ .dev = port->dev_id,
+ .port = port->hw_id,
+ },
.vid = vid,
.dynamic = dynamic,
};
@@ -1083,8 +1584,10 @@ int prestera_hw_fdb_del(struct prestera_port *port, const unsigned char *mac,
u16 vid)
{
struct prestera_msg_fdb_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
+ .dest = {
+ .dev = port->dev_id,
+ .port = port->hw_id,
+ },
.vid = vid,
};
@@ -1094,11 +1597,48 @@ int prestera_hw_fdb_del(struct prestera_port *port, const unsigned char *mac,
&req.cmd, sizeof(req));
}
+int prestera_hw_lag_fdb_add(struct prestera_switch *sw, u16 lag_id,
+ const unsigned char *mac, u16 vid, bool dynamic)
+{
+ struct prestera_msg_fdb_req req = {
+ .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG,
+ .dest = {
+ .lag_id = lag_id,
+ },
+ .vid = vid,
+ .dynamic = dynamic,
+ };
+
+ ether_addr_copy(req.mac, mac);
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_ADD,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_lag_fdb_del(struct prestera_switch *sw, u16 lag_id,
+ const unsigned char *mac, u16 vid)
+{
+ struct prestera_msg_fdb_req req = {
+ .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG,
+ .dest = {
+ .lag_id = lag_id,
+ },
+ .vid = vid,
+ };
+
+ ether_addr_copy(req.mac, mac);
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_DELETE,
+ &req.cmd, sizeof(req));
+}
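
/* Note: the per-port variants above (prestera_hw_fdb_add/del) leave
 * dest_type unset; with designated initializers the field is zeroed,
 * which is assumed here to equal PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT,
 * while the LAG variants set PRESTERA_HW_FDB_ENTRY_TYPE_LAG explicitly.
 */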
+
int prestera_hw_fdb_flush_port(struct prestera_port *port, u32 mode)
{
struct prestera_msg_fdb_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
+ .dest = {
+ .dev = port->dev_id,
+ .port = port->hw_id,
+ },
.flush_mode = mode,
};
@@ -1121,8 +1661,10 @@ int prestera_hw_fdb_flush_port_vlan(struct prestera_port *port, u16 vid,
u32 mode)
{
struct prestera_msg_fdb_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
+ .dest = {
+ .dev = port->dev_id,
+ .port = port->hw_id,
+ },
.vid = vid,
.flush_mode = mode,
};
@@ -1131,6 +1673,37 @@ int prestera_hw_fdb_flush_port_vlan(struct prestera_port *port, u16 vid,
&req.cmd, sizeof(req));
}
+int prestera_hw_fdb_flush_lag(struct prestera_switch *sw, u16 lag_id,
+ u32 mode)
+{
+ struct prestera_msg_fdb_req req = {
+ .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG,
+ .dest = {
+ .lag_id = lag_id,
+ },
+ .flush_mode = mode,
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_flush_lag_vlan(struct prestera_switch *sw,
+ u16 lag_id, u16 vid, u32 mode)
+{
+ struct prestera_msg_fdb_req req = {
+ .dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG,
+ .dest = {
+ .lag_id = lag_id,
+ },
+ .vid = vid,
+ .flush_mode = mode,
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN,
+ &req.cmd, sizeof(req));
+}
+
int prestera_hw_bridge_create(struct prestera_switch *sw, u16 *bridge_id)
{
struct prestera_msg_bridge_resp resp;
@@ -1212,6 +1785,68 @@ int prestera_hw_rxtx_port_init(struct prestera_port *port)
&req.cmd, sizeof(req));
}
+int prestera_hw_lag_member_add(struct prestera_port *port, u16 lag_id)
+{
+ struct prestera_msg_lag_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .lag_id = lag_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_LAG_MEMBER_ADD,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_lag_member_del(struct prestera_port *port, u16 lag_id)
+{
+ struct prestera_msg_lag_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .lag_id = lag_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_LAG_MEMBER_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_lag_member_enable(struct prestera_port *port, u16 lag_id,
+ bool enable)
+{
+ struct prestera_msg_lag_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .lag_id = lag_id,
+ };
+ u32 cmd;
+
+ cmd = enable ? PRESTERA_CMD_TYPE_LAG_MEMBER_ENABLE :
+ PRESTERA_CMD_TYPE_LAG_MEMBER_DISABLE;
+
+ return prestera_cmd(port->sw, cmd, &req.cmd, sizeof(req));
+}
+
+int
+prestera_hw_cpu_code_counters_get(struct prestera_switch *sw, u8 code,
+ enum prestera_hw_cpu_code_cnt_t counter_type,
+ u64 *packet_count)
+{
+ struct prestera_msg_cpu_code_counter_req req = {
+ .counter_type = counter_type,
+ .code = code,
+ };
+ struct prestera_msg_cpu_code_counter_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_CPU_CODE_COUNTERS_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *packet_count = resp.packet_count;
+
+ return 0;
+}
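
/* Hypothetical usage sketch: read the firmware's drop counter for one
 * CPU code (0x10 is an arbitrary example value, not a code defined by
 * this patch; whether counters are cumulative or clear-on-read is up
 * to the firmware):
 *
 *	u64 dropped;
 *	int err;
 *
 *	err = prestera_hw_cpu_code_counters_get(sw, 0x10,
 *						PRESTERA_HW_CPU_CODE_CNT_TYPE_DROP,
 *						&dropped);
 */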
+
int prestera_hw_event_handler_register(struct prestera_switch *sw,
enum prestera_event_type type,
prestera_event_cb_t fn,
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
index b2b5ac95b4e3..546d5fd8240d 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
@@ -89,12 +89,18 @@ enum {
PRESTERA_STP_FORWARD,
};
+enum prestera_hw_cpu_code_cnt_t {
+ PRESTERA_HW_CPU_CODE_CNT_TYPE_DROP = 0,
+ PRESTERA_HW_CPU_CODE_CNT_TYPE_TRAP = 1,
+};
+
struct prestera_switch;
struct prestera_port;
struct prestera_port_stats;
struct prestera_port_caps;
enum prestera_event_type;
struct prestera_event;
+struct prestera_acl_rule;
typedef void (*prestera_event_cb_t)
(struct prestera_switch *sw, struct prestera_event *evt, void *arg);
@@ -138,7 +144,8 @@ int prestera_hw_port_mdix_get(const struct prestera_port *port, u8 *status,
int prestera_hw_port_mdix_set(const struct prestera_port *port, u8 mode);
int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed);
int prestera_hw_port_learning_set(struct prestera_port *port, bool enable);
-int prestera_hw_port_flood_set(struct prestera_port *port, bool flood);
+int prestera_hw_port_flood_set(struct prestera_port *port, unsigned long mask,
+ unsigned long val);
int prestera_hw_port_accept_frm_type(struct prestera_port *port,
enum prestera_accept_frm_type type);
/* Vlan API */
@@ -165,6 +172,28 @@ int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id);
int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id);
int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id);
+/* ACL API */
+int prestera_hw_acl_ruleset_create(struct prestera_switch *sw,
+ u16 *ruleset_id);
+int prestera_hw_acl_ruleset_del(struct prestera_switch *sw,
+ u16 ruleset_id);
+int prestera_hw_acl_rule_add(struct prestera_switch *sw,
+ struct prestera_acl_rule *rule,
+ u32 *rule_id);
+int prestera_hw_acl_rule_del(struct prestera_switch *sw, u32 rule_id);
+int prestera_hw_acl_rule_stats_get(struct prestera_switch *sw,
+ u32 rule_id, u64 *packets, u64 *bytes);
+int prestera_hw_acl_port_bind(const struct prestera_port *port,
+ u16 ruleset_id);
+int prestera_hw_acl_port_unbind(const struct prestera_port *port,
+ u16 ruleset_id);
+
+/* SPAN API */
+int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id);
+int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id);
+int prestera_hw_span_unbind(const struct prestera_port *port);
+int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id);
+
/* Event handlers */
int prestera_hw_event_handler_register(struct prestera_switch *sw,
enum prestera_event_type type,
@@ -179,4 +208,24 @@ int prestera_hw_rxtx_init(struct prestera_switch *sw,
struct prestera_rxtx_params *params);
int prestera_hw_rxtx_port_init(struct prestera_port *port);
+/* LAG API */
+int prestera_hw_lag_member_add(struct prestera_port *port, u16 lag_id);
+int prestera_hw_lag_member_del(struct prestera_port *port, u16 lag_id);
+int prestera_hw_lag_member_enable(struct prestera_port *port, u16 lag_id,
+ bool enable);
+int prestera_hw_lag_fdb_add(struct prestera_switch *sw, u16 lag_id,
+ const unsigned char *mac, u16 vid, bool dynamic);
+int prestera_hw_lag_fdb_del(struct prestera_switch *sw, u16 lag_id,
+ const unsigned char *mac, u16 vid);
+int prestera_hw_fdb_flush_lag(struct prestera_switch *sw, u16 lag_id,
+ u32 mode);
+int prestera_hw_fdb_flush_lag_vlan(struct prestera_switch *sw,
+ u16 lag_id, u16 vid, u32 mode);
+
+/* HW trap/drop counters API */
+int
+prestera_hw_cpu_code_counters_get(struct prestera_switch *sw, u8 code,
+ enum prestera_hw_cpu_code_cnt_t counter_type,
+ u64 *packet_count);
+
#endif /* _PRESTERA_HW_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 2768c78528a5..226f4ff29f6e 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -8,9 +8,13 @@
#include <linux/netdev_features.h>
#include <linux/of.h>
#include <linux/of_net.h>
+#include <linux/if_vlan.h>
#include "prestera.h"
#include "prestera_hw.h"
+#include "prestera_acl.h"
+#include "prestera_flow.h"
+#include "prestera_span.h"
#include "prestera_rxtx.h"
#include "prestera_devlink.h"
#include "prestera_ethtool.h"
@@ -199,10 +203,25 @@ static void prestera_port_stats_update(struct work_struct *work)
msecs_to_jiffies(PRESTERA_STATS_DELAY_MS));
}
+static int prestera_port_setup_tc(struct net_device *dev,
+ enum tc_setup_type type,
+ void *type_data)
+{
+ struct prestera_port *port = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return prestera_flow_block_setup(port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct net_device_ops prestera_netdev_ops = {
.ndo_open = prestera_port_open,
.ndo_stop = prestera_port_close,
.ndo_start_xmit = prestera_port_xmit,
+ .ndo_setup_tc = prestera_port_setup_tc,
.ndo_change_mtu = prestera_port_change_mtu,
.ndo_get_stats64 = prestera_port_get_stats64,
.ndo_set_mac_address = prestera_port_set_mac_address,
@@ -281,6 +300,7 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
INIT_LIST_HEAD(&port->vlans_list);
port->pvid = PRESTERA_DEFAULT_VID;
+ port->lag = NULL;
port->dev = dev;
port->id = id;
port->sw = sw;
@@ -296,7 +316,7 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
if (err)
goto err_dl_port_register;
- dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC;
dev->netdev_ops = &prestera_netdev_ops;
dev->ethtool_ops = &prestera_ethtool_ops;
@@ -472,6 +492,149 @@ static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw)
return prestera_hw_switch_mac_set(sw, sw->base_mac);
}
+struct prestera_lag *prestera_lag_by_id(struct prestera_switch *sw, u16 id)
+{
+ return id < sw->lag_max ? &sw->lags[id] : NULL;
+}
+
+static struct prestera_lag *prestera_lag_by_dev(struct prestera_switch *sw,
+ struct net_device *dev)
+{
+ struct prestera_lag *lag;
+ u16 id;
+
+ for (id = 0; id < sw->lag_max; id++) {
+ lag = &sw->lags[id];
+ if (lag->dev == dev)
+ return lag;
+ }
+
+ return NULL;
+}
+
+static struct prestera_lag *prestera_lag_create(struct prestera_switch *sw,
+ struct net_device *lag_dev)
+{
+ struct prestera_lag *lag = NULL;
+ u16 id;
+
+ for (id = 0; id < sw->lag_max; id++) {
+ if (!sw->lags[id].dev) {
+ lag = &sw->lags[id];
+ break;
+ }
+ }
+ if (lag) {
+ INIT_LIST_HEAD(&lag->members);
+ lag->dev = lag_dev;
+ }
+
+ return lag;
+}
+
+static void prestera_lag_destroy(struct prestera_switch *sw,
+ struct prestera_lag *lag)
+{
+ WARN_ON(!list_empty(&lag->members));
+ lag->member_count = 0;
+ lag->dev = NULL;
+}
+
+static int prestera_lag_port_add(struct prestera_port *port,
+ struct net_device *lag_dev)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_lag *lag;
+ int err;
+
+ lag = prestera_lag_by_dev(sw, lag_dev);
+ if (!lag) {
+ lag = prestera_lag_create(sw, lag_dev);
+ if (!lag)
+ return -ENOSPC;
+ }
+
+ if (lag->member_count >= sw->lag_member_max)
+ return -ENOSPC;
+
+ err = prestera_hw_lag_member_add(port, lag->lag_id);
+ if (err) {
+ if (!lag->member_count)
+ prestera_lag_destroy(sw, lag);
+ return err;
+ }
+
+ list_add(&port->lag_member, &lag->members);
+ lag->member_count++;
+ port->lag = lag;
+
+ return 0;
+}
+
+static int prestera_lag_port_del(struct prestera_port *port)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_lag *lag = port->lag;
+ int err;
+
+ if (!lag || !lag->member_count)
+ return -EINVAL;
+
+ err = prestera_hw_lag_member_del(port, lag->lag_id);
+ if (err)
+ return err;
+
+ list_del(&port->lag_member);
+ lag->member_count--;
+ port->lag = NULL;
+
+ if (netif_is_bridge_port(lag->dev)) {
+ struct net_device *br_dev;
+
+ br_dev = netdev_master_upper_dev_get(lag->dev);
+
+ prestera_bridge_port_leave(br_dev, port);
+ }
+
+ if (!lag->member_count)
+ prestera_lag_destroy(sw, lag);
+
+ return 0;
+}
+
+bool prestera_port_is_lag_member(const struct prestera_port *port)
+{
+ return !!port->lag;
+}
+
+u16 prestera_port_lag_id(const struct prestera_port *port)
+{
+ return port->lag->lag_id;
+}
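
/* prestera_port_lag_id() dereferences port->lag unconditionally, so a
 * caller is expected to check membership first, as the switchdev
 * wrappers do:
 *
 *	if (prestera_port_is_lag_member(port))
 *		lag_id = prestera_port_lag_id(port);
 */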
+
+static int prestera_lag_init(struct prestera_switch *sw)
+{
+ u16 id;
+
+ sw->lags = kcalloc(sw->lag_max, sizeof(*sw->lags), GFP_KERNEL);
+ if (!sw->lags)
+ return -ENOMEM;
+
+ for (id = 0; id < sw->lag_max; id++)
+ sw->lags[id].lag_id = id;
+
+ return 0;
+}
+
+static void prestera_lag_fini(struct prestera_switch *sw)
+{
+ u16 idx;
+
+ for (idx = 0; idx < sw->lag_max; idx++)
+ WARN_ON(sw->lags[idx].member_count);
+
+ kfree(sw->lags);
+}
+
bool prestera_netdev_check(const struct net_device *dev)
{
return dev->netdev_ops == &prestera_netdev_ops;
@@ -505,16 +668,119 @@ struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev)
return port;
}
-static int prestera_netdev_port_event(struct net_device *dev,
+static int prestera_netdev_port_lower_event(struct net_device *dev,
+ unsigned long event, void *ptr)
+{
+ struct netdev_notifier_changelowerstate_info *info = ptr;
+ struct netdev_lag_lower_state_info *lower_state_info;
+ struct prestera_port *port = netdev_priv(dev);
+ bool enabled;
+
+ if (!netif_is_lag_port(dev))
+ return 0;
+ if (!prestera_port_is_lag_member(port))
+ return 0;
+
+ lower_state_info = info->lower_state_info;
+ enabled = lower_state_info->link_up && lower_state_info->tx_enabled;
+
+ return prestera_hw_lag_member_enable(port, port->lag->lag_id, enabled);
+}
+
+static bool prestera_lag_master_check(struct net_device *lag_dev,
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *ext_ack)
+{
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(ext_ack, "Unsupported LAG Tx type");
+ return false;
+ }
+
+ return true;
+}
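
/* Only hash-based Tx balancing can be offloaded: e.g. bonding modes
 * balance-xor and 802.3ad advertise NETDEV_LAG_TX_TYPE_HASH and pass
 * this check, while active-backup (NETDEV_LAG_TX_TYPE_ACTIVEBACKUP)
 * is rejected with an extack message.
 */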
+
+static int prestera_netdev_port_event(struct net_device *lower,
+ struct net_device *dev,
unsigned long event, void *ptr)
{
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct prestera_port *port = netdev_priv(dev);
+ struct netlink_ext_ack *extack;
+ struct net_device *upper;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+ upper = info->upper_dev;
+
switch (event) {
case NETDEV_PRECHANGEUPPER:
+ if (!netif_is_bridge_master(upper) &&
+ !netif_is_lag_master(upper)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+ return -EINVAL;
+ }
+
+ if (!info->linking)
+ break;
+
+ if (netdev_has_any_upper_dev(upper)) {
+ NL_SET_ERR_MSG_MOD(extack, "Upper device is already enslaved");
+ return -EINVAL;
+ }
+
+ if (netif_is_lag_master(upper) &&
+ !prestera_lag_master_check(upper, info->upper_info, extack))
+ return -EOPNOTSUPP;
+ if (netif_is_lag_master(upper) && vlan_uses_dev(dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Master device is a LAG master and port has a VLAN");
+ return -EINVAL;
+ }
+ if (netif_is_lag_port(dev) && is_vlan_dev(upper) &&
+ !netif_is_lag_master(vlan_dev_real_dev(upper))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can not put a VLAN on a LAG port");
+ return -EINVAL;
+ }
+ break;
+
case NETDEV_CHANGEUPPER:
- return prestera_bridge_port_event(dev, event, ptr);
- default:
- return 0;
+ if (netif_is_bridge_master(upper)) {
+ if (info->linking)
+ return prestera_bridge_port_join(upper, port);
+ else
+ prestera_bridge_port_leave(upper, port);
+ } else if (netif_is_lag_master(upper)) {
+ if (info->linking)
+ return prestera_lag_port_add(port, upper);
+ else
+ prestera_lag_port_del(port);
+ }
+ break;
+
+ case NETDEV_CHANGELOWERSTATE:
+ return prestera_netdev_port_lower_event(dev, event, ptr);
}
+
+ return 0;
+}
+
+static int prestera_netdevice_lag_event(struct net_device *lag_dev,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_lower_dev(lag_dev, dev, iter) {
+ if (prestera_netdev_check(dev)) {
+ err = prestera_netdev_port_event(lag_dev, dev, event,
+ ptr);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
}
static int prestera_netdev_event_handler(struct notifier_block *nb,
@@ -524,7 +790,9 @@ static int prestera_netdev_event_handler(struct notifier_block *nb,
int err = 0;
if (prestera_netdev_check(dev))
- err = prestera_netdev_port_event(dev, event, ptr);
+ err = prestera_netdev_port_event(dev, dev, event, ptr);
+ else if (netif_is_lag_master(dev))
+ err = prestera_netdevice_lag_event(dev, event, ptr);
return notifier_from_errno(err);
}
@@ -574,10 +842,22 @@ static int prestera_switch_init(struct prestera_switch *sw)
if (err)
goto err_handlers_register;
+ err = prestera_acl_init(sw);
+ if (err)
+ goto err_acl_init;
+
+ err = prestera_span_init(sw);
+ if (err)
+ goto err_span_init;
+
err = prestera_devlink_register(sw);
if (err)
goto err_dl_register;
+ err = prestera_lag_init(sw);
+ if (err)
+ goto err_lag_init;
+
err = prestera_create_ports(sw);
if (err)
goto err_ports_create;
@@ -585,8 +865,14 @@ static int prestera_switch_init(struct prestera_switch *sw)
return 0;
err_ports_create:
+ prestera_lag_fini(sw);
+err_lag_init:
prestera_devlink_unregister(sw);
err_dl_register:
+ prestera_span_fini(sw);
+err_span_init:
+ prestera_acl_fini(sw);
+err_acl_init:
prestera_event_handlers_unregister(sw);
err_handlers_register:
prestera_rxtx_switch_fini(sw);
@@ -602,7 +888,10 @@ err_swdev_register:
static void prestera_switch_fini(struct prestera_switch *sw)
{
prestera_destroy_ports(sw);
+ prestera_lag_fini(sw);
prestera_devlink_unregister(sw);
+ prestera_span_fini(sw);
+ prestera_acl_fini(sw);
prestera_event_handlers_unregister(sw);
prestera_rxtx_switch_fini(sw);
prestera_switchdev_fini(sw);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index 298110119272..a250d394da38 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+#include <linux/bitfield.h>
#include <linux/circ_buf.h>
#include <linux/device.h>
#include <linux/firmware.h>
@@ -13,9 +14,12 @@
#define PRESTERA_MSG_MAX_SIZE 1500
-#define PRESTERA_SUPP_FW_MAJ_VER 2
+#define PRESTERA_SUPP_FW_MAJ_VER 3
#define PRESTERA_SUPP_FW_MIN_VER 0
+#define PRESTERA_PREV_FW_MAJ_VER 2
+#define PRESTERA_PREV_FW_MIN_VER 0
+
#define PRESTERA_FW_PATH_FMT "mrvl/prestera/mvsw_prestera_fw-v%u.%u.img"
#define PRESTERA_FW_HDR_MAGIC 0x351D9D06
@@ -144,6 +148,11 @@ struct prestera_fw_regs {
/* PRESTERA_CMD_RCV_CTL_REG flags */
#define PRESTERA_CMD_F_REPL_SENT BIT(0)
+#define PRESTERA_FW_EVT_CTL_STATUS_MASK GENMASK(1, 0)
+
+#define PRESTERA_FW_EVT_CTL_STATUS_ON 0
+#define PRESTERA_FW_EVT_CTL_STATUS_OFF 1
+
#define PRESTERA_EVTQ_REG_OFFSET(q, f) \
(PRESTERA_FW_REG_OFFSET(evtq_list) + \
(q) * sizeof(struct prestera_fw_evtq_regs) + \
@@ -166,6 +175,8 @@ struct prestera_fw_evtq {
};
struct prestera_fw {
+ struct prestera_fw_rev rev_supp;
+ const struct firmware *bin;
struct workqueue_struct *wq;
struct prestera_device dev;
u8 __iomem *ldr_regs;
@@ -260,6 +271,15 @@ static u8 prestera_fw_evtq_pick(struct prestera_fw *fw)
return PRESTERA_EVT_QNUM_MAX;
}
+static void prestera_fw_evt_ctl_status_set(struct prestera_fw *fw, u32 val)
+{
+ u32 status = prestera_fw_read(fw, PRESTERA_FW_STATUS_REG);
+
+ u32p_replace_bits(&status, val, PRESTERA_FW_EVT_CTL_STATUS_MASK);
+
+ prestera_fw_write(fw, PRESTERA_FW_STATUS_REG, status);
+}
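
/* u32p_replace_bits() comes from the newly included <linux/bitfield.h>;
 * the helper above is equivalent to the open-coded
 *
 *	status &= ~PRESTERA_FW_EVT_CTL_STATUS_MASK;
 *	status |= FIELD_PREP(PRESTERA_FW_EVT_CTL_STATUS_MASK, val);
 */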
+
static void prestera_fw_evt_work_fn(struct work_struct *work)
{
struct prestera_fw *fw;
@@ -269,6 +289,8 @@ static void prestera_fw_evt_work_fn(struct work_struct *work)
fw = container_of(work, struct prestera_fw, evt_work);
msg = fw->evt_msg;
+ prestera_fw_evt_ctl_status_set(fw, PRESTERA_FW_EVT_CTL_STATUS_OFF);
+
while ((qid = prestera_fw_evtq_pick(fw)) < PRESTERA_EVT_QNUM_MAX) {
u32 idx;
u32 len;
@@ -288,6 +310,8 @@ static void prestera_fw_evt_work_fn(struct work_struct *work)
if (fw->dev.recv_msg)
fw->dev.recv_msg(&fw->dev, msg, len);
}
+
+ prestera_fw_evt_ctl_status_set(fw, PRESTERA_FW_EVT_CTL_STATUS_ON);
}
static int prestera_fw_wait_reg32(struct prestera_fw *fw, u32 reg, u32 cmp,
@@ -576,25 +600,24 @@ static void prestera_fw_rev_parse(const struct prestera_fw_header *hdr,
static int prestera_fw_rev_check(struct prestera_fw *fw)
{
struct prestera_fw_rev *rev = &fw->dev.fw_rev;
- u16 maj_supp = PRESTERA_SUPP_FW_MAJ_VER;
- u16 min_supp = PRESTERA_SUPP_FW_MIN_VER;
- if (rev->maj == maj_supp && rev->min >= min_supp)
+ if (rev->maj == fw->rev_supp.maj && rev->min >= fw->rev_supp.min)
return 0;
dev_err(fw->dev.dev, "Driver supports FW version only '%u.%u.x'",
- PRESTERA_SUPP_FW_MAJ_VER, PRESTERA_SUPP_FW_MIN_VER);
+ fw->rev_supp.maj, fw->rev_supp.min);
return -EINVAL;
}
-static int prestera_fw_hdr_parse(struct prestera_fw *fw,
- const struct firmware *img)
+static int prestera_fw_hdr_parse(struct prestera_fw *fw)
{
- struct prestera_fw_header *hdr = (struct prestera_fw_header *)img->data;
struct prestera_fw_rev *rev = &fw->dev.fw_rev;
+ struct prestera_fw_header *hdr;
u32 magic;
+ hdr = (struct prestera_fw_header *)fw->bin->data;
+
magic = be32_to_cpu(hdr->magic_number);
if (magic != PRESTERA_FW_HDR_MAGIC) {
dev_err(fw->dev.dev, "FW img hdr magic is invalid");
@@ -609,11 +632,52 @@ static int prestera_fw_hdr_parse(struct prestera_fw *fw,
return prestera_fw_rev_check(fw);
}
+static int prestera_fw_get(struct prestera_fw *fw)
+{
+ int ver_maj = PRESTERA_SUPP_FW_MAJ_VER;
+ int ver_min = PRESTERA_SUPP_FW_MIN_VER;
+ char fw_path[128];
+ int err;
+
+pick_fw_ver:
+ snprintf(fw_path, sizeof(fw_path), PRESTERA_FW_PATH_FMT,
+ ver_maj, ver_min);
+
+ err = request_firmware_direct(&fw->bin, fw_path, fw->dev.dev);
+ if (err) {
+ if (ver_maj == PRESTERA_SUPP_FW_MAJ_VER) {
+ ver_maj = PRESTERA_PREV_FW_MAJ_VER;
+ ver_min = PRESTERA_PREV_FW_MIN_VER;
+
+ dev_warn(fw->dev.dev,
+ "missing latest %s firmware, fall-back to previous %u.%u version\n",
+ fw_path, ver_maj, ver_min);
+
+ goto pick_fw_ver;
+ } else {
+ dev_err(fw->dev.dev, "failed to request previous firmware: %s\n",
+ fw_path);
+ return err;
+ }
+ }
+
+ dev_info(fw->dev.dev, "Loading %s ...", fw_path);
+
+ fw->rev_supp.maj = ver_maj;
+ fw->rev_supp.min = ver_min;
+ fw->rev_supp.sub = 0;
+
+ return 0;
+}
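
/* With the constants above, the driver first requests
 * "mrvl/prestera/mvsw_prestera_fw-v3.0.img" and falls back exactly once
 * to "mrvl/prestera/mvsw_prestera_fw-v2.0.img"; rev_supp records which
 * version was picked so prestera_fw_rev_check() validates the image
 * actually loaded.
 */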
+
+static void prestera_fw_put(struct prestera_fw *fw)
+{
+ release_firmware(fw->bin);
+}
+
static int prestera_fw_load(struct prestera_fw *fw)
{
size_t hlen = sizeof(struct prestera_fw_header);
- const struct firmware *f;
- char fw_path[128];
int err;
err = prestera_ldr_wait_reg32(fw, PRESTERA_LDR_READY_REG,
@@ -632,30 +696,24 @@ static int prestera_fw_load(struct prestera_fw *fw)
fw->ldr_wr_idx = 0;
- snprintf(fw_path, sizeof(fw_path), PRESTERA_FW_PATH_FMT,
- PRESTERA_SUPP_FW_MAJ_VER, PRESTERA_SUPP_FW_MIN_VER);
-
- err = request_firmware_direct(&f, fw_path, fw->dev.dev);
- if (err) {
- dev_err(fw->dev.dev, "failed to request firmware file\n");
+ err = prestera_fw_get(fw);
+ if (err)
return err;
- }
- err = prestera_fw_hdr_parse(fw, f);
+ err = prestera_fw_hdr_parse(fw);
if (err) {
dev_err(fw->dev.dev, "FW image header is invalid\n");
goto out_release;
}
- prestera_ldr_write(fw, PRESTERA_LDR_IMG_SIZE_REG, f->size - hlen);
+ prestera_ldr_write(fw, PRESTERA_LDR_IMG_SIZE_REG, fw->bin->size - hlen);
prestera_ldr_write(fw, PRESTERA_LDR_CTL_REG, PRESTERA_LDR_CTL_DL_START);
- dev_info(fw->dev.dev, "Loading %s ...", fw_path);
-
- err = prestera_ldr_fw_send(fw, f->data + hlen, f->size - hlen);
+ err = prestera_ldr_fw_send(fw, fw->bin->data + hlen,
+ fw->bin->size - hlen);
out_release:
- release_firmware(f);
+ prestera_fw_put(fw);
return err;
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
index 2a13c318048c..73d2eba5262f 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
@@ -14,6 +14,7 @@
#include "prestera.h"
#include "prestera_hw.h"
#include "prestera_rxtx.h"
+#include "prestera_devlink.h"
#define PRESTERA_SDMA_WAIT_MUL 10
@@ -214,9 +215,10 @@ static struct sk_buff *prestera_sdma_rx_skb_get(struct prestera_sdma *sdma,
static int prestera_rxtx_process_skb(struct prestera_sdma *sdma,
struct sk_buff *skb)
{
- const struct prestera_port *port;
+ struct prestera_port *port;
struct prestera_dsa dsa;
u32 hw_port, dev_id;
+ u8 cpu_code;
int err;
skb_pull(skb, ETH_HLEN);
@@ -259,6 +261,9 @@ static int prestera_rxtx_process_skb(struct prestera_sdma *sdma,
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
}
+ cpu_code = dsa.cpu_code;
+ prestera_devlink_trap_report(port, skb, cpu_code);
+
return 0;
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_span.c b/drivers/net/ethernet/marvell/prestera/prestera_span.c
new file mode 100644
index 000000000000..3cafca827bb7
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_span.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_acl.h"
+#include "prestera_span.h"
+
+struct prestera_span_entry {
+ struct list_head list;
+ struct prestera_port *port;
+ refcount_t ref_count;
+ u8 id;
+};
+
+struct prestera_span {
+ struct prestera_switch *sw;
+ struct list_head entries;
+};
+
+static struct prestera_span_entry *
+prestera_span_entry_create(struct prestera_port *port, u8 span_id)
+{
+ struct prestera_span_entry *entry;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
+
+ refcount_set(&entry->ref_count, 1);
+ entry->port = port;
+ entry->id = span_id;
+ list_add_tail(&entry->list, &port->sw->span->entries);
+
+ return entry;
+}
+
+static void prestera_span_entry_del(struct prestera_span_entry *entry)
+{
+ list_del(&entry->list);
+ kfree(entry);
+}
+
+static struct prestera_span_entry *
+prestera_span_entry_find_by_id(struct prestera_span *span, u8 span_id)
+{
+ struct prestera_span_entry *entry;
+
+ list_for_each_entry(entry, &span->entries, list) {
+ if (entry->id == span_id)
+ return entry;
+ }
+
+ return NULL;
+}
+
+static struct prestera_span_entry *
+prestera_span_entry_find_by_port(struct prestera_span *span,
+ struct prestera_port *port)
+{
+ struct prestera_span_entry *entry;
+
+ list_for_each_entry(entry, &span->entries, list) {
+ if (entry->port == port)
+ return entry;
+ }
+
+ return NULL;
+}
+
+static int prestera_span_get(struct prestera_port *port, u8 *span_id)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_span_entry *entry;
+ u8 new_span_id;
+ int err;
+
+ entry = prestera_span_entry_find_by_port(sw->span, port);
+ if (entry) {
+ refcount_inc(&entry->ref_count);
+ *span_id = entry->id;
+ return 0;
+ }
+
+ err = prestera_hw_span_get(port, &new_span_id);
+ if (err)
+ return err;
+
+ entry = prestera_span_entry_create(port, new_span_id);
+ if (IS_ERR(entry)) {
+ prestera_hw_span_release(sw, new_span_id);
+ return PTR_ERR(entry);
+ }
+
+ *span_id = new_span_id;
+ return 0;
+}
+
+static int prestera_span_put(struct prestera_switch *sw, u8 span_id)
+{
+ struct prestera_span_entry *entry;
+ int err;
+
+ entry = prestera_span_entry_find_by_id(sw->span, span_id);
+ if (!entry)
+ return -ENOENT;
+
+ if (!refcount_dec_and_test(&entry->ref_count))
+ return 0;
+
+ err = prestera_hw_span_release(sw, span_id);
+ if (err)
+ return err;
+
+ prestera_span_entry_del(entry);
+ return 0;
+}
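
/* Reference-count lifecycle sketch: two matchall rules mirroring to the
 * same analyzer port share one hardware SPAN session.
 *
 *	prestera_span_get(to_port, &id);  // HW session allocated, ref = 1
 *	prestera_span_get(to_port, &id);  // same id returned,     ref = 2
 *	prestera_span_put(sw, id);        // ref = 1, session kept
 *	prestera_span_put(sw, id);        // ref = 0, session released
 */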
+
+static int prestera_span_rule_add(struct prestera_flow_block_binding *binding,
+ struct prestera_port *to_port)
+{
+ struct prestera_switch *sw = binding->port->sw;
+ u8 span_id;
+ int err;
+
+ if (binding->span_id != PRESTERA_SPAN_INVALID_ID)
+ /* port already in mirroring */
+ return -EEXIST;
+
+ err = prestera_span_get(to_port, &span_id);
+ if (err)
+ return err;
+
+ err = prestera_hw_span_bind(binding->port, span_id);
+ if (err) {
+ prestera_span_put(sw, span_id);
+ return err;
+ }
+
+ binding->span_id = span_id;
+ return 0;
+}
+
+static int prestera_span_rule_del(struct prestera_flow_block_binding *binding)
+{
+ int err;
+
+ if (binding->span_id == PRESTERA_SPAN_INVALID_ID)
+ return -ENOENT;
+
+ err = prestera_hw_span_unbind(binding->port);
+ if (err)
+ return err;
+
+ err = prestera_span_put(binding->port->sw, binding->span_id);
+ if (err)
+ return err;
+
+ binding->span_id = PRESTERA_SPAN_INVALID_ID;
+ return 0;
+}
+
+int prestera_span_replace(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f)
+{
+ struct prestera_flow_block_binding *binding;
+ __be16 protocol = f->common.protocol;
+ struct flow_action_entry *act;
+ struct prestera_port *port;
+ int err;
+
+ if (!flow_offload_has_one_action(&f->rule->action)) {
+ NL_SET_ERR_MSG(f->common.extack,
+ "Only singular actions are supported");
+ return -EOPNOTSUPP;
+ }
+
+ act = &f->rule->action.entries[0];
+
+ if (!prestera_netdev_check(act->dev)) {
+ NL_SET_ERR_MSG(f->common.extack,
+ "Only Marvell Prestera port is supported");
+ return -EINVAL;
+ }
+ if (!tc_cls_can_offload_and_chain0(act->dev, &f->common))
+ return -EOPNOTSUPP;
+ if (act->id != FLOW_ACTION_MIRRED)
+ return -EOPNOTSUPP;
+ if (protocol != htons(ETH_P_ALL))
+ return -EOPNOTSUPP;
+
+ port = netdev_priv(act->dev);
+
+ list_for_each_entry(binding, &block->binding_list, list) {
+ err = prestera_span_rule_add(binding, port);
+ if (err)
+ goto rollback;
+ }
+
+ return 0;
+
+rollback:
+ list_for_each_entry_continue_reverse(binding,
+ &block->binding_list, list)
+ prestera_span_rule_del(binding);
+ return err;
+}
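
/* The rollback path walks backwards from the binding that failed, so
 * only bindings already programmed are unbound: for bindings A, B, C
 * with C failing, list_for_each_entry_continue_reverse() undoes B and
 * then A, leaving the block unchanged overall.
 */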
+
+void prestera_span_destroy(struct prestera_flow_block *block)
+{
+ struct prestera_flow_block_binding *binding;
+
+ list_for_each_entry(binding, &block->binding_list, list)
+ prestera_span_rule_del(binding);
+}
+
+int prestera_span_init(struct prestera_switch *sw)
+{
+ struct prestera_span *span;
+
+ span = kzalloc(sizeof(*span), GFP_KERNEL);
+ if (!span)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&span->entries);
+
+ sw->span = span;
+ span->sw = sw;
+
+ return 0;
+}
+
+void prestera_span_fini(struct prestera_switch *sw)
+{
+ struct prestera_span *span = sw->span;
+
+ WARN_ON(!list_empty(&span->entries));
+ kfree(span);
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_span.h b/drivers/net/ethernet/marvell/prestera/prestera_span.h
new file mode 100644
index 000000000000..f0644521f78a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_span.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_SPAN_H_
+#define _PRESTERA_SPAN_H_
+
+#include <net/pkt_cls.h>
+
+#define PRESTERA_SPAN_INVALID_ID -1
+
+struct prestera_switch;
+struct prestera_flow_block;
+
+int prestera_span_init(struct prestera_switch *sw);
+void prestera_span_fini(struct prestera_switch *sw);
+int prestera_span_replace(struct prestera_flow_block *block,
+ struct tc_cls_matchall_offload *f);
+void prestera_span_destroy(struct prestera_flow_block *block);
+
+#endif /* _PRESTERA_SPAN_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
index cb564890a3dc..74b81b4fbb97 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
@@ -180,6 +180,45 @@ err_port_vlan_alloc:
return ERR_PTR(err);
}
+static int prestera_fdb_add(struct prestera_port *port,
+ const unsigned char *mac, u16 vid, bool dynamic)
+{
+ if (prestera_port_is_lag_member(port))
+ return prestera_hw_lag_fdb_add(port->sw, prestera_port_lag_id(port),
+ mac, vid, dynamic);
+
+ return prestera_hw_fdb_add(port, mac, vid, dynamic);
+}
+
+static int prestera_fdb_del(struct prestera_port *port,
+ const unsigned char *mac, u16 vid)
+{
+ if (prestera_port_is_lag_member(port))
+ return prestera_hw_lag_fdb_del(port->sw, prestera_port_lag_id(port),
+ mac, vid);
+ else
+ return prestera_hw_fdb_del(port, mac, vid);
+}
+
+static int prestera_fdb_flush_port_vlan(struct prestera_port *port, u16 vid,
+ u32 mode)
+{
+ if (prestera_port_is_lag_member(port))
+ return prestera_hw_fdb_flush_lag_vlan(port->sw, prestera_port_lag_id(port),
+ vid, mode);
+ else
+ return prestera_hw_fdb_flush_port_vlan(port, vid, mode);
+}
+
+static int prestera_fdb_flush_port(struct prestera_port *port, u32 mode)
+{
+ if (prestera_port_is_lag_member(port))
+ return prestera_hw_fdb_flush_lag(port->sw, prestera_port_lag_id(port),
+ mode);
+ else
+ return prestera_hw_fdb_flush_port(port, mode);
+}
+
static void
prestera_port_vlan_bridge_leave(struct prestera_port_vlan *port_vlan)
{
@@ -199,11 +238,11 @@ prestera_port_vlan_bridge_leave(struct prestera_port_vlan *port_vlan)
last_port = port_count == 1;
if (last_vlan)
- prestera_hw_fdb_flush_port(port, fdb_flush_mode);
+ prestera_fdb_flush_port(port, fdb_flush_mode);
else if (last_port)
prestera_hw_fdb_flush_vlan(port->sw, vid, fdb_flush_mode);
else
- prestera_hw_fdb_flush_port_vlan(port, vid, fdb_flush_mode);
+ prestera_fdb_flush_port_vlan(port, vid, fdb_flush_mode);
list_del(&port_vlan->br_vlan_head);
prestera_bridge_vlan_put(br_vlan);
@@ -312,11 +351,29 @@ __prestera_bridge_port_by_dev(struct prestera_bridge *bridge,
return NULL;
}
+static int prestera_match_upper_bridge_dev(struct net_device *dev,
+ struct netdev_nested_priv *priv)
+{
+ if (netif_is_bridge_master(dev))
+ priv->data = dev;
+
+ return 0;
+}
+
+static struct net_device *prestera_get_upper_bridge_dev(struct net_device *dev)
+{
+ struct netdev_nested_priv priv = { };
+
+ netdev_walk_all_upper_dev_rcu(dev, prestera_match_upper_bridge_dev,
+ &priv);
+ return priv.data;
+}
+
static struct prestera_bridge_port *
prestera_bridge_port_by_dev(struct prestera_switchdev *swdev,
struct net_device *dev)
{
- struct net_device *br_dev = netdev_master_upper_dev_get(dev);
+ struct net_device *br_dev = prestera_get_upper_bridge_dev(dev);
struct prestera_bridge *bridge;
if (!br_dev)
@@ -404,7 +461,8 @@ prestera_bridge_1d_port_join(struct prestera_bridge_port *br_port)
if (err)
return err;
- err = prestera_hw_port_flood_set(port, br_port->flags & BR_FLOOD);
+ err = prestera_hw_port_flood_set(port, BR_FLOOD | BR_MCAST_FLOOD,
+ br_port->flags);
if (err)
goto err_port_flood_set;
@@ -415,24 +473,23 @@ prestera_bridge_1d_port_join(struct prestera_bridge_port *br_port)
return 0;
err_port_learning_set:
- prestera_hw_port_flood_set(port, false);
err_port_flood_set:
prestera_hw_bridge_port_delete(port, bridge->bridge_id);
return err;
}
-static int prestera_port_bridge_join(struct prestera_port *port,
- struct net_device *upper)
+int prestera_bridge_port_join(struct net_device *br_dev,
+ struct prestera_port *port)
{
struct prestera_switchdev *swdev = port->sw->swdev;
struct prestera_bridge_port *br_port;
struct prestera_bridge *bridge;
int err;
- bridge = prestera_bridge_by_dev(swdev, upper);
+ bridge = prestera_bridge_by_dev(swdev, br_dev);
if (!bridge) {
- bridge = prestera_bridge_create(swdev, upper);
+ bridge = prestera_bridge_create(swdev, br_dev);
if (IS_ERR(bridge))
return PTR_ERR(bridge);
}
@@ -505,14 +562,14 @@ static int prestera_port_vid_stp_set(struct prestera_port *port, u16 vid,
return prestera_hw_vlan_port_stp_set(port, vid, hw_state);
}
-static void prestera_port_bridge_leave(struct prestera_port *port,
- struct net_device *upper)
+void prestera_bridge_port_leave(struct net_device *br_dev,
+ struct prestera_port *port)
{
struct prestera_switchdev *swdev = port->sw->swdev;
struct prestera_bridge_port *br_port;
struct prestera_bridge *bridge;
- bridge = prestera_bridge_by_dev(swdev, upper);
+ bridge = prestera_bridge_by_dev(swdev, br_dev);
if (!bridge)
return;
@@ -528,57 +585,11 @@ static void prestera_port_bridge_leave(struct prestera_port *port,
prestera_bridge_1d_port_leave(br_port);
prestera_hw_port_learning_set(port, false);
- prestera_hw_port_flood_set(port, false);
+ prestera_hw_port_flood_set(port, BR_FLOOD | BR_MCAST_FLOOD, 0);
prestera_port_vid_stp_set(port, PRESTERA_VID_ALL, BR_STATE_FORWARDING);
prestera_bridge_port_put(br_port);
}
-int prestera_bridge_port_event(struct net_device *dev, unsigned long event,
- void *ptr)
-{
- struct netdev_notifier_changeupper_info *info = ptr;
- struct netlink_ext_ack *extack;
- struct prestera_port *port;
- struct net_device *upper;
- int err;
-
- extack = netdev_notifier_info_to_extack(&info->info);
- port = netdev_priv(dev);
- upper = info->upper_dev;
-
- switch (event) {
- case NETDEV_PRECHANGEUPPER:
- if (!netif_is_bridge_master(upper)) {
- NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
- return -EINVAL;
- }
-
- if (!info->linking)
- break;
-
- if (netdev_has_any_upper_dev(upper)) {
- NL_SET_ERR_MSG_MOD(extack, "Upper device is already enslaved");
- return -EINVAL;
- }
- break;
-
- case NETDEV_CHANGEUPPER:
- if (!netif_is_bridge_master(upper))
- break;
-
- if (info->linking) {
- err = prestera_port_bridge_join(port, upper);
- if (err)
- return err;
- } else {
- prestera_port_bridge_leave(port, upper);
- }
- break;
- }
-
- return 0;
-}
-
static int prestera_port_attr_br_flags_set(struct prestera_port *port,
struct net_device *dev,
struct switchdev_brport_flags flags)
@@ -590,11 +601,9 @@ static int prestera_port_attr_br_flags_set(struct prestera_port *port,
if (!br_port)
return 0;
- if (flags.mask & BR_FLOOD) {
- err = prestera_hw_port_flood_set(port, flags.val & BR_FLOOD);
- if (err)
- return err;
- }
+ err = prestera_hw_port_flood_set(port, flags.mask, flags.val);
+ if (err)
+ return err;
if (flags.mask & BR_LEARNING) {
err = prestera_hw_port_learning_set(port,
@@ -771,9 +780,9 @@ static int prestera_port_fdb_set(struct prestera_port *port,
vid = bridge->bridge_id;
if (adding)
- err = prestera_hw_fdb_add(port, fdb_info->addr, vid, false);
+ err = prestera_fdb_add(port, fdb_info->addr, vid, false);
else
- err = prestera_hw_fdb_del(port, fdb_info->addr, vid);
+ err = prestera_fdb_del(port, fdb_info->addr, vid);
return err;
}
@@ -901,7 +910,8 @@ prestera_port_vlan_bridge_join(struct prestera_port_vlan *port_vlan,
if (port_vlan->br_port)
return 0;
- err = prestera_hw_port_flood_set(port, br_port->flags & BR_FLOOD);
+ err = prestera_hw_port_flood_set(port, BR_FLOOD | BR_MCAST_FLOOD,
+ br_port->flags);
if (err)
return err;
@@ -1009,15 +1019,15 @@ static int prestera_port_vlans_add(struct prestera_port *port,
{
bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- struct net_device *dev = vlan->obj.orig_dev;
+ struct net_device *orig_dev = vlan->obj.orig_dev;
struct prestera_bridge_port *br_port;
struct prestera_switch *sw = port->sw;
struct prestera_bridge *bridge;
- if (netif_is_bridge_master(dev))
+ if (netif_is_bridge_master(orig_dev))
return 0;
- br_port = prestera_bridge_port_by_dev(sw->swdev, dev);
+ br_port = prestera_bridge_port_by_dev(sw->swdev, port->dev);
if (WARN_ON(!br_port))
return -EINVAL;
@@ -1049,14 +1059,14 @@ static int prestera_port_obj_add(struct net_device *dev,
static int prestera_port_vlans_del(struct prestera_port *port,
const struct switchdev_obj_port_vlan *vlan)
{
- struct net_device *dev = vlan->obj.orig_dev;
+ struct net_device *orig_dev = vlan->obj.orig_dev;
struct prestera_bridge_port *br_port;
struct prestera_switch *sw = port->sw;
- if (netif_is_bridge_master(dev))
+ if (netif_is_bridge_master(orig_dev))
return -EOPNOTSUPP;
- br_port = prestera_bridge_port_by_dev(sw->swdev, dev);
+ br_port = prestera_bridge_port_by_dev(sw->swdev, port->dev);
if (WARN_ON(!br_port))
return -EINVAL;
@@ -1114,10 +1124,26 @@ static void prestera_fdb_event(struct prestera_switch *sw,
struct prestera_event *evt, void *arg)
{
struct switchdev_notifier_fdb_info info;
+ struct net_device *dev = NULL;
struct prestera_port *port;
+ struct prestera_lag *lag;
- port = prestera_find_port(sw, evt->fdb_evt.port_id);
- if (!port)
+ switch (evt->fdb_evt.type) {
+ case PRESTERA_FDB_ENTRY_TYPE_REG_PORT:
+ port = prestera_find_port(sw, evt->fdb_evt.dest.port_id);
+ if (port)
+ dev = port->dev;
+ break;
+ case PRESTERA_FDB_ENTRY_TYPE_LAG:
+ lag = prestera_lag_by_id(sw, evt->fdb_evt.dest.lag_id);
+ if (lag)
+ dev = lag->dev;
+ break;
+ default:
+ return;
+ }
+
+ if (!dev)
return;
info.addr = evt->fdb_evt.data.mac;
@@ -1129,11 +1155,11 @@ static void prestera_fdb_event(struct prestera_switch *sw,
switch (evt->id) {
case PRESTERA_FDB_EVENT_LEARNED:
call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
- port->dev, &info.info, NULL);
+ dev, &info.info, NULL);
break;
case PRESTERA_FDB_EVENT_AGED:
call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
- port->dev, &info.info, NULL);
+ dev, &info.info, NULL);
break;
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h
index 606e21d2355b..a91bc35d235f 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h
@@ -7,7 +7,10 @@
int prestera_switchdev_init(struct prestera_switch *sw);
void prestera_switchdev_fini(struct prestera_switch *sw);
-int prestera_bridge_port_event(struct net_device *dev, unsigned long event,
- void *ptr);
+int prestera_bridge_port_join(struct net_device *br_dev,
+ struct prestera_port *port);
+
+void prestera_bridge_port_leave(struct net_device *br_dev,
+ struct prestera_port *port);
#endif /* _PRESTERA_SWITCHDEV_H_ */
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 324c280cc22c..8b8bff59c8fe 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2503,7 +2503,7 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
if (length == 0) {
/* don't need this page */
- __skb_frag_unref(frag);
+ __skb_frag_unref(frag, false);
--skb_shinfo(skb)->nr_frags;
} else {
size = min(length, (unsigned) PAGE_SIZE);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index ed4eacef17ce..64adfd24e134 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -681,32 +681,53 @@ static int mtk_set_mac_address(struct net_device *dev, void *p)
void mtk_stats_update_mac(struct mtk_mac *mac)
{
struct mtk_hw_stats *hw_stats = mac->hw_stats;
- unsigned int base = MTK_GDM1_TX_GBCNT;
- u64 stats;
-
- base += hw_stats->reg_offset;
+ struct mtk_eth *eth = mac->hw;
u64_stats_update_begin(&hw_stats->syncp);
- hw_stats->rx_bytes += mtk_r32(mac->hw, base);
- stats = mtk_r32(mac->hw, base + 0x04);
- if (stats)
- hw_stats->rx_bytes += (stats << 32);
- hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
- hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
- hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
- hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
- hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
- hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
- hw_stats->rx_flow_control_packets +=
- mtk_r32(mac->hw, base + 0x24);
- hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
- hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
- hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
- stats = mtk_r32(mac->hw, base + 0x34);
- if (stats)
- hw_stats->tx_bytes += (stats << 32);
- hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
+ hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
+ hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
+ hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
+ hw_stats->rx_checksum_errors +=
+ mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
+ } else {
+ unsigned int offs = hw_stats->reg_offset;
+ u64 stats;
+
+ hw_stats->rx_bytes += mtk_r32(mac->hw,
+ MTK_GDM1_RX_GBCNT_L + offs);
+ stats = mtk_r32(mac->hw, MTK_GDM1_RX_GBCNT_H + offs);
+ if (stats)
+ hw_stats->rx_bytes += (stats << 32);
+ hw_stats->rx_packets +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_GPCNT + offs);
+ hw_stats->rx_overflow +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_OERCNT + offs);
+ hw_stats->rx_fcs_errors +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_FERCNT + offs);
+ hw_stats->rx_short_errors +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_SERCNT + offs);
+ hw_stats->rx_long_errors +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_LENCNT + offs);
+ hw_stats->rx_checksum_errors +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_CERCNT + offs);
+ hw_stats->rx_flow_control_packets +=
+ mtk_r32(mac->hw, MTK_GDM1_RX_FCCNT + offs);
+ hw_stats->tx_skip +=
+ mtk_r32(mac->hw, MTK_GDM1_TX_SKIPCNT + offs);
+ hw_stats->tx_collisions +=
+ mtk_r32(mac->hw, MTK_GDM1_TX_COLCNT + offs);
+ hw_stats->tx_bytes +=
+ mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_L + offs);
+ stats = mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_H + offs);
+ if (stats)
+ hw_stats->tx_bytes += (stats << 32);
+ hw_stats->tx_packets +=
+ mtk_r32(mac->hw, MTK_GDM1_TX_GPCNT + offs);
+ }
+
u64_stats_update_end(&hw_stats->syncp);
}
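/* A reader of these counters is expected to pair with the
 * u64_stats_update_begin()/end() writer section above roughly as
 * follows (sketch; the retry loop guarantees a consistent 64-bit
 * snapshot on 32-bit systems):
 *
 *	unsigned int start;
 *	u64 rx_bytes;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&hw_stats->syncp);
 *		rx_bytes = hw_stats->rx_bytes;
 *	} while (u64_stats_fetch_retry(&hw_stats->syncp, start));
 */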
@@ -2423,7 +2444,8 @@ static void mtk_dim_rx(struct work_struct *work)
val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
- mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
spin_unlock_bh(&eth->dim_lock);
@@ -2452,7 +2474,8 @@ static void mtk_dim_tx(struct work_struct *work)
val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
- mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
spin_unlock_bh(&eth->dim_lock);
@@ -2480,6 +2503,10 @@ static int mtk_hw_init(struct mtk_eth *eth)
goto err_disable_pm;
}
+ /* set interrupt delays based on current Net DIM sample */
+ mtk_dim_rx(&eth->rx_dim.work);
+ mtk_dim_tx(&eth->tx_dim.work);
+
/* disable delay and normal interrupt */
mtk_tx_irq_disable(eth, ~0);
mtk_rx_irq_disable(eth, ~0);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 11331b44ba07..5ef70dd8b49c 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -278,8 +278,21 @@
/* QDMA FQ Free Page Buffer Length Register */
#define MTK_QDMA_FQ_BLEN 0x1B2C
-/* GMA1 Received Good Byte Count Register */
-#define MTK_GDM1_TX_GBCNT 0x2400
+/* GMA1 counter / statistics register */
+#define MTK_GDM1_RX_GBCNT_L 0x2400
+#define MTK_GDM1_RX_GBCNT_H 0x2404
+#define MTK_GDM1_RX_GPCNT 0x2408
+#define MTK_GDM1_RX_OERCNT 0x2410
+#define MTK_GDM1_RX_FERCNT 0x2414
+#define MTK_GDM1_RX_SERCNT 0x2418
+#define MTK_GDM1_RX_LENCNT 0x241c
+#define MTK_GDM1_RX_CERCNT 0x2420
+#define MTK_GDM1_RX_FCCNT 0x2424
+#define MTK_GDM1_TX_SKIPCNT 0x2428
+#define MTK_GDM1_TX_COLCNT 0x242c
+#define MTK_GDM1_TX_GBCNT_L 0x2430
+#define MTK_GDM1_TX_GBCNT_H 0x2434
+#define MTK_GDM1_TX_GPCNT 0x2438
#define MTK_STAT_OFFSET 0x40
/* QDMA descriptor txd4 */
@@ -502,6 +515,13 @@
#define MT7628_SDM_MAC_ADRL (MT7628_SDM_OFFSET + 0x0c)
#define MT7628_SDM_MAC_ADRH (MT7628_SDM_OFFSET + 0x10)
+/* Counter / stat register */
+#define MT7628_SDM_TPCNT (MT7628_SDM_OFFSET + 0x100)
+#define MT7628_SDM_TBCNT (MT7628_SDM_OFFSET + 0x104)
+#define MT7628_SDM_RPCNT (MT7628_SDM_OFFSET + 0x108)
+#define MT7628_SDM_RBCNT (MT7628_SDM_OFFSET + 0x10c)
+#define MT7628_SDM_CS_ERR (MT7628_SDM_OFFSET + 0x110)
+
struct mtk_rx_dma {
unsigned int rxd1;
unsigned int rxd2;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 1434df66fcf2..3616b77caa0a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -2027,8 +2027,6 @@ static int mlx4_en_set_tunable(struct net_device *dev,
return ret;
}
-#define MLX4_EEPROM_PAGE_LEN 256
-
static int mlx4_en_get_module_info(struct net_device *dev,
struct ethtool_modinfo *modinfo)
{
@@ -2063,7 +2061,7 @@ static int mlx4_en_get_module_info(struct net_device *dev,
break;
case MLX4_MODULE_ID_SFP:
modinfo->type = ETH_MODULE_SFF_8472;
- modinfo->eeprom_len = MLX4_EEPROM_PAGE_LEN;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
break;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index e35e4d7ef4d1..cea62b8f554c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -526,7 +526,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
fail:
while (nr > 0) {
nr--;
- __skb_frag_unref(skb_shinfo(skb)->frags + nr);
+ __skb_frag_unref(skb_shinfo(skb)->frags + nr, false);
}
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index ba6ac31a339d..256a06b3c096 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -1973,6 +1973,7 @@ EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
#define I2C_ADDR_LOW 0x50
#define I2C_ADDR_HIGH 0x51
#define I2C_PAGE_SIZE 256
+#define I2C_HIGH_PAGE_SIZE 128
/* Module Info Data */
struct mlx4_cable_info {
@@ -2026,6 +2027,88 @@ static inline const char *cable_info_mad_err_str(u16 mad_status)
return "Unknown Error";
}
+static int mlx4_get_module_id(struct mlx4_dev *dev, u8 port, u8 *module_id)
+{
+ struct mlx4_cmd_mailbox *inbox, *outbox;
+ struct mlx4_mad_ifc *inmad, *outmad;
+ struct mlx4_cable_info *cable_info;
+ int ret;
+
+ inbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(inbox))
+ return PTR_ERR(inbox);
+
+ outbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(outbox)) {
+ mlx4_free_cmd_mailbox(dev, inbox);
+ return PTR_ERR(outbox);
+ }
+
+ inmad = (struct mlx4_mad_ifc *)(inbox->buf);
+ outmad = (struct mlx4_mad_ifc *)(outbox->buf);
+
+ inmad->method = 0x1; /* Get */
+ inmad->class_version = 0x1;
+ inmad->mgmt_class = 0x1;
+ inmad->base_version = 0x1;
+ inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */
+
+ cable_info = (struct mlx4_cable_info *)inmad->data;
+ cable_info->dev_mem_address = 0;
+ cable_info->page_num = 0;
+ cable_info->i2c_addr = I2C_ADDR_LOW;
+ cable_info->size = cpu_to_be16(1);
+
+ ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
+ if (ret)
+ goto out;
+
+ if (be16_to_cpu(outmad->status)) {
+ /* Mad returned with bad status */
+ ret = be16_to_cpu(outmad->status);
+ mlx4_warn(dev,
+ "MLX4_CMD_MAD_IFC Get Module ID attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
+ 0xFF60, port, I2C_ADDR_LOW, 0, 1, ret,
+ cable_info_mad_err_str(ret));
+ ret = -ret;
+ goto out;
+ }
+ cable_info = (struct mlx4_cable_info *)outmad->data;
+ *module_id = cable_info->data[0];
+out:
+ mlx4_free_cmd_mailbox(dev, inbox);
+ mlx4_free_cmd_mailbox(dev, outbox);
+ return ret;
+}
+
+static void mlx4_sfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
+{
+ *i2c_addr = I2C_ADDR_LOW;
+ *page_num = 0;
+
+ if (*offset < I2C_PAGE_SIZE)
+ return;
+
+ *i2c_addr = I2C_ADDR_HIGH;
+ *offset -= I2C_PAGE_SIZE;
+}
+
+static void mlx4_qsfp_eeprom_params_set(u8 *i2c_addr, u8 *page_num, u16 *offset)
+{
+ /* Offsets 0-255 belong to page 0.
+ * Offsets 256-639 belong to pages 01, 02, 03.
+ * For example, offset 400 is page 02: 1 + (400 - 256) / 128 = 2
+ */
+ if (*offset < I2C_PAGE_SIZE)
+ *page_num = 0;
+ else
+ *page_num = 1 + (*offset - I2C_PAGE_SIZE) / I2C_HIGH_PAGE_SIZE;
+ *i2c_addr = I2C_ADDR_LOW;
+ *offset -= *page_num * I2C_HIGH_PAGE_SIZE;
+}
+
/**
* mlx4_get_module_info - Read cable module eeprom data
* @dev: mlx4_dev.
@@ -2045,12 +2128,30 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
struct mlx4_cmd_mailbox *inbox, *outbox;
struct mlx4_mad_ifc *inmad, *outmad;
struct mlx4_cable_info *cable_info;
- u16 i2c_addr;
+ u8 module_id, i2c_addr, page_num;
int ret;
if (size > MODULE_INFO_MAX_READ)
size = MODULE_INFO_MAX_READ;
+ ret = mlx4_get_module_id(dev, port, &module_id);
+ if (ret)
+ return ret;
+
+ switch (module_id) {
+ case MLX4_MODULE_ID_SFP:
+ mlx4_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+ break;
+ case MLX4_MODULE_ID_QSFP:
+ case MLX4_MODULE_ID_QSFP_PLUS:
+ case MLX4_MODULE_ID_QSFP28:
+ mlx4_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+ break;
+ default:
+ mlx4_err(dev, "Module ID not recognized: %#x\n", module_id);
+ return -EINVAL;
+ }
+
inbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(inbox))
return PTR_ERR(inbox);
@@ -2076,11 +2177,9 @@ int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
*/
size -= offset + size - I2C_PAGE_SIZE;
- i2c_addr = I2C_ADDR_LOW;
-
cable_info = (struct mlx4_cable_info *)inmad->data;
cable_info->dev_mem_address = cpu_to_be16(offset);
- cable_info->page_num = 0;
+ cable_info->page_num = page_num;
cable_info->i2c_addr = i2c_addr;
cable_info->size = cpu_to_be16(size);
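The SFP/QSFP helpers above encode two different EEPROM layouts: SFP modules expose a second 256-byte space behind I2C address 0x51, while QSFP modules keep one I2C address and bank offsets 256+ into 128-byte upper pages mapped at device addresses 128-255. A standalone illustration of the QSFP arithmetic (plain user-space C, not kernel code; the constants are the ones defined in this diff):

#include <stdio.h>

#define I2C_PAGE_SIZE      256
#define I2C_HIGH_PAGE_SIZE 128

int main(void)
{
	unsigned int offsets[] = { 0, 255, 256, 400, 639 };
	unsigned int i;

	for (i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
		unsigned int off = offsets[i];
		unsigned int page = off < I2C_PAGE_SIZE ?
			0 : 1 + (off - I2C_PAGE_SIZE) / I2C_HIGH_PAGE_SIZE;

		/* device address: page 0 is 0-255, upper pages live
		 * in the 128-255 window */
		printf("offset %3u -> page %u, device address %u\n",
		       off, page, off - page * I2C_HIGH_PAGE_SIZE);
	}
	return 0;
}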
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 461a43f338e6..d62f90aedade 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -79,6 +79,16 @@ config MLX5_ESWITCH
Legacy SRIOV mode (L2 mac vlan steering based).
Switchdev mode (eswitch offloads).
+config MLX5_BRIDGE

+ bool
+ depends on MLX5_ESWITCH && BRIDGE
+ default y
+ help
+ mlx5 ConnectX offload support for Ethernet bridging (BRIDGE).
+ Enables adding representors of mlx5 uplink and VF ports to a bridge
+ and offloading rules for traffic between such ports. Supports VLANs
+ (trunk and access modes).
+
config MLX5_CLS_ACT
bool "MLX5 TC classifier action support"
depends on MLX5_ESWITCH && NET_CLS_ACT
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index a1223e904190..b5072a3a2585 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -14,7 +14,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o alloc.o port.o mr.o pd.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
- fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
+ fs_counters.o fs_ft_pool.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \
fw_reset.o qos.o
@@ -56,6 +56,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o \
esw/devlink_port.o esw/vporttbl.o
mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += esw/sample.o
+mlx5_core-$(CONFIG_MLX5_BRIDGE) += esw/bridge.o en/rep/bridge.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 44c458443428..d791d351b489 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -63,6 +63,11 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
err = devlink_info_version_running_put(req, "fw.version", version_str);
if (err)
return err;
+ err = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ version_str);
+ if (err)
+ return err;
/* no pending version, return running (stored) version */
if (stored_fw == 0)
@@ -74,8 +79,9 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
err = devlink_info_version_stored_put(req, "fw.version", version_str);
if (err)
return err;
-
- return 0;
+ return devlink_info_version_stored_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ version_str);
}
static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netlink_ext_ack *extack)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index b636d63358d2..b1b51bbba054 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -974,7 +974,6 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
struct mlx5e_xsk_param *xsk, int node,
struct mlx5e_rq *rq);
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
-void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_close_rq(struct mlx5e_rq *rq);
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
void mlx5e_destroy_rq(struct mlx5e_rq *rq);
@@ -1163,6 +1162,13 @@ mlx5e_calc_max_nch(struct mlx5e_priv *priv, const struct mlx5e_profile *profile)
return priv->netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
}
+static inline bool
+mlx5e_tx_mpwqe_supported(struct mlx5_core_dev *mdev)
+{
+ return !is_kdump_kernel() &&
+ MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe);
+}
+
int mlx5e_priv_init(struct mlx5e_priv *priv,
struct net_device *netdev,
struct mlx5_core_dev *mdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index f410c1268422..150c8e82c738 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -201,7 +201,7 @@ int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *param
static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
{
- struct dim_cq_moder moder;
+ struct dim_cq_moder moder = {};
moder.cq_period_mode = cq_period_mode;
moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
@@ -214,7 +214,7 @@ static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
{
- struct dim_cq_moder moder;
+ struct dim_cq_moder moder = {};
moder.cq_period_mode = cq_period_mode;
moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
@@ -614,7 +614,7 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
{
- if (mlx5_accel_is_ktls_rx(mdev))
+ if (mlx5e_accel_is_ktls_rx(mdev))
return MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
@@ -643,7 +643,7 @@ static void mlx5e_build_async_icosq_param(struct mlx5_core_dev *mdev,
mlx5e_build_sq_param_common(mdev, param);
param->stop_room = mlx5e_stop_room_for_wqe(1); /* for XSK NOP */
- param->is_tls = mlx5_accel_is_ktls_rx(mdev);
+ param->is_tls = mlx5e_accel_is_ktls_rx(mdev);
if (param->is_tls)
param->stop_room += mlx5e_stop_room_for_wqe(1); /* for TLS RX resync NOP */
MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(mdev, reg_umr_sq));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
index 95f2b26a3ee3..9c076aa20306 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
@@ -223,6 +223,8 @@ static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *pt
rpriv = priv->ppriv;
fwd_vport_num = rpriv->rep->vport;
lag_dev = netdev_master_upper_dev_get(netdev);
+ if (!lag_dev)
+ return;
netdev_dbg(netdev, "lag_dev(%s)'s slave vport(%d) is txable(%d)\n",
lag_dev->name, fwd_vport_num, net_lag_port_dev_txable(netdev));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
new file mode 100644
index 000000000000..7f5efc1b4392
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -0,0 +1,424 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#include <linux/netdevice.h>
+#include <linux/if_bridge.h>
+#include <net/netevent.h>
+#include <net/switchdev.h>
+#include "bridge.h"
+#include "esw/bridge.h"
+#include "en_rep.h"
+
+#define MLX5_ESW_BRIDGE_UPDATE_INTERVAL 1000
+
+struct mlx5_bridge_switchdev_fdb_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ bool add;
+};
+
+static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
+ struct mlx5_esw_bridge_offloads,
+ netdev_nb);
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ struct net_device *upper;
+ struct mlx5e_priv *priv;
+ u16 vport_num;
+
+ if (!mlx5e_eswitch_rep(dev))
+ return 0;
+
+ upper = info->upper_dev;
+ if (!netif_is_bridge_master(upper))
+ return 0;
+
+ esw = br_offloads->esw;
+ priv = netdev_priv(dev);
+ if (esw != priv->mdev->priv.eswitch)
+ return 0;
+
+ rpriv = priv->ppriv;
+ vport_num = rpriv->rep->vport;
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ return info->linking ?
+ mlx5_esw_bridge_vport_link(upper->ifindex, br_offloads, vport, extack) :
+ mlx5_esw_bridge_vport_unlink(upper->ifindex, br_offloads, vport, extack);
+}
+
+static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ int err = 0;
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ break;
+
+ case NETDEV_CHANGEUPPER:
+ err = mlx5_esw_bridge_port_changeupper(nb, ptr);
+ break;
+ }
+
+ return notifier_from_errno(err);
+}
+
+static int mlx5_esw_bridge_port_obj_add(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ const struct switchdev_obj_port_vlan *vlan;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ struct mlx5e_priv *priv;
+ u16 vport_num;
+ int err = 0;
+
+ priv = netdev_priv(dev);
+ rpriv = priv->ppriv;
+ vport_num = rpriv->rep->vport;
+ esw = priv->mdev->priv.eswitch;
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ err = mlx5_esw_bridge_port_vlan_add(vlan->vid, vlan->flags, esw, vport, extack);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return err;
+}
+
+static int mlx5_esw_bridge_port_obj_del(struct net_device *dev,
+ const struct switchdev_obj *obj)
+{
+ const struct switchdev_obj_port_vlan *vlan;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ struct mlx5e_priv *priv;
+ u16 vport_num;
+
+ priv = netdev_priv(dev);
+ rpriv = priv->ppriv;
+ vport_num = rpriv->rep->vport;
+ esw = priv->mdev->priv.eswitch;
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ mlx5_esw_bridge_port_vlan_del(vlan->vid, esw, vport);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ struct mlx5e_priv *priv;
+ u16 vport_num;
+ int err = 0;
+
+ priv = netdev_priv(dev);
+ rpriv = priv->ppriv;
+ vport_num = rpriv->rep->vport;
+ esw = priv->mdev->priv.eswitch;
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ if (attr->u.brport_flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD)) {
+ NL_SET_ERR_MSG_MOD(extack, "Flag is not supported");
+ err = -EINVAL;
+ }
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ err = mlx5_esw_bridge_ageing_time_set(attr->u.ageing_time, esw, vport);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ err = mlx5_esw_bridge_vlan_filtering_set(attr->u.vlan_filtering, esw, vport);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int mlx5_esw_bridge_event_blocking(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ mlx5e_eswitch_rep,
+ mlx5_esw_bridge_port_obj_add);
+ break;
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ mlx5e_eswitch_rep,
+ mlx5_esw_bridge_port_obj_del);
+ break;
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ mlx5e_eswitch_rep,
+ mlx5_esw_bridge_port_obj_attr_set);
+ break;
+ default:
+ err = 0;
+ }
+
+ return notifier_from_errno(err);
+}
+
+static void
+mlx5_esw_bridge_cleanup_switchdev_fdb_work(struct mlx5_bridge_switchdev_fdb_work *fdb_work)
+{
+ dev_put(fdb_work->dev);
+ kfree(fdb_work->fdb_info.addr);
+ kfree(fdb_work);
+}
+
+static void mlx5_esw_bridge_switchdev_fdb_event_work(struct work_struct *work)
+{
+ struct mlx5_bridge_switchdev_fdb_work *fdb_work =
+ container_of(work, struct mlx5_bridge_switchdev_fdb_work, work);
+ struct switchdev_notifier_fdb_info *fdb_info =
+ &fdb_work->fdb_info;
+ struct net_device *dev = fdb_work->dev;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5_eswitch *esw;
+ struct mlx5_vport *vport;
+ struct mlx5e_priv *priv;
+ u16 vport_num;
+
+ rtnl_lock();
+
+ priv = netdev_priv(dev);
+ rpriv = priv->ppriv;
+ vport_num = rpriv->rep->vport;
+ esw = priv->mdev->priv.eswitch;
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ goto out;
+
+ if (fdb_work->add)
+ mlx5_esw_bridge_fdb_create(dev, esw, vport, fdb_info);
+ else
+ mlx5_esw_bridge_fdb_remove(dev, esw, vport, fdb_info);
+
+out:
+ rtnl_unlock();
+ mlx5_esw_bridge_cleanup_switchdev_fdb_work(fdb_work);
+}
+
+static struct mlx5_bridge_switchdev_fdb_work *
+mlx5_esw_bridge_init_switchdev_fdb_work(struct net_device *dev, bool add,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct mlx5_bridge_switchdev_fdb_work *work;
+ u8 *addr;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_WORK(&work->work, mlx5_esw_bridge_switchdev_fdb_event_work);
+ memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info));
+
+ addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!addr) {
+ kfree(work);
+ return ERR_PTR(-ENOMEM);
+ }
+ ether_addr_copy(addr, fdb_info->addr);
+ work->fdb_info.addr = addr;
+
+ dev_hold(dev);
+ work->dev = dev;
+ work->add = add;
+ return work;
+}
+
+static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb,
+ struct mlx5_esw_bridge_offloads,
+ nb);
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct mlx5_bridge_switchdev_fdb_work *work;
+ struct switchdev_notifier_info *info = ptr;
+ struct net_device *upper;
+ struct mlx5e_priv *priv;
+
+ if (!mlx5e_eswitch_rep(dev))
+ return NOTIFY_DONE;
+ priv = netdev_priv(dev);
+ if (priv->mdev->priv.eswitch != br_offloads->esw)
+ return NOTIFY_DONE;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ int err = switchdev_handle_port_attr_set(dev, ptr,
+ mlx5e_eswitch_rep,
+ mlx5_esw_bridge_port_obj_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ upper = netdev_master_upper_dev_get_rcu(dev);
+ if (!upper)
+ return NOTIFY_DONE;
+ if (!netif_is_bridge_master(upper))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb_info = container_of(info,
+ struct switchdev_notifier_fdb_info,
+ info);
+
+ work = mlx5_esw_bridge_init_switchdev_fdb_work(dev,
+ event == SWITCHDEV_FDB_ADD_TO_DEVICE,
+ fdb_info);
+ if (IS_ERR(work)) {
+ WARN_ONCE(1, "Failed to init switchdev work, err=%ld",
+ PTR_ERR(work));
+ return notifier_from_errno(PTR_ERR(work));
+ }
+
+ queue_work(br_offloads->wq, &work->work);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static void mlx5_esw_bridge_update_work(struct work_struct *work)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads = container_of(work,
+ struct mlx5_esw_bridge_offloads,
+ update_work.work);
+
+ rtnl_lock();
+ mlx5_esw_bridge_update(br_offloads);
+ rtnl_unlock();
+
+ queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
+ msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
+}
+
+void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_eswitch *esw =
+ mdev->priv.eswitch;
+ int err;
+
+ rtnl_lock();
+ br_offloads = mlx5_esw_bridge_init(esw);
+ rtnl_unlock();
+ if (IS_ERR(br_offloads)) {
+ esw_warn(mdev, "Failed to init esw bridge (err=%ld)\n", PTR_ERR(br_offloads));
+ return;
+ }
+
+ br_offloads->wq = alloc_ordered_workqueue("mlx5_bridge_wq", 0);
+ if (!br_offloads->wq) {
+ esw_warn(mdev, "Failed to allocate bridge offloads workqueue\n");
+ goto err_alloc_wq;
+ }
+ INIT_DELAYED_WORK(&br_offloads->update_work, mlx5_esw_bridge_update_work);
+ queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
+ msecs_to_jiffies(MLX5_ESW_BRIDGE_UPDATE_INTERVAL));
+
+ br_offloads->nb.notifier_call = mlx5_esw_bridge_switchdev_event;
+ err = register_switchdev_notifier(&br_offloads->nb);
+ if (err) {
+ esw_warn(mdev, "Failed to register switchdev notifier (err=%d)\n", err);
+ goto err_register_swdev;
+ }
+
+ br_offloads->nb_blk.notifier_call = mlx5_esw_bridge_event_blocking;
+ err = register_switchdev_blocking_notifier(&br_offloads->nb_blk);
+ if (err) {
+ esw_warn(mdev, "Failed to register blocking switchdev notifier (err=%d)\n", err);
+ goto err_register_swdev_blk;
+ }
+
+ br_offloads->netdev_nb.notifier_call = mlx5_esw_bridge_switchdev_port_event;
+ err = register_netdevice_notifier(&br_offloads->netdev_nb);
+ if (err) {
+ esw_warn(mdev, "Failed to register bridge offloads netdevice notifier (err=%d)\n",
+ err);
+ goto err_register_netdev;
+ }
+ return;
+
+err_register_netdev:
+ unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
+err_register_swdev_blk:
+ unregister_switchdev_notifier(&br_offloads->nb);
+err_register_swdev:
+ destroy_workqueue(br_offloads->wq);
+err_alloc_wq:
+ mlx5_esw_bridge_cleanup(esw);
+}
+
+void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_eswitch *esw =
+ mdev->priv.eswitch;
+
+ br_offloads = esw->br_offloads;
+ if (!br_offloads)
+ return;
+
+ unregister_netdevice_notifier(&br_offloads->netdev_nb);
+ unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
+ unregister_switchdev_notifier(&br_offloads->nb);
+ cancel_delayed_work(&br_offloads->update_work);
+ destroy_workqueue(br_offloads->wq);
+ rtnl_lock();
+ mlx5_esw_bridge_cleanup(esw);
+ rtnl_unlock();
+}
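The FDB path in this new file follows a common kernel pattern: switchdev notifiers for SWITCHDEV_FDB_{ADD,DEL}_TO_DEVICE run in atomic context, so the handler copies the event payload with GFP_ATOMIC, pins the netdev with dev_hold(), and defers the real FDB programming to an ordered workqueue where rtnl_lock() may sleep. A condensed, generic sketch of that pattern (names illustrative, not this driver's API):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct deferred_event {
	struct work_struct work;
	struct net_device *dev;
};

static void deferred_handler(struct work_struct *work)
{
	struct deferred_event *ev =
		container_of(work, struct deferred_event, work);

	rtnl_lock();
	/* ... program hardware state for ev->dev ... */
	rtnl_unlock();

	dev_put(ev->dev);	/* release the reference taken below */
	kfree(ev);
}

static int atomic_notifier_cb(struct net_device *dev)
{
	struct deferred_event *ev = kzalloc(sizeof(*ev), GFP_ATOMIC);

	if (!ev)
		return -ENOMEM;

	INIT_WORK(&ev->work, deferred_handler);
	dev_hold(dev);		/* keep dev alive until the work runs */
	ev->dev = dev;
	schedule_work(&ev->work);
	return 0;
}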
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.h
new file mode 100644
index 000000000000..fbeb64242831
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#ifndef __MLX5_EN_REP_BRIDGE__
+#define __MLX5_EN_REP_BRIDGE__
+
+#include "en.h"
+
+#if IS_ENABLED(CONFIG_MLX5_BRIDGE)
+
+void mlx5e_rep_bridge_init(struct mlx5e_priv *priv);
+void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv);
+
+#else /* CONFIG_MLX5_BRIDGE */
+
+static inline void mlx5e_rep_bridge_init(struct mlx5e_priv *priv) {}
+static inline void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv) {}
+
+#endif /* CONFIG_MLX5_BRIDGE */
+
+#endif /* __MLX5_EN_REP_BRIDGE__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 6cdc52d50a48..f0b98f5b2a92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -617,7 +617,7 @@ static bool mlx5e_restore_skb(struct sk_buff *skb, u32 chain, u32 reg_c1,
struct mlx5e_tc_update_priv *tc_priv)
{
struct mlx5e_priv *priv = netdev_priv(skb->dev);
- u32 tunnel_id = reg_c1 >> ESW_TUN_OFFSET;
+ u32 tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK;
if (chain) {
struct mlx5_rep_uplink_priv *uplink_priv;
@@ -626,7 +626,7 @@ static bool mlx5e_restore_skb(struct sk_buff *skb, u32 chain, u32 reg_c1,
struct mlx5_eswitch *esw;
u32 zone_restore_id;
- tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+ tc_skb_ext = tc_skb_ext_alloc(skb);
if (!tc_skb_ext) {
WARN_ON(1);
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 5da5e5323a44..91e7a01e32be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -23,7 +23,7 @@
#include "en_tc.h"
#include "en_rep.h"
-#define MLX5_CT_ZONE_BITS (mlx5e_tc_attr_to_reg_mappings[ZONE_TO_REG].mlen * 8)
+#define MLX5_CT_ZONE_BITS (mlx5e_tc_attr_to_reg_mappings[ZONE_TO_REG].mlen)
#define MLX5_CT_ZONE_MASK GENMASK(MLX5_CT_ZONE_BITS - 1, 0)
#define MLX5_CT_STATE_ESTABLISHED_BIT BIT(1)
#define MLX5_CT_STATE_TRK_BIT BIT(2)
@@ -32,11 +32,11 @@
#define MLX5_CT_STATE_RELATED_BIT BIT(5)
#define MLX5_CT_STATE_INVALID_BIT BIT(6)
-#define MLX5_FTE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen * 8)
+#define MLX5_FTE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen)
#define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0)
#define MLX5_FTE_ID_MASK MLX5_FTE_ID_MAX
-#define MLX5_CT_LABELS_BITS (mlx5e_tc_attr_to_reg_mappings[LABELS_TO_REG].mlen * 8)
+#define MLX5_CT_LABELS_BITS (mlx5e_tc_attr_to_reg_mappings[LABELS_TO_REG].mlen)
#define MLX5_CT_LABELS_MASK GENMASK(MLX5_CT_LABELS_BITS - 1, 0)
#define ct_dbg(fmt, args...)\
@@ -150,6 +150,11 @@ struct mlx5_ct_entry {
unsigned long flags;
};
+static void
+mlx5_tc_ct_entry_destroy_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_flow_attr *attr,
+ struct mlx5e_mod_hdr_handle *mh);
+
static const struct rhashtable_params cts_ht_params = {
.head_offset = offsetof(struct mlx5_ct_entry, node),
.key_offset = offsetof(struct mlx5_ct_entry, cookie),
@@ -458,8 +463,7 @@ mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
ct_dbg("Deleting ct entry rule in zone %d", entry->tuple.zone);
mlx5_tc_rule_delete(netdev_priv(ct_priv->netdev), zone_rule->rule, attr);
- mlx5e_mod_hdr_detach(ct_priv->dev,
- ct_priv->mod_hdr_tbl, zone_rule->mh);
+ mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, zone_rule->mh);
mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
kfree(attr);
}
@@ -686,15 +690,27 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
if (err)
goto err_mapping;
- *mh = mlx5e_mod_hdr_attach(ct_priv->dev,
- ct_priv->mod_hdr_tbl,
- ct_priv->ns_type,
- &mod_acts);
- if (IS_ERR(*mh)) {
- err = PTR_ERR(*mh);
- goto err_mapping;
+ if (nat) {
+ attr->modify_hdr = mlx5_modify_header_alloc(ct_priv->dev, ct_priv->ns_type,
+ mod_acts.num_actions,
+ mod_acts.actions);
+ if (IS_ERR(attr->modify_hdr)) {
+ err = PTR_ERR(attr->modify_hdr);
+ goto err_mapping;
+ }
+
+ *mh = NULL;
+ } else {
+ *mh = mlx5e_mod_hdr_attach(ct_priv->dev,
+ ct_priv->mod_hdr_tbl,
+ ct_priv->ns_type,
+ &mod_acts);
+ if (IS_ERR(*mh)) {
+ err = PTR_ERR(*mh);
+ goto err_mapping;
+ }
+ attr->modify_hdr = mlx5e_mod_hdr_get(*mh);
}
- attr->modify_hdr = mlx5e_mod_hdr_get(*mh);
dealloc_mod_hdr_actions(&mod_acts);
return 0;
@@ -705,6 +721,17 @@ err_mapping:
return err;
}
+static void
+mlx5_tc_ct_entry_destroy_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_flow_attr *attr,
+ struct mlx5e_mod_hdr_handle *mh)
+{
+ if (mh)
+ mlx5e_mod_hdr_detach(ct_priv->dev, ct_priv->mod_hdr_tbl, mh);
+ else
+ mlx5_modify_header_dealloc(ct_priv->dev, attr->modify_hdr);
+}
+
static int
mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
struct flow_rule *flow_rule,
@@ -767,8 +794,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
return 0;
err_rule:
- mlx5e_mod_hdr_detach(ct_priv->dev,
- ct_priv->mod_hdr_tbl, zone_rule->mh);
+ mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, zone_rule->mh);
mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
err_mod_hdr:
kfree(attr);
@@ -918,7 +944,7 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
}
if (rev_entry && refcount_inc_not_zero(&rev_entry->counter->refcount)) {
- ct_dbg("Using shared counter entry=0x%p rev=0x%p\n", entry, rev_entry);
+ ct_dbg("Using shared counter entry=0x%p rev=0x%p", entry, rev_entry);
shared_counter = rev_entry->counter;
spin_unlock_bh(&ct_priv->ht_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index 69e618d17071..644cf1641cde 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -33,15 +33,15 @@ struct mlx5_ct_attr {
#define zone_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_2,\
.moffset = 0,\
- .mlen = 2,\
+ .mlen = 16,\
.soffset = MLX5_BYTE_OFF(fte_match_param,\
- misc_parameters_2.metadata_reg_c_2) + 2,\
+ misc_parameters_2.metadata_reg_c_2),\
}
#define ctstate_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_2,\
- .moffset = 2,\
- .mlen = 2,\
+ .moffset = 16,\
+ .mlen = 16,\
.soffset = MLX5_BYTE_OFF(fte_match_param,\
misc_parameters_2.metadata_reg_c_2),\
}
@@ -49,7 +49,7 @@ struct mlx5_ct_attr {
#define mark_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_3,\
.moffset = 0,\
- .mlen = 4,\
+ .mlen = 32,\
.soffset = MLX5_BYTE_OFF(fte_match_param,\
misc_parameters_2.metadata_reg_c_3),\
}
@@ -57,7 +57,7 @@ struct mlx5_ct_attr {
#define labels_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_4,\
.moffset = 0,\
- .mlen = 4,\
+ .mlen = 32,\
.soffset = MLX5_BYTE_OFF(fte_match_param,\
misc_parameters_2.metadata_reg_c_4),\
}
@@ -65,7 +65,7 @@ struct mlx5_ct_attr {
#define fteid_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_5,\
.moffset = 0,\
- .mlen = 4,\
+ .mlen = 32,\
.soffset = MLX5_BYTE_OFF(fte_match_param,\
misc_parameters_2.metadata_reg_c_5),\
}
@@ -73,20 +73,19 @@ struct mlx5_ct_attr {
#define zone_restore_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,\
.moffset = 0,\
- .mlen = (ESW_ZONE_ID_BITS / 8),\
+ .mlen = ESW_ZONE_ID_BITS,\
.soffset = MLX5_BYTE_OFF(fte_match_param,\
- misc_parameters_2.metadata_reg_c_1) + 3,\
+ misc_parameters_2.metadata_reg_c_1),\
}
#define nic_zone_restore_to_reg_ct {\
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,\
- .moffset = 2,\
- .mlen = (ESW_ZONE_ID_BITS / 8),\
+ .moffset = 16,\
+ .mlen = ESW_ZONE_ID_BITS,\
}
#define REG_MAPPING_MLEN(reg) (mlx5e_tc_attr_to_reg_mappings[reg].mlen)
#define REG_MAPPING_MOFFSET(reg) (mlx5e_tc_attr_to_reg_mappings[reg].moffset)
-#define REG_MAPPING_SHIFT(reg) (REG_MAPPING_MOFFSET(reg) * 8)
#if IS_ENABLED(CONFIG_MLX5_TC_CT)
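These hunks change the units of moffset/mlen from bytes to bits, so GENMASK()-based masks are built directly from mlen without the old "* 8" scaling, and REG_MAPPING_SHIFT (which multiplied a byte offset by 8) disappears. A quick standalone check that the new bit-based values produce the same masks as the old byte-based ones (plain C; the macro mirrors the kernel's GENMASK_ULL for h <= 63):

#include <stdio.h>

#define GENMASK_ULL(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	unsigned int zone_bits = 16;	/* old mlen 2 (bytes) * 8 */
	unsigned int mark_bits = 32;	/* old mlen 4 (bytes) * 8 */

	printf("zone mask = 0x%llx\n", GENMASK_ULL(zone_bits - 1, 0)); /* 0xffff     */
	printf("mark mask = 0x%llx\n", GENMASK_ULL(mark_bits - 1, 0)); /* 0xffffffff */
	return 0;
}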
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 172e0474f2e6..8f79f04eccd6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -212,6 +212,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
+ struct mlx5_pkt_reformat_params reformat_params;
struct mlx5e_neigh m_neigh = {};
TC_TUN_ROUTE_ATTR_INIT(attr);
int ipv4_encap_size;
@@ -295,9 +296,12 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
*/
goto release_neigh;
}
- e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
- e->reformat_type,
- ipv4_encap_size, encap_header,
+
+ memset(&reformat_params, 0, sizeof(reformat_params));
+ reformat_params.type = e->reformat_type;
+ reformat_params.size = ipv4_encap_size;
+ reformat_params.data = encap_header;
+ e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
err = PTR_ERR(e->pkt_reformat);
@@ -324,6 +328,7 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
+ struct mlx5_pkt_reformat_params reformat_params;
TC_TUN_ROUTE_ATTR_INIT(attr);
int ipv4_encap_size;
char *encap_header;
@@ -396,9 +401,12 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
*/
goto release_neigh;
}
- e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
- e->reformat_type,
- ipv4_encap_size, encap_header,
+
+ memset(&reformat_params, 0, sizeof(reformat_params));
+ reformat_params.type = e->reformat_type;
+ reformat_params.size = ipv4_encap_size;
+ reformat_params.data = encap_header;
+ e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
err = PTR_ERR(e->pkt_reformat);
@@ -471,6 +479,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
+ struct mlx5_pkt_reformat_params reformat_params;
struct mlx5e_neigh m_neigh = {};
TC_TUN_ROUTE_ATTR_INIT(attr);
struct ipv6hdr *ip6h;
@@ -553,9 +562,11 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
goto release_neigh;
}
- e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
- e->reformat_type,
- ipv6_encap_size, encap_header,
+ memset(&reformat_params, 0, sizeof(reformat_params));
+ reformat_params.type = e->reformat_type;
+ reformat_params.size = ipv6_encap_size;
+ reformat_params.data = encap_header;
+ e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
err = PTR_ERR(e->pkt_reformat);
@@ -582,6 +593,7 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
+ struct mlx5_pkt_reformat_params reformat_params;
TC_TUN_ROUTE_ATTR_INIT(attr);
struct ipv6hdr *ip6h;
int ipv6_encap_size;
@@ -654,9 +666,11 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
goto release_neigh;
}
- e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
- e->reformat_type,
- ipv6_encap_size, encap_header,
+ memset(&reformat_params, 0, sizeof(reformat_params));
+ reformat_params.type = e->reformat_type;
+ reformat_params.size = ipv6_encap_size;
+ reformat_params.data = encap_header;
+ e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
err = PTR_ERR(e->pkt_reformat);
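All four call sites in this file migrate mlx5_packet_reformat_alloc() from positional type/size/data arguments to a zeroed mlx5_pkt_reformat_params structure, presumably so future reformat attributes can be added without churning every caller. The resulting call shape, written with a designated initializer for brevity (equivalent to the memset-plus-assignments form in the diff):

struct mlx5_pkt_reformat_params reformat_params = {
	.type = e->reformat_type,
	.size = ipv4_encap_size,
	.data = encap_header,
};

e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev, &reformat_params,
					      MLX5_FLOW_NAMESPACE_FDB);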
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 593503bc4d07..0dfd51d2d178 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -120,6 +120,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
struct list_head *flow_list)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_pkt_reformat_params reformat_params;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_attr *attr;
@@ -130,9 +131,12 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
if (e->flags & MLX5_ENCAP_ENTRY_NO_ROUTE)
return;
+ memset(&reformat_params, 0, sizeof(reformat_params));
+ reformat_params.type = e->reformat_type;
+ reformat_params.size = e->encap_size;
+ reformat_params.data = e->encap_header;
e->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
- e->reformat_type,
- e->encap_size, e->encap_header,
+ &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(e->pkt_reformat)) {
mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %lu\n",
@@ -812,6 +816,7 @@ int mlx5e_attach_decap(struct mlx5e_priv *priv,
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
+ struct mlx5_pkt_reformat_params reformat_params;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_decap_entry *d;
struct mlx5e_decap_key key;
@@ -853,10 +858,12 @@ int mlx5e_attach_decap(struct mlx5e_priv *priv,
hash_add_rcu(esw->offloads.decap_tbl, &d->hlist, hash_key);
mutex_unlock(&esw->offloads.decap_tbl_lock);
+ memset(&reformat_params, 0, sizeof(reformat_params));
+ reformat_params.type = MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
+ reformat_params.size = sizeof(parse_attr->eth);
+ reformat_params.data = &parse_attr->eth;
d->pkt_reformat = mlx5_packet_reformat_alloc(priv->mdev,
- MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2,
- sizeof(parse_attr->eth),
- &parse_attr->eth,
+ &reformat_params,
MLX5_FLOW_NAMESPACE_FDB);
if (IS_ERR(d->pkt_reformat)) {
err = PTR_ERR(d->pkt_reformat);
@@ -1505,7 +1512,7 @@ mlx5e_init_fib_work_ipv4(struct mlx5e_priv *priv,
fen_info = container_of(info, struct fib_entry_notifier_info, info);
fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
- if (fib_dev->netdev_ops != &mlx5e_netdev_ops ||
+ if (!fib_dev || fib_dev->netdev_ops != &mlx5e_netdev_ops ||
fen_info->dst_len != 32)
return NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 00af0b831a28..d964665eaa63 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -162,7 +162,7 @@ static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
/* Part of the eseg touched by TX offloads */
#define MLX5E_ACCEL_ESEG_LEN offsetof(struct mlx5_wqe_eth_seg, mss)
-static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
+static inline void mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
@@ -175,8 +175,6 @@ static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
mlx5e_tx_tunnel_accel(skb, eseg, ihs);
#endif
-
- return true;
}
static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index 95293ee0d38d..d93aadbf10da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -59,12 +59,15 @@ void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
- if (mlx5_accel_is_ktls_tx(mdev)) {
+ if (!mlx5e_accel_is_ktls_tx(mdev) && !mlx5e_accel_is_ktls_rx(mdev))
+ return;
+
+ if (mlx5e_accel_is_ktls_tx(mdev)) {
netdev->hw_features |= NETIF_F_HW_TLS_TX;
netdev->features |= NETIF_F_HW_TLS_TX;
}
- if (mlx5_accel_is_ktls_rx(mdev))
+ if (mlx5e_accel_is_ktls_rx(mdev))
netdev->hw_features |= NETIF_F_HW_TLS_RX;
netdev->tlsdev_ops = &mlx5e_ktls_ops;
@@ -89,7 +92,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
{
int err;
- if (!mlx5_accel_is_ktls_rx(priv->mdev))
+ if (!mlx5e_accel_is_ktls_rx(priv->mdev))
return 0;
priv->tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx");
@@ -109,7 +112,7 @@ int mlx5e_ktls_init_rx(struct mlx5e_priv *priv)
void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
{
- if (!mlx5_accel_is_ktls_rx(priv->mdev))
+ if (!mlx5e_accel_is_ktls_rx(priv->mdev))
return;
if (priv->netdev->features & NETIF_F_HW_TLS_RX)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index aaa579bf9a39..5833deb2354c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -15,6 +15,25 @@ int mlx5e_ktls_set_feature_rx(struct net_device *netdev, bool enable);
struct mlx5e_ktls_resync_resp *
mlx5e_ktls_rx_resync_create_resp_list(void);
void mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list);
+
+static inline bool mlx5e_accel_is_ktls_tx(struct mlx5_core_dev *mdev)
+{
+ return !is_kdump_kernel() &&
+ mlx5_accel_is_ktls_tx(mdev);
+}
+
+static inline bool mlx5e_accel_is_ktls_rx(struct mlx5_core_dev *mdev)
+{
+ return !is_kdump_kernel() &&
+ mlx5_accel_is_ktls_rx(mdev);
+}
+
+static inline bool mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev)
+{
+ return !is_kdump_kernel() &&
+ mlx5_accel_is_ktls_device(mdev);
+}
+
#else
static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
@@ -44,6 +63,11 @@ mlx5e_ktls_rx_resync_create_resp_list(void)
static inline void
mlx5e_ktls_rx_resync_destroy_resp_list(struct mlx5e_ktls_resync_resp *resp_list) {}
+
+static inline bool mlx5e_accel_is_ktls_tx(struct mlx5_core_dev *mdev) { return false; }
+static inline bool mlx5e_accel_is_ktls_rx(struct mlx5_core_dev *mdev) { return false; }
+static inline bool mlx5e_accel_is_ktls_device(struct mlx5_core_dev *mdev) { return false; }
+
#endif
#endif /* __MLX5E_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 51bdf71073f3..2c0a9344338a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -23,10 +23,13 @@ mlx5e_ktls_dumps_num_wqes(struct mlx5e_params *params, unsigned int nfrags,
return nfrags + DIV_ROUND_UP(sync_len, MLX5E_SW2HW_MTU(params, params->sw_mtu));
}
-u16 mlx5e_ktls_get_stop_room(struct mlx5e_params *params)
+u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
u16 num_dumps, stop_room = 0;
+ if (!mlx5e_accel_is_ktls_tx(mdev))
+ return 0;
+
num_dumps = mlx5e_ktls_dumps_num_wqes(params, MAX_SKB_FRAGS, TLS_MAX_PAYLOAD_SIZE);
stop_room += mlx5e_stop_room_for_wqe(MLX5E_TLS_SET_STATIC_PARAMS_WQEBBS);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
index 8f79335057dc..08c9d5134479 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
@@ -14,7 +14,7 @@ struct mlx5e_accel_tx_tls_state {
u32 tls_tisn;
};
-u16 mlx5e_ktls_get_stop_room(struct mlx5e_params *params);
+u16 mlx5e_ktls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
bool mlx5e_ktls_handle_tx_skb(struct tls_context *tls_ctx, struct mlx5e_txqsq *sq,
struct sk_buff *skb, int datalen,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
index d6b21b899dbc..b8fc863aa68d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
@@ -192,13 +192,13 @@ void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
struct net_device *netdev = priv->netdev;
u32 caps;
- if (mlx5_accel_is_ktls_device(priv->mdev)) {
+ if (mlx5e_accel_is_ktls_device(priv->mdev)) {
mlx5e_ktls_build_netdev(priv);
return;
}
/* FPGA */
- if (!mlx5_accel_is_tls_device(priv->mdev))
+ if (!mlx5e_accel_is_tls_device(priv->mdev))
return;
caps = mlx5_accel_tls_device_caps(priv->mdev);
@@ -224,7 +224,7 @@ int mlx5e_tls_init(struct mlx5e_priv *priv)
{
struct mlx5e_tls *tls;
- if (!mlx5_accel_is_tls_device(priv->mdev))
+ if (!mlx5e_accel_is_tls_device(priv->mdev))
return 0;
tls = kzalloc(sizeof(*tls), GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
index 4c9274d390da..3fd6fd69bbd0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.h
@@ -103,11 +103,18 @@ int mlx5e_tls_get_count(struct mlx5e_priv *priv);
int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data);
int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data);
+static inline bool mlx5e_accel_is_tls_device(struct mlx5_core_dev *mdev)
+{
+ return !is_kdump_kernel() &&
+ mlx5_accel_is_tls_device(mdev);
+}
+
#else
static inline void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
{
- if (mlx5_accel_is_ktls_device(priv->mdev))
+ if (!is_kdump_kernel() &&
+ mlx5_accel_is_ktls_device(priv->mdev))
mlx5e_ktls_build_netdev(priv);
}
@@ -117,6 +124,7 @@ static inline void mlx5e_tls_cleanup(struct mlx5e_priv *priv) { }
static inline int mlx5e_tls_get_count(struct mlx5e_priv *priv) { return 0; }
static inline int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) { return 0; }
static inline int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) { return 0; }
+static inline bool mlx5e_accel_is_tls_device(struct mlx5_core_dev *mdev) { return false; }
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index 82dc09aaa7fc..7a700f913582 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -273,7 +273,7 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
goto err_out;
- if (mlx5_accel_is_ktls_tx(sq->mdev))
+ if (mlx5e_accel_is_ktls_tx(sq->mdev))
return mlx5e_ktls_handle_tx_skb(tls_ctx, sq, skb, datalen, state);
/* FPGA */
@@ -378,11 +378,11 @@ void mlx5e_tls_handle_rx_skb_metadata(struct mlx5e_rq *rq, struct sk_buff *skb,
u16 mlx5e_tls_get_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
- if (!mlx5_accel_is_tls_device(mdev))
+ if (!mlx5e_accel_is_tls_device(mdev))
return 0;
- if (mlx5_accel_is_ktls_device(mdev))
- return mlx5e_ktls_get_stop_room(params);
+ if (mlx5e_accel_is_ktls_device(mdev))
+ return mlx5e_ktls_get_stop_room(mdev, params);
/* FPGA */
/* Resync SKB. */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
index 29463bdb7715..ffc84f9b41b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c
@@ -58,7 +58,7 @@ static const struct counter_desc *get_tls_atomic_stats(struct mlx5e_priv *priv)
{
if (!priv->tls)
return NULL;
- if (mlx5_accel_is_ktls_device(priv->mdev))
+ if (mlx5e_accel_is_ktls_device(priv->mdev))
return mlx5e_ktls_sw_stats_desc;
return mlx5e_tls_sw_stats_desc;
}
@@ -67,7 +67,7 @@ int mlx5e_tls_get_count(struct mlx5e_priv *priv)
{
if (!priv->tls)
return 0;
- if (mlx5_accel_is_ktls_device(priv->mdev))
+ if (mlx5e_accel_is_ktls_device(priv->mdev))
return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
return ARRAY_SIZE(mlx5e_tls_sw_stats_desc);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 8360289813f0..bd72572e03d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1624,12 +1624,13 @@ static int mlx5e_set_fecparam(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
+ unsigned long fec_bitmap;
u16 fec_policy = 0;
int mode;
int err;
- if (bitmap_weight((unsigned long *)&fecparam->fec,
- ETHTOOL_FEC_LLRS_BIT + 1) > 1)
+ bitmap_from_arr32(&fec_bitmap, &fecparam->fec, sizeof(fecparam->fec) * BITS_PER_BYTE);
+ if (bitmap_weight(&fec_bitmap, ETHTOOL_FEC_LLRS_BIT + 1) > 1)
return -EOPNOTSUPP;
for (mode = 0; mode < ARRAY_SIZE(pplm_fec_2_ethtool); mode++) {
@@ -1893,6 +1894,13 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
if (curr_val == new_val)
return 0;
+ if (new_val && !priv->profile->rx_ptp_support &&
+ priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) {
+ netdev_err(priv->netdev,
+ "Profile doesn't support enabling of CQE compression while hardware time-stamping is enabled.\n");
+ return -EINVAL;
+ }
+
new_params = priv->channels.params;
MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE)
@@ -1984,7 +1992,7 @@ static int set_pflag_tx_mpwqe_common(struct net_device *netdev, u32 flag, bool e
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_params new_params;
- if (enable && !MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
+ if (enable && !mlx5e_tx_mpwqe_supported(mdev))
return -EOPNOTSUPP;
new_params = priv->channels.params;
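The replaced cast in mlx5e_set_fecparam() read sizeof(unsigned long) bytes through a pointer to a 32-bit field, over-reading on 64-bit targets and picking up the wrong word on big-endian ones; bitmap_from_arr32() first widens the u32 into a properly sized bitmap. A minimal user-space illustration of the safe widening (not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned int fec = (1u << 0) | (1u << 3);  /* two FEC modes requested */
	unsigned long bitmap = fec;  /* what bitmap_from_arr32() does for one 32-bit word */

	/* more than one bit set -> the driver rejects the request */
	printf("modes set: %d\n", __builtin_popcountl(bitmap));  /* 2 */
	return 0;
}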
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 0d571a0c76d9..0b75fab41ae8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -35,6 +35,7 @@
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
+#include <linux/mlx5/mpfs.h>
#include "en.h"
#include "en_rep.h"
#include "lib/mpfs.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index bca832cdc4cb..930b225dfe77 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -91,12 +91,16 @@ void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
u8 port_state;
+ bool up;
port_state = mlx5_query_vport_state(mdev,
MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT,
0);
- if (port_state == VPORT_STATE_UP) {
+ up = port_state == VPORT_STATE_UP;
+ if (up == netif_carrier_ok(priv->netdev))
+ netif_carrier_event(priv->netdev);
+ if (up) {
netdev_info(priv->netdev, "Link up\n");
netif_carrier_on(priv->netdev);
} else {
@@ -853,7 +857,7 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
if (err)
goto err_destroy_rq;
- if (mlx5e_is_tls_on(rq->priv) && !mlx5_accel_is_ktls_device(mdev))
+ if (mlx5e_is_tls_on(rq->priv) && !mlx5e_accel_is_ktls_device(mdev))
__set_bit(MLX5E_RQ_STATE_FPGA_TLS, &rq->state); /* must be FPGA */
if (MLX5_CAP_ETH(mdev, cqe_checksum_full))
@@ -889,10 +893,13 @@ err_free_rq:
void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
- if (rq->icosq)
+ if (rq->icosq) {
mlx5e_trigger_irq(rq->icosq);
- else
+ } else {
+ local_bh_disable();
napi_schedule(rq->cq.napi);
+ local_bh_enable();
+ }
}
void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
@@ -2697,7 +2704,7 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
int err;
old_num_txqs = netdev->real_num_tx_queues;
- old_ntc = netdev->num_tc;
+ old_ntc = netdev->num_tc ? : 1;
nch = priv->channels.params.num_channels;
ntc = priv->channels.params.num_tc;
@@ -3855,6 +3862,16 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
}
+ if (mlx5e_is_uplink_rep(priv)) {
+ features &= ~NETIF_F_HW_TLS_RX;
+ if (netdev->features & NETIF_F_HW_TLS_RX)
+ netdev_warn(netdev, "Disabling hw_tls_rx, not supported in switchdev mode\n");
+
+ features &= ~NETIF_F_HW_TLS_TX;
+ if (netdev->features & NETIF_F_HW_TLS_TX)
+ netdev_warn(netdev, "Disabling hw_tls_tx, not supported in switchdev mode\n");
+ }
+
mutex_unlock(&priv->state_lock);
return features;
@@ -3971,11 +3988,45 @@ int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx)
return mlx5e_ptp_rx_manage_fs(priv, set);
}
-int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
+static int mlx5e_hwstamp_config_no_ptp_rx(struct mlx5e_priv *priv, bool rx_filter)
+{
+ bool rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
+ int err;
+
+ if (!rx_filter)
+ /* Reset CQE compression to Admin default */
+ return mlx5e_modify_rx_cqe_compression_locked(priv, rx_cqe_compress_def);
+
+ if (!MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
+ return 0;
+
+ /* Disable CQE compression */
+ netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
+ err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
+ if (err)
+ netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
+
+ return err;
+}
+
+static int mlx5e_hwstamp_config_ptp_rx(struct mlx5e_priv *priv, bool ptp_rx)
{
struct mlx5e_params new_params;
+
+ if (ptp_rx == priv->channels.params.ptp_rx)
+ return 0;
+
+ new_params = priv->channels.params;
+ new_params.ptp_rx = ptp_rx;
+ return mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx,
+ &new_params.ptp_rx, true);
+}
+
+int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
+{
struct hwtstamp_config config;
bool rx_cqe_compress_def;
+ bool ptp_rx;
int err;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
@@ -3995,13 +4046,12 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
}
mutex_lock(&priv->state_lock);
- new_params = priv->channels.params;
rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
/* RX HW timestamp */
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- new_params.ptp_rx = false;
+ ptp_rx = false;
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
@@ -4018,24 +4068,25 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
- new_params.ptp_rx = rx_cqe_compress_def;
config.rx_filter = HWTSTAMP_FILTER_ALL;
+ /* ptp_rx is enabled only when both HW timestamping
+ * and CQE compression are enabled
+ */
+ ptp_rx = rx_cqe_compress_def;
break;
default:
- mutex_unlock(&priv->state_lock);
- return -ERANGE;
+ err = -ERANGE;
+ goto err_unlock;
}
- if (new_params.ptp_rx == priv->channels.params.ptp_rx)
- goto out;
+ if (!priv->profile->rx_ptp_support)
+ err = mlx5e_hwstamp_config_no_ptp_rx(priv,
+ config.rx_filter != HWTSTAMP_FILTER_NONE);
+ else
+ err = mlx5e_hwstamp_config_ptp_rx(priv, ptp_rx);
+ if (err)
+ goto err_unlock;
- err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_ptp_rx_manage_fs_ctx,
- &new_params.ptp_rx, true);
- if (err) {
- mutex_unlock(&priv->state_lock);
- return err;
- }
-out:
memcpy(&priv->tstamp, &config, sizeof(config));
mutex_unlock(&priv->state_lock);
@@ -4044,6 +4095,9 @@ out:
return copy_to_user(ifr->ifr_data, &config,
sizeof(config)) ? -EFAULT : 0;
+err_unlock:
+ mutex_unlock(&priv->state_lock);
+ return err;
}
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
@@ -4613,12 +4667,10 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
params->log_sq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
- MLX5E_SET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE,
- MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe));
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE, mlx5e_tx_mpwqe_supported(mdev));
/* XDP SQ */
- MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE,
- MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe));
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE, mlx5e_tx_mpwqe_supported(mdev));
/* set CQE compression */
params->rx_cqe_compress_def = false;
@@ -5062,7 +5114,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5e_set_netdev_mtu_boundaries(priv);
mlx5e_set_dev_port_mtu(priv);
- mlx5_lag_add(mdev, netdev);
+ mlx5_lag_add_netdev(mdev, netdev);
mlx5e_enable_async_events(priv);
mlx5e_enable_blocking_events(priv);
@@ -5110,7 +5162,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
priv->en_trap = NULL;
}
mlx5e_disable_async_events(priv);
- mlx5_lag_remove(mdev);
+ mlx5_lag_remove_netdev(mdev, priv->netdev);
mlx5_vxlan_reset_to_default(mdev->vxlan);
}
@@ -5229,6 +5281,11 @@ static void mlx5e_update_features(struct net_device *netdev)
rtnl_unlock();
}
+static void mlx5e_reset_channels(struct net_device *netdev)
+{
+ netdev_reset_tc(netdev);
+}
+
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
@@ -5283,6 +5340,7 @@ err_cleanup_tx:
profile->cleanup_tx(priv);
out:
+ mlx5e_reset_channels(priv->netdev);
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
cancel_work_sync(&priv->update_stats_work);
return err;
@@ -5300,6 +5358,7 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
profile->cleanup_rx(priv);
profile->cleanup_tx(priv);
+ mlx5e_reset_channels(priv->netdev);
cancel_work_sync(&priv->update_stats_work);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 34eb1118670f..2d2cc5f3b03f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -45,6 +45,7 @@
#include "en_tc.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
+#include "en/rep/bridge.h"
#include "en/devlink.h"
#include "fs_core.h"
#include "lib/mlx5.h"
@@ -536,13 +537,13 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
.ndo_change_carrier = mlx5e_rep_change_carrier,
};
-bool mlx5e_eswitch_uplink_rep(struct net_device *netdev)
+bool mlx5e_eswitch_uplink_rep(const struct net_device *netdev)
{
return netdev->netdev_ops == &mlx5e_netdev_ops &&
mlx5e_is_uplink_rep(netdev_priv(netdev));
}
-bool mlx5e_eswitch_vf_rep(struct net_device *netdev)
+bool mlx5e_eswitch_vf_rep(const struct net_device *netdev)
{
return netdev->netdev_ops == &mlx5e_netdev_ops_rep;
}
@@ -975,12 +976,13 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
if (MLX5_CAP_GEN(mdev, uplink_follow))
mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
- mlx5_lag_add(mdev, netdev);
+ mlx5_lag_add_netdev(mdev, netdev);
priv->events_nb.notifier_call = uplink_rep_async_event;
mlx5_notifier_register(mdev, &priv->events_nb);
mlx5e_dcbnl_initialize(priv);
mlx5e_dcbnl_init_app(priv);
mlx5e_rep_neigh_init(rpriv);
+ mlx5e_rep_bridge_init(priv);
netdev->wanted_features |= NETIF_F_HW_TC;
@@ -1002,11 +1004,12 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
netif_device_detach(priv->netdev);
rtnl_unlock();
+ mlx5e_rep_bridge_cleanup(priv);
mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_dcbnl_delete_app(priv);
mlx5_notifier_unregister(mdev, &priv->events_nb);
mlx5e_rep_tc_disable(priv);
- mlx5_lag_remove(mdev);
+ mlx5_lag_remove_netdev(mdev, priv->netdev);
}
static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 22585015c7a7..47a2dfb7792a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -231,9 +231,9 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
-bool mlx5e_eswitch_vf_rep(struct net_device *netdev);
-bool mlx5e_eswitch_uplink_rep(struct net_device *netdev);
-static inline bool mlx5e_eswitch_rep(struct net_device *netdev)
+bool mlx5e_eswitch_vf_rep(const struct net_device *netdev);
+bool mlx5e_eswitch_uplink_rep(const struct net_device *netdev);
+static inline bool mlx5e_eswitch_rep(const struct net_device *netdev)
{
return mlx5e_eswitch_vf_rep(netdev) ||
mlx5e_eswitch_uplink_rep(netdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index f90894eea9e0..3c65fd0bcf31 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -579,6 +579,9 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
if (mlx5_wq_cyc_missing(wq) < wqe_bulk)
return false;
+ if (rq->page_pool)
+ page_pool_nid_changed(rq->page_pool, numa_mem_id());
+
do {
u16 head = mlx5_wq_cyc_get_head(wq);
@@ -734,6 +737,9 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
if (likely(missing < UMR_WQE_BULK))
return false;
+ if (rq->page_pool)
+ page_pool_nid_changed(rq->page_pool, numa_mem_id());
+
head = rq->mpwqe.actual_wq_head;
i = missing;
do {
@@ -1310,7 +1316,8 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
if (rep->vlan && skb_vlan_tag_present(skb))
skb_vlan_pop(skb);
- if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
+ if (unlikely(!mlx5_ipsec_is_rx_flow(cqe) &&
+ !mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))) {
dev_kfree_skb_any(skb);
goto free_wqe;
}
@@ -1367,7 +1374,8 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
- if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
+ if (unlikely(!mlx5_ipsec_is_rx_flow(cqe) &&
+ !mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))) {
dev_kfree_skb_any(skb);
goto mpwrq_cqe_out;
}
@@ -1553,12 +1561,9 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
return 0;
- if (rq->page_pool)
- page_pool_nid_changed(rq->page_pool, numa_mem_id());
-
if (rq->cqd.left) {
work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
- if (rq->cqd.left || work_done >= budget)
+ if (work_done >= budget)
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 47a9c49b25fd..cf4558e12325 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -83,17 +83,17 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[CHAIN_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
.moffset = 0,
- .mlen = 2,
+ .mlen = 16,
},
[VPORT_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
- .moffset = 2,
- .mlen = 2,
+ .moffset = 16,
+ .mlen = 16,
},
[TUNNEL_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
- .moffset = 1,
- .mlen = ((ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS) / 8),
+ .moffset = 8,
+ .mlen = ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS,
.soffset = MLX5_BYTE_OFF(fte_match_param,
misc_parameters_2.metadata_reg_c_1),
},
@@ -110,7 +110,7 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[NIC_CHAIN_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
.moffset = 0,
- .mlen = 2,
+ .mlen = 16,
},
[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
};
@@ -128,23 +128,46 @@ static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
enum mlx5e_tc_attr_to_reg type,
- u32 data,
+ u32 val,
u32 mask)
{
+ void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
+ int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
- void *headers_c = spec->match_criteria;
- void *headers_v = spec->match_value;
- void *fmask, *fval;
+ u32 max_mask = GENMASK(match_len - 1, 0);
+ __be32 curr_mask_be, curr_val_be;
+ u32 curr_mask, curr_val;
fmask = headers_c + soffset;
fval = headers_v + soffset;
- mask = (__force u32)(cpu_to_be32(mask)) >> (32 - (match_len * 8));
- data = (__force u32)(cpu_to_be32(data)) >> (32 - (match_len * 8));
+ memcpy(&curr_mask_be, fmask, 4);
+ memcpy(&curr_val_be, fval, 4);
+
+ curr_mask = be32_to_cpu(curr_mask_be);
+ curr_val = be32_to_cpu(curr_val_be);
+
+ /* shift the new value and mask to the field's bit offset */
+ WARN_ON(mask > max_mask);
+ mask <<= moffset;
+ val <<= moffset;
+ max_mask <<= moffset;
+
+ /* clear the field's bit range in the current value and mask */
+ curr_mask &= ~max_mask;
+ curr_val &= ~max_mask;
+
+ /* merge the new value and mask into the current ones */
+ curr_mask |= mask;
+ curr_val |= val;
+
+ /* convert back to big-endian and write out */
+ curr_mask_be = cpu_to_be32(curr_mask);
+ curr_val_be = cpu_to_be32(curr_val);
- memcpy(fmask, &mask, match_len);
- memcpy(fval, &data, match_len);
+ memcpy(fmask, &curr_mask_be, 4);
+ memcpy(fval, &curr_val_be, 4);
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
@@ -152,23 +175,28 @@ mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
void
mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
enum mlx5e_tc_attr_to_reg type,
- u32 *data,
+ u32 *val,
u32 *mask)
{
+ void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
+ int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
- void *headers_c = spec->match_criteria;
- void *headers_v = spec->match_value;
- void *fmask, *fval;
+ u32 max_mask = GENMASK(match_len - 1, 0);
+ __be32 curr_mask_be, curr_val_be;
+ u32 curr_mask, curr_val;
fmask = headers_c + soffset;
fval = headers_v + soffset;
- memcpy(mask, fmask, match_len);
- memcpy(data, fval, match_len);
+ memcpy(&curr_mask_be, fmask, 4);
+ memcpy(&curr_val_be, fval, 4);
+
+ curr_mask = be32_to_cpu(curr_mask_be);
+ curr_val = be32_to_cpu(curr_val_be);
- *mask = be32_to_cpu((__force __be32)(*mask << (32 - (match_len * 8))));
- *data = be32_to_cpu((__force __be32)(*data << (32 - (match_len * 8))));
+ *mask = (curr_mask >> moffset) & max_mask;
+ *val = (curr_val >> moffset) & max_mask;
}
int
@@ -192,13 +220,13 @@ mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
(mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);
/* Firmware has 5bit length field and 0 means 32bits */
- if (mlen == 4)
+ if (mlen == 32)
mlen = 0;
MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, modact, field, mfield);
- MLX5_SET(set_action_in, modact, offset, moffset * 8);
- MLX5_SET(set_action_in, modact, length, mlen * 8);
+ MLX5_SET(set_action_in, modact, offset, moffset);
+ MLX5_SET(set_action_in, modact, length, mlen);
MLX5_SET(set_action_in, modact, data, data);
err = mod_hdr_acts->num_actions;
mod_hdr_acts->num_actions++;
@@ -296,13 +324,13 @@ void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
modact = mod_hdr_acts->actions + (act_id * MLX5_MH_ACT_SZ);
/* Firmware has 5bit length field and 0 means 32bits */
- if (mlen == 4)
+ if (mlen == 32)
mlen = 0;
MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, modact, field, mfield);
- MLX5_SET(set_action_in, modact, offset, moffset * 8);
- MLX5_SET(set_action_in, modact, length, mlen * 8);
+ MLX5_SET(set_action_in, modact, offset, moffset);
+ MLX5_SET(set_action_in, modact, length, mlen);
MLX5_SET(set_action_in, modact, data, data);
}
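[Editor's note] The "5bit length field" convention above is easy to get wrong when converting between bit counts and the wire encoding, so here is a hedged stand-alone sketch of the encode/decode pair (function names are invented for this sketch):

#include <assert.h>
#include <stdint.h>

/* The firmware's set_action_in length field is 5 bits wide, so 32 cannot be
 * represented directly; by convention 0 encodes a full 32-bit rewrite.
 */
static uint8_t encode_modact_len(int mlen_bits)
{
	assert(mlen_bits >= 1 && mlen_bits <= 32);
	return mlen_bits == 32 ? 0 : (uint8_t)mlen_bits;	/* fits in 5 bits */
}

static int decode_modact_len(uint8_t field)
{
	return field ? field : 32;
}

int main(void)
{
	assert(decode_modact_len(encode_modact_len(16)) == 16);
	assert(decode_modact_len(encode_modact_len(32)) == 32);
	return 0;
}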
@@ -1322,10 +1350,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct net_device *out_dev, *encap_dev = NULL;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
bool vf_tun = false, encap_valid = true;
+ struct net_device *encap_dev = NULL;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_fc *counter = NULL;
struct mlx5e_rep_priv *rpriv;
@@ -1371,16 +1399,22 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
esw_attr = attr->esw_attr;
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
+ struct net_device *out_dev;
int mirred_ifindex;
if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
continue;
mirred_ifindex = parse_attr->mirred_ifindex[out_index];
- out_dev = __dev_get_by_index(dev_net(priv->netdev),
- mirred_ifindex);
+ out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
+ if (!out_dev) {
+ NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
+ err = -ENODEV;
+ goto err_out;
+ }
err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
extack, &encap_dev, &encap_valid);
+ dev_put(out_dev);
if (err)
goto err_out;
@@ -1393,6 +1427,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
esw_attr->dests[out_index].mdev = out_priv->mdev;
}
+ if (vf_tun && esw_attr->out_count > 1) {
+ NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+
err = mlx5_eswitch_add_vlan_action(esw, attr);
if (err)
goto err_out;
@@ -2003,11 +2043,13 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
misc_parameters_3);
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = rule->match.dissector;
+ enum fs_flow_table_type fs_type;
u16 addr_type = 0;
u8 ip_proto = 0;
u8 *match_level;
int err;
+ fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX;
match_level = outer_match_level;
if (dissector->used_keys &
@@ -2133,6 +2175,13 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
if (match.mask->vlan_id ||
match.mask->vlan_priority ||
match.mask->vlan_tpid) {
+ if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid,
+ fs_type)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on CVLAN is not supported");
+ return -EOPNOTSUPP;
+ }
+
if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
MLX5_SET(fte_match_set_misc, misc_c,
outer_second_svlan_tag, 1);
@@ -3526,8 +3575,12 @@ static int add_vlan_push_action(struct mlx5e_priv *priv,
if (err)
return err;
- *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
- dev_get_iflink(vlan_dev));
+ rcu_read_lock();
+ *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev));
+ rcu_read_unlock();
+ if (!*out_dev)
+ return -ENODEV;
+
if (is_vlan_dev(*out_dev))
err = add_vlan_push_action(priv, attr, out_dev, action);
@@ -5074,13 +5127,13 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
chain = mapped_obj.chain;
- tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+ tc_skb_ext = tc_skb_ext_alloc(skb);
if (WARN_ON(!tc_skb_ext))
return false;
tc_skb_ext->chain = chain;
- zone_restore_id = (reg_b >> REG_MAPPING_SHIFT(NIC_ZONE_RESTORE_TO_REG)) &
+ zone_restore_id = (reg_b >> REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
ESW_ZONE_ID_MASK;
if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 25c091795bcd..721093b55acc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -129,7 +129,7 @@ struct tunnel_match_enc_opts {
*/
#define TUNNEL_INFO_BITS 12
#define TUNNEL_INFO_BITS_MASK GENMASK(TUNNEL_INFO_BITS - 1, 0)
-#define ENC_OPTS_BITS 12
+#define ENC_OPTS_BITS 11
#define ENC_OPTS_BITS_MASK GENMASK(ENC_OPTS_BITS - 1, 0)
#define TUNNEL_ID_BITS (TUNNEL_INFO_BITS + ENC_OPTS_BITS)
#define TUNNEL_ID_MASK GENMASK(TUNNEL_ID_BITS - 1, 0)
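[Editor's note] Worked out: TUNNEL_ID_BITS = TUNNEL_INFO_BITS + ENC_OPTS_BITS = 12 + 11 = 23, so TUNNEL_ID_MASK = GENMASK(22, 0) = 0x7fffff and the composite tunnel ID shrinks from 24 to 23 bits, freeing one bit of the register range the tunnel mapping used to occupy; the diff itself does not name the new owner of that bit.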
@@ -198,10 +198,10 @@ enum mlx5e_tc_attr_to_reg {
struct mlx5e_tc_attr_to_reg_mapping {
int mfield; /* rewrite field */
- int moffset; /* offset of mfield */
- int mlen; /* bytes to rewrite/match */
+ int moffset; /* bit offset of mfield */
+ int mlen; /* bits to rewrite/match */
- int soffset; /* offset of spec for match */
+ int soffset; /* byte offset of spec for match */
};
extern struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 8ba62671f5f1..669ff58107e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -706,16 +706,12 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
mlx5e_tx_mpwqe_session_complete(sq);
}
-static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
+static void mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
- if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg, ihs)))
- return false;
-
+ mlx5e_accel_tx_eseg(priv, skb, eseg, ihs);
mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
-
- return true;
}
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -744,10 +740,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
struct mlx5_wqe_eth_seg eseg = {};
- if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg,
- attr.ihs)))
- return NETDEV_TX_OK;
-
+ mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg, attr.ihs);
mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
return NETDEV_TX_OK;
}
@@ -762,9 +755,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
/* May update the WQE, but may not post other WQEs. */
mlx5e_accel_tx_finish(sq, wqe, &accel,
(struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
- if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs)))
- return NETDEV_TX_OK;
-
+ mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs);
mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 77c0ca655975..7e5b3826eae5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2013-2021, Mellanox Technologies inc. All rights reserved.
*/
#include <linux/interrupt.h>
@@ -45,6 +18,7 @@
#include "eswitch.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h"
+#include "mlx5_irq.h"
enum {
MLX5_EQE_OWNER_INIT_VAL = 0x1,
@@ -84,6 +58,9 @@ struct mlx5_eq_table {
struct mutex lock; /* sync async eqs creations */
int num_comp_eqs;
struct mlx5_irq_table *irq_table;
+#ifdef CONFIG_RFS_ACCEL
+ struct cpu_rmap *rmap;
+#endif
};
#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \
@@ -286,7 +263,7 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
u8 log_eq_stride = ilog2(MLX5_EQE_SIZE);
struct mlx5_priv *priv = &dev->priv;
- u8 vecidx = param->irq_index;
+ u16 vecidx = param->irq_index;
__be64 *pas;
void *eqc;
int inlen;
@@ -309,13 +286,20 @@ create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
mlx5_init_fbc(eq->frag_buf.frags, log_eq_stride, log_eq_size, &eq->fbc);
init_eq_buf(eq);
+ eq->irq = mlx5_irq_request(dev, vecidx, param->affinity);
+ if (IS_ERR(eq->irq)) {
+ err = PTR_ERR(eq->irq);
+ goto err_buf;
+ }
+
+ vecidx = mlx5_irq_get_index(eq->irq);
inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->frag_buf.npages;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
- goto err_buf;
+ goto err_irq;
}
pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
@@ -359,6 +343,8 @@ err_eq:
err_in:
kvfree(in);
+err_irq:
+ mlx5_irq_release(eq->irq);
err_buf:
mlx5_frag_buf_free(dev, &eq->frag_buf);
return err;
@@ -377,10 +363,9 @@ err_buf:
int mlx5_eq_enable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
struct notifier_block *nb)
{
- struct mlx5_eq_table *eq_table = dev->priv.eq_table;
int err;
- err = mlx5_irq_attach_nb(eq_table->irq_table, eq->vecidx, nb);
+ err = mlx5_irq_attach_nb(eq->irq, nb);
if (!err)
eq_update_ci(eq, 1);
@@ -399,9 +384,7 @@ EXPORT_SYMBOL(mlx5_eq_enable);
void mlx5_eq_disable(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
struct notifier_block *nb)
{
- struct mlx5_eq_table *eq_table = dev->priv.eq_table;
-
- mlx5_irq_detach_nb(eq_table->irq_table, eq->vecidx, nb);
+ mlx5_irq_detach_nb(eq->irq, nb);
}
EXPORT_SYMBOL(mlx5_eq_disable);
@@ -415,10 +398,9 @@ static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
if (err)
mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
eq->eqn);
- synchronize_irq(eq->irqn);
+ mlx5_irq_release(eq->irq);
mlx5_frag_buf_free(dev, &eq->frag_buf);
-
return err;
}
@@ -490,14 +472,7 @@ static int create_async_eq(struct mlx5_core_dev *dev,
int err;
mutex_lock(&eq_table->lock);
- /* Async EQs must share irq index 0 */
- if (param->irq_index != 0) {
- err = -EINVAL;
- goto unlock;
- }
-
err = create_map_eq(dev, eq, param);
-unlock:
mutex_unlock(&eq_table->lock);
return err;
}
@@ -616,8 +591,11 @@ setup_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq_async *eq,
eq->irq_nb.notifier_call = mlx5_eq_async_int;
spin_lock_init(&eq->lock);
+ if (!zalloc_cpumask_var(&param->affinity, GFP_KERNEL))
+ return -ENOMEM;
err = create_async_eq(dev, &eq->core, param);
+ free_cpumask_var(param->affinity);
if (err) {
mlx5_core_warn(dev, "failed to create %s EQ %d\n", name, err);
return err;
@@ -652,7 +630,6 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
mlx5_eq_notifier_register(dev, &table->cq_err_nb);
param = (struct mlx5_eq_param) {
- .irq_index = 0,
.nent = MLX5_NUM_CMD_EQE,
.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
};
@@ -665,7 +642,6 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
param = (struct mlx5_eq_param) {
- .irq_index = 0,
.nent = MLX5_NUM_ASYNC_EQE,
};
@@ -675,7 +651,6 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
goto err2;
param = (struct mlx5_eq_param) {
- .irq_index = 0,
.nent = /* TODO: sriov max_vf + */ 1,
.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
};
@@ -735,6 +710,11 @@ mlx5_eq_create_generic(struct mlx5_core_dev *dev,
struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL);
int err;
+ if (!param->affinity) {
+ kvfree(eq);
+ return ERR_PTR(-EINVAL);
+ }
+
if (!eq)
return ERR_PTR(-ENOMEM);
@@ -845,16 +823,21 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
.irq_index = vecidx,
.nent = nent,
};
- err = create_map_eq(dev, &eq->core, &param);
- if (err) {
- kfree(eq);
- goto clean;
+
+ if (!zalloc_cpumask_var(&param.affinity, GFP_KERNEL)) {
+ err = -ENOMEM;
+ goto clean_eq;
}
+ cpumask_set_cpu(cpumask_local_spread(i, dev->priv.numa_node),
+ param.affinity);
+ err = create_map_eq(dev, &eq->core, &param);
+ free_cpumask_var(param.affinity);
+ if (err)
+ goto clean_eq;
err = mlx5_eq_enable(dev, &eq->core, &eq->irq_nb);
if (err) {
destroy_unmap_eq(dev, &eq->core);
- kfree(eq);
- goto clean;
+ goto clean_eq;
}
mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn);
@@ -863,7 +846,8 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
}
return 0;
-
+clean_eq:
+ kfree(eq);
clean:
destroy_comp_eqs(dev);
return err;
@@ -899,17 +883,23 @@ EXPORT_SYMBOL(mlx5_comp_vectors_count);
struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{
- int vecidx = vector + MLX5_IRQ_VEC_COMP_BASE;
+ struct mlx5_eq_table *table = dev->priv.eq_table;
+ struct mlx5_eq_comp *eq, *n;
+ int i = 0;
- return mlx5_irq_get_affinity_mask(dev->priv.eq_table->irq_table,
- vecidx);
+ list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
+ if (i++ == vector)
+ break;
+ }
+
+ return mlx5_irq_get_affinity_mask(eq->core.irq);
}
EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);
#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
{
- return mlx5_irq_get_rmap(dev->priv.eq_table->irq_table);
+ return dev->priv.eq_table->rmap;
}
#endif
@@ -926,12 +916,57 @@ struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
return ERR_PTR(-ENOENT);
}
+static void clear_rmap(struct mlx5_core_dev *dev)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct mlx5_eq_table *eq_table = dev->priv.eq_table;
+
+ free_irq_cpu_rmap(eq_table->rmap);
+#endif
+}
+
+static int set_rmap(struct mlx5_core_dev *mdev)
+{
+ int err = 0;
+#ifdef CONFIG_RFS_ACCEL
+ struct mlx5_eq_table *eq_table = mdev->priv.eq_table;
+ int vecidx;
+
+ eq_table->rmap = alloc_irq_cpu_rmap(eq_table->num_comp_eqs);
+ if (!eq_table->rmap) {
+ err = -ENOMEM;
+ mlx5_core_err(mdev, "Failed to allocate cpu_rmap. err %d", err);
+ goto err_out;
+ }
+
+ vecidx = MLX5_IRQ_VEC_COMP_BASE;
+ for (; vecidx < eq_table->num_comp_eqs + MLX5_IRQ_VEC_COMP_BASE;
+ vecidx++) {
+ err = irq_cpu_rmap_add(eq_table->rmap,
+ pci_irq_vector(mdev->pdev, vecidx));
+ if (err) {
+ mlx5_core_err(mdev, "irq_cpu_rmap_add failed. err %d",
+ err);
+ goto err_irq_cpu_rmap_add;
+ }
+ }
+ return 0;
+
+err_irq_cpu_rmap_add:
+ clear_rmap(mdev);
+err_out:
+#endif
+ return err;
+}
+
/* This function should only be called after mlx5_cmd_force_teardown_hca */
void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
+ if (!mlx5_core_is_sf(dev))
+ clear_rmap(dev);
mlx5_irq_table_destroy(dev);
mutex_unlock(&table->lock);
}
@@ -948,12 +983,19 @@ int mlx5_eq_table_create(struct mlx5_core_dev *dev)
int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
MLX5_CAP_GEN(dev, max_num_eqs) :
1 << MLX5_CAP_GEN(dev, log_max_eq);
+ int max_eqs_sf;
int err;
eq_table->num_comp_eqs =
min_t(int,
- mlx5_irq_get_num_comp(eq_table->irq_table),
+ mlx5_irq_table_get_num_comp(eq_table->irq_table),
num_eqs - MLX5_MAX_ASYNC_EQS);
+ if (mlx5_core_is_sf(dev)) {
+ max_eqs_sf = min_t(int, MLX5_COMP_EQS_PER_SF,
+ mlx5_irq_table_get_sfs_vec(eq_table->irq_table));
+ eq_table->num_comp_eqs = min_t(int, eq_table->num_comp_eqs,
+ max_eqs_sf);
+ }
err = create_async_eqs(dev);
if (err) {
@@ -961,6 +1003,18 @@ int mlx5_eq_table_create(struct mlx5_core_dev *dev)
goto err_async_eqs;
}
+ if (!mlx5_core_is_sf(dev)) {
+ /* rmap is a mapping between an IRQ number and a queue number.
+ * Each IRQ can be assigned to only a single rmap.
+ * Since SFs share IRQs, rmap mapping cannot function correctly
+ * for IRQs that are shared between different core/netdev RX rings.
+ * Hence we don't allow netdev rmap for SFs.
+ */
+ err = set_rmap(dev);
+ if (err)
+ goto err_rmap;
+ }
+
err = create_comp_eqs(dev);
if (err) {
mlx5_core_err(dev, "Failed to create completion EQs\n");
@@ -969,6 +1023,9 @@ int mlx5_eq_table_create(struct mlx5_core_dev *dev)
return 0;
err_comp_eqs:
+ if (!mlx5_core_is_sf(dev))
+ clear_rmap(dev);
+err_rmap:
destroy_async_eqs(dev);
err_async_eqs:
return err;
@@ -976,6 +1033,8 @@ err_async_eqs:
void mlx5_eq_table_destroy(struct mlx5_core_dev *dev)
{
+ if (!mlx5_core_is_sf(dev))
+ clear_rmap(dev);
destroy_comp_eqs(dev);
destroy_async_eqs(dev);
}
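[Editor's note] create_comp_eqs() above now pins the i-th completion EQ to cpumask_local_spread(i, dev->priv.numa_node), i.e. CPUs on the device's NUMA node are handed out first and the remaining CPUs only after those are exhausted. A small user-space emulation of that spreading policy (the topology array is invented for illustration):

#include <stdio.h>

#define NCPUS 8

/* Invented topology: CPUs 0-3 on node 0, CPUs 4-7 on node 1. */
static const int cpu_node[NCPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

/* Return the i-th CPU, preferring CPUs of `node` before all others,
 * mirroring what cpumask_local_spread() does in the kernel.
 */
static int local_spread(int i, int node)
{
	int cpu, seen = 0;

	for (cpu = 0; cpu < NCPUS; cpu++)	/* local node first */
		if (cpu_node[cpu] == node && seen++ == i)
			return cpu;
	for (cpu = 0; cpu < NCPUS; cpu++)	/* then the rest */
		if (cpu_node[cpu] != node && seen++ == i)
			return cpu;
	return -1;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("EQ %d -> CPU %d\n", i, local_spread(i, 1));
	/* EQs 0..3 land on node-1 CPUs 4..7, then spill over to CPUs 0 and 1 */
	return 0;
}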
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
new file mode 100644
index 000000000000..a6e1d4f78268
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -0,0 +1,1299 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#include <linux/list.h>
+#include <linux/notifier.h>
+#include <net/netevent.h>
+#include <net/switchdev.h>
+#include "bridge.h"
+#include "eswitch.h"
+#include "bridge_priv.h"
+#define CREATE_TRACE_POINTS
+#include "diag/bridge_tracepoint.h"
+
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE 64000
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM 0
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE / 4 - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE / 2 - 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE - 1)
+
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE 64000
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM 0
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE / 2 - 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 1)
+
+#define MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE 0
+
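[Editor's note] For reference, with the sizes above the ingress table's 64000 flow entries are carved into three groups: VLAN at indices 0..15999 (the first quarter), the VLAN filter group at 16000..31999 (the second quarter), and MAC at 32000..63999 (the second half); the egress table splits evenly, VLAN at 0..31999 and MAC at 32000..63999.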
+enum {
+ MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
+ MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
+ MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE,
+};
+
+static const struct rhashtable_params fdb_ht_params = {
+ .key_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, key),
+ .key_len = sizeof(struct mlx5_esw_bridge_fdb_key),
+ .head_offset = offsetof(struct mlx5_esw_bridge_fdb_entry, ht_node),
+ .automatic_shrinking = true,
+};
+
+enum {
+ MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG = BIT(0),
+};
+
+struct mlx5_esw_bridge {
+ int ifindex;
+ int refcnt;
+ struct list_head list;
+ struct mlx5_esw_bridge_offloads *br_offloads;
+
+ struct list_head fdb_list;
+ struct rhashtable fdb_ht;
+ struct xarray vports;
+
+ struct mlx5_flow_table *egress_ft;
+ struct mlx5_flow_group *egress_vlan_fg;
+ struct mlx5_flow_group *egress_mac_fg;
+ unsigned long ageing_time;
+ u32 flags;
+};
+
+static void
+mlx5_esw_bridge_fdb_offload_notify(struct net_device *dev, const unsigned char *addr, u16 vid,
+ unsigned long val)
+{
+ struct switchdev_notifier_fdb_info send_info;
+
+ send_info.addr = addr;
+ send_info.vid = vid;
+ send_info.offloaded = true;
+ call_switchdev_notifiers(val, dev, &send_info.info, NULL);
+}
+
+static struct mlx5_flow_table *
+mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *fdb;
+
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!ns) {
+ esw_warn(dev, "Failed to get FDB namespace\n");
+ return ERR_PTR(-ENOENT);
+ }
+
+ ft_attr.flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ ft_attr.max_fte = max_fte;
+ ft_attr.level = level;
+ ft_attr.prio = FDB_BR_OFFLOAD;
+ fdb = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(fdb))
+ esw_warn(dev, "Failed to create bridge FDB Table (err=%ld)\n", PTR_ERR(fdb));
+
+ return fdb;
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_ingress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ u32 *in, *match;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET(create_flow_group_in, in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
+ match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);
+
+ MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
+
+ MLX5_SET(create_flow_group_in, in, start_flow_index,
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_FROM);
+ MLX5_SET(create_flow_group_in, in, end_flow_index,
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_VLAN_GRP_IDX_TO);
+
+ fg = mlx5_create_flow_group(ingress_ft, in);
+ kvfree(in);
+ if (IS_ERR(fg))
+ esw_warn(esw->dev,
+ "Failed to create VLAN flow group for bridge ingress table (err=%ld)\n",
+ PTR_ERR(fg));
+
+ return fg;
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_ingress_filter_fg_create(struct mlx5_eswitch *esw,
+ struct mlx5_flow_table *ingress_ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ u32 *in, *match;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET(create_flow_group_in, in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
+ match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+
+ MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
+
+ MLX5_SET(create_flow_group_in, in, start_flow_index,
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_FROM);
+ MLX5_SET(create_flow_group_in, in, end_flow_index,
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_FILTER_GRP_IDX_TO);
+
+ fg = mlx5_create_flow_group(ingress_ft, in);
+ if (IS_ERR(fg))
+ esw_warn(esw->dev,
+ "Failed to create bridge ingress table VLAN filter flow group (err=%ld)\n",
+ PTR_ERR(fg));
+
+ kvfree(in);
+ return fg;
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_ingress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *ingress_ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ u32 *in, *match;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET(create_flow_group_in, in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
+ match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.smac_15_0);
+
+ MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
+
+ MLX5_SET(create_flow_group_in, in, start_flow_index,
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_FROM);
+ MLX5_SET(create_flow_group_in, in, end_flow_index,
+ MLX5_ESW_BRIDGE_INGRESS_TABLE_MAC_GRP_IDX_TO);
+
+ fg = mlx5_create_flow_group(ingress_ft, in);
+ if (IS_ERR(fg))
+ esw_warn(esw->dev,
+ "Failed to create MAC flow group for bridge ingress table (err=%ld)\n",
+ PTR_ERR(fg));
+
+ kvfree(in);
+ return fg;
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_egress_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ u32 *in, *match;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);
+
+ MLX5_SET(create_flow_group_in, in, start_flow_index,
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_FROM);
+ MLX5_SET(create_flow_group_in, in, end_flow_index,
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO);
+
+ fg = mlx5_create_flow_group(egress_ft, in);
+ if (IS_ERR(fg))
+ esw_warn(esw->dev,
+ "Failed to create VLAN flow group for bridge egress table (err=%ld)\n",
+ PTR_ERR(fg));
+ kvfree(in);
+ return fg;
+}
+
+static struct mlx5_flow_group *
+mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ u32 *in, *match;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.dmac_15_0);
+
+ MLX5_SET(create_flow_group_in, in, start_flow_index,
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM);
+ MLX5_SET(create_flow_group_in, in, end_flow_index,
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO);
+
+ fg = mlx5_create_flow_group(egress_ft, in);
+ if (IS_ERR(fg))
+ esw_warn(esw->dev,
+ "Failed to create bridge egress table MAC flow group (err=%ld)\n",
+ PTR_ERR(fg));
+ kvfree(in);
+ return fg;
+}
+
+static int
+mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ struct mlx5_flow_group *mac_fg, *filter_fg, *vlan_fg;
+ struct mlx5_flow_table *ingress_ft, *skip_ft;
+ int err;
+
+ if (!mlx5_eswitch_vport_match_metadata_enabled(br_offloads->esw))
+ return -EOPNOTSUPP;
+
+ ingress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE,
+ MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
+ br_offloads->esw);
+ if (IS_ERR(ingress_ft))
+ return PTR_ERR(ingress_ft);
+
+ skip_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE,
+ MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE,
+ br_offloads->esw);
+ if (IS_ERR(skip_ft)) {
+ err = PTR_ERR(skip_ft);
+ goto err_skip_tbl;
+ }
+
+ vlan_fg = mlx5_esw_bridge_ingress_vlan_fg_create(br_offloads->esw, ingress_ft);
+ if (IS_ERR(vlan_fg)) {
+ err = PTR_ERR(vlan_fg);
+ goto err_vlan_fg;
+ }
+
+ filter_fg = mlx5_esw_bridge_ingress_filter_fg_create(br_offloads->esw, ingress_ft);
+ if (IS_ERR(filter_fg)) {
+ err = PTR_ERR(filter_fg);
+ goto err_filter_fg;
+ }
+
+ mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(br_offloads->esw, ingress_ft);
+ if (IS_ERR(mac_fg)) {
+ err = PTR_ERR(mac_fg);
+ goto err_mac_fg;
+ }
+
+ br_offloads->ingress_ft = ingress_ft;
+ br_offloads->skip_ft = skip_ft;
+ br_offloads->ingress_vlan_fg = vlan_fg;
+ br_offloads->ingress_filter_fg = filter_fg;
+ br_offloads->ingress_mac_fg = mac_fg;
+ return 0;
+
+err_mac_fg:
+ mlx5_destroy_flow_group(filter_fg);
+err_filter_fg:
+ mlx5_destroy_flow_group(vlan_fg);
+err_vlan_fg:
+ mlx5_destroy_flow_table(skip_ft);
+err_skip_tbl:
+ mlx5_destroy_flow_table(ingress_ft);
+ return err;
+}
+
+static void
+mlx5_esw_bridge_ingress_table_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ mlx5_destroy_flow_group(br_offloads->ingress_mac_fg);
+ br_offloads->ingress_mac_fg = NULL;
+ mlx5_destroy_flow_group(br_offloads->ingress_filter_fg);
+ br_offloads->ingress_filter_fg = NULL;
+ mlx5_destroy_flow_group(br_offloads->ingress_vlan_fg);
+ br_offloads->ingress_vlan_fg = NULL;
+ mlx5_destroy_flow_table(br_offloads->skip_ft);
+ br_offloads->skip_ft = NULL;
+ mlx5_destroy_flow_table(br_offloads->ingress_ft);
+ br_offloads->ingress_ft = NULL;
+}
+
+static int
+mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
+ struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_flow_group *mac_fg, *vlan_fg;
+ struct mlx5_flow_table *egress_ft;
+ int err;
+
+ egress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE,
+ MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
+ br_offloads->esw);
+ if (IS_ERR(egress_ft))
+ return PTR_ERR(egress_ft);
+
+ vlan_fg = mlx5_esw_bridge_egress_vlan_fg_create(br_offloads->esw, egress_ft);
+ if (IS_ERR(vlan_fg)) {
+ err = PTR_ERR(vlan_fg);
+ goto err_vlan_fg;
+ }
+
+ mac_fg = mlx5_esw_bridge_egress_mac_fg_create(br_offloads->esw, egress_ft);
+ if (IS_ERR(mac_fg)) {
+ err = PTR_ERR(mac_fg);
+ goto err_mac_fg;
+ }
+
+ bridge->egress_ft = egress_ft;
+ bridge->egress_vlan_fg = vlan_fg;
+ bridge->egress_mac_fg = mac_fg;
+ return 0;
+
+err_mac_fg:
+ mlx5_destroy_flow_group(vlan_fg);
+err_vlan_fg:
+ mlx5_destroy_flow_table(egress_ft);
+ return err;
+}
+
+static void
+mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
+{
+ mlx5_destroy_flow_group(bridge->egress_mac_fg);
+ mlx5_destroy_flow_group(bridge->egress_vlan_fg);
+ mlx5_destroy_flow_table(bridge->egress_ft);
+}
+
+static struct mlx5_flow_handle *
+mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
+ struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
+ struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT,
+ .flags = FLOW_ACT_NO_APPEND,
+ };
+ struct mlx5_flow_destination dests[2] = {};
+ struct mlx5_flow_spec *rule_spec;
+ struct mlx5_flow_handle *handle;
+ u8 *smac_v, *smac_c;
+
+ rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
+ if (!rule_spec)
+ return ERR_PTR(-ENOMEM);
+
+ rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2;
+
+ smac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
+ outer_headers.smac_47_16);
+ ether_addr_copy(smac_v, addr);
+ smac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
+ outer_headers.smac_47_16);
+ eth_broadcast_addr(smac_c);
+
+ MLX5_SET(fte_match_param, rule_spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
+ MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_for_match(br_offloads->esw, vport_num));
+
+ if (vlan && vlan->pkt_reformat_push) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act.pkt_reformat = vlan->pkt_reformat_push;
+ } else if (vlan) {
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.first_vid);
+ MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
+ vlan->vid);
+ }
+
+ dests[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dests[0].ft = bridge->egress_ft;
+ dests[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dests[1].counter_id = counter_id;
+
+ handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, dests,
+ ARRAY_SIZE(dests));
+
+ kvfree(rule_spec);
+ return handle;
+}
+
+static struct mlx5_flow_handle *
+mlx5_esw_bridge_ingress_filter_flow_create(u16 vport_num, const unsigned char *addr,
+ struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
+ struct mlx5_flow_destination dest = {
+ .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
+ .ft = br_offloads->skip_ft,
+ };
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ .flags = FLOW_ACT_NO_APPEND,
+ };
+ struct mlx5_flow_spec *rule_spec;
+ struct mlx5_flow_handle *handle;
+ u8 *smac_v, *smac_c;
+
+ rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
+ if (!rule_spec)
+ return ERR_PTR(-ENOMEM);
+
+ rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2;
+
+ smac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
+ outer_headers.smac_47_16);
+ ether_addr_copy(smac_v, addr);
+ smac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
+ outer_headers.smac_47_16);
+ eth_broadcast_addr(smac_c);
+
+ MLX5_SET(fte_match_param, rule_spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
+ MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_for_match(br_offloads->esw, vport_num));
+
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.cvlan_tag);
+
+ handle = mlx5_add_flow_rules(br_offloads->ingress_ft, rule_spec, &flow_act, &dest, 1);
+
+ kvfree(rule_spec);
+ return handle;
+}
+
+static struct mlx5_flow_handle *
+mlx5_esw_bridge_egress_flow_create(u16 vport_num, const unsigned char *addr,
+ struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_flow_destination dest = {
+ .type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
+ .vport.num = vport_num,
+ };
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ .flags = FLOW_ACT_NO_APPEND,
+ };
+ struct mlx5_flow_spec *rule_spec;
+ struct mlx5_flow_handle *handle;
+ u8 *dmac_v, *dmac_c;
+
+ rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
+ if (!rule_spec)
+ return ERR_PTR(-ENOMEM);
+
+ rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
+ dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
+ outer_headers.dmac_47_16);
+ ether_addr_copy(dmac_v, addr);
+ dmac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
+ outer_headers.dmac_47_16);
+ eth_broadcast_addr(dmac_c);
+
+ if (vlan) {
+ if (vlan->pkt_reformat_pop) {
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act.pkt_reformat = vlan->pkt_reformat_pop;
+ }
+
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
+ outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
+ outer_headers.first_vid);
+ MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
+ vlan->vid);
+ }
+
+ handle = mlx5_add_flow_rules(bridge->egress_ft, rule_spec, &flow_act, &dest, 1);
+
+ kvfree(rule_spec);
+ return handle;
+}
+
+static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
+ struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ struct mlx5_esw_bridge *bridge;
+ int err;
+
+ bridge = kvzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return ERR_PTR(-ENOMEM);
+
+ bridge->br_offloads = br_offloads;
+ err = mlx5_esw_bridge_egress_table_init(br_offloads, bridge);
+ if (err)
+ goto err_egress_tbl;
+
+ err = rhashtable_init(&bridge->fdb_ht, &fdb_ht_params);
+ if (err)
+ goto err_fdb_ht;
+
+ INIT_LIST_HEAD(&bridge->fdb_list);
+ xa_init(&bridge->vports);
+ bridge->ifindex = ifindex;
+ bridge->refcnt = 1;
+ bridge->ageing_time = BR_DEFAULT_AGEING_TIME;
+ list_add(&bridge->list, &br_offloads->bridges);
+
+ return bridge;
+
+err_fdb_ht:
+ mlx5_esw_bridge_egress_table_cleanup(bridge);
+err_egress_tbl:
+ kvfree(bridge);
+ return ERR_PTR(err);
+}
+
+static void mlx5_esw_bridge_get(struct mlx5_esw_bridge *bridge)
+{
+ bridge->refcnt++;
+}
+
+static void mlx5_esw_bridge_put(struct mlx5_esw_bridge_offloads *br_offloads,
+ struct mlx5_esw_bridge *bridge)
+{
+ if (--bridge->refcnt)
+ return;
+
+ mlx5_esw_bridge_egress_table_cleanup(bridge);
+ WARN_ON(!xa_empty(&bridge->vports));
+ list_del(&bridge->list);
+ rhashtable_destroy(&bridge->fdb_ht);
+ kvfree(bridge);
+
+ if (list_empty(&br_offloads->bridges))
+ mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
+}
+
+static struct mlx5_esw_bridge *
+mlx5_esw_bridge_lookup(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ struct mlx5_esw_bridge *bridge;
+
+ ASSERT_RTNL();
+
+ list_for_each_entry(bridge, &br_offloads->bridges, list) {
+ if (bridge->ifindex == ifindex) {
+ mlx5_esw_bridge_get(bridge);
+ return bridge;
+ }
+ }
+
+ if (!br_offloads->ingress_ft) {
+ int err = mlx5_esw_bridge_ingress_table_init(br_offloads);
+
+ if (err)
+ return ERR_PTR(err);
+ }
+
+ bridge = mlx5_esw_bridge_create(ifindex, br_offloads);
+ if (IS_ERR(bridge) && list_empty(&br_offloads->bridges))
+ mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
+ return bridge;
+}
+
+static int mlx5_esw_bridge_port_insert(struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge *bridge)
+{
+ return xa_insert(&bridge->vports, port->vport_num, port, GFP_KERNEL);
+}
+
+static struct mlx5_esw_bridge_port *
+mlx5_esw_bridge_port_lookup(u16 vport_num, struct mlx5_esw_bridge *bridge)
+{
+ return xa_load(&bridge->vports, vport_num);
+}
+
+static void mlx5_esw_bridge_port_erase(struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge *bridge)
+{
+ xa_erase(&bridge->vports, port->vport_num);
+}
+
+static void mlx5_esw_bridge_fdb_entry_refresh(unsigned long lastuse,
+ struct mlx5_esw_bridge_fdb_entry *entry)
+{
+ trace_mlx5_esw_bridge_fdb_entry_refresh(entry);
+
+ entry->lastuse = lastuse;
+ mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
+ entry->key.vid,
+ SWITCHDEV_FDB_ADD_TO_BRIDGE);
+}
+
+static void
+mlx5_esw_bridge_fdb_entry_cleanup(struct mlx5_esw_bridge_fdb_entry *entry,
+ struct mlx5_esw_bridge *bridge)
+{
+ trace_mlx5_esw_bridge_fdb_entry_cleanup(entry);
+
+ rhashtable_remove_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
+ mlx5_del_flow_rules(entry->egress_handle);
+ if (entry->filter_handle)
+ mlx5_del_flow_rules(entry->filter_handle);
+ mlx5_del_flow_rules(entry->ingress_handle);
+ mlx5_fc_destroy(bridge->br_offloads->esw->dev, entry->ingress_counter);
+ list_del(&entry->vlan_list);
+ list_del(&entry->list);
+ kvfree(entry);
+}
+
+static void mlx5_esw_bridge_fdb_flush(struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
+ if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER))
+ mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
+ entry->key.vid,
+ SWITCHDEV_FDB_DEL_TO_BRIDGE);
+ mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
+ }
+}
+
+static struct mlx5_esw_bridge_vlan *
+mlx5_esw_bridge_vlan_lookup(u16 vid, struct mlx5_esw_bridge_port *port)
+{
+ return xa_load(&port->vlans, vid);
+}
+
+static int
+mlx5_esw_bridge_vlan_push_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
+{
+ struct {
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+ } vlan_hdr = { htons(ETH_P_8021Q), htons(vlan->vid) };
+ struct mlx5_pkt_reformat_params reformat_params = {};
+ struct mlx5_pkt_reformat *pkt_reformat;
+
+ if (!BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_insert)) ||
+ MLX5_CAP_GEN_2(esw->dev, max_reformat_insert_size) < sizeof(vlan_hdr) ||
+ MLX5_CAP_GEN_2(esw->dev, max_reformat_insert_offset) <
+ offsetof(struct vlan_ethhdr, h_vlan_proto)) {
+ esw_warn(esw->dev, "Packet reformat INSERT_HEADER is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ reformat_params.type = MLX5_REFORMAT_TYPE_INSERT_HDR;
+ reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
+ reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
+ reformat_params.size = sizeof(vlan_hdr);
+ reformat_params.data = &vlan_hdr;
+ pkt_reformat = mlx5_packet_reformat_alloc(esw->dev,
+ &reformat_params,
+ MLX5_FLOW_NAMESPACE_FDB);
+ if (IS_ERR(pkt_reformat)) {
+ esw_warn(esw->dev, "Failed to alloc packet reformat INSERT_HEADER (err=%ld)\n",
+ PTR_ERR(pkt_reformat));
+ return PTR_ERR(pkt_reformat);
+ }
+
+ vlan->pkt_reformat_push = pkt_reformat;
+ return 0;
+}
+
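[Editor's note] mlx5_esw_bridge_vlan_push_create() above asks the hardware to insert the 4-byte { h_vlan_proto, h_vlan_TCI } pair at offsetof(struct vlan_ethhdr, h_vlan_proto) = 6 + 6 = 12, i.e. directly after the destination and source MAC addresses, which is exactly where an 802.1Q tag sits on the wire. A quick user-space self-check of that geometry (vlan_ethhdr is re-declared locally rather than pulled from kernel headers):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Local re-declaration of the layout, mirroring linux/if_vlan.h. */
struct vlan_ethhdr {
	uint8_t  h_dest[6];
	uint8_t  h_source[6];
	uint16_t h_vlan_proto;
	uint16_t h_vlan_TCI;
	uint16_t h_vlan_encapsulated_proto;
};

int main(void)
{
	/* the pushed header is 4 bytes, inserted right after the MACs */
	assert(offsetof(struct vlan_ethhdr, h_vlan_proto) == 12);
	assert(sizeof(((struct vlan_ethhdr *)0)->h_vlan_proto) +
	       sizeof(((struct vlan_ethhdr *)0)->h_vlan_TCI) == 4);
	return 0;
}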
+static void
+mlx5_esw_bridge_vlan_push_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
+{
+ mlx5_packet_reformat_dealloc(esw->dev, vlan->pkt_reformat_push);
+ vlan->pkt_reformat_push = NULL;
+}
+
+static int
+mlx5_esw_bridge_vlan_pop_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
+{
+ struct mlx5_pkt_reformat_params reformat_params = {};
+ struct mlx5_pkt_reformat *pkt_reformat;
+
+ if (!BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_remove)) ||
+ MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_size) < sizeof(struct vlan_hdr) ||
+ MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_offset) <
+ offsetof(struct vlan_ethhdr, h_vlan_proto)) {
+ esw_warn(esw->dev, "Packet reformat REMOVE_HEADER is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ reformat_params.type = MLX5_REFORMAT_TYPE_REMOVE_HDR;
+ reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
+ reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
+ reformat_params.size = sizeof(struct vlan_hdr);
+ pkt_reformat = mlx5_packet_reformat_alloc(esw->dev,
+ &reformat_params,
+ MLX5_FLOW_NAMESPACE_FDB);
+ if (IS_ERR(pkt_reformat)) {
+ esw_warn(esw->dev, "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
+ PTR_ERR(pkt_reformat));
+ return PTR_ERR(pkt_reformat);
+ }
+
+ vlan->pkt_reformat_pop = pkt_reformat;
+ return 0;
+}
+
+static void
+mlx5_esw_bridge_vlan_pop_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
+{
+ mlx5_packet_reformat_dealloc(esw->dev, vlan->pkt_reformat_pop);
+ vlan->pkt_reformat_pop = NULL;
+}
+
+static struct mlx5_esw_bridge_vlan *
+mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *port,
+ struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_bridge_vlan *vlan;
+ int err;
+
+ vlan = kvzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return ERR_PTR(-ENOMEM);
+
+ vlan->vid = vid;
+ vlan->flags = flags;
+ INIT_LIST_HEAD(&vlan->fdb_list);
+
+ if (flags & BRIDGE_VLAN_INFO_PVID) {
+ err = mlx5_esw_bridge_vlan_push_create(vlan, esw);
+ if (err)
+ goto err_vlan_push;
+ }
+ if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
+ err = mlx5_esw_bridge_vlan_pop_create(vlan, esw);
+ if (err)
+ goto err_vlan_pop;
+ }
+
+ err = xa_insert(&port->vlans, vid, vlan, GFP_KERNEL);
+ if (err)
+ goto err_xa_insert;
+
+ trace_mlx5_esw_bridge_vlan_create(vlan);
+ return vlan;
+
+err_xa_insert:
+ if (vlan->pkt_reformat_pop)
+ mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
+err_vlan_pop:
+ if (vlan->pkt_reformat_push)
+ mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
+err_vlan_push:
+ kvfree(vlan);
+ return ERR_PTR(err);
+}
+
+static void mlx5_esw_bridge_vlan_erase(struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge_vlan *vlan)
+{
+ xa_erase(&port->vlans, vlan->vid);
+}
+
+static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &vlan->fdb_list, vlan_list) {
+ if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER))
+ mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
+ entry->key.vid,
+ SWITCHDEV_FDB_DEL_TO_BRIDGE);
+ mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
+ }
+
+ if (vlan->pkt_reformat_pop)
+ mlx5_esw_bridge_vlan_pop_cleanup(vlan, bridge->br_offloads->esw);
+ if (vlan->pkt_reformat_push)
+ mlx5_esw_bridge_vlan_push_cleanup(vlan, bridge->br_offloads->esw);
+}
+
+static void mlx5_esw_bridge_vlan_cleanup(struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge_vlan *vlan,
+ struct mlx5_esw_bridge *bridge)
+{
+ trace_mlx5_esw_bridge_vlan_cleanup(vlan);
+ mlx5_esw_bridge_vlan_flush(vlan, bridge);
+ mlx5_esw_bridge_vlan_erase(port, vlan);
+ kvfree(vlan);
+}
+
+static void mlx5_esw_bridge_port_vlans_flush(struct mlx5_esw_bridge_port *port,
+ struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_esw_bridge_vlan *vlan;
+ unsigned long index;
+
+ xa_for_each(&port->vlans, index, vlan)
+ mlx5_esw_bridge_vlan_cleanup(port, vlan, bridge);
+}
+
+static struct mlx5_esw_bridge_vlan *
+mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, struct mlx5_esw_bridge *bridge,
+ struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_bridge_port *port;
+ struct mlx5_esw_bridge_vlan *vlan;
+
+ port = mlx5_esw_bridge_port_lookup(vport_num, bridge);
+ if (!port) {
+ /* FDB is added asynchronously on a wq while the port might have been deleted
+ * concurrently. Report at 'info' logging level and skip the FDB offload.
+ */
+ esw_info(esw->dev, "Failed to lookup bridge port (vport=%u)\n", vport_num);
+ return ERR_PTR(-EINVAL);
+ }
+
+ vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
+ if (!vlan) {
+ /* FDB is added asynchronously on a wq while the VLAN might have been deleted
+ * concurrently. Report at 'info' logging level and skip the FDB offload.
+ */
+ esw_info(esw->dev, "Failed to lookup bridge port vlan metadata (vport=%u)\n",
+ vport_num);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return vlan;
+}
+
+static struct mlx5_esw_bridge_fdb_entry *
+mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, const unsigned char *addr,
+ u16 vid, bool added_by_user, struct mlx5_eswitch *esw,
+ struct mlx5_esw_bridge *bridge)
+{
+ struct mlx5_esw_bridge_vlan *vlan = NULL;
+ struct mlx5_esw_bridge_fdb_entry *entry;
+ struct mlx5_flow_handle *handle;
+ struct mlx5_fc *counter;
+ struct mlx5e_priv *priv;
+ int err;
+
+ if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG && vid) {
+ vlan = mlx5_esw_bridge_port_vlan_lookup(vid, vport_num, bridge, esw);
+ if (IS_ERR(vlan))
+ return ERR_CAST(vlan);
+ }
+
+ priv = netdev_priv(dev);
+ entry = kvzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
+
+ ether_addr_copy(entry->key.addr, addr);
+ entry->key.vid = vid;
+ entry->dev = dev;
+ entry->vport_num = vport_num;
+ entry->lastuse = jiffies;
+ if (added_by_user)
+ entry->flags |= MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER;
+
+ counter = mlx5_fc_create(priv->mdev, true);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_ingress_fc_create;
+ }
+ entry->ingress_counter = counter;
+
+ handle = mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan, mlx5_fc_id(counter),
+ bridge);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d)\n",
+ vport_num, err);
+ goto err_ingress_flow_create;
+ }
+ entry->ingress_handle = handle;
+
+ if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG) {
+ handle = mlx5_esw_bridge_ingress_filter_flow_create(vport_num, addr, bridge);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ esw_warn(esw->dev, "Failed to create ingress filter(vport=%u,err=%d)\n",
+ vport_num, err);
+ goto err_ingress_filter_flow_create;
+ }
+ entry->filter_handle = handle;
+ }
+
+ handle = mlx5_esw_bridge_egress_flow_create(vport_num, addr, vlan, bridge);
+ if (IS_ERR(handle)) {
+ err = PTR_ERR(handle);
+ esw_warn(esw->dev, "Failed to create egress flow(vport=%u,err=%d)\n",
+ vport_num, err);
+ goto err_egress_flow_create;
+ }
+ entry->egress_handle = handle;
+
+ err = rhashtable_insert_fast(&bridge->fdb_ht, &entry->ht_node, fdb_ht_params);
+ if (err) {
+ esw_warn(esw->dev, "Failed to insert FDB flow(vport=%u,err=%d)\n", vport_num, err);
+ goto err_ht_init;
+ }
+
+ if (vlan)
+ list_add(&entry->vlan_list, &vlan->fdb_list);
+ else
+ INIT_LIST_HEAD(&entry->vlan_list);
+ list_add(&entry->list, &bridge->fdb_list);
+
+ trace_mlx5_esw_bridge_fdb_entry_init(entry);
+ return entry;
+
+err_ht_init:
+ mlx5_del_flow_rules(entry->egress_handle);
+err_egress_flow_create:
+ if (entry->filter_handle)
+ mlx5_del_flow_rules(entry->filter_handle);
+err_ingress_filter_flow_create:
+ mlx5_del_flow_rules(entry->ingress_handle);
+err_ingress_flow_create:
+ mlx5_fc_destroy(priv->mdev, entry->ingress_counter);
+err_ingress_fc_create:
+ kvfree(entry);
+ return ERR_PTR(err);
+}
+
+int mlx5_esw_bridge_ageing_time_set(unsigned long ageing_time, struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (!vport->bridge)
+ return -EINVAL;
+
+ vport->bridge->ageing_time = ageing_time;
+ return 0;
+}
+
+int mlx5_esw_bridge_vlan_filtering_set(bool enable, struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ struct mlx5_esw_bridge *bridge;
+ bool filtering;
+
+ if (!vport->bridge)
+ return -EINVAL;
+
+ bridge = vport->bridge;
+ filtering = bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
+ if (filtering == enable)
+ return 0;
+
+ mlx5_esw_bridge_fdb_flush(bridge);
+ if (enable)
+ bridge->flags |= MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
+ else
+ bridge->flags &= ~MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG;
+
+ return 0;
+}
+
+static int mlx5_esw_bridge_vport_init(struct mlx5_esw_bridge_offloads *br_offloads,
+ struct mlx5_esw_bridge *bridge,
+ struct mlx5_vport *vport)
+{
+ struct mlx5_eswitch *esw = br_offloads->esw;
+ struct mlx5_esw_bridge_port *port;
+ int err;
+
+ port = kvzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port) {
+ err = -ENOMEM;
+ goto err_port_alloc;
+ }
+
+ port->vport_num = vport->vport;
+ xa_init(&port->vlans);
+ err = mlx5_esw_bridge_port_insert(port, bridge);
+ if (err) {
+ esw_warn(esw->dev, "Failed to insert port metadata (vport=%u,err=%d)\n",
+ vport->vport, err);
+ goto err_port_insert;
+ }
+ trace_mlx5_esw_bridge_vport_init(port);
+
+ vport->bridge = bridge;
+ return 0;
+
+err_port_insert:
+ kvfree(port);
+err_port_alloc:
+ mlx5_esw_bridge_put(br_offloads, bridge);
+ return err;
+}
+
+static int mlx5_esw_bridge_vport_cleanup(struct mlx5_esw_bridge_offloads *br_offloads,
+ struct mlx5_vport *vport)
+{
+ struct mlx5_esw_bridge *bridge = vport->bridge;
+ struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
+ struct mlx5_esw_bridge_port *port;
+
+ list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
+ if (entry->vport_num == vport->vport)
+ mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
+
+ port = mlx5_esw_bridge_port_lookup(vport->vport, bridge);
+ if (!port) {
+ WARN(1, "Vport %u metadata not found on bridge", vport->vport);
+ return -EINVAL;
+ }
+
+ trace_mlx5_esw_bridge_vport_cleanup(port);
+ mlx5_esw_bridge_port_vlans_flush(port, bridge);
+ mlx5_esw_bridge_port_erase(port, bridge);
+ kvfree(port);
+ mlx5_esw_bridge_put(br_offloads, bridge);
+ vport->bridge = NULL;
+ return 0;
+}
+
+int mlx5_esw_bridge_vport_link(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads,
+ struct mlx5_vport *vport, struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_bridge *bridge;
+ int err;
+
+ WARN_ON(vport->bridge);
+
+ bridge = mlx5_esw_bridge_lookup(ifindex, br_offloads);
+ if (IS_ERR(bridge)) {
+ NL_SET_ERR_MSG_MOD(extack, "Error checking for existing bridge with same ifindex");
+ return PTR_ERR(bridge);
+ }
+
+ err = mlx5_esw_bridge_vport_init(br_offloads, bridge, vport);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "Error initializing port");
+ return err;
+}
+
+int mlx5_esw_bridge_vport_unlink(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads,
+ struct mlx5_vport *vport, struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_bridge *bridge = vport->bridge;
+ int err;
+
+ if (!bridge) {
+ NL_SET_ERR_MSG_MOD(extack, "Port is not attached to any bridge");
+ return -EINVAL;
+ }
+ if (bridge->ifindex != ifindex) {
+ NL_SET_ERR_MSG_MOD(extack, "Port is attached to another bridge");
+ return -EINVAL;
+ }
+
+ err = mlx5_esw_bridge_vport_cleanup(br_offloads, vport);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "Port cleanup failed");
+ return err;
+}
+
+int mlx5_esw_bridge_port_vlan_add(u16 vid, u16 flags, struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport, struct netlink_ext_ack *extack)
+{
+ struct mlx5_esw_bridge_port *port;
+ struct mlx5_esw_bridge_vlan *vlan;
+
+ port = mlx5_esw_bridge_port_lookup(vport->vport, vport->bridge);
+ if (!port)
+ return -EINVAL;
+
+ vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
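+ /* VLAN flags (e.g. PVID/untagged) are reflected in the offloaded
+ * flows, so a flag change is handled by recreating the VLAN entry.
+ */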
+ if (vlan) {
+ if (vlan->flags == flags)
+ return 0;
+ mlx5_esw_bridge_vlan_cleanup(port, vlan, vport->bridge);
+ }
+
+ vlan = mlx5_esw_bridge_vlan_create(vid, flags, port, esw);
+ if (IS_ERR(vlan)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to create VLAN entry");
+ return PTR_ERR(vlan);
+ }
+ return 0;
+}
+
+void mlx5_esw_bridge_port_vlan_del(u16 vid, struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ struct mlx5_esw_bridge_port *port;
+ struct mlx5_esw_bridge_vlan *vlan;
+
+ port = mlx5_esw_bridge_port_lookup(vport->vport, vport->bridge);
+ if (!port)
+ return;
+
+ vlan = mlx5_esw_bridge_vlan_lookup(vid, port);
+ if (!vlan)
+ return;
+ mlx5_esw_bridge_vlan_cleanup(port, vlan, vport->bridge);
+}
+
+void mlx5_esw_bridge_fdb_create(struct net_device *dev, struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct mlx5_esw_bridge *bridge = vport->bridge;
+ struct mlx5_esw_bridge_fdb_entry *entry;
+ u16 vport_num = vport->vport;
+
+ if (!bridge) {
+ esw_info(esw->dev, "Vport is not assigned to bridge (vport=%u)\n", vport_num);
+ return;
+ }
+
+ entry = mlx5_esw_bridge_fdb_entry_init(dev, vport_num, fdb_info->addr, fdb_info->vid,
+ fdb_info->added_by_user, esw, bridge);
+ if (IS_ERR(entry))
+ return;
+
+ if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
+ mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
+ SWITCHDEV_FDB_OFFLOADED);
+ else
+ /* Take over dynamic entries to prevent the kernel bridge from aging them out. */
+ mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
+ SWITCHDEV_FDB_ADD_TO_BRIDGE);
+}
+
+void mlx5_esw_bridge_fdb_remove(struct net_device *dev, struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ struct switchdev_notifier_fdb_info *fdb_info)
+{
+ struct mlx5_esw_bridge *bridge = vport->bridge;
+ struct mlx5_esw_bridge_fdb_entry *entry;
+ struct mlx5_esw_bridge_fdb_key key;
+ u16 vport_num = vport->vport;
+
+ if (!bridge) {
+ esw_warn(esw->dev, "Vport is not assigned to bridge (vport=%u)\n", vport_num);
+ return;
+ }
+
+ ether_addr_copy(key.addr, fdb_info->addr);
+ key.vid = fdb_info->vid;
+ entry = rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params);
+ if (!entry) {
+ esw_warn(esw->dev,
+ "FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
+ key.addr, key.vid, vport_num);
+ return;
+ }
+
+ if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER))
+ mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid,
+ SWITCHDEV_FDB_DEL_TO_BRIDGE);
+ mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
+}
+
+void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
+ struct mlx5_esw_bridge *bridge;
+
+ list_for_each_entry(bridge, &br_offloads->bridges, list) {
+ list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
+ unsigned long lastuse =
+ (unsigned long)mlx5_fc_query_lastuse(entry->ingress_counter);
+
+ if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
+ continue;
+
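+ /* Refresh the entry if hardware reported traffic since the last poll;
+ * otherwise age it out once ageing_time elapses without any hits.
+ */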
+ if (time_after(lastuse, entry->lastuse)) {
+ mlx5_esw_bridge_fdb_entry_refresh(lastuse, entry);
+ } else if (time_is_before_jiffies(entry->lastuse + bridge->ageing_time)) {
+ mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr,
+ entry->key.vid,
+ SWITCHDEV_FDB_DEL_TO_BRIDGE);
+ mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
+ }
+ }
+ }
+}
+
+static void mlx5_esw_bridge_flush(struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ struct mlx5_eswitch *esw = br_offloads->esw;
+ struct mlx5_vport *vport;
+ unsigned long i;
+
+ mlx5_esw_for_each_vport(esw, i, vport)
+ if (vport->bridge)
+ mlx5_esw_bridge_vport_cleanup(br_offloads, vport);
+
+ WARN_ONCE(!list_empty(&br_offloads->bridges),
+ "Cleaning up bridge offloads while still having bridges attached\n");
+}
+
+struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads;
+
+ br_offloads = kvzalloc(sizeof(*br_offloads), GFP_KERNEL);
+ if (!br_offloads)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&br_offloads->bridges);
+ br_offloads->esw = esw;
+ esw->br_offloads = br_offloads;
+
+ return br_offloads;
+}
+
+void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_bridge_offloads *br_offloads = esw->br_offloads;
+
+ if (!br_offloads)
+ return;
+
+ mlx5_esw_bridge_flush(br_offloads);
+
+ esw->br_offloads = NULL;
+ kvfree(br_offloads);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
new file mode 100644
index 000000000000..d826942b27fc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#ifndef __MLX5_ESW_BRIDGE_H__
+#define __MLX5_ESW_BRIDGE_H__
+
+#include <linux/notifier.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include "eswitch.h"
+
+struct mlx5_flow_table;
+struct mlx5_flow_group;
+
+struct mlx5_esw_bridge_offloads {
+ struct mlx5_eswitch *esw;
+ struct list_head bridges;
+ struct notifier_block netdev_nb;
+ struct notifier_block nb_blk;
+ struct notifier_block nb;
+ struct workqueue_struct *wq;
+ struct delayed_work update_work;
+
+ struct mlx5_flow_table *ingress_ft;
+ struct mlx5_flow_group *ingress_vlan_fg;
+ struct mlx5_flow_group *ingress_filter_fg;
+ struct mlx5_flow_group *ingress_mac_fg;
+
+ struct mlx5_flow_table *skip_ft;
+};
+
+struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw);
+void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw);
+int mlx5_esw_bridge_vport_link(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads,
+ struct mlx5_vport *vport, struct netlink_ext_ack *extack);
+int mlx5_esw_bridge_vport_unlink(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads,
+ struct mlx5_vport *vport, struct netlink_ext_ack *extack);
+void mlx5_esw_bridge_fdb_create(struct net_device *dev, struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ struct switchdev_notifier_fdb_info *fdb_info);
+void mlx5_esw_bridge_fdb_remove(struct net_device *dev, struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport,
+ struct switchdev_notifier_fdb_info *fdb_info);
+void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads);
+int mlx5_esw_bridge_ageing_time_set(unsigned long ageing_time, struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+int mlx5_esw_bridge_vlan_filtering_set(bool enable, struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport);
+int mlx5_esw_bridge_port_vlan_add(u16 vid, u16 flags, struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport, struct netlink_ext_ack *extack);
+void mlx5_esw_bridge_port_vlan_del(u16 vid, struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+
+#endif /* __MLX5_ESW_BRIDGE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
new file mode 100644
index 000000000000..d9ab2e8bc2cb
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#ifndef _MLX5_ESW_BRIDGE_PRIVATE_
+#define _MLX5_ESW_BRIDGE_PRIVATE_
+
+#include <linux/netdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/if_ether.h>
+#include <linux/rhashtable.h>
+#include <linux/xarray.h>
+#include "fs_core.h"
+
+struct mlx5_esw_bridge_fdb_key {
+ unsigned char addr[ETH_ALEN];
+ u16 vid;
+};
+
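+/* Set on entries installed by the user (static FDB entries) as opposed to
+ * learned ones; such entries are never refreshed or aged out by
+ * mlx5_esw_bridge_update().
+ */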
+enum {
+ MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER = BIT(0),
+};
+
+struct mlx5_esw_bridge_fdb_entry {
+ struct mlx5_esw_bridge_fdb_key key;
+ struct rhash_head ht_node;
+ struct net_device *dev;
+ struct list_head list;
+ struct list_head vlan_list;
+ u16 vport_num;
+ u16 flags;
+
+ struct mlx5_flow_handle *ingress_handle;
+ struct mlx5_fc *ingress_counter;
+ unsigned long lastuse;
+ struct mlx5_flow_handle *egress_handle;
+ struct mlx5_flow_handle *filter_handle;
+};
+
+struct mlx5_esw_bridge_vlan {
+ u16 vid;
+ u16 flags;
+ struct list_head fdb_list;
+ struct mlx5_pkt_reformat *pkt_reformat_push;
+ struct mlx5_pkt_reformat *pkt_reformat_pop;
+};
+
+struct mlx5_esw_bridge_port {
+ u16 vport_num;
+ struct xarray vlans;
+};
+
+#endif /* _MLX5_ESW_BRIDGE_PRIVATE_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
new file mode 100644
index 000000000000..227964b7d3b9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mlx5
+
+#if !defined(_MLX5_ESW_BRIDGE_TRACEPOINT_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MLX5_ESW_BRIDGE_TRACEPOINT_
+
+#include <linux/tracepoint.h>
+#include "../bridge_priv.h"
+
+DECLARE_EVENT_CLASS(mlx5_esw_bridge_fdb_template,
+ TP_PROTO(const struct mlx5_esw_bridge_fdb_entry *fdb),
+ TP_ARGS(fdb),
+ TP_STRUCT__entry(
+ __array(char, dev_name, IFNAMSIZ)
+ __array(unsigned char, addr, ETH_ALEN)
+ __field(u16, vid)
+ __field(u16, flags)
+ __field(unsigned int, used)
+ ),
+ TP_fast_assign(
+ strncpy(__entry->dev_name,
+ netdev_name(fdb->dev),
+ IFNAMSIZ);
+ memcpy(__entry->addr, fdb->key.addr, ETH_ALEN);
+ __entry->vid = fdb->key.vid;
+ __entry->flags = fdb->flags;
+ __entry->used = jiffies_to_msecs(jiffies - fdb->lastuse);
+ ),
+ TP_printk("net_device=%s addr=%pM vid=%hu flags=%hx used=%u",
+ __entry->dev_name,
+ __entry->addr,
+ __entry->vid,
+ __entry->flags,
+ __entry->used / 1000)
+ );
+
+DEFINE_EVENT(mlx5_esw_bridge_fdb_template,
+ mlx5_esw_bridge_fdb_entry_init,
+ TP_PROTO(const struct mlx5_esw_bridge_fdb_entry *fdb),
+ TP_ARGS(fdb)
+ );
+DEFINE_EVENT(mlx5_esw_bridge_fdb_template,
+ mlx5_esw_bridge_fdb_entry_refresh,
+ TP_PROTO(const struct mlx5_esw_bridge_fdb_entry *fdb),
+ TP_ARGS(fdb)
+ );
+DEFINE_EVENT(mlx5_esw_bridge_fdb_template,
+ mlx5_esw_bridge_fdb_entry_cleanup,
+ TP_PROTO(const struct mlx5_esw_bridge_fdb_entry *fdb),
+ TP_ARGS(fdb)
+ );
+
+DECLARE_EVENT_CLASS(mlx5_esw_bridge_vlan_template,
+ TP_PROTO(const struct mlx5_esw_bridge_vlan *vlan),
+ TP_ARGS(vlan),
+ TP_STRUCT__entry(
+ __field(u16, vid)
+ __field(u16, flags)
+ ),
+ TP_fast_assign(
+ __entry->vid = vlan->vid;
+ __entry->flags = vlan->flags;
+ ),
+ TP_printk("vid=%hu flags=%hx",
+ __entry->vid,
+ __entry->flags)
+ );
+
+DEFINE_EVENT(mlx5_esw_bridge_vlan_template,
+ mlx5_esw_bridge_vlan_create,
+ TP_PROTO(const struct mlx5_esw_bridge_vlan *vlan),
+ TP_ARGS(vlan)
+ );
+DEFINE_EVENT(mlx5_esw_bridge_vlan_template,
+ mlx5_esw_bridge_vlan_cleanup,
+ TP_PROTO(const struct mlx5_esw_bridge_vlan *vlan),
+ TP_ARGS(vlan)
+ );
+
+DECLARE_EVENT_CLASS(mlx5_esw_bridge_port_template,
+ TP_PROTO(const struct mlx5_esw_bridge_port *port),
+ TP_ARGS(port),
+ TP_STRUCT__entry(
+ __field(u16, vport_num)
+ ),
+ TP_fast_assign(
+ __entry->vport_num = port->vport_num;
+ ),
+ TP_printk("vport_num=%hu", __entry->vport_num)
+ );
+
+DEFINE_EVENT(mlx5_esw_bridge_port_template,
+ mlx5_esw_bridge_vport_init,
+ TP_PROTO(const struct mlx5_esw_bridge_port *port),
+ TP_ARGS(port)
+ );
+DEFINE_EVENT(mlx5_esw_bridge_port_template,
+ mlx5_esw_bridge_vport_cleanup,
+ TP_PROTO(const struct mlx5_esw_bridge_port *port),
+ TP_ARGS(port)
+ );
+
+#endif /* _MLX5_ESW_BRIDGE_TRACEPOINT_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH esw/diag
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE bridge_tracepoint
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 570f2280823c..b88705a3a1a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -35,6 +35,7 @@
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
+#include <linux/mlx5/mpfs.h>
#include "esw/acl/lgcy.h"
#include "esw/legacy.h"
#include "mlx5_core.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 64ccb2bc0b58..48cac5bf606d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -150,6 +150,8 @@ enum mlx5_eswitch_vport_event {
MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};
+struct mlx5_esw_bridge;
+
struct mlx5_vport {
struct mlx5_core_dev *dev;
struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
@@ -178,6 +180,7 @@ struct mlx5_vport {
enum mlx5_eswitch_vport_event enabled_events;
int index;
struct devlink_port *dl_port;
+ struct mlx5_esw_bridge *bridge;
};
struct mlx5_esw_indir_table;
@@ -196,6 +199,7 @@ struct mlx5_eswitch_fdb {
struct offloads_fdb {
struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *tc_miss_table;
struct mlx5_flow_table *slow_fdb;
struct mlx5_flow_group *send_to_vport_grp;
struct mlx5_flow_group *send_to_vport_meta_grp;
@@ -270,6 +274,8 @@ enum {
MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED = BIT(1),
};
+struct mlx5_esw_bridge_offloads;
+
struct mlx5_eswitch {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
@@ -299,6 +305,7 @@ struct mlx5_eswitch {
u32 root_tsar_id;
} qos;
+ struct mlx5_esw_bridge_offloads *br_offloads;
struct mlx5_esw_offload offloads;
int mode;
u16 manager_vport;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index db1e74280e57..7579f3402776 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -219,7 +219,8 @@ esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
struct mlx5_fs_chains *chains,
int i)
{
- flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+ if (mlx5_chains_ignore_flow_level_supported(chains))
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
}
@@ -1633,7 +1634,21 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
}
esw->fdb_table.offloads.slow_fdb = fdb;
- err = esw_chains_create(esw, fdb);
+ /* Create an empty TC-miss managed table. This allows plugging in the
+ * following priorities without directly exposing their level 0 tables
+ * to eswitch_offloads; the TC-miss table is passed as miss_fdb to the
+ * following call to esw_chains_create() instead.
+ */
+ memset(&ft_attr, 0, sizeof(ft_attr));
+ ft_attr.prio = FDB_TC_MISS;
+ esw->fdb_table.offloads.tc_miss_table = mlx5_create_flow_table(root_ns, &ft_attr);
+ if (IS_ERR(esw->fdb_table.offloads.tc_miss_table)) {
+ err = PTR_ERR(esw->fdb_table.offloads.tc_miss_table);
+ esw_warn(dev, "Failed to create TC miss FDB Table err %d\n", err);
+ goto tc_miss_table_err;
+ }
+
+ err = esw_chains_create(esw, esw->fdb_table.offloads.tc_miss_table);
if (err) {
esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
goto fdb_chains_err;
@@ -1778,6 +1793,8 @@ send_vport_meta_err:
send_vport_err:
esw_chains_destroy(esw, esw_chains(esw));
fdb_chains_err:
+ mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
+tc_miss_table_err:
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
/* Holds true only as long as DMFS is the default */
@@ -1805,6 +1822,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
esw_chains_destroy(esw, esw_chains(esw));
+ mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index a81ece94f599..b45954905845 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -65,7 +65,7 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *root_ns;
- int err;
+ int err, err2;
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
if (!root_ns) {
@@ -76,33 +76,34 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
- /* As this is the terminating action then the termination table is the
- * same prio as the slow path
- */
+ /* As this is the terminating action, the termination table is created
+ * unmanaged in the TC offload prio at level 1.
+ */
- ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION |
+ ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION | MLX5_FLOW_TABLE_UNMANAGED |
MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- ft_attr.prio = FDB_SLOW_PATH;
+ ft_attr.prio = FDB_TC_OFFLOAD;
ft_attr.max_fte = 1;
+ ft_attr.level = 1;
ft_attr.autogroup.max_num_groups = 1;
tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
if (IS_ERR(tt->termtbl)) {
- esw_warn(dev, "Failed to create termination table (error %d)\n",
- IS_ERR(tt->termtbl));
- return -EOPNOTSUPP;
+ err = PTR_ERR(tt->termtbl);
+ esw_warn(dev, "Failed to create termination table, err %pe\n", tt->termtbl);
+ return err;
}
tt->rule = mlx5_add_flow_rules(tt->termtbl, NULL, flow_act,
&tt->dest, 1);
if (IS_ERR(tt->rule)) {
- esw_warn(dev, "Failed to create termination table rule (error %d)\n",
- IS_ERR(tt->rule));
+ err = PTR_ERR(tt->rule);
+ esw_warn(dev, "Failed to create termination table rule, err %pe\n", tt->rule);
goto add_flow_err;
}
return 0;
add_flow_err:
- err = mlx5_destroy_flow_table(tt->termtbl);
- if (err)
- esw_warn(dev, "Failed to destroy termination table\n");
+ err2 = mlx5_destroy_flow_table(tt->termtbl);
+ if (err2)
+ esw_warn(dev, "Failed to destroy termination table, err %d\n", err2);
- return -EOPNOTSUPP;
+ return err;
}
static struct mlx5_termtbl_handle *
@@ -172,19 +173,6 @@ mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
}
}
-static bool mlx5_eswitch_termtbl_is_encap_reformat(struct mlx5_pkt_reformat *rt)
-{
- switch (rt->reformat_type) {
- case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
- case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
- case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
- case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
- return true;
- default:
- return false;
- }
-}
-
static void
mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
struct mlx5_flow_act *dst)
@@ -202,14 +190,6 @@ mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
memset(&src->vlan[1], 0, sizeof(src->vlan[1]));
}
}
-
- if (src->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
- mlx5_eswitch_termtbl_is_encap_reformat(src->pkt_reformat)) {
- src->action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- dst->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- dst->pkt_reformat = src->pkt_reformat;
- src->pkt_reformat = NULL;
- }
}
static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
@@ -238,6 +218,7 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
int i;
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table) ||
+ !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level) ||
attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH ||
!mlx5_eswitch_offload_is_uplink_port(esw, spec))
return false;
@@ -279,12 +260,19 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
if (dest[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
continue;
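+ /* Packet reformat (encap) is now applied per vport destination in
+ * its termination table, rather than once on the original rule.
+ */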
+ if (attr->dests[num_vport_dests].flags & MLX5_ESW_DEST_ENCAP) {
+ term_tbl_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ term_tbl_act.pkt_reformat = attr->dests[num_vport_dests].pkt_reformat;
+ } else {
+ term_tbl_act.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ term_tbl_act.pkt_reformat = NULL;
+ }
+
/* get the terminating table for the action list */
tt = mlx5_eswitch_termtbl_get_create(esw, &term_tbl_act,
&dest[i], attr);
if (IS_ERR(tt)) {
- esw_warn(esw->dev, "Failed to get termination table (error %d)\n",
- IS_ERR(tt));
+ esw_warn(esw->dev, "Failed to get termination table, err %pe\n", tt);
goto revert_changes;
}
attr->dests[num_vport_dests].termtbl = tt;
@@ -301,6 +289,9 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
goto revert_changes;
/* create the FTE */
+ flow_act->action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act->pkt_reformat = NULL;
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
rule = mlx5_add_flow_rules(fdb, spec, flow_act, dest, num_dest);
if (IS_ERR(rule))
goto revert_changes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 8e06731d3cb3..896a6c3dbdb7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -36,6 +36,7 @@
#include "fs_core.h"
#include "fs_cmd.h"
+#include "fs_ft_pool.h"
#include "mlx5_core.h"
#include "eswitch.h"
@@ -49,9 +50,11 @@ static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int log_size,
+ unsigned int size,
struct mlx5_flow_table *next_ft)
{
+ ft->max_fte = size ? roundup_pow_of_two(size) : 1;
+
return 0;
}
@@ -108,9 +111,7 @@ static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns,
}
static int mlx5_cmd_stub_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
- int reformat_type,
- size_t size,
- void *reformat_data,
+ struct mlx5_pkt_reformat_params *params,
enum mlx5_flow_namespace_type namespace,
struct mlx5_pkt_reformat *pkt_reformat)
{
@@ -181,7 +182,7 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int log_size,
+ unsigned int size,
struct mlx5_flow_table *next_ft)
{
int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
@@ -192,12 +193,18 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_core_dev *dev = ns->dev;
int err;
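+ /* Flow table sizes are served from the shared flow table size pool;
+ * POOL_NEXT_SIZE (0) requests the largest chunk currently available.
+ */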
+ if (size != POOL_NEXT_SIZE)
+ size = roundup_pow_of_two(size);
+ size = mlx5_ft_pool_get_avail_sz(dev, ft->type, size);
+ if (!size)
+ return -ENOSPC;
+
MLX5_SET(create_flow_table_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_TABLE);
MLX5_SET(create_flow_table_in, in, table_type, ft->type);
MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
- MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, log_size);
+ MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, size ? ilog2(size) : 0);
MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
MLX5_SET(create_flow_table_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
@@ -234,9 +241,14 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
}
err = mlx5_cmd_exec_inout(dev, create_flow_table, in, out);
- if (!err)
+ if (!err) {
ft->id = MLX5_GET(create_flow_table_out, out,
table_id);
+ ft->max_fte = size;
+ } else {
+ mlx5_ft_pool_put_sz(ns->dev, size);
+ }
+
return err;
}
@@ -245,6 +257,7 @@ static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
{
u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
struct mlx5_core_dev *dev = ns->dev;
+ int err;
MLX5_SET(destroy_flow_table_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_TABLE);
@@ -254,7 +267,11 @@ static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
MLX5_SET(destroy_flow_table_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
- return mlx5_cmd_exec_in(dev, destroy_flow_table, in);
+ err = mlx5_cmd_exec_in(dev, destroy_flow_table, in);
+ if (!err)
+ mlx5_ft_pool_put_sz(ns->dev, ft->max_fte);
+
+ return err;
}
static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
@@ -682,9 +699,7 @@ int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
}
static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
- int reformat_type,
- size_t size,
- void *reformat_data,
+ struct mlx5_pkt_reformat_params *params,
enum mlx5_flow_namespace_type namespace,
struct mlx5_pkt_reformat *pkt_reformat)
{
@@ -702,14 +717,14 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
else
max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);
- if (size > max_encap_size) {
+ if (params->size > max_encap_size) {
mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
- size, max_encap_size);
+ params->size, max_encap_size);
return -EINVAL;
}
- in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) + size,
- GFP_KERNEL);
+ in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) +
+ params->size, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -718,15 +733,20 @@ static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
reformat = MLX5_ADDR_OF(packet_reformat_context_in,
packet_reformat_context_in,
reformat_data);
- inlen = reformat - (void *)in + size;
+ inlen = reformat - (void *)in + params->size;
MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
- reformat_data_size, size);
+ reformat_data_size, params->size);
+ MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
+ reformat_type, params->type);
+ MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
+ reformat_param_0, params->param_0);
MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
- reformat_type, reformat_type);
- memcpy(reformat, reformat_data, size);
+ reformat_param_1, params->param_1);
+ if (params->data && params->size)
+ memcpy(reformat, params->data, params->size);
err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index d62de642eca9..5ecd33cdc087 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -38,7 +38,7 @@
struct mlx5_flow_cmds {
int (*create_flow_table)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int log_size,
+ unsigned int size,
struct mlx5_flow_table *next_ft);
int (*destroy_flow_table)(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft);
@@ -77,9 +77,7 @@ struct mlx5_flow_cmds {
bool disconnect);
int (*packet_reformat_alloc)(struct mlx5_flow_root_namespace *ns,
- int reformat_type,
- size_t size,
- void *reformat_data,
+ struct mlx5_pkt_reformat_params *params,
enum mlx5_flow_namespace_type namespace,
struct mlx5_pkt_reformat *pkt_reformat);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index f74d2c834037..2cd7aea5d329 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -38,6 +38,7 @@
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
+#include "fs_ft_pool.h"
#include "diag/fs_tracepoint.h"
#include "accel/ipsec.h"
#include "fpga/ipsec.h"
@@ -752,7 +753,7 @@ static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *f
return fg;
}
-static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
+static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
enum fs_flow_table_type table_type,
enum fs_flow_table_op_mod op_mod,
u32 flags)
@@ -775,7 +776,6 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_ft
ft->op_mod = op_mod;
ft->type = table_type;
ft->vport = vport;
- ft->max_fte = max_fte;
ft->flags = flags;
INIT_LIST_HEAD(&ft->fwd_rules);
mutex_init(&ft->lock);
@@ -1070,7 +1070,6 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
struct mlx5_flow_table *next_ft;
struct fs_prio *fs_prio = NULL;
struct mlx5_flow_table *ft;
- int log_table_sz;
int err;
if (!root) {
@@ -1101,7 +1100,6 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
*/
ft = alloc_flow_table(ft_attr->level,
vport,
- ft_attr->max_fte ? roundup_pow_of_two(ft_attr->max_fte) : 0,
root->table_type,
op_mod, ft_attr->flags);
if (IS_ERR(ft)) {
@@ -1110,12 +1108,11 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
}
tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
- log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
next_ft = unmanaged ? ft_attr->next_ft :
find_next_chained_ft(fs_prio);
ft->def_miss_action = ns->def_miss_action;
ft->ns = ns;
- err = root->cmds->create_flow_table(root, ft, log_table_sz, next_ft);
+ err = root->cmds->create_flow_table(root, ft, ft_attr->max_fte, next_ft);
if (err)
goto free_ft;
@@ -1170,28 +1167,36 @@ mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
ft_attr.level = level;
ft_attr.prio = prio;
+ ft_attr.max_fte = 1;
+
return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
}
EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
+#define MAX_FLOW_GROUP_SIZE BIT(24)
struct mlx5_flow_table*
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table_attr *ft_attr)
{
int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
- int autogroups_max_fte = ft_attr->max_fte - num_reserved_entries;
int max_num_groups = ft_attr->autogroup.max_num_groups;
struct mlx5_flow_table *ft;
-
- if (max_num_groups > autogroups_max_fte)
- return ERR_PTR(-EINVAL);
- if (num_reserved_entries > ft_attr->max_fte)
- return ERR_PTR(-EINVAL);
+ int autogroups_max_fte;
ft = mlx5_create_flow_table(ns, ft_attr);
if (IS_ERR(ft))
return ft;
+ autogroups_max_fte = ft->max_fte - num_reserved_entries;
+ if (max_num_groups > autogroups_max_fte)
+ goto err_validate;
+ if (num_reserved_entries > ft->max_fte)
+ goto err_validate;
+
+ /* Align the number of groups according to the largest group size */
+ if (autogroups_max_fte / (max_num_groups + 1) > MAX_FLOW_GROUP_SIZE)
+ max_num_groups = (autogroups_max_fte / MAX_FLOW_GROUP_SIZE) - 1;
+
ft->autogroup.active = true;
ft->autogroup.required_groups = max_num_groups;
ft->autogroup.max_fte = autogroups_max_fte;
@@ -1199,6 +1204,10 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);
return ft;
+
+err_validate:
+ mlx5_destroy_flow_table(ft);
+ return ERR_PTR(-ENOSPC);
}
EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
@@ -2592,6 +2601,7 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
mlx5_cleanup_fc_stats(dev);
kmem_cache_destroy(steering->ftes_cache);
kmem_cache_destroy(steering->fgs_cache);
+ mlx5_ft_pool_destroy(dev);
kfree(steering);
}
@@ -2770,6 +2780,18 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
if (err)
goto out_err;
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_TC_MISS, 1);
+ if (IS_ERR(maj_prio)) {
+ err = PTR_ERR(maj_prio);
+ goto out_err;
+ }
+
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BR_OFFLOAD, 3);
+ if (IS_ERR(maj_prio)) {
+ err = PTR_ERR(maj_prio);
+ goto out_err;
+ }
+
maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
if (IS_ERR(maj_prio)) {
err = PTR_ERR(maj_prio);
@@ -2942,9 +2964,13 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
if (err)
return err;
+ err = mlx5_ft_pool_init(dev);
+ if (err)
+ return err;
+
steering = kzalloc(sizeof(*steering), GFP_KERNEL);
- if (!steering)
- return -ENOMEM;
+ if (!steering) {
+ err = -ENOMEM;
+ goto err;
+ }
steering->dev = dev;
dev->priv.steering = steering;
@@ -3151,9 +3177,7 @@ void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
EXPORT_SYMBOL(mlx5_modify_header_dealloc);
struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
- int reformat_type,
- size_t size,
- void *reformat_data,
+ struct mlx5_pkt_reformat_params *params,
enum mlx5_flow_namespace_type ns_type)
{
struct mlx5_pkt_reformat *pkt_reformat;
@@ -3169,9 +3193,8 @@ struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
return ERR_PTR(-ENOMEM);
pkt_reformat->ns_type = ns_type;
- pkt_reformat->reformat_type = reformat_type;
- err = root->cmds->packet_reformat_alloc(root, reformat_type, size,
- reformat_data, ns_type,
+ pkt_reformat->reformat_type = params->type;
+ err = root->cmds->packet_reformat_alloc(root, params, ns_type,
pkt_reformat);
if (err) {
kfree(pkt_reformat);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index e577a2c424af..7317cdeab661 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -331,6 +331,7 @@ void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev);
#define MLX5_CAP_FLOWTABLE_TYPE(mdev, cap, type) ( \
(type == FS_FT_NIC_RX) ? MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) : \
+ (type == FS_FT_NIC_TX) ? MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) : \
(type == FS_FT_ESW_EGRESS_ACL) ? MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) : \
(type == FS_FT_ESW_INGRESS_ACL) ? MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) : \
(type == FS_FT_FDB) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
new file mode 100644
index 000000000000..c14590acc772
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#include "fs_ft_pool.h"
+
+/* Firmware currently supports 4 pools of 4 sizes that it serves flow tables
+ * from (FT_POOLS), and a virtual memory region of 16M (FT_SIZE) that is
+ * duplicated for each pool. We can allocate up to 16M from each pool, and we
+ * keep track of how much is used via mlx5_ft_pool_get_avail_sz. Firmware
+ * doesn't report any of this for now.
+ * FT_POOLS is expected to be sorted from large to small and to match the
+ * firmware pools.
+ */
+#define FT_SIZE (16 * 1024 * 1024)
+static const unsigned int FT_POOLS[] = { 4 * 1024 * 1024,
+ 1 * 1024 * 1024,
+ 64 * 1024,
+ 128,
+ 1 /* size for termination tables */ };
+struct mlx5_ft_pool {
+ int ft_left[ARRAY_SIZE(FT_POOLS)];
+};
+
+int mlx5_ft_pool_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_ft_pool *ft_pool;
+ int i;
+
+ ft_pool = kzalloc(sizeof(*ft_pool), GFP_KERNEL);
+ if (!ft_pool)
+ return -ENOMEM;
+
+ for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--)
+ ft_pool->ft_left[i] = FT_SIZE / FT_POOLS[i];
+
+ dev->priv.ft_pool = ft_pool;
+ return 0;
+}
+
+void mlx5_ft_pool_destroy(struct mlx5_core_dev *dev)
+{
+ kfree(dev->priv.ft_pool);
+}
+
+int
+mlx5_ft_pool_get_avail_sz(struct mlx5_core_dev *dev, enum fs_flow_table_type table_type,
+ int desired_size)
+{
+ u32 max_ft_size = 1 << MLX5_CAP_FLOWTABLE_TYPE(dev, log_max_ft_size, table_type);
+ int i, found_i = -1;
+
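+ /* Scan from the smallest pool to the largest: an explicit desired_size
+ * takes the tightest fit, while POOL_NEXT_SIZE (0) keeps scanning and
+ * ends up taking the largest pool that still has entries left.
+ */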
+ for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
+ if (dev->priv.ft_pool->ft_left[i] && FT_POOLS[i] >= desired_size &&
+ FT_POOLS[i] <= max_ft_size) {
+ found_i = i;
+ if (desired_size != POOL_NEXT_SIZE)
+ break;
+ }
+ }
+
+ if (found_i != -1) {
+ --dev->priv.ft_pool->ft_left[found_i];
+ return FT_POOLS[found_i];
+ }
+
+ return 0;
+}
+
+void
+mlx5_ft_pool_put_sz(struct mlx5_core_dev *dev, int sz)
+{
+ int i;
+
+ if (!sz)
+ return;
+
+ for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
+ if (sz == FT_POOLS[i]) {
+ ++dev->priv.ft_pool->ft_left[i];
+ return;
+ }
+ }
+
+ WARN_ONCE(1, "Couldn't find size %d in flow table size pool", sz);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
new file mode 100644
index 000000000000..25f4274b372b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_ft_pool.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#ifndef __MLX5_FS_FT_POOL_H__
+#define __MLX5_FS_FT_POOL_H__
+
+#include <linux/mlx5/driver.h>
+#include "fs_core.h"
+
+#define POOL_NEXT_SIZE 0
+
+int mlx5_ft_pool_init(struct mlx5_core_dev *dev);
+void mlx5_ft_pool_destroy(struct mlx5_core_dev *dev);
+
+int
+mlx5_ft_pool_get_avail_sz(struct mlx5_core_dev *dev, enum fs_flow_table_type table_type,
+ int desired_size);
+void
+mlx5_ft_pool_put_sz(struct mlx5_core_dev *dev, int sz);
+
+#endif /* __MLX5_FS_FT_POOL_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 02558ac2ace6..016d26f809a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -148,6 +148,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
if (err)
return err;
+ if (MLX5_CAP_GEN(dev, hca_cap_2)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2);
+ if (err)
+ return err;
+ }
+
if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index d5d57630015f..106b50e42b46 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -349,6 +349,9 @@ static void mlx5_sync_reset_abort_event(struct work_struct *work)
reset_abort_work);
struct mlx5_core_dev *dev = fw_reset->dev;
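+ /* Nothing to abort if no reset was actually requested. */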
+ if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags))
+ return;
+
mlx5_sync_reset_clear_reset_requested(dev, true);
mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n");
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 97d96fc38a65..0e487ec57d5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -150,6 +150,7 @@ enum mlx5_ptys_rate {
MLX5_PTYS_RATE_FDR = 1 << 4,
MLX5_PTYS_RATE_EDR = 1 << 5,
MLX5_PTYS_RATE_HDR = 1 << 6,
+ MLX5_PTYS_RATE_NDR = 1 << 7,
};
static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate)
@@ -162,6 +163,7 @@ static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate)
case MLX5_PTYS_RATE_FDR: return 14000;
case MLX5_PTYS_RATE_EDR: return 25000;
case MLX5_PTYS_RATE_HDR: return 50000;
+ case MLX5_PTYS_RATE_NDR: return 100000;
default: return -1;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index b8748390335f..5c043c5cc403 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -93,6 +93,64 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
}
EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
+static int mlx5_lag_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr);
+static void mlx5_do_bond_work(struct work_struct *work);
+
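+/* The lag device is shared between both PFs and is now reference counted;
+ * it is only freed once the last mdev drops its reference.
+ */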
+static void mlx5_ldev_free(struct kref *ref)
+{
+ struct mlx5_lag *ldev = container_of(ref, struct mlx5_lag, ref);
+
+ if (ldev->nb.notifier_call)
+ unregister_netdevice_notifier_net(&init_net, &ldev->nb);
+ mlx5_lag_mp_cleanup(ldev);
+ cancel_delayed_work_sync(&ldev->bond_work);
+ destroy_workqueue(ldev->wq);
+ kfree(ldev);
+}
+
+static void mlx5_ldev_put(struct mlx5_lag *ldev)
+{
+ kref_put(&ldev->ref, mlx5_ldev_free);
+}
+
+static void mlx5_ldev_get(struct mlx5_lag *ldev)
+{
+ kref_get(&ldev->ref);
+}
+
+static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev)
+{
+ struct mlx5_lag *ldev;
+ int err;
+
+ ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
+ if (!ldev)
+ return NULL;
+
+ ldev->wq = create_singlethread_workqueue("mlx5_lag");
+ if (!ldev->wq) {
+ kfree(ldev);
+ return NULL;
+ }
+
+ kref_init(&ldev->ref);
+ INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
+
+ ldev->nb.notifier_call = mlx5_lag_netdev_event;
+ if (register_netdevice_notifier_net(&init_net, &ldev->nb)) {
+ ldev->nb.notifier_call = NULL;
+ mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
+ }
+
+ err = mlx5_lag_mp_init(ldev);
+ if (err)
+ mlx5_core_err(dev, "Failed to init multipath lag err=%d\n",
+ err);
+
+ return ldev;
+}
+
int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
struct net_device *ndev)
{
@@ -118,17 +176,24 @@ static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev)
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
u8 *port1, u8 *port2)
{
+ bool p1en;
+ bool p2en;
+
+ p1en = tracker->netdev_state[MLX5_LAG_P1].tx_enabled &&
+ tracker->netdev_state[MLX5_LAG_P1].link_up;
+
+ p2en = tracker->netdev_state[MLX5_LAG_P2].tx_enabled &&
+ tracker->netdev_state[MLX5_LAG_P2].link_up;
+
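+ /* Keep the identity mapping when both ports carry traffic (or neither
+ * can); otherwise steer both logical ports to the single usable one.
+ */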
*port1 = 1;
*port2 = 2;
- if (!tracker->netdev_state[MLX5_LAG_P1].tx_enabled ||
- !tracker->netdev_state[MLX5_LAG_P1].link_up) {
- *port1 = 2;
+ if ((!p1en && !p2en) || (p1en && p2en))
return;
- }
- if (!tracker->netdev_state[MLX5_LAG_P2].tx_enabled ||
- !tracker->netdev_state[MLX5_LAG_P2].link_up)
+ if (p1en)
*port2 = 1;
+ else
+ *port1 = 2;
}
void mlx5_modify_lag(struct mlx5_lag *ldev,
@@ -251,6 +316,10 @@ static void mlx5_lag_add_devices(struct mlx5_lag *ldev)
if (!ldev->pf[i].dev)
continue;
+ if (ldev->pf[i].dev->priv.flags &
+ MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
+ continue;
+
ldev->pf[i].dev->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(ldev->pf[i].dev);
}
@@ -269,6 +338,31 @@ static void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
}
}
+static void mlx5_disable_lag(struct mlx5_lag *ldev)
+{
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
+ bool roce_lag;
+ int err;
+
+ roce_lag = __mlx5_lag_is_roce(ldev);
+
+ if (roce_lag) {
+ if (!(dev0->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)) {
+ dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ mlx5_rescan_drivers_locked(dev0);
+ }
+ mlx5_nic_vport_disable_roce(dev1);
+ }
+
+ err = mlx5_deactivate_lag(ldev);
+ if (err)
+ return;
+
+ if (roce_lag)
+ mlx5_lag_add_devices(ldev);
+}
+
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
@@ -280,9 +374,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
if (!mlx5_lag_is_ready(ldev))
return;
- spin_lock(&lag_lock);
tracker = ldev->tracker;
- spin_unlock(&lag_lock);
do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
@@ -291,8 +383,9 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
!mlx5_sriov_is_enabled(dev1);
#ifdef CONFIG_MLX5_ESWITCH
- roce_lag &= dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
- dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE;
+ roce_lag = roce_lag &&
+ dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
+ dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE;
#endif
if (roce_lag)
@@ -316,20 +409,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
} else if (do_bond && __mlx5_lag_is_active(ldev)) {
mlx5_modify_lag(ldev, &tracker);
} else if (!do_bond && __mlx5_lag_is_active(ldev)) {
- roce_lag = __mlx5_lag_is_roce(ldev);
-
- if (roce_lag) {
- dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
- mlx5_rescan_drivers_locked(dev0);
- mlx5_nic_vport_disable_roce(dev1);
- }
-
- err = mlx5_deactivate_lag(ldev);
- if (err)
- return;
-
- if (roce_lag)
- mlx5_lag_add_devices(ldev);
+ mlx5_disable_lag(ldev);
}
}
@@ -481,9 +561,7 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
break;
}
- spin_lock(&lag_lock);
ldev->tracker = tracker;
- spin_unlock(&lag_lock);
if (changed)
mlx5_queue_bond_work(ldev, 0);
@@ -491,55 +569,52 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
}
-static struct mlx5_lag *mlx5_lag_dev_alloc(void)
+static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
+ struct mlx5_core_dev *dev,
+ struct net_device *netdev)
{
- struct mlx5_lag *ldev;
-
- ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
- if (!ldev)
- return NULL;
-
- ldev->wq = create_singlethread_workqueue("mlx5_lag");
- if (!ldev->wq) {
- kfree(ldev);
- return NULL;
- }
+ unsigned int fn = PCI_FUNC(dev->pdev->devfn);
- INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
+ if (fn >= MLX5_MAX_PORTS)
+ return;
- return ldev;
+ spin_lock(&lag_lock);
+ ldev->pf[fn].netdev = netdev;
+ ldev->tracker.netdev_state[fn].link_up = 0;
+ ldev->tracker.netdev_state[fn].tx_enabled = 0;
+ spin_unlock(&lag_lock);
}
-static void mlx5_lag_dev_free(struct mlx5_lag *ldev)
+static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
+ struct net_device *netdev)
{
- destroy_workqueue(ldev->wq);
- kfree(ldev);
+ int i;
+
+ spin_lock(&lag_lock);
+ for (i = 0; i < MLX5_MAX_PORTS; i++) {
+ if (ldev->pf[i].netdev == netdev) {
+ ldev->pf[i].netdev = NULL;
+ break;
+ }
+ }
+ spin_unlock(&lag_lock);
}
-static int mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
- struct mlx5_core_dev *dev,
- struct net_device *netdev)
+static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
+ struct mlx5_core_dev *dev)
{
unsigned int fn = PCI_FUNC(dev->pdev->devfn);
if (fn >= MLX5_MAX_PORTS)
- return -EPERM;
-
- spin_lock(&lag_lock);
- ldev->pf[fn].dev = dev;
- ldev->pf[fn].netdev = netdev;
- ldev->tracker.netdev_state[fn].link_up = 0;
- ldev->tracker.netdev_state[fn].tx_enabled = 0;
+ return;
+ ldev->pf[fn].dev = dev;
dev->priv.lag = ldev;
-
- spin_unlock(&lag_lock);
-
- return fn;
}
-static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
- struct mlx5_core_dev *dev)
+/* Must be called with intf_mutex held */
+static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev,
+ struct mlx5_core_dev *dev)
{
int i;
@@ -550,19 +625,15 @@ static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
if (i == MLX5_MAX_PORTS)
return;
- spin_lock(&lag_lock);
- memset(&ldev->pf[i], 0, sizeof(*ldev->pf));
-
+ ldev->pf[i].dev = NULL;
dev->priv.lag = NULL;
- spin_unlock(&lag_lock);
}
/* Must be called with intf_mutex held */
-void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
+static void __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev = NULL;
struct mlx5_core_dev *tmp_dev;
- int i, err;
if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
!MLX5_CAP_GEN(dev, lag_master) ||
@@ -574,67 +645,77 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
ldev = tmp_dev->priv.lag;
if (!ldev) {
- ldev = mlx5_lag_dev_alloc();
+ ldev = mlx5_lag_dev_alloc(dev);
if (!ldev) {
mlx5_core_err(dev, "Failed to alloc lag dev\n");
return;
}
+ } else {
+ mlx5_ldev_get(ldev);
}
- if (mlx5_lag_dev_add_pf(ldev, dev, netdev) < 0)
- return;
+ mlx5_ldev_add_mdev(ldev, dev);
- for (i = 0; i < MLX5_MAX_PORTS; i++)
- if (!ldev->pf[i].dev)
- break;
+}
- if (i >= MLX5_MAX_PORTS)
- ldev->flags |= MLX5_LAG_FLAG_READY;
+void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev)
+{
+ struct mlx5_lag *ldev;
- if (!ldev->nb.notifier_call) {
- ldev->nb.notifier_call = mlx5_lag_netdev_event;
- if (register_netdevice_notifier_net(&init_net, &ldev->nb)) {
- ldev->nb.notifier_call = NULL;
- mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
- }
- }
+ ldev = mlx5_lag_dev(dev);
+ if (!ldev)
+ return;
- err = mlx5_lag_mp_init(ldev);
- if (err)
- mlx5_core_err(dev, "Failed to init multipath lag err=%d\n",
- err);
+ mlx5_dev_list_lock();
+ mlx5_ldev_remove_mdev(ldev, dev);
+ mlx5_dev_list_unlock();
+ mlx5_ldev_put(ldev);
+}
+
+void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
+{
+ mlx5_dev_list_lock();
+ __mlx5_lag_dev_add_mdev(dev);
+ mlx5_dev_list_unlock();
}
/* Must be called with intf_mutex held */
-void mlx5_lag_remove(struct mlx5_core_dev *dev)
+void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev,
+ struct net_device *netdev)
{
struct mlx5_lag *ldev;
- int i;
- ldev = mlx5_lag_dev_get(dev);
+ ldev = mlx5_lag_dev(dev);
if (!ldev)
return;
if (__mlx5_lag_is_active(ldev))
- mlx5_deactivate_lag(ldev);
-
- mlx5_lag_dev_remove_pf(ldev, dev);
+ mlx5_disable_lag(ldev);
+ mlx5_ldev_remove_netdev(ldev, netdev);
ldev->flags &= ~MLX5_LAG_FLAG_READY;
+}
+
+/* Must be called with intf_mutex held */
+void mlx5_lag_add_netdev(struct mlx5_core_dev *dev,
+ struct net_device *netdev)
+{
+ struct mlx5_lag *ldev;
+ int i;
+
+ ldev = mlx5_lag_dev(dev);
+ if (!ldev)
+ return;
+
+ mlx5_ldev_add_netdev(ldev, dev, netdev);
for (i = 0; i < MLX5_MAX_PORTS; i++)
- if (ldev->pf[i].dev)
+ if (!ldev->pf[i].dev)
break;
- if (i == MLX5_MAX_PORTS) {
- if (ldev->nb.notifier_call) {
- unregister_netdevice_notifier_net(&init_net, &ldev->nb);
- ldev->nb.notifier_call = NULL;
- }
- mlx5_lag_mp_cleanup(ldev);
- cancel_delayed_work_sync(&ldev->bond_work);
- mlx5_lag_dev_free(ldev);
- }
+ if (i >= MLX5_MAX_PORTS)
+ ldev->flags |= MLX5_LAG_FLAG_READY;
}
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
@@ -643,7 +724,7 @@ bool mlx5_lag_is_roce(struct mlx5_core_dev *dev)
bool res;
spin_lock(&lag_lock);
- ldev = mlx5_lag_dev_get(dev);
+ ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_roce(ldev);
spin_unlock(&lag_lock);
@@ -657,7 +738,7 @@ bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
bool res;
spin_lock(&lag_lock);
- ldev = mlx5_lag_dev_get(dev);
+ ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_active(ldev);
spin_unlock(&lag_lock);
@@ -671,7 +752,7 @@ bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev)
bool res;
spin_lock(&lag_lock);
- ldev = mlx5_lag_dev_get(dev);
+ ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_sriov(ldev);
spin_unlock(&lag_lock);
@@ -684,7 +765,7 @@ void mlx5_lag_update(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev;
mlx5_dev_list_lock();
- ldev = mlx5_lag_dev_get(dev);
+ ldev = mlx5_lag_dev(dev);
if (!ldev)
goto unlock;
@@ -700,7 +781,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev;
spin_lock(&lag_lock);
- ldev = mlx5_lag_dev_get(dev);
+ ldev = mlx5_lag_dev(dev);
if (!(ldev && __mlx5_lag_is_roce(ldev)))
goto unlock;
@@ -729,7 +810,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
u8 port = 0;
spin_lock(&lag_lock);
- ldev = mlx5_lag_dev_get(dev);
+ ldev = mlx5_lag_dev(dev);
if (!(ldev && __mlx5_lag_is_roce(ldev)))
goto unlock;
@@ -765,7 +846,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
memset(values, 0, sizeof(*values) * num_counters);
spin_lock(&lag_lock);
- ldev = mlx5_lag_dev_get(dev);
+ ldev = mlx5_lag_dev(dev);
if (ldev && __mlx5_lag_is_active(ldev)) {
num_ports = MLX5_MAX_PORTS;
mdev[MLX5_LAG_P1] = ldev->pf[MLX5_LAG_P1].dev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
index 8d8cf2d0bc6d..191392c37558 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
@@ -40,6 +40,7 @@ struct lag_tracker {
struct mlx5_lag {
u8 flags;
u8 v2p_map[MLX5_MAX_PORTS];
+ struct kref ref;
struct lag_func pf[MLX5_MAX_PORTS];
struct lag_tracker tracker;
struct workqueue_struct *wq;
@@ -49,7 +50,7 @@ struct mlx5_lag {
};
static inline struct mlx5_lag *
-mlx5_lag_dev_get(struct mlx5_core_dev *dev)
+mlx5_lag_dev(struct mlx5_core_dev *dev)
{
return dev->priv.lag;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index 2c41a6920264..c4bf8b679541 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -28,7 +28,7 @@ bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
struct mlx5_lag *ldev;
bool res;
- ldev = mlx5_lag_dev_get(dev);
+ ldev = mlx5_lag_dev(dev);
res = ldev && __mlx5_lag_is_multipath(ldev);
return res;
@@ -307,6 +307,11 @@ int mlx5_lag_mp_init(struct mlx5_lag *ldev)
struct lag_mp *mp = &ldev->lag_mp;
int err;
+ /* always clear mfi, as it might become stale when a route delete event
+ * has been missed
+ */
+ mp->mfi = NULL;
+
if (mp->fib_nb.notifier_call)
return 0;
@@ -335,4 +340,5 @@ void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev)
unregister_fib_notifier(&init_net, &mp->fib_nb);
destroy_workqueue(mp->wq);
mp->fib_nb.notifier_call = NULL;
+ mp->mfi = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
index f607a3858ef5..624cedebb510 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2018 Mellanox Technologies */
+/* Copyright (c) 2018-2021, Mellanox Technologies inc. All rights reserved. */
#ifndef __LIB_MLX5_EQ_H__
#define __LIB_MLX5_EQ_H__
@@ -32,6 +32,7 @@ struct mlx5_eq {
unsigned int irqn;
u8 eqn;
struct mlx5_rsc_debug *dbg;
+ struct mlx5_irq *irq;
};
struct mlx5_eq_async {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index 00ef10a1a9f8..97e5845b4cfd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -6,6 +6,7 @@
#include <linux/mlx5/fs.h>
#include "lib/fs_chains.h"
+#include "fs_ft_pool.h"
#include "en/mapping.h"
#include "fs_core.h"
#include "en_tc.h"
@@ -13,25 +14,10 @@
#define chains_lock(chains) ((chains)->lock)
#define chains_ht(chains) ((chains)->chains_ht)
#define prios_ht(chains) ((chains)->prios_ht)
-#define ft_pool_left(chains) ((chains)->ft_left)
#define tc_default_ft(chains) ((chains)->tc_default_ft)
#define tc_end_ft(chains) ((chains)->tc_end_ft)
#define ns_to_chains_fs_prio(ns) ((ns) == MLX5_FLOW_NAMESPACE_FDB ? \
FDB_TC_OFFLOAD : MLX5E_TC_PRIO)
-
-/* Firmware currently has 4 pool of 4 sizes that it supports (FT_POOLS),
- * and a virtual memory region of 16M (MLX5_FT_SIZE), this region is duplicated
- * for each flow table pool. We can allocate up to 16M of each pool,
- * and we keep track of how much we used via get_next_avail_sz_from_pool.
- * Firmware doesn't report any of this for now.
- * ESW_POOL is expected to be sorted from large to small and match firmware
- * pools.
- */
-#define FT_SIZE (16 * 1024 * 1024)
-static const unsigned int FT_POOLS[] = { 4 * 1024 * 1024,
- 1 * 1024 * 1024,
- 64 * 1024,
- 128 };
#define FT_TBL_SZ (64 * 1024)
struct mlx5_fs_chains {
@@ -49,8 +35,6 @@ struct mlx5_fs_chains {
enum mlx5_flow_namespace_type ns;
u32 group_num;
u32 flags;
-
- int ft_left[ARRAY_SIZE(FT_POOLS)];
};
struct fs_chain {
@@ -107,7 +91,7 @@ bool mlx5_chains_prios_supported(struct mlx5_fs_chains *chains)
return chains->flags & MLX5_CHAINS_AND_PRIOS_SUPPORTED;
}
-static bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
+bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
{
return chains->flags & MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
}
@@ -160,54 +144,6 @@ mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
tc_end_ft(chains) = ft;
}
-#define POOL_NEXT_SIZE 0
-static int
-mlx5_chains_get_avail_sz_from_pool(struct mlx5_fs_chains *chains,
- int desired_size)
-{
- int i, found_i = -1;
-
- for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
- if (ft_pool_left(chains)[i] && FT_POOLS[i] > desired_size) {
- found_i = i;
- if (desired_size != POOL_NEXT_SIZE)
- break;
- }
- }
-
- if (found_i != -1) {
- --ft_pool_left(chains)[found_i];
- return FT_POOLS[found_i];
- }
-
- return 0;
-}
-
-static void
-mlx5_chains_put_sz_to_pool(struct mlx5_fs_chains *chains, int sz)
-{
- int i;
-
- for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
- if (sz == FT_POOLS[i]) {
- ++ft_pool_left(chains)[i];
- return;
- }
- }
-
- WARN_ONCE(1, "Couldn't find size %d in flow table size pool", sz);
-}
-
-static void
-mlx5_chains_init_sz_pool(struct mlx5_fs_chains *chains, u32 ft_max)
-{
- int i;
-
- for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--)
- ft_pool_left(chains)[i] =
- FT_POOLS[i] <= ft_max ? FT_SIZE / FT_POOLS[i] : 0;
-}
-
static struct mlx5_flow_table *
mlx5_chains_create_table(struct mlx5_fs_chains *chains,
u32 chain, u32 prio, u32 level)
@@ -221,11 +157,7 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
- sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
- mlx5_chains_get_avail_sz_from_pool(chains, FT_TBL_SZ) :
- mlx5_chains_get_avail_sz_from_pool(chains, POOL_NEXT_SIZE);
- if (!sz)
- return ERR_PTR(-ENOSPC);
+ sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? FT_TBL_SZ : POOL_NEXT_SIZE;
ft_attr.max_fte = sz;
/* We use tc_default_ft(chains) as the table's next_ft till
@@ -266,21 +198,12 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
if (IS_ERR(ft)) {
mlx5_core_warn(chains->dev, "Failed to create chains table err %d (chain: %d, prio: %d, level: %d, size: %d)\n",
(int)PTR_ERR(ft), chain, prio, level, sz);
- mlx5_chains_put_sz_to_pool(chains, sz);
return ft;
}
return ft;
}
-static void
-mlx5_chains_destroy_table(struct mlx5_fs_chains *chains,
- struct mlx5_flow_table *ft)
-{
- mlx5_chains_put_sz_to_pool(chains, ft->max_fte);
- mlx5_destroy_flow_table(ft);
-}
-
static int
create_chain_restore(struct fs_chain *chain)
{
@@ -336,9 +259,10 @@ create_chain_restore(struct fs_chain *chain)
MLX5_SET(set_action_in, modact, field,
mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mfield);
MLX5_SET(set_action_in, modact, offset,
- mlx5e_tc_attr_to_reg_mappings[chain_to_reg].moffset * 8);
+ mlx5e_tc_attr_to_reg_mappings[chain_to_reg].moffset);
MLX5_SET(set_action_in, modact, length,
- mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mlen * 8);
+ mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mlen == 32 ?
+ 0 : mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mlen);
MLX5_SET(set_action_in, modact, data, chain->id);
mod_hdr = mlx5_modify_header_alloc(chains->dev, chains->ns,
1, modact);
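The hunk above stops scaling moffset and mlen by 8: the register mappings now store bit offsets and widths directly, and a full 32-bit write is encoded as length 0, apparently because the hardware length field cannot hold the value 32. A tiny illustrative helper (the 0-means-32 convention is inferred from the mlen == 32 ? 0 : mlen expression above, not from documentation):

#include <assert.h>

/* Sketch: encode a modify-header write width for set_action_in. */
static unsigned int encode_modact_length(unsigned int mlen_bits)
{
        assert(mlen_bits >= 1 && mlen_bits <= 32);
        return mlen_bits == 32 ? 0 : mlen_bits; /* 0 denotes the whole 32-bit field */
}

int main(void)
{
        assert(encode_modact_length(16) == 16);
        assert(encode_modact_length(32) == 0);
        return 0;
}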
@@ -636,7 +560,7 @@ err_insert:
err_miss_rule:
mlx5_destroy_flow_group(miss_group);
err_group:
- mlx5_chains_destroy_table(chains, ft);
+ mlx5_destroy_flow_table(ft);
err_create:
err_alloc:
kvfree(prio_s);
@@ -659,7 +583,7 @@ mlx5_chains_destroy_prio(struct mlx5_fs_chains *chains,
prio_params);
mlx5_del_flow_rules(prio->miss_rule);
mlx5_destroy_flow_group(prio->miss_group);
- mlx5_chains_destroy_table(chains, prio->ft);
+ mlx5_destroy_flow_table(prio->ft);
mlx5_chains_put_chain(chain);
kvfree(prio);
}
@@ -784,7 +708,7 @@ void
mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains,
struct mlx5_flow_table *ft)
{
- mlx5_chains_destroy_table(chains, ft);
+ mlx5_destroy_flow_table(ft);
}
static struct mlx5_fs_chains *
@@ -816,8 +740,6 @@ mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
mlx5_chains_get_chain_range(chains_priv),
mlx5_chains_get_prio_range(chains_priv));
- mlx5_chains_init_sz_pool(chains_priv, attr->max_ft_sz);
-
err = rhashtable_init(&chains_ht(chains_priv), &chain_params);
if (err)
goto init_chains_ht_err;
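For reference, the deleted helpers implemented a local size-bucket accounting scheme that is now centralized behind fs_ft_pool. A standalone sketch of that scheme, reusing the removed FT_POOLS sizes (everything else is illustrative):

#include <stdio.h>

#define FT_SIZE (16 * 1024 * 1024)
#define POOL_NEXT_SIZE 0

static const unsigned int FT_POOLS[] = { 4 * 1024 * 1024, 1 * 1024 * 1024,
                                         64 * 1024, 128 };
#define NPOOLS (sizeof(FT_POOLS) / sizeof(FT_POOLS[0]))

static int ft_left[NPOOLS];

static void init_sz_pool(unsigned int ft_max)
{
        for (int i = NPOOLS - 1; i >= 0; i--)
                ft_left[i] = FT_POOLS[i] <= ft_max ? FT_SIZE / FT_POOLS[i] : 0;
}

/* pool size satisfying desired_size (largest available for POOL_NEXT_SIZE),
 * or 0 on exhaustion
 */
static unsigned int get_avail_sz(unsigned int desired_size)
{
        int found_i = -1;

        for (int i = NPOOLS - 1; i >= 0; i--) {
                if (ft_left[i] && FT_POOLS[i] > desired_size) {
                        found_i = i;
                        if (desired_size != POOL_NEXT_SIZE)
                                break;
                }
        }
        if (found_i < 0)
                return 0;
        --ft_left[found_i];
        return FT_POOLS[found_i];
}

int main(void)
{
        init_sz_pool(4 * 1024 * 1024);
        printf("next size: %u\n", get_avail_sz(POOL_NEXT_SIZE)); /* 4194304 */
        printf("for 64k:   %u\n", get_avail_sz(64 * 1024));      /* 1048576 */
        return 0;
}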
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
index e96f345e7dae..d50bdb226cef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
@@ -28,6 +28,7 @@ struct mlx5_chains_attr {
bool
mlx5_chains_prios_supported(struct mlx5_fs_chains *chains);
+bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains);
bool
mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains);
u32
@@ -70,6 +71,10 @@ mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
#else /* CONFIG_MLX5_CLS_ACT */
+static inline bool
+mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
+{ return false; }
+
static inline struct mlx5_flow_table *
mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
u32 level) { return ERR_PTR(-EOPNOTSUPP); }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
index fd8449ff9e17..839a01da110f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
@@ -33,6 +33,7 @@
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/mpfs.h>
#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"
#include "lib/mpfs.h"
@@ -175,6 +176,7 @@ out:
mutex_unlock(&mpfs->lock);
return err;
}
+EXPORT_SYMBOL(mlx5_mpfs_add_mac);
int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
{
@@ -206,3 +208,4 @@ unlock:
mutex_unlock(&mpfs->lock);
return err;
}
+EXPORT_SYMBOL(mlx5_mpfs_del_mac);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
index 4a7b2c3203a7..4a293542a7aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h
@@ -84,12 +84,9 @@ struct l2addr_node {
#ifdef CONFIG_MLX5_MPFS
int mlx5_mpfs_init(struct mlx5_core_dev *dev);
void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev);
-int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac);
-int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac);
#else /* #ifndef CONFIG_MLX5_MPFS */
static inline int mlx5_mpfs_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) {}
-static inline int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
-static inline int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
#endif
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/sf.h
new file mode 100644
index 000000000000..84e5683861be
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sf.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies Ltd */
+
+#ifndef __LIB_MLX5_SF_H__
+#define __LIB_MLX5_SF_H__
+
+#include <linux/mlx5/driver.h>
+
+static inline u16 mlx5_sf_start_function_id(const struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, sf_base_id);
+}
+
+#ifdef CONFIG_MLX5_SF
+
+static inline bool mlx5_sf_supported(const struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN(dev, sf);
+}
+
+static inline u16 mlx5_sf_max_functions(const struct mlx5_core_dev *dev)
+{
+ if (!mlx5_sf_supported(dev))
+ return 0;
+ if (MLX5_CAP_GEN(dev, max_num_sf))
+ return MLX5_CAP_GEN(dev, max_num_sf);
+ else
+ return 1 << MLX5_CAP_GEN(dev, log_max_sf);
+}
+
+#else
+
+static inline bool mlx5_sf_supported(const struct mlx5_core_dev *dev)
+{
+ return false;
+}
+
+static inline u16 mlx5_sf_max_functions(const struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
+#endif
+
+#endif
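The relocated mlx5_sf_max_functions() prefers the explicit max_num_sf capability and only falls back to 1 << log_max_sf when it is unreported. A userspace sketch of that fallback, with made-up capability values:

#include <stdio.h>

struct caps {
        int sf;                         /* SFs supported at all? */
        unsigned int max_num_sf;        /* 0 means "not reported" */
        unsigned int log_max_sf;
};

static unsigned short sf_max_functions(const struct caps *c)
{
        if (!c->sf)
                return 0;
        if (c->max_num_sf)
                return c->max_num_sf;   /* explicit capability wins */
        return 1u << c->log_max_sf;     /* otherwise derive from the log */
}

int main(void)
{
        struct caps a = { .sf = 1, .max_num_sf = 100 };
        struct caps b = { .sf = 1, .max_num_sf = 0, .log_max_sf = 6 };

        printf("%u %u\n", sf_max_functions(&a), sf_max_functions(&b)); /* 100 64 */
        return 0;
}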
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c114365eb126..390b1d3a6fde 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -76,6 +76,7 @@
#include "sf/vhca_event.h"
#include "sf/dev/dev.h"
#include "sf/sf.h"
+#include "mlx5_irq.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
@@ -503,7 +504,7 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
{
- struct mlx5_profile *prof = dev->profile;
+ struct mlx5_profile *prof = &dev->profile;
void *set_hca_cap;
int err;
@@ -524,11 +525,11 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
to_fw_pkey_sz(dev, 128));
/* Check log_max_qp from HCA caps to set in current profile */
- if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) {
+ if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
- profile[prof_sel].log_max_qp,
+ prof->log_max_qp,
MLX5_CAP_GEN_MAX(dev, log_max_qp));
- profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+ prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
}
if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
@@ -1185,6 +1186,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
}
mlx5_sf_dev_table_create(dev);
+ mlx5_lag_add_mdev(dev);
return 0;
@@ -1219,6 +1221,7 @@ err_irq_table:
static void mlx5_unload(struct mlx5_core_dev *dev)
{
+ mlx5_lag_remove_mdev(dev);
mlx5_sf_dev_table_destroy(dev);
mlx5_sriov_detach(dev);
mlx5_ec_cleanup(dev);
@@ -1381,8 +1384,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
struct mlx5_priv *priv = &dev->priv;
int err;
- dev->profile = &profile[profile_idx];
-
+ memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
mutex_init(&dev->intf_state_mutex);
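Embedding the profile via memcpy() matters because the old pointer aliased the global profile[] array: clamping log_max_qp for one weak HCA silently rewrote the template for every later probe. A minimal sketch of the bug class this avoids (illustrative types, not mlx5 code):

#include <stdio.h>
#include <string.h>

struct profile { int log_max_qp; };

static struct profile profiles[] = { { .log_max_qp = 18 } };

struct dev {
        struct profile prof;    /* private copy, as in the new code */
};

static void dev_init(struct dev *d, int idx, int hca_limit)
{
        memcpy(&d->prof, &profiles[idx], sizeof(d->prof));
        if (d->prof.log_max_qp > hca_limit)
                d->prof.log_max_qp = hca_limit; /* clamps only this device */
}

int main(void)
{
        struct dev a, b;

        dev_init(&a, 0, 16);    /* weak HCA clamps its own copy */
        dev_init(&b, 0, 18);    /* template untouched: b keeps 18 */
        printf("%d %d %d\n", a.prof.log_max_qp, b.prof.log_max_qp,
               profiles[0].log_max_qp);         /* 16 18 18 */
        return 0;
}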
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index a22b706eebd3..343807ac2036 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -164,27 +164,10 @@ int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group,
int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
u8 feature_group, u8 access_reg_group);
-void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
-void mlx5_lag_remove(struct mlx5_core_dev *dev);
-
-int mlx5_irq_table_init(struct mlx5_core_dev *dev);
-void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
-int mlx5_irq_table_create(struct mlx5_core_dev *dev);
-void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
-int mlx5_irq_attach_nb(struct mlx5_irq_table *irq_table, int vecidx,
- struct notifier_block *nb);
-int mlx5_irq_detach_nb(struct mlx5_irq_table *irq_table, int vecidx,
- struct notifier_block *nb);
-
-int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int devfn,
- int msix_vec_count);
-int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs);
-
-struct cpumask *
-mlx5_irq_get_affinity_mask(struct mlx5_irq_table *irq_table, int vecidx);
-struct cpu_rmap *mlx5_irq_get_rmap(struct mlx5_irq_table *table);
-int mlx5_irq_get_num_comp(struct mlx5_irq_table *table);
-struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev);
+void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, struct net_device *netdev);
+void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev, struct net_device *netdev);
+void mlx5_lag_add_mdev(struct mlx5_core_dev *dev);
+void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev);
int mlx5_events_init(struct mlx5_core_dev *dev);
void mlx5_events_cleanup(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
new file mode 100644
index 000000000000..abd024173c42
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#ifndef __MLX5_IRQ_H__
+#define __MLX5_IRQ_H__
+
+#include <linux/mlx5/driver.h>
+
+#define MLX5_COMP_EQS_PER_SF 8
+
+#define MLX5_IRQ_EQ_CTRL (0)
+
+struct mlx5_irq;
+
+int mlx5_irq_table_init(struct mlx5_core_dev *dev);
+void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
+int mlx5_irq_table_create(struct mlx5_core_dev *dev);
+void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
+int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table);
+int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table);
+struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev);
+
+int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int devfn,
+ int msix_vec_count);
+int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs);
+
+struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
+ struct cpumask *affinity);
+void mlx5_irq_release(struct mlx5_irq *irq);
+int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
+int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
+struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq);
+int mlx5_irq_get_index(struct mlx5_irq *irq);
+
+#endif /* __MLX5_IRQ_H__ */
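A hedged usage sketch of this header's request/attach/release flow, as a consumer might call it from driver context (not a standalone program; the caller name is hypothetical and error handling is abbreviated):

#include <linux/err.h>
#include <linux/notifier.h>
#include "mlx5_irq.h"

static int example_hook_eq_irq(struct mlx5_core_dev *dev, u16 vecidx,
                               struct notifier_block *nb,
                               struct mlx5_irq **out_irq)
{
        struct mlx5_irq *irq;
        int err;

        irq = mlx5_irq_request(dev, vecidx, NULL);      /* NULL: no affinity hint */
        if (IS_ERR(irq))
                return PTR_ERR(irq);

        err = mlx5_irq_attach_nb(irq, nb);              /* takes its own reference */
        if (err) {
                mlx5_irq_release(irq);
                return err;
        }
        *out_irq = irq;
        return 0;
}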
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 1f907df5b3a2..27de8da8edf7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -6,60 +6,52 @@
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
+#include "mlx5_irq.h"
+#include "lib/sf.h"
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#define MLX5_MAX_IRQ_NAME (32)
+/* max irq_index is 255, so the decimal index takes at most three chars */
+#define MLX5_MAX_IRQ_IDX_CHARS (3)
+
+#define MLX5_SFS_PER_CTRL_IRQ 64
+#define MLX5_IRQ_CTRL_SF_MAX 8
+/* min number of vectors required for SFs to be enabled */
+#define MLX5_IRQ_VEC_COMP_BASE_SF 2
+
+#define MLX5_EQ_SHARE_IRQ_MAX_COMP (8)
+#define MLX5_EQ_SHARE_IRQ_MAX_CTRL (UINT_MAX)
+#define MLX5_EQ_SHARE_IRQ_MIN_COMP (1)
+#define MLX5_EQ_SHARE_IRQ_MIN_CTRL (4)
+#define MLX5_EQ_REFS_PER_IRQ (2)
struct mlx5_irq {
+ u32 index;
struct atomic_notifier_head nh;
cpumask_var_t mask;
char name[MLX5_MAX_IRQ_NAME];
+ struct kref kref;
+ int irqn;
+ struct mlx5_irq_pool *pool;
};
-struct mlx5_irq_table {
- struct mlx5_irq *irq;
- int nvec;
-#ifdef CONFIG_RFS_ACCEL
- struct cpu_rmap *rmap;
-#endif
+struct mlx5_irq_pool {
+ char name[MLX5_MAX_IRQ_NAME - MLX5_MAX_IRQ_IDX_CHARS];
+ struct xa_limit xa_num_irqs;
+ struct mutex lock; /* sync IRQs creations */
+ struct xarray irqs;
+ u32 max_threshold;
+ u32 min_threshold;
+ struct mlx5_core_dev *dev;
};
-int mlx5_irq_table_init(struct mlx5_core_dev *dev)
-{
- struct mlx5_irq_table *irq_table;
-
- if (mlx5_core_is_sf(dev))
- return 0;
-
- irq_table = kvzalloc(sizeof(*irq_table), GFP_KERNEL);
- if (!irq_table)
- return -ENOMEM;
-
- dev->priv.irq_table = irq_table;
- return 0;
-}
-
-void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
-{
- if (mlx5_core_is_sf(dev))
- return;
-
- kvfree(dev->priv.irq_table);
-}
-
-int mlx5_irq_get_num_comp(struct mlx5_irq_table *table)
-{
- return table->nvec - MLX5_IRQ_VEC_COMP_BASE;
-}
-
-static struct mlx5_irq *mlx5_irq_get(struct mlx5_core_dev *dev, int vecidx)
-{
- struct mlx5_irq_table *irq_table = dev->priv.irq_table;
-
- return &irq_table->irq[vecidx];
-}
+struct mlx5_irq_table {
+ struct mlx5_irq_pool *pf_pool;
+ struct mlx5_irq_pool *sf_ctrl_pool;
+ struct mlx5_irq_pool *sf_comp_pool;
+};
/**
* mlx5_get_default_msix_vec_count - Get the default number of MSI-X vectors
@@ -95,9 +87,10 @@ int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs)
int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
int msix_vec_count)
{
- int sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *hca_cap = NULL, *query_cap = NULL, *cap;
int num_vf_msix, min_msix, max_msix;
- void *hca_cap, *cap;
int ret;
num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
@@ -116,11 +109,20 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
if (msix_vec_count > max_msix)
return -EOVERFLOW;
- hca_cap = kzalloc(sz, GFP_KERNEL);
- if (!hca_cap)
- return -ENOMEM;
+ query_cap = kzalloc(query_sz, GFP_KERNEL);
+ hca_cap = kzalloc(set_sz, GFP_KERNEL);
+ if (!hca_cap || !query_cap) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = mlx5_vport_get_other_func_cap(dev, function_id, query_cap);
+ if (ret)
+ goto out;
cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
+ memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
+ MLX5_UN_SZ_BYTES(hca_cap_union));
MLX5_SET(cmd_hca_cap, cap, dynamic_msix_table_size, msix_vec_count);
MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
@@ -130,38 +132,52 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
+out:
kfree(hca_cap);
+ kfree(query_cap);
return ret;
}
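The change above turns SET_HCA_CAP into a read-modify-write: the current capabilities are queried and copied into the SET payload before the single MSI-X field is changed, so the other capability bits are preserved instead of being written back as zeroes. A generic standalone sketch of the pattern:

#include <stdio.h>

struct hca_caps { unsigned int msix_table_size; unsigned int other_bits; };

static struct hca_caps device_caps = { .msix_table_size = 4, .other_bits = 0xabc };

static void query_caps(struct hca_caps *out) { *out = device_caps; }
static void set_caps(const struct hca_caps *in) { device_caps = *in; }

static void set_msix_vec_count(unsigned int count)
{
        struct hca_caps caps;

        query_caps(&caps);              /* read the current state */
        caps.msix_table_size = count;   /* modify only one field */
        set_caps(&caps);                /* write the whole set back */
}

int main(void)
{
        set_msix_vec_count(8);
        printf("%u %#x\n", device_caps.msix_table_size, device_caps.other_bits);
        /* prints "8 0xabc": unrelated bits survive the update */
        return 0;
}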
-int mlx5_irq_attach_nb(struct mlx5_irq_table *irq_table, int vecidx,
- struct notifier_block *nb)
+static void irq_release(struct kref *kref)
{
- struct mlx5_irq *irq;
+ struct mlx5_irq *irq = container_of(kref, struct mlx5_irq, kref);
+ struct mlx5_irq_pool *pool = irq->pool;
- irq = &irq_table->irq[vecidx];
- return atomic_notifier_chain_register(&irq->nh, nb);
+ xa_erase(&pool->irqs, irq->index);
+ /* free_irq requires that affinity and rmap be cleared before
+ * calling it. This is why there is asymmetry with set_rmap, which
+ * should be called after alloc_irq but before request_irq.
+ */
+ irq_set_affinity_hint(irq->irqn, NULL);
+ free_cpumask_var(irq->mask);
+ free_irq(irq->irqn, &irq->nh);
+ kfree(irq);
}
-int mlx5_irq_detach_nb(struct mlx5_irq_table *irq_table, int vecidx,
- struct notifier_block *nb)
+static void irq_put(struct mlx5_irq *irq)
{
- struct mlx5_irq *irq;
+ struct mlx5_irq_pool *pool = irq->pool;
- irq = &irq_table->irq[vecidx];
- return atomic_notifier_chain_unregister(&irq->nh, nb);
+ mutex_lock(&pool->lock);
+ kref_put(&irq->kref, irq_release);
+ mutex_unlock(&pool->lock);
}
-static irqreturn_t mlx5_irq_int_handler(int irq, void *nh)
+static irqreturn_t irq_int_handler(int irq, void *nh)
{
atomic_notifier_call_chain(nh, 0, NULL);
return IRQ_HANDLED;
}
+static void irq_sf_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
+{
+ snprintf(name, MLX5_MAX_IRQ_NAME, "%s%d", pool->name, vecidx);
+}
+
static void irq_set_name(char *name, int vecidx)
{
if (vecidx == 0) {
- snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async");
+ snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async%d", vecidx);
return;
}
@@ -169,251 +185,431 @@ static void irq_set_name(char *name, int vecidx)
vecidx - MLX5_IRQ_VEC_COMP_BASE);
}
-static int request_irqs(struct mlx5_core_dev *dev, int nvec)
+static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
{
+ struct mlx5_core_dev *dev = pool->dev;
char name[MLX5_MAX_IRQ_NAME];
+ struct mlx5_irq *irq;
int err;
- int i;
-
- for (i = 0; i < nvec; i++) {
- struct mlx5_irq *irq = mlx5_irq_get(dev, i);
- int irqn = pci_irq_vector(dev->pdev, i);
+ irq = kzalloc(sizeof(*irq), GFP_KERNEL);
+ if (!irq)
+ return ERR_PTR(-ENOMEM);
+ irq->irqn = pci_irq_vector(dev->pdev, i);
+ if (!pool->name[0])
irq_set_name(name, i);
- ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
- snprintf(irq->name, MLX5_MAX_IRQ_NAME,
- "%s@pci:%s", name, pci_name(dev->pdev));
- err = request_irq(irqn, mlx5_irq_int_handler, 0, irq->name,
- &irq->nh);
- if (err) {
- mlx5_core_err(dev, "Failed to request irq\n");
- goto err_request_irq;
- }
+ else
+ irq_sf_set_name(pool, name, i);
+ ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
+ snprintf(irq->name, MLX5_MAX_IRQ_NAME,
+ "%s@pci:%s", name, pci_name(dev->pdev));
+ err = request_irq(irq->irqn, irq_int_handler, 0, irq->name,
+ &irq->nh);
+ if (err) {
+ mlx5_core_err(dev, "Failed to request irq. err = %d\n", err);
+ goto err_req_irq;
}
- return 0;
+ if (!zalloc_cpumask_var(&irq->mask, GFP_KERNEL)) {
+ mlx5_core_warn(dev, "zalloc_cpumask_var failed\n");
+ err = -ENOMEM;
+ goto err_cpumask;
+ }
+ kref_init(&irq->kref);
+ irq->index = i;
+ err = xa_err(xa_store(&pool->irqs, irq->index, irq, GFP_KERNEL));
+ if (err) {
+ mlx5_core_err(dev, "Failed to alloc xa entry for irq(%u). err = %d\n",
+ irq->index, err);
+ goto err_xa;
+ }
+ irq->pool = pool;
+ return irq;
+err_xa:
+ free_cpumask_var(irq->mask);
+err_cpumask:
+ free_irq(irq->irqn, &irq->nh);
+err_req_irq:
+ kfree(irq);
+ return ERR_PTR(err);
+}
-err_request_irq:
- while (i--) {
- struct mlx5_irq *irq = mlx5_irq_get(dev, i);
- int irqn = pci_irq_vector(dev->pdev, i);
+int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
+{
+ int err;
- free_irq(irqn, &irq->nh);
- }
- return err;
+ err = kref_get_unless_zero(&irq->kref);
+ if (WARN_ON_ONCE(!err))
+ /* Something has gone very wrong: we are enabling an EQ
+ * on a non-existent IRQ.
+ */
+ return -ENOENT;
+ err = atomic_notifier_chain_register(&irq->nh, nb);
+ if (err)
+ irq_put(irq);
+ return err;
}
-static void irq_clear_rmap(struct mlx5_core_dev *dev)
+int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb)
{
-#ifdef CONFIG_RFS_ACCEL
- struct mlx5_irq_table *irq_table = dev->priv.irq_table;
+ irq_put(irq);
+ return atomic_notifier_chain_unregister(&irq->nh, nb);
+}
- free_irq_cpu_rmap(irq_table->rmap);
-#endif
+struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq)
+{
+ return irq->mask;
}
-static int irq_set_rmap(struct mlx5_core_dev *mdev)
+int mlx5_irq_get_index(struct mlx5_irq *irq)
{
- int err = 0;
-#ifdef CONFIG_RFS_ACCEL
- struct mlx5_irq_table *irq_table = mdev->priv.irq_table;
- int num_affinity_vec;
- int vecidx;
+ return irq->index;
+}
- num_affinity_vec = mlx5_irq_get_num_comp(irq_table);
- irq_table->rmap = alloc_irq_cpu_rmap(num_affinity_vec);
- if (!irq_table->rmap) {
- err = -ENOMEM;
- mlx5_core_err(mdev, "Failed to allocate cpu_rmap. err %d", err);
- goto err_out;
+/* irq_pool API */
+
+/* creating an irq from irq_pool */
+static struct mlx5_irq *irq_pool_create_irq(struct mlx5_irq_pool *pool,
+ struct cpumask *affinity)
+{
+ struct mlx5_irq *irq;
+ u32 irq_index;
+ int err;
+
+ err = xa_alloc(&pool->irqs, &irq_index, NULL, pool->xa_num_irqs,
+ GFP_KERNEL);
+ if (err)
+ return ERR_PTR(err);
+ irq = irq_request(pool, irq_index);
+ if (IS_ERR(irq))
+ return irq;
+ cpumask_copy(irq->mask, affinity);
+ irq_set_affinity_hint(irq->irqn, irq->mask);
+ return irq;
+}
+
+/* looking for the irq with the smallest refcount and the same affinity */
+static struct mlx5_irq *irq_pool_find_least_loaded(struct mlx5_irq_pool *pool,
+ struct cpumask *affinity)
+{
+ int start = pool->xa_num_irqs.min;
+ int end = pool->xa_num_irqs.max;
+ struct mlx5_irq *irq = NULL;
+ struct mlx5_irq *iter;
+ unsigned long index;
+
+ lockdep_assert_held(&pool->lock);
+ xa_for_each_range(&pool->irqs, index, iter, start, end) {
+ if (!cpumask_equal(iter->mask, affinity))
+ continue;
+ if (kref_read(&iter->kref) < pool->min_threshold)
+ return iter;
+ if (!irq || kref_read(&iter->kref) <
+ kref_read(&irq->kref))
+ irq = iter;
}
+ return irq;
+}
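A standalone sketch of the selection policy in irq_pool_find_least_loaded(): among IRQs whose affinity matches, an under-threshold IRQ wins immediately, otherwise the least-referenced one is chosen (plain arrays stand in for the xarray and kref; illustrative only):

#include <stdio.h>

struct irq { int cpu; int refs; };

static struct irq *find_least_loaded(struct irq *irqs, int n, int cpu,
                                     int min_threshold)
{
        struct irq *best = NULL;

        for (int i = 0; i < n; i++) {
                if (irqs[i].cpu != cpu)         /* affinity must match */
                        continue;
                if (irqs[i].refs < min_threshold)
                        return &irqs[i];        /* under-loaded: take it now */
                if (!best || irqs[i].refs < best->refs)
                        best = &irqs[i];
        }
        return best;    /* least loaded match, possibly NULL */
}

int main(void)
{
        struct irq irqs[] = { { 0, 5 }, { 1, 3 }, { 0, 4 } };
        struct irq *pick = find_least_loaded(irqs, 3, 0, 2);

        printf("picked refs=%d\n", pick ? pick->refs : -1);     /* 4 */
        return 0;
}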
- vecidx = MLX5_IRQ_VEC_COMP_BASE;
- for (; vecidx < irq_table->nvec; vecidx++) {
- err = irq_cpu_rmap_add(irq_table->rmap,
- pci_irq_vector(mdev->pdev, vecidx));
- if (err) {
- mlx5_core_err(mdev, "irq_cpu_rmap_add failed. err %d",
- err);
- goto err_irq_cpu_rmap_add;
+/* requesting an irq from a given pool according to given affinity */
+static struct mlx5_irq *irq_pool_request_affinity(struct mlx5_irq_pool *pool,
+ struct cpumask *affinity)
+{
+ struct mlx5_irq *least_loaded_irq, *new_irq;
+
+ mutex_lock(&pool->lock);
+ least_loaded_irq = irq_pool_find_least_loaded(pool, affinity);
+ if (least_loaded_irq &&
+ kref_read(&least_loaded_irq->kref) < pool->min_threshold)
+ goto out;
+ new_irq = irq_pool_create_irq(pool, affinity);
+ if (IS_ERR(new_irq)) {
+ if (!least_loaded_irq) {
+ mlx5_core_err(pool->dev, "Didn't find IRQ for cpu = %u\n",
+ cpumask_first(affinity));
+ mutex_unlock(&pool->lock);
+ return new_irq;
}
+ /* We failed to create a new IRQ for the requested affinity,
+ * so share an existing IRQ instead.
+ */
+ goto out;
}
- return 0;
+ least_loaded_irq = new_irq;
+ goto unlock;
+out:
+ kref_get(&least_loaded_irq->kref);
+ if (kref_read(&least_loaded_irq->kref) > pool->max_threshold)
+ mlx5_core_dbg(pool->dev, "IRQ %u overloaded, pool_name: %s, %u EQs on this irq\n",
+ least_loaded_irq->irqn, pool->name,
+ kref_read(&least_loaded_irq->kref) / MLX5_EQ_REFS_PER_IRQ);
+unlock:
+ mutex_unlock(&pool->lock);
+ return least_loaded_irq;
+}
-err_irq_cpu_rmap_add:
- irq_clear_rmap(mdev);
-err_out:
-#endif
- return err;
+/* requesting an irq from a given pool according to given index */
+static struct mlx5_irq *
+irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
+ struct cpumask *affinity)
+{
+ struct mlx5_irq *irq;
+
+ mutex_lock(&pool->lock);
+ irq = xa_load(&pool->irqs, vecidx);
+ if (irq) {
+ kref_get(&irq->kref);
+ goto unlock;
+ }
+ irq = irq_request(pool, vecidx);
+ if (IS_ERR(irq) || !affinity)
+ goto unlock;
+ cpumask_copy(irq->mask, affinity);
+ irq_set_affinity_hint(irq->irqn, irq->mask);
+unlock:
+ mutex_unlock(&pool->lock);
+ return irq;
}
-/* Completion IRQ vectors */
+static struct mlx5_irq_pool *find_sf_irq_pool(struct mlx5_irq_table *irq_table,
+ int i, struct cpumask *affinity)
+{
+ if (cpumask_empty(affinity) && i == MLX5_IRQ_EQ_CTRL)
+ return irq_table->sf_ctrl_pool;
+ return irq_table->sf_comp_pool;
+}
-static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
+/**
+ * mlx5_irq_release - release an IRQ back to the system.
+ * @irq: irq to be released.
+ */
+void mlx5_irq_release(struct mlx5_irq *irq)
{
- int vecidx = MLX5_IRQ_VEC_COMP_BASE + i;
+ synchronize_irq(irq->irqn);
+ irq_put(irq);
+}
+
+/**
+ * mlx5_irq_request - request an IRQ for mlx5 device.
+ * @dev: mlx5 device that is requesting the IRQ.
+ * @vecidx: vector index of the IRQ. This argument is ignored if affinity is
+ * provided.
+ * @affinity: cpumask requested for this IRQ.
+ *
+ * This function returns a pointer to the IRQ, or an ERR_PTR in case of error.
+ */
+struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
+ struct cpumask *affinity)
+{
+ struct mlx5_irq_table *irq_table = mlx5_irq_table_get(dev);
+ struct mlx5_irq_pool *pool;
struct mlx5_irq *irq;
- int irqn;
- irq = mlx5_irq_get(mdev, vecidx);
- irqn = pci_irq_vector(mdev->pdev, vecidx);
- if (!zalloc_cpumask_var(&irq->mask, GFP_KERNEL)) {
- mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
- return -ENOMEM;
+ if (mlx5_core_is_sf(dev)) {
+ pool = find_sf_irq_pool(irq_table, vecidx, affinity);
+ if (!pool)
+ /* no dedicated SF IRQ pool, fall back to the PF IRQs */
+ goto pf_irq;
+ if (cpumask_empty(affinity) && !strcmp(pool->name, "mlx5_sf_comp"))
+ /* an SF user requested an IRQ by explicit vecidx */
+ irq = irq_pool_request_vector(pool, vecidx, NULL);
+ else
+ irq = irq_pool_request_affinity(pool, affinity);
+ goto out;
}
+pf_irq:
+ pool = irq_table->pf_pool;
+ irq = irq_pool_request_vector(pool, vecidx, affinity);
+out:
+ if (IS_ERR(irq))
+ return irq;
+ mlx5_core_dbg(dev, "irq %u mapped to cpu %*pbl, %u EQs on this irq\n",
+ irq->irqn, cpumask_pr_args(affinity),
+ kref_read(&irq->kref) / MLX5_EQ_REFS_PER_IRQ);
+ return irq;
+}
- cpumask_set_cpu(cpumask_local_spread(i, mdev->priv.numa_node),
- irq->mask);
- if (IS_ENABLED(CONFIG_SMP) &&
- irq_set_affinity_hint(irqn, irq->mask))
- mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x",
- irqn);
-
- return 0;
+static struct mlx5_irq_pool *
+irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
+ u32 min_threshold, u32 max_threshold)
+{
+ struct mlx5_irq_pool *pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
+
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+ pool->dev = dev;
+ xa_init_flags(&pool->irqs, XA_FLAGS_ALLOC);
+ pool->xa_num_irqs.min = start;
+ pool->xa_num_irqs.max = start + size - 1;
+ if (name)
+ snprintf(pool->name, MLX5_MAX_IRQ_NAME - MLX5_MAX_IRQ_IDX_CHARS,
+ "%s", name);
+ pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
+ pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
+ mutex_init(&pool->lock);
+ mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
+ name, size, start);
+ return pool;
}
-static void clear_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i)
+static void irq_pool_free(struct mlx5_irq_pool *pool)
{
- int vecidx = MLX5_IRQ_VEC_COMP_BASE + i;
struct mlx5_irq *irq;
- int irqn;
+ unsigned long index;
- irq = mlx5_irq_get(mdev, vecidx);
- irqn = pci_irq_vector(mdev->pdev, vecidx);
- irq_set_affinity_hint(irqn, NULL);
- free_cpumask_var(irq->mask);
+ xa_for_each(&pool->irqs, index, irq)
+ irq_release(&irq->kref);
+ xa_destroy(&pool->irqs);
+ kvfree(pool);
}
-static int set_comp_irq_affinity_hints(struct mlx5_core_dev *mdev)
+static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pf_vec)
{
- int nvec = mlx5_irq_get_num_comp(mdev->priv.irq_table);
+ struct mlx5_irq_table *table = dev->priv.irq_table;
+ int num_sf_ctrl_by_msix;
+ int num_sf_ctrl_by_sfs;
+ int num_sf_ctrl;
int err;
- int i;
- for (i = 0; i < nvec; i++) {
- err = set_comp_irq_affinity_hint(mdev, i);
- if (err)
- goto err_out;
+ /* init pf_pool */
+ table->pf_pool = irq_pool_alloc(dev, 0, pf_vec, NULL,
+ MLX5_EQ_SHARE_IRQ_MIN_COMP,
+ MLX5_EQ_SHARE_IRQ_MAX_COMP);
+ if (IS_ERR(table->pf_pool))
+ return PTR_ERR(table->pf_pool);
+ if (!mlx5_sf_max_functions(dev))
+ return 0;
+ if (sf_vec < MLX5_IRQ_VEC_COMP_BASE_SF) {
+ mlx5_core_err(dev, "Not enught IRQs for SFs. SF may run at lower performance\n");
+ return 0;
}
+ /* init sf_ctrl_pool */
+ num_sf_ctrl_by_msix = DIV_ROUND_UP(sf_vec, MLX5_COMP_EQS_PER_SF);
+ num_sf_ctrl_by_sfs = DIV_ROUND_UP(mlx5_sf_max_functions(dev),
+ MLX5_SFS_PER_CTRL_IRQ);
+ num_sf_ctrl = min_t(int, num_sf_ctrl_by_msix, num_sf_ctrl_by_sfs);
+ num_sf_ctrl = min_t(int, MLX5_IRQ_CTRL_SF_MAX, num_sf_ctrl);
+ table->sf_ctrl_pool = irq_pool_alloc(dev, pf_vec, num_sf_ctrl,
+ "mlx5_sf_ctrl",
+ MLX5_EQ_SHARE_IRQ_MIN_CTRL,
+ MLX5_EQ_SHARE_IRQ_MAX_CTRL);
+ if (IS_ERR(table->sf_ctrl_pool)) {
+ err = PTR_ERR(table->sf_ctrl_pool);
+ goto err_pf;
+ }
+ /* init sf_comp_pool */
+ table->sf_comp_pool = irq_pool_alloc(dev, pf_vec + num_sf_ctrl,
+ sf_vec - num_sf_ctrl, "mlx5_sf_comp",
+ MLX5_EQ_SHARE_IRQ_MIN_COMP,
+ MLX5_EQ_SHARE_IRQ_MAX_COMP);
+ if (IS_ERR(table->sf_comp_pool)) {
+ err = PTR_ERR(table->sf_comp_pool);
+ goto err_sf_ctrl;
+ }
return 0;
-
-err_out:
- for (i--; i >= 0; i--)
- clear_comp_irq_affinity_hint(mdev, i);
-
+err_sf_ctrl:
+ irq_pool_free(table->sf_ctrl_pool);
+err_pf:
+ irq_pool_free(table->pf_pool);
return err;
}
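A worked example of the control-IRQ sizing above, using this file's constants and illustrative inputs:

#include <stdio.h>

#define MLX5_COMP_EQS_PER_SF 8
#define MLX5_SFS_PER_CTRL_IRQ 64
#define MLX5_IRQ_CTRL_SF_MAX 8
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        int sf_vec = 200, max_sfs = 100;
        int by_msix = DIV_ROUND_UP(sf_vec, MLX5_COMP_EQS_PER_SF);       /* 25 */
        int by_sfs = DIV_ROUND_UP(max_sfs, MLX5_SFS_PER_CTRL_IRQ);      /* 2 */
        int num_sf_ctrl = by_msix < by_sfs ? by_msix : by_sfs;

        if (num_sf_ctrl > MLX5_IRQ_CTRL_SF_MAX)
                num_sf_ctrl = MLX5_IRQ_CTRL_SF_MAX;
        printf("ctrl IRQs: %d, comp IRQs: %d\n", num_sf_ctrl,
               sf_vec - num_sf_ctrl);   /* 2 control, 198 completion */
        return 0;
}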
-static void clear_comp_irqs_affinity_hints(struct mlx5_core_dev *mdev)
+static void irq_pools_destroy(struct mlx5_irq_table *table)
{
- int nvec = mlx5_irq_get_num_comp(mdev->priv.irq_table);
- int i;
-
- for (i = 0; i < nvec; i++)
- clear_comp_irq_affinity_hint(mdev, i);
+ if (table->sf_ctrl_pool) {
+ irq_pool_free(table->sf_comp_pool);
+ irq_pool_free(table->sf_ctrl_pool);
+ }
+ irq_pool_free(table->pf_pool);
}
-struct cpumask *
-mlx5_irq_get_affinity_mask(struct mlx5_irq_table *irq_table, int vecidx)
+/* irq_table API */
+
+int mlx5_irq_table_init(struct mlx5_core_dev *dev)
{
- return irq_table->irq[vecidx].mask;
+ struct mlx5_irq_table *irq_table;
+
+ if (mlx5_core_is_sf(dev))
+ return 0;
+
+ irq_table = kvzalloc(sizeof(*irq_table), GFP_KERNEL);
+ if (!irq_table)
+ return -ENOMEM;
+
+ dev->priv.irq_table = irq_table;
+ return 0;
}
-#ifdef CONFIG_RFS_ACCEL
-struct cpu_rmap *mlx5_irq_get_rmap(struct mlx5_irq_table *irq_table)
+void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
{
- return irq_table->rmap;
+ if (mlx5_core_is_sf(dev))
+ return;
+
+ kvfree(dev->priv.irq_table);
}
-#endif
-static void unrequest_irqs(struct mlx5_core_dev *dev)
+int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table)
{
- struct mlx5_irq_table *table = dev->priv.irq_table;
- int i;
-
- for (i = 0; i < table->nvec; i++)
- free_irq(pci_irq_vector(dev->pdev, i),
- &mlx5_irq_get(dev, i)->nh);
+ return table->pf_pool->xa_num_irqs.max - table->pf_pool->xa_num_irqs.min;
}
int mlx5_irq_table_create(struct mlx5_core_dev *dev)
{
- struct mlx5_priv *priv = &dev->priv;
- struct mlx5_irq_table *table = priv->irq_table;
int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
MLX5_CAP_GEN(dev, max_num_eqs) :
1 << MLX5_CAP_GEN(dev, log_max_eq);
- int nvec;
+ int total_vec;
+ int pf_vec;
int err;
if (mlx5_core_is_sf(dev))
return 0;
- nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
- MLX5_IRQ_VEC_COMP_BASE;
- nvec = min_t(int, nvec, num_eqs);
- if (nvec <= MLX5_IRQ_VEC_COMP_BASE)
- return -ENOMEM;
-
- table->irq = kcalloc(nvec, sizeof(*table->irq), GFP_KERNEL);
- if (!table->irq)
+ pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
+ MLX5_IRQ_VEC_COMP_BASE;
+ pf_vec = min_t(int, pf_vec, num_eqs);
+ if (pf_vec <= MLX5_IRQ_VEC_COMP_BASE)
return -ENOMEM;
- nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_IRQ_VEC_COMP_BASE + 1,
- nvec, PCI_IRQ_MSIX);
- if (nvec < 0) {
- err = nvec;
- goto err_free_irq;
- }
-
- table->nvec = nvec;
+ total_vec = pf_vec;
+ if (mlx5_sf_max_functions(dev))
+ total_vec += MLX5_IRQ_CTRL_SF_MAX +
+ MLX5_COMP_EQS_PER_SF * mlx5_sf_max_functions(dev);
- err = irq_set_rmap(dev);
- if (err)
- goto err_set_rmap;
+ total_vec = pci_alloc_irq_vectors(dev->pdev, MLX5_IRQ_VEC_COMP_BASE + 1,
+ total_vec, PCI_IRQ_MSIX);
+ if (total_vec < 0)
+ return total_vec;
+ pf_vec = min(pf_vec, total_vec);
- err = request_irqs(dev, nvec);
+ err = irq_pools_init(dev, total_vec - pf_vec, pf_vec);
if (err)
- goto err_request_irqs;
-
- err = set_comp_irq_affinity_hints(dev);
- if (err) {
- mlx5_core_err(dev, "Failed to alloc affinity hint cpumask\n");
- goto err_set_affinity;
- }
-
- return 0;
+ pci_free_irq_vectors(dev->pdev);
-err_set_affinity:
- unrequest_irqs(dev);
-err_request_irqs:
- irq_clear_rmap(dev);
-err_set_rmap:
- pci_free_irq_vectors(dev->pdev);
-err_free_irq:
- kfree(table->irq);
return err;
}
void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
{
struct mlx5_irq_table *table = dev->priv.irq_table;
- int i;
if (mlx5_core_is_sf(dev))
return;
- /* free_irq requires that affinity and rmap will be cleared
- * before calling it. This is why there is asymmetry with set_rmap
- * which should be called after alloc_irq but before request_irq.
+ /* There are cases where IRQs are still in use when we reach this
+ * point. Hence, make sure all the IRQs are released.
*/
- irq_clear_rmap(dev);
- clear_comp_irqs_affinity_hints(dev);
- for (i = 0; i < table->nvec; i++)
- free_irq(pci_irq_vector(dev->pdev, i),
- &mlx5_irq_get(dev, i)->nh);
+ irq_pools_destroy(table);
pci_free_irq_vectors(dev->pdev);
- kfree(table->irq);
+}
+
+int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table)
+{
+ if (table->sf_comp_pool)
+ return table->sf_comp_pool->xa_num_irqs.max -
+ table->sf_comp_pool->xa_num_irqs.min + 1;
+ else
+ return mlx5_irq_table_get_num_comp(table);
}
struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index a8e73c9ed1ea..1be048769309 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -136,10 +136,10 @@ static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state)
switch (hw_state) {
case MLX5_VHCA_STATE_ACTIVE:
case MLX5_VHCA_STATE_IN_USE:
- case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
return DEVLINK_PORT_FN_STATE_ACTIVE;
case MLX5_VHCA_STATE_INVALID:
case MLX5_VHCA_STATE_ALLOCATED:
+ case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
default:
return DEVLINK_PORT_FN_STATE_INACTIVE;
}
@@ -192,14 +192,17 @@ sf_err:
return err;
}
-static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
+static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
+ struct netlink_ext_ack *extack)
{
int err;
if (mlx5_sf_is_active(sf))
return 0;
- if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED)
- return -EINVAL;
+ if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED) {
+ NL_SET_ERR_MSG_MOD(extack, "SF is inactivated but it is still attached");
+ return -EBUSY;
+ }
err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id);
if (err)
@@ -226,7 +229,8 @@ static int mlx5_sf_deactivate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
struct mlx5_sf *sf,
- enum devlink_port_fn_state state)
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack)
{
int err = 0;
@@ -234,7 +238,7 @@ static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *ta
if (state == mlx5_sf_to_devlink_state(sf->hw_state))
goto out;
if (state == DEVLINK_PORT_FN_STATE_ACTIVE)
- err = mlx5_sf_activate(dev, sf);
+ err = mlx5_sf_activate(dev, sf, extack);
else if (state == DEVLINK_PORT_FN_STATE_INACTIVE)
err = mlx5_sf_deactivate(dev, sf);
else
@@ -265,7 +269,7 @@ int mlx5_devlink_sf_port_fn_state_set(struct devlink *devlink, struct devlink_po
goto out;
}
- err = mlx5_sf_state_set(dev, table, sf, state);
+ err = mlx5_sf_state_set(dev, table, sf, state, extack);
out:
mlx5_sf_table_put(table);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
index ef5f892aafad..500c71fb6f6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
@@ -6,7 +6,6 @@
#include "sf.h"
#include "mlx5_ifc_vhca_event.h"
#include "ecpf.h"
-#include "vhca_event.h"
#include "mlx5_core.h"
#include "eswitch.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
index 0b6aea1e6a94..81ce13b19ee8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
@@ -5,42 +5,7 @@
#define __MLX5_SF_H__
#include <linux/mlx5/driver.h>
-
-static inline u16 mlx5_sf_start_function_id(const struct mlx5_core_dev *dev)
-{
- return MLX5_CAP_GEN(dev, sf_base_id);
-}
-
-#ifdef CONFIG_MLX5_SF
-
-static inline bool mlx5_sf_supported(const struct mlx5_core_dev *dev)
-{
- return MLX5_CAP_GEN(dev, sf);
-}
-
-static inline u16 mlx5_sf_max_functions(const struct mlx5_core_dev *dev)
-{
- if (!mlx5_sf_supported(dev))
- return 0;
- if (MLX5_CAP_GEN(dev, max_num_sf))
- return MLX5_CAP_GEN(dev, max_num_sf);
- else
- return 1 << MLX5_CAP_GEN(dev, log_max_sf);
-}
-
-#else
-
-static inline bool mlx5_sf_supported(const struct mlx5_core_dev *dev)
-{
- return false;
-}
-
-static inline u16 mlx5_sf_max_functions(const struct mlx5_core_dev *dev)
-{
- return 0;
-}
-
-#endif
+#include "lib/sf.h"
#ifdef CONFIG_MLX5_SF_MANAGER
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 2338989d4403..e8185b69ac6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -34,6 +34,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
+#include "mlx5_irq.h"
#include "eswitch.h"
static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 949879cf2092..de68c0ec2143 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 Mellanox Technologies. */
#include "dr_types.h"
+#include "dr_ste.h"
enum dr_action_domain {
DR_ACTION_DOMAIN_NIC_INGRESS,
@@ -14,7 +15,8 @@ enum dr_action_domain {
enum dr_action_valid_state {
DR_ACTION_STATE_ERR,
DR_ACTION_STATE_NO_ACTION,
- DR_ACTION_STATE_REFORMAT,
+ DR_ACTION_STATE_ENCAP,
+ DR_ACTION_STATE_DECAP,
DR_ACTION_STATE_MODIFY_HDR,
DR_ACTION_STATE_MODIFY_VLAN,
DR_ACTION_STATE_NON_TERM,
@@ -31,26 +33,42 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
- [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
},
- [DR_ACTION_STATE_REFORMAT] = {
+ [DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
- [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
},
+ [DR_ACTION_STATE_ENCAP] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
+ },
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
},
[DR_ACTION_STATE_MODIFY_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -60,6 +78,9 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -67,8 +88,11 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_TAG] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
- [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
},
@@ -81,22 +105,24 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
- [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
},
- [DR_ACTION_STATE_REFORMAT] = {
+ [DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
- [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
- [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
},
[DR_ACTION_STATE_MODIFY_VLAN] = {
@@ -104,15 +130,17 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
- [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
- [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
},
@@ -125,25 +153,41 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
- [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
},
- [DR_ACTION_STATE_REFORMAT] = {
+ [DR_ACTION_STATE_DECAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
- [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
+ },
+ [DR_ACTION_STATE_ENCAP] = {
+ [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
},
[DR_ACTION_STATE_MODIFY_VLAN] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
@@ -152,13 +196,19 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
},
[DR_ACTION_STATE_NON_TERM] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
- [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_TNL_L2_TO_L2] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_TNL_L3_TO_L2] = DR_ACTION_STATE_DECAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
@@ -173,23 +223,25 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
- [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
},
- [DR_ACTION_STATE_REFORMAT] = {
+ [DR_ACTION_STATE_ENCAP] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
- [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
},
[DR_ACTION_STATE_MODIFY_HDR] = {
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_HDR,
- [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
},
@@ -198,8 +250,9 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN,
- [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
},
[DR_ACTION_STATE_NON_TERM] = {
@@ -207,8 +260,9 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX]
[DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM,
[DR_ACTION_TYP_CTR] = DR_ACTION_STATE_NON_TERM,
[DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR,
- [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_REFORMAT,
- [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_REFORMAT,
+ [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP,
+ [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP,
[DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN,
[DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM,
},
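The next_action_state table is a state machine that validates the order of steering actions: each (domain, state, action) entry names the next state, and missing entries default to DR_ACTION_STATE_ERR, so sequences such as encap followed by decap are rejected. A reduced standalone sketch of the same validation idea (states and action types trimmed for illustration):

#include <stdio.h>

enum state { ST_ERR, ST_NO_ACTION, ST_DECAP, ST_ENCAP, ST_TERM, ST_MAX };
enum type { TYP_TNL_TO_L2, TYP_L2_TO_TNL, TYP_FT, TYP_MAX };

static const enum state next_state[ST_MAX][TYP_MAX] = {
        [ST_NO_ACTION] = { [TYP_TNL_TO_L2] = ST_DECAP,
                           [TYP_L2_TO_TNL] = ST_ENCAP,
                           [TYP_FT] = ST_TERM },
        [ST_DECAP]     = { [TYP_L2_TO_TNL] = ST_ENCAP, [TYP_FT] = ST_TERM },
        [ST_ENCAP]     = { [TYP_FT] = ST_TERM },        /* no decap after encap */
};

static int sequence_valid(const enum type *actions, int n)
{
        enum state s = ST_NO_ACTION;

        for (int i = 0; i < n; i++) {
                s = next_state[s][actions[i]];
                if (s == ST_ERR)        /* unlisted entries default to ST_ERR */
                        return 0;
        }
        return s == ST_TERM;    /* must end on a terminating action */
}

int main(void)
{
        enum type ok[] = { TYP_TNL_TO_L2, TYP_L2_TO_TNL, TYP_FT };
        enum type bad[] = { TYP_L2_TO_TNL, TYP_TNL_TO_L2, TYP_FT };

        printf("%d %d\n", sequence_valid(ok, 3), sequence_valid(bad, 3)); /* 1 0 */
        return 0;
}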
@@ -235,6 +289,9 @@ dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type
case DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3:
*action_type = DR_ACTION_TYP_L2_TO_TNL_L3;
break;
+ case DR_ACTION_REFORMAT_TYP_INSERT_HDR:
+ *action_type = DR_ACTION_TYP_INSERT_HDR;
+ break;
default:
return -EINVAL;
}
@@ -454,8 +511,13 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
break;
case DR_ACTION_TYP_L2_TO_TNL_L2:
case DR_ACTION_TYP_L2_TO_TNL_L3:
- attr.reformat_size = action->reformat->reformat_size;
- attr.reformat_id = action->reformat->reformat_id;
+ if (rx_rule &&
+ !(dmn->ste_ctx->actions_caps & DR_STE_CTX_ACTION_CAP_RX_ENCAP)) {
+ mlx5dr_info(dmn, "Device doesn't support Encap on RX\n");
+ goto out_invalid_arg;
+ }
+ attr.reformat.size = action->reformat->size;
+ attr.reformat.id = action->reformat->id;
break;
case DR_ACTION_TYP_VPORT:
attr.hit_gvmi = action->vport->caps->vhca_gvmi;
@@ -481,6 +543,12 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
attr.vlans.headers[attr.vlans.count++] = action->push_vlan->vlan_hdr;
break;
+ case DR_ACTION_TYP_INSERT_HDR:
+ attr.reformat.size = action->reformat->size;
+ attr.reformat.id = action->reformat->id;
+ attr.reformat.param_0 = action->reformat->param_0;
+ attr.reformat.param_1 = action->reformat->param_1;
+ break;
default:
goto out_invalid_arg;
}
@@ -543,6 +611,7 @@ static unsigned int action_size[DR_ACTION_TYP_MAX] = {
[DR_ACTION_TYP_MODIFY_HDR] = sizeof(struct mlx5dr_action_rewrite),
[DR_ACTION_TYP_VPORT] = sizeof(struct mlx5dr_action_vport),
[DR_ACTION_TYP_PUSH_VLAN] = sizeof(struct mlx5dr_action_push_vlan),
+ [DR_ACTION_TYP_INSERT_HDR] = sizeof(struct mlx5dr_action_reformat),
};
static struct mlx5dr_action *
@@ -651,7 +720,7 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
if (reformat_action) {
reformat_req = true;
hw_dests[i].vport.reformat_id =
- reformat_action->reformat->reformat_id;
+ reformat_action->reformat->id;
ref_actions[num_of_ref++] = reformat_action;
hw_dests[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
}
@@ -758,11 +827,15 @@ struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value)
static int
dr_action_verify_reformat_params(enum mlx5dr_action_type reformat_type,
struct mlx5dr_domain *dmn,
+ u8 reformat_param_0,
+ u8 reformat_param_1,
size_t data_sz,
void *data)
{
- if ((!data && data_sz) || (data && !data_sz) || reformat_type >
- DR_ACTION_TYP_L2_TO_TNL_L3) {
+ if ((!data && data_sz) || (data && !data_sz) ||
+ ((reformat_param_0 || reformat_param_1) &&
+ reformat_type != DR_ACTION_TYP_INSERT_HDR) ||
+ reformat_type > DR_ACTION_TYP_INSERT_HDR) {
mlx5dr_dbg(dmn, "Invalid reformat parameter!\n");
goto out_err;
}
@@ -794,6 +867,7 @@ out_err:
static int
dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
+ u8 reformat_param_0, u8 reformat_param_1,
size_t data_sz, void *data,
struct mlx5dr_action *action)
{
@@ -811,13 +885,14 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
else
rt = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
- ret = mlx5dr_cmd_create_reformat_ctx(dmn->mdev, rt, data_sz, data,
+ ret = mlx5dr_cmd_create_reformat_ctx(dmn->mdev, rt, 0, 0,
+ data_sz, data,
&reformat_id);
if (ret)
return ret;
- action->reformat->reformat_id = reformat_id;
- action->reformat->reformat_size = data_sz;
+ action->reformat->id = reformat_id;
+ action->reformat->size = data_sz;
return 0;
}
case DR_ACTION_TYP_TNL_L2_TO_L2:
@@ -859,6 +934,23 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
}
return 0;
}
+ case DR_ACTION_TYP_INSERT_HDR:
+ {
+ ret = mlx5dr_cmd_create_reformat_ctx(dmn->mdev,
+ MLX5_REFORMAT_TYPE_INSERT_HDR,
+ reformat_param_0,
+ reformat_param_1,
+ data_sz, data,
+ &reformat_id);
+ if (ret)
+ return ret;
+
+ action->reformat->id = reformat_id;
+ action->reformat->size = data_sz;
+ action->reformat->param_0 = reformat_param_0;
+ action->reformat->param_1 = reformat_param_1;
+ return 0;
+ }
default:
mlx5dr_info(dmn, "Reformat type is not supported %d\n", action->action_type);
return -EINVAL;
@@ -896,6 +988,8 @@ struct mlx5dr_action *mlx5dr_action_create_push_vlan(struct mlx5dr_domain *dmn,
struct mlx5dr_action *
mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
enum mlx5dr_action_reformat_type reformat_type,
+ u8 reformat_param_0,
+ u8 reformat_param_1,
size_t data_sz,
void *data)
{
@@ -912,7 +1006,9 @@ mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
goto dec_ref;
}
- ret = dr_action_verify_reformat_params(action_type, dmn, data_sz, data);
+ ret = dr_action_verify_reformat_params(action_type, dmn,
+ reformat_param_0, reformat_param_1,
+ data_sz, data);
if (ret)
goto dec_ref;
@@ -923,6 +1019,8 @@ mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
action->reformat->dmn = dmn;
ret = dr_action_create_reformat_action(dmn,
+ reformat_param_0,
+ reformat_param_1,
data_sz,
data,
action);
@@ -1516,8 +1614,9 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
break;
case DR_ACTION_TYP_L2_TO_TNL_L2:
case DR_ACTION_TYP_L2_TO_TNL_L3:
+ case DR_ACTION_TYP_INSERT_HDR:
mlx5dr_cmd_destroy_reformat_ctx((action->reformat->dmn)->mdev,
- action->reformat->reformat_id);
+ action->reformat->id);
refcount_dec(&action->reformat->dmn->refcount);
break;
case DR_ACTION_TYP_MODIFY_HDR:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 5970cb8fc0c0..6314f50efbd4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -460,6 +460,8 @@ int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
enum mlx5_reformat_ctx_type rt,
+ u8 reformat_param_0,
+ u8 reformat_param_1,
size_t reformat_size,
void *reformat_data,
u32 *reformat_id)
@@ -486,8 +488,11 @@ int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
MLX5_SET(packet_reformat_context_in, prctx, reformat_type, rt);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, reformat_param_0);
+ MLX5_SET(packet_reformat_context_in, prctx, reformat_param_1, reformat_param_1);
MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, reformat_size);
- memcpy(pdata, reformat_data, reformat_size);
+ if (reformat_data && reformat_size)
+ memcpy(pdata, reformat_data, reformat_size);
err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
if (err)
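Two things change in this command path: the new reformat_param_0/1 values are programmed into the packet_reformat_context, and the data copy is guarded because a context may now legitimately be created with no inline data (the fs_dr decap path later in this diff passes NULL with size 0). A small sketch of the guarded-copy idea:

/* Guarded copy: only touch the payload area when there is actually
 * data to copy; parameter-only contexts pass NULL/0.
 */
#include <stdio.h>
#include <string.h>

static void copy_reformat_data(void *dst, const void *data, size_t size)
{
	if (data && size)
		memcpy(dst, data, size);
}

int main(void)
{
	char buf[8] = "xxxxxxx";

	copy_reformat_data(buf, NULL, 0);	/* no-op for a data-less context */
	copy_reformat_data(buf, "hdr", 4);	/* normal encap payload copy */
	printf("%s\n", buf);			/* hdr */
	return 0;
}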
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
index 1fbcd012bb85..7ccfd40586ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
@@ -112,7 +112,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
int ret;
ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB;
- ft_attr.level = dmn->info.caps.max_ft_level - 2;
+ ft_attr.level = min_t(int, dmn->info.caps.max_ft_level - 2,
+ MLX5_FT_MAX_MULTIPATH_LEVEL);
ft_attr.reformat_en = reformat_req;
ft_attr.decap_en = reformat_req;
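The level of the multi-destination table is now clamped so it can never exceed the highest level usable for multipath; min_t simply picks the lower of the two candidates. A trivial sketch, with an assumed value for the ceiling since MLX5_FT_MAX_MULTIPATH_LEVEL is not defined in this excerpt:

/* Clamp sketch: the created table sits two levels below the maximum,
 * but never above the multipath ceiling.
 */
#include <stdio.h>

#define MAX_MULTIPATH_LEVEL 63	/* assumed stand-in value */

static int md_tbl_level(int max_ft_level)
{
	int level = max_ft_level - 2;

	return level < MAX_MULTIPATH_LEVEL ? level : MAX_MULTIPATH_LEVEL;
}

int main(void)
{
	printf("%d\n", md_tbl_level(10));	/* 8: below the ceiling */
	printf("%d\n", md_tbl_level(200));	/* 63: clamped */
	return 0;
}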
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
index 992b591bf0c5..12a8bbbf944b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
@@ -156,6 +156,7 @@ struct mlx5dr_ste_ctx {
u16 (*get_byte_mask)(u8 *hw_ste_p);
/* Actions */
+ u32 actions_caps;
void (*set_actions_rx)(struct mlx5dr_domain *dmn,
u8 *action_type_set,
u8 *hw_ste_arr,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
index 0757a4e8540e..f1950e4968da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -437,8 +437,8 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
attr->gvmi);
dr_ste_v0_set_tx_encap(last_ste,
- attr->reformat_id,
- attr->reformat_size,
+ attr->reformat.id,
+ attr->reformat.size,
action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]);
/* Whenever prio_tag_required is enabled, we can be sure that the
* previous table (ACL) has already pushed a VLAN onto our packet,
@@ -1893,6 +1893,7 @@ struct mlx5dr_ste_ctx ste_ctx_v0 = {
.get_byte_mask = &dr_ste_v0_get_byte_mask,
/* Actions */
+ .actions_caps = DR_STE_CTX_ACTION_CAP_NONE,
.set_actions_rx = &dr_ste_v0_set_actions_rx,
.set_actions_tx = &dr_ste_v0_set_actions_tx,
.modify_field_arr_sz = ARRAY_SIZE(dr_ste_v0_action_modify_field_arr),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index 054c2e2b6554..42668de01abc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -116,6 +116,8 @@ enum {
DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3 = 0x4f,
DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0 = 0x5e,
DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1 = 0x5f,
+ DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0 = 0x6f,
+ DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1 = 0x70,
DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE = 0x7b,
DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE = 0x7c,
DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2 = 0x8c,
@@ -246,6 +248,12 @@ static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field
[MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
},
+ [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
+ },
+ [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
+ .hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
+ },
};
static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type)
@@ -361,8 +369,8 @@ static void dr_ste_v1_set_reparse(u8 *hw_ste_p)
MLX5_SET(ste_match_bwc_v1, hw_ste_p, reparse, 1);
}
-static void dr_ste_v1_set_tx_encap(u8 *hw_ste_p, u8 *d_action,
- u32 reformat_id, int size)
+static void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id, int size)
{
MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id,
DR_STE_V1_ACTION_ID_INSERT_POINTER);
@@ -374,6 +382,26 @@ static void dr_ste_v1_set_tx_encap(u8 *hw_ste_p, u8 *d_action,
dr_ste_v1_set_reparse(hw_ste_p);
}
+static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
+ u32 reformat_id,
+ u8 anchor, u8 offset,
+ int size)
+{
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action,
+ action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, start_anchor, anchor);
+
+ /* The hardware expects size and offset here in 2-byte words */
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, size / 2);
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, start_offset, offset / 2);
+
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, pointer, reformat_id);
+ MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, attributes,
+ DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE);
+
+ dr_ste_v1_set_reparse(hw_ste_p);
+}
+
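dr_ste_v1_set_insert_hdr() receives its size and offset in bytes but programs the STE in 2-byte words, hence the two divisions. A quick userspace check of the conversion; odd byte counts are not representable at this granularity:

/* Byte-to-word conversion used by the insert-header action. */
#include <assert.h>
#include <stdio.h>

static unsigned int bytes_to_words(unsigned int bytes)
{
	assert(bytes % 2 == 0);	/* hardware granularity is 2 bytes */
	return bytes / 2;
}

int main(void)
{
	/* e.g. an 8-byte header inserted 4 bytes past the anchor */
	printf("size=%u words, offset=%u words\n",
	       bytes_to_words(8), bytes_to_words(4));	/* 4, 2 */
	return 0;
}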
static void dr_ste_v1_set_tx_push_vlan(u8 *hw_ste_p, u8 *d_action,
u32 vlan_hdr)
{
@@ -401,11 +429,11 @@ static void dr_ste_v1_set_rx_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
dr_ste_v1_set_reparse(hw_ste_p);
}
-static void dr_ste_v1_set_tx_encap_l3(u8 *hw_ste_p,
- u8 *frst_s_action,
- u8 *scnd_d_action,
- u32 reformat_id,
- int size)
+static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p,
+ u8 *frst_s_action,
+ u8 *scnd_d_action,
+ u32 reformat_id,
+ int size)
{
/* Remove L2 headers */
MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, action_id,
@@ -519,9 +547,9 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
allow_encap = true;
}
- dr_ste_v1_set_tx_encap(last_ste, action,
- attr->reformat_id,
- attr->reformat_size);
+ dr_ste_v1_set_encap(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
@@ -532,12 +560,25 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action_sz = DR_STE_ACTION_TRIPLE_SZ;
d_action = action + DR_STE_ACTION_SINGLE_SZ;
- dr_ste_v1_set_tx_encap_l3(last_ste,
- action, d_action,
- attr->reformat_id,
- attr->reformat_size);
+ dr_ste_v1_set_encap_l3(last_ste,
+ action, d_action,
+ attr->reformat.id,
+ attr->reformat.size);
action_sz -= DR_STE_ACTION_TRIPLE_SZ;
action += DR_STE_ACTION_TRIPLE_SZ;
+ } else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
+ if (!allow_encap || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
+ dr_ste_v1_set_insert_hdr(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
}
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
@@ -616,7 +657,9 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
}
if (action_type_set[DR_ACTION_TYP_CTR]) {
- /* Counter action set after decap to exclude decaped header */
+ /* The counter action is set after decap and before insert_hdr
+ * to exclude the decapped / encapped header respectively.
+ */
if (!allow_ctr) {
dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
@@ -627,6 +670,52 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
}
+ if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
+ if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
+ dr_ste_v1_set_encap(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.size);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
+ allow_modify_hdr = false;
+ } else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
+ u8 *d_action;
+
+ if (action_sz < DR_STE_ACTION_TRIPLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
+
+ d_action = action + DR_STE_ACTION_SINGLE_SZ;
+
+ dr_ste_v1_set_encap_l3(last_ste,
+ action, d_action,
+ attr->reformat.id,
+ attr->reformat.size);
+ action_sz -= DR_STE_ACTION_TRIPLE_SZ;
+ allow_modify_hdr = false;
+ } else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
+ /* Modify header, decap, and encap must use different STEs */
+ if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
+ dr_ste_v1_set_insert_hdr(last_ste, action,
+ attr->reformat.id,
+ attr->reformat.param_0,
+ attr->reformat.param_1,
+ attr->reformat.size);
+ action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+ action += DR_STE_ACTION_DOUBLE_SZ;
+ allow_modify_hdr = false;
+ }
+
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
@@ -1865,6 +1954,7 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = {
.set_byte_mask = &dr_ste_v1_set_byte_mask,
.get_byte_mask = &dr_ste_v1_get_byte_mask,
/* Actions */
+ .actions_caps = DR_STE_CTX_ACTION_CAP_RX_ENCAP,
.set_actions_rx = &dr_ste_v1_set_actions_rx,
.set_actions_tx = &dr_ste_v1_set_actions_tx,
.modify_field_arr_sz = ARRAY_SIZE(dr_ste_v1_action_modify_field_arr),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 67460c42a99b..60b8c04e165e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -89,6 +89,11 @@ enum {
DR_STE_SIZE_REDUCED = DR_STE_SIZE - DR_STE_SIZE_MASK,
};
+enum mlx5dr_ste_ctx_action_cap {
+ DR_STE_CTX_ACTION_CAP_NONE = 0,
+ DR_STE_CTX_ACTION_CAP_RX_ENCAP = 1 << 0,
+};
+
enum {
DR_MODIFY_ACTION_SIZE = 8,
};
@@ -118,6 +123,7 @@ enum mlx5dr_action_type {
DR_ACTION_TYP_VPORT,
DR_ACTION_TYP_POP_VLAN,
DR_ACTION_TYP_PUSH_VLAN,
+ DR_ACTION_TYP_INSERT_HDR,
DR_ACTION_TYP_MAX,
};
@@ -261,8 +267,12 @@ struct mlx5dr_ste_actions_attr {
u32 ctr_id;
u16 gvmi;
u16 hit_gvmi;
- u32 reformat_id;
- u32 reformat_size;
+ struct {
+ u32 id;
+ u32 size;
+ u8 param_0;
+ u8 param_1;
+ } reformat;
struct {
int count;
u32 headers[MLX5DR_MAX_VLANS];
@@ -903,8 +913,10 @@ struct mlx5dr_action_rewrite {
struct mlx5dr_action_reformat {
struct mlx5dr_domain *dmn;
- u32 reformat_id;
- u32 reformat_size;
+ u32 id;
+ u32 size;
+ u8 param_0;
+ u8 param_1;
};
struct mlx5dr_action_dest_tbl {
@@ -1142,6 +1154,8 @@ int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
struct mlx5dr_cmd_query_flow_table_details *output);
int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
enum mlx5_reformat_ctx_type rt,
+ u8 reformat_param_0,
+ u8 reformat_param_1,
size_t reformat_size,
void *reformat_data,
u32 *reformat_id);
@@ -1252,7 +1266,6 @@ struct mlx5dr_send_ring {
u32 tx_head;
void *buf;
u32 buf_size;
- struct ib_wc wc[MAX_SEND_CQE];
u8 sync_buff[MIN_READ_SYNC];
struct mlx5dr_mr *sync_mr;
spinlock_t lock; /* Protect the data path of the send ring */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 96c39a17d026..00b4c753cae2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -62,7 +62,7 @@ static int set_miss_action(struct mlx5_flow_root_namespace *ns,
static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
- unsigned int log_size,
+ unsigned int size,
struct mlx5_flow_table *next_ft)
{
struct mlx5dr_table *tbl;
@@ -71,7 +71,7 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
if (mlx5_dr_is_fw_table(ft->flags))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
- log_size,
+ size,
next_ft);
flags = ft->flags;
/* turn off encap/decap if not supported for sw-str by fw */
@@ -97,6 +97,8 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
}
}
+ ft->max_fte = INT_MAX;
+
return 0;
}
@@ -287,7 +289,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2;
tmp_action = mlx5dr_action_create_packet_reformat(domain,
- decap_type, 0,
+ decap_type,
+ 0, 0, 0,
NULL);
if (!tmp_action) {
err = -ENOMEM;
@@ -520,9 +523,7 @@ out_err:
}
static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
- int reformat_type,
- size_t size,
- void *reformat_data,
+ struct mlx5_pkt_reformat_params *params,
enum mlx5_flow_namespace_type namespace,
struct mlx5_pkt_reformat *pkt_reformat)
{
@@ -530,7 +531,7 @@ static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns
struct mlx5dr_action *action;
int dr_reformat;
- switch (reformat_type) {
+ switch (params->type) {
case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
@@ -542,16 +543,21 @@ static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns
case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
dr_reformat = DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3;
break;
+ case MLX5_REFORMAT_TYPE_INSERT_HDR:
+ dr_reformat = DR_ACTION_REFORMAT_TYP_INSERT_HDR;
+ break;
default:
mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n",
- reformat_type);
+ params->type);
return -EOPNOTSUPP;
}
action = mlx5dr_action_create_packet_reformat(dr_domain,
dr_reformat,
- size,
- reformat_data);
+ params->param_0,
+ params->param_1,
+ params->size,
+ params->data);
if (!action) {
mlx5_core_err(ns->dev, "Failed allocating packet-reformat action\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index 612b0ac31db2..0e2b73731117 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -26,6 +26,7 @@ enum mlx5dr_action_reformat_type {
DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L2,
DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2,
DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3,
+ DR_ACTION_REFORMAT_TYP_INSERT_HDR,
};
struct mlx5dr_match_parameters {
@@ -105,6 +106,8 @@ mlx5dr_action_create_flow_counter(u32 counter_id);
struct mlx5dr_action *
mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
enum mlx5dr_action_reformat_type reformat_type,
+ u8 reformat_param_0,
+ u8 reformat_param_1,
size_t data_sz,
void *data);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index ad93e01b2cda..e775f08fb464 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1444,7 +1444,9 @@ mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
if (err)
return err;
- err = devlink_info_version_fixed_put(req, "fw.psid", fw_info_psid);
+ err = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
+ fw_info_psid);
if (err)
return err;
@@ -1453,7 +1455,9 @@ mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
if (err)
return err;
- return 0;
+ return devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
}
static int
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index dd26865bd587..b3ca5bd33a7f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -125,6 +125,7 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
int off, int *temp)
{
+ unsigned int module_temp, module_crit, module_emerg;
char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE];
union {
u8 buf[MLXSW_REG_MCIA_TH_ITEM_SIZE];
@@ -132,7 +133,6 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
} temp_thresh;
char mcia_pl[MLXSW_REG_MCIA_LEN] = {0};
char mtmp_pl[MLXSW_REG_MTMP_LEN];
- unsigned int module_temp;
bool qsfp, cmis;
int page;
int err;
@@ -142,12 +142,21 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
err = mlxsw_reg_query(core, MLXSW_REG(mtmp), mtmp_pl);
if (err)
return err;
- mlxsw_reg_mtmp_unpack(mtmp_pl, &module_temp, NULL, NULL);
+ mlxsw_reg_mtmp_unpack(mtmp_pl, &module_temp, NULL, &module_crit,
+ &module_emerg, NULL);
if (!module_temp) {
*temp = 0;
return 0;
}
+ /* Check whether threshold readings are available through the MTMP
+ * register; otherwise fall back to reading them through MCIA.
+ */
+ if (module_emerg) {
+ *temp = off == SFP_TEMP_HIGH_WARN ? module_crit : module_emerg;
+ return 0;
+ }
+
/* Read Free Side Device Temperature Thresholds from page 03h
* (MSB at lower byte address).
* Bytes:
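The logic above prefers thresholds reported directly by the MTMP register and only falls back to the slower module-EEPROM path when MTMP returns zeroes. A userspace model of that selection, with stand-ins for the SFP_TEMP_* constants:

/* Threshold selection: MTMP values win when present (non-zero
 * emergency threshold), EEPROM is the fallback.
 */
#include <stdio.h>

enum thresh { TEMP_HIGH_WARN, TEMP_HIGH_ALARM };

static int pick_threshold(int crit, int emerg, enum thresh which,
			  int eeprom_val)
{
	if (emerg)	/* MTMP carried valid thresholds */
		return which == TEMP_HIGH_WARN ? crit : emerg;
	return eeprom_val;
}

int main(void)
{
	printf("%d\n", pick_threshold(70000, 75000, TEMP_HIGH_WARN, 68000));	/* 70000 */
	printf("%d\n", pick_threshold(0, 0, TEMP_HIGH_ALARM, 73000));		/* 73000 */
	return 0;
}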
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index 2196c946698a..d41afdfbd085 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -72,7 +72,7 @@ static ssize_t mlxsw_hwmon_temp_show(struct device *dev,
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n");
return err;
}
- mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL);
+ mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL, NULL, NULL);
return sprintf(buf, "%d\n", temp);
}
@@ -95,7 +95,7 @@ static ssize_t mlxsw_hwmon_temp_max_show(struct device *dev,
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to query temp sensor\n");
return err;
}
- mlxsw_reg_mtmp_unpack(mtmp_pl, NULL, &temp_max, NULL);
+ mlxsw_reg_mtmp_unpack(mtmp_pl, NULL, &temp_max, NULL, NULL, NULL);
return sprintf(buf, "%d\n", temp_max);
}
@@ -239,7 +239,7 @@ static int mlxsw_hwmon_module_temp_get(struct device *dev,
dev_err(dev, "Failed to query module temperature\n");
return err;
}
- mlxsw_reg_mtmp_unpack(mtmp_pl, p_temp, NULL, NULL);
+ mlxsw_reg_mtmp_unpack(mtmp_pl, p_temp, NULL, NULL, NULL, NULL);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index dfea14399607..677a53f65008 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -149,22 +149,27 @@ mlxsw_thermal_module_trips_reset(struct mlxsw_thermal_module *tz)
static int
mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
- struct mlxsw_thermal_module *tz)
+ struct mlxsw_thermal_module *tz,
+ int crit_temp, int emerg_temp)
{
- int crit_temp, emerg_temp;
int err;
- err = mlxsw_env_module_temp_thresholds_get(core, tz->module,
- SFP_TEMP_HIGH_WARN,
- &crit_temp);
- if (err)
- return err;
+ /* Do not try to query temperature thresholds directly from the module's
+ * EEPROM if we got valid thresholds from MTMP.
+ */
+ if (!emerg_temp || !crit_temp) {
+ err = mlxsw_env_module_temp_thresholds_get(core, tz->module,
+ SFP_TEMP_HIGH_WARN,
+ &crit_temp);
+ if (err)
+ return err;
- err = mlxsw_env_module_temp_thresholds_get(core, tz->module,
- SFP_TEMP_HIGH_ALARM,
- &emerg_temp);
- if (err)
- return err;
+ err = mlxsw_env_module_temp_thresholds_get(core, tz->module,
+ SFP_TEMP_HIGH_ALARM,
+ &emerg_temp);
+ if (err)
+ return err;
+ }
if (crit_temp > emerg_temp) {
dev_warn(dev, "%s : Critical threshold %d is above emergency threshold %d\n",
@@ -281,7 +286,7 @@ static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
dev_err(dev, "Failed to query temp sensor\n");
return err;
}
- mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL);
+ mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL, NULL, NULL);
if (temp > 0)
mlxsw_thermal_tz_score_update(thermal, tzdev, thermal->trips,
temp);
@@ -420,36 +425,57 @@ static int mlxsw_thermal_module_unbind(struct thermal_zone_device *tzdev,
return err;
}
-static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
- int *p_temp)
+static void
+mlxsw_thermal_module_temp_and_thresholds_get(struct mlxsw_core *core,
+ u16 sensor_index, int *p_temp,
+ int *p_crit_temp,
+ int *p_emerg_temp)
{
- struct mlxsw_thermal_module *tz = tzdev->devdata;
- struct mlxsw_thermal *thermal = tz->parent;
- struct device *dev = thermal->bus_info->dev;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
- int temp;
int err;
- /* Read module temperature. */
- mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN +
- tz->module, false, false);
- err = mlxsw_reg_query(thermal->core, MLXSW_REG(mtmp), mtmp_pl);
+ /* Read module temperature and thresholds. */
+ mlxsw_reg_mtmp_pack(mtmp_pl, sensor_index, false, false);
+ err = mlxsw_reg_query(core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
- /* Do not return error - in case of broken module's sensor
- * it will cause error message flooding.
+ /* Set temperature and thresholds to zero to avoid passing
+ * uninitialized data back to the caller.
*/
- temp = 0;
- *p_temp = (int) temp;
- return 0;
+ *p_temp = 0;
+ *p_crit_temp = 0;
+ *p_emerg_temp = 0;
+
+ return;
}
- mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL);
+ mlxsw_reg_mtmp_unpack(mtmp_pl, p_temp, NULL, p_crit_temp, p_emerg_temp,
+ NULL);
+}
+
+static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
+ int *p_temp)
+{
+ struct mlxsw_thermal_module *tz = tzdev->devdata;
+ struct mlxsw_thermal *thermal = tz->parent;
+ int temp, crit_temp, emerg_temp;
+ struct device *dev;
+ u16 sensor_index;
+ int err;
+
+ dev = thermal->bus_info->dev;
+ sensor_index = MLXSW_REG_MTMP_MODULE_INDEX_MIN + tz->module;
+
+ /* Read module temperature and thresholds. */
+ mlxsw_thermal_module_temp_and_thresholds_get(thermal->core,
+ sensor_index, &temp,
+ &crit_temp, &emerg_temp);
*p_temp = temp;
if (!temp)
return 0;
/* Update trip points. */
- err = mlxsw_thermal_module_trips_update(dev, thermal->core, tz);
+ err = mlxsw_thermal_module_trips_update(dev, thermal->core, tz,
+ crit_temp, emerg_temp);
if (!err && temp > 0)
mlxsw_thermal_tz_score_update(thermal, tzdev, tz->trips, temp);
@@ -560,7 +586,7 @@ static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
if (err)
return err;
- mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL);
+ mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL, NULL, NULL);
if (temp > 0)
mlxsw_thermal_tz_score_update(thermal, tzdev, tz->trips, temp);
@@ -716,7 +742,10 @@ mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
struct mlxsw_thermal *thermal, u8 module)
{
struct mlxsw_thermal_module *module_tz;
+ int dummy_temp, crit_temp, emerg_temp;
+ u16 sensor_index;
+ sensor_index = MLXSW_REG_MTMP_MODULE_INDEX_MIN + module;
module_tz = &thermal->tz_module_arr[module];
/* Skip if parent is already set (case of port split). */
if (module_tz->parent)
@@ -727,8 +756,12 @@ mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
sizeof(thermal->trips));
/* Initialize all trip points. */
mlxsw_thermal_module_trips_reset(module_tz);
+ /* Read module temperature and thresholds. */
+ mlxsw_thermal_module_temp_and_thresholds_get(core, sensor_index, &dummy_temp,
+ &crit_temp, &emerg_temp);
/* Update trip points according to the module data. */
- return mlxsw_thermal_module_trips_update(dev, core, module_tz);
+ return mlxsw_thermal_module_trips_update(dev, core, module_tz,
+ crit_temp, emerg_temp);
}
static void mlxsw_thermal_module_fini(struct mlxsw_thermal_module *module_tz)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index f9419cc53480..5304309ecb9d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -9463,6 +9463,14 @@ MLXSW_ITEM32(reg, mtmp, sensor_index, 0x00, 0, 12);
((s16)((GENMASK(15, 0) + (v_) + 1) \
* 125)); })
+/* reg_mtmp_max_operational_temperature
+ * The highest temperature in the nominal operational range. Reading is in
+ * units of 0.125 degrees Celsius.
+ * For modules, this is the SFF critical temperature threshold.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mtmp, max_operational_temperature, 0x04, 16, 16);
+
/* reg_mtmp_temperature
* Temperature reading from the sensor. Reading is in units of 0.125
* degrees Celsius.
@@ -9541,7 +9549,9 @@ static inline void mlxsw_reg_mtmp_pack(char *payload, u16 sensor_index,
}
static inline void mlxsw_reg_mtmp_unpack(char *payload, int *p_temp,
- int *p_max_temp, char *sensor_name)
+ int *p_max_temp, int *p_temp_hi,
+ int *p_max_oper_temp,
+ char *sensor_name)
{
s16 temp;
@@ -9553,6 +9563,14 @@ static inline void mlxsw_reg_mtmp_unpack(char *payload, int *p_temp,
temp = mlxsw_reg_mtmp_max_temperature_get(payload);
*p_max_temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp);
}
+ if (p_temp_hi) {
+ temp = mlxsw_reg_mtmp_temperature_threshold_hi_get(payload);
+ *p_temp_hi = MLXSW_REG_MTMP_TEMP_TO_MC(temp);
+ }
+ if (p_max_oper_temp) {
+ temp = mlxsw_reg_mtmp_max_operational_temperature_get(payload);
+ *p_max_oper_temp = MLXSW_REG_MTMP_TEMP_TO_MC(temp);
+ }
if (sensor_name)
mlxsw_reg_mtmp_sensor_name_memcpy_from(payload, sensor_name);
}
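Every MTMP temperature field is reported in units of 0.125 degrees Celsius and unpacked into millidegrees through MLXSW_REG_MTMP_TEMP_TO_MC. A userspace model of the conversion; the sign handling here uses a plain int16_t cast, assumed equivalent to the macro's GENMASK-based two's-complement arithmetic:

/* 0.125 C per unit == 125 millidegrees per unit. */
#include <stdint.h>
#include <stdio.h>

static int temp_to_mc(uint16_t raw)
{
	return (int16_t)raw * 125;
}

int main(void)
{
	printf("%d\n", temp_to_mc(0x0140));	/* 320 units ->  40000 mC */
	printf("%d\n", temp_to_mc(0xFFB0));	/* -80 units -> -10000 mC */
	return 0;
}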
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 6decc5a43f98..7e221ef01437 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -4312,9 +4312,6 @@ static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_key key;
struct mlxsw_sp_nexthop *nh;
- if (mlxsw_sp->router->aborted)
- return;
-
key.fib_nh = fib_nh;
nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
if (!nh)
@@ -5410,7 +5407,6 @@ mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
&rt->fib6_nh->fib_nh_gw6))
return nh;
- continue;
}
return NULL;
@@ -6422,9 +6418,6 @@ mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node;
int err;
- if (mlxsw_sp->router->aborted)
- return 0;
-
if (fen_info->fi->nh &&
!mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, fen_info->fi->nh->id))
return 0;
@@ -6485,9 +6478,6 @@ static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node;
int err;
- if (mlxsw_sp->router->aborted)
- return 0;
-
fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
if (!fib4_entry)
return 0;
@@ -7070,9 +7060,6 @@ static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
struct fib6_info *rt = rt_arr[0];
int err;
- if (mlxsw_sp->router->aborted)
- return 0;
-
if (rt->fib6_src.plen)
return -EINVAL;
@@ -7136,9 +7123,6 @@ static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
struct fib6_info *rt = rt_arr[0];
int err;
- if (mlxsw_sp->router->aborted)
- return 0;
-
if (rt->fib6_src.plen)
return -EINVAL;
@@ -7180,9 +7164,6 @@ static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
struct fib6_info *rt = rt_arr[0];
int err;
- if (mlxsw_sp->router->aborted)
- return 0;
-
if (mlxsw_sp_fib6_rt_should_ignore(rt))
return 0;
@@ -7211,55 +7192,6 @@ static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
return err;
}
-static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
- enum mlxsw_sp_l3proto proto,
- u8 tree_id)
-{
- const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
- enum mlxsw_reg_ralxx_protocol ralxx_proto =
- (enum mlxsw_reg_ralxx_protocol) proto;
- struct mlxsw_sp_fib_entry_priv *priv;
- char xralta_pl[MLXSW_REG_XRALTA_LEN];
- char xralst_pl[MLXSW_REG_XRALST_LEN];
- int i, err;
-
- mlxsw_reg_xralta_pack(xralta_pl, true, ralxx_proto, tree_id);
- err = ll_ops->ralta_write(mlxsw_sp, xralta_pl);
- if (err)
- return err;
-
- mlxsw_reg_xralst_pack(xralst_pl, 0xff, tree_id);
- err = ll_ops->ralst_write(mlxsw_sp, xralst_pl);
- if (err)
- return err;
-
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
- struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
- struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
- char xraltb_pl[MLXSW_REG_XRALTB_LEN];
-
- mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
- mlxsw_reg_xraltb_pack(xraltb_pl, vr->id, ralxx_proto, tree_id);
- err = ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
- if (err)
- return err;
-
- priv = mlxsw_sp_fib_entry_priv_create(ll_ops);
- if (IS_ERR(priv))
- return PTR_ERR(priv);
-
- ll_ops->fib_entry_pack(op_ctx, proto, MLXSW_SP_FIB_ENTRY_OP_WRITE,
- vr->id, 0, NULL, priv);
- ll_ops->fib_entry_act_ip2me_pack(op_ctx);
- err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, NULL);
- mlxsw_sp_fib_entry_priv_put(priv);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static struct mlxsw_sp_mr_table *
mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
{
@@ -7276,9 +7208,6 @@ static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_table *mrt;
struct mlxsw_sp_vr *vr;
- if (mlxsw_sp->router->aborted)
- return 0;
-
vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
if (IS_ERR(vr))
return PTR_ERR(vr);
@@ -7293,9 +7222,6 @@ static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_table *mrt;
struct mlxsw_sp_vr *vr;
- if (mlxsw_sp->router->aborted)
- return;
-
vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
if (WARN_ON(!vr))
return;
@@ -7313,9 +7239,6 @@ mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif;
struct mlxsw_sp_vr *vr;
- if (mlxsw_sp->router->aborted)
- return 0;
-
vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
if (IS_ERR(vr))
return PTR_ERR(vr);
@@ -7334,9 +7257,6 @@ mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_mr_table *mrt;
struct mlxsw_sp_vr *vr;
- if (mlxsw_sp->router->aborted)
- return;
-
vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
if (WARN_ON(!vr))
return;
@@ -7346,25 +7266,6 @@ mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_vr_put(mlxsw_sp, vr);
}
-static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
-{
- enum mlxsw_sp_l3proto proto = MLXSW_SP_L3_PROTO_IPV4;
- int err;
-
- err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
- MLXSW_SP_LPM_TREE_MIN);
- if (err)
- return err;
-
- /* The multicast router code does not need an abort trap as by default,
- * packets that don't match any routes are trapped to the CPU.
- */
-
- proto = MLXSW_SP_L3_PROTO_IPV6;
- return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
- MLXSW_SP_LPM_TREE_MIN + 1);
-}
-
static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node)
{
@@ -7451,20 +7352,6 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp->router->adj_discard_index_valid = false;
}
-static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
-{
- int err;
-
- if (mlxsw_sp->router->aborted)
- return;
- dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
- mlxsw_sp_router_fib_flush(mlxsw_sp);
- mlxsw_sp->router->aborted = true;
- err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
- if (err)
- dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
-}
-
struct mlxsw_sp_fib6_event {
struct fib6_info **rt_arr;
unsigned int nrt6;
@@ -7546,7 +7433,7 @@ static void mlxsw_sp_router_fib4_event_process(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_router_fib4_replace(mlxsw_sp, op_ctx, &fib_event->fen_info);
if (err) {
mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
- mlxsw_sp_router_fib_abort(mlxsw_sp);
+ dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
&fib_event->fen_info);
}
@@ -7581,7 +7468,7 @@ static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
fib_event->fib6_event.nrt6);
if (err) {
mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
- mlxsw_sp_router_fib_abort(mlxsw_sp);
+ dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
fib6_event->rt_arr,
fib6_event->nrt6);
@@ -7593,7 +7480,7 @@ static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
fib_event->fib6_event.nrt6);
if (err) {
mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
- mlxsw_sp_router_fib_abort(mlxsw_sp);
+ dev_warn(mlxsw_sp->bus_info->dev, "FIB append failed.\n");
mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
fib6_event->rt_arr,
fib6_event->nrt6);
@@ -7625,7 +7512,7 @@ static void mlxsw_sp_router_fibmr_event_process(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_event->men_info, replace);
if (err)
- mlxsw_sp_router_fib_abort(mlxsw_sp);
+ dev_warn(mlxsw_sp->bus_info->dev, "MR entry add failed.\n");
mr_cache_put(fib_event->men_info.mfc);
break;
case FIB_EVENT_ENTRY_DEL:
@@ -7636,7 +7523,7 @@ static void mlxsw_sp_router_fibmr_event_process(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
&fib_event->ven_info);
if (err)
- mlxsw_sp_router_fib_abort(mlxsw_sp);
+ dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
dev_put(fib_event->ven_info.dev);
break;
case FIB_EVENT_VIF_DEL:
@@ -7800,9 +7687,6 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event,
if (event == FIB_EVENT_RULE_DEL)
return 0;
- if (mlxsw_sp->router->aborted)
- return 0;
-
fr_info = container_of(info, struct fib_rule_notifier_info, info);
rule = fr_info->rule;
@@ -7860,10 +7744,6 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
case FIB_EVENT_ENTRY_ADD:
case FIB_EVENT_ENTRY_REPLACE:
case FIB_EVENT_ENTRY_APPEND:
- if (router->aborted) {
- NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
- return notifier_from_errno(-EINVAL);
- }
if (info->family == AF_INET) {
struct fib_entry_notifier_info *fen_info = ptr;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index be7708a375e1..c5d7007f9173 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -58,7 +58,6 @@ struct mlxsw_sp_router {
#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
struct list_head nexthop_neighs_list;
struct list_head ipip_list;
- bool aborted;
struct notifier_block nexthop_nb;
struct notifier_block fib_nb;
struct notifier_block netevent_nb;
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index caa251d0e381..b27713906d3a 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -1135,6 +1135,10 @@ static int ks8842_probe(struct platform_device *pdev)
unsigned i;
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem) {
+ dev_err(&pdev->dev, "Invalid resource\n");
+ return -EINVAL;
+ }
if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME))
goto err_mem_region;
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 13eef6e9bd2d..831518466de2 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -1022,30 +1022,23 @@ static int ks8851_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
*
* Read and check the TX/RX memory selftest information.
*/
-static int ks8851_read_selftest(struct ks8851_net *ks)
+static void ks8851_read_selftest(struct ks8851_net *ks)
{
unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
- int ret = 0;
unsigned rd;
rd = ks8851_rdreg16(ks, KS_MBIR);
if ((rd & both_done) != both_done) {
netdev_warn(ks->netdev, "Memory selftest not finished\n");
- return 0;
+ return;
}
- if (rd & MBIR_TXMBFA) {
+ if (rd & MBIR_TXMBFA)
netdev_err(ks->netdev, "TX memory selftest fail\n");
- ret |= 1;
- }
- if (rd & MBIR_RXMBFA) {
+ if (rd & MBIR_RXMBFA)
netdev_err(ks->netdev, "RX memory selftest fail\n");
- ret |= 2;
- }
-
- return 0;
}
/* driver bus management functions */
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 3532bfe936f6..7945eb5e2fe8 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -25,6 +25,7 @@
#include <linux/crc32.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/micrel_phy.h>
/* DMA Registers */
@@ -271,84 +272,15 @@
#define KS884X_PHY_CTRL_OFFSET 0x00
-/* Mode Control Register */
-#define PHY_REG_CTRL 0
-
-#define PHY_RESET 0x8000
-#define PHY_LOOPBACK 0x4000
-#define PHY_SPEED_100MBIT 0x2000
-#define PHY_AUTO_NEG_ENABLE 0x1000
-#define PHY_POWER_DOWN 0x0800
-#define PHY_MII_DISABLE 0x0400
-#define PHY_AUTO_NEG_RESTART 0x0200
-#define PHY_FULL_DUPLEX 0x0100
-#define PHY_COLLISION_TEST 0x0080
-#define PHY_HP_MDIX 0x0020
-#define PHY_FORCE_MDIX 0x0010
-#define PHY_AUTO_MDIX_DISABLE 0x0008
-#define PHY_REMOTE_FAULT_DISABLE 0x0004
-#define PHY_TRANSMIT_DISABLE 0x0002
-#define PHY_LED_DISABLE 0x0001
-
#define KS884X_PHY_STATUS_OFFSET 0x02
-/* Mode Status Register */
-#define PHY_REG_STATUS 1
-
-#define PHY_100BT4_CAPABLE 0x8000
-#define PHY_100BTX_FD_CAPABLE 0x4000
-#define PHY_100BTX_CAPABLE 0x2000
-#define PHY_10BT_FD_CAPABLE 0x1000
-#define PHY_10BT_CAPABLE 0x0800
-#define PHY_MII_SUPPRESS_CAPABLE 0x0040
-#define PHY_AUTO_NEG_ACKNOWLEDGE 0x0020
-#define PHY_REMOTE_FAULT 0x0010
-#define PHY_AUTO_NEG_CAPABLE 0x0008
-#define PHY_LINK_STATUS 0x0004
-#define PHY_JABBER_DETECT 0x0002
-#define PHY_EXTENDED_CAPABILITY 0x0001
-
#define KS884X_PHY_ID_1_OFFSET 0x04
#define KS884X_PHY_ID_2_OFFSET 0x06
-/* PHY Identifier Registers */
-#define PHY_REG_ID_1 2
-#define PHY_REG_ID_2 3
-
#define KS884X_PHY_AUTO_NEG_OFFSET 0x08
-/* Auto-Negotiation Advertisement Register */
-#define PHY_REG_AUTO_NEGOTIATION 4
-
-#define PHY_AUTO_NEG_NEXT_PAGE 0x8000
-#define PHY_AUTO_NEG_REMOTE_FAULT 0x2000
-/* Not supported. */
-#define PHY_AUTO_NEG_ASYM_PAUSE 0x0800
-#define PHY_AUTO_NEG_SYM_PAUSE 0x0400
-#define PHY_AUTO_NEG_100BT4 0x0200
-#define PHY_AUTO_NEG_100BTX_FD 0x0100
-#define PHY_AUTO_NEG_100BTX 0x0080
-#define PHY_AUTO_NEG_10BT_FD 0x0040
-#define PHY_AUTO_NEG_10BT 0x0020
-#define PHY_AUTO_NEG_SELECTOR 0x001F
-#define PHY_AUTO_NEG_802_3 0x0001
-
-#define PHY_AUTO_NEG_PAUSE (PHY_AUTO_NEG_SYM_PAUSE | PHY_AUTO_NEG_ASYM_PAUSE)
-
#define KS884X_PHY_REMOTE_CAP_OFFSET 0x0A
-/* Auto-Negotiation Link Partner Ability Register */
-#define PHY_REG_REMOTE_CAPABILITY 5
-
-#define PHY_REMOTE_NEXT_PAGE 0x8000
-#define PHY_REMOTE_ACKNOWLEDGE 0x4000
-#define PHY_REMOTE_REMOTE_FAULT 0x2000
-#define PHY_REMOTE_SYM_PAUSE 0x0400
-#define PHY_REMOTE_100BTX_FD 0x0100
-#define PHY_REMOTE_100BTX 0x0080
-#define PHY_REMOTE_10BT_FD 0x0040
-#define PHY_REMOTE_10BT 0x0020
-
/* P1VCT */
#define KS884X_P1VCT_P 0x04F0
#define KS884X_P1PHYCTRL_P 0x04F2
@@ -2886,15 +2818,6 @@ static void sw_block_addr(struct ksz_hw *hw)
}
}
-#define PHY_LINK_SUPPORT \
- (PHY_AUTO_NEG_ASYM_PAUSE | \
- PHY_AUTO_NEG_SYM_PAUSE | \
- PHY_AUTO_NEG_100BT4 | \
- PHY_AUTO_NEG_100BTX_FD | \
- PHY_AUTO_NEG_100BTX | \
- PHY_AUTO_NEG_10BT_FD | \
- PHY_AUTO_NEG_10BT)
-
static inline void hw_r_phy_ctrl(struct ksz_hw *hw, int phy, u16 *data)
{
*data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
@@ -3238,16 +3161,18 @@ static void determine_flow_ctrl(struct ksz_hw *hw, struct ksz_port *port,
rx = tx = 0;
if (port->force_link)
rx = tx = 1;
- if (remote & PHY_AUTO_NEG_SYM_PAUSE) {
- if (local & PHY_AUTO_NEG_SYM_PAUSE) {
+ if (remote & LPA_PAUSE_CAP) {
+ if (local & ADVERTISE_PAUSE_CAP) {
rx = tx = 1;
- } else if ((remote & PHY_AUTO_NEG_ASYM_PAUSE) &&
- (local & PHY_AUTO_NEG_PAUSE) ==
- PHY_AUTO_NEG_ASYM_PAUSE) {
+ } else if ((remote & LPA_PAUSE_ASYM) &&
+ (local &
+ (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) ==
+ ADVERTISE_PAUSE_ASYM) {
tx = 1;
}
- } else if (remote & PHY_AUTO_NEG_ASYM_PAUSE) {
- if ((local & PHY_AUTO_NEG_PAUSE) == PHY_AUTO_NEG_PAUSE)
+ } else if (remote & LPA_PAUSE_ASYM) {
+ if ((local & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM))
+ == (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM))
rx = 1;
}
if (!hw->ksz_switch)
@@ -3428,16 +3353,16 @@ static void port_force_link_speed(struct ksz_port *port)
phy = KS884X_PHY_1_CTRL_OFFSET + p * PHY_CTRL_INTERVAL;
hw_r_phy_ctrl(hw, phy, &data);
- data &= ~PHY_AUTO_NEG_ENABLE;
+ data &= ~BMCR_ANENABLE;
if (10 == port->speed)
- data &= ~PHY_SPEED_100MBIT;
+ data &= ~BMCR_SPEED100;
else if (100 == port->speed)
- data |= PHY_SPEED_100MBIT;
+ data |= BMCR_SPEED100;
if (1 == port->duplex)
- data &= ~PHY_FULL_DUPLEX;
+ data &= ~BMCR_FULLDPLX;
else if (2 == port->duplex)
- data |= PHY_FULL_DUPLEX;
+ data |= BMCR_FULLDPLX;
hw_w_phy_ctrl(hw, phy, data);
}
}
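This file drops its private PHY_* register definitions in favor of the generic ones from mii.h (BMCR_*, ADVERTISE_*, LPA_*). The rewritten pause handling in determine_flow_ctrl() implements the IEEE 802.3 Annex 28B resolution table; a compilable userspace model, with the mii.h bit values written out:

/* Pause resolution per 802.3 Annex 28B, mirroring the driver logic. */
#include <stdio.h>

#define ADV_PAUSE_CAP  0x0400	/* ADVERTISE_PAUSE_CAP  */
#define ADV_PAUSE_ASYM 0x0800	/* ADVERTISE_PAUSE_ASYM */
#define LPA_CAP        0x0400	/* LPA_PAUSE_CAP        */
#define LPA_ASYM       0x0800	/* LPA_PAUSE_ASYM       */

static void resolve_pause(unsigned int local, unsigned int remote,
			  int *rx, int *tx)
{
	*rx = *tx = 0;
	if (remote & LPA_CAP) {
		if (local & ADV_PAUSE_CAP)
			*rx = *tx = 1;
		else if ((remote & LPA_ASYM) &&
			 (local & (ADV_PAUSE_CAP | ADV_PAUSE_ASYM)) ==
			 ADV_PAUSE_ASYM)
			*tx = 1;	/* partner receives, we only send */
	} else if ((remote & LPA_ASYM) &&
		   (local & (ADV_PAUSE_CAP | ADV_PAUSE_ASYM)) ==
		   (ADV_PAUSE_CAP | ADV_PAUSE_ASYM)) {
		*rx = 1;		/* we receive pause frames only */
	}
}

int main(void)
{
	int rx, tx;

	resolve_pause(ADV_PAUSE_CAP, LPA_CAP, &rx, &tx);
	printf("rx=%d tx=%d\n", rx, tx);	/* symmetric pause: 1 1 */
	return 0;
}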
diff --git a/drivers/net/ethernet/microchip/encx24j600_hw.h b/drivers/net/ethernet/microchip/encx24j600_hw.h
index f604a260ede7..fac61a8fbd02 100644
--- a/drivers/net/ethernet/microchip/encx24j600_hw.h
+++ b/drivers/net/ethernet/microchip/encx24j600_hw.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/**
+/*
* encx24j600_hw.h: Register definitions
*
*/
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index b85733942053..5249b64f4fc5 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -481,13 +481,12 @@ static int moxart_mac_probe(struct platform_device *pdev)
priv->ndev = ndev;
priv->pdev = pdev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ndev->base_addr = res->start;
- priv->base = devm_ioremap_resource(p_dev, res);
+ priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto init_fail;
}
+ ndev->base_addr = res->start;
spin_lock_init(&priv->txlock);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index c84c8bf2bc20..fc99ad8e4a38 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3815,6 +3815,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev,
"invalid sram_size %dB or board span %ldB\n",
mgp->sram_size, mgp->board_span);
+ status = -EINVAL;
goto abort_with_ioremap;
}
memcpy_fromio(mgp->eeprom_strings,
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 27a65ab3d501..0b017d4f5c08 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -1101,6 +1101,8 @@ static int s2io_print_pci_mode(struct s2io_nic *nic)
* @nic: device private variable
* @link: link status (UP/DOWN) used to enable/disable continuous
* transmit interrupts
* @may_sleep: indicates whether the function may sleep while waiting for
* the command to complete
* Description: The function configures transmit traffic interrupts
* Return Value: SUCCESS on success and
* '-1' on failure
@@ -3323,6 +3325,8 @@ static void s2io_updt_xpak_counter(struct net_device *dev)
* @addr: address
* @busy_bit: bit to check for busy
* @bit_state: state to check
* @may_sleep: indicates whether the function may sleep while waiting for
* the command to complete
* Description: Function that waits for a command to Write into RMAC
* ADDR DATA registers to be completed and returns either success or
* error depending on whether the command was complete or not.
@@ -4868,6 +4872,8 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev)
/**
* s2io_set_multicast - entry point for multicast address enable/disable.
* @dev : pointer to the device structure
* @may_sleep: indicates whether the function may sleep while waiting for
* the command to complete
* Description:
* This function is a driver entry point which gets called by the kernel
* whenever multicast addresses must be enabled/disabled. This also gets
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index b113c158d6e3..0528b8f49061 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -87,7 +87,7 @@ static unsigned int bw_percentage[VXGE_HW_MAX_VIRTUAL_PATHS] =
module_param_array(bw_percentage, uint, NULL, 0);
static struct vxge_drv_config *driver_config;
-static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
+static void vxge_reset_all_vpaths(struct vxgedev *vdev);
static inline int is_vxge_card_up(struct vxgedev *vdev)
{
@@ -1606,7 +1606,6 @@ static void vxge_config_ci_for_tti_rti(struct vxgedev *vdev)
static int do_vxge_reset(struct vxgedev *vdev, int event)
{
- enum vxge_hw_status status;
int ret = 0, vp_id, i;
vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
@@ -1709,14 +1708,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
netif_tx_stop_all_queues(vdev->ndev);
if (event == VXGE_LL_FULL_RESET) {
- status = vxge_reset_all_vpaths(vdev);
- if (status != VXGE_HW_OK) {
- vxge_debug_init(VXGE_ERR,
- "fatal: %s: can not reset vpaths",
- vdev->ndev->name);
- ret = -EPERM;
- goto out;
- }
+ vxge_reset_all_vpaths(vdev);
}
if (event == VXGE_LL_COMPL_RESET) {
@@ -1969,9 +1961,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
}
/* reset vpaths */
-static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
+static void vxge_reset_all_vpaths(struct vxgedev *vdev)
{
- enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath;
int i;
@@ -1986,18 +1977,16 @@ static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
"vxge_hw_vpath_recover_"
"from_reset failed for vpath: "
"%d", i);
- return status;
+ return;
}
} else {
vxge_debug_init(VXGE_ERR,
"vxge_hw_vpath_reset failed for "
"vpath:%d", i);
- return status;
+ return;
}
}
}
-
- return status;
}
/* close vpaths */
@@ -2676,11 +2665,7 @@ static int vxge_set_features(struct net_device *dev, netdev_features_t features)
/* !netif_running() ensured by vxge_fix_features() */
vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH);
- if (vxge_reset_all_vpaths(vdev) != VXGE_HW_OK) {
- dev->features = features ^ NETIF_F_RXHASH;
- vdev->devh->config.rth_en = !!(dev->features & NETIF_F_RXHASH);
- return -EIO;
- }
+ vxge_reset_all_vpaths(vdev);
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index d31772ae511d..9cff3d48acbc 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -51,7 +51,8 @@ nfp-objs += \
flower/metadata.o \
flower/offload.o \
flower/tunnel_conf.o \
- flower/qos_conf.o
+ flower/qos_conf.o \
+ flower/conntrack.o
endif
ifeq ($(CONFIG_BPF_SYSCALL),y)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
new file mode 100644
index 000000000000..9ea77bb3b69c
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -0,0 +1,1178 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2021 Corigine, Inc. */
+
+#include "conntrack.h"
+
+const struct rhashtable_params nfp_tc_ct_merge_params = {
+ .head_offset = offsetof(struct nfp_fl_ct_tc_merge,
+ hash_node),
+ .key_len = sizeof(unsigned long) * 2,
+ .key_offset = offsetof(struct nfp_fl_ct_tc_merge, cookie),
+ .automatic_shrinking = true,
+};
+
+const struct rhashtable_params nfp_nft_ct_merge_params = {
+ .head_offset = offsetof(struct nfp_fl_nft_tc_merge,
+ hash_node),
+ .key_len = sizeof(unsigned long) * 3,
+ .key_offset = offsetof(struct nfp_fl_nft_tc_merge, cookie),
+ .automatic_shrinking = true,
+};
+
+static struct flow_action_entry *get_flow_act(struct flow_rule *rule,
+ enum flow_action_id act_id);
+
+/**
+ * get_hashentry() - Wrapper around hashtable lookup.
+ * @ht: hashtable where entry could be found
+ * @key: key to lookup
+ * @params: hashtable params
+ * @size: size of entry to allocate if not in table
+ *
+ * Returns an entry from a hashtable. If the entry does not exist
+ * yet, allocate memory for it and return the new entry.
+ */
+static void *get_hashentry(struct rhashtable *ht, void *key,
+ const struct rhashtable_params params, size_t size)
+{
+ void *result;
+
+ result = rhashtable_lookup_fast(ht, key, params);
+
+ if (result)
+ return result;
+
+ result = kzalloc(size, GFP_KERNEL);
+ if (!result)
+ return ERR_PTR(-ENOMEM);
+
+ return result;
+}
+
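get_hashentry() is a lookup-or-allocate helper: a hit returns the live entry, a miss returns a zeroed allocation the caller is expected to fill in and insert into the table. A userspace analogue built on a plain linked list instead of an rhashtable, signalling allocation failure with NULL rather than ERR_PTR(-ENOMEM):

/* Lookup-or-allocate over a toy list. */
#include <stdio.h>
#include <stdlib.h>

struct entry {
	unsigned long key;
	struct entry *next;
	int refcnt;
};

static struct entry *lookup(struct entry *head, unsigned long key)
{
	for (; head; head = head->next)
		if (head->key == key)
			return head;
	return NULL;
}

static struct entry *get_entry(struct entry *head, unsigned long key)
{
	struct entry *e = lookup(head, key);

	if (e)
		return e;		/* already present */
	return calloc(1, sizeof(*e));	/* caller initializes and links it */
}

int main(void)
{
	struct entry *e = get_entry(NULL, 42);

	printf("fresh entry refcnt=%d\n", e ? e->refcnt : -1);	/* 0 */
	free(e);
	return 0;
}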
+bool is_pre_ct_flow(struct flow_cls_offload *flow)
+{
+ struct flow_action_entry *act;
+ int i;
+
+ flow_action_for_each(i, act, &flow->rule->action) {
+ if (act->id == FLOW_ACTION_CT && !act->ct.action)
+ return true;
+ }
+ return false;
+}
+
+bool is_post_ct_flow(struct flow_cls_offload *flow)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+ struct flow_dissector *dissector = rule->match.dissector;
+ struct flow_match_ct ct;
+
+ if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) {
+ flow_rule_match_ct(rule, &ct);
+ if (ct.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED)
+ return true;
+ }
+ return false;
+}
+
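These two predicates split offloaded rules into the halves of a conntrack offload: a rule is pre-CT when it carries a CT action whose action field is clear (it sends packets to conntrack), and post-CT when its match includes an established conntrack state. A minimal model of the post-CT test; the flag value is a stand-in for TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED:

/* Post-CT classification reduced to the state-bit test. */
#include <stdbool.h>
#include <stdio.h>

#define CT_ESTABLISHED (1 << 1)	/* assumed stand-in value */

static bool is_post_ct(unsigned int matched_ct_state)
{
	return matched_ct_state & CT_ESTABLISHED;
}

int main(void)
{
	printf("%d\n", is_post_ct(CT_ESTABLISHED));	/* 1 */
	printf("%d\n", is_post_ct(0));			/* 0 */
	return 0;
}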
+static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
+ struct nfp_fl_ct_flow_entry *entry2)
+{
+ unsigned int ovlp_keys = entry1->rule->match.dissector->used_keys &
+ entry2->rule->match.dissector->used_keys;
+ bool out;
+
+ /* Check the overlapping fields one by one; the unmasked parts
+ * must not conflict with each other.
+ */
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match1, match2;
+
+ flow_rule_match_control(entry1->rule, &match1);
+ flow_rule_match_control(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match1, match2;
+
+ flow_rule_match_basic(entry1->rule, &match1);
+ flow_rule_match_basic(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match1, match2;
+
+ flow_rule_match_ipv4_addrs(entry1->rule, &match1);
+ flow_rule_match_ipv4_addrs(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match1, match2;
+
+ flow_rule_match_ipv6_addrs(entry1->rule, &match1);
+ flow_rule_match_ipv6_addrs(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match1, match2;
+
+ flow_rule_match_ports(entry1->rule, &match1);
+ flow_rule_match_ports(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match1, match2;
+
+ flow_rule_match_eth_addrs(entry1->rule, &match1);
+ flow_rule_match_eth_addrs(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match1, match2;
+
+ flow_rule_match_vlan(entry1->rule, &match1);
+ flow_rule_match_vlan(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_MPLS)) {
+ struct flow_match_mpls match1, match2;
+
+ flow_rule_match_mpls(entry1->rule, &match1);
+ flow_rule_match_mpls(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_TCP)) {
+ struct flow_match_tcp match1, match2;
+
+ flow_rule_match_tcp(entry1->rule, &match1);
+ flow_rule_match_tcp(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match1, match2;
+
+ flow_rule_match_ip(entry1->rule, &match1);
+ flow_rule_match_ip(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid match1, match2;
+
+ flow_rule_match_enc_keyid(entry1->rule, &match1);
+ flow_rule_match_enc_keyid(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match1, match2;
+
+ flow_rule_match_enc_ipv4_addrs(entry1->rule, &match1);
+ flow_rule_match_enc_ipv4_addrs(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match1, match2;
+
+ flow_rule_match_enc_ipv6_addrs(entry1->rule, &match1);
+ flow_rule_match_enc_ipv6_addrs(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+ struct flow_match_control match1, match2;
+
+ flow_rule_match_enc_control(entry1->rule, &match1);
+ flow_rule_match_enc_control(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IP)) {
+ struct flow_match_ip match1, match2;
+
+ flow_rule_match_enc_ip(entry1->rule, &match1);
+ flow_rule_match_enc_ip(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+ struct flow_match_enc_opts match1, match2;
+
+ flow_rule_match_enc_opts(entry1->rule, &match1);
+ flow_rule_match_enc_opts(entry2->rule, &match2);
+ COMPARE_UNMASKED_FIELDS(match1, match2, &out);
+ if (out)
+ goto check_failed;
+ }
+
+ return 0;
+
+check_failed:
+ return -EINVAL;
+}
+
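+/* Reject a mangle action that rewrites a header field which the other
+ * rule in the merge also matches on, since the packet would no longer
+ * match after the rewrite. Pedit offsets are 32-bit aligned, hence the
+ * round_down(..., 4) when checking sub-word fields such as tos and
+ * hop_limit.
+ */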
+static int nfp_ct_check_mangle_merge(struct flow_action_entry *a_in,
+ struct flow_rule *rule)
+{
+ enum flow_action_mangle_base htype = a_in->mangle.htype;
+ u32 offset = a_in->mangle.offset;
+
+ switch (htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS))
+ return -EOPNOTSUPP;
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_ip(rule, &match);
+ if (offset == offsetof(struct iphdr, ttl) &&
+ match.mask->ttl)
+ return -EOPNOTSUPP;
+ if (offset == round_down(offsetof(struct iphdr, tos), 4) &&
+ match.mask->tos)
+ return -EOPNOTSUPP;
+ }
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+ if (offset == offsetof(struct iphdr, saddr) &&
+ match.mask->src)
+ return -EOPNOTSUPP;
+ if (offset == offsetof(struct iphdr, daddr) &&
+ match.mask->dst)
+ return -EOPNOTSUPP;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_ip(rule, &match);
+ if (offset == round_down(offsetof(struct ipv6hdr, hop_limit), 4) &&
+ match.mask->ttl)
+ return -EOPNOTSUPP;
+ /* for ipv6, tos and flow_lbl are in the same word */
+ if (offset == round_down(offsetof(struct ipv6hdr, flow_lbl), 4) &&
+ match.mask->tos)
+ return -EOPNOTSUPP;
+ }
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
+ if (offset >= offsetof(struct ipv6hdr, saddr) &&
+ offset < offsetof(struct ipv6hdr, daddr) &&
+ memchr_inv(&match.mask->src, 0, sizeof(match.mask->src)))
+ return -EOPNOTSUPP;
+ if (offset >= offsetof(struct ipv6hdr, daddr) &&
+ offset < sizeof(struct ipv6hdr) &&
+ memchr_inv(&match.mask->dst, 0, sizeof(match.mask->dst)))
+ return -EOPNOTSUPP;
+ }
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ /* currently only can modify ports */
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
+ return -EOPNOTSUPP;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
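+/* Check the pre_ct and nft action lists against the later stages of
+ * the merge: conflicting mangles are rejected, as is any VLAN or MPLS
+ * manipulation, which is not supported in a merged flow.
+ */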
+static int nfp_ct_merge_act_check(struct nfp_fl_ct_flow_entry *pre_ct_entry,
+ struct nfp_fl_ct_flow_entry *post_ct_entry,
+ struct nfp_fl_ct_flow_entry *nft_entry)
+{
+ struct flow_action_entry *act;
+ int err, i;
+
+ /* Check for pre_ct->action conflicts */
+ flow_action_for_each(i, act, &pre_ct_entry->rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_MANGLE:
+ err = nfp_ct_check_mangle_merge(act, nft_entry->rule);
+ if (err)
+ return err;
+ err = nfp_ct_check_mangle_merge(act, post_ct_entry->rule);
+ if (err)
+ return err;
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_POP:
+ case FLOW_ACTION_VLAN_MANGLE:
+ case FLOW_ACTION_MPLS_PUSH:
+ case FLOW_ACTION_MPLS_POP:
+ case FLOW_ACTION_MPLS_MANGLE:
+ return -EOPNOTSUPP;
+ default:
+ break;
+ }
+ }
+
+ /* Check for nft->action conflicts */
+ flow_action_for_each(i, act, &nft_entry->rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_MANGLE:
+ err = nfp_ct_check_mangle_merge(act, post_ct_entry->rule);
+ if (err)
+ return err;
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ case FLOW_ACTION_VLAN_POP:
+ case FLOW_ACTION_VLAN_MANGLE:
+ case FLOW_ACTION_MPLS_PUSH:
+ case FLOW_ACTION_MPLS_POP:
+ case FLOW_ACTION_MPLS_MANGLE:
+ return -EOPNOTSUPP;
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
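+/* Require that the mark/labels written by the nft CT_METADATA action
+ * agree, under the post_ct mask, with the conntrack values the post_ct
+ * rule matches on; otherwise the merged flow could never match.
+ */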
+static int nfp_ct_check_meta(struct nfp_fl_ct_flow_entry *post_ct_entry,
+ struct nfp_fl_ct_flow_entry *nft_entry)
+{
+ struct flow_dissector *dissector = post_ct_entry->rule->match.dissector;
+ struct flow_action_entry *ct_met;
+ struct flow_match_ct ct;
+ int i;
+
+ ct_met = get_flow_act(nft_entry->rule, FLOW_ACTION_CT_METADATA);
+ if (ct_met && (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT))) {
+ u32 *act_lbl;
+
+ act_lbl = ct_met->ct_metadata.labels;
+ flow_rule_match_ct(post_ct_entry->rule, &ct);
+ for (i = 0; i < 4; i++) {
+ if ((ct.key->ct_labels[i] & ct.mask->ct_labels[i]) ^
+ (act_lbl[i] & ct.mask->ct_labels[i]))
+ return -EINVAL;
+ }
+
+ if ((ct.key->ct_mark & ct.mask->ct_mark) ^
+ (ct_met->ct_metadata.mark & ct.mask->ct_mark))
+ return -EINVAL;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
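+/* Hardware offload of a merged flow is not wired up yet; these are
+ * placeholders so the merge bookkeeping can be exercised.
+ */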
+static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
+{
+ return 0;
+}
+
+static int nfp_fl_ct_del_offload(struct nfp_app *app, unsigned long cookie,
+ struct net_device *netdev)
+{
+ return 0;
+}
+
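+/* Merge an nft flow with an existing pre_ct/post_ct pair: run the
+ * compatibility checks, build the three-part cookie, and if the
+ * combination is new link it into both parents' children lists.
+ */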
+static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt,
+ struct nfp_fl_ct_flow_entry *nft_entry,
+ struct nfp_fl_ct_tc_merge *tc_m_entry)
+{
+ struct nfp_fl_ct_flow_entry *post_ct_entry, *pre_ct_entry;
+ struct nfp_fl_nft_tc_merge *nft_m_entry;
+ unsigned long new_cookie[3];
+ int err;
+
+ pre_ct_entry = tc_m_entry->pre_ct_parent;
+ post_ct_entry = tc_m_entry->post_ct_parent;
+
+ err = nfp_ct_merge_act_check(pre_ct_entry, post_ct_entry, nft_entry);
+ if (err)
+ return err;
+
+ /* Check that the two tc flows are also compatible with
+ * the nft entry. No need to check the pre_ct and post_ct
+ * entries as that was already done during pre_merge.
+ * The nft entry does not have a netdev or chain populated, so
+ * skip this check.
+ */
+ err = nfp_ct_merge_check(pre_ct_entry, nft_entry);
+ if (err)
+ return err;
+ err = nfp_ct_merge_check(post_ct_entry, nft_entry);
+ if (err)
+ return err;
+ err = nfp_ct_check_meta(post_ct_entry, nft_entry);
+ if (err)
+ return err;
+
+ /* Combine the tc_merge and nft cookies into the lookup key. */
+ new_cookie[0] = tc_m_entry->cookie[0];
+ new_cookie[1] = tc_m_entry->cookie[1];
+ new_cookie[2] = nft_entry->cookie;
+ nft_m_entry = get_hashentry(&zt->nft_merge_tb,
+ &new_cookie,
+ nfp_nft_ct_merge_params,
+ sizeof(*nft_m_entry));
+
+ if (IS_ERR(nft_m_entry))
+ return PTR_ERR(nft_m_entry);
+
+ /* nft_m_entry already present, not merging again */
+ if (!memcmp(&new_cookie, nft_m_entry->cookie, sizeof(new_cookie)))
+ return 0;
+
+ memcpy(&nft_m_entry->cookie, &new_cookie, sizeof(new_cookie));
+ nft_m_entry->zt = zt;
+ nft_m_entry->tc_m_parent = tc_m_entry;
+ nft_m_entry->nft_parent = nft_entry;
+ nft_m_entry->tc_flower_cookie = 0;
+ /* Copy the netdev from the pre_ct entry. When the tc_m_entry was created
+ * it only combined flows whose netdevs matched, so either parent's netdev
+ * can be used here.
+ */
+ nft_m_entry->netdev = pre_ct_entry->netdev;
+
+ /* Add this entry to the tc_m_list and nft_flow lists */
+ list_add(&nft_m_entry->tc_merge_list, &tc_m_entry->children);
+ list_add(&nft_m_entry->nft_flow_list, &nft_entry->children);
+
+ /* Generate offload structure and send to nfp */
+ err = nfp_fl_ct_add_offload(nft_m_entry);
+ if (err)
+ goto err_nft_ct_offload;
+
+ err = rhashtable_insert_fast(&zt->nft_merge_tb, &nft_m_entry->hash_node,
+ nfp_nft_ct_merge_params);
+ if (err)
+ goto err_nft_ct_merge_insert;
+
+ zt->nft_merge_count++;
+
+ return err;
+
+err_nft_ct_merge_insert:
+ nfp_fl_ct_del_offload(zt->priv->app, nft_m_entry->tc_flower_cookie,
+ nft_m_entry->netdev);
+err_nft_ct_offload:
+ list_del(&nft_m_entry->tc_merge_list);
+ list_del(&nft_m_entry->nft_flow_list);
+ kfree(nft_m_entry);
+ return err;
+}
+
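+/* Pair a pre_ct flow with a post_ct flow from the same netdev and
+ * chain, then offer the new pair to every nft flow already seen in
+ * this zone.
+ */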
+static int nfp_ct_do_tc_merge(struct nfp_fl_ct_zone_entry *zt,
+ struct nfp_fl_ct_flow_entry *ct_entry1,
+ struct nfp_fl_ct_flow_entry *ct_entry2)
+{
+ struct nfp_fl_ct_flow_entry *post_ct_entry, *pre_ct_entry;
+ struct nfp_fl_ct_flow_entry *nft_entry, *nft_tmp;
+ struct nfp_fl_ct_tc_merge *m_entry;
+ unsigned long new_cookie[2];
+ int err;
+
+ if (ct_entry1->type == CT_TYPE_PRE_CT) {
+ pre_ct_entry = ct_entry1;
+ post_ct_entry = ct_entry2;
+ } else {
+ post_ct_entry = ct_entry1;
+ pre_ct_entry = ct_entry2;
+ }
+
+ if (post_ct_entry->netdev != pre_ct_entry->netdev)
+ return -EINVAL;
+ /* Checks that the chain_index of the filter matches the
+ * chain_index of the GOTO action.
+ */
+ if (post_ct_entry->chain_index != pre_ct_entry->chain_index)
+ return -EINVAL;
+
+ err = nfp_ct_merge_check(post_ct_entry, pre_ct_entry);
+ if (err)
+ return err;
+
+ new_cookie[0] = pre_ct_entry->cookie;
+ new_cookie[1] = post_ct_entry->cookie;
+ m_entry = get_hashentry(&zt->tc_merge_tb, &new_cookie,
+ nfp_tc_ct_merge_params, sizeof(*m_entry));
+ if (IS_ERR(m_entry))
+ return PTR_ERR(m_entry);
+
+ /* m_entry already present, not merging again */
+ if (!memcmp(&new_cookie, m_entry->cookie, sizeof(new_cookie)))
+ return 0;
+
+ memcpy(&m_entry->cookie, &new_cookie, sizeof(new_cookie));
+ m_entry->zt = zt;
+ m_entry->post_ct_parent = post_ct_entry;
+ m_entry->pre_ct_parent = pre_ct_entry;
+
+ /* Add this entry to the pre_ct and post_ct lists */
+ list_add(&m_entry->post_ct_list, &post_ct_entry->children);
+ list_add(&m_entry->pre_ct_list, &pre_ct_entry->children);
+ INIT_LIST_HEAD(&m_entry->children);
+
+ err = rhashtable_insert_fast(&zt->tc_merge_tb, &m_entry->hash_node,
+ nfp_tc_ct_merge_params);
+ if (err)
+ goto err_ct_tc_merge_insert;
+ zt->tc_merge_count++;
+
+ /* Merge with existing nft flows */
+ list_for_each_entry_safe(nft_entry, nft_tmp, &zt->nft_flows_list,
+ list_node) {
+ nfp_ct_do_nft_merge(zt, nft_entry, m_entry);
+ }
+
+ return 0;
+
+err_ct_tc_merge_insert:
+ list_del(&m_entry->post_ct_list);
+ list_del(&m_entry->pre_ct_list);
+ kfree(m_entry);
+ return err;
+}
+
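+/* Look up or create the zone table entry for @zone. The wildcarded
+ * zone is a singleton kept outside the zone hashtable in ct_zone_wc.
+ */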
+static struct
+nfp_fl_ct_zone_entry *get_nfp_zone_entry(struct nfp_flower_priv *priv,
+ u16 zone, bool wildcarded)
+{
+ struct nfp_fl_ct_zone_entry *zt;
+ int err;
+
+ if (wildcarded && priv->ct_zone_wc)
+ return priv->ct_zone_wc;
+
+ if (!wildcarded) {
+ zt = get_hashentry(&priv->ct_zone_table, &zone,
+ nfp_zone_table_params, sizeof(*zt));
+
+ /* If priv is set this is an existing entry, just return it */
+ if (IS_ERR(zt) || zt->priv)
+ return zt;
+ } else {
+ zt = kzalloc(sizeof(*zt), GFP_KERNEL);
+ if (!zt)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ zt->zone = zone;
+ zt->priv = priv;
+ zt->nft = NULL;
+
+ /* Init the various hash tables and lists */
+ INIT_LIST_HEAD(&zt->pre_ct_list);
+ INIT_LIST_HEAD(&zt->post_ct_list);
+ INIT_LIST_HEAD(&zt->nft_flows_list);
+
+ err = rhashtable_init(&zt->tc_merge_tb, &nfp_tc_ct_merge_params);
+ if (err)
+ goto err_tc_merge_tb_init;
+
+ err = rhashtable_init(&zt->nft_merge_tb, &nfp_nft_ct_merge_params);
+ if (err)
+ goto err_nft_merge_tb_init;
+
+ if (wildcarded) {
+ priv->ct_zone_wc = zt;
+ } else {
+ err = rhashtable_insert_fast(&priv->ct_zone_table,
+ &zt->hash_node,
+ nfp_zone_table_params);
+ if (err)
+ goto err_zone_insert;
+ }
+
+ return zt;
+
+err_zone_insert:
+ rhashtable_destroy(&zt->nft_merge_tb);
+err_nft_merge_tb_init:
+ rhashtable_destroy(&zt->tc_merge_tb);
+err_tc_merge_tb_init:
+ kfree(zt);
+ return ERR_PTR(err);
+}
+
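+/* Allocate a ct flow entry and take a private copy of the rule data,
+ * then index it by cookie in the ct map table. For nft flows even the
+ * match data is duplicated, since the flow object is freed once the
+ * netfilter callback returns.
+ */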
+static struct
+nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,
+ struct net_device *netdev,
+ struct flow_cls_offload *flow,
+ bool is_nft, struct netlink_ext_ack *extack)
+{
+ struct nf_flow_match *nft_match = NULL;
+ struct nfp_fl_ct_flow_entry *entry;
+ struct nfp_fl_ct_map_entry *map;
+ struct flow_action_entry *act;
+ int err, i;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
+
+ entry->rule = flow_rule_alloc(flow->rule->action.num_entries);
+ if (!entry->rule) {
+ err = -ENOMEM;
+ goto err_pre_ct_rule;
+ }
+
+ /* nft flows get destroyed after the callback returns, so a full
+ * copy is needed instead of just a reference.
+ */
+ if (is_nft) {
+ nft_match = kzalloc(sizeof(*nft_match), GFP_KERNEL);
+ if (!nft_match) {
+ err = -ENOMEM;
+ goto err_pre_ct_act;
+ }
+ memcpy(&nft_match->dissector, flow->rule->match.dissector,
+ sizeof(nft_match->dissector));
+ memcpy(&nft_match->mask, flow->rule->match.mask,
+ sizeof(nft_match->mask));
+ memcpy(&nft_match->key, flow->rule->match.key,
+ sizeof(nft_match->key));
+ entry->rule->match.dissector = &nft_match->dissector;
+ entry->rule->match.mask = &nft_match->mask;
+ entry->rule->match.key = &nft_match->key;
+ } else {
+ entry->rule->match.dissector = flow->rule->match.dissector;
+ entry->rule->match.mask = flow->rule->match.mask;
+ entry->rule->match.key = flow->rule->match.key;
+ }
+
+ entry->zt = zt;
+ entry->netdev = netdev;
+ entry->cookie = flow->cookie;
+ entry->chain_index = flow->common.chain_index;
+ entry->tun_offset = NFP_FL_CT_NO_TUN;
+
+ /* Copy over action data. We do not get a handle to the original
+ * tcf_action data, and the flow objects get destroyed once the
+ * callback returns, so saving a pointer is not possible either;
+ * the data has to be copied.
+ */
+ entry->rule->action.num_entries = flow->rule->action.num_entries;
+ flow_action_for_each(i, act, &flow->rule->action) {
+ struct flow_action_entry *new_act;
+
+ new_act = &entry->rule->action.entries[i];
+ memcpy(new_act, act, sizeof(struct flow_action_entry));
+ /* Tunnel encap is a special case: the tunnel info needs to
+ * be allocated and copied as well.
+ */
+ if (act->id == FLOW_ACTION_TUNNEL_ENCAP) {
+ struct ip_tunnel_info *tun = act->tunnel;
+ size_t tun_size = sizeof(*tun) + tun->options_len;
+
+ new_act->tunnel = kmemdup(tun, tun_size, GFP_ATOMIC);
+ if (!new_act->tunnel) {
+ err = -ENOMEM;
+ goto err_pre_ct_tun_cp;
+ }
+ entry->tun_offset = i;
+ }
+ }
+
+ INIT_LIST_HEAD(&entry->children);
+
+ /* Now add a ct map entry to flower-priv */
+ map = get_hashentry(&zt->priv->ct_map_table, &flow->cookie,
+ nfp_ct_map_params, sizeof(*map));
+ if (IS_ERR(map)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "offload error: ct map entry creation failed");
+ err = -ENOMEM;
+ goto err_ct_flow_insert;
+ }
+ map->cookie = flow->cookie;
+ map->ct_entry = entry;
+ err = rhashtable_insert_fast(&zt->priv->ct_map_table,
+ &map->hash_node,
+ nfp_ct_map_params);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "offload error: ct map entry table add failed");
+ goto err_map_insert;
+ }
+
+ return entry;
+
+err_map_insert:
+ kfree(map);
+err_ct_flow_insert:
+ if (entry->tun_offset != NFP_FL_CT_NO_TUN)
+ kfree(entry->rule->action.entries[entry->tun_offset].tunnel);
+err_pre_ct_tun_cp:
+ kfree(nft_match);
+err_pre_ct_act:
+ kfree(entry->rule);
+err_pre_ct_rule:
+ kfree(entry);
+ return ERR_PTR(err);
+}
+
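+/* Undo a single nft merge: remove the offloaded flow if one exists,
+ * drop the entry from the hashtable and unlink it from both parents.
+ */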
+static void cleanup_nft_merge_entry(struct nfp_fl_nft_tc_merge *m_entry)
+{
+ struct nfp_fl_ct_zone_entry *zt;
+ int err;
+
+ zt = m_entry->zt;
+
+ /* Flow is in HW, need to delete */
+ if (m_entry->tc_flower_cookie) {
+ err = nfp_fl_ct_del_offload(zt->priv->app, m_entry->tc_flower_cookie,
+ m_entry->netdev);
+ if (err)
+ return;
+ }
+
+ WARN_ON_ONCE(rhashtable_remove_fast(&zt->nft_merge_tb,
+ &m_entry->hash_node,
+ nfp_nft_ct_merge_params));
+ zt->nft_merge_count--;
+ list_del(&m_entry->tc_merge_list);
+ list_del(&m_entry->nft_flow_list);
+
+ kfree(m_entry);
+}
+
+static void nfp_free_nft_merge_children(void *entry, bool is_nft_flow)
+{
+ struct nfp_fl_nft_tc_merge *m_entry, *tmp;
+
+ /* These merge entries are part of two lists: the children list of
+ * an nft flow entry and the children list of a tc_merge structure.
+ * Iterate through the relevant list and clean up the entries.
+ */
+
+ if (is_nft_flow) {
+ /* Need to iterate through list of nft_flow entries */
+ struct nfp_fl_ct_flow_entry *ct_entry = entry;
+
+ list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
+ nft_flow_list) {
+ cleanup_nft_merge_entry(m_entry);
+ }
+ } else {
+ /* Need to iterate through list of tc_merged_flow entries */
+ struct nfp_fl_ct_tc_merge *ct_entry = entry;
+
+ list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
+ tc_merge_list) {
+ cleanup_nft_merge_entry(m_entry);
+ }
+ }
+}
+
+static void nfp_del_tc_merge_entry(struct nfp_fl_ct_tc_merge *m_ent)
+{
+ struct nfp_fl_ct_zone_entry *zt;
+ int err;
+
+ zt = m_ent->zt;
+ err = rhashtable_remove_fast(&zt->tc_merge_tb,
+ &m_ent->hash_node,
+ nfp_tc_ct_merge_params);
+ if (err)
+ pr_warn("WARNING: could not remove merge_entry from hashtable\n");
+ zt->tc_merge_count--;
+ list_del(&m_ent->post_ct_list);
+ list_del(&m_ent->pre_ct_list);
+
+ if (!list_empty(&m_ent->children))
+ nfp_free_nft_merge_children(m_ent, false);
+ kfree(m_ent);
+}
+
+static void nfp_free_tc_merge_children(struct nfp_fl_ct_flow_entry *entry)
+{
+ struct nfp_fl_ct_tc_merge *m_ent, *tmp;
+
+ switch (entry->type) {
+ case CT_TYPE_PRE_CT:
+ list_for_each_entry_safe(m_ent, tmp, &entry->children, pre_ct_list) {
+ nfp_del_tc_merge_entry(m_ent);
+ }
+ break;
+ case CT_TYPE_POST_CT:
+ list_for_each_entry_safe(m_ent, tmp, &entry->children, post_ct_list) {
+ nfp_del_tc_merge_entry(m_ent);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+void nfp_fl_ct_clean_flow_entry(struct nfp_fl_ct_flow_entry *entry)
+{
+ list_del(&entry->list_node);
+
+ if (!list_empty(&entry->children)) {
+ if (entry->type == CT_TYPE_NFT)
+ nfp_free_nft_merge_children(entry, true);
+ else
+ nfp_free_tc_merge_children(entry);
+ }
+
+ if (entry->tun_offset != NFP_FL_CT_NO_TUN)
+ kfree(entry->rule->action.entries[entry->tun_offset].tunnel);
+
+ if (entry->type == CT_TYPE_NFT) {
+ struct nf_flow_match *nft_match;
+
+ nft_match = container_of(entry->rule->match.dissector,
+ struct nf_flow_match, dissector);
+ kfree(nft_match);
+ }
+
+ kfree(entry->rule);
+ kfree(entry);
+}
+
+static struct flow_action_entry *get_flow_act(struct flow_rule *rule,
+ enum flow_action_id act_id)
+{
+ struct flow_action_entry *act = NULL;
+ int i;
+
+ flow_action_for_each(i, act, &rule->action) {
+ if (act->id == act_id)
+ return act;
+ }
+ return NULL;
+}
+
+static void
+nfp_ct_merge_tc_entries(struct nfp_fl_ct_flow_entry *ct_entry1,
+ struct nfp_fl_ct_zone_entry *zt_src,
+ struct nfp_fl_ct_zone_entry *zt_dst)
+{
+ struct nfp_fl_ct_flow_entry *ct_entry2, *ct_tmp;
+ struct list_head *ct_list;
+
+ if (ct_entry1->type == CT_TYPE_PRE_CT)
+ ct_list = &zt_src->post_ct_list;
+ else if (ct_entry1->type == CT_TYPE_POST_CT)
+ ct_list = &zt_src->pre_ct_list;
+ else
+ return;
+
+ list_for_each_entry_safe(ct_entry2, ct_tmp, ct_list,
+ list_node) {
+ nfp_ct_do_tc_merge(zt_dst, ct_entry2, ct_entry1);
+ }
+}
+
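+/* Offer a new nft flow to every tc merge pair in the zone. The walk is
+ * paused around each merge attempt since rhashtable walks run under
+ * RCU while the merge path allocates memory and may sleep.
+ */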
+static void
+nfp_ct_merge_nft_with_tc(struct nfp_fl_ct_flow_entry *nft_entry,
+ struct nfp_fl_ct_zone_entry *zt)
+{
+ struct nfp_fl_ct_tc_merge *tc_merge_entry;
+ struct rhashtable_iter iter;
+
+ rhashtable_walk_enter(&zt->tc_merge_tb, &iter);
+ rhashtable_walk_start(&iter);
+ while ((tc_merge_entry = rhashtable_walk_next(&iter)) != NULL) {
+ if (IS_ERR(tc_merge_entry))
+ continue;
+ rhashtable_walk_stop(&iter);
+ nfp_ct_do_nft_merge(zt, nft_entry, tc_merge_entry);
+ rhashtable_walk_start(&iter);
+ }
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+}
+
+int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
+ struct net_device *netdev,
+ struct flow_cls_offload *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct flow_action_entry *ct_act, *ct_goto;
+ struct nfp_fl_ct_flow_entry *ct_entry;
+ struct nfp_fl_ct_zone_entry *zt;
+ int err;
+
+ ct_act = get_flow_act(flow->rule, FLOW_ACTION_CT);
+ if (!ct_act) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: Conntrack action empty in conntrack offload");
+ return -EOPNOTSUPP;
+ }
+
+ ct_goto = get_flow_act(flow->rule, FLOW_ACTION_GOTO);
+ if (!ct_goto) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: Conntrack requires ACTION_GOTO");
+ return -EOPNOTSUPP;
+ }
+
+ zt = get_nfp_zone_entry(priv, ct_act->ct.zone, false);
+ if (IS_ERR(zt)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "offload error: Could not create zone table entry");
+ return PTR_ERR(zt);
+ }
+
+ if (!zt->nft) {
+ zt->nft = ct_act->ct.flow_table;
+ err = nf_flow_table_offload_add_cb(zt->nft, nfp_fl_ct_handle_nft_flow, zt);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "offload error: Could not register nft_callback");
+ return err;
+ }
+ }
+
+ /* Add entry to pre_ct_list */
+ ct_entry = nfp_fl_ct_add_flow(zt, netdev, flow, false, extack);
+ if (IS_ERR(ct_entry))
+ return PTR_ERR(ct_entry);
+ ct_entry->type = CT_TYPE_PRE_CT;
+ ct_entry->chain_index = ct_goto->chain_index;
+ list_add(&ct_entry->list_node, &zt->pre_ct_list);
+ zt->pre_ct_count++;
+
+ nfp_ct_merge_tc_entries(ct_entry, zt, zt);
+
+ /* Need to check and merge with tables in the wc_zone as well */
+ if (priv->ct_zone_wc)
+ nfp_ct_merge_tc_entries(ct_entry, priv->ct_zone_wc, zt);
+
+ return 0;
+}
+
+int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
+ struct net_device *netdev,
+ struct flow_cls_offload *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+ struct nfp_fl_ct_flow_entry *ct_entry;
+ struct nfp_fl_ct_zone_entry *zt;
+ bool wildcarded = false;
+ struct flow_match_ct ct;
+
+ flow_rule_match_ct(rule, &ct);
+ if (!ct.mask->ct_zone) {
+ wildcarded = true;
+ } else if (ct.mask->ct_zone != U16_MAX) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: partially wildcarded ct_zone is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ zt = get_nfp_zone_entry(priv, ct.key->ct_zone, wildcarded);
+ if (IS_ERR(zt)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "offload error: Could not create zone table entry");
+ return PTR_ERR(zt);
+ }
+
+ /* Add entry to post_ct_list */
+ ct_entry = nfp_fl_ct_add_flow(zt, netdev, flow, false, extack);
+ if (IS_ERR(ct_entry))
+ return PTR_ERR(ct_entry);
+
+ ct_entry->type = CT_TYPE_POST_CT;
+ ct_entry->chain_index = flow->common.chain_index;
+ list_add(&ct_entry->list_node, &zt->post_ct_list);
+ zt->post_ct_count++;
+
+ if (wildcarded) {
+ /* Iterate through all zone tables, looking for pre_ct
+ * entries to merge with.
+ */
+ struct rhashtable_iter iter;
+ struct nfp_fl_ct_zone_entry *zone_table;
+
+ rhashtable_walk_enter(&priv->ct_zone_table, &iter);
+ rhashtable_walk_start(&iter);
+ while ((zone_table = rhashtable_walk_next(&iter)) != NULL) {
+ if (IS_ERR(zone_table))
+ continue;
+ rhashtable_walk_stop(&iter);
+ nfp_ct_merge_tc_entries(ct_entry, zone_table, zone_table);
+ rhashtable_walk_start(&iter);
+ }
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+ } else {
+ nfp_ct_merge_tc_entries(ct_entry, zt, zt);
+ }
+
+ return 0;
+}
+
+static int
+nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offload *flow)
+{
+ struct nfp_fl_ct_map_entry *ct_map_ent;
+ struct nfp_fl_ct_flow_entry *ct_entry;
+ struct netlink_ext_ack *extack = NULL;
+
+ ASSERT_RTNL();
+
+ extack = flow->common.extack;
+ switch (flow->command) {
+ case FLOW_CLS_REPLACE:
+ /* Netfilter can request offload multiple times for the same
+ * flow - protect against adding duplicates.
+ */
+ ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
+ nfp_ct_map_params);
+ if (!ct_map_ent) {
+ ct_entry = nfp_fl_ct_add_flow(zt, NULL, flow, true, extack);
+ if (IS_ERR(ct_entry))
+ return PTR_ERR(ct_entry);
+ ct_entry->type = CT_TYPE_NFT;
+ list_add(&ct_entry->list_node, &zt->nft_flows_list);
+ zt->nft_flows_count++;
+ nfp_ct_merge_nft_with_tc(ct_entry, zt);
+ }
+ return 0;
+ case FLOW_CLS_DESTROY:
+ ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
+ nfp_ct_map_params);
+ return nfp_fl_ct_del_flow(ct_map_ent);
+ case FLOW_CLS_STATS:
+ return 0;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct flow_cls_offload *flow = type_data;
+ struct nfp_fl_ct_zone_entry *zt = cb_priv;
+ int err = -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ rtnl_lock();
+ err = nfp_fl_ct_offload_nft_flow(zt, flow);
+ rtnl_unlock();
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return err;
+}
+
+static void
+nfp_fl_ct_clean_nft_entries(struct nfp_fl_ct_zone_entry *zt)
+{
+ struct nfp_fl_ct_flow_entry *nft_entry, *ct_tmp;
+ struct nfp_fl_ct_map_entry *ct_map_ent;
+
+ list_for_each_entry_safe(nft_entry, ct_tmp, &zt->nft_flows_list,
+ list_node) {
+ ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table,
+ &nft_entry->cookie,
+ nfp_ct_map_params);
+ nfp_fl_ct_del_flow(ct_map_ent);
+ }
+}
+
+int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
+{
+ struct nfp_fl_ct_flow_entry *ct_entry;
+ struct nfp_fl_ct_zone_entry *zt;
+ struct rhashtable *m_table;
+
+ if (!ct_map_ent)
+ return -ENOENT;
+
+ zt = ct_map_ent->ct_entry->zt;
+ ct_entry = ct_map_ent->ct_entry;
+ m_table = &zt->priv->ct_map_table;
+
+ switch (ct_entry->type) {
+ case CT_TYPE_PRE_CT:
+ zt->pre_ct_count--;
+ rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
+ nfp_ct_map_params);
+ nfp_fl_ct_clean_flow_entry(ct_entry);
+ kfree(ct_map_ent);
+
+ /* If this is the last pre_ct_rule it means that it is
+ * very likely that the nft table will be cleaned up next,
+ * as this happens on the removal of the last act_ct flow.
+ * However we cannot deregister the callback on the removal
+ * of the last nft flow as this runs into a deadlock situation.
+ * So deregister the callback on removal of the last pre_ct flow
+ * and remove any remaining nft flow entries. We also cannot
+ * save this state and delete the callback later since the
+ * nft table would already have been freed at that time.
+ */
+ if (!zt->pre_ct_count) {
+ nf_flow_table_offload_del_cb(zt->nft,
+ nfp_fl_ct_handle_nft_flow,
+ zt);
+ zt->nft = NULL;
+ nfp_fl_ct_clean_nft_entries(zt);
+ }
+ break;
+ case CT_TYPE_POST_CT:
+ zt->post_ct_count--;
+ rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
+ nfp_ct_map_params);
+ nfp_fl_ct_clean_flow_entry(ct_entry);
+ kfree(ct_map_ent);
+ break;
+ case CT_TYPE_NFT:
+ zt->nft_flows_count--;
+ rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
+ nfp_ct_map_params);
+ nfp_fl_ct_clean_flow_entry(ct_map_ent->ct_entry);
+ kfree(ct_map_ent);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.h b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
new file mode 100644
index 000000000000..170b6cdb8cd0
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h
@@ -0,0 +1,231 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2021 Corigine, Inc. */
+
+#ifndef __NFP_FLOWER_CONNTRACK_H__
+#define __NFP_FLOWER_CONNTRACK_H__ 1
+
+#include <net/netfilter/nf_flow_table.h>
+#include "main.h"
+
+#define NFP_FL_CT_NO_TUN 0xff
+
+#define COMPARE_UNMASKED_FIELDS(__match1, __match2, __out) \
+ do { \
+ typeof(__match1) _match1 = (__match1); \
+ typeof(__match2) _match2 = (__match2); \
+ bool *_out = (__out); \
+ int i, size = sizeof(*(_match1).key); \
+ char *k1, *m1, *k2, *m2; \
+ *_out = false; \
+ k1 = (char *)_match1.key; \
+ m1 = (char *)_match1.mask; \
+ k2 = (char *)_match2.key; \
+ m2 = (char *)_match2.mask; \
+ for (i = 0; i < size; i++) \
+ if ((k1[i] & m1[i] & m2[i]) ^ \
+ (k2[i] & m1[i] & m2[i])) { \
+ *_out = true; \
+ break; \
+ } \
+ } while (0)
+
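+/* Illustrative use of the macro above (rule1/rule2 are hypothetical
+ * flow_rule pointers): flag a conflict between the bits both rules
+ * actually match on.
+ *
+ *	struct flow_match_basic m1, m2;
+ *	bool conflict;
+ *
+ *	flow_rule_match_basic(rule1, &m1);
+ *	flow_rule_match_basic(rule2, &m2);
+ *	COMPARE_UNMASKED_FIELDS(m1, m2, &conflict);
+ *	if (conflict)
+ *		return -EINVAL;
+ */
+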
+extern const struct rhashtable_params nfp_zone_table_params;
+extern const struct rhashtable_params nfp_ct_map_params;
+extern const struct rhashtable_params nfp_tc_ct_merge_params;
+extern const struct rhashtable_params nfp_nft_ct_merge_params;
+
+/**
+ * struct nfp_fl_ct_zone_entry - Zone entry containing conntrack flow information
+ * @zone: The zone number, used as lookup key in hashtable
+ * @hash_node: Used by the hashtable
+ * @priv: Pointer to nfp_flower_priv data
+ * @nft: Pointer to nf_flowtable for this zone
+ *
+ * @pre_ct_list: The pre_ct_list of nfp_fl_ct_flow_entry entries
+ * @pre_ct_count: Keep count of the number of pre_ct entries
+ *
+ * @post_ct_list: The post_ct_list of nfp_fl_ct_flow_entry entries
+ * @post_ct_count: Keep count of the number of post_ct entries
+ *
+ * @tc_merge_tb: The table of merged tc flows
+ * @tc_merge_count: Keep count of the number of merged tc entries
+ *
+ * @nft_flows_list: The list of nft related nfp_fl_ct_flow_entry entries
+ * @nft_flows_count: Keep count of the number of nft_flow entries
+ *
+ * @nft_merge_tb: The table of merged tc+nft flows
+ * @nft_merge_count: Keep count of the number of merged tc+nft entries
+ */
+struct nfp_fl_ct_zone_entry {
+ u16 zone;
+ struct rhash_head hash_node;
+
+ struct nfp_flower_priv *priv;
+ struct nf_flowtable *nft;
+
+ struct list_head pre_ct_list;
+ unsigned int pre_ct_count;
+
+ struct list_head post_ct_list;
+ unsigned int post_ct_count;
+
+ struct rhashtable tc_merge_tb;
+ unsigned int tc_merge_count;
+
+ struct list_head nft_flows_list;
+ unsigned int nft_flows_count;
+
+ struct rhashtable nft_merge_tb;
+ unsigned int nft_merge_count;
+};
+
+enum ct_entry_type {
+ CT_TYPE_PRE_CT,
+ CT_TYPE_NFT,
+ CT_TYPE_POST_CT,
+};
+
+/**
+ * struct nfp_fl_ct_flow_entry - Flow entry containing conntrack flow information
+ * @cookie: Flow cookie, same as original TC flow, used as key
+ * @list_node: Used by the list
+ * @chain_index: Chain index of the original flow
+ * @netdev: netdev structure.
+ * @type: Type of pre-entry from enum ct_entry_type
+ * @zt: Reference to the zone table this belongs to
+ * @children: List of tc_merge flows this flow forms part of
+ * @rule: Reference to the original TC flow rule
+ * @stats: Used to cache stats for updating
+ * @tun_offset: Used to indicate tunnel action offset in action list
+ */
+struct nfp_fl_ct_flow_entry {
+ unsigned long cookie;
+ struct list_head list_node;
+ u32 chain_index;
+ enum ct_entry_type type;
+ struct net_device *netdev;
+ struct nfp_fl_ct_zone_entry *zt;
+ struct list_head children;
+ struct flow_rule *rule;
+ struct flow_stats stats;
+ u8 tun_offset; /* Set to NFP_FL_CT_NO_TUN if no tun */
+};
+
+/**
+ * struct nfp_fl_ct_tc_merge - Merge of two flows from tc
+ * @cookie: Flow cookie, combination of pre and post ct cookies
+ * @hash_node: Used by the hashtable
+ * @pre_ct_list: This entry is part of a pre_ct_list
+ * @post_ct_list: This entry is part of a post_ct_list
+ * @zt: Reference to the zone table this belongs to
+ * @pre_ct_parent: The pre_ct_parent
+ * @post_ct_parent: The post_ct_parent
+ * @children: List of nft merged entries
+ */
+struct nfp_fl_ct_tc_merge {
+ unsigned long cookie[2];
+ struct rhash_head hash_node;
+ struct list_head pre_ct_list;
+ struct list_head post_ct_list;
+ struct nfp_fl_ct_zone_entry *zt;
+ struct nfp_fl_ct_flow_entry *pre_ct_parent;
+ struct nfp_fl_ct_flow_entry *post_ct_parent;
+ struct list_head children;
+};
+
+/**
+ * struct nfp_fl_nft_tc_merge - Merge of tc_merge flows with nft flow
+ * @netdev: Ingress netdev
+ * @cookie: Flow cookie, combination of tc_merge and nft cookies
+ * @hash_node: Used by the hashtable
+ * @zt: Reference to the zone table this belongs to
+ * @nft_flow_list: This entry is part of a nft_flows_list
+ * @tc_merge_list: This entry is part of a tc_merge children list
+ * @tc_m_parent: The tc_merge parent
+ * @nft_parent: The nft_entry parent
+ * @tc_flower_cookie: The cookie of the flow offloaded to the nfp
+ * @flow_pay: Reference to the offloaded flow struct
+ */
+struct nfp_fl_nft_tc_merge {
+ struct net_device *netdev;
+ unsigned long cookie[3];
+ struct rhash_head hash_node;
+ struct nfp_fl_ct_zone_entry *zt;
+ struct list_head nft_flow_list;
+ struct list_head tc_merge_list;
+ struct nfp_fl_ct_tc_merge *tc_m_parent;
+ struct nfp_fl_ct_flow_entry *nft_parent;
+ unsigned long tc_flower_cookie;
+ struct nfp_fl_payload *flow_pay;
+};
+
+/**
+ * struct nfp_fl_ct_map_entry - Map between flow cookie and specific ct_flow
+ * @cookie: Flow cookie, same as original TC flow, used as key
+ * @hash_node: Used by the hashtable
+ * @ct_entry: Pointer to corresponding ct_entry
+ */
+struct nfp_fl_ct_map_entry {
+ unsigned long cookie;
+ struct rhash_head hash_node;
+ struct nfp_fl_ct_flow_entry *ct_entry;
+};
+
+bool is_pre_ct_flow(struct flow_cls_offload *flow);
+bool is_post_ct_flow(struct flow_cls_offload *flow);
+
+/**
+ * nfp_fl_ct_handle_pre_ct() - Handles -trk conntrack rules
+ * @priv: Pointer to app priv
+ * @netdev: netdev structure.
+ * @flow: TC flower classifier offload structure.
+ * @extack: Extack pointer for errors
+ *
+ * Adds a new entry to the relevant zone table and tries to
+ * merge with other +trk+est entries and offload if possible.
+ *
+ * Return: negative value on error, 0 if configured successfully.
+ */
+int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
+ struct net_device *netdev,
+ struct flow_cls_offload *flow,
+ struct netlink_ext_ack *extack);
+/**
+ * nfp_fl_ct_handle_post_ct() - Handles +trk+est conntrack rules
+ * @priv: Pointer to app priv
+ * @netdev: netdev structure.
+ * @flow: TC flower classifier offload structure.
+ * @extack: Extack pointer for errors
+ *
+ * Adds a new entry to the relevant zone table and tries to
+ * merge with other -trk entries and offload if possible.
+ *
+ * Return: negative value on error, 0 if configured successfully.
+ */
+int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
+ struct net_device *netdev,
+ struct flow_cls_offload *flow,
+ struct netlink_ext_ack *extack);
+
+/**
+ * nfp_fl_ct_clean_flow_entry() - Free a nfp_fl_ct_flow_entry
+ * @entry: Flow entry to cleanup
+ */
+void nfp_fl_ct_clean_flow_entry(struct nfp_fl_ct_flow_entry *entry);
+
+/**
+ * nfp_fl_ct_del_flow() - Handle flow_del callbacks for conntrack
+ * @ct_map_ent: ct map entry for the flow that needs deleting
+ *
+ * Return: negative value on error, 0 if the flow was removed successfully.
+ */
+int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent);
+
+/**
+ * nfp_fl_ct_handle_nft_flow() - Handle flower flow callbacks for nft table
+ * @type: Type provided by callback
+ * @type_data: Callback data
+ * @cb_priv: Pointer to data provided when registering the callback, in this
+ * case it's the zone table.
+ *
+ * Return: negative value on error, 0 if the flow was handled successfully.
+ */
+int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
+#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 31377923ea3d..0fbd682ccf72 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -193,6 +193,9 @@ struct nfp_fl_internal_ports {
* @qos_stats_lock: Lock on qos stats updates
* @pre_tun_rule_cnt: Number of pre-tunnel rules offloaded
* @merge_table: Hash table to store merged flows
+ * @ct_zone_table: Hash table used to store the different zones
+ * @ct_zone_wc: Special zone entry for wildcarded zone matches
+ * @ct_map_table: Hash table used to reference ct flows
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -227,6 +230,9 @@ struct nfp_flower_priv {
spinlock_t qos_stats_lock; /* Protect the qos stats */
int pre_tun_rule_cnt;
struct rhashtable merge_table;
+ struct rhashtable ct_zone_table;
+ struct nfp_fl_ct_zone_entry *ct_zone_wc;
+ struct rhashtable ct_map_table;
};
/**
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 327bb56b3ef5..621113650a9b 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -9,6 +9,7 @@
#include <net/pkt_cls.h>
#include "cmsg.h"
+#include "conntrack.h"
#include "main.h"
#include "../nfp_app.h"
@@ -496,6 +497,20 @@ const struct rhashtable_params merge_table_params = {
.key_len = sizeof(u64),
};
+const struct rhashtable_params nfp_zone_table_params = {
+ .head_offset = offsetof(struct nfp_fl_ct_zone_entry, hash_node),
+ .key_len = sizeof(u16),
+ .key_offset = offsetof(struct nfp_fl_ct_zone_entry, zone),
+ .automatic_shrinking = false,
+};
+
+const struct rhashtable_params nfp_ct_map_params = {
+ .head_offset = offsetof(struct nfp_fl_ct_map_entry, hash_node),
+ .key_len = sizeof(unsigned long),
+ .key_offset = offsetof(struct nfp_fl_ct_map_entry, cookie),
+ .automatic_shrinking = true,
+};
+
int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
unsigned int host_num_mems)
{
@@ -516,6 +531,14 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
if (err)
goto err_free_stats_ctx_table;
+ err = rhashtable_init(&priv->ct_zone_table, &nfp_zone_table_params);
+ if (err)
+ goto err_free_merge_table;
+
+ err = rhashtable_init(&priv->ct_map_table, &nfp_ct_map_params);
+ if (err)
+ goto err_free_ct_zone_table;
+
get_random_bytes(&priv->mask_id_seed, sizeof(priv->mask_id_seed));
/* Init ring buffer and unallocated mask_ids. */
@@ -523,7 +546,7 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
kmalloc_array(NFP_FLOWER_MASK_ENTRY_RS,
NFP_FLOWER_MASK_ELEMENT_RS, GFP_KERNEL);
if (!priv->mask_ids.mask_id_free_list.buf)
- goto err_free_merge_table;
+ goto err_free_ct_map_table;
priv->mask_ids.init_unallocated = NFP_FLOWER_MASK_ENTRY_RS - 1;
@@ -560,6 +583,10 @@ err_free_last_used:
kfree(priv->mask_ids.last_used);
err_free_mask_id:
kfree(priv->mask_ids.mask_id_free_list.buf);
+err_free_ct_map_table:
+ rhashtable_destroy(&priv->ct_map_table);
+err_free_ct_zone_table:
+ rhashtable_destroy(&priv->ct_zone_table);
err_free_merge_table:
rhashtable_destroy(&priv->merge_table);
err_free_stats_ctx_table:
@@ -569,6 +596,100 @@ err_free_flow_table:
return -ENOMEM;
}
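+
+/* Tear down one zone table entry. The pre_ct/post_ct/nft lists should
+ * already be empty by now; any leftovers are warned about and cleaned
+ * up defensively before the merge tables are destroyed.
+ */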
+static void nfp_zone_table_entry_destroy(struct nfp_fl_ct_zone_entry *zt)
+{
+ if (!zt)
+ return;
+
+ if (!list_empty(&zt->pre_ct_list)) {
+ struct rhashtable *m_table = &zt->priv->ct_map_table;
+ struct nfp_fl_ct_flow_entry *entry, *tmp;
+ struct nfp_fl_ct_map_entry *map;
+
+ WARN_ONCE(1, "pre_ct_list not empty as expected, cleaning up\n");
+ list_for_each_entry_safe(entry, tmp, &zt->pre_ct_list,
+ list_node) {
+ map = rhashtable_lookup_fast(m_table,
+ &entry->cookie,
+ nfp_ct_map_params);
+ WARN_ON_ONCE(rhashtable_remove_fast(m_table,
+ &map->hash_node,
+ nfp_ct_map_params));
+ nfp_fl_ct_clean_flow_entry(entry);
+ kfree(map);
+ }
+ }
+
+ if (!list_empty(&zt->post_ct_list)) {
+ struct rhashtable *m_table = &zt->priv->ct_map_table;
+ struct nfp_fl_ct_flow_entry *entry, *tmp;
+ struct nfp_fl_ct_map_entry *map;
+
+ WARN_ONCE(1, "post_ct_list not empty as expected, cleaning up\n");
+ list_for_each_entry_safe(entry, tmp, &zt->post_ct_list,
+ list_node) {
+ map = rhashtable_lookup_fast(m_table,
+ &entry->cookie,
+ nfp_ct_map_params);
+ WARN_ON_ONCE(rhashtable_remove_fast(m_table,
+ &map->hash_node,
+ nfp_ct_map_params));
+ nfp_fl_ct_clean_flow_entry(entry);
+ kfree(map);
+ }
+ }
+
+ if (zt->nft) {
+ nf_flow_table_offload_del_cb(zt->nft,
+ nfp_fl_ct_handle_nft_flow,
+ zt);
+ zt->nft = NULL;
+ }
+
+ if (!list_empty(&zt->nft_flows_list)) {
+ struct rhashtable *m_table = &zt->priv->ct_map_table;
+ struct nfp_fl_ct_flow_entry *entry, *tmp;
+ struct nfp_fl_ct_map_entry *map;
+
+ WARN_ONCE(1, "nft_flows_list not empty as expected, cleaning up\n");
+ list_for_each_entry_safe(entry, tmp, &zt->nft_flows_list,
+ list_node) {
+ map = rhashtable_lookup_fast(m_table,
+ &entry->cookie,
+ nfp_ct_map_params);
+ WARN_ON_ONCE(rhashtable_remove_fast(m_table,
+ &map->hash_node,
+ nfp_ct_map_params));
+ nfp_fl_ct_clean_flow_entry(entry);
+ kfree(map);
+ }
+ }
+
+ rhashtable_free_and_destroy(&zt->tc_merge_tb,
+ nfp_check_rhashtable_empty, NULL);
+ rhashtable_free_and_destroy(&zt->nft_merge_tb,
+ nfp_check_rhashtable_empty, NULL);
+
+ kfree(zt);
+}
+
+static void nfp_free_zone_table_entry(void *ptr, void *arg)
+{
+ struct nfp_fl_ct_zone_entry *zt = ptr;
+
+ nfp_zone_table_entry_destroy(zt);
+}
+
+static void nfp_free_map_table_entry(void *ptr, void *arg)
+{
+ struct nfp_fl_ct_map_entry *map = ptr;
+
+ if (!map)
+ return;
+
+ kfree(map);
+}
+
void nfp_flower_metadata_cleanup(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
@@ -582,6 +703,12 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app)
nfp_check_rhashtable_empty, NULL);
rhashtable_free_and_destroy(&priv->merge_table,
nfp_check_rhashtable_empty, NULL);
+ rhashtable_free_and_destroy(&priv->ct_zone_table,
+ nfp_free_zone_table_entry, NULL);
+ nfp_zone_table_entry_destroy(priv->ct_zone_wc);
+
+ rhashtable_free_and_destroy(&priv->ct_map_table,
+ nfp_free_map_table_entry, NULL);
kvfree(priv->stats);
kfree(priv->mask_ids.mask_id_free_list.buf);
kfree(priv->mask_ids.last_used);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index e95969c462e4..2406d33356ad 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -7,6 +7,7 @@
#include "cmsg.h"
#include "main.h"
+#include "conntrack.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
@@ -1276,6 +1277,20 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
return 0;
}
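+
+/* Flows that match on conntrack fields or sit on a non-zero chain are
+ * only supported via the conntrack handlers; refuse them on the plain
+ * offload path.
+ */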
+static bool offload_pre_check(struct flow_cls_offload *flow)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+ struct flow_dissector *dissector = rule->match.dissector;
+
+ if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT))
+ return false;
+
+ if (flow->common.chain_index)
+ return false;
+
+ return true;
+}
+
/**
* nfp_flower_add_offload() - Adds a new flow to hardware.
* @app: Pointer to the APP handle
@@ -1302,6 +1317,15 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (nfp_netdev_is_nfp_repr(netdev))
port = nfp_port_from_netdev(netdev);
+ if (is_pre_ct_flow(flow))
+ return nfp_fl_ct_handle_pre_ct(priv, netdev, flow, extack);
+
+ if (is_post_ct_flow(flow))
+ return nfp_fl_ct_handle_post_ct(priv, netdev, flow, extack);
+
+ if (!offload_pre_check(flow))
+ return -EOPNOTSUPP;
+
key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
if (!key_layer)
return -ENOMEM;
@@ -1481,6 +1505,7 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
struct flow_cls_offload *flow)
{
struct nfp_flower_priv *priv = app->priv;
+ struct nfp_fl_ct_map_entry *ct_map_ent;
struct netlink_ext_ack *extack = NULL;
struct nfp_fl_payload *nfp_flow;
struct nfp_port *port = NULL;
@@ -1490,6 +1515,14 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
if (nfp_netdev_is_nfp_repr(netdev))
port = nfp_port_from_netdev(netdev);
+ /* Check ct_map_table */
+ ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie,
+ nfp_ct_map_params);
+ if (ct_map_ent) {
+ err = nfp_fl_ct_del_flow(ct_map_ent);
+ return err;
+ }
+
nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (!nfp_flow) {
NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot remove flow that does not exist");
@@ -1646,9 +1679,10 @@ nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
+ struct flow_cls_common_offload *common = type_data;
struct nfp_repr *repr = cb_priv;
- if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
+ if (!tc_can_offload_extack(repr->netdev, common->extack))
return -EOPNOTSUPP;
switch (type) {
@@ -1746,10 +1780,6 @@ static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
void *type_data, void *cb_priv)
{
struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
- struct flow_cls_offload *flower = type_data;
-
- if (flower->common.chain_index)
- return -EOPNOTSUPP;
switch (type) {
case TC_SETUP_CLSFLOWER:
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
index 94994a939277..d7ac0307797f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
@@ -905,8 +905,7 @@ area_cache_put(struct nfp_cpp *cpp, struct nfp_cpp_area_cache *cache)
return;
/* Move to front of LRU */
- list_del(&cache->entry);
- list_add(&cache->entry, &cpp->area_cache_list);
+ list_move(&cache->entry, &cpp->area_cache_list);
mutex_unlock(&cpp->area_cache_mutex);
}
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index a6861df9904f..2d097dcb7bda 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -1224,7 +1224,6 @@ static int nixge_of_get_resources(struct platform_device *pdev)
const struct of_device_id *of_id;
enum nixge_version version;
struct resource *ctrlres;
- struct resource *dmares;
struct net_device *ndev;
struct nixge_priv *priv;
@@ -1236,12 +1235,9 @@ static int nixge_of_get_resources(struct platform_device *pdev)
version = (enum nixge_version)of_id->data;
if (version <= NIXGE_V2)
- dmares = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
else
- dmares = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- "dma");
-
- priv->dma_regs = devm_ioremap_resource(&pdev->dev, dmares);
+ priv->dma_regs = devm_platform_ioremap_resource_byname(pdev, "dma");
if (IS_ERR(priv->dma_regs)) {
netdev_err(ndev, "failed to map dma regs\n");
return PTR_ERR(priv->dma_regs);
diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig
index 5f8b0bb3af6e..202973a82712 100644
--- a/drivers/net/ethernet/pensando/Kconfig
+++ b/drivers/net/ethernet/pensando/Kconfig
@@ -20,6 +20,7 @@ if NET_VENDOR_PENSANDO
config IONIC
tristate "Pensando Ethernet IONIC Support"
depends on 64BIT && PCI
+ depends on PTP_1588_CLOCK || !PTP_1588_CLOCK
select NET_DEVLINK
select DIMLIB
help
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 6b5ddb07ee83..98f430905ffa 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -110,6 +110,9 @@ config QED_RDMA
config QED_ISCSI
bool
+config QED_NVMETCP
+ bool
+
config QED_FCOE
bool
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index 8251755ec18c..0d9c2fe0245d 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -28,6 +28,11 @@ qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o
qed-$(CONFIG_QED_LL2) += qed_ll2.o
qed-$(CONFIG_QED_OOO) += qed_ooo.o
+qed-$(CONFIG_QED_NVMETCP) += \
+ qed_nvmetcp.o \
+ qed_nvmetcp_fw_funcs.o \
+ qed_nvmetcp_ip_services.o
+
qed-$(CONFIG_QED_RDMA) += \
qed_iwarp.o \
qed_rdma.o \
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index a20cb8a0c377..b590c70539b5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -49,6 +49,8 @@ extern const struct qed_common_ops qed_common_ops_pass;
#define QED_MIN_WIDS (4)
#define QED_PF_DEMS_SIZE (4)
+#define QED_LLH_DONT_CARE 0
+
/* cau states */
enum qed_coalescing_mode {
QED_COAL_MODE_DISABLE,
@@ -200,6 +202,7 @@ enum qed_pci_personality {
QED_PCI_ETH,
QED_PCI_FCOE,
QED_PCI_ISCSI,
+ QED_PCI_NVMETCP,
QED_PCI_ETH_ROCE,
QED_PCI_ETH_IWARP,
QED_PCI_ETH_RDMA,
@@ -239,6 +242,7 @@ enum QED_FEATURE {
QED_PF_L2_QUE,
QED_VF,
QED_RDMA_CNQ,
+ QED_NVMETCP_CQ,
QED_ISCSI_CQ,
QED_FCOE_CQ,
QED_VF_L2_QUE,
@@ -284,6 +288,8 @@ struct qed_hw_info {
((dev)->hw_info.personality == QED_PCI_FCOE)
#define QED_IS_ISCSI_PERSONALITY(dev) \
((dev)->hw_info.personality == QED_PCI_ISCSI)
+#define QED_IS_NVMETCP_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == QED_PCI_NVMETCP)
/* Resource Allocation scheme results */
u32 resc_start[QED_MAX_RESC];
@@ -592,6 +598,7 @@ struct qed_hwfn {
struct qed_ooo_info *p_ooo_info;
struct qed_rdma_info *p_rdma_info;
struct qed_iscsi_info *p_iscsi_info;
+ struct qed_nvmetcp_info *p_nvmetcp_info;
struct qed_fcoe_info *p_fcoe_info;
struct qed_pf_params pf_params;
@@ -828,6 +835,7 @@ struct qed_dev {
struct qed_eth_cb_ops *eth;
struct qed_fcoe_cb_ops *fcoe;
struct qed_iscsi_cb_ops *iscsi;
+ struct qed_nvmetcp_cb_ops *nvmetcp;
} protocol_ops;
void *ops_cookie;
@@ -999,4 +1007,10 @@ int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn,
void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc);
void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn);
+
+int qed_llh_add_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
+int qed_llh_add_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port);
+void qed_llh_remove_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
+void qed_llh_remove_dst_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
+void qed_llh_clear_all_filters(struct qed_dev *cdev);
#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 0a22f8ce9a2c..5a0a3cbcc1c1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -94,14 +94,14 @@ struct src_ent {
static bool src_proto(enum protocol_type type)
{
- return type == PROTOCOLID_ISCSI ||
+ return type == PROTOCOLID_TCP_ULP ||
type == PROTOCOLID_FCOE ||
type == PROTOCOLID_IWARP;
}
static bool tm_cid_proto(enum protocol_type type)
{
- return type == PROTOCOLID_ISCSI ||
+ return type == PROTOCOLID_TCP_ULP ||
type == PROTOCOLID_FCOE ||
type == PROTOCOLID_ROCE ||
type == PROTOCOLID_IWARP;
@@ -2072,7 +2072,6 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
PROTOCOLID_FCOE,
p_params->num_cons,
0);
-
qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE,
QED_CXT_FCOE_TID_SEG, 0,
p_params->num_tasks, true);
@@ -2090,13 +2089,12 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
if (p_params->num_cons && p_params->num_tasks) {
qed_cxt_set_proto_cid_count(p_hwfn,
- PROTOCOLID_ISCSI,
+ PROTOCOLID_TCP_ULP,
p_params->num_cons,
0);
-
qed_cxt_set_proto_tid_count(p_hwfn,
- PROTOCOLID_ISCSI,
- QED_CXT_ISCSI_TID_SEG,
+ PROTOCOLID_TCP_ULP,
+ QED_CXT_TCP_ULP_TID_SEG,
0,
p_params->num_tasks,
true);
@@ -2106,6 +2104,29 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
}
break;
}
+ case QED_PCI_NVMETCP:
+ {
+ struct qed_nvmetcp_pf_params *p_params;
+
+ p_params = &p_hwfn->pf_params.nvmetcp_pf_params;
+
+ if (p_params->num_cons && p_params->num_tasks) {
+ qed_cxt_set_proto_cid_count(p_hwfn,
+ PROTOCOLID_TCP_ULP,
+ p_params->num_cons,
+ 0);
+ qed_cxt_set_proto_tid_count(p_hwfn,
+ PROTOCOLID_TCP_ULP,
+ QED_CXT_TCP_ULP_TID_SEG,
+ 0,
+ p_params->num_tasks,
+ true);
+ } else {
+ DP_INFO(p_hwfn->cdev,
+ "NvmeTCP personality used without setting params!\n");
+ }
+ break;
+ }
default:
return -EINVAL;
}
@@ -2129,8 +2150,9 @@ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
seg = QED_CXT_FCOE_TID_SEG;
break;
case QED_PCI_ISCSI:
- proto = PROTOCOLID_ISCSI;
- seg = QED_CXT_ISCSI_TID_SEG;
+ case QED_PCI_NVMETCP:
+ proto = PROTOCOLID_TCP_ULP;
+ seg = QED_CXT_TCP_ULP_TID_SEG;
break;
default:
return -EINVAL;
@@ -2455,8 +2477,9 @@ int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
seg = QED_CXT_FCOE_TID_SEG;
break;
case QED_PCI_ISCSI:
- proto = PROTOCOLID_ISCSI;
- seg = QED_CXT_ISCSI_TID_SEG;
+ case QED_PCI_NVMETCP:
+ proto = PROTOCOLID_TCP_ULP;
+ seg = QED_CXT_TCP_ULP_TID_SEG;
break;
default:
return -EINVAL;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 056e79620a0e..8adb7ed0c12d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -50,7 +50,7 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
struct qed_tid_mem *p_info);
-#define QED_CXT_ISCSI_TID_SEG PROTOCOLID_ISCSI
+#define QED_CXT_TCP_ULP_TID_SEG PROTOCOLID_TCP_ULP
#define QED_CXT_ROCE_TID_SEG PROTOCOLID_ROCE
#define QED_CXT_FCOE_TID_SEG PROTOCOLID_FCOE
enum qed_cxt_elem_type {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index d2f5855b2ea7..0410c3604abd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -37,6 +37,7 @@
#include "qed_sriov.h"
#include "qed_vf.h"
#include "qed_rdma.h"
+#include "qed_nvmetcp.h"
static DEFINE_SPINLOCK(qm_lock);
@@ -667,7 +668,8 @@ qed_llh_set_engine_affin(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
}
/* Storage PF is bound to a single engine while L2 PF uses both */
- if (QED_IS_FCOE_PERSONALITY(p_hwfn) || QED_IS_ISCSI_PERSONALITY(p_hwfn))
+ if (QED_IS_FCOE_PERSONALITY(p_hwfn) || QED_IS_ISCSI_PERSONALITY(p_hwfn) ||
+ QED_IS_NVMETCP_PERSONALITY(p_hwfn))
eng = cdev->fir_affin ? QED_ENG1 : QED_ENG0;
else /* L2_PERSONALITY */
eng = QED_BOTH_ENG;
@@ -1164,6 +1166,9 @@ void qed_llh_remove_mac_filter(struct qed_dev *cdev,
if (!test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
goto out;
+ if (QED_IS_NVMETCP_PERSONALITY(p_hwfn))
+ return;
+
ether_addr_copy(filter.mac.addr, mac_addr);
rc = qed_llh_shadow_remove_filter(cdev, ppfid, &filter, &filter_idx,
&ref_cnt);
@@ -1381,6 +1386,11 @@ void qed_resc_free(struct qed_dev *cdev)
qed_ooo_free(p_hwfn);
}
+ if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
+ qed_nvmetcp_free(p_hwfn);
+ qed_ooo_free(p_hwfn);
+ }
+
if (QED_IS_RDMA_PERSONALITY(p_hwfn) && rdma_info) {
qed_spq_unregister_async_cb(p_hwfn, rdma_info->proto);
qed_rdma_info_free(p_hwfn);
@@ -1423,6 +1433,7 @@ static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn)
flags |= PQ_FLAGS_OFLD;
break;
case QED_PCI_ISCSI:
+ case QED_PCI_NVMETCP:
flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
break;
case QED_PCI_ETH_ROCE:
@@ -2263,10 +2274,11 @@ int qed_resc_alloc(struct qed_dev *cdev)
* at the same time
*/
n_eqes += num_cons + 2 * MAX_NUM_VFS_BB + n_srq;
- } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
+ } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI ||
+ p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
num_cons =
qed_cxt_get_proto_cid_count(p_hwfn,
- PROTOCOLID_ISCSI,
+ PROTOCOLID_TCP_ULP,
NULL);
n_eqes += 2 * num_cons;
}
@@ -2313,6 +2325,15 @@ int qed_resc_alloc(struct qed_dev *cdev)
goto alloc_err;
}
+ if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
+ rc = qed_nvmetcp_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+ rc = qed_ooo_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+ }
+
if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
rc = qed_rdma_info_alloc(p_hwfn);
if (rc)
@@ -2393,6 +2414,11 @@ void qed_resc_setup(struct qed_dev *cdev)
qed_iscsi_setup(p_hwfn);
qed_ooo_setup(p_hwfn);
}
+
+ if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
+ qed_nvmetcp_setup(p_hwfn);
+ qed_ooo_setup(p_hwfn);
+ }
}
}
@@ -2854,7 +2880,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
/* Protocol Configuration */
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
- (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
+ ((p_hwfn->hw_info.personality == QED_PCI_ISCSI) ||
+ (p_hwfn->hw_info.personality == QED_PCI_NVMETCP)) ? 1 : 0);
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
(p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 1 : 0);
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
@@ -3535,14 +3562,21 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt,
RESC_NUM(p_hwfn,
QED_CMDQS_CQS));
+
+ if (QED_IS_NVMETCP_PERSONALITY(p_hwfn))
+ feat_num[QED_NVMETCP_CQ] = min_t(u32, sb_cnt.cnt,
+ RESC_NUM(p_hwfn,
+ QED_CMDQS_CQS));
+
DP_VERBOSE(p_hwfn,
NETIF_MSG_PROBE,
- "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d FCOE_CQ=%d ISCSI_CQ=%d #SBS=%d\n",
+ "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d FCOE_CQ=%d ISCSI_CQ=%d NVMETCP_CQ=%d #SBS=%d\n",
(int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE),
(int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE),
(int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ),
(int)FEAT_NUM(p_hwfn, QED_FCOE_CQ),
(int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ),
+ (int)FEAT_NUM(p_hwfn, QED_NVMETCP_CQ),
(int)sb_cnt.cnt);
}
@@ -3734,7 +3768,8 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
break;
case QED_BDQ:
if (p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
- p_hwfn->hw_info.personality != QED_PCI_FCOE)
+ p_hwfn->hw_info.personality != QED_PCI_FCOE &&
+ p_hwfn->hw_info.personality != QED_PCI_NVMETCP)
*p_resc_num = 0;
else
*p_resc_num = 1;
@@ -3755,7 +3790,8 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
*p_resc_start = 0;
else if (p_hwfn->cdev->num_ports_in_engine == 4)
*p_resc_start = p_hwfn->port_id;
- else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
+ else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI ||
+ p_hwfn->hw_info.personality == QED_PCI_NVMETCP)
*p_resc_start = p_hwfn->port_id;
else if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
*p_resc_start = p_hwfn->port_id + 2;
@@ -5326,3 +5362,93 @@ void qed_set_fw_mac_addr(__le16 *fw_msb,
((u8 *)fw_lsb)[0] = mac[5];
((u8 *)fw_lsb)[1] = mac[4];
}
+
+static int qed_llh_shadow_remove_all_filters(struct qed_dev *cdev, u8 ppfid)
+{
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
+ struct qed_llh_filter_info *p_filters;
+ int rc;
+
+ rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "remove_all");
+ if (rc)
+ return rc;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ memset(p_filters, 0, NIG_REG_LLH_FUNC_FILTER_EN_SIZE *
+ sizeof(*p_filters));
+
+ return 0;
+}
+
+static void qed_llh_clear_ppfid_filters(struct qed_dev *cdev, u8 ppfid)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ u8 filter_idx, abs_ppfid;
+ int rc = 0;
+
+ if (!p_ptt)
+ return;
+
+ if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits) &&
+ !test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
+ goto out;
+
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
+ if (rc)
+ goto out;
+
+ rc = qed_llh_shadow_remove_all_filters(cdev, ppfid);
+ if (rc)
+ goto out;
+
+ for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE;
+ filter_idx++) {
+ rc = qed_llh_remove_filter(p_hwfn, p_ptt,
+ abs_ppfid, filter_idx);
+ if (rc)
+ goto out;
+ }
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+}
+
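+/* NVMe/TCP steers its traffic by TCP port rather than by MAC: these wrappers
+ * install protocol filters that match on only the source or only the
+ * destination port, with the other field set to QED_LLH_DONT_CARE.
+ */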
+int qed_llh_add_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port)
+{
+ return qed_llh_add_protocol_filter(cdev, 0,
+ QED_LLH_FILTER_TCP_SRC_PORT,
+ src_port, QED_LLH_DONT_CARE);
+}
+
+void qed_llh_remove_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port)
+{
+ qed_llh_remove_protocol_filter(cdev, 0,
+ QED_LLH_FILTER_TCP_SRC_PORT,
+ src_port, QED_LLH_DONT_CARE);
+}
+
+int qed_llh_add_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port)
+{
+ return qed_llh_add_protocol_filter(cdev, 0,
+ QED_LLH_FILTER_TCP_DEST_PORT,
+ QED_LLH_DONT_CARE, dest_port);
+}
+
+void qed_llh_remove_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port)
+{
+ qed_llh_remove_protocol_filter(cdev, 0,
+ QED_LLH_FILTER_TCP_DEST_PORT,
+ QED_LLH_DONT_CARE, dest_port);
+}
+
+void qed_llh_clear_all_filters(struct qed_dev *cdev)
+{
+ u8 ppfid;
+
+ if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits) &&
+ !test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
+ return;
+
+ for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++)
+ qed_llh_clear_ppfid_filters(cdev, ppfid);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 559df9f4d656..fb1baa2da2d0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -20,6 +20,7 @@
#include <linux/qed/fcoe_common.h>
#include <linux/qed/eth_common.h>
#include <linux/qed/iscsi_common.h>
+#include <linux/qed/nvmetcp_common.h>
#include <linux/qed/iwarp_common.h>
#include <linux/qed/rdma_common.h>
#include <linux/qed/roce_common.h>
@@ -1118,7 +1119,7 @@ struct outer_tag_config_struct {
/* personality per PF */
enum personality_type {
BAD_PERSONALITY_TYP,
- PERSONALITY_ISCSI,
+ PERSONALITY_TCP_ULP,
PERSONALITY_FCOE,
PERSONALITY_RDMA_AND_ETH,
PERSONALITY_RDMA,
@@ -12147,7 +12148,8 @@ struct public_func {
#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010
#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020
#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030
-#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030
+#define FUNC_MF_CFG_PROTOCOL_NVMETCP 0x00000040
+#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000040
#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
#define FUNC_MF_CFG_MIN_BW_SHIFT 8
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 448567a1f520..db926d8b3033 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -158,7 +158,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_INIT_FUNC,
- PROTOCOLID_ISCSI, &init_data);
+ PROTOCOLID_TCP_ULP, &init_data);
if (rc)
return rc;
@@ -250,7 +250,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_hwfn->p_iscsi_info->event_context = event_context;
p_hwfn->p_iscsi_info->event_cb = async_event_cb;
- qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ISCSI,
+ qed_spq_register_async_cb(p_hwfn, PROTOCOLID_TCP_ULP,
qed_iscsi_async_event);
return qed_spq_post(p_hwfn, p_ent, NULL);
@@ -286,7 +286,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN,
- PROTOCOLID_ISCSI, &init_data);
+ PROTOCOLID_TCP_ULP, &init_data);
if (rc)
return rc;
@@ -465,7 +465,7 @@ static int qed_sp_iscsi_conn_update(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
- PROTOCOLID_ISCSI, &init_data);
+ PROTOCOLID_TCP_ULP, &init_data);
if (rc)
return rc;
@@ -506,7 +506,7 @@ qed_sp_iscsi_mac_update(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_MAC_UPDATE,
- PROTOCOLID_ISCSI, &init_data);
+ PROTOCOLID_TCP_ULP, &init_data);
if (rc)
return rc;
@@ -548,7 +548,7 @@ static int qed_sp_iscsi_conn_terminate(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_TERMINATION_CONN,
- PROTOCOLID_ISCSI, &init_data);
+ PROTOCOLID_TCP_ULP, &init_data);
if (rc)
return rc;
@@ -582,7 +582,7 @@ static int qed_sp_iscsi_conn_clear_sq(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_CLEAR_SQ,
- PROTOCOLID_ISCSI, &init_data);
+ PROTOCOLID_TCP_ULP, &init_data);
if (rc)
return rc;
@@ -606,13 +606,13 @@ static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn,
rc = qed_sp_init_request(p_hwfn, &p_ent,
ISCSI_RAMROD_CMD_ID_DESTROY_FUNC,
- PROTOCOLID_ISCSI, &init_data);
+ PROTOCOLID_TCP_ULP, &init_data);
if (rc)
return rc;
rc = qed_spq_post(p_hwfn, p_ent, NULL);
- qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ISCSI);
+ qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_TCP_ULP);
return rc;
}
@@ -786,7 +786,7 @@ static int qed_iscsi_acquire_connection(struct qed_hwfn *p_hwfn,
u32 icid;
spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
- rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ISCSI, &icid);
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_TCP_ULP, &icid);
spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
if (rc)
return rc;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 49783f365079..02a4610d9330 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -960,7 +960,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) &&
p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE &&
- conn_type != QED_LL2_TYPE_IWARP) {
+ conn_type != QED_LL2_TYPE_IWARP &&
+ (!QED_IS_NVMETCP_PERSONALITY(p_hwfn))) {
p_ramrod->mf_si_bcast_accept_all = 1;
p_ramrod->mf_si_mcast_accept_all = 1;
} else {
@@ -1037,8 +1038,8 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
case QED_LL2_TYPE_FCOE:
p_ramrod->conn_type = PROTOCOLID_FCOE;
break;
- case QED_LL2_TYPE_ISCSI:
- p_ramrod->conn_type = PROTOCOLID_ISCSI;
+ case QED_LL2_TYPE_TCP_ULP:
+ p_ramrod->conn_type = PROTOCOLID_TCP_ULP;
break;
case QED_LL2_TYPE_ROCE:
p_ramrod->conn_type = PROTOCOLID_ROCE;
@@ -1047,8 +1048,9 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->conn_type = PROTOCOLID_IWARP;
break;
case QED_LL2_TYPE_OOO:
- if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
- p_ramrod->conn_type = PROTOCOLID_ISCSI;
+ if (p_hwfn->hw_info.personality == QED_PCI_ISCSI ||
+ p_hwfn->hw_info.personality == QED_PCI_NVMETCP)
+ p_ramrod->conn_type = PROTOCOLID_TCP_ULP;
else
p_ramrod->conn_type = PROTOCOLID_IWARP;
break;
@@ -1634,7 +1636,8 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
if (rc)
goto out;
- if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
+ if (!QED_IS_RDMA_PERSONALITY(p_hwfn) &&
+ !QED_IS_NVMETCP_PERSONALITY(p_hwfn))
qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);
qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
@@ -2376,7 +2379,8 @@ out:
static bool qed_ll2_is_storage_eng1(struct qed_dev *cdev)
{
return (QED_IS_FCOE_PERSONALITY(QED_LEADING_HWFN(cdev)) ||
- QED_IS_ISCSI_PERSONALITY(QED_LEADING_HWFN(cdev))) &&
+ QED_IS_ISCSI_PERSONALITY(QED_LEADING_HWFN(cdev)) ||
+ QED_IS_NVMETCP_PERSONALITY(QED_LEADING_HWFN(cdev))) &&
(QED_AFFIN_HWFN(cdev) != QED_LEADING_HWFN(cdev));
}
@@ -2402,11 +2406,13 @@ static int qed_ll2_stop(struct qed_dev *cdev)
if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
return 0;
+ if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn))
+ qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address);
- qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address);
eth_zero_addr(cdev->ll2_mac_address);
- if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
+ if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn))
qed_ll2_stop_ooo(p_hwfn);
/* In CMT mode, LL2 is always started on engine 0 for a storage PF */
@@ -2442,7 +2448,8 @@ static int __qed_ll2_start(struct qed_hwfn *p_hwfn,
conn_type = QED_LL2_TYPE_FCOE;
break;
case QED_PCI_ISCSI:
- conn_type = QED_LL2_TYPE_ISCSI;
+ case QED_PCI_NVMETCP:
+ conn_type = QED_LL2_TYPE_TCP_ULP;
break;
case QED_PCI_ETH_ROCE:
conn_type = QED_LL2_TYPE_ROCE;
@@ -2567,7 +2574,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
}
}
- if (QED_IS_ISCSI_PERSONALITY(p_hwfn)) {
+ if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn)) {
DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
rc = qed_ll2_start_ooo(p_hwfn, params);
if (rc) {
@@ -2576,10 +2583,13 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
}
}
- rc = qed_llh_add_mac_filter(cdev, 0, params->ll2_mac_address);
- if (rc) {
- DP_NOTICE(cdev, "Failed to add an LLH filter\n");
- goto err3;
+ if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn)) {
+ rc = qed_llh_add_mac_filter(cdev, 0, params->ll2_mac_address);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to add an LLH filter\n");
+ goto err3;
+ }
}
ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
@@ -2587,7 +2597,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
return 0;
err3:
- if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
+ if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn))
qed_ll2_stop_ooo(p_hwfn);
err2:
if (b_is_storage_eng1)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index cd882c453394..4387292c37e2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -2446,6 +2446,9 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
case FUNC_MF_CFG_PROTOCOL_ISCSI:
*p_proto = QED_PCI_ISCSI;
break;
+ case FUNC_MF_CFG_PROTOCOL_NVMETCP:
+ *p_proto = QED_PCI_NVMETCP;
+ break;
case FUNC_MF_CFG_PROTOCOL_FCOE:
*p_proto = QED_PCI_FCOE;
break;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
index 3e3192a3ad9b..6190adf965bc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
@@ -1306,7 +1306,8 @@ int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
}
if ((tlv_group & QED_MFW_TLV_ISCSI) &&
- p_hwfn->hw_info.personality != QED_PCI_ISCSI) {
+ p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
+ p_hwfn->hw_info.personality != QED_PCI_NVMETCP) {
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"Skipping iSCSI TLVs for non-iSCSI function\n");
tlv_group &= ~QED_MFW_TLV_ISCSI;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.c b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.c
new file mode 100644
index 000000000000..f19128c8d9cc
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.c
@@ -0,0 +1,829 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/qed/qed_nvmetcp_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_nvmetcp.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_sp.h"
+#include "qed_reg_addr.h"
+#include "qed_nvmetcp_fw_funcs.h"
+
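+/* Dispatch PROTOCOLID_TCP_ULP async EQ events to the callback that the
+ * NVMe/TCP offload driver registered through qed_nvmetcp_start().
+ */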
+static int qed_nvmetcp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
+ u16 echo, union event_ring_data *data,
+ u8 fw_return_code)
+{
+ if (p_hwfn->p_nvmetcp_info->event_cb) {
+ struct qed_nvmetcp_info *p_nvmetcp = p_hwfn->p_nvmetcp_info;
+
+ return p_nvmetcp->event_cb(p_nvmetcp->event_context,
+ fw_event_code, data);
+ } else {
+ DP_NOTICE(p_hwfn, "nvmetcp async completion is not set\n");
+
+ return -EINVAL;
+ }
+}
+
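+/* Post the INIT_FUNC ramrod: queue geometry is taken from
+ * nvmetcp_pf_params, the TCP timers from the QED_TCP_* defaults, and the
+ * async event callback is registered before the ramrod is posted.
+ */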
+static int qed_sp_nvmetcp_func_start(struct qed_hwfn *p_hwfn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr,
+ void *event_context,
+ nvmetcp_event_cb_t async_event_cb)
+{
+ struct nvmetcp_init_ramrod_params *p_ramrod = NULL;
+ struct qed_nvmetcp_pf_params *p_params = NULL;
+ struct scsi_init_func_queues *p_queue = NULL;
+ struct nvmetcp_spe_func_init *p_init = NULL;
+ struct qed_sp_init_data init_data = {};
+ struct qed_spq_entry *p_ent = NULL;
+ int rc = 0;
+ u16 val;
+ u8 i;
+
+ /* Get SPQ entry */
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ NVMETCP_RAMROD_CMD_ID_INIT_FUNC,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.nvmetcp_init;
+ p_init = &p_ramrod->nvmetcp_init_spe;
+ p_params = &p_hwfn->pf_params.nvmetcp_pf_params;
+ p_queue = &p_init->q_params;
+ p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
+ p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
+ p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
+ p_init->ll2_rx_queue_id = RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
+ p_params->ll2_ooo_queue_id;
+ SET_FIELD(p_init->flags, NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE, 1);
+ p_init->func_params.log_page_size = ilog2(PAGE_SIZE);
+ p_init->func_params.num_tasks = cpu_to_le16(p_params->num_tasks);
+ p_init->debug_flags = p_params->debug_mode;
+ DMA_REGPAIR_LE(p_queue->glbl_q_params_addr,
+ p_params->glbl_q_params_addr);
+ p_queue->cq_num_entries = cpu_to_le16(QED_NVMETCP_FW_CQ_SIZE);
+ p_queue->num_queues = p_params->num_queues;
+ val = RESC_START(p_hwfn, QED_CMDQS_CQS);
+ p_queue->queue_relative_offset = cpu_to_le16((u16)val);
+ p_queue->cq_sb_pi = p_params->gl_rq_pi;
+
+ for (i = 0; i < p_params->num_queues; i++) {
+ val = qed_get_igu_sb_id(p_hwfn, i);
+ p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val);
+ }
+
+ SET_FIELD(p_queue->q_validity,
+ SCSI_INIT_FUNC_QUEUES_CMD_VALID, 0);
+ p_queue->cmdq_num_entries = 0;
+ p_queue->bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
+ p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(QED_TCP_TWO_MSL_TIMER);
+ p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(QED_TCP_SWS_TIMER);
+ p_init->half_way_close_timeout = cpu_to_le16(QED_TCP_HALF_WAY_CLOSE_TIMEOUT);
+ p_ramrod->tcp_init.max_fin_rt = QED_TCP_MAX_FIN_RT;
+ SET_FIELD(p_ramrod->nvmetcp_init_spe.params,
+ NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT, QED_TCP_MAX_FIN_RT);
+ p_hwfn->p_nvmetcp_info->event_context = event_context;
+ p_hwfn->p_nvmetcp_info->event_cb = async_event_cb;
+ qed_spq_register_async_cb(p_hwfn, PROTOCOLID_TCP_ULP,
+ qed_nvmetcp_async_event);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_nvmetcp_func_stop(struct qed_hwfn *p_hwfn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ NVMETCP_RAMROD_CMD_ID_DESTROY_FUNC,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_TCP_ULP);
+
+ return rc;
+}
+
+static int qed_fill_nvmetcp_dev_info(struct qed_dev *cdev,
+ struct qed_dev_nvmetcp_info *info)
+{
+ struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev);
+ int rc;
+
+ memset(info, 0, sizeof(*info));
+ rc = qed_fill_dev_info(cdev, &info->common);
+ info->port_id = MFW_PORT(hwfn);
+ info->num_cqs = FEAT_NUM(hwfn, QED_NVMETCP_CQ);
+
+ return rc;
+}
+
+static void qed_register_nvmetcp_ops(struct qed_dev *cdev,
+ struct qed_nvmetcp_cb_ops *ops,
+ void *cookie)
+{
+ cdev->protocol_ops.nvmetcp = ops;
+ cdev->ops_cookie = cookie;
+}
+
+static int qed_nvmetcp_stop(struct qed_dev *cdev)
+{
+ int rc;
+
+ if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
+ DP_NOTICE(cdev, "nvmetcp already stopped\n");
+
+ return 0;
+ }
+
+ if (!hash_empty(cdev->connections)) {
+ DP_NOTICE(cdev,
+ "Can't stop nvmetcp - not all connections were returned\n");
+
+ return -EINVAL;
+ }
+
+ /* Stop the NVMeTCP function */
+ rc = qed_sp_nvmetcp_func_stop(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK,
+ NULL);
+ cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
+
+ return rc;
+}
+
+static int qed_nvmetcp_start(struct qed_dev *cdev,
+ struct qed_nvmetcp_tid *tasks,
+ void *event_context,
+ nvmetcp_event_cb_t async_event_cb)
+{
+ struct qed_tid_mem *tid_info;
+ int rc;
+
+ if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
+ DP_NOTICE(cdev, "nvmetcp already started;\n");
+
+ return 0;
+ }
+
+ rc = qed_sp_nvmetcp_func_start(QED_AFFIN_HWFN(cdev),
+ QED_SPQ_MODE_EBLOCK, NULL,
+ event_context, async_event_cb);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to start nvmetcp\n");
+
+ return rc;
+ }
+
+ cdev->flags |= QED_FLAG_STORAGE_STARTED;
+ hash_init(cdev->connections);
+
+ if (!tasks)
+ return 0;
+
+ tid_info = kzalloc(sizeof(*tid_info), GFP_KERNEL);
+ if (!tid_info) {
+ qed_nvmetcp_stop(cdev);
+
+ return -ENOMEM;
+ }
+
+ rc = qed_cxt_get_tid_mem_info(QED_AFFIN_HWFN(cdev), tid_info);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to gather task information\n");
+ qed_nvmetcp_stop(cdev);
+ kfree(tid_info);
+
+ return rc;
+ }
+
+ /* Fill task information */
+ tasks->size = tid_info->tid_size;
+ tasks->num_tids_per_block = tid_info->num_tids_per_block;
+ memcpy(tasks->blocks, tid_info->blocks,
+ MAX_TID_BLOCKS_NVMETCP * sizeof(u8 *));
+ kfree(tid_info);
+
+ return 0;
+}
+
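+/* Look up an offloaded connection by icid in the cdev-wide hash table;
+ * returns NULL if the storage function is not started or the handle is
+ * unknown.
+ */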
+static struct qed_hash_nvmetcp_con *qed_nvmetcp_get_hash(struct qed_dev *cdev,
+ u32 handle)
+{
+ struct qed_hash_nvmetcp_con *hash_con = NULL;
+
+ if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
+ return NULL;
+
+ hash_for_each_possible(cdev->connections, hash_con, node, handle) {
+ if (hash_con->con->icid == handle)
+ break;
+ }
+
+ if (!hash_con || hash_con->con->icid != handle)
+ return NULL;
+
+ return hash_con;
+}
+
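+/* Build and post the OFFLOAD_CONN ramrod: the offload/ack PQ ids, the
+ * SQ/R2TQ/XHQ/UHQ PBLs, the CCCID-iTID table and the full TCP 4-tuple and
+ * options are handed to FW, which owns the connection from here on.
+ */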
+static int qed_sp_nvmetcp_conn_offload(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct nvmetcp_spe_conn_offload *p_ramrod = NULL;
+ struct tcp_offload_params_opt2 *p_tcp = NULL;
+ struct qed_sp_init_data init_data = { 0 };
+ struct qed_spq_entry *p_ent = NULL;
+ dma_addr_t r2tq_pbl_addr;
+ dma_addr_t xhq_pbl_addr;
+ dma_addr_t uhq_pbl_addr;
+ u16 physical_q;
+ int rc = 0;
+ u8 i;
+
+ /* Get SPQ entry */
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ NVMETCP_RAMROD_CMD_ID_OFFLOAD_CONN,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.nvmetcp_conn_offload;
+
+ /* Transmission PQ is the first of the PF */
+ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ p_conn->physical_q0 = cpu_to_le16(physical_q);
+ p_ramrod->nvmetcp.physical_q0 = cpu_to_le16(physical_q);
+
+ /* nvmetcp Pure-ACK PQ */
+ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
+ p_conn->physical_q1 = cpu_to_le16(physical_q);
+ p_ramrod->nvmetcp.physical_q1 = cpu_to_le16(physical_q);
+ p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
+ DMA_REGPAIR_LE(p_ramrod->nvmetcp.sq_pbl_addr, p_conn->sq_pbl_addr);
+ r2tq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->r2tq);
+ DMA_REGPAIR_LE(p_ramrod->nvmetcp.r2tq_pbl_addr, r2tq_pbl_addr);
+ xhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->xhq);
+ DMA_REGPAIR_LE(p_ramrod->nvmetcp.xhq_pbl_addr, xhq_pbl_addr);
+ uhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->uhq);
+ DMA_REGPAIR_LE(p_ramrod->nvmetcp.uhq_pbl_addr, uhq_pbl_addr);
+ p_ramrod->nvmetcp.flags = p_conn->offl_flags;
+ p_ramrod->nvmetcp.default_cq = p_conn->default_cq;
+ p_ramrod->nvmetcp.initial_ack = 0;
+ DMA_REGPAIR_LE(p_ramrod->nvmetcp.nvmetcp.cccid_itid_table_addr,
+ p_conn->nvmetcp_cccid_itid_table_addr);
+ p_ramrod->nvmetcp.nvmetcp.cccid_max_range =
+ cpu_to_le16(p_conn->nvmetcp_cccid_max_range);
+ p_tcp = &p_ramrod->tcp;
+ qed_set_fw_mac_addr(&p_tcp->remote_mac_addr_hi,
+ &p_tcp->remote_mac_addr_mid,
+ &p_tcp->remote_mac_addr_lo, p_conn->remote_mac);
+ qed_set_fw_mac_addr(&p_tcp->local_mac_addr_hi,
+ &p_tcp->local_mac_addr_mid,
+ &p_tcp->local_mac_addr_lo, p_conn->local_mac);
+ p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id);
+ p_tcp->flags = cpu_to_le16(p_conn->tcp_flags);
+ p_tcp->ip_version = p_conn->ip_version;
+ if (p_tcp->ip_version == TCP_IPV6) {
+ for (i = 0; i < 4; i++) {
+ p_tcp->remote_ip[i] = cpu_to_le32(p_conn->remote_ip[i]);
+ p_tcp->local_ip[i] = cpu_to_le32(p_conn->local_ip[i]);
+ }
+ } else {
+ p_tcp->remote_ip[0] = cpu_to_le32(p_conn->remote_ip[0]);
+ p_tcp->local_ip[0] = cpu_to_le32(p_conn->local_ip[0]);
+ }
+
+ p_tcp->flow_label = cpu_to_le32(p_conn->flow_label);
+ p_tcp->ttl = p_conn->ttl;
+ p_tcp->tos_or_tc = p_conn->tos_or_tc;
+ p_tcp->remote_port = cpu_to_le16(p_conn->remote_port);
+ p_tcp->local_port = cpu_to_le16(p_conn->local_port);
+ p_tcp->mss = cpu_to_le16(p_conn->mss);
+ p_tcp->rcv_wnd_scale = p_conn->rcv_wnd_scale;
+ p_tcp->connect_mode = p_conn->connect_mode;
+ p_tcp->cwnd = cpu_to_le32(p_conn->cwnd);
+ p_tcp->ka_max_probe_cnt = p_conn->ka_max_probe_cnt;
+ p_tcp->ka_timeout = cpu_to_le32(p_conn->ka_timeout);
+ p_tcp->max_rt_time = cpu_to_le32(p_conn->max_rt_time);
+ p_tcp->ka_interval = cpu_to_le32(p_conn->ka_interval);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_nvmetcp_conn_update(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct nvmetcp_conn_update_ramrod_params *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+ u32 dval;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ NVMETCP_RAMROD_CMD_ID_UPDATE_CONN,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.nvmetcp_conn_update;
+ p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
+ p_ramrod->flags = p_conn->update_flag;
+ p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size);
+ dval = p_conn->max_recv_pdu_length;
+ p_ramrod->max_recv_pdu_length = cpu_to_le32(dval);
+ dval = p_conn->max_send_pdu_length;
+ p_ramrod->max_send_pdu_length = cpu_to_le32(dval);
+ p_ramrod->first_seq_length = cpu_to_le32(p_conn->first_seq_length);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_nvmetcp_conn_terminate(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct nvmetcp_spe_conn_termination *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ NVMETCP_RAMROD_CMD_ID_TERMINATION_CONN,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.nvmetcp_conn_terminate;
+ p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
+ p_ramrod->abortive = p_conn->abortive_dsconnect;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_nvmetcp_conn_clear_sq(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ NVMETCP_RAMROD_CMD_ID_CLEAR_SQ,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static void __iomem *qed_nvmetcp_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
+{
+ return (u8 __iomem *)p_hwfn->doorbells +
+ qed_db_addr(cid, DQ_DEMS_LEGACY);
+}
+
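+/* Reuse a connection from the free list when one is available; otherwise
+ * allocate a fresh one together with its R2TQ, UHQ and XHQ PBL chains.
+ */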
+static int qed_nvmetcp_allocate_connection(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn **p_out_conn)
+{
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ };
+ struct qed_nvmetcp_pf_params *p_params = NULL;
+ struct qed_nvmetcp_conn *p_conn = NULL;
+ int rc = 0;
+
+ /* Try finding a free connection that can be used */
+ spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
+ if (!list_empty(&p_hwfn->p_nvmetcp_info->free_list))
+ p_conn = list_first_entry(&p_hwfn->p_nvmetcp_info->free_list,
+ struct qed_nvmetcp_conn, list_entry);
+ if (p_conn) {
+ list_del(&p_conn->list_entry);
+ spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
+ *p_out_conn = p_conn;
+
+ return 0;
+ }
+ spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
+
+ /* Need to allocate a new connection */
+ p_params = &p_hwfn->pf_params.nvmetcp_pf_params;
+ p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
+ if (!p_conn)
+ return -ENOMEM;
+
+ params.num_elems = p_params->num_r2tq_pages_in_ring *
+ QED_CHAIN_PAGE_SIZE / sizeof(struct nvmetcp_wqe);
+ params.elem_size = sizeof(struct nvmetcp_wqe);
+ rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->r2tq, &params);
+ if (rc)
+ goto nomem_r2tq;
+
+ params.num_elems = p_params->num_uhq_pages_in_ring *
+ QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_uhqe);
+ params.elem_size = sizeof(struct iscsi_uhqe);
+ rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->uhq, &params);
+ if (rc)
+ goto nomem_uhq;
+
+ params.elem_size = sizeof(struct iscsi_xhqe);
+ rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->xhq, &params);
+ if (rc)
+ goto nomem;
+
+ p_conn->free_on_delete = true;
+ *p_out_conn = p_conn;
+
+ return 0;
+
+nomem:
+ qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
+nomem_uhq:
+ qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
+nomem_r2tq:
+ kfree(p_conn);
+
+ return -ENOMEM;
+}
+
+static int qed_nvmetcp_acquire_connection(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn **p_out_conn)
+{
+ struct qed_nvmetcp_conn *p_conn = NULL;
+ int rc = 0;
+ u32 icid;
+
+ spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_TCP_ULP, &icid);
+ spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
+
+ if (rc)
+ return rc;
+
+ rc = qed_nvmetcp_allocate_connection(p_hwfn, &p_conn);
+ if (rc) {
+ spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
+ qed_cxt_release_cid(p_hwfn, icid);
+ spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
+
+ return rc;
+ }
+
+ p_conn->icid = icid;
+ p_conn->conn_id = (u16)icid;
+ p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
+ *p_out_conn = p_conn;
+
+ return rc;
+}
+
+static void qed_nvmetcp_release_connection(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn *p_conn)
+{
+ spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
+ list_add_tail(&p_conn->list_entry, &p_hwfn->p_nvmetcp_info->free_list);
+ qed_cxt_release_cid(p_hwfn, p_conn->icid);
+ spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
+}
+
+static void qed_nvmetcp_free_connection(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn *p_conn)
+{
+ qed_chain_free(p_hwfn->cdev, &p_conn->xhq);
+ qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
+ qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
+ kfree(p_conn);
+}
+
+int qed_nvmetcp_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_nvmetcp_info *p_nvmetcp_info;
+
+ p_nvmetcp_info = kzalloc(sizeof(*p_nvmetcp_info), GFP_KERNEL);
+ if (!p_nvmetcp_info)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&p_nvmetcp_info->free_list);
+ p_hwfn->p_nvmetcp_info = p_nvmetcp_info;
+
+ return 0;
+}
+
+void qed_nvmetcp_setup(struct qed_hwfn *p_hwfn)
+{
+ spin_lock_init(&p_hwfn->p_nvmetcp_info->lock);
+}
+
+void qed_nvmetcp_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_nvmetcp_conn *p_conn = NULL;
+
+ if (!p_hwfn->p_nvmetcp_info)
+ return;
+
+ while (!list_empty(&p_hwfn->p_nvmetcp_info->free_list)) {
+ p_conn = list_first_entry(&p_hwfn->p_nvmetcp_info->free_list,
+ struct qed_nvmetcp_conn, list_entry);
+ if (p_conn) {
+ list_del(&p_conn->list_entry);
+ qed_nvmetcp_free_connection(p_hwfn, p_conn);
+ }
+ }
+
+ kfree(p_hwfn->p_nvmetcp_info);
+ p_hwfn->p_nvmetcp_info = NULL;
+}
+
+static int qed_nvmetcp_acquire_conn(struct qed_dev *cdev,
+ u32 *handle,
+ u32 *fw_cid, void __iomem **p_doorbell)
+{
+ struct qed_hash_nvmetcp_con *hash_con;
+ int rc;
+
+ /* Allocate a hashed connection */
+ hash_con = kzalloc(sizeof(*hash_con), GFP_ATOMIC);
+ if (!hash_con)
+ return -ENOMEM;
+
+ /* Acquire the connection */
+ rc = qed_nvmetcp_acquire_connection(QED_AFFIN_HWFN(cdev),
+ &hash_con->con);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to acquire Connection\n");
+ kfree(hash_con);
+
+ return rc;
+ }
+
+ /* Add the connection to the hash table */
+ *handle = hash_con->con->icid;
+ *fw_cid = hash_con->con->fw_cid;
+ hash_add(cdev->connections, &hash_con->node, *handle);
+ if (p_doorbell)
+ *p_doorbell = qed_nvmetcp_get_db_addr(QED_AFFIN_HWFN(cdev),
+ *handle);
+
+ return 0;
+}
+
+static int qed_nvmetcp_release_conn(struct qed_dev *cdev, u32 handle)
+{
+ struct qed_hash_nvmetcp_con *hash_con;
+
+ hash_con = qed_nvmetcp_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+
+ return -EINVAL;
+ }
+
+ hlist_del(&hash_con->node);
+ qed_nvmetcp_release_connection(QED_AFFIN_HWFN(cdev), hash_con->con);
+ kfree(hash_con);
+
+ return 0;
+}
+
+static int qed_nvmetcp_offload_conn(struct qed_dev *cdev, u32 handle,
+ struct qed_nvmetcp_params_offload *conn_info)
+{
+ struct qed_hash_nvmetcp_con *hash_con;
+ struct qed_nvmetcp_conn *con;
+
+ hash_con = qed_nvmetcp_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+
+ return -EINVAL;
+ }
+
+ /* Update the connection with information from the params */
+ con = hash_con->con;
+
+ /* FW initializations */
+ con->layer_code = NVMETCP_SLOW_PATH_LAYER_CODE;
+ con->sq_pbl_addr = conn_info->sq_pbl_addr;
+ con->nvmetcp_cccid_max_range = conn_info->nvmetcp_cccid_max_range;
+ con->nvmetcp_cccid_itid_table_addr = conn_info->nvmetcp_cccid_itid_table_addr;
+ con->default_cq = conn_info->default_cq;
+ SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE, 0);
+ SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE, 1);
+ SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B, 1);
+
+ /* Networking and TCP stack initializations */
+ ether_addr_copy(con->local_mac, conn_info->src.mac);
+ ether_addr_copy(con->remote_mac, conn_info->dst.mac);
+ memcpy(con->local_ip, conn_info->src.ip, sizeof(con->local_ip));
+ memcpy(con->remote_ip, conn_info->dst.ip, sizeof(con->remote_ip));
+ con->local_port = conn_info->src.port;
+ con->remote_port = conn_info->dst.port;
+ con->vlan_id = conn_info->vlan_id;
+
+ if (conn_info->timestamp_en)
+ SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN, 1);
+
+ if (conn_info->delayed_ack_en)
+ SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN, 1);
+
+ if (conn_info->tcp_keep_alive_en)
+ SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_KA_EN, 1);
+
+ if (conn_info->ecn_en)
+ SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_ECN_EN, 1);
+
+ con->ip_version = conn_info->ip_version;
+ con->flow_label = QED_TCP_FLOW_LABEL;
+ con->ka_max_probe_cnt = conn_info->ka_max_probe_cnt;
+ con->ka_timeout = conn_info->ka_timeout;
+ con->ka_interval = conn_info->ka_interval;
+ con->max_rt_time = conn_info->max_rt_time;
+ con->ttl = conn_info->ttl;
+ con->tos_or_tc = conn_info->tos_or_tc;
+ con->mss = conn_info->mss;
+ con->cwnd = conn_info->cwnd;
+ con->rcv_wnd_scale = conn_info->rcv_wnd_scale;
+ con->connect_mode = 0;
+
+ return qed_sp_nvmetcp_conn_offload(QED_AFFIN_HWFN(cdev), con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_nvmetcp_update_conn(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_nvmetcp_params_update *conn_info)
+{
+ struct qed_hash_nvmetcp_con *hash_con;
+ struct qed_nvmetcp_conn *con;
+
+ hash_con = qed_nvmetcp_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+
+ return -EINVAL;
+ }
+
+ /* Update the connection with information from the params */
+ con = hash_con->con;
+ SET_FIELD(con->update_flag,
+ ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T, 0);
+ SET_FIELD(con->update_flag,
+ ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA, 1);
+ if (conn_info->hdr_digest_en)
+ SET_FIELD(con->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN, 1);
+
+ if (conn_info->data_digest_en)
+ SET_FIELD(con->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN, 1);
+
+ /* Placeholder - initialize pfv, cpda, hpda */
+
+ con->max_seq_size = conn_info->max_io_size;
+ con->max_recv_pdu_length = conn_info->max_recv_pdu_length;
+ con->max_send_pdu_length = conn_info->max_send_pdu_length;
+ con->first_seq_length = conn_info->max_io_size;
+
+ return qed_sp_nvmetcp_conn_update(QED_AFFIN_HWFN(cdev), con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_nvmetcp_clear_conn_sq(struct qed_dev *cdev, u32 handle)
+{
+ struct qed_hash_nvmetcp_con *hash_con;
+
+ hash_con = qed_nvmetcp_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+
+ return -EINVAL;
+ }
+
+ return qed_sp_nvmetcp_conn_clear_sq(QED_AFFIN_HWFN(cdev), hash_con->con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_nvmetcp_destroy_conn(struct qed_dev *cdev,
+ u32 handle, u8 abrt_conn)
+{
+ struct qed_hash_nvmetcp_con *hash_con;
+
+ hash_con = qed_nvmetcp_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+
+ return -EINVAL;
+ }
+
+ hash_con->con->abortive_dsconnect = abrt_conn;
+
+ return qed_sp_nvmetcp_conn_terminate(QED_AFFIN_HWFN(cdev), hash_con->con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
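+/* Operation table handed to the NVMe/TCP offload upper driver via
+ * qed_get_nvmetcp_ops(): device, LL2, connection and task-init entry points.
+ */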
+static const struct qed_nvmetcp_ops qed_nvmetcp_ops_pass = {
+ .common = &qed_common_ops_pass,
+ .ll2 = &qed_ll2_ops_pass,
+ .fill_dev_info = &qed_fill_nvmetcp_dev_info,
+ .register_ops = &qed_register_nvmetcp_ops,
+ .start = &qed_nvmetcp_start,
+ .stop = &qed_nvmetcp_stop,
+ .acquire_conn = &qed_nvmetcp_acquire_conn,
+ .release_conn = &qed_nvmetcp_release_conn,
+ .offload_conn = &qed_nvmetcp_offload_conn,
+ .update_conn = &qed_nvmetcp_update_conn,
+ .destroy_conn = &qed_nvmetcp_destroy_conn,
+ .clear_sq = &qed_nvmetcp_clear_conn_sq,
+ .add_src_tcp_port_filter = &qed_llh_add_src_tcp_port_filter,
+ .remove_src_tcp_port_filter = &qed_llh_remove_src_tcp_port_filter,
+ .add_dst_tcp_port_filter = &qed_llh_add_dst_tcp_port_filter,
+ .remove_dst_tcp_port_filter = &qed_llh_remove_dst_tcp_port_filter,
+ .clear_all_filters = &qed_llh_clear_all_filters,
+ .init_read_io = &init_nvmetcp_host_read_task,
+ .init_write_io = &init_nvmetcp_host_write_task,
+ .init_icreq_exchange = &init_nvmetcp_init_conn_req_task,
+ .init_task_cleanup = &init_cleanup_task_nvmetcp
+};
+
+const struct qed_nvmetcp_ops *qed_get_nvmetcp_ops(void)
+{
+ return &qed_nvmetcp_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_nvmetcp_ops);
+
+void qed_put_nvmetcp_ops(void)
+{
+}
+EXPORT_SYMBOL(qed_put_nvmetcp_ops);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.h b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.h
new file mode 100644
index 000000000000..e5e9d075bf4f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#ifndef _QED_NVMETCP_H
+#define _QED_NVMETCP_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/tcp_common.h>
+#include <linux/qed/qed_nvmetcp_if.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_mcp.h"
+#include "qed_sp.h"
+
+#define QED_NVMETCP_FW_CQ_SIZE (4 * 1024)
+
+/* tcp parameters */
+#define QED_TCP_FLOW_LABEL 0
+#define QED_TCP_TWO_MSL_TIMER 4000
+#define QED_TCP_HALF_WAY_CLOSE_TIMEOUT 10
+#define QED_TCP_MAX_FIN_RT 2
+#define QED_TCP_SWS_TIMER 5000
+
+struct qed_nvmetcp_info {
+ spinlock_t lock; /* Connection resources. */
+ struct list_head free_list;
+ u16 max_num_outstanding_tasks;
+ void *event_context;
+ nvmetcp_event_cb_t event_cb;
+};
+
+struct qed_hash_nvmetcp_con {
+ struct hlist_node node;
+ struct qed_nvmetcp_conn *con;
+};
+
+struct qed_nvmetcp_conn {
+ struct list_head list_entry;
+ bool free_on_delete;
+ u16 conn_id;
+ u32 icid;
+ u32 fw_cid;
+ u8 layer_code;
+ u8 offl_flags;
+ u8 connect_mode;
+ dma_addr_t sq_pbl_addr;
+ struct qed_chain r2tq;
+ struct qed_chain xhq;
+ struct qed_chain uhq;
+ u8 local_mac[6];
+ u8 remote_mac[6];
+ u8 ip_version;
+ u8 ka_max_probe_cnt;
+ u16 vlan_id;
+ u16 tcp_flags;
+ u32 remote_ip[4];
+ u32 local_ip[4];
+ u32 flow_label;
+ u32 ka_timeout;
+ u32 ka_interval;
+ u32 max_rt_time;
+ u8 ttl;
+ u8 tos_or_tc;
+ u16 remote_port;
+ u16 local_port;
+ u16 mss;
+ u8 rcv_wnd_scale;
+ u32 rcv_wnd;
+ u32 cwnd;
+ u8 update_flag;
+ u8 default_cq;
+ u8 abortive_dsconnect;
+ u32 max_seq_size;
+ u32 max_recv_pdu_length;
+ u32 max_send_pdu_length;
+ u32 first_seq_length;
+ u16 physical_q0;
+ u16 physical_q1;
+ u16 nvmetcp_cccid_max_range;
+ dma_addr_t nvmetcp_cccid_itid_table_addr;
+};
+
+#if IS_ENABLED(CONFIG_QED_NVMETCP)
+int qed_nvmetcp_alloc(struct qed_hwfn *p_hwfn);
+void qed_nvmetcp_setup(struct qed_hwfn *p_hwfn);
+void qed_nvmetcp_free(struct qed_hwfn *p_hwfn);
+
+#else /* IS_ENABLED(CONFIG_QED_NVMETCP) */
+static inline int qed_nvmetcp_alloc(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline void qed_nvmetcp_setup(struct qed_hwfn *p_hwfn) {}
+static inline void qed_nvmetcp_free(struct qed_hwfn *p_hwfn) {}
+
+#endif /* IS_ENABLED(CONFIG_QED_NVMETCP) */
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c
new file mode 100644
index 000000000000..c1dd71d19f3f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/storage_common.h>
+#include <linux/qed/nvmetcp_common.h>
+#include <linux/qed/qed_nvmetcp_if.h>
+#include "qed_nvmetcp_fw_funcs.h"
+
+#define NVMETCP_NUM_SGES_IN_CACHE 0x4
+
+bool nvmetcp_is_slow_sgl(u16 num_sges, bool small_mid_sge)
+{
+ return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
+}
+
+void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
+ struct scsi_cached_sges *ctx_data_desc,
+ struct storage_sgl_task_params *sgl_params)
+{
+ u8 num_sges_to_init = (u8)(sgl_params->num_sges > NVMETCP_NUM_SGES_IN_CACHE ?
+ NVMETCP_NUM_SGES_IN_CACHE : sgl_params->num_sges);
+ u8 sge_index;
+
+ /* sgl params */
+ ctx_sgl_params->sgl_addr.lo = cpu_to_le32(sgl_params->sgl_phys_addr.lo);
+ ctx_sgl_params->sgl_addr.hi = cpu_to_le32(sgl_params->sgl_phys_addr.hi);
+ ctx_sgl_params->sgl_total_length = cpu_to_le32(sgl_params->total_buffer_size);
+ ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_params->num_sges);
+
+ for (sge_index = 0; sge_index < num_sges_to_init; sge_index++) {
+ ctx_data_desc->sge[sge_index].sge_addr.lo =
+ cpu_to_le32(sgl_params->sgl[sge_index].sge_addr.lo);
+ ctx_data_desc->sge[sge_index].sge_addr.hi =
+ cpu_to_le32(sgl_params->sgl[sge_index].sge_addr.hi);
+ ctx_data_desc->sge[sge_index].sge_len =
+ cpu_to_le32(sgl_params->sgl[sge_index].sge_len);
+ }
+}
+
+static inline u32 calc_rw_task_size(struct nvmetcp_task_params *task_params,
+ enum nvmetcp_task_type task_type)
+{
+ u32 io_size;
+
+ if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE)
+ io_size = task_params->tx_io_size;
+ else
+ io_size = task_params->rx_io_size;
+
+ if (unlikely(!io_size))
+ return 0;
+
+ return io_size;
+}
+
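+/* Build the task SQ WQE: write tasks carry the in-capsule data length and an
+ * SGE count capped by the slow-SGL threshold, while read, icreq and cleanup
+ * tasks mostly just set the WQE type.
+ */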
+static inline void init_sqe(struct nvmetcp_task_params *task_params,
+ struct storage_sgl_task_params *sgl_task_params,
+ enum nvmetcp_task_type task_type)
+{
+ if (!task_params->sqe)
+ return;
+
+ memset(task_params->sqe, 0, sizeof(*task_params->sqe));
+ task_params->sqe->task_id = cpu_to_le16(task_params->itid);
+
+ switch (task_type) {
+ case NVMETCP_TASK_TYPE_HOST_WRITE: {
+ u32 buf_size = 0;
+ u32 num_sges = 0;
+
+ SET_FIELD(task_params->sqe->contlen_cdbsize,
+ NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD, 1);
+ SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
+ NVMETCP_WQE_TYPE_NORMAL);
+ if (task_params->tx_io_size) {
+ if (task_params->send_write_incapsule)
+ buf_size = calc_rw_task_size(task_params, task_type);
+
+ if (nvmetcp_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge))
+ num_sges = NVMETCP_WQE_NUM_SGES_SLOWIO;
+ else
+ num_sges = min((u16)sgl_task_params->num_sges,
+ (u16)SCSI_NUM_SGES_SLOW_SGL_THR);
+ }
+ SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES, num_sges);
+ SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CONT_LEN, buf_size);
+ } break;
+
+ case NVMETCP_TASK_TYPE_HOST_READ: {
+ SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
+ NVMETCP_WQE_TYPE_NORMAL);
+ SET_FIELD(task_params->sqe->contlen_cdbsize,
+ NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD, 1);
+ } break;
+
+ case NVMETCP_TASK_TYPE_INIT_CONN_REQUEST: {
+ SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
+ NVMETCP_WQE_TYPE_MIDDLE_PATH);
+
+ if (task_params->tx_io_size) {
+ SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CONT_LEN,
+ task_params->tx_io_size);
+ SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES,
+ min((u16)sgl_task_params->num_sges,
+ (u16)SCSI_NUM_SGES_SLOW_SGL_THR));
+ }
+ } break;
+
+ case NVMETCP_TASK_TYPE_CLEANUP:
+ SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
+ NVMETCP_WQE_TYPE_TASK_CLEANUP);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/* The following function initializes the NVMeTCP task params */
+static inline void
+init_nvmetcp_task_params(struct e5_nvmetcp_task_context *context,
+ struct nvmetcp_task_params *task_params,
+ enum nvmetcp_task_type task_type)
+{
+ context->ystorm_st_context.state.cccid = task_params->host_cccid;
+ SET_FIELD(context->ustorm_st_context.error_flags, USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP, 1);
+ context->ustorm_st_context.nvme_tcp_opaque_lo = cpu_to_le32(task_params->opq.lo);
+ context->ustorm_st_context.nvme_tcp_opaque_hi = cpu_to_le32(task_params->opq.hi);
+}
+
+/* The following function initializes default values for all tasks */
+static inline void
+init_default_nvmetcp_task(struct nvmetcp_task_params *task_params,
+ void *pdu_header, void *nvme_cmd,
+ enum nvmetcp_task_type task_type)
+{
+ struct e5_nvmetcp_task_context *context = task_params->context;
+ const u8 val_byte = context->mstorm_ag_context.cdu_validation;
+ u8 dw_index;
+
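+ /* val_byte preserves the CDU validation byte across the context memset */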
+ memset(context, 0, sizeof(*context));
+ init_nvmetcp_task_params(context, task_params,
+ (enum nvmetcp_task_type)task_type);
+
+ /* Swapping requirement used below; it will be removed in future FW versions */
+ if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE ||
+ task_type == NVMETCP_TASK_TYPE_HOST_READ) {
+ for (dw_index = 0;
+ dw_index < QED_NVMETCP_CMN_HDR_SIZE / sizeof(u32);
+ dw_index++)
+ context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] =
+ cpu_to_le32(__swab32(((u32 *)pdu_header)[dw_index]));
+
+ for (dw_index = QED_NVMETCP_CMN_HDR_SIZE / sizeof(u32);
+ dw_index < QED_NVMETCP_CMD_HDR_SIZE / sizeof(u32);
+ dw_index++)
+ context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] =
+ cpu_to_le32(__swab32(((u32 *)nvme_cmd)[dw_index - 2]));
+ } else {
+ for (dw_index = 0;
+ dw_index < QED_NVMETCP_NON_IO_HDR_SIZE / sizeof(u32);
+ dw_index++)
+ context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] =
+ cpu_to_le32(__swab32(((u32 *)pdu_header)[dw_index]));
+ }
+
+ /* M-Storm Context: */
+ context->mstorm_ag_context.cdu_validation = val_byte;
+ context->mstorm_st_context.task_type = (u8)(task_type);
+ context->mstorm_ag_context.task_cid = cpu_to_le16(task_params->conn_icid);
+
+ /* Ustorm Context: */
+ SET_FIELD(context->ustorm_ag_context.flags1, E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV, 1);
+ context->ustorm_st_context.task_type = (u8)(task_type);
+ context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
+ context->ustorm_ag_context.icid = cpu_to_le16(task_params->conn_icid);
+}
+
+/* The following function initializes the U-Storm Task Contexts */
+static inline void
+init_ustorm_task_contexts(struct ustorm_nvmetcp_task_st_ctx *ustorm_st_context,
+ struct e5_ustorm_nvmetcp_task_ag_ctx *ustorm_ag_context,
+ u32 remaining_recv_len,
+ u32 expected_data_transfer_len, u8 num_sges,
+ bool tx_dif_conn_err_en)
+{
+ /* Remaining data to be received in bytes. Used in validations */
+ ustorm_st_context->rem_rcv_len = cpu_to_le32(remaining_recv_len);
+ ustorm_ag_context->exp_data_acked = cpu_to_le32(expected_data_transfer_len);
+ ustorm_st_context->exp_data_transfer_len = cpu_to_le32(expected_data_transfer_len);
+ SET_FIELD(ustorm_st_context->reg1_map, REG1_NUM_SGES, num_sges);
+ SET_FIELD(ustorm_ag_context->flags2, E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN,
+ tx_dif_conn_err_en ? 1 : 0);
+}
+
+/* The following function initializes Local Completion Contexts: */
+static inline void
+set_local_completion_context(struct e5_nvmetcp_task_context *context)
+{
+ SET_FIELD(context->ystorm_st_context.state.flags,
+ YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP, 1);
+ SET_FIELD(context->ustorm_st_context.flags,
+ USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP, 1);
+}
+
+/* Common Fastpath task init function: */
+static inline void
+init_rw_nvmetcp_task(struct nvmetcp_task_params *task_params,
+ enum nvmetcp_task_type task_type,
+ void *pdu_header, void *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params)
+{
+ struct e5_nvmetcp_task_context *context = task_params->context;
+ u32 task_size = calc_rw_task_size(task_params, task_type);
+ bool slow_io = false;
+ u8 num_sges = 0;
+
+ init_default_nvmetcp_task(task_params, pdu_header, nvme_cmd, task_type);
+
+ /* Tx/Rx: */
+ if (task_params->tx_io_size) {
+ /* if data to transmit: */
+ init_scsi_sgl_context(&context->ystorm_st_context.state.sgl_params,
+ &context->ystorm_st_context.state.data_desc,
+ sgl_task_params);
+ slow_io = nvmetcp_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge);
+ num_sges =
+ (u8)(!slow_io ? min((u32)sgl_task_params->num_sges,
+ (u32)SCSI_NUM_SGES_SLOW_SGL_THR) :
+ NVMETCP_WQE_NUM_SGES_SLOWIO);
+ if (slow_io) {
+ SET_FIELD(context->ystorm_st_context.state.flags,
+ YSTORM_NVMETCP_TASK_STATE_SLOW_IO, 1);
+ }
+ } else if (task_params->rx_io_size) {
+ /* if data to receive: */
+ init_scsi_sgl_context(&context->mstorm_st_context.sgl_params,
+ &context->mstorm_st_context.data_desc,
+ sgl_task_params);
+ num_sges =
+ (u8)(!nvmetcp_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge) ?
+ min((u32)sgl_task_params->num_sges,
+ (u32)SCSI_NUM_SGES_SLOW_SGL_THR) :
+ NVMETCP_WQE_NUM_SGES_SLOWIO);
+ context->mstorm_st_context.rem_task_size = cpu_to_le32(task_size);
+ }
+
+ /* Ustorm context: */
+ init_ustorm_task_contexts(&context->ustorm_st_context,
+ &context->ustorm_ag_context,
+ /* Remaining Receive length is the Task Size */
+ task_size,
+ /* The size of the transmitted task */
+ task_size,
+ /* num_sges */
+ num_sges,
+ false);
+
+ /* Set exp_data_acked */
+ if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE) {
+ if (task_params->send_write_incapsule)
+ context->ustorm_ag_context.exp_data_acked = cpu_to_le32(task_size);
+ else
+ context->ustorm_ag_context.exp_data_acked = 0;
+ } else if (task_type == NVMETCP_TASK_TYPE_HOST_READ) {
+ context->ustorm_ag_context.exp_data_acked = 0;
+ }
+
+ context->ustorm_ag_context.exp_cont_len = 0;
+ init_sqe(task_params, sgl_task_params, task_type);
+}
+
+static void
+init_common_initiator_read_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params)
+{
+ init_rw_nvmetcp_task(task_params, NVMETCP_TASK_TYPE_HOST_READ,
+ cmd_pdu_header, nvme_cmd, sgl_task_params);
+}
+
+void init_nvmetcp_host_read_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params)
+{
+ init_common_initiator_read_task(task_params, (void *)cmd_pdu_header,
+ (void *)nvme_cmd, sgl_task_params);
+}
+
+static void
+init_common_initiator_write_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params)
+{
+ init_rw_nvmetcp_task(task_params, NVMETCP_TASK_TYPE_HOST_WRITE,
+ cmd_pdu_header, nvme_cmd, sgl_task_params);
+}
+
+void init_nvmetcp_host_write_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params)
+{
+ init_common_initiator_write_task(task_params, (void *)cmd_pdu_header,
+ (void *)nvme_cmd, sgl_task_params);
+}
+
+static void
+init_common_login_request_task(struct nvmetcp_task_params *task_params,
+ void *login_req_pdu_header,
+ struct storage_sgl_task_params *tx_sgl_task_params,
+ struct storage_sgl_task_params *rx_sgl_task_params)
+{
+ struct e5_nvmetcp_task_context *context = task_params->context;
+
+ init_default_nvmetcp_task(task_params, (void *)login_req_pdu_header, NULL,
+ NVMETCP_TASK_TYPE_INIT_CONN_REQUEST);
+
+ /* Ustorm Context: */
+ init_ustorm_task_contexts(&context->ustorm_st_context,
+ &context->ustorm_ag_context,
+
+ /* Remaining Receive length is the Task Size */
+ task_params->rx_io_size ?
+ rx_sgl_task_params->total_buffer_size : 0,
+
+ /* The size of the transmitted task */
+ task_params->tx_io_size ?
+ tx_sgl_task_params->total_buffer_size : 0,
+ 0, /* num_sges */
+ 0); /* tx_dif_conn_err_en */
+
+ /* SGL context: */
+ if (task_params->tx_io_size)
+ init_scsi_sgl_context(&context->ystorm_st_context.state.sgl_params,
+ &context->ystorm_st_context.state.data_desc,
+ tx_sgl_task_params);
+ if (task_params->rx_io_size)
+ init_scsi_sgl_context(&context->mstorm_st_context.sgl_params,
+ &context->mstorm_st_context.data_desc,
+ rx_sgl_task_params);
+
+ context->mstorm_st_context.rem_task_size =
+ cpu_to_le32(task_params->rx_io_size ?
+ rx_sgl_task_params->total_buffer_size : 0);
+ init_sqe(task_params, tx_sgl_task_params, NVMETCP_TASK_TYPE_INIT_CONN_REQUEST);
+}
+
+/* The following function initializes the Login (icreq) task in Host mode: */
+void init_nvmetcp_init_conn_req_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_icreq_pdu *init_conn_req_pdu_hdr,
+ struct storage_sgl_task_params *tx_sgl_task_params,
+ struct storage_sgl_task_params *rx_sgl_task_params)
+{
+ init_common_login_request_task(task_params, init_conn_req_pdu_hdr,
+ tx_sgl_task_params, rx_sgl_task_params);
+}
+
+void init_cleanup_task_nvmetcp(struct nvmetcp_task_params *task_params)
+{
+ init_sqe(task_params, NULL, NVMETCP_TASK_TYPE_CLEANUP);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.h b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.h
new file mode 100644
index 000000000000..1d5ddc217bdb
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#ifndef _QED_NVMETCP_FW_FUNCS_H
+#define _QED_NVMETCP_FW_FUNCS_H
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/storage_common.h>
+#include <linux/qed/nvmetcp_common.h>
+#include <linux/qed/qed_nvmetcp_if.h>
+
+#if IS_ENABLED(CONFIG_QED_NVMETCP)
+
+void init_nvmetcp_host_read_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params);
+void init_nvmetcp_host_write_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params);
+void init_nvmetcp_init_conn_req_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_icreq_pdu *init_conn_req_pdu_hdr,
+ struct storage_sgl_task_params *tx_sgl_task_params,
+ struct storage_sgl_task_params *rx_sgl_task_params);
+void init_cleanup_task_nvmetcp(struct nvmetcp_task_params *task_params);
+
+#else /* IS_ENABLED(CONFIG_QED_NVMETCP) */
+
+#endif /* IS_ENABLED(CONFIG_QED_NVMETCP) */
+
+#endif /* _QED_NVMETCP_FW_FUNCS_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c
new file mode 100644
index 000000000000..96a2077fd315
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_ip_services.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/*
+ * Copyright 2021 Marvell. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/errno.h>
+
+#include <net/tcp.h>
+
+#include <linux/qed/qed_nvmetcp_ip_services_if.h>
+
+#define QED_IP_RESOL_TIMEOUT 4
+
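+/* Resolve the IPv4 route and neighbour entry for the given address pair,
+ * polling up to QED_IP_RESOL_TIMEOUT seconds for ARP resolution; on success
+ * *ndev and hardware_address hold the egress netdev and next-hop MAC.
+ */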
+int qed_route_ipv4(struct sockaddr_storage *local_addr,
+ struct sockaddr_storage *remote_addr,
+ struct sockaddr *hardware_address,
+ struct net_device **ndev)
+{
+ struct neighbour *neigh = NULL;
+ __be32 *loc_ip, *rem_ip;
+ struct rtable *rt;
+ int rc = -ENXIO;
+ int retry;
+
+ loc_ip = &((struct sockaddr_in *)local_addr)->sin_addr.s_addr;
+ rem_ip = &((struct sockaddr_in *)remote_addr)->sin_addr.s_addr;
+ *ndev = NULL;
+ rt = ip_route_output(&init_net, *rem_ip, *loc_ip, 0/*tos*/, 0/*oif*/);
+ if (IS_ERR(rt)) {
+ pr_err("lookup route failed\n");
+ rc = PTR_ERR(rt);
+ goto return_err;
+ }
+
+ neigh = dst_neigh_lookup(&rt->dst, rem_ip);
+ if (!neigh) {
+ rc = -ENOMEM;
+ ip_rt_put(rt);
+ goto return_err;
+ }
+
+ *ndev = rt->dst.dev;
+ ip_rt_put(rt);
+
+ /* If not resolved, kick off the state machine towards resolution */
+ if (!(neigh->nud_state & NUD_VALID))
+ neigh_event_send(neigh, NULL);
+
+ /* query neighbor until resolved or timeout */
+ retry = QED_IP_RESOL_TIMEOUT;
+ while (!(neigh->nud_state & NUD_VALID) && retry > 0) {
+ msleep(1000);
+ retry--;
+ }
+
+ if (neigh->nud_state & NUD_VALID) {
+ /* copy resolved MAC address */
+ neigh_ha_snapshot(hardware_address->sa_data, neigh, *ndev);
+ hardware_address->sa_family = (*ndev)->type;
+ rc = 0;
+ }
+
+ neigh_release(neigh);
+ if (!(*loc_ip)) {
+ *loc_ip = inet_select_addr(*ndev, *rem_ip, RT_SCOPE_UNIVERSE);
+ local_addr->ss_family = AF_INET;
+ }
+
+return_err:
+
+ return rc;
+}
+EXPORT_SYMBOL(qed_route_ipv4);
+
+int qed_route_ipv6(struct sockaddr_storage *local_addr,
+ struct sockaddr_storage *remote_addr,
+ struct sockaddr *hardware_address,
+ struct net_device **ndev)
+{
+ struct neighbour *neigh = NULL;
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+ int rc = -ENXIO;
+ int retry;
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.saddr = ((struct sockaddr_in6 *)local_addr)->sin6_addr;
+ fl6.daddr = ((struct sockaddr_in6 *)remote_addr)->sin6_addr;
+ dst = ip6_route_output(&init_net, NULL, &fl6);
+ if (!dst || dst->error) {
+ if (dst) {
+ dst_release(dst);
+ pr_err("lookup route failed %d\n", dst->error);
+ }
+
+ goto out;
+ }
+
+ neigh = dst_neigh_lookup(dst, &fl6.daddr);
+ if (neigh) {
+ *ndev = ip6_dst_idev(dst)->dev;
+
+ /* If not resolved, kick-off state machine towards resolution */
+ if (!(neigh->nud_state & NUD_VALID))
+ neigh_event_send(neigh, NULL);
+
+ /* query neighbor until resolved or timeout */
+ retry = QED_IP_RESOL_TIMEOUT;
+ while (!(neigh->nud_state & NUD_VALID) && retry > 0) {
+ msleep(1000);
+ retry--;
+ }
+
+ if (neigh->nud_state & NUD_VALID) {
+ neigh_ha_snapshot((u8 *)hardware_address->sa_data,
+ neigh, *ndev);
+ hardware_address->sa_family = (*ndev)->type;
+ rc = 0;
+ }
+
+ neigh_release(neigh);
+
+ if (ipv6_addr_any(&fl6.saddr)) {
+ if (ipv6_dev_get_saddr(dev_net(*ndev), *ndev,
+ &fl6.daddr, 0, &fl6.saddr)) {
+ pr_err("Unable to find source IP address\n");
+ goto out;
+ }
+
+ local_addr->ss_family = AF_INET6;
+ ((struct sockaddr_in6 *)local_addr)->sin6_addr =
+ fl6.saddr;
+ }
+ }
+
+ dst_release(dst);
+
+out:
+
+ return rc;
+}
+EXPORT_SYMBOL(qed_route_ipv6);
+
+void qed_vlan_get_ndev(struct net_device **ndev, u16 *vlan_id)
+{
+ if (is_vlan_dev(*ndev)) {
+ *vlan_id = vlan_dev_vlan_id(*ndev);
+ *ndev = vlan_dev_real_dev(*ndev);
+ }
+}
+EXPORT_SYMBOL(qed_vlan_get_ndev);
+
+struct pci_dev *qed_validate_ndev(struct net_device *ndev)
+{
+ struct pci_dev *pdev = NULL;
+ struct net_device *upper;
+
+ for_each_pci_dev(pdev) {
+ if (pdev && pdev->driver &&
+ !strcmp(pdev->driver->name, "qede")) {
+ upper = pci_get_drvdata(pdev);
+ if (upper->ifindex == ndev->ifindex)
+ return pdev;
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(qed_validate_ndev);
+
+__be16 qed_get_in_port(struct sockaddr_storage *sa)
+{
+ return sa->ss_family == AF_INET
+ ? ((struct sockaddr_in *)sa)->sin_port
+ : ((struct sockaddr_in6 *)sa)->sin6_port;
+}
+EXPORT_SYMBOL(qed_get_in_port);
+
+int qed_fetch_tcp_port(struct sockaddr_storage local_ip_addr,
+ struct socket **sock, u16 *port)
+{
+ struct sockaddr_storage sa;
+ int rc = 0;
+
+ rc = sock_create(local_ip_addr.ss_family, SOCK_STREAM, IPPROTO_TCP,
+ sock);
+ if (rc) {
+ pr_warn("failed to create socket: %d\n", rc);
+ goto err;
+ }
+
+ (*sock)->sk->sk_allocation = GFP_KERNEL;
+ sk_set_memalloc((*sock)->sk);
+
+ rc = kernel_bind(*sock, (struct sockaddr *)&local_ip_addr,
+ sizeof(local_ip_addr));
+
+ if (rc) {
+ pr_warn("failed to bind socket: %d\n", rc);
+ goto err_sock;
+ }
+
+ rc = kernel_getsockname(*sock, (struct sockaddr *)&sa);
+ if (rc < 0) {
+ pr_warn("getsockname() failed: %d\n", rc);
+ goto err_sock;
+ }
+
+ *port = ntohs(qed_get_in_port(&sa));
+
+ return 0;
+
+err_sock:
+ sock_release(*sock);
+ *sock = NULL;
+err:
+
+ return rc;
+}
+EXPORT_SYMBOL(qed_fetch_tcp_port);
+
+void qed_return_tcp_port(struct socket *sock)
+{
+ if (sock && sock->sk) {
+ tcp_set_state(sock->sk, TCP_CLOSE);
+ sock_release(sock);
+ }
+}
+EXPORT_SYMBOL(qed_return_tcp_port);
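
qed_fetch_tcp_port() reserves a local TCP port by creating and binding a kernel socket and leaving it open; qed_return_tcp_port() drops the reservation again. A hypothetical pairing in a caller, sketched rather than taken from this series:

    struct sockaddr_storage local = { .ss_family = AF_INET };
    struct socket *sock;
    u16 port;
    int rc;

    rc = qed_fetch_tcp_port(local, &sock, &port);	/* stack picks a free port */
    if (rc)
            return rc;

    /* ... program "port" as the offloaded connection's source port ... */

    qed_return_tcp_port(sock);	/* connection gone: release the port */
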
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
index 88353aa404dc..b8c5641b29a8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -16,7 +16,7 @@
#include "qed_ll2.h"
#include "qed_ooo.h"
#include "qed_cxt.h"
-
+#include "qed_nvmetcp.h"
static struct qed_ooo_archipelago
*qed_ooo_seek_archipelago(struct qed_hwfn *p_hwfn,
struct qed_ooo_info
@@ -83,7 +83,8 @@ int qed_ooo_alloc(struct qed_hwfn *p_hwfn)
switch (p_hwfn->hw_info.personality) {
case QED_PCI_ISCSI:
- proto = PROTOCOLID_ISCSI;
+ case QED_PCI_NVMETCP:
+ proto = PROTOCOLID_TCP_ULP;
break;
case QED_PCI_ETH_RDMA:
case QED_PCI_ETH_IWARP:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 993f1357b6fc..60ff3222bf55 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -100,6 +100,11 @@ union ramrod_data {
struct iscsi_spe_conn_mac_update iscsi_conn_mac_update;
struct iscsi_spe_conn_termination iscsi_conn_terminate;
+ struct nvmetcp_init_ramrod_params nvmetcp_init;
+ struct nvmetcp_spe_conn_offload nvmetcp_conn_offload;
+ struct nvmetcp_conn_update_ramrod_params nvmetcp_conn_update;
+ struct nvmetcp_spe_conn_termination nvmetcp_conn_terminate;
+
struct vf_start_ramrod_data vf_start;
struct vf_stop_ramrod_data vf_stop;
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index aa71adcf31ee..b4ed54ffef9b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -385,7 +385,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
p_ramrod->personality = PERSONALITY_FCOE;
break;
case QED_PCI_ISCSI:
- p_ramrod->personality = PERSONALITY_ISCSI;
+ case QED_PCI_NVMETCP:
+ p_ramrod->personality = PERSONALITY_TCP_ULP;
break;
case QED_PCI_ETH_ROCE:
case QED_PCI_ETH_IWARP:
diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
index 2f6598086d9b..6304514a6f2c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
@@ -247,12 +247,10 @@ static struct qede_rdma_event_work *
qede_rdma_get_free_event_node(struct qede_dev *edev)
{
struct qede_rdma_event_work *event_node = NULL;
- struct list_head *list_node = NULL;
bool found = false;
- list_for_each(list_node, &edev->rdma_info.rdma_event_list) {
- event_node = list_entry(list_node, struct qede_rdma_event_work,
- list);
+ list_for_each_entry(event_node, &edev->rdma_info.rdma_event_list,
+ list) {
if (!work_pending(&event_node->work)) {
found = true;
break;
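
This hunk, like the qlcnic hunks further down, converts an open-coded list_for_each()/list_entry() walk to list_for_each_entry(), which folds the container_of() step into the iterator. A self-contained user-space sketch of the equivalence, using simplified stand-ins for the kernel macros:

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_entry(ptr, type, member) container_of(ptr, type, member)
    #define list_for_each(pos, head) \
            for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)
    #define list_for_each_entry(pos, head, member) \
            for ((pos) = list_entry((head)->next, __typeof__(*(pos)), member); \
                 &(pos)->member != (head); \
                 (pos) = list_entry((pos)->member.next, __typeof__(*(pos)), member))

    struct item { int val; struct list_head list; };

    int main(void)
    {
            struct list_head head, *node;
            struct item a = { .val = 1 }, *cur;

            head.next = head.prev = &a.list;
            a.list.next = a.list.prev = &head;

            list_for_each(node, &head)	/* old style: two steps */
                    printf("%d\n", list_entry(node, struct item, list)->val);

            list_for_each_entry(cur, &head, list)	/* new style: one step */
                    printf("%d\n", cur->val);

            return 0;
    }
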
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 214e347097a7..2376b2729633 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -114,7 +114,7 @@ static int ql_sem_spinlock(struct ql3_adapter *qdev,
value = readl(&port_regs->CommonRegs.semaphoreReg);
if ((value & (sem_mask >> 16)) == sem_bits)
return 0;
- ssleep(1);
+ mdelay(1000);
} while (--seconds);
return -1;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index d8a3ecaed3fc..d8f0863b3934 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1048,7 +1048,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
if (!skb)
- break;
+ goto error;
qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
skb_put(skb, QLCNIC_ILB_PKT_SIZE);
adapter->ahw->diag_cnt = 0;
@@ -1072,6 +1072,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
cnt++;
}
if (cnt != i) {
+error:
dev_err(&adapter->pdev->dev,
"LB Test: failed, TX[%d], RX[%d]\n", i, cnt);
if (mode != QLCNIC_ILB_MODE)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index e1b8490bed0a..4b8bc46f55c2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -460,12 +460,10 @@ int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr)
{
struct qlcnic_mac_vlan_list *cur;
- struct list_head *head;
int err = -EINVAL;
/* Delete MAC from the existing list */
- list_for_each(head, &adapter->mac_list) {
- cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ list_for_each_entry(cur, &adapter->mac_list, list) {
if (ether_addr_equal(addr, cur->mac_addr)) {
err = qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
0, QLCNIC_MAC_DEL);
@@ -483,11 +481,9 @@ int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan,
enum qlcnic_mac_type mac_type)
{
struct qlcnic_mac_vlan_list *cur;
- struct list_head *head;
/* look up if already exists */
- list_for_each(head, &adapter->mac_list) {
- cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ list_for_each_entry(cur, &adapter->mac_list, list) {
if (ether_addr_equal(addr, cur->mac_addr) &&
cur->vlan_id == vlan)
return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 601d22495a88..95ecc84dddcd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -203,7 +203,6 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *,
struct qlcnic_adapter *, u32);
-int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
int qlcnic_82xx_get_board_info(struct qlcnic_adapter *);
int qlcnic_82xx_config_led(struct qlcnic_adapter *, u32, u32);
void qlcnic_82xx_get_func_no(struct qlcnic_adapter *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 96b947fde646..8a31ce29ecfc 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -319,10 +319,8 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter)
{
struct qlcnic_mac_vlan_list *cur;
- struct list_head *head;
- list_for_each(head, &adapter->mac_list) {
- cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ list_for_each_entry(cur, &adapter->mac_list, list) {
if (ether_addr_equal_unaligned(adapter->mac_addr, cur->mac_addr)) {
qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
0, QLCNIC_MAC_DEL);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 8d51b0cb545c..27b1663c476e 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -163,7 +163,8 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
struct ifla_rmnet_flags *flags;
flags = nla_data(data[IFLA_RMNET_FLAGS]);
- data_format = flags->flags & flags->mask;
+ data_format &= ~flags->mask;
+ data_format |= flags->flags & flags->mask;
}
netdev_dbg(dev, "data format [0x%08X]\n", data_format);
@@ -336,7 +337,8 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
old_data_format = port->data_format;
flags = nla_data(data[IFLA_RMNET_FLAGS]);
- port->data_format = flags->flags & flags->mask;
+ port->data_format &= ~flags->mask;
+ port->data_format |= flags->flags & flags->mask;
if (rmnet_vnd_update_dev_mtu(port, real_dev)) {
port->data_format = old_data_format;
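
Both hunks replace a plain assignment with a masked read-modify-write, so bits outside flags->mask keep their current value instead of being silently cleared. A standalone illustration of the difference:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t data_format = 0x0000000f;	/* bits set earlier */
            uint32_t flags = 0x00000100, mask = 0x00000f00;

            /* old behaviour: everything outside the mask is lost */
            uint32_t old_result = flags & mask;		/* 0x00000100 */

            /* new behaviour: only the masked bits are replaced */
            uint32_t new_result = data_format;
            new_result &= ~mask;
            new_result |= flags & mask;			/* 0x0000010f */

            printf("old=0x%08x new=0x%08x\n", old_result, new_result);
            return 0;
    }
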
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index 8d8d4690a074..3d3cba56c516 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2013-2014, 2016-2018 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, 2016-2018, 2021 The Linux Foundation.
+ * All rights reserved.
*
* RMNET Data configuration engine
*/
@@ -48,6 +49,7 @@ struct rmnet_pcpu_stats {
struct rmnet_priv_stats {
u64 csum_ok;
+ u64 csum_ip4_header_bad;
u64 csum_valid_unset;
u64 csum_validation_failed;
u64 csum_err_bad_buffer;
@@ -56,6 +58,7 @@ struct rmnet_priv_stats {
u64 csum_fragmented_pkt;
u64 csum_skipped;
u64 csum_sw;
+ u64 csum_hw;
};
struct rmnet_priv {
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 0be5ac7ab261..bfbd7847f946 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
*
* RMNET Data ingress/egress handler
*/
@@ -82,12 +82,18 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
skb->dev = ep->egress_dev;
- /* Subtract MAP header */
- skb_pull(skb, sizeof(struct rmnet_map_header));
- rmnet_set_skb_proto(skb);
-
- if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
- if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
+ if ((port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) &&
+ (map_header->flags & MAP_NEXT_HEADER_FLAG)) {
+ if (rmnet_map_process_next_hdr_packet(skb, len))
+ goto free_skb;
+ skb_pull(skb, sizeof(*map_header));
+ rmnet_set_skb_proto(skb);
+ } else {
+ /* Subtract MAP header */
+ skb_pull(skb, sizeof(*map_header));
+ rmnet_set_skb_proto(skb);
+ if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4 &&
+ !rmnet_map_checksum_downlink_packet(skb, len + pad))
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
@@ -128,7 +134,7 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
struct rmnet_port *port, u8 mux_id,
struct net_device *orig_dev)
{
- int required_headroom, additional_header_len;
+ int required_headroom, additional_header_len, csum_type = 0;
struct rmnet_map_header *map_header;
additional_header_len = 0;
@@ -136,18 +142,23 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
- required_headroom += additional_header_len;
+ csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
+ } else if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) {
+ additional_header_len = sizeof(struct rmnet_map_v5_csum_header);
+ csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV5;
}
- if (skb_headroom(skb) < required_headroom) {
- if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
- return -ENOMEM;
- }
+ required_headroom += additional_header_len;
+
+ if (skb_cow_head(skb, required_headroom) < 0)
+ return -ENOMEM;
- if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
- rmnet_map_checksum_uplink_packet(skb, orig_dev);
+ if (csum_type)
+ rmnet_map_checksum_uplink_packet(skb, port, orig_dev,
+ csum_type);
- map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
+ map_header = rmnet_map_add_map_header(skb, additional_header_len,
+ port, 0);
if (!map_header)
return -ENOMEM;
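
The egress rework above now folds the optional checksum header's length into required_headroom unconditionally and lets skb_cow_head() both grow the headroom and un-share a cloned header. Reduced to its shape, a sketch with the *_enabled flags standing in for the port->data_format tests:

    required_headroom = sizeof(struct rmnet_map_header);
    if (csum_v4_enabled)
            required_headroom += sizeof(struct rmnet_map_ul_csum_header);
    else if (csum_v5_enabled)
            required_headroom += sizeof(struct rmnet_map_v5_csum_header);

    if (skb_cow_head(skb, required_headroom) < 0)
            return -ENOMEM;	/* reallocation failed */
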
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index 2aea153f4247..e5a0b38f7dbe 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
*/
#ifndef _RMNET_MAP_H_
@@ -43,10 +43,15 @@ enum rmnet_map_commands {
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
struct rmnet_port *port);
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
- int hdrlen, int pad);
+ int hdrlen,
+ struct rmnet_port *port,
+ int pad);
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
- struct net_device *orig_dev);
+ struct rmnet_port *port,
+ struct net_device *orig_dev,
+ int csum_type);
+int rmnet_map_process_next_hdr_packet(struct sk_buff *skb, u16 len);
#endif /* _RMNET_MAP_H_ */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 0ac2ff828320..3ee5c1a8b46e 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
*
* RMNET Data MAP protocol
*/
@@ -8,6 +8,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
+#include <linux/bitfield.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
@@ -18,23 +19,13 @@
static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
const void *txporthdr)
{
- __sum16 *check = NULL;
+ if (protocol == IPPROTO_TCP)
+ return &((struct tcphdr *)txporthdr)->check;
- switch (protocol) {
- case IPPROTO_TCP:
- check = &(((struct tcphdr *)txporthdr)->check);
- break;
-
- case IPPROTO_UDP:
- check = &(((struct udphdr *)txporthdr)->check);
- break;
+ if (protocol == IPPROTO_UDP)
+ return &((struct udphdr *)txporthdr)->check;
- default:
- check = NULL;
- break;
- }
-
- return check;
+ return NULL;
}
static int
@@ -42,71 +33,74 @@ rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
struct rmnet_map_dl_csum_trailer *csum_trailer,
struct rmnet_priv *priv)
{
- __sum16 *csum_field, csum_temp, pseudo_csum, hdr_csum, ip_payload_csum;
- u16 csum_value, csum_value_final;
- struct iphdr *ip4h;
- void *txporthdr;
- __be16 addend;
-
- ip4h = (struct iphdr *)(skb->data);
- if ((ntohs(ip4h->frag_off) & IP_MF) ||
- ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0)) {
+ struct iphdr *ip4h = (struct iphdr *)skb->data;
+ void *txporthdr = skb->data + ip4h->ihl * 4;
+ __sum16 *csum_field, pseudo_csum;
+ __sum16 ip_payload_csum;
+
+ /* Computing the checksum over just the IPv4 header--including its
+ * checksum field--should yield 0. If it doesn't, the IP header
+ * is bad, so return an error and let the IP layer drop it.
+ */
+ if (ip_fast_csum(ip4h, ip4h->ihl)) {
+ priv->stats.csum_ip4_header_bad++;
+ return -EINVAL;
+ }
+
+ /* We don't support checksum offload on IPv4 fragments */
+ if (ip_is_fragment(ip4h)) {
priv->stats.csum_fragmented_pkt++;
return -EOPNOTSUPP;
}
- txporthdr = skb->data + ip4h->ihl * 4;
-
+ /* Checksum offload is only supported for UDP and TCP protocols */
csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
-
if (!csum_field) {
priv->stats.csum_err_invalid_transport++;
return -EPROTONOSUPPORT;
}
- /* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
- if (*csum_field == 0 && ip4h->protocol == IPPROTO_UDP) {
+ /* RFC 768: UDP checksum is optional for IPv4, and is 0 if unused */
+ if (!*csum_field && ip4h->protocol == IPPROTO_UDP) {
priv->stats.csum_skipped++;
return 0;
}
- csum_value = ~ntohs(csum_trailer->csum_value);
- hdr_csum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
- ip_payload_csum = csum16_sub((__force __sum16)csum_value,
- (__force __be16)hdr_csum);
-
- pseudo_csum = ~csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
- ntohs(ip4h->tot_len) - ip4h->ihl * 4,
- ip4h->protocol, 0);
- addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
- pseudo_csum = csum16_add(ip_payload_csum, addend);
-
- addend = (__force __be16)ntohs((__force __be16)*csum_field);
- csum_temp = ~csum16_sub(pseudo_csum, addend);
- csum_value_final = (__force u16)csum_temp;
-
- if (unlikely(csum_value_final == 0)) {
- switch (ip4h->protocol) {
- case IPPROTO_UDP:
- /* RFC 768 - DL4 1's complement rule for UDP csum 0 */
- csum_value_final = ~csum_value_final;
- break;
-
- case IPPROTO_TCP:
- /* DL4 Non-RFC compliant TCP checksum found */
- if (*csum_field == (__force __sum16)0xFFFF)
- csum_value_final = ~csum_value_final;
- break;
- }
- }
-
- if (csum_value_final == ntohs((__force __be16)*csum_field)) {
- priv->stats.csum_ok++;
- return 0;
- } else {
+ /* The checksum value in the trailer is computed over the entire
+ * IP packet, including the IP header and payload. To derive the
+ * transport checksum from this, we first subtract the contribution
+ * of the IP header from the trailer checksum. We then add the
+ * checksum computed over the pseudo header.
+ *
+ * We verified above that the IP header contributes zero to the
+ * trailer checksum. Therefore the checksum in the trailer is
+ * just the checksum computed over the IP payload.
+ *
+ * If the IP payload arrives intact, adding the pseudo header
+ * checksum to the IP payload checksum will yield 0xffff (negative
+ * zero). This means the trailer checksum and the pseudo checksum
+ * are additive inverses of each other. Put another way, the
+ * message passes the checksum test if the trailer checksum value
+ * is the negated pseudo header checksum.
+ *
+ * Knowing this, we don't even need to examine the transport
+ * header checksum value; it is already accounted for in the
+ * checksum value found in the trailer.
+ */
+ ip_payload_csum = csum_trailer->csum_value;
+
+ pseudo_csum = csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
+ ntohs(ip4h->tot_len) - ip4h->ihl * 4,
+ ip4h->protocol, 0);
+
+ /* The cast is required to ensure only the low 16 bits are examined */
+ if (ip_payload_csum != (__sum16)~pseudo_csum) {
priv->stats.csum_validation_failed++;
return -EINVAL;
}
+
+ priv->stats.csum_ok++;
+ return 0;
}
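
The comparison above relies on the Internet-checksum identity: for an intact packet, the one's-complement sum of the pseudo header plus the transport payload folds to 0xffff, so the two folded sums are complements of each other. The trailer stores the complemented payload sum and csum_tcpudp_magic() returns the complemented pseudo-header sum, so the extra complements cancel in the final test. A standalone toy in host byte order showing the underlying identity (the kernel code works on __be16/__sum16 with csum_fold()-style helpers):

    #include <stdint.h>
    #include <stdio.h>

    /* fold words into a 16-bit one's-complement sum */
    static uint16_t csum16(const uint16_t *words, int n, uint32_t acc)
    {
            while (n--)
                    acc += *words++;
            while (acc >> 16)
                    acc = (acc & 0xffff) + (acc >> 16);
            return acc;
    }

    int main(void)
    {
            uint16_t pseudo[] = { 0xc0a8, 0x0001, 0xc0a8, 0x0002, 0x0011, 0x0008 };
            uint16_t payload[] = { 0x1234, 0x5678, 0x0008, 0x0000 }; /* csum at [3] */
            uint16_t p, s;

            /* sender: checksum covers pseudo header + payload, stored complemented */
            payload[3] = ~csum16(payload, 4, csum16(pseudo, 6, 0));

            p = csum16(pseudo, 6, 0);	/* pseudo-header sum */
            s = csum16(payload, 4, 0);	/* payload sum, incl. checksum field */

            printf("s=0x%04x ~p=0x%04x\n", s, (uint16_t)~p);	/* equal */
            return 0;
    }
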
#if IS_ENABLED(CONFIG_IPV6)
@@ -115,76 +109,66 @@ rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
struct rmnet_map_dl_csum_trailer *csum_trailer,
struct rmnet_priv *priv)
{
- __sum16 *csum_field, ip6_payload_csum, pseudo_csum, csum_temp;
- u16 csum_value, csum_value_final;
- __be16 ip6_hdr_csum, addend;
- struct ipv6hdr *ip6h;
- void *txporthdr;
- u32 length;
-
- ip6h = (struct ipv6hdr *)(skb->data);
-
- txporthdr = skb->data + sizeof(struct ipv6hdr);
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)skb->data;
+ void *txporthdr = skb->data + sizeof(*ip6h);
+ __sum16 *csum_field, pseudo_csum;
+ __sum16 ip6_payload_csum;
+ __be16 ip_header_csum;
+
+ /* Checksum offload is only supported for UDP and TCP protocols;
+ * the packet cannot include any IPv6 extension headers
+ */
csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
-
if (!csum_field) {
priv->stats.csum_err_invalid_transport++;
return -EPROTONOSUPPORT;
}
- csum_value = ~ntohs(csum_trailer->csum_value);
- ip6_hdr_csum = (__force __be16)
- ~ntohs((__force __be16)ip_compute_csum(ip6h,
- (int)(txporthdr - (void *)(skb->data))));
- ip6_payload_csum = csum16_sub((__force __sum16)csum_value,
- ip6_hdr_csum);
-
- length = (ip6h->nexthdr == IPPROTO_UDP) ?
- ntohs(((struct udphdr *)txporthdr)->len) :
- ntohs(ip6h->payload_len);
- pseudo_csum = ~(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
- length, ip6h->nexthdr, 0));
- addend = (__force __be16)ntohs((__force __be16)pseudo_csum);
- pseudo_csum = csum16_add(ip6_payload_csum, addend);
-
- addend = (__force __be16)ntohs((__force __be16)*csum_field);
- csum_temp = ~csum16_sub(pseudo_csum, addend);
- csum_value_final = (__force u16)csum_temp;
-
- if (unlikely(csum_value_final == 0)) {
- switch (ip6h->nexthdr) {
- case IPPROTO_UDP:
- /* RFC 2460 section 8.1
- * DL6 One's complement rule for UDP checksum 0
- */
- csum_value_final = ~csum_value_final;
- break;
-
- case IPPROTO_TCP:
- /* DL6 Non-RFC compliant TCP checksum found */
- if (*csum_field == (__force __sum16)0xFFFF)
- csum_value_final = ~csum_value_final;
- break;
- }
- }
-
- if (csum_value_final == ntohs((__force __be16)*csum_field)) {
- priv->stats.csum_ok++;
- return 0;
- } else {
+ /* The checksum value in the trailer is computed over the entire
+ * IP packet, including the IP header and payload. To derive the
+ * transport checksum from this, we first subtract the contribution
+ * of the IP header from the trailer checksum. We then add the
+ * checksum computed over the pseudo header.
+ */
+ ip_header_csum = (__force __be16)ip_fast_csum(ip6h, sizeof(*ip6h) / 4);
+ ip6_payload_csum = csum16_sub(csum_trailer->csum_value, ip_header_csum);
+
+ pseudo_csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+ ntohs(ip6h->payload_len),
+ ip6h->nexthdr, 0);
+
+ /* It's sufficient to compare the IP payload checksum with the
+ * negated pseudo checksum to determine whether the packet
+ * checksum was good. (See further explanation in comments
+ * in rmnet_map_ipv4_dl_csum_trailer()).
+ *
+ * The cast is required to ensure only the low 16 bits are
+ * examined.
+ */
+ if (ip6_payload_csum != (__sum16)~pseudo_csum) {
priv->stats.csum_validation_failed++;
return -EINVAL;
}
+
+ priv->stats.csum_ok++;
+ return 0;
+}
+#else
+static int
+rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
+ struct rmnet_map_dl_csum_trailer *csum_trailer,
+ struct rmnet_priv *priv)
+{
+ return 0;
}
#endif
-static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
+static void rmnet_map_complement_ipv4_txporthdr_csum_field(struct iphdr *ip4h)
{
- struct iphdr *ip4h = (struct iphdr *)iphdr;
void *txphdr;
u16 *csum;
- txphdr = iphdr + ip4h->ihl * 4;
+ txphdr = (void *)ip4h + ip4h->ihl * 4;
if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
@@ -193,15 +177,14 @@ static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
}
static void
-rmnet_map_ipv4_ul_csum_header(void *iphdr,
+rmnet_map_ipv4_ul_csum_header(struct iphdr *iphdr,
struct rmnet_map_ul_csum_header *ul_header,
struct sk_buff *skb)
{
- struct iphdr *ip4h = iphdr;
u16 val;
val = MAP_CSUM_UL_ENABLED_FLAG;
- if (ip4h->protocol == IPPROTO_UDP)
+ if (iphdr->protocol == IPPROTO_UDP)
val |= MAP_CSUM_UL_UDP_FLAG;
val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;
@@ -214,13 +197,13 @@ rmnet_map_ipv4_ul_csum_header(void *iphdr,
}
#if IS_ENABLED(CONFIG_IPV6)
-static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
+static void
+rmnet_map_complement_ipv6_txporthdr_csum_field(struct ipv6hdr *ip6h)
{
- struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
void *txphdr;
u16 *csum;
- txphdr = ip6hdr + sizeof(struct ipv6hdr);
+ txphdr = ip6h + 1;
if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
@@ -229,15 +212,14 @@ static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
}
static void
-rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
+rmnet_map_ipv6_ul_csum_header(struct ipv6hdr *ipv6hdr,
struct rmnet_map_ul_csum_header *ul_header,
struct sk_buff *skb)
{
- struct ipv6hdr *ip6h = ip6hdr;
u16 val;
val = MAP_CSUM_UL_ENABLED_FLAG;
- if (ip6h->nexthdr == IPPROTO_UDP)
+ if (ipv6hdr->nexthdr == IPPROTO_UDP)
val |= MAP_CSUM_UL_UDP_FLAG;
val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;
@@ -246,16 +228,73 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
skb->ip_summed = CHECKSUM_NONE;
- rmnet_map_complement_ipv6_txporthdr_csum_field(ip6hdr);
+ rmnet_map_complement_ipv6_txporthdr_csum_field(ipv6hdr);
+}
+#else
+static void
+rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
+ struct rmnet_map_ul_csum_header *ul_header,
+ struct sk_buff *skb)
+{
}
#endif
+static void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
+ struct rmnet_port *port,
+ struct net_device *orig_dev)
+{
+ struct rmnet_priv *priv = netdev_priv(orig_dev);
+ struct rmnet_map_v5_csum_header *ul_header;
+
+ ul_header = skb_push(skb, sizeof(*ul_header));
+ memset(ul_header, 0, sizeof(*ul_header));
+ ul_header->header_info = u8_encode_bits(RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD,
+ MAPV5_HDRINFO_HDR_TYPE_FMASK);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ void *iph = ip_hdr(skb);
+ __sum16 *check;
+ void *trans;
+ u8 proto;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ u16 ip_len = ((struct iphdr *)iph)->ihl * 4;
+
+ proto = ((struct iphdr *)iph)->protocol;
+ trans = iph + ip_len;
+ } else if (IS_ENABLED(CONFIG_IPV6) &&
+ skb->protocol == htons(ETH_P_IPV6)) {
+ u16 ip_len = sizeof(struct ipv6hdr);
+
+ proto = ((struct ipv6hdr *)iph)->nexthdr;
+ trans = iph + ip_len;
+ } else {
+ priv->stats.csum_err_invalid_ip_version++;
+ goto sw_csum;
+ }
+
+ check = rmnet_map_get_csum_field(proto, trans);
+ if (check) {
+ skb->ip_summed = CHECKSUM_NONE;
+ /* Ask for checksum offloading */
+ ul_header->csum_info |= MAPV5_CSUMINFO_VALID_FLAG;
+ priv->stats.csum_hw++;
+ return;
+ }
+ }
+
+sw_csum:
+ priv->stats.csum_sw++;
+}
+
/* Adds MAP header to front of skb->data
* Padding is calculated and set appropriately in MAP header. Mux ID is
* initialized to 0.
*/
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
- int hdrlen, int pad)
+ int hdrlen,
+ struct rmnet_port *port,
+ int pad)
{
struct rmnet_map_header *map_header;
u32 padding, map_datalen;
@@ -266,6 +305,10 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
skb_push(skb, sizeof(struct rmnet_map_header));
memset(map_header, 0, sizeof(struct rmnet_map_header));
+ /* Set next_hdr bit for csum offload packets */
+ if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5)
+ map_header->flags |= MAP_NEXT_HEADER_FLAG;
+
if (pad == RMNET_MAP_NO_PAD_BYTES) {
map_header->pkt_len = htons(map_datalen);
return map_header;
@@ -300,8 +343,11 @@ done:
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
struct rmnet_port *port)
{
+ struct rmnet_map_v5_csum_header *next_hdr = NULL;
struct rmnet_map_header *maph;
+ void *data = skb->data;
struct sk_buff *skbn;
+ u8 nexthdr_type;
u32 packet_len;
if (skb->len == 0)
@@ -310,8 +356,18 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
maph = (struct rmnet_map_header *)skb->data;
packet_len = ntohs(maph->pkt_len) + sizeof(*maph);
- if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
+ if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
+ } else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) {
+ if (!(maph->flags & MAP_CMD_FLAG)) {
+ packet_len += sizeof(*next_hdr);
+ if (maph->flags & MAP_NEXT_HEADER_FLAG)
+ next_hdr = data + sizeof(*maph);
+ else
+ /* Mapv5 data pkt without csum hdr is invalid */
+ return NULL;
+ }
+ }
if (((int)skb->len - (int)packet_len) < 0)
return NULL;
@@ -320,6 +376,13 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
if (!maph->pkt_len)
return NULL;
+ if (next_hdr) {
+ nexthdr_type = u8_get_bits(next_hdr->header_info,
+ MAPV5_HDRINFO_HDR_TYPE_FMASK);
+ if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
+ return NULL;
+ }
+
skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
if (!skbn)
return NULL;
@@ -355,28 +418,19 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
return -EINVAL;
}
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (skb->protocol == htons(ETH_P_IP))
return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
-#if IS_ENABLED(CONFIG_IPV6)
+
+ if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6))
return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);
-#else
- priv->stats.csum_err_invalid_ip_version++;
- return -EPROTONOSUPPORT;
-#endif
- } else {
- priv->stats.csum_err_invalid_ip_version++;
- return -EPROTONOSUPPORT;
- }
- return 0;
+ priv->stats.csum_err_invalid_ip_version++;
+
+ return -EPROTONOSUPPORT;
}
-/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
- * packets that are supported for UL checksum offload.
- */
-void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
- struct net_device *orig_dev)
+static void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
+ struct net_device *orig_dev)
{
struct rmnet_priv *priv = netdev_priv(orig_dev);
struct rmnet_map_ul_csum_header *ul_header;
@@ -389,28 +443,80 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
goto sw_csum;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- iphdr = (char *)ul_header +
- sizeof(struct rmnet_map_ul_csum_header);
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ goto sw_csum;
- if (skb->protocol == htons(ETH_P_IP)) {
- rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
- return;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
-#if IS_ENABLED(CONFIG_IPV6)
- rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
- return;
-#else
- priv->stats.csum_err_invalid_ip_version++;
- goto sw_csum;
-#endif
- } else {
- priv->stats.csum_err_invalid_ip_version++;
- }
+ iphdr = (char *)ul_header +
+ sizeof(struct rmnet_map_ul_csum_header);
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
+ priv->stats.csum_hw++;
+ return;
+ }
+
+ if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) {
+ rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
+ priv->stats.csum_hw++;
+ return;
}
+ priv->stats.csum_err_invalid_ip_version++;
+
sw_csum:
memset(ul_header, 0, sizeof(*ul_header));
priv->stats.csum_sw++;
}
+
+/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
+ * packets that are supported for UL checksum offload.
+ */
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+ struct rmnet_port *port,
+ struct net_device *orig_dev,
+ int csum_type)
+{
+ switch (csum_type) {
+ case RMNET_FLAGS_EGRESS_MAP_CKSUMV4:
+ rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
+ break;
+ case RMNET_FLAGS_EGRESS_MAP_CKSUMV5:
+ rmnet_map_v5_checksum_uplink_packet(skb, port, orig_dev);
+ break;
+ default:
+ break;
+ }
+}
+
+/* Process a MAPv5 packet header */
+int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
+ u16 len)
+{
+ struct rmnet_priv *priv = netdev_priv(skb->dev);
+ struct rmnet_map_v5_csum_header *next_hdr;
+ u8 nexthdr_type;
+
+ next_hdr = (struct rmnet_map_v5_csum_header *)(skb->data +
+ sizeof(struct rmnet_map_header));
+
+ nexthdr_type = u8_get_bits(next_hdr->header_info,
+ MAPV5_HDRINFO_HDR_TYPE_FMASK);
+
+ if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
+ return -EINVAL;
+
+ if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
+ priv->stats.csum_sw++;
+ } else if (next_hdr->csum_info & MAPV5_CSUMINFO_VALID_FLAG) {
+ priv->stats.csum_ok++;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ priv->stats.csum_valid_unset++;
+ }
+
+ /* Pull csum v5 header */
+ skb_pull(skb, sizeof(*next_hdr));
+
+ return 0;
+}
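
The MAPv5 paths above pack and unpack the header_info byte with u8_encode_bits()/u8_get_bits() from <linux/bitfield.h>. A standalone sketch of that pattern; the mask value and field placement below are assumptions for illustration only, the real layout is defined in rmnet_map.h:

    #include <stdint.h>
    #include <stdio.h>

    #define MAPV5_HDRINFO_HDR_TYPE_FMASK 0x7f	/* assumed: bits 6:0 */
    #define RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD 2

    /* simplified stand-ins for the kernel helpers */
    static uint8_t u8_encode_bits(uint8_t val, uint8_t mask)
    {
            return (val * (mask & -mask)) & mask;	/* shift val up to the mask */
    }

    static uint8_t u8_get_bits(uint8_t reg, uint8_t mask)
    {
            return (reg & mask) / (mask & -mask);
    }

    int main(void)
    {
            uint8_t header_info = u8_encode_bits(RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD,
                                                 MAPV5_HDRINFO_HDR_TYPE_FMASK);

            printf("type=%u\n",
                   u8_get_bits(header_info, MAPV5_HDRINFO_HDR_TYPE_FMASK));
            return 0;
    }
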
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 41fbd2ceeede..6556b5381ce8 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -166,6 +166,7 @@ static const struct net_device_ops rmnet_vnd_ops = {
static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
"Checksum ok",
+ "Bad IPv4 header checksum",
"Checksum valid bit not set",
"Checksum validation failed",
"Checksum error bad buffer",
@@ -174,6 +175,7 @@ static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
"Checksum skipped on ip fragment",
"Checksum skipped",
"Checksum computed in software",
+ "Checksum computed in hardware",
};
static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 1663e0486496..6a9fe9f7e0be 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -3508,7 +3508,6 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000);
rtl_pcie_state_l2l3_disable(tp);
- rtl_hw_aspm_clkreq_enable(tp, true);
}
DECLARE_RTL_COND(rtl_mac_ocp_e00e_cond)
@@ -4115,6 +4114,7 @@ static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp,
case RTL_GIGA_MAC_VER_61:
case RTL_GIGA_MAC_VER_63:
padto = max_t(unsigned int, padto, ETH_ZLEN);
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 4afff320dfd0..69c50f81e1cb 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2047,13 +2047,6 @@ static int ravb_probe(struct platform_device *pdev)
return -EINVAL;
}
- /* Get base address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "invalid resource\n");
- return -EINVAL;
- }
-
ndev = alloc_etherdev_mqs(sizeof(struct ravb_private),
NUM_TX_QUEUE, NUM_RX_QUEUE);
if (!ndev)
@@ -2065,9 +2058,6 @@ static int ravb_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- /* The Ether-specific entries in the device structure. */
- ndev->base_addr = res->start;
-
chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev);
if (chip_id == RCAR_GEN3)
@@ -2089,12 +2079,15 @@ static int ravb_probe(struct platform_device *pdev)
priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
- priv->addr = devm_ioremap_resource(&pdev->dev, res);
+ priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->addr)) {
error = PTR_ERR(priv->addr);
goto out_release;
}
+ /* The Ether-specific entries in the device structure. */
+ ndev->base_addr = res->start;
+
spin_lock_init(&priv->lock);
INIT_WORK(&priv->work, ravb_tx_timeout_work);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index c5b154868c1f..177523be4fb6 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3225,9 +3225,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
struct net_device *ndev;
int ret;
- /* get base addr */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
ndev = alloc_etherdev(sizeof(struct sh_eth_private));
if (!ndev)
return -ENOMEM;
@@ -3245,7 +3242,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
mdp = netdev_priv(ndev);
mdp->num_tx_ring = TX_RING_SIZE;
mdp->num_rx_ring = RX_RING_SIZE;
- mdp->addr = devm_ioremap_resource(&pdev->dev, res);
+ mdp->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(mdp->addr)) {
ret = PTR_ERR(mdp->addr);
goto out_release;
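
Both this conversion and the ravb one above switch to devm_platform_get_and_ioremap_resource(), which combines platform_get_resource() with devm_ioremap_resource() while still handing back the struct resource (ravb needs it for ndev->base_addr). The before/after shape, sketched with illustrative names:

    /* before: two calls, a NULL res only caught inside the ioremap helper */
    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    priv->addr = devm_ioremap_resource(&pdev->dev, res);

    /* after: one call, same error handling, res still filled in */
    priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
    if (IS_ERR(priv->addr))
            return PTR_ERR(priv->addr);
    ndev->base_addr = res->start;
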
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index d1e908846f5d..22fbb0ae77fb 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -90,6 +90,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
efx->pci_dev->irq);
goto fail1;
}
+ efx->irqs_hooked = true;
return 0;
}
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 6eef0f45b133..2b29fd4cbdf4 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -835,6 +835,10 @@ static int ioc3eth_probe(struct platform_device *pdev)
int err;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ dev_err(&pdev->dev, "Invalid resource\n");
+ return -EINVAL;
+ }
/* get mac addr from one wire prom */
if (ioc3eth_get_mac_addr(regs, mac_addr))
return -EPROBE_DEFER; /* not available yet */
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 7737e4d0bb9e..9a19e4d9da02 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -66,6 +66,18 @@ config DWMAC_ANARION
This selects the Anarion SoC glue layer support for the stmmac driver.
+config DWMAC_INGENIC
+ tristate "Ingenic MAC support"
+ default MACH_INGENIC
+ depends on OF && HAS_IOMEM && (MACH_INGENIC || COMPILE_TEST)
+ select MFD_SYSCON
+ help
+ Support for the Ethernet controller on Ingenic SoCs.
+
+ This selects the Ingenic SoCs glue layer support for the stmmac
+ device driver. This driver is used for the MAC Ethernet
+ controller on Ingenic SoCs.
+
config DWMAC_IPQ806X
tristate "QCA IPQ806x DWMAC support"
default ARCH_QCOM
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index f2e478b884b0..6471f93889ee 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -14,6 +14,7 @@ stmmac-$(CONFIG_STMMAC_SELFTESTS) += stmmac_selftests.o
# Ordering matters. Generic driver must be last.
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
obj-$(CONFIG_DWMAC_ANARION) += dwmac-anarion.o
+obj-$(CONFIG_DWMAC_INGENIC) += dwmac-ingenic.o
obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o
obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
obj-$(CONFIG_DWMAC_MEDIATEK) += dwmac-mediatek.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 619e3c0760d6..5fecc83f175b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -503,8 +503,7 @@ struct mac_device_info {
const struct stmmac_hwtimestamp *ptp;
const struct stmmac_tc_ops *tc;
const struct stmmac_mmc_ops *mmc;
- const struct mdio_xpcs_ops *xpcs;
- struct mdio_xpcs_args xpcs_args;
+ struct dw_xpcs *xpcs;
struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
void __iomem *pcsr; /* vpointer to device CSRs */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
new file mode 100644
index 000000000000..9a6d819b84ae
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dwmac-ingenic.c - Ingenic SoCs DWMAC specific glue layer
+ *
+ * Copyright (c) 2021 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+#define MACPHYC_TXCLK_SEL_MASK GENMASK(31, 31)
+#define MACPHYC_TXCLK_SEL_OUTPUT 0x1
+#define MACPHYC_TXCLK_SEL_INPUT 0x0
+#define MACPHYC_MODE_SEL_MASK GENMASK(31, 31)
+#define MACPHYC_MODE_SEL_RMII 0x0
+#define MACPHYC_TX_SEL_MASK GENMASK(19, 19)
+#define MACPHYC_TX_SEL_ORIGIN 0x0
+#define MACPHYC_TX_SEL_DELAY 0x1
+#define MACPHYC_TX_DELAY_MASK GENMASK(18, 12)
+#define MACPHYC_RX_SEL_MASK GENMASK(11, 11)
+#define MACPHYC_RX_SEL_ORIGIN 0x0
+#define MACPHYC_RX_SEL_DELAY 0x1
+#define MACPHYC_RX_DELAY_MASK GENMASK(10, 4)
+#define MACPHYC_SOFT_RST_MASK GENMASK(3, 3)
+#define MACPHYC_PHY_INFT_MASK GENMASK(2, 0)
+#define MACPHYC_PHY_INFT_RMII 0x4
+#define MACPHYC_PHY_INFT_RGMII 0x1
+#define MACPHYC_PHY_INFT_GMII 0x0
+#define MACPHYC_PHY_INFT_MII 0x0
+
+#define MACPHYC_TX_DELAY_PS_MAX 2496
+#define MACPHYC_TX_DELAY_PS_MIN 20
+
+#define MACPHYC_RX_DELAY_PS_MAX 2496
+#define MACPHYC_RX_DELAY_PS_MIN 20
+
+enum ingenic_mac_version {
+ ID_JZ4775,
+ ID_X1000,
+ ID_X1600,
+ ID_X1830,
+ ID_X2000,
+};
+
+struct ingenic_mac {
+ const struct ingenic_soc_info *soc_info;
+ struct device *dev;
+ struct regmap *regmap;
+
+ int rx_delay;
+ int tx_delay;
+};
+
+struct ingenic_soc_info {
+ enum ingenic_mac_version version;
+ u32 mask;
+
+ int (*set_mode)(struct plat_stmmacenet_data *plat_dat);
+};
+
+static int ingenic_mac_init(struct plat_stmmacenet_data *plat_dat)
+{
+ struct ingenic_mac *mac = plat_dat->bsp_priv;
+ int ret;
+
+ if (mac->soc_info->set_mode) {
+ ret = mac->soc_info->set_mode(plat_dat);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int jz4775_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct ingenic_mac *mac = plat_dat->bsp_priv;
+ unsigned int val;
+
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
+ FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_MII);
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_MII\n");
+ break;
+
+ case PHY_INTERFACE_MODE_GMII:
+ val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
+ FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_GMII);
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_GMII\n");
+ break;
+
+ case PHY_INTERFACE_MODE_RMII:
+ val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
+ FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
+ FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RGMII);
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RGMII\n");
+ break;
+
+ default:
+ dev_err(mac->dev, "Unsupported interface %d", plat_dat->interface);
+ return -EINVAL;
+ }
+
+ /* Update MAC PHY control register */
+ return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val);
+}
+
+static int x1000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct ingenic_mac *mac = plat_dat->bsp_priv;
+
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_RMII:
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
+ break;
+
+ default:
+ dev_err(mac->dev, "Unsupported interface %d", plat_dat->interface);
+ return -EINVAL;
+ }
+
+ /* Update MAC PHY control register */
+ return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, 0);
+}
+
+static int x1600_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct ingenic_mac *mac = plat_dat->bsp_priv;
+ unsigned int val;
+
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_RMII:
+ val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
+ break;
+
+ default:
+ dev_err(mac->dev, "Unsupported interface %d", plat_dat->interface);
+ return -EINVAL;
+ }
+
+ /* Update MAC PHY control register */
+ return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val);
+}
+
+static int x1830_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct ingenic_mac *mac = plat_dat->bsp_priv;
+ unsigned int val;
+
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_RMII:
+ val = FIELD_PREP(MACPHYC_MODE_SEL_MASK, MACPHYC_MODE_SEL_RMII) |
+ FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
+ break;
+
+ default:
+ dev_err(mac->dev, "Unsupported interface %d", plat_dat->interface);
+ return -EINVAL;
+ }
+
+ /* Update MAC PHY control register */
+ return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val);
+}
+
+static int x2000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct ingenic_mac *mac = plat_dat->bsp_priv;
+ unsigned int val;
+
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_RMII:
+ val = FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_ORIGIN) |
+ FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_ORIGIN) |
+ FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RGMII);
+
+ if (mac->tx_delay == 0)
+ val |= FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_ORIGIN);
+ else
+ val |= FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_DELAY) |
+ FIELD_PREP(MACPHYC_TX_DELAY_MASK, (mac->tx_delay + 9750) / 19500 - 1);
+
+ if (mac->rx_delay == 0)
+ val |= FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_ORIGIN);
+ else
+ val |= FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_DELAY) |
+ FIELD_PREP(MACPHYC_RX_DELAY_MASK, (mac->rx_delay + 9750) / 19500 - 1);
+
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RGMII\n");
+ break;
+
+ default:
+ dev_err(mac->dev, "Unsupported interface %d", plat_dat->interface);
+ return -EINVAL;
+ }
+
+ /* Update MAC PHY control register */
+ return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val);
+}
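
In the RGMII branch above, the delay, held internally in femtoseconds (the DT value in picoseconds times 1000), is quantized into the 7-bit MACPHYC delay fields in 19500 fs (19.5 ps) steps; adding 9750 first rounds to the nearest step. A standalone check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
            /* delays in picoseconds, as given in the device tree */
            int ps_values[] = { 20, 1000, 2496 };

            for (int i = 0; i < 3; i++) {
                    int fs = ps_values[i] * 1000;		/* driver stores fs */
                    int field = (fs + 9750) / 19500 - 1;	/* 19.5 ps steps */

                    printf("%4d ps -> field %3d (%.1f ps actual)\n",
                           ps_values[i], field, (field + 1) * 19.5);
            }
            return 0;
    }

The 20 to 2496 ps bounds enforced at probe time map onto field values 0 through 127, exactly the width of the GENMASK(18, 12) field.
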
+
+static int ingenic_mac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct ingenic_mac *mac;
+ const struct ingenic_soc_info *data;
+ u32 tx_delay_ps, rx_delay_ps;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ mac = devm_kzalloc(&pdev->dev, sizeof(*mac), GFP_KERNEL);
+ if (!mac) {
+ ret = -ENOMEM;
+ goto err_remove_config_dt;
+ }
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(&pdev->dev, "No of match data provided\n");
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+
+ /* Get MAC PHY control register */
+ mac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "mode-reg");
+ if (IS_ERR(mac->regmap)) {
+ dev_err(&pdev->dev, "%s: Failed to get syscon regmap\n", __func__);
+ ret = PTR_ERR(mac->regmap);
+ goto err_remove_config_dt;
+ }
+
+ if (!of_property_read_u32(pdev->dev.of_node, "tx-clk-delay-ps", &tx_delay_ps)) {
+ if (tx_delay_ps >= MACPHYC_TX_DELAY_PS_MIN &&
+ tx_delay_ps <= MACPHYC_TX_DELAY_PS_MAX) {
+ mac->tx_delay = tx_delay_ps * 1000;
+ } else {
+ dev_err(&pdev->dev, "Invalid TX clock delay: %dps\n", tx_delay_ps);
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+ }
+
+ if (!of_property_read_u32(pdev->dev.of_node, "rx-clk-delay-ps", &rx_delay_ps)) {
+ if (rx_delay_ps >= MACPHYC_RX_DELAY_PS_MIN &&
+ rx_delay_ps <= MACPHYC_RX_DELAY_PS_MAX) {
+ mac->rx_delay = rx_delay_ps * 1000;
+ } else {
+ dev_err(&pdev->dev, "Invalid RX clock delay: %dps\n", rx_delay_ps);
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+ }
+
+ mac->soc_info = data;
+ mac->dev = &pdev->dev;
+
+ plat_dat->bsp_priv = mac;
+
+ ret = ingenic_mac_init(plat_dat);
+ if (ret)
+ goto err_remove_config_dt;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_remove_config_dt;
+
+ return 0;
+
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ingenic_mac_suspend(struct device *dev)
+{
+ return stmmac_suspend(dev);
+}
+
+static int ingenic_mac_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = ingenic_mac_init(priv->plat);
+ if (ret)
+ return ret;
+
+ return stmmac_resume(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(ingenic_mac_pm_ops, ingenic_mac_suspend, ingenic_mac_resume);
+
+static struct ingenic_soc_info jz4775_soc_info = {
+ .version = ID_JZ4775,
+ .mask = MACPHYC_TXCLK_SEL_MASK | MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
+
+ .set_mode = jz4775_mac_set_mode,
+};
+
+static struct ingenic_soc_info x1000_soc_info = {
+ .version = ID_X1000,
+ .mask = MACPHYC_SOFT_RST_MASK,
+
+ .set_mode = x1000_mac_set_mode,
+};
+
+static struct ingenic_soc_info x1600_soc_info = {
+ .version = ID_X1600,
+ .mask = MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
+
+ .set_mode = x1600_mac_set_mode,
+};
+
+static struct ingenic_soc_info x1830_soc_info = {
+ .version = ID_X1830,
+ .mask = MACPHYC_MODE_SEL_MASK | MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
+
+ .set_mode = x1830_mac_set_mode,
+};
+
+static struct ingenic_soc_info x2000_soc_info = {
+ .version = ID_X2000,
+ .mask = MACPHYC_TX_SEL_MASK | MACPHYC_TX_DELAY_MASK | MACPHYC_RX_SEL_MASK |
+ MACPHYC_RX_DELAY_MASK | MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
+
+ .set_mode = x2000_mac_set_mode,
+};
+
+static const struct of_device_id ingenic_mac_of_matches[] = {
+ { .compatible = "ingenic,jz4775-mac", .data = &jz4775_soc_info },
+ { .compatible = "ingenic,x1000-mac", .data = &x1000_soc_info },
+ { .compatible = "ingenic,x1600-mac", .data = &x1600_soc_info },
+ { .compatible = "ingenic,x1830-mac", .data = &x1830_soc_info },
+ { .compatible = "ingenic,x2000-mac", .data = &x2000_soc_info },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ingenic_mac_of_matches);
+
+static struct platform_driver ingenic_mac_driver = {
+ .probe = ingenic_mac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "ingenic-mac",
+ .pm = pm_ptr(&ingenic_mac_pm_ops),
+ .of_match_table = ingenic_mac_of_matches,
+ },
+};
+module_platform_driver(ingenic_mac_driver);
+
+MODULE_AUTHOR("周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>");
+MODULE_DESCRIPTION("Ingenic SoCs DWMAC specific glue layer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index e36a8cc59ad0..e0a7d2b17921 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -10,22 +10,6 @@
#include "stmmac.h"
#include "stmmac_ptp.h"
-#define INTEL_MGBE_ADHOC_ADDR 0x15
-#define INTEL_MGBE_XPCS_ADDR 0x16
-
-/* Selection for PTP Clock Freq belongs to PSE & PCH GbE */
-#define PSE_PTP_CLK_FREQ_MASK (GMAC_GPO0 | GMAC_GPO3)
-#define PSE_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0)
-#define PSE_PTP_CLK_FREQ_200MHZ (GMAC_GPO0 | GMAC_GPO3)
-#define PSE_PTP_CLK_FREQ_256MHZ (0)
-#define PCH_PTP_CLK_FREQ_MASK (GMAC_GPO0)
-#define PCH_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0)
-#define PCH_PTP_CLK_FREQ_200MHZ (0)
-
-/* Cross-timestamping defines */
-#define ART_CPUID_LEAF 0x15
-#define EHL_PSE_ART_MHZ 19200000
-
struct intel_priv_data {
int mdio_adhoc_addr; /* mdio address for serdes & etc */
unsigned long crossts_adj;
@@ -102,6 +86,22 @@ static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
serdes_phy_addr = intel_priv->mdio_adhoc_addr;
+ /* Set the serdes rate and the PCLK rate */
+ data = mdiobus_read(priv->mii, serdes_phy_addr,
+ SERDES_GCR0);
+
+ data &= ~SERDES_RATE_MASK;
+ data &= ~SERDES_PCLK_MASK;
+
+ if (priv->plat->max_speed == 2500)
+ data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT |
+ SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT;
+ else
+ data |= SERDES_RATE_PCIE_GEN1 << SERDES_RATE_PCIE_SHIFT |
+ SERDES_PCLK_70MHZ << SERDES_PCLK_SHIFT;
+
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
+
/* assert clk_req */
data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
data |= SERDES_PLL_CLK;
@@ -230,6 +230,32 @@ static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
}
}
+static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
+{
+ struct intel_priv_data *intel_priv = intel_data;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int serdes_phy_addr = 0;
+ u32 data = 0;
+
+ serdes_phy_addr = intel_priv->mdio_adhoc_addr;
+
+ /* Determine the link speed mode: 2.5Gbps/1Gbps */
+ data = mdiobus_read(priv->mii, serdes_phy_addr,
+ SERDES_GCR);
+
+ if (((data & SERDES_LINK_MODE_MASK) >> SERDES_LINK_MODE_SHIFT) ==
+ SERDES_LINK_MODE_2G5) {
+ dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n");
+ priv->plat->max_speed = 2500;
+ priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX;
+ priv->plat->mdio_bus_data->xpcs_an_inband = false;
+ } else {
+ priv->plat->max_speed = 1000;
+ priv->plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ priv->plat->mdio_bus_data->xpcs_an_inband = true;
+ }
+}
+
/* Program PTP Clock Frequency for different variant of
* Intel mGBE that has slightly different GPO mapping
*/
@@ -568,6 +594,16 @@ static int ehl_common_data(struct pci_dev *pdev,
plat->tx_queues_to_use = 8;
plat->clk_ptp_rate = 200000000;
+ plat->safety_feat_cfg->tsoee = 1;
+ plat->safety_feat_cfg->mrxpee = 1;
+ plat->safety_feat_cfg->mestee = 1;
+ plat->safety_feat_cfg->mrxee = 1;
+ plat->safety_feat_cfg->mtxee = 1;
+ plat->safety_feat_cfg->epsi = 0;
+ plat->safety_feat_cfg->edpp = 0;
+ plat->safety_feat_cfg->prtyen = 0;
+ plat->safety_feat_cfg->tmouten = 0;
+
return intel_mgbe_common_data(pdev, plat);
}
@@ -576,7 +612,7 @@ static int ehl_sgmii_data(struct pci_dev *pdev,
{
plat->bus_id = 1;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
-
+ plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
@@ -629,6 +665,7 @@ static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return ehl_pse0_common_data(pdev, plat);
@@ -667,6 +704,7 @@ static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return ehl_pse1_common_data(pdev, plat);
@@ -683,6 +721,16 @@ static int tgl_common_data(struct pci_dev *pdev,
plat->tx_queues_to_use = 4;
plat->clk_ptp_rate = 200000000;
+ plat->safety_feat_cfg->tsoee = 1;
+ plat->safety_feat_cfg->mrxpee = 0;
+ plat->safety_feat_cfg->mestee = 1;
+ plat->safety_feat_cfg->mrxee = 1;
+ plat->safety_feat_cfg->mtxee = 1;
+ plat->safety_feat_cfg->epsi = 0;
+ plat->safety_feat_cfg->edpp = 0;
+ plat->safety_feat_cfg->prtyen = 0;
+ plat->safety_feat_cfg->tmouten = 0;
+
return intel_mgbe_common_data(pdev, plat);
}
@@ -691,6 +739,7 @@ static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
{
plat->bus_id = 1;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return tgl_common_data(pdev, plat);
@@ -705,6 +754,7 @@ static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
{
plat->bus_id = 2;
plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ plat->speed_mode_2500 = intel_speed_mode_2500;
plat->serdes_powerup = intel_serdes_powerup;
plat->serdes_powerdown = intel_serdes_powerdown;
return tgl_common_data(pdev, plat);
@@ -959,6 +1009,12 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
if (!plat->dma_cfg)
return -ENOMEM;
+ plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
+ sizeof(*plat->safety_feat_cfg),
+ GFP_KERNEL);
+ if (!plat->safety_feat_cfg)
+ return -ENOMEM;
+
/* Enable pci device */
ret = pcim_enable_device(pdev);
if (ret) {
@@ -1031,7 +1087,7 @@ err_alloc_irq:
/**
* intel_eth_pci_remove
*
- * @pdev: platform device pointer
+ * @pdev: pci device pointer
 * Description: this function calls the main driver code to free the net
 * resources and release the PCI resources.
*/
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
index 542acb8ce467..0a37987478c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
@@ -9,6 +9,7 @@
#define POLL_DELAY_US 8
/* SERDES Register */
+#define SERDES_GCR 0x0 /* Global Configuration */
#define SERDES_GSR0 0x5 /* Global Status Reg0 */
#define SERDES_GCR0 0xb /* Global Configuration Reg0 */
@@ -17,8 +18,36 @@
#define SERDES_PHY_RX_CLK BIT(1) /* PSE SGMII PHY rx clk */
#define SERDES_RST BIT(2) /* Serdes Reset */
#define SERDES_PWR_ST_MASK GENMASK(6, 4) /* Serdes Power state */
+#define SERDES_RATE_MASK GENMASK(9, 8)
+#define SERDES_PCLK_MASK GENMASK(14, 12) /* PCLK rate to PHY */
+#define SERDES_LINK_MODE_MASK GENMASK(2, 1)
+#define SERDES_LINK_MODE_SHIFT 1
#define SERDES_PWR_ST_SHIFT 4
#define SERDES_PWR_ST_P0 0x0
#define SERDES_PWR_ST_P3 0x3
+#define SERDES_LINK_MODE_2G5 0x3
+#define SERDES_LINK_MODE_1G 0x2
+#define SERDES_PCLK_37p5MHZ 0x0
+#define SERDES_PCLK_70MHZ 0x1
+#define SERDES_RATE_PCIE_GEN1 0x0
+#define SERDES_RATE_PCIE_GEN2 0x1
+#define SERDES_RATE_PCIE_SHIFT 8
+#define SERDES_PCLK_SHIFT 12
+
+#define INTEL_MGBE_ADHOC_ADDR 0x15
+#define INTEL_MGBE_XPCS_ADDR 0x16
+
+/* Cross-timestamping defines */
+#define ART_CPUID_LEAF 0x15
+#define EHL_PSE_ART_MHZ 19200000
+
+/* Selection for PTP Clock Freq belongs to PSE & PCH GbE */
+#define PSE_PTP_CLK_FREQ_MASK (GMAC_GPO0 | GMAC_GPO3)
+#define PSE_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0)
+#define PSE_PTP_CLK_FREQ_200MHZ (GMAC_GPO0 | GMAC_GPO3)
+#define PSE_PTP_CLK_FREQ_256MHZ (0)
+#define PCH_PTP_CLK_FREQ_MASK (GMAC_GPO0)
+#define PCH_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0)
+#define PCH_PTP_CLK_FREQ_200MHZ (0)
#endif /* __DWMAC_INTEL_H__ */
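The new GCR/GCR0 fields are driven with explicit mask-and-shift pairs. As a point of comparison, a minimal sketch of the same operations using the <linux/bitfield.h> accessors, which would make the *_SHIFT defines redundant (the helper names here are ours, not part of the patch):

#include <linux/bitfield.h>

/* Sketch: decode the link-speed mode from a SERDES_GCR read. */
static inline bool serdes_link_mode_is_2g5(u32 gcr)
{
	return FIELD_GET(SERDES_LINK_MODE_MASK, gcr) == SERDES_LINK_MODE_2G5;
}

/* Sketch: program the 2.5G rate/PCLK pair into a SERDES_GCR0 value. */
static inline u32 serdes_gcr0_set_2g5(u32 gcr0)
{
	gcr0 &= ~(SERDES_RATE_MASK | SERDES_PCLK_MASK);
	gcr0 |= FIELD_PREP(SERDES_RATE_MASK, SERDES_RATE_PCIE_GEN2);
	gcr0 |= FIELD_PREP(SERDES_PCLK_MASK, SERDES_PCLK_37p5MHZ);
	return gcr0;
}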
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 84382fc5cc4d..5c74b6279d69 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -454,7 +454,6 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
struct stmmac_resources stmmac_res;
const struct ethqos_emac_driver_data *data;
struct qcom_ethqos *ethqos;
- struct resource *res;
int ret;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
@@ -474,8 +473,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
}
ethqos->pdev = pdev;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rgmii");
- ethqos->rgmii_base = devm_ioremap_resource(&pdev->dev, res);
+ ethqos->rgmii_base = devm_platform_ioremap_resource_byname(pdev, "rgmii");
if (IS_ERR(ethqos->rgmii_base)) {
ret = PTR_ERR(ethqos->rgmii_base);
goto err_mem;
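The same two-step open-coding is collapsed in several drivers further down (am65-cpts, cpsw-phy-sel, cpsw, cpsw_new, davinci_emac, w5100, axienet, ixp4xx). A sketch of the conversion pattern, using the stock helpers from <linux/platform_device.h>:

/* Before: fetch the resource, then map it. */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rgmii");
base = devm_ioremap_resource(&pdev->dev, res);

/* After: one call that also reports a missing resource consistently.
 * Where the caller still needs the struct resource (e.g. davinci_emac),
 * the devm_platform_get_and_ioremap_resource() variant returns it via
 * an out-parameter.
 */
base = devm_platform_ioremap_resource_byname(pdev, "rgmii");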
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 527077c98ebc..fc3b0acc8f99 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -30,7 +30,7 @@ struct sunxi_priv_data {
static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
{
struct sunxi_priv_data *gmac = priv;
- int ret;
+ int ret = 0;
if (gmac->regulator) {
ret = regulator_enable(gmac->regulator);
@@ -51,11 +51,11 @@ static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
} else {
clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
ret = clk_prepare(gmac->tx_clk);
- if (ret)
- return ret;
+ if (ret && gmac->regulator)
+ regulator_disable(gmac->regulator);
}
- return 0;
+ return ret;
}
static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index f35c03c9f91e..67ba083eb90c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -1358,6 +1358,7 @@ int dwmac4_setup(struct stmmac_priv *priv)
mac->link.speed10 = GMAC_CONFIG_PS;
mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
mac->link.speed1000 = 0;
+ mac->link.speed2500 = GMAC_CONFIG_FES;
mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
mac->mii.addr = GMAC_MDIO_ADDR;
mac->mii.data = GMAC_MDIO_DATA;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index d8c6ff725237..9c2d40f853ed 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -183,7 +183,8 @@ static void dwmac5_handle_dma_err(struct net_device *ndev,
STAT_OFF(dma_errors), stats);
}
-int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
+int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
+ struct stmmac_safety_feature_cfg *safety_feat_cfg)
{
u32 value;
@@ -193,11 +194,16 @@ int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
/* 1. Enable Safety Features */
value = readl(ioaddr + MTL_ECC_CONTROL);
value |= MEEAO; /* MTL ECC Error Addr Status Override */
- value |= TSOEE; /* TSO ECC */
- value |= MRXPEE; /* MTL RX Parser ECC */
- value |= MESTEE; /* MTL EST ECC */
- value |= MRXEE; /* MTL RX FIFO ECC */
- value |= MTXEE; /* MTL TX FIFO ECC */
+ if (safety_feat_cfg->tsoee)
+ value |= TSOEE; /* TSO ECC */
+ if (safety_feat_cfg->mrxpee)
+ value |= MRXPEE; /* MTL RX Parser ECC */
+ if (safety_feat_cfg->mestee)
+ value |= MESTEE; /* MTL EST ECC */
+ if (safety_feat_cfg->mrxee)
+ value |= MRXEE; /* MTL RX FIFO ECC */
+ if (safety_feat_cfg->mtxee)
+ value |= MTXEE; /* MTL TX FIFO ECC */
writel(value, ioaddr + MTL_ECC_CONTROL);
/* 2. Enable MTL Safety Interrupts */
@@ -219,13 +225,16 @@ int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
/* 5. Enable Parity and Timeout for FSM */
value = readl(ioaddr + MAC_FSM_CONTROL);
- value |= PRTYEN; /* FSM Parity Feature */
- value |= TMOUTEN; /* FSM Timeout Feature */
+ if (safety_feat_cfg->prtyen)
+ value |= PRTYEN; /* FSM Parity Feature */
+ if (safety_feat_cfg->tmouten)
+ value |= TMOUTEN; /* FSM Timeout Feature */
writel(value, ioaddr + MAC_FSM_CONTROL);
/* 4. Enable Data Parity Protection */
value = readl(ioaddr + MTL_DPP_CONTROL);
- value |= EDPP;
+ if (safety_feat_cfg->edpp)
+ value |= EDPP;
writel(value, ioaddr + MTL_DPP_CONTROL);
/*
@@ -235,7 +244,8 @@ int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
if (asp <= 0x2)
return 0;
- value |= EPSI;
+ if (safety_feat_cfg->epsi)
+ value |= EPSI;
writel(value, ioaddr + MTL_DPP_CONTROL);
return 0;
}
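The new third parameter is a plain bag of per-feature enable flags; its declaration lives outside this patch (in include/linux/stmmac.h). Assuming one u32 per knob, matching the fields the Intel and PCI glue set elsewhere in this series, it presumably reads:

struct stmmac_safety_feature_cfg {
	u32 tsoee;	/* TSO memory ECC */
	u32 mrxpee;	/* MTL RX parser ECC */
	u32 mestee;	/* MTL EST ECC */
	u32 mrxee;	/* MTL RX FIFO ECC */
	u32 mtxee;	/* MTL TX FIFO ECC */
	u32 epsi;	/* parity on the slave interface */
	u32 edpp;	/* data path parity protection */
	u32 prtyen;	/* FSM parity */
	u32 tmouten;	/* FSM timeout */
};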
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
index 6b2fd37b29ad..53c138d0ff48 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -137,7 +137,8 @@
#define GMAC_INT_FPE_EN BIT(17)
-int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp);
+int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
+ struct stmmac_safety_feature_cfg *safety_cfg);
int dwmac5_safety_feat_irq_status(struct net_device *ndev,
void __iomem *ioaddr, unsigned int asp,
struct stmmac_safety_stats *stats);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index ad4df9bddcf3..c4d78fa93663 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -801,7 +801,9 @@ static void dwxgmac3_handle_dma_err(struct net_device *ndev,
dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
}
-static int dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
+static int
+dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
+ struct stmmac_safety_feature_cfg *safety_cfg)
{
u32 value;
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 75a8b90c202a..6dc1c98ebec8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -348,7 +348,8 @@ struct stmmac_ops {
void (*pcs_rane)(void __iomem *ioaddr, bool restart);
void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv);
/* Safety Features */
- int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp);
+ int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp,
+ struct stmmac_safety_feature_cfg *safety_cfg);
int (*safety_feat_irq_status)(struct net_device *ndev,
void __iomem *ioaddr, unsigned int asp,
struct stmmac_safety_stats *stats);
@@ -612,20 +613,6 @@ struct stmmac_mmc_ops {
#define stmmac_mmc_read(__priv, __args...) \
stmmac_do_void_callback(__priv, mmc, read, __args)
-/* XPCS callbacks */
-#define stmmac_xpcs_validate(__priv, __args...) \
- stmmac_do_callback(__priv, xpcs, validate, __args)
-#define stmmac_xpcs_config(__priv, __args...) \
- stmmac_do_callback(__priv, xpcs, config, __args)
-#define stmmac_xpcs_get_state(__priv, __args...) \
- stmmac_do_callback(__priv, xpcs, get_state, __args)
-#define stmmac_xpcs_link_up(__priv, __args...) \
- stmmac_do_callback(__priv, xpcs, link_up, __args)
-#define stmmac_xpcs_probe(__priv, __args...) \
- stmmac_do_callback(__priv, xpcs, probe, __args)
-#define stmmac_xpcs_config_eee(__priv, __args...) \
- stmmac_do_callback(__priv, xpcs, config_eee, __args)
-
struct stmmac_regs_off {
u32 ptp_off;
u32 mmc_off;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index b6cd43eda7ac..e735134e8487 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -75,7 +75,7 @@ struct stmmac_tx_queue {
unsigned int cur_tx;
unsigned int dirty_tx;
dma_addr_t dma_tx_phy;
- u32 tx_tail_addr;
+ dma_addr_t tx_tail_addr;
u32 mss;
};
@@ -311,6 +311,7 @@ enum stmmac_state {
int stmmac_mdio_unregister(struct net_device *ndev);
int stmmac_mdio_register(struct net_device *ndev);
int stmmac_mdio_reset(struct mii_bus *mii);
+int stmmac_xpcs_setup(struct mii_bus *mii);
void stmmac_set_ethtool_ops(struct net_device *netdev);
void stmmac_ptp_register(struct stmmac_priv *priv);
@@ -338,9 +339,9 @@ static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
{
if (stmmac_xdp_is_enabled(priv))
- return XDP_PACKET_HEADROOM;
+ return XDP_PACKET_HEADROOM + NET_IP_ALIGN;
- return 0;
+ return NET_SKB_PAD + NET_IP_ALIGN;
}
void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 1f6d749fd9a3..d0ce608b81c3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -720,11 +720,13 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
netdev_warn(priv->dev,
"Setting EEE tx-lpi is not supported\n");
- ret = stmmac_xpcs_config_eee(priv, &priv->hw->xpcs_args,
- priv->plat->mult_fact_100ns,
- edata->eee_enabled);
- if (ret)
- return ret;
+ if (priv->hw->xpcs) {
+ ret = xpcs_config_eee(priv->hw->xpcs,
+ priv->plat->mult_fact_100ns,
+ edata->eee_enabled);
+ if (ret)
+ return ret;
+ }
if (!edata->eee_enabled)
stmmac_disable_eee_mode(priv);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index bf9fe25fed69..16820873b01d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -931,6 +931,11 @@ static void stmmac_validate(struct phylink_config *config,
if ((max_speed > 0) && (max_speed < 1000)) {
phylink_set(mask, 1000baseT_Full);
phylink_set(mask, 1000baseX_Full);
+ } else if (priv->plat->has_gmac4) {
+ if (!max_speed || max_speed >= 2500) {
+ phylink_set(mac_supported, 2500baseT_Full);
+ phylink_set(mac_supported, 2500baseX_Full);
+ }
} else if (priv->plat->has_xgmac) {
if (!max_speed || (max_speed >= 2500)) {
phylink_set(mac_supported, 2500baseT_Full);
@@ -996,29 +1001,14 @@ static void stmmac_validate(struct phylink_config *config,
linkmode_andnot(state->advertising, state->advertising, mask);
/* If PCS is supported, check which modes it supports. */
- stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state);
-}
-
-static void stmmac_mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state)
-{
- struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
-
- state->link = 0;
- stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state);
+ if (priv->hw->xpcs)
+ xpcs_validate(priv->hw->xpcs, supported, state);
}
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
- struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
-
- stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state);
-}
-
-static void stmmac_mac_an_restart(struct phylink_config *config)
-{
- /* Not Supported */
+ /* Nothing to do, xpcs_config() handles everything */
}
static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
@@ -1031,8 +1021,8 @@ static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
if (is_up && *hs_enable) {
stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
} else {
- *lo_state = FPE_EVENT_UNKNOWN;
- *lp_state = FPE_EVENT_UNKNOWN;
+ *lo_state = FPE_STATE_OFF;
+ *lp_state = FPE_STATE_OFF;
}
}
@@ -1060,8 +1050,6 @@ static void stmmac_mac_link_up(struct phylink_config *config,
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
u32 ctrl;
- stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface);
-
ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
ctrl &= ~priv->hw->link.speed_mask;
@@ -1154,9 +1142,7 @@ static void stmmac_mac_link_up(struct phylink_config *config,
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
.validate = stmmac_validate,
- .mac_pcs_get_state = stmmac_mac_pcs_get_state,
.mac_config = stmmac_mac_config,
- .mac_an_restart = stmmac_mac_an_restart,
.mac_link_down = stmmac_mac_link_down,
.mac_link_up = stmmac_mac_link_up,
};
@@ -1196,7 +1182,6 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
*/
static int stmmac_init_phy(struct net_device *dev)
{
- struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
struct stmmac_priv *priv = netdev_priv(dev);
struct device_node *node;
int ret;
@@ -1222,14 +1207,19 @@ static int stmmac_init_phy(struct net_device *dev)
ret = phylink_connect_phy(priv->phylink, phydev);
}
- phylink_ethtool_get_wol(priv->phylink, &wol);
- device_set_wakeup_capable(priv->device, !!wol.supported);
+ if (!priv->plat->pmt) {
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+
+ phylink_ethtool_get_wol(priv->phylink, &wol);
+ device_set_wakeup_capable(priv->device, !!wol.supported);
+ }
return ret;
}
static int stmmac_phy_setup(struct stmmac_priv *priv)
{
+ struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
int mode = priv->plat->phy_interface;
struct phylink *phylink;
@@ -1237,8 +1227,9 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
priv->phylink_config.dev = &priv->dev->dev;
priv->phylink_config.type = PHYLINK_NETDEV;
priv->phylink_config.pcs_poll = true;
- priv->phylink_config.ovr_an_inband =
- priv->plat->mdio_bus_data->xpcs_an_inband;
+ if (priv->plat->mdio_bus_data)
+ priv->phylink_config.ovr_an_inband =
+ mdio_bus_data->xpcs_an_inband;
if (!fwnode)
fwnode = dev_fwnode(priv->device);
@@ -1248,6 +1239,9 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
if (IS_ERR(phylink))
return PTR_ERR(phylink);
+ if (priv->hw->xpcs)
+ phylink_set_pcs(phylink, &priv->hw->xpcs->pcs);
+
priv->phylink = phylink;
return 0;
}
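Dropping mac_pcs_get_state and mac_an_restart from stmmac_phylink_mac_ops above works because an attached PCS takes over those duties: after phylink_set_pcs(), phylink calls the PCS's own phylink_pcs_ops for state readout, configuration and autoneg restart. A sketch of the attach step, assuming dw_xpcs embeds a struct phylink_pcs as the pcs-xpcs conversion in this series implies:

#include <linux/phylink.h>
#include <linux/pcs/pcs-xpcs.h>

/* Sketch: hand the XPCS to phylink so it is driven directly. */
static void stmmac_attach_xpcs(struct phylink *phylink, struct dw_xpcs *xpcs)
{
	if (xpcs)
		phylink_set_pcs(phylink, &xpcs->pcs);
}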
@@ -3169,7 +3163,8 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
{
if (priv->dma_cap.asp) {
netdev_info(priv->dev, "Enabling Safety Features\n");
- stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp);
+ stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
+ priv->plat->safety_feat_cfg);
} else {
netdev_info(priv->dev, "No Safety Features support found\n");
}
@@ -3411,8 +3406,8 @@ static void stmmac_free_irq(struct net_device *dev,
static int stmmac_request_irq_multi_msi(struct net_device *dev)
{
- enum request_irq_err irq_err = REQ_IRQ_ERR_NO;
struct stmmac_priv *priv = netdev_priv(dev);
+ enum request_irq_err irq_err;
cpumask_t cpu_mask;
int irq_idx = 0;
char *int_name;
@@ -3559,8 +3554,8 @@ irq_error:
static int stmmac_request_irq_single(struct net_device *dev)
{
- enum request_irq_err irq_err = REQ_IRQ_ERR_NO;
struct stmmac_priv *priv = netdev_priv(dev);
+ enum request_irq_err irq_err;
int ret;
ret = request_irq(dev->irq, stmmac_interrupt,
@@ -3570,7 +3565,7 @@ static int stmmac_request_irq_single(struct net_device *dev)
"%s: ERROR: allocating the IRQ %d (error: %d)\n",
__func__, dev->irq, ret);
irq_err = REQ_IRQ_ERR_MAC;
- return ret;
+ goto irq_error;
}
/* Request the Wake IRQ in case of another line
@@ -3584,7 +3579,7 @@ static int stmmac_request_irq_single(struct net_device *dev)
"%s: ERROR: allocating the WoL IRQ %d (%d)\n",
__func__, priv->wol_irq, ret);
irq_err = REQ_IRQ_ERR_WOL;
- return ret;
+ goto irq_error;
}
}
@@ -3634,6 +3629,7 @@ static int stmmac_request_irq(struct net_device *dev)
int stmmac_open(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ int mode = priv->plat->phy_interface;
int bfsize = 0;
u32 chan;
int ret;
@@ -3646,7 +3642,8 @@ int stmmac_open(struct net_device *dev)
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI &&
- priv->hw->xpcs_args.an_mode != DW_AN_C73) {
+ (!priv->hw->xpcs ||
+ xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
ret = stmmac_init_phy(dev);
if (ret) {
netdev_err(priv->dev,
@@ -5134,7 +5131,7 @@ read_again:
/* Buffer is good. Go on. */
- prefetch(page_address(buf->page));
+ prefetch(page_address(buf->page) + buf->page_offset);
if (buf->sec_page)
prefetch(page_address(buf->sec_page));
@@ -5885,12 +5882,21 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
struct stmmac_priv *priv = netdev_priv(ndev);
int ret = 0;
+ ret = pm_runtime_get_sync(priv->device);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return ret;
+ }
+
ret = eth_mac_addr(ndev, addr);
if (ret)
- return ret;
+ goto set_mac_error;
stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
+set_mac_error:
+ pm_runtime_put(priv->device);
+
return ret;
}
@@ -6185,12 +6191,6 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
bool is_double = false;
int ret;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
- return ret;
- }
-
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
@@ -6216,6 +6216,12 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
bool is_double = false;
int ret;
+ ret = pm_runtime_get_sync(priv->device);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return ret;
+ }
+
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
@@ -6838,6 +6844,11 @@ int stmmac_dvr_probe(struct device *device,
reset_control_reset(priv->plat->stmmac_rst);
}
+ ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
+ if (ret == -ENOTSUPP)
+ dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
+ ERR_PTR(ret));
+
/* Init MAC and get the capabilities */
ret = stmmac_hw_init(priv);
if (ret)
@@ -6989,6 +7000,15 @@ int stmmac_dvr_probe(struct device *device,
}
}
+ if (priv->plat->speed_mode_2500)
+ priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
+
+ if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
+ ret = stmmac_xpcs_setup(priv->mii);
+ if (ret)
+ goto error_xpcs_setup;
+ }
+
ret = stmmac_phy_setup(priv);
if (ret) {
netdev_err(ndev, "failed to setup phy (%d)\n", ret);
@@ -7025,6 +7045,7 @@ error_serdes_powerup:
unregister_netdev(ndev);
error_netdev_register:
phylink_destroy(priv->phylink);
+error_xpcs_setup:
error_phy_setup:
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
@@ -7033,7 +7054,6 @@ error_mdio_register:
stmmac_napi_del(ndev);
error_hw_init:
destroy_workqueue(priv->wq);
- stmmac_bus_clks_config(priv, false);
bitmap_free(priv->af_xdp_zc_qps);
return ret;
@@ -7070,6 +7090,7 @@ int stmmac_dvr_remove(struct device *dev)
phylink_destroy(priv->phylink);
if (priv->plat->stmmac_rst)
reset_control_assert(priv->plat->stmmac_rst);
+ reset_control_assert(priv->plat->stmmac_ahb_rst);
pm_runtime_put(dev);
pm_runtime_disable(dev);
if (priv->hw->pcs != STMMAC_PCS_TBI &&
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index b750074f8f9c..a5d150c5f3d8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -397,6 +397,41 @@ int stmmac_mdio_reset(struct mii_bus *bus)
return 0;
}
+int stmmac_xpcs_setup(struct mii_bus *bus)
+{
+ struct net_device *ndev = bus->priv;
+ struct mdio_device *mdiodev;
+ struct stmmac_priv *priv;
+ struct dw_xpcs *xpcs;
+ int mode, addr;
+
+ priv = netdev_priv(ndev);
+ mode = priv->plat->phy_interface;
+
+ /* Try to probe the XPCS by scanning all addresses. */
+ for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+ mdiodev = mdio_device_create(bus, addr);
+ if (IS_ERR(mdiodev))
+ continue;
+
+ xpcs = xpcs_create(mdiodev, mode);
+ if (IS_ERR_OR_NULL(xpcs)) {
+ mdio_device_free(mdiodev);
+ continue;
+ }
+
+ priv->hw->xpcs = xpcs;
+ break;
+ }
+
+ if (!priv->hw->xpcs) {
+ dev_warn(priv->device, "No xPCS found\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
/**
* stmmac_mdio_register
* @ndev: net device structure
@@ -444,14 +479,6 @@ int stmmac_mdio_register(struct net_device *ndev)
max_addr = PHY_MAX_ADDR;
}
- if (mdio_bus_data->has_xpcs) {
- priv->hw->xpcs = mdio_xpcs_get_ops();
- if (!priv->hw->xpcs) {
- err = -ENODEV;
- goto bus_register_fail;
- }
- }
-
if (mdio_bus_data->needs_reset)
new_bus->reset = &stmmac_mdio_reset;
@@ -503,30 +530,10 @@ int stmmac_mdio_register(struct net_device *ndev)
found = 1;
}
- /* Try to probe the XPCS by scanning all addresses. */
- if (priv->hw->xpcs) {
- struct mdio_xpcs_args *xpcs = &priv->hw->xpcs_args;
- int ret, mode = priv->plat->phy_interface;
- max_addr = PHY_MAX_ADDR;
-
- xpcs->bus = new_bus;
-
- for (addr = 0; addr < max_addr; addr++) {
- xpcs->addr = addr;
-
- ret = stmmac_xpcs_probe(priv, xpcs, mode);
- if (!ret) {
- found = 1;
- break;
- }
- }
- }
-
if (!found && !mdio_node) {
dev_warn(dev, "No PHY found\n");
- mdiobus_unregister(new_bus);
- mdiobus_free(new_bus);
- return -ENODEV;
+ err = -ENODEV;
+ goto no_phy_found;
}
bus_register_done:
@@ -534,6 +541,8 @@ bus_register_done:
return 0;
+no_phy_found:
+ mdiobus_unregister(new_bus);
bus_register_fail:
mdiobus_free(new_bus);
return err;
@@ -551,6 +560,11 @@ int stmmac_mdio_unregister(struct net_device *ndev)
if (!priv->mii)
return 0;
+ if (priv->hw->xpcs) {
+ mdio_device_free(priv->hw->xpcs->mdiodev);
+ xpcs_destroy(priv->hw->xpcs);
+ }
+
mdiobus_unregister(priv->mii);
priv->mii->priv = NULL;
mdiobus_free(priv->mii);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 95e0e4d6f74d..fcf17d8a0494 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -174,6 +174,12 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
if (!plat->dma_cfg)
return -ENOMEM;
+ plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
+ sizeof(*plat->safety_feat_cfg),
+ GFP_KERNEL);
+ if (!plat->safety_feat_cfg)
+ return -ENOMEM;
+
/* Enable pci device */
ret = pci_enable_device(pdev);
if (ret) {
@@ -203,6 +209,16 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
res.wol_irq = pdev->irq;
res.irq = pdev->irq;
+ plat->safety_feat_cfg->tsoee = 1;
+ plat->safety_feat_cfg->mrxpee = 1;
+ plat->safety_feat_cfg->mestee = 1;
+ plat->safety_feat_cfg->mrxee = 1;
+ plat->safety_feat_cfg->mtxee = 1;
+ plat->safety_feat_cfg->epsi = 1;
+ plat->safety_feat_cfg->edpp = 1;
+ plat->safety_feat_cfg->prtyen = 1;
+ plat->safety_feat_cfg->tmouten = 1;
+
return stmmac_dvr_probe(&pdev->dev, plat, &res);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 97a1fedcc9ac..d8ae58bdbbe3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -600,6 +600,13 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
goto error_hw_init;
}
+ plat->stmmac_ahb_rst = devm_reset_control_get_optional_shared(
+ &pdev->dev, "ahb");
+ if (IS_ERR(plat->stmmac_ahb_rst)) {
+ ret = ERR_CAST(plat->stmmac_ahb_rst);
+ goto error_hw_init;
+ }
+
return plat;
error_hw_init:
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 707ccdd03b19..74e748662ec0 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -8144,10 +8144,10 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
"VPD_SCAN: Reading in property [%s] len[%d]\n",
namebuf, prop_len);
for (i = 0; i < prop_len; i++) {
- err = niu_pci_eeprom_read(np, off + i);
- if (err >= 0)
- *prop_buf = err;
- ++prop_buf;
+ err = niu_pci_eeprom_read(np, off + i);
+ if (err < 0)
+ return err;
+ *prop_buf++ = err;
}
}
@@ -8158,14 +8158,14 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
}
/* ESPC_PIO_EN_ENABLE must be set */
-static void niu_pci_vpd_fetch(struct niu *np, u32 start)
+static int niu_pci_vpd_fetch(struct niu *np, u32 start)
{
u32 offset;
int err;
err = niu_pci_eeprom_read16_swp(np, start + 1);
if (err < 0)
- return;
+ return err;
offset = err + 3;
@@ -8174,12 +8174,14 @@ static void niu_pci_vpd_fetch(struct niu *np, u32 start)
u32 end;
err = niu_pci_eeprom_read(np, here);
+ if (err < 0)
+ return err;
if (err != 0x90)
- return;
+ return -EINVAL;
err = niu_pci_eeprom_read16_swp(np, here + 1);
if (err < 0)
- return;
+ return err;
here = start + offset + 3;
end = start + offset + err;
@@ -8187,9 +8189,12 @@ static void niu_pci_vpd_fetch(struct niu *np, u32 start)
offset += err;
err = niu_pci_vpd_scan_props(np, here, end);
- if (err < 0 || err == 1)
- return;
+ if (err < 0)
+ return err;
+ if (err == 1)
+ return -EINVAL;
}
+ return 0;
}
/* ESPC_PIO_EN_ENABLE must be set */
@@ -9280,8 +9285,11 @@ static int niu_get_invariants(struct niu *np)
offset = niu_pci_vpd_offset(np);
netif_printk(np, probe, KERN_DEBUG, np->dev,
"%s() VPD offset [%08x]\n", __func__, offset);
- if (offset)
- niu_pci_vpd_fetch(np, offset);
+ if (offset) {
+ err = niu_pci_vpd_fetch(np, offset);
+ if (err < 0)
+ return err;
+ }
nw64(ESPC_PIO_EN, 0);
if (np->flags & NIU_FLAGS_VPD_VALID) {
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index 9caaae79fc95..c30a6e510aa3 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -1037,11 +1037,9 @@ static int am65_cpts_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct am65_cpts *cpts;
- struct resource *res;
void __iomem *base;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpts");
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource_byname(pdev, "cpts");
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 6e72ecbe5cf7..e8f38e3f7706 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -206,7 +206,6 @@ static const struct of_device_id cpsw_phy_sel_id_table[] = {
static int cpsw_phy_sel_probe(struct platform_device *pdev)
{
- struct resource *res;
const struct of_device_id *of_id;
struct cpsw_phy_sel_priv *priv;
@@ -223,8 +222,7 @@ static int cpsw_phy_sel_probe(struct platform_device *pdev)
priv->dev = &pdev->dev;
priv->cpsw_phy_sel = of_id->data;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gmii-sel");
- priv->gmii_sel = devm_ioremap_resource(&pdev->dev, res);
+ priv->gmii_sel = devm_platform_ioremap_resource_byname(pdev, "gmii-sel");
if (IS_ERR(priv->gmii_sel))
return PTR_ERR(priv->gmii_sel);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c0cd7de88316..cbbd0f665796 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -430,8 +430,8 @@ static void cpsw_rx_handler(void *token, int len, int status)
cpts_rx_timestamp(cpsw->cpts, skb);
skb->protocol = eth_type_trans(skb, ndev);
- /* unmap page as no netstack skb page recycling */
- page_pool_release_page(pool, page);
+ /* mark skb for recycling */
+ skb_mark_for_recycle(skb, page, pool);
netif_receive_skb(skb);
ndev->stats.rx_bytes += len;
@@ -1532,8 +1532,7 @@ static int cpsw_probe(struct platform_device *pdev)
}
cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
- ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ss_regs = devm_ioremap_resource(dev, ss_res);
+ ss_regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ss_res);
if (IS_ERR(ss_regs))
return PTR_ERR(ss_regs);
cpsw->regs = ss_regs;
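skb_mark_for_recycle() replaces the unconditional page release in the RX path here and in cpsw_new below; the page now stays under page-pool control and returns to the pool when the skb is freed. The contract assumed by these hunks (signature per this series; sketch):

/* Assumed API, per <linux/skbuff.h> at this point in the series:
 *
 *   void skb_mark_for_recycle(struct sk_buff *skb, struct page *page,
 *                             struct page_pool *pool);
 *
 * Marks @skb so that on free @page is returned (and unmapped) by @pool,
 * rather than being detached up front via page_pool_release_page() and
 * later freed through the page allocator.
 */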
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 69b7a4e0220a..57d279fdcc9f 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -373,8 +373,8 @@ static void cpsw_rx_handler(void *token, int len, int status)
cpts_rx_timestamp(cpsw->cpts, skb);
skb->protocol = eth_type_trans(skb, ndev);
- /* unmap page as no netstack skb page recycling */
- page_pool_release_page(pool, page);
+ /* mark skb for recycling */
+ skb_mark_for_recycle(skb, page, pool);
netif_receive_skb(skb);
ndev->stats.rx_bytes += len;
@@ -1883,8 +1883,7 @@ static int cpsw_probe(struct platform_device *pdev)
}
cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
- ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ss_regs = devm_ioremap_resource(dev, ss_res);
+ ss_regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ss_res);
if (IS_ERR(ss_regs)) {
ret = PTR_ERR(ss_regs);
return ret;
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index f9417b44cae8..c674e34b6839 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1814,13 +1814,12 @@ static int davinci_emac_probe(struct platform_device *pdev)
priv->bus_freq_mhz = (u32)(emac_bus_frequency / 1000000);
/* Get EMAC platform data */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
- priv->remap_addr = devm_ioremap_resource(&pdev->dev, res);
+ priv->remap_addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->remap_addr)) {
rc = PTR_ERR(priv->remap_addr);
goto no_pdata;
}
+ priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
res_ctrl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res_ctrl) {
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 9030e619e543..97942b0e3897 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1350,8 +1350,8 @@ int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
KNAV_QUEUE_SHARED);
if (IS_ERR(tx_pipe->dma_queue)) {
- dev_err(dev, "Could not open DMA queue for channel \"%s\": %d\n",
- name, ret);
+ dev_err(dev, "Could not open DMA queue for channel \"%s\": %pe\n",
+ name, tx_pipe->dma_queue);
ret = PTR_ERR(tx_pipe->dma_queue);
goto err;
}
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index ec5db481c9cd..811815f8cd3b 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -263,19 +263,14 @@ static int w5100_writebulk_direct(struct net_device *ndev, u32 addr,
static int w5100_mmio_init(struct net_device *ndev)
{
struct platform_device *pdev = to_platform_device(ndev->dev.parent);
- struct w5100_priv *priv = netdev_priv(ndev);
struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
- struct resource *mem;
spin_lock_init(&mmio_priv->reg_lock);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mmio_priv->base = devm_ioremap_resource(&pdev->dev, mem);
+ mmio_priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(mmio_priv->base))
return PTR_ERR(mmio_priv->base);
- netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, priv->irq);
-
return 0;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index b508c9453f40..13cd799541aa 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1543,6 +1543,7 @@ static void axienet_validate(struct phylink_config *config,
case PHY_INTERFACE_MODE_MII:
phylink_set(mask, 100baseT_Full);
phylink_set(mask, 10baseT_Full);
+ fallthrough;
default:
break;
}
@@ -1893,8 +1894,7 @@ static int axienet_probe(struct platform_device *pdev)
goto cleanup_clk;
/* Map device registers */
- ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
+ lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
if (IS_ERR(lp->regs)) {
ret = PTR_ERR(lp->regs);
goto cleanup_clk;
@@ -2009,9 +2009,7 @@ static int axienet_probe(struct platform_device *pdev)
lp->eth_irq = platform_get_irq_optional(pdev, 0);
} else {
/* Check for these resources directly on the Ethernet node. */
- struct resource *res = platform_get_resource(pdev,
- IORESOURCE_MEM, 1);
- lp->dma_regs = devm_ioremap_resource(&pdev->dev, res);
+ lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
lp->rx_irq = platform_get_irq(pdev, 1);
lp->tx_irq = platform_get_irq(pdev, 0);
lp->eth_irq = platform_get_irq_optional(pdev, 2);
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 1ecceeb9700d..85c66af9e56d 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1425,7 +1425,6 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct eth_plat_info *plat;
struct net_device *ndev;
- struct resource *res;
struct port *port;
int err;
@@ -1482,10 +1481,7 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
port->id = plat->npe;
/* Get the port resource and remap */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
- port->regs = devm_ioremap_resource(dev, res);
+ port->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(port->regs))
return PTR_ERR(port->regs);
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 466622664424..185c8a398681 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -90,16 +90,8 @@ static struct platform_driver fjes_driver = {
};
static struct resource fjes_resource[] = {
- {
- .flags = IORESOURCE_MEM,
- .start = 0,
- .end = 0,
- },
- {
- .flags = IORESOURCE_IRQ,
- .start = 0,
- .end = 0,
- },
+ DEFINE_RES_MEM(0, 1),
+ DEFINE_RES_IRQ(0)
};
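DEFINE_RES_MEM() and DEFINE_RES_IRQ() are the stock resource initializers from <linux/ioport.h>; roughly:

/* Approximate expansion (include/linux/ioport.h):
 *
 *   DEFINE_RES_MEM(start, size)  ->  { .start = (start),
 *                                      .end   = (start) + (size) - 1,
 *                                      .flags = IORESOURCE_MEM }
 *   DEFINE_RES_IRQ(irq)          ->  { .start = (irq), .end = (irq),
 *                                      .flags = IORESOURCE_IRQ }
 *
 * The memory entry is declared with size 1 rather than 0 so that .end
 * does not compute to start - 1; the real range is presumably filled in
 * later from the ACPI resources, as fjes_probe() below suggests.
 */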
static bool is_extended_socket_device(struct acpi_device *device)
@@ -1262,6 +1254,10 @@ static int fjes_probe(struct platform_device *plat_dev)
adapter->interrupt_watch_enable = false;
res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -EINVAL;
+ goto err_free_control_wq;
+ }
hw->hw_res.start = res->start;
hw->hw_res.size = resource_size(res);
hw->hw_res.irq = platform_get_irq(plat_dev, 0);
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 39c00f050fbd..1c9023d47e00 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -436,7 +436,7 @@ static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
gtp1->length = htons(payload_len);
gtp1->tid = htonl(pctx->u.v1.o_tei);
- /* TODO: Suppport for extension header, sequence number and N-PDU.
+ /* TODO: Support for extension header, sequence number and N-PDU.
* Update the length field if any of them is available.
*/
}
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 80f41945709f..a15cc5e50290 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -716,11 +716,11 @@ static int sixpack_ioctl(struct tty_struct *tty, struct file *file,
err = 0;
break;
- case SIOCSIFHWADDR: {
- char addr[AX25_ADDR_LEN];
+ case SIOCSIFHWADDR: {
+ char addr[AX25_ADDR_LEN];
- if (copy_from_user(&addr,
- (void __user *) arg, AX25_ADDR_LEN)) {
+ if (copy_from_user(&addr,
+ (void __user *)arg, AX25_ADDR_LEN)) {
err = -EFAULT;
break;
}
@@ -728,11 +728,9 @@ static int sixpack_ioctl(struct tty_struct *tty, struct file *file,
netif_tx_lock_bh(dev);
memcpy(dev->dev_addr, &addr, AX25_ADDR_LEN);
netif_tx_unlock_bh(dev);
-
err = 0;
break;
}
-
default:
err = tty_mode_ioctl(tty, file, cmd, arg);
}
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 1ad6085994b1..0e623c2e8b2d 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -368,7 +368,7 @@ static int bpq_close(struct net_device *dev)
/* ------------------------------------------------------------------------ */
-
+#ifdef CONFIG_PROC_FS
/*
* Proc filesystem
*/
@@ -440,7 +440,7 @@ static const struct seq_operations bpq_seqops = {
.stop = bpq_seq_stop,
.show = bpq_seq_show,
};
-
+#endif
/* ------------------------------------------------------------------------ */
static const struct net_device_ops bpq_netdev_ops = {
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index b9be530b285f..ff83e00b77af 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -8,8 +8,8 @@
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/ieee802154.h>
#include <linux/irq.h>
@@ -1388,7 +1388,7 @@ MODULE_DEVICE_TABLE(spi, mrf24j40_ids);
static struct spi_driver mrf24j40_driver = {
.driver = {
- .of_match_table = of_match_ptr(mrf24j40_of_match),
+ .of_match_table = mrf24j40_of_match,
.name = "mrf24j40",
},
.id_table = mrf24j40_ids,
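The of_match_ptr() removal pairs with the <linux/of.h> -> <linux/mod_devicetable.h> include swap: with CONFIG_OF=n the macro evaluates to NULL, leaving mrf24j40_of_match defined but unused. Roughly, from include/linux/of.h:

#ifdef CONFIG_OF
#define of_match_ptr(_ptr)	(_ptr)
#else
#define of_match_ptr(_ptr)	NULL
#endif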
diff --git a/drivers/net/ipa/Makefile b/drivers/net/ipa/Makefile
index 1efe1a88104b..bd34fce8f6e6 100644
--- a/drivers/net/ipa/Makefile
+++ b/drivers/net/ipa/Makefile
@@ -7,7 +7,8 @@ ipa-y := ipa_main.o ipa_clock.o ipa_reg.o ipa_mem.o \
ipa_table.o ipa_interrupt.o gsi.o gsi_trans.o \
ipa_gsi.o ipa_smp2p.o ipa_uc.o \
ipa_endpoint.o ipa_cmd.o ipa_modem.o \
- ipa_resource.o ipa_qmi.o ipa_qmi_msg.o
+ ipa_resource.o ipa_qmi.o ipa_qmi_msg.o \
+ ipa_sysfs.o
ipa-y += ipa_data-v3.5.1.o ipa_data-v4.2.o \
ipa_data-v4.5.o ipa_data-v4.9.o \
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index e7ff376cb5b7..744406832a77 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -58,6 +58,7 @@ enum ipa_flag {
* @mem_virt: Virtual address of IPA-local memory space
* @mem_offset: Offset from @mem_virt used for access to IPA memory
* @mem_size: Total size (bytes) of memory at @mem_virt
+ * @mem_count: Number of entries in the mem array
* @mem: Array of IPA-local memory region descriptors
* @imem_iova: I/O virtual address of IPA region in IMEM
* @imem_size: Size of IMEM region
@@ -103,6 +104,7 @@ struct ipa {
void *mem_virt;
u32 mem_offset;
u32 mem_size;
+ u32 mem_count;
const struct ipa_mem *mem;
unsigned long imem_iova;
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index 525cdf28d9ea..af44ca41189e 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -200,41 +200,55 @@ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
/* Validate the memory region that holds headers */
static bool ipa_cmd_header_valid(struct ipa *ipa)
{
- const struct ipa_mem *mem = &ipa->mem[IPA_MEM_MODEM_HEADER];
struct device *dev = &ipa->pdev->dev;
+ const struct ipa_mem *mem;
u32 offset_max;
u32 size_max;
+ u32 offset;
u32 size;
- /* In ipa_cmd_hdr_init_local_add() we record the offset and size
- * of the header table memory area. Make sure the offset and size
- * fit in the fields that need to hold them, and that the entire
- * range is within the overall IPA memory range.
+ /* In ipa_cmd_hdr_init_local_add() we record the offset and size of
+ * the header table memory area in an immediate command. Make sure
+ * the offset and size fit in the fields that need to hold them, and
+ * that the entire range is within the overall IPA memory range.
*/
offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
- if (mem->offset > offset_max ||
- ipa->mem_offset > offset_max - mem->offset) {
+ size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
+
+ /* The header memory area contains both the modem and AP header
+ * regions. The modem portion defines the address of the region.
+ */
+ mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
+ offset = mem->offset;
+ size = mem->size;
+
+ /* Make sure the offset fits in the IPA command */
+ if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
dev_err(dev, "header table region offset too large\n");
dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
- ipa->mem_offset, mem->offset, offset_max);
+ ipa->mem_offset, offset, offset_max);
return false;
}
- size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
- size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
- size += ipa->mem[IPA_MEM_AP_HEADER].size;
+ /* Add the size of the AP portion (if defined) to the combined size */
+ mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
+ if (mem)
+ size += mem->size;
+ /* Make sure the combined size fits in the IPA command */
if (size > size_max) {
dev_err(dev, "header table region size too large\n");
dev_err(dev, " (0x%04x > 0x%08x)\n", size, size_max);
return false;
}
- if (size > ipa->mem_size || mem->offset > ipa->mem_size - size) {
+
+ /* Make sure the entire combined area fits in IPA memory */
+ if (size > ipa->mem_size || offset > ipa->mem_size - size) {
dev_err(dev, "header table region out of range\n");
dev_err(dev, " (0x%04x + 0x%04x > 0x%04x)\n",
- mem->offset, size, ipa->mem_size);
+ offset, size, ipa->mem_size);
return false;
}
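ipa_mem_find() is introduced by this series but its body is not part of this hunk. Given the new mem_count field added to struct ipa above and the per-entry .id members added to the ipa_mem_local_data tables below, it is presumably a linear scan along these lines (sketch):

const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id)
{
	u32 i;

	for (i = 0; i < ipa->mem_count; i++) {
		const struct ipa_mem *mem = &ipa->mem[i];

		if (mem->id == mem_id)
			return mem;
	}

	return NULL;	/* region not defined for this SoC */
}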
diff --git a/drivers/net/ipa/ipa_data-v3.5.1.c b/drivers/net/ipa/ipa_data-v3.5.1.c
index ead1a82f32f5..af536ef8c120 100644
--- a/drivers/net/ipa/ipa_data-v3.5.1.c
+++ b/drivers/net/ipa/ipa_data-v3.5.1.c
@@ -271,77 +271,92 @@ static const struct ipa_resource_data ipa_resource_data = {
/* IPA-resident memory region data for an SoC having IPA v3.5.1 */
static const struct ipa_mem ipa_mem_local_data[] = {
- [IPA_MEM_UC_SHARED] = {
+ {
+ .id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
- [IPA_MEM_UC_INFO] = {
+ {
+ .id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_V4_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_FILTER] = {
+ {
+ .id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER] = {
+ {
+ .id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE] = {
+ {
+ .id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE] = {
+ {
+ .id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_MODEM_HEADER] = {
+ {
+ .id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0140,
.canary_count = 2,
},
- [IPA_MEM_MODEM_PROC_CTX] = {
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x07d0,
.size = 0x0200,
.canary_count = 2,
},
- [IPA_MEM_AP_PROC_CTX] = {
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
.offset = 0x09d0,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_MODEM] = {
+ {
+ .id = IPA_MEM_MODEM,
.offset = 0x0bd8,
.size = 0x1024,
.canary_count = 0,
},
- [IPA_MEM_UC_EVENT_RING] = {
+ {
+ .id = IPA_MEM_UC_EVENT_RING,
.offset = 0x1c00,
.size = 0x0400,
.canary_count = 1,
diff --git a/drivers/net/ipa/ipa_data-v4.11.c b/drivers/net/ipa/ipa_data-v4.11.c
index 05806ceae8b5..9353efbd504f 100644
--- a/drivers/net/ipa/ipa_data-v4.11.c
+++ b/drivers/net/ipa/ipa_data-v4.11.c
@@ -220,112 +220,134 @@ static const struct ipa_resource_data ipa_resource_data = {
/* IPA-resident memory region data for an SoC having IPA v4.11 */
static const struct ipa_mem ipa_mem_local_data[] = {
- [IPA_MEM_UC_SHARED] = {
+ {
+ .id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
- [IPA_MEM_UC_INFO] = {
+ {
+ .id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_V4_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_FILTER] = {
+ {
+ .id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER] = {
+ {
+ .id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE] = {
+ {
+ .id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE] = {
+ {
+ .id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_MODEM_HEADER] = {
+ {
+ .id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0240,
.canary_count = 2,
},
- [IPA_MEM_AP_HEADER] = {
+ {
+ .id = IPA_MEM_AP_HEADER,
.offset = 0x08c8,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_MODEM_PROC_CTX] = {
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x0ad0,
.size = 0x0200,
.canary_count = 2,
},
- [IPA_MEM_AP_PROC_CTX] = {
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
.offset = 0x0cd0,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_NAT_TABLE] = {
+ {
+ .id = IPA_MEM_NAT_TABLE,
.offset = 0x0ee0,
.size = 0x0d00,
.canary_count = 4,
},
- [IPA_MEM_PDN_CONFIG] = {
+ {
+ .id = IPA_MEM_PDN_CONFIG,
.offset = 0x1be8,
.size = 0x0050,
.canary_count = 0,
},
- [IPA_MEM_STATS_QUOTA_MODEM] = {
+ {
+ .id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x1c40,
.size = 0x0030,
.canary_count = 4,
},
- [IPA_MEM_STATS_QUOTA_AP] = {
+ {
+ .id = IPA_MEM_STATS_QUOTA_AP,
.offset = 0x1c70,
.size = 0x0048,
.canary_count = 0,
},
- [IPA_MEM_STATS_TETHERING] = {
+ {
+ .id = IPA_MEM_STATS_TETHERING,
.offset = 0x1cb8,
.size = 0x0238,
.canary_count = 0,
},
- [IPA_MEM_STATS_DROP] = {
+ {
+ .id = IPA_MEM_STATS_DROP,
.offset = 0x1ef0,
.size = 0x0020,
.canary_count = 0,
},
- [IPA_MEM_MODEM] = {
+ {
+ .id = IPA_MEM_MODEM,
.offset = 0x1f18,
.size = 0x100c,
.canary_count = 2,
},
- [IPA_MEM_UC_EVENT_RING] = {
+ {
+ .id = IPA_MEM_END_MARKER,
.offset = 0x3000,
.size = 0x0000,
.canary_count = 1,
diff --git a/drivers/net/ipa/ipa_data-v4.2.c b/drivers/net/ipa/ipa_data-v4.2.c
index 8744f19c6401..3b09b7baa95f 100644
--- a/drivers/net/ipa/ipa_data-v4.2.c
+++ b/drivers/net/ipa/ipa_data-v4.2.c
@@ -219,92 +219,110 @@ static const struct ipa_resource_data ipa_resource_data = {
/* IPA-resident memory region data for an SoC having IPA v4.2 */
static const struct ipa_mem ipa_mem_local_data[] = {
- [IPA_MEM_UC_SHARED] = {
+ {
+ .id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
- [IPA_MEM_UC_INFO] = {
+ {
+ .id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_V4_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0,
.canary_count = 2,
},
- [IPA_MEM_V4_FILTER] = {
+ {
+ .id = IPA_MEM_V4_FILTER,
.offset = 0x0290,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0310,
.size = 0,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER] = {
+ {
+ .id = IPA_MEM_V6_FILTER,
.offset = 0x0318,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0398,
.size = 0,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE] = {
+ {
+ .id = IPA_MEM_V4_ROUTE,
.offset = 0x03a0,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0420,
.size = 0,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE] = {
+ {
+ .id = IPA_MEM_V6_ROUTE,
.offset = 0x0428,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_MODEM_HEADER] = {
+ {
+ .id = IPA_MEM_MODEM_HEADER,
.offset = 0x04a8,
.size = 0x0140,
.canary_count = 2,
},
- [IPA_MEM_MODEM_PROC_CTX] = {
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x05f0,
.size = 0x0200,
.canary_count = 2,
},
- [IPA_MEM_AP_PROC_CTX] = {
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
.offset = 0x07f0,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_PDN_CONFIG] = {
+ {
+ .id = IPA_MEM_PDN_CONFIG,
.offset = 0x09f8,
.size = 0x0050,
.canary_count = 2,
},
- [IPA_MEM_STATS_QUOTA_MODEM] = {
+ {
+ .id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x0a50,
.size = 0x0060,
.canary_count = 2,
},
- [IPA_MEM_STATS_TETHERING] = {
+ {
+ .id = IPA_MEM_STATS_TETHERING,
.offset = 0x0ab0,
.size = 0x0140,
.canary_count = 0,
},
- [IPA_MEM_MODEM] = {
+ {
+ .id = IPA_MEM_MODEM,
.offset = 0x0bf0,
.size = 0x140c,
.canary_count = 0,
},
- [IPA_MEM_UC_EVENT_RING] = {
+ {
+ .id = IPA_MEM_END_MARKER,
.offset = 0x2000,
.size = 0,
.canary_count = 1,
diff --git a/drivers/net/ipa/ipa_data-v4.5.c b/drivers/net/ipa/ipa_data-v4.5.c
index 5f67a3a909ee..a99b6478fa3a 100644
--- a/drivers/net/ipa/ipa_data-v4.5.c
+++ b/drivers/net/ipa/ipa_data-v4.5.c
@@ -265,117 +265,140 @@ static const struct ipa_resource_data ipa_resource_data = {
/* IPA-resident memory region data for an SoC having IPA v4.5 */
static const struct ipa_mem ipa_mem_local_data[] = {
- [IPA_MEM_UC_SHARED] = {
+ {
+ .id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
- [IPA_MEM_UC_INFO] = {
+ {
+ .id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_V4_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
.offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_FILTER] = {
+ {
+ .id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER] = {
+ {
+ .id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE] = {
+ {
+ .id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE] = {
+ {
+ .id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_MODEM_HEADER] = {
+ {
+ .id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0240,
.canary_count = 2,
},
- [IPA_MEM_AP_HEADER] = {
+ {
+ .id = IPA_MEM_AP_HEADER,
.offset = 0x08c8,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_MODEM_PROC_CTX] = {
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x0ad0,
.size = 0x0b20,
.canary_count = 2,
},
- [IPA_MEM_AP_PROC_CTX] = {
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
.offset = 0x15f0,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_NAT_TABLE] = {
+ {
+ .id = IPA_MEM_NAT_TABLE,
.offset = 0x1800,
.size = 0x0d00,
.canary_count = 4,
},
- [IPA_MEM_STATS_QUOTA_MODEM] = {
+ {
+ .id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x2510,
.size = 0x0030,
.canary_count = 4,
},
- [IPA_MEM_STATS_QUOTA_AP] = {
+ {
+ .id = IPA_MEM_STATS_QUOTA_AP,
.offset = 0x2540,
.size = 0x0048,
.canary_count = 0,
},
- [IPA_MEM_STATS_TETHERING] = {
+ {
+ .id = IPA_MEM_STATS_TETHERING,
.offset = 0x2588,
.size = 0x0238,
.canary_count = 0,
},
- [IPA_MEM_STATS_FILTER_ROUTE] = {
+ {
+ .id = IPA_MEM_STATS_FILTER_ROUTE,
.offset = 0x27c0,
.size = 0x0800,
.canary_count = 0,
},
- [IPA_MEM_STATS_DROP] = {
+ {
+ .id = IPA_MEM_STATS_DROP,
.offset = 0x2fc0,
.size = 0x0020,
.canary_count = 0,
},
- [IPA_MEM_MODEM] = {
+ {
+ .id = IPA_MEM_MODEM,
.offset = 0x2fe8,
.size = 0x0800,
.canary_count = 2,
},
- [IPA_MEM_UC_EVENT_RING] = {
+ {
+ .id = IPA_MEM_UC_EVENT_RING,
.offset = 0x3800,
.size = 0x1000,
.canary_count = 1,
},
- [IPA_MEM_PDN_CONFIG] = {
+ {
+ .id = IPA_MEM_PDN_CONFIG,
.offset = 0x4800,
.size = 0x0050,
.canary_count = 0,
diff --git a/drivers/net/ipa/ipa_data-v4.9.c b/drivers/net/ipa/ipa_data-v4.9.c
index e41be790f45e..798d43e1eb13 100644
--- a/drivers/net/ipa/ipa_data-v4.9.c
+++ b/drivers/net/ipa/ipa_data-v4.9.c
@@ -263,116 +263,140 @@ static const struct ipa_resource_data ipa_resource_data = {
/* IPA-resident memory region data for an SoC having IPA v4.9 */
static const struct ipa_mem ipa_mem_local_data[] = {
- [IPA_MEM_UC_SHARED] = {
+ {
+ .id = IPA_MEM_UC_SHARED,
.offset = 0x0000,
.size = 0x0080,
.canary_count = 0,
},
- [IPA_MEM_UC_INFO] = {
+ {
+ .id = IPA_MEM_UC_INFO,
.offset = 0x0080,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_V4_FILTER_HASHED] = { .offset = 0x0288,
+ {
+ .id = IPA_MEM_V4_FILTER_HASHED,
+ .offset = 0x0288,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_FILTER] = {
+ {
+ .id = IPA_MEM_V4_FILTER,
.offset = 0x0308,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER_HASHED] = {
+ {
+ .id = IPA_MEM_V6_FILTER_HASHED,
.offset = 0x0388,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_FILTER] = {
+ {
+ .id = IPA_MEM_V6_FILTER,
.offset = 0x0408,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V4_ROUTE_HASHED,
.offset = 0x0488,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V4_ROUTE] = {
+ {
+ .id = IPA_MEM_V4_ROUTE,
.offset = 0x0508,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE_HASHED] = {
+ {
+ .id = IPA_MEM_V6_ROUTE_HASHED,
.offset = 0x0588,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_V6_ROUTE] = {
+ {
+ .id = IPA_MEM_V6_ROUTE,
.offset = 0x0608,
.size = 0x0078,
.canary_count = 2,
},
- [IPA_MEM_MODEM_HEADER] = {
+ {
+ .id = IPA_MEM_MODEM_HEADER,
.offset = 0x0688,
.size = 0x0240,
.canary_count = 2,
},
- [IPA_MEM_AP_HEADER] = {
+ {
+ .id = IPA_MEM_AP_HEADER,
.offset = 0x08c8,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_MODEM_PROC_CTX] = {
+ {
+ .id = IPA_MEM_MODEM_PROC_CTX,
.offset = 0x0ad0,
.size = 0x0b20,
.canary_count = 2,
},
- [IPA_MEM_AP_PROC_CTX] = {
+ {
+ .id = IPA_MEM_AP_PROC_CTX,
.offset = 0x15f0,
.size = 0x0200,
.canary_count = 0,
},
- [IPA_MEM_NAT_TABLE] = {
+ {
+ .id = IPA_MEM_NAT_TABLE,
.offset = 0x1800,
.size = 0x0d00,
.canary_count = 4,
},
- [IPA_MEM_STATS_QUOTA_MODEM] = {
+ {
+ .id = IPA_MEM_STATS_QUOTA_MODEM,
.offset = 0x2510,
.size = 0x0030,
.canary_count = 4,
},
- [IPA_MEM_STATS_QUOTA_AP] = {
+ {
+ .id = IPA_MEM_STATS_QUOTA_AP,
.offset = 0x2540,
.size = 0x0048,
.canary_count = 0,
},
- [IPA_MEM_STATS_TETHERING] = {
+ {
+ .id = IPA_MEM_STATS_TETHERING,
.offset = 0x2588,
.size = 0x0238,
.canary_count = 0,
},
- [IPA_MEM_STATS_FILTER_ROUTE] = {
+ {
+ .id = IPA_MEM_STATS_FILTER_ROUTE,
.offset = 0x27c0,
.size = 0x0800,
.canary_count = 0,
},
- [IPA_MEM_STATS_DROP] = {
+ {
+ .id = IPA_MEM_STATS_DROP,
.offset = 0x2fc0,
.size = 0x0020,
.canary_count = 0,
},
- [IPA_MEM_MODEM] = {
+ {
+ .id = IPA_MEM_MODEM,
.offset = 0x2fe8,
.size = 0x0800,
.canary_count = 2,
},
- [IPA_MEM_UC_EVENT_RING] = {
+ {
+ .id = IPA_MEM_UC_EVENT_RING,
.offset = 0x3800,
.size = 0x1000,
.canary_count = 1,
},
- [IPA_MEM_PDN_CONFIG] = {
+ {
+ .id = IPA_MEM_PDN_CONFIG,
.offset = 0x4800,
.size = 0x0050,
.canary_count = 0,
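A note on the conversion above: the memory-region tables change from arrays indexed by region ID to sequential arrays whose entries carry an explicit .id field, so entries no longer need to be dense or ordered, and undefined regions can simply be omitted. A minimal before/after sketch (array names are illustrative):

    /* Old style: the array slot implies the region ID, so a slot exists
     * for every ID up to the largest one used, defined or not.
     */
    static const struct ipa_mem ipa_mem_old[] = {
        [IPA_MEM_UC_SHARED] = {
            .offset = 0x0000,
            .size = 0x0080,
            .canary_count = 0,
        },
    };

    /* New style: the ID is stored in the entry itself. */
    static const struct ipa_mem ipa_mem_new[] = {
        {
            .id = IPA_MEM_UC_SHARED,
            .offset = 0x0000,
            .size = 0x0080,
            .canary_count = 0,
        },
    };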
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index ccc99ad983eb..3520852936ed 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -75,8 +75,6 @@ struct ipa_status {
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)
#define IPA_STATUS_FLAGS2_TAG_FMASK GENMASK_ULL(63, 16)
-#ifdef IPA_VALIDATE
-
static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *all_data,
const struct ipa_gsi_endpoint_data *data)
@@ -88,11 +86,6 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
if (ipa_gsi_endpoint_data_empty(data))
return true;
- /* IPA v4.5+ uses checksum offload, not yet supported by RMNet */
- if (ipa->version >= IPA_VERSION_4_5)
- if (data->endpoint.config.checksum)
- return false;
-
if (!data->toward_ipa) {
if (data->endpoint.filter_support) {
dev_err(dev, "filtering not supported for "
@@ -230,27 +223,6 @@ static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
return true;
}
-#else /* !IPA_VALIDATE */
-
-static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
- const struct ipa_gsi_endpoint_data *data)
-{
- const struct ipa_gsi_endpoint_data *dp = data;
- enum ipa_endpoint_name name;
-
- if (ipa->version < IPA_VERSION_4_5)
- return true;
-
- /* IPA v4.5+ uses checksum offload, not yet supported by RMNet */
- for (name = 0; name < count; name++, dp++)
- if (data->endpoint.config.checksum)
- return false;
-
- return true;
-}
-
-#endif /* !IPA_VALIDATE */
-
/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
u32 tre_count)
@@ -457,28 +429,34 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
+ enum ipa_cs_offload_en enabled;
u32 val = 0;
/* FRAG_OFFLOAD_EN is 0 */
if (endpoint->data->checksum) {
+ enum ipa_version version = endpoint->ipa->version;
+
if (endpoint->toward_ipa) {
u32 checksum_offset;
- val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
- CS_OFFLOAD_EN_FMASK);
/* Checksum header offset is in 4-byte units */
checksum_offset = sizeof(struct rmnet_map_header);
checksum_offset /= sizeof(u32);
val |= u32_encode_bits(checksum_offset,
CS_METADATA_HDR_OFFSET_FMASK);
+
+ enabled = version < IPA_VERSION_4_5
+ ? IPA_CS_OFFLOAD_UL
+ : IPA_CS_OFFLOAD_INLINE;
} else {
- val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
- CS_OFFLOAD_EN_FMASK);
+ enabled = version < IPA_VERSION_4_5
+ ? IPA_CS_OFFLOAD_DL
+ : IPA_CS_OFFLOAD_INLINE;
}
} else {
- val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
- CS_OFFLOAD_EN_FMASK);
+ enabled = IPA_CS_OFFLOAD_NONE;
}
+ val |= u32_encode_bits(enabled, CS_OFFLOAD_EN_FMASK);
/* CS_GEN_QMB_MASTER_SEL is 0 */
iowrite32(val, endpoint->ipa->reg_virt + offset);
@@ -498,6 +476,27 @@ static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
+static u32
+ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
+{
+ u32 header_size = sizeof(struct rmnet_map_header);
+
+ /* Without checksum offload, we just have the MAP header */
+ if (!endpoint->data->checksum)
+ return header_size;
+
+ if (version < IPA_VERSION_4_5) {
+ /* Checksum header inserted for AP TX endpoints only */
+ if (endpoint->toward_ipa)
+ header_size += sizeof(struct rmnet_map_ul_csum_header);
+ } else {
+ /* Checksum header is used in both directions */
+ header_size += sizeof(struct rmnet_map_v5_csum_header);
+ }
+
+ return header_size;
+}
+
/**
* ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
* @endpoint: Endpoint pointer
@@ -526,13 +525,11 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
u32 val = 0;
if (endpoint->data->qmap) {
- size_t header_size = sizeof(struct rmnet_map_header);
enum ipa_version version = ipa->version;
+ size_t header_size;
- /* We might supply a checksum header after the QMAP header */
- if (endpoint->toward_ipa && endpoint->data->checksum)
- header_size += sizeof(struct rmnet_map_ul_csum_header);
- val |= ipa_header_size_encoded(version, header_size);
+ header_size = ipa_qmap_header_size(version, endpoint);
+ val = ipa_header_size_encoded(version, header_size);
/* Define how to fill fields in a received QMAP header */
if (!endpoint->toward_ipa) {
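For concreteness, the header sizes ipa_qmap_header_size() produces, assuming the rmnet header structures in <linux/if_rmnet.h> are 4 bytes each (true at the time of writing; verify against the header):

    /* QMAP header size by case (structure sizes assumed, see above):
     *
     *   checksum off, any version/direction:
     *       sizeof(struct rmnet_map_header)              = 4
     *   checksum on, IPA < v4.5, TX (toward IPA):
     *       + sizeof(struct rmnet_map_ul_csum_header)    = 8
     *   checksum on, IPA < v4.5, RX:
     *       sizeof(struct rmnet_map_header) only         = 4
     *   checksum on, IPA v4.5+, either direction:
     *       + sizeof(struct rmnet_map_v5_csum_header)    = 8
     */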
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 9915603ed10b..f82130db32f6 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -31,6 +31,7 @@
#include "ipa_uc.h"
#include "ipa_interrupt.h"
#include "gsi_trans.h"
+#include "ipa_sysfs.h"
/**
* DOC: The IP Accelerator
@@ -529,6 +530,7 @@ static int ipa_firmware_load(struct device *dev)
}
ret = of_address_to_resource(node, 0, &res);
+ of_node_put(node);
if (ret) {
dev_err(dev, "error %d getting \"memory-region\" resource\n",
ret);
@@ -639,6 +641,27 @@ static void ipa_validate_build(void)
#endif /* IPA_VALIDATE */
}
+static bool ipa_version_valid(enum ipa_version version)
+{
+ switch (version) {
+ case IPA_VERSION_3_0:
+ case IPA_VERSION_3_1:
+ case IPA_VERSION_3_5:
+ case IPA_VERSION_3_5_1:
+ case IPA_VERSION_4_0:
+ case IPA_VERSION_4_1:
+ case IPA_VERSION_4_2:
+ case IPA_VERSION_4_5:
+ case IPA_VERSION_4_7:
+ case IPA_VERSION_4_9:
+ case IPA_VERSION_4_11:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
/**
* ipa_probe() - IPA platform driver probe function
* @pdev: Platform device pointer
@@ -676,11 +699,15 @@ static int ipa_probe(struct platform_device *pdev)
/* Get configuration data early; needed for clock initialization */
data = of_device_get_match_data(dev);
if (!data) {
- /* This is really IPA_VALIDATE (should never happen) */
dev_err(dev, "matched hardware not supported\n");
return -ENODEV;
}
+ if (!ipa_version_valid(data->version)) {
+ dev_err(dev, "invalid IPA version\n");
+ return -EINVAL;
+ }
+
/* If we need Trust Zone, make sure it's available */
modem_init = of_property_read_bool(dev->of_node, "modem-init");
if (!modem_init)
@@ -881,6 +908,13 @@ static const struct dev_pm_ops ipa_pm_ops = {
.resume = ipa_resume,
};
+static const struct attribute_group *ipa_attribute_groups[] = {
+ &ipa_attribute_group,
+ &ipa_feature_attribute_group,
+ &ipa_modem_attribute_group,
+ NULL,
+};
+
static struct platform_driver ipa_driver = {
.probe = ipa_probe,
.remove = ipa_remove,
@@ -889,6 +923,7 @@ static struct platform_driver ipa_driver = {
.name = "ipa",
.pm = &ipa_pm_ops,
.of_match_table = ipa_match,
+ .dev_groups = ipa_attribute_groups,
},
};
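Note that registering the attribute groups through .dev_groups means the driver core creates the sysfs files when the device is bound and removes them on unbind, so ipa_probe() and ipa_remove() need no explicit sysfs_create_group()/sysfs_remove_group() calls. The same pattern applies to any driver; a minimal sketch with illustrative names:

    static const struct attribute_group *example_groups[] = {
        &example_attribute_group,  /* assumed to be defined elsewhere */
        NULL,
    };

    static struct platform_driver example_driver = {
        .driver = {
            .name = "example",
            .dev_groups = example_groups,
        },
    };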
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index c5c3b1b7e67d..4337b0920d3d 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -26,11 +26,26 @@
/* SMEM host id representing the modem. */
#define QCOM_SMEM_HOST_MODEM 1
+const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id)
+{
+ u32 i;
+
+ for (i = 0; i < ipa->mem_count; i++) {
+ const struct ipa_mem *mem = &ipa->mem[i];
+
+ if (mem->id == mem_id)
+ return mem;
+ }
+
+ return NULL;
+}
+
/* Add an immediate command to a transaction that zeroes a memory region */
static void
-ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem)
+ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
dma_addr_t addr = ipa->zero_addr;
if (!mem->size)
@@ -60,6 +75,7 @@ ipa_mem_zero_region_add(struct gsi_trans *trans, const struct ipa_mem *mem)
int ipa_mem_setup(struct ipa *ipa)
{
dma_addr_t addr = ipa->zero_addr;
+ const struct ipa_mem *mem;
struct gsi_trans *trans;
u32 offset;
u16 size;
@@ -74,39 +90,136 @@ int ipa_mem_setup(struct ipa *ipa)
return -EBUSY;
}
- /* Initialize IPA-local header memory. The modem and AP header
- * regions are contiguous, and initialized together.
+ /* Initialize IPA-local header memory. The AP header region, if
+ * present, is contiguous with and follows the modem header region,
+ * and they are initialized together.
*/
- offset = ipa->mem[IPA_MEM_MODEM_HEADER].offset;
- size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
- size += ipa->mem[IPA_MEM_AP_HEADER].size;
+ mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
+ offset = mem->offset;
+ size = mem->size;
+ mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
+ if (mem)
+ size += mem->size;
ipa_cmd_hdr_init_local_add(trans, offset, size, addr);
- ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_PROC_CTX]);
-
- ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_AP_PROC_CTX]);
-
- ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM]);
+ ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
+ ipa_mem_zero_region_add(trans, IPA_MEM_AP_PROC_CTX);
+ ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);
gsi_trans_commit_wait(trans);
/* Tell the hardware where the processing context area is located */
- offset = ipa->mem_offset + ipa->mem[IPA_MEM_MODEM_PROC_CTX].offset;
+ mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
+ offset = ipa->mem_offset + mem->offset;
val = proc_cntxt_base_addr_encoded(ipa->version, offset);
iowrite32(val, ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_OFFSET);
return 0;
}
-#ifdef IPA_VALIDATE
+/* Is the given memory region ID valid for the current IPA version? */
+static bool ipa_mem_id_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
+{
+ enum ipa_version version = ipa->version;
+
+ switch (mem_id) {
+ case IPA_MEM_UC_SHARED:
+ case IPA_MEM_UC_INFO:
+ case IPA_MEM_V4_FILTER_HASHED:
+ case IPA_MEM_V4_FILTER:
+ case IPA_MEM_V6_FILTER_HASHED:
+ case IPA_MEM_V6_FILTER:
+ case IPA_MEM_V4_ROUTE_HASHED:
+ case IPA_MEM_V4_ROUTE:
+ case IPA_MEM_V6_ROUTE_HASHED:
+ case IPA_MEM_V6_ROUTE:
+ case IPA_MEM_MODEM_HEADER:
+ case IPA_MEM_AP_HEADER:
+ case IPA_MEM_MODEM_PROC_CTX:
+ case IPA_MEM_AP_PROC_CTX:
+ case IPA_MEM_MODEM:
+ case IPA_MEM_UC_EVENT_RING:
+ case IPA_MEM_PDN_CONFIG:
+ case IPA_MEM_STATS_QUOTA_MODEM:
+ case IPA_MEM_STATS_QUOTA_AP:
+ case IPA_MEM_END_MARKER: /* pseudo region */
+ break;
+
+ case IPA_MEM_STATS_TETHERING:
+ case IPA_MEM_STATS_DROP:
+ if (version < IPA_VERSION_4_0)
+ return false;
+ break;
+
+ case IPA_MEM_STATS_V4_FILTER:
+ case IPA_MEM_STATS_V6_FILTER:
+ case IPA_MEM_STATS_V4_ROUTE:
+ case IPA_MEM_STATS_V6_ROUTE:
+ if (version < IPA_VERSION_4_0 || version > IPA_VERSION_4_2)
+ return false;
+ break;
+
+ case IPA_MEM_NAT_TABLE:
+ case IPA_MEM_STATS_FILTER_ROUTE:
+ if (version < IPA_VERSION_4_5)
+ return false;
+ break;
+
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+/* Must the given memory region be present in the configuration? */
+static bool ipa_mem_id_required(struct ipa *ipa, enum ipa_mem_id mem_id)
+{
+ switch (mem_id) {
+ case IPA_MEM_UC_SHARED:
+ case IPA_MEM_UC_INFO:
+ case IPA_MEM_V4_FILTER_HASHED:
+ case IPA_MEM_V4_FILTER:
+ case IPA_MEM_V6_FILTER_HASHED:
+ case IPA_MEM_V6_FILTER:
+ case IPA_MEM_V4_ROUTE_HASHED:
+ case IPA_MEM_V4_ROUTE:
+ case IPA_MEM_V6_ROUTE_HASHED:
+ case IPA_MEM_V6_ROUTE:
+ case IPA_MEM_MODEM_HEADER:
+ case IPA_MEM_MODEM_PROC_CTX:
+ case IPA_MEM_AP_PROC_CTX:
+ case IPA_MEM_MODEM:
+ return true;
+
+ case IPA_MEM_PDN_CONFIG:
+ case IPA_MEM_STATS_QUOTA_MODEM:
+ case IPA_MEM_STATS_TETHERING:
+ return ipa->version >= IPA_VERSION_4_0;
-static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
+ default:
+ return false; /* Anything else is optional */
+ }
+}
+
+static bool ipa_mem_valid_one(struct ipa *ipa, const struct ipa_mem *mem)
{
- const struct ipa_mem *mem = &ipa->mem[mem_id];
struct device *dev = &ipa->pdev->dev;
+ enum ipa_mem_id mem_id = mem->id;
u16 size_multiple;
+ /* Make sure the memory region is valid for this version of IPA */
+ if (!ipa_mem_id_valid(ipa, mem_id)) {
+ dev_err(dev, "region id %u not valid\n", mem_id);
+ return false;
+ }
+
+ if (!mem->size && !mem->canary_count) {
+ dev_err(dev, "empty memory region %u\n", mem_id);
+ return false;
+ }
+
/* Other than modem memory, sizes must be a multiple of 8 */
size_multiple = mem_id == IPA_MEM_MODEM ? 4 : 8;
if (mem->size % size_multiple)
@@ -117,23 +230,74 @@ static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
else if (mem->offset < mem->canary_count * sizeof(__le32))
dev_err(dev, "region %u offset too small for %hu canaries\n",
mem_id, mem->canary_count);
- else if (mem->offset + mem->size > ipa->mem_size)
- dev_err(dev, "region %u ends beyond memory limit (0x%08x)\n",
- mem_id, ipa->mem_size);
+ else if (mem_id == IPA_MEM_END_MARKER && mem->size)
+ dev_err(dev, "non-zero end marker region size\n");
else
return true;
return false;
}
-#else /* !IPA_VALIDATE */
-
-static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
+/* Verify each defined memory region is valid. */
+static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
{
+ DECLARE_BITMAP(regions, IPA_MEM_COUNT) = { };
+ struct device *dev = &ipa->pdev->dev;
+ enum ipa_mem_id mem_id;
+ u32 i;
+
+ if (mem_data->local_count > IPA_MEM_COUNT) {
+ dev_err(dev, "too many memory regions (%u > %u)\n",
+ mem_data->local_count, IPA_MEM_COUNT);
+ return false;
+ }
+
+ for (i = 0; i < mem_data->local_count; i++) {
+ const struct ipa_mem *mem = &mem_data->local[i];
+
+ if (__test_and_set_bit(mem->id, regions)) {
+ dev_err(dev, "duplicate memory region %u\n", mem->id);
+ return false;
+ }
+
+ /* Defined regions have non-zero size and/or canary count */
+ if (!ipa_mem_valid_one(ipa, mem))
+ return false;
+ }
+
+ /* Now see if any required regions are not defined */
+ for (mem_id = find_first_zero_bit(regions, IPA_MEM_COUNT);
+ mem_id < IPA_MEM_COUNT;
+ mem_id = find_next_zero_bit(regions, IPA_MEM_COUNT, mem_id + 1)) {
+ if (ipa_mem_id_required(ipa, mem_id))
+ dev_err(dev, "required memory region %u missing\n",
+ mem_id);
+ }
+
return true;
}
-#endif /*! IPA_VALIDATE */
+/* Do all memory regions fit within the IPA local memory? */
+static bool ipa_mem_size_valid(struct ipa *ipa)
+{
+ struct device *dev = &ipa->pdev->dev;
+ u32 limit = ipa->mem_size;
+ u32 i;
+
+ for (i = 0; i < ipa->mem_count; i++) {
+ const struct ipa_mem *mem = &ipa->mem[i];
+
+ if (mem->offset + mem->size <= limit)
+ continue;
+
+ dev_err(dev, "region %u ends beyond memory limit (0x%08x)\n",
+ mem->id, limit);
+
+ return false;
+ }
+
+ return true;
+}
/**
* ipa_mem_config() - Configure IPA shared memory
@@ -144,11 +308,12 @@ static bool ipa_mem_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
int ipa_mem_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
- enum ipa_mem_id mem_id;
+ const struct ipa_mem *mem;
dma_addr_t addr;
u32 mem_size;
void *virt;
u32 val;
+ u32 i;
/* Check the advertised location and size of the shared memory area */
val = ioread32(ipa->reg_virt + IPA_REG_SHARED_MEM_SIZE_OFFSET);
@@ -168,6 +333,10 @@ int ipa_mem_config(struct ipa *ipa)
mem_size);
}
+ /* We know our memory size; make sure regions are all in range */
+ if (!ipa_mem_size_valid(ipa))
+ return -EINVAL;
+
/* Prealloc DMA memory for zeroing regions */
virt = dma_alloc_coherent(dev, IPA_MEM_MAX, &addr, GFP_KERNEL);
if (!virt)
@@ -176,29 +345,18 @@ int ipa_mem_config(struct ipa *ipa)
ipa->zero_virt = virt;
ipa->zero_size = IPA_MEM_MAX;
- /* Verify each defined memory region is valid, and if indicated
- * for the region, write "canary" values in the space prior to
- * the region's base address.
+ /* For each defined region, write "canary" values in the
+ * space prior to the region's base address if indicated.
*/
- for (mem_id = 0; mem_id < IPA_MEM_COUNT; mem_id++) {
- const struct ipa_mem *mem = &ipa->mem[mem_id];
- u16 canary_count;
+ for (i = 0; i < ipa->mem_count; i++) {
+ u16 canary_count = ipa->mem[i].canary_count;
__le32 *canary;
- /* Validate all regions (even undefined ones) */
- if (!ipa_mem_valid(ipa, mem_id))
- goto err_dma_free;
-
- /* Skip over undefined regions */
- if (!mem->offset && !mem->size)
- continue;
-
- canary_count = mem->canary_count;
if (!canary_count)
continue;
/* Write canary values in the space before the region */
- canary = ipa->mem_virt + ipa->mem_offset + mem->offset;
+ canary = ipa->mem_virt + ipa->mem_offset + ipa->mem[i].offset;
do
*--canary = IPA_MEM_CANARY_VAL;
while (--canary_count);
@@ -212,8 +370,9 @@ int ipa_mem_config(struct ipa *ipa)
if (!ipa_cmd_data_valid(ipa))
goto err_dma_free;
- /* Verify the microcontroller ring alignment (0 is OK too) */
- if (ipa->mem[IPA_MEM_UC_EVENT_RING].offset % 1024) {
+ /* Verify the microcontroller ring alignment (if defined) */
+ mem = ipa_mem_find(ipa, IPA_MEM_UC_EVENT_RING);
+ if (mem && mem->offset % 1024) {
dev_err(dev, "microcontroller ring not 1024-byte aligned\n");
goto err_dma_free;
}
@@ -261,11 +420,9 @@ int ipa_mem_zero_modem(struct ipa *ipa)
return -EBUSY;
}
- ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_HEADER]);
-
- ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM_PROC_CTX]);
-
- ipa_mem_zero_region_add(trans, &ipa->mem[IPA_MEM_MODEM]);
+ ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_HEADER);
+ ipa_mem_zero_region_add(trans, IPA_MEM_MODEM_PROC_CTX);
+ ipa_mem_zero_region_add(trans, IPA_MEM_MODEM);
gsi_trans_commit_wait(trans);
@@ -380,7 +537,7 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
* (in this case, the modem). An allocation from SMEM is persistent
* until the AP reboots; there is no way to free an allocated SMEM
* region. Allocation only reserves the space; to use it you need
- * to "get" a pointer it (this implies no reference counting).
+ * to "get" a pointer it (this does not imply reference counting).
* The item might have already been allocated, in which case we
* use it unless the size isn't what we expect.
*/
@@ -457,11 +614,12 @@ int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
struct resource *res;
int ret;
- if (mem_data->local_count > IPA_MEM_COUNT) {
- dev_err(dev, "to many memory regions (%u > %u)\n",
- mem_data->local_count, IPA_MEM_COUNT);
+ /* Make sure the set of defined memory regions is valid */
+ if (!ipa_mem_valid(ipa, mem_data))
return -EINVAL;
- }
+
+ ipa->mem_count = mem_data->local_count;
+ ipa->mem = mem_data->local;
ret = dma_set_mask_and_coherent(&ipa->pdev->dev, DMA_BIT_MASK(64));
if (ret) {
@@ -486,9 +644,6 @@ int ipa_mem_init(struct ipa *ipa, const struct ipa_mem_data *mem_data)
ipa->mem_addr = res->start;
ipa->mem_size = resource_size(res);
- /* The ipa->mem[] array is indexed by enum ipa_mem_id values */
- ipa->mem = mem_data->local;
-
ret = ipa_imem_init(ipa, mem_data->imem_addr, mem_data->imem_size);
if (ret)
goto err_unmap;
diff --git a/drivers/net/ipa/ipa_mem.h b/drivers/net/ipa/ipa_mem.h
index a422aec69e5d..570bfdd99bff 100644
--- a/drivers/net/ipa/ipa_mem.h
+++ b/drivers/net/ipa/ipa_mem.h
@@ -54,37 +54,43 @@ enum ipa_mem_id {
IPA_MEM_V6_ROUTE_HASHED, /* 2 canaries */
IPA_MEM_V6_ROUTE, /* 2 canaries */
IPA_MEM_MODEM_HEADER, /* 2 canaries */
- IPA_MEM_AP_HEADER, /* 0 canaries */
+ IPA_MEM_AP_HEADER, /* 0 canaries, optional */
IPA_MEM_MODEM_PROC_CTX, /* 2 canaries */
IPA_MEM_AP_PROC_CTX, /* 0 canaries */
- IPA_MEM_NAT_TABLE, /* 4 canaries (IPA v4.5 and above) */
- IPA_MEM_PDN_CONFIG, /* 0/2 canaries (IPA v4.0 and above) */
- IPA_MEM_STATS_QUOTA_MODEM, /* 2/4 canaries (IPA v4.0 and above) */
- IPA_MEM_STATS_QUOTA_AP, /* 0 canaries (IPA v4.0 and above) */
- IPA_MEM_STATS_TETHERING, /* 0 canaries (IPA v4.0 and above) */
+ IPA_MEM_MODEM, /* 0/2 canaries */
+ IPA_MEM_UC_EVENT_RING, /* 1 canary, optional */
+ IPA_MEM_PDN_CONFIG, /* 0/2 canaries (IPA v4.0+) */
+ IPA_MEM_STATS_QUOTA_MODEM, /* 2/4 canaries (IPA v4.0+) */
+ IPA_MEM_STATS_QUOTA_AP, /* 0 canaries, optional (IPA v4.0+) */
+ IPA_MEM_STATS_TETHERING, /* 0 canaries (IPA v4.0+) */
+ IPA_MEM_STATS_DROP, /* 0 canaries, optional (IPA v4.0+) */
+ /* The next 5 filter and route statistics regions are optional */
IPA_MEM_STATS_V4_FILTER, /* 0 canaries (IPA v4.0-v4.2) */
IPA_MEM_STATS_V6_FILTER, /* 0 canaries (IPA v4.0-v4.2) */
IPA_MEM_STATS_V4_ROUTE, /* 0 canaries (IPA v4.0-v4.2) */
IPA_MEM_STATS_V6_ROUTE, /* 0 canaries (IPA v4.0-v4.2) */
- IPA_MEM_STATS_FILTER_ROUTE, /* 0 canaries (IPA v4.5 and above) */
- IPA_MEM_STATS_DROP, /* 0 canaries (IPA v4.0 and above) */
- IPA_MEM_MODEM, /* 0/2 canaries */
- IPA_MEM_UC_EVENT_RING, /* 1 canary */
+ IPA_MEM_STATS_FILTER_ROUTE, /* 0 canaries (IPA v4.5+) */
+ IPA_MEM_NAT_TABLE, /* 4 canaries, optional (IPA v4.5+) */
+ IPA_MEM_END_MARKER, /* 1 canary (not a real region) */
IPA_MEM_COUNT, /* Number of regions (not an index) */
};
/**
* struct ipa_mem - IPA local memory region description
+ * @id: memory region identifier
* @offset: offset in IPA memory space to base of the region
* @size: size in bytes of the region
* @canary_count: Number of 32-bit "canary" values that precede the region
*/
struct ipa_mem {
+ enum ipa_mem_id id;
u32 offset;
u16 size;
u16 canary_count;
};
+const struct ipa_mem *ipa_mem_find(struct ipa *ipa, enum ipa_mem_id mem_id);
+
int ipa_mem_config(struct ipa *ipa);
void ipa_mem_deconfig(struct ipa *ipa);
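Callers of ipa_mem_find() must allow for a NULL return: regions that are optional for a given IPA version may be absent from the configuration data entirely. A sketch of the caller pattern, mirroring the microcontroller ring check in ipa_mem_config() above:

    const struct ipa_mem *mem;

    /* Required regions are guaranteed present by ipa_mem_valid() and can
     * be dereferenced directly; optional regions need a NULL check.
     */
    mem = ipa_mem_find(ipa, IPA_MEM_UC_EVENT_RING);
    if (mem && mem->offset % 1024)
        dev_err(dev, "microcontroller ring not 1024-byte aligned\n");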
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index 593665efbcf9..4661105ce7ab 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -298,32 +298,32 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.platform_type_valid = 1;
req.platform_type = IPA_QMI_PLATFORM_TYPE_MSM_ANDROID;
- mem = &ipa->mem[IPA_MEM_MODEM_HEADER];
+ mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
if (mem->size) {
req.hdr_tbl_info_valid = 1;
req.hdr_tbl_info.start = ipa->mem_offset + mem->offset;
req.hdr_tbl_info.end = req.hdr_tbl_info.start + mem->size - 1;
}
- mem = &ipa->mem[IPA_MEM_V4_ROUTE];
+ mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE);
req.v4_route_tbl_info_valid = 1;
req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
req.v4_route_tbl_info.count = mem->size / sizeof(__le64);
- mem = &ipa->mem[IPA_MEM_V6_ROUTE];
+ mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE);
req.v6_route_tbl_info_valid = 1;
req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
req.v6_route_tbl_info.count = mem->size / sizeof(__le64);
- mem = &ipa->mem[IPA_MEM_V4_FILTER];
+ mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER);
req.v4_filter_tbl_start_valid = 1;
req.v4_filter_tbl_start = ipa->mem_offset + mem->offset;
- mem = &ipa->mem[IPA_MEM_V6_FILTER];
+ mem = ipa_mem_find(ipa, IPA_MEM_V6_FILTER);
req.v6_filter_tbl_start_valid = 1;
req.v6_filter_tbl_start = ipa->mem_offset + mem->offset;
- mem = &ipa->mem[IPA_MEM_MODEM];
+ mem = ipa_mem_find(ipa, IPA_MEM_MODEM);
if (mem->size) {
req.modem_mem_info_valid = 1;
req.modem_mem_info.start = ipa->mem_offset + mem->offset;
@@ -336,7 +336,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
/* skip_uc_load_valid and skip_uc_load are set above */
- mem = &ipa->mem[IPA_MEM_MODEM_PROC_CTX];
+ mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
if (mem->size) {
req.hdr_proc_ctx_tbl_info_valid = 1;
req.hdr_proc_ctx_tbl_info.start =
@@ -347,7 +347,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
/* Nothing to report for the compression table (zip_tbl_info) */
- mem = &ipa->mem[IPA_MEM_V4_ROUTE_HASHED];
+ mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE_HASHED);
if (mem->size) {
req.v4_hash_route_tbl_info_valid = 1;
req.v4_hash_route_tbl_info.start =
@@ -355,7 +355,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v4_hash_route_tbl_info.count = mem->size / sizeof(__le64);
}
- mem = &ipa->mem[IPA_MEM_V6_ROUTE_HASHED];
+ mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE_HASHED);
if (mem->size) {
req.v6_hash_route_tbl_info_valid = 1;
req.v6_hash_route_tbl_info.start =
@@ -363,22 +363,21 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v6_hash_route_tbl_info.count = mem->size / sizeof(__le64);
}
- mem = &ipa->mem[IPA_MEM_V4_FILTER_HASHED];
+ mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER_HASHED);
if (mem->size) {
req.v4_hash_filter_tbl_start_valid = 1;
req.v4_hash_filter_tbl_start = ipa->mem_offset + mem->offset;
}
- mem = &ipa->mem[IPA_MEM_V6_FILTER_HASHED];
+ mem = ipa_mem_find(ipa, IPA_MEM_V6_FILTER_HASHED);
if (mem->size) {
req.v6_hash_filter_tbl_start_valid = 1;
req.v6_hash_filter_tbl_start = ipa->mem_offset + mem->offset;
}
- /* None of the stats fields are valid (IPA v4.0 and above) */
-
+ /* The stats fields are only valid for IPA v4.0+ */
if (ipa->version >= IPA_VERSION_4_0) {
- mem = &ipa->mem[IPA_MEM_STATS_QUOTA_MODEM];
+ mem = ipa_mem_find(ipa, IPA_MEM_STATS_QUOTA_MODEM);
if (mem->size) {
req.hw_stats_quota_base_addr_valid = 1;
req.hw_stats_quota_base_addr =
@@ -387,8 +386,9 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.hw_stats_quota_size = ipa->mem_offset + mem->size;
}
- mem = &ipa->mem[IPA_MEM_STATS_DROP];
- if (mem->size) {
+ /* If the DROP stats region is defined, include it */
+ mem = ipa_mem_find(ipa, IPA_MEM_STATS_DROP);
+ if (mem && mem->size) {
req.hw_stats_drop_base_addr_valid = 1;
req.hw_stats_drop_base_addr =
ipa->mem_offset + mem->offset;
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index 286ea9634c49..b89dec5865a5 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -368,6 +368,7 @@ enum ipa_cs_offload_en {
IPA_CS_OFFLOAD_NONE = 0x0,
IPA_CS_OFFLOAD_UL = 0x1, /* Before IPA v4.5 (TX) */
IPA_CS_OFFLOAD_DL = 0x2, /* Before IPA v4.5 (RX) */
+ IPA_CS_OFFLOAD_INLINE = 0x1, /* IPA v4.5 (TX and RX) */
};
/* Valid only for TX (IPA consumer) endpoints */
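IPA_CS_OFFLOAD_INLINE shares the value 0x1 with IPA_CS_OFFLOAD_UL: on IPA v4.5+ the register field is reinterpreted so that a single value selects inline (MAPv5) checksum offload in both directions. The selection in ipa_endpoint_init_cfg() then reduces to the following (sketch, locals abbreviated):

    if (version < IPA_VERSION_4_5)
        enabled = toward_ipa ? IPA_CS_OFFLOAD_UL : IPA_CS_OFFLOAD_DL;
    else
        enabled = IPA_CS_OFFLOAD_INLINE;
    val |= u32_encode_bits(enabled, CS_OFFLOAD_EN_FMASK);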
diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c
index a5f7a79a1923..cf709df70d28 100644
--- a/drivers/net/ipa/ipa_smp2p.c
+++ b/drivers/net/ipa/ipa_smp2p.c
@@ -176,11 +176,8 @@ static int ipa_smp2p_irq_init(struct ipa_smp2p *smp2p, const char *name,
int ret;
ret = platform_get_irq_byname(smp2p->ipa->pdev, name);
- if (ret <= 0) {
- dev_err(dev, "DT error %d getting \"%s\" IRQ property\n",
- ret, name);
+ if (ret <= 0)
return ret ? : -EINVAL;
- }
irq = ret;
ret = request_threaded_irq(irq, NULL, handler, 0, name, smp2p);
diff --git a/drivers/net/ipa/ipa_sysfs.c b/drivers/net/ipa/ipa_sysfs.c
new file mode 100644
index 000000000000..ff61dbdd70d8
--- /dev/null
+++ b/drivers/net/ipa/ipa_sysfs.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2021 Linaro Ltd. */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/sysfs.h>
+
+#include "ipa.h"
+#include "ipa_version.h"
+#include "ipa_sysfs.h"
+
+static const char *ipa_version_string(struct ipa *ipa)
+{
+ switch (ipa->version) {
+ case IPA_VERSION_3_0:
+ return "3.0";
+ case IPA_VERSION_3_1:
+ return "3.1";
+ case IPA_VERSION_3_5:
+ return "3.5";
+ case IPA_VERSION_3_5_1:
+ return "3.5.1";
+ case IPA_VERSION_4_0:
+ return "4.0";
+ case IPA_VERSION_4_1:
+ return "4.1";
+ case IPA_VERSION_4_2:
+ return "4.2";
+ case IPA_VERSION_4_5:
+ return "4.5";
+ case IPA_VERSION_4_7:
+ return "4.7";
+ case IPA_VERSION_4_9:
+ return "4.9";
+ case IPA_VERSION_4_11:
+ return "4.11";
+ default:
+ return "0.0"; /* Won't happen (checked at probe time) */
+ }
+}
+
+static ssize_t
+version_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", ipa_version_string(ipa));
+}
+
+static DEVICE_ATTR_RO(version);
+
+static struct attribute *ipa_attrs[] = {
+ &dev_attr_version.attr,
+ NULL
+};
+
+const struct attribute_group ipa_attribute_group = {
+ .attrs = ipa_attrs,
+};
+
+static const char *ipa_offload_string(struct ipa *ipa)
+{
+ return ipa->version < IPA_VERSION_4_5 ? "MAPv4" : "MAPv5";
+}
+
+static ssize_t rx_offload_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", ipa_offload_string(ipa));
+}
+
+static DEVICE_ATTR_RO(rx_offload);
+
+static ssize_t tx_offload_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", ipa_offload_string(ipa));
+}
+
+static DEVICE_ATTR_RO(tx_offload);
+
+static struct attribute *ipa_feature_attrs[] = {
+ &dev_attr_rx_offload.attr,
+ &dev_attr_tx_offload.attr,
+ NULL
+};
+
+const struct attribute_group ipa_feature_attribute_group = {
+ .name = "feature",
+ .attrs = ipa_feature_attrs,
+};
+
+static ssize_t
+ipa_endpoint_id_show(struct ipa *ipa, char *buf, enum ipa_endpoint_name name)
+{
+ u32 endpoint_id = ipa->name_map[name]->endpoint_id;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", endpoint_id);
+}
+
+static ssize_t rx_endpoint_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ return ipa_endpoint_id_show(ipa, buf, IPA_ENDPOINT_AP_MODEM_RX);
+}
+
+static DEVICE_ATTR_RO(rx_endpoint_id);
+
+static ssize_t tx_endpoint_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ipa *ipa = dev_get_drvdata(dev);
+
+ return ipa_endpoint_id_show(ipa, buf, IPA_ENDPOINT_AP_MODEM_TX);
+}
+
+static DEVICE_ATTR_RO(tx_endpoint_id);
+
+static struct attribute *ipa_modem_attrs[] = {
+ &dev_attr_rx_endpoint_id.attr,
+ &dev_attr_tx_endpoint_id.attr,
+ NULL
+};
+
+const struct attribute_group ipa_modem_attribute_group = {
+ .name = "modem",
+ .attrs = ipa_modem_attrs,
+};
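With the groups wired up via .dev_groups in ipa_main.c above, the attributes appear under the IPA device's sysfs directory: version at the top level, rx_offload and tx_offload under feature/, and the endpoint IDs under modem/. A hypothetical userspace sketch (the device path is an assumption; it varies by platform):

    #include <stdio.h>

    int main(void)
    {
        /* Illustrative path; find the real one under /sys/devices/platform */
        FILE *f = fopen("/sys/devices/platform/1e40000.ipa/version", "r");
        char buf[16];

        if (!f)
            return 1;
        if (fgets(buf, sizeof(buf), f))
            printf("IPA version: %s", buf);
        fclose(f);
        return 0;
    }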
diff --git a/drivers/net/ipa/ipa_sysfs.h b/drivers/net/ipa/ipa_sysfs.h
new file mode 100644
index 000000000000..b34e5650bf8c
--- /dev/null
+++ b/drivers/net/ipa/ipa_sysfs.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2019-2021 Linaro Ltd.
+ */
+#ifndef _IPA_SYSFS_H_
+#define _IPA_SYSFS_H_
+
+struct attribute_group;
+
+extern const struct attribute_group ipa_attribute_group;
+extern const struct attribute_group ipa_feature_attribute_group;
+extern const struct attribute_group ipa_modem_attribute_group;
+
+#endif /* _IPA_SYSFS_H_ */
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 3168d72f4245..c617a9156f26 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -150,29 +150,16 @@ static void ipa_table_validate_build(void)
}
static bool
-ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed)
+ipa_table_valid_one(struct ipa *ipa, enum ipa_mem_id mem_id, bool route)
{
+ const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
struct device *dev = &ipa->pdev->dev;
- const struct ipa_mem *mem;
u32 size;
- if (route) {
- if (ipv6)
- mem = hashed ? &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]
- : &ipa->mem[IPA_MEM_V6_ROUTE];
- else
- mem = hashed ? &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]
- : &ipa->mem[IPA_MEM_V4_ROUTE];
+ if (route)
size = IPA_ROUTE_COUNT_MAX * sizeof(__le64);
- } else {
- if (ipv6)
- mem = hashed ? &ipa->mem[IPA_MEM_V6_FILTER_HASHED]
- : &ipa->mem[IPA_MEM_V6_FILTER];
- else
- mem = hashed ? &ipa->mem[IPA_MEM_V4_FILTER_HASHED]
- : &ipa->mem[IPA_MEM_V4_FILTER];
+ else
size = (1 + IPA_FILTER_COUNT_MAX) * sizeof(__le64);
- }
if (!ipa_cmd_table_valid(ipa, mem, route, ipv6, hashed))
return false;
@@ -185,9 +172,8 @@ ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed)
if (hashed && !mem->size)
return true;
- dev_err(dev, "IPv%c %s%s table region size 0x%02x, expected 0x%02x\n",
- ipv6 ? '6' : '4', hashed ? "hashed " : "",
- route ? "route" : "filter", mem->size, size);
+ dev_err(dev, "%s table region %u size 0x%02x, expected 0x%02x\n",
+ route ? "route" : "filter", mem_id, mem->size, size);
return false;
}
@@ -195,16 +181,16 @@ ipa_table_valid_one(struct ipa *ipa, bool route, bool ipv6, bool hashed)
/* Verify the filter and route table memory regions are the expected size */
bool ipa_table_valid(struct ipa *ipa)
{
- bool valid = true;
+ bool valid;
- valid = valid && ipa_table_valid_one(ipa, false, false, false);
- valid = valid && ipa_table_valid_one(ipa, false, false, true);
- valid = valid && ipa_table_valid_one(ipa, false, true, false);
- valid = valid && ipa_table_valid_one(ipa, false, true, true);
- valid = valid && ipa_table_valid_one(ipa, true, false, false);
- valid = valid && ipa_table_valid_one(ipa, true, false, true);
- valid = valid && ipa_table_valid_one(ipa, true, true, false);
- valid = valid && ipa_table_valid_one(ipa, true, true, true);
+ valid = ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER, false);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_FILTER_HASHED, false);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER, false);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_FILTER_HASHED, false);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE, true);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V4_ROUTE_HASHED, true);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE, true);
+ valid = valid && ipa_table_valid_one(ipa, IPA_MEM_V6_ROUTE_HASHED, true);
return valid;
}
@@ -256,14 +242,15 @@ static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
}
static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
- u16 first, u16 count, const struct ipa_mem *mem)
+ u16 first, u16 count, enum ipa_mem_id mem_id)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
dma_addr_t addr;
u32 offset;
u16 size;
- /* Nothing to do if the table memory regions is empty */
+ /* Nothing to do if the table memory region is empty */
if (!mem->size)
return;
@@ -282,16 +269,13 @@ static void ipa_table_reset_add(struct gsi_trans *trans, bool filter,
* for the IPv4 and IPv6 non-hashed and hashed filter tables.
*/
static int
-ipa_filter_reset_table(struct ipa *ipa, const struct ipa_mem *mem, bool modem)
+ipa_filter_reset_table(struct ipa *ipa, enum ipa_mem_id mem_id, bool modem)
{
u32 ep_mask = ipa->filter_map;
u32 count = hweight32(ep_mask);
struct gsi_trans *trans;
enum gsi_ee_id ee_id;
- if (!mem->size)
- return 0;
-
trans = ipa_cmd_trans_alloc(ipa, count);
if (!trans) {
dev_err(&ipa->pdev->dev,
@@ -311,7 +295,7 @@ ipa_filter_reset_table(struct ipa *ipa, const struct ipa_mem *mem, bool modem)
if (endpoint->ee_id != ee_id)
continue;
- ipa_table_reset_add(trans, true, endpoint_id, 1, mem);
+ ipa_table_reset_add(trans, true, endpoint_id, 1, mem_id);
}
gsi_trans_commit_wait(trans);
@@ -327,20 +311,18 @@ static int ipa_filter_reset(struct ipa *ipa, bool modem)
{
int ret;
- ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V4_FILTER], modem);
+ ret = ipa_filter_reset_table(ipa, IPA_MEM_V4_FILTER, modem);
if (ret)
return ret;
- ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V4_FILTER_HASHED],
- modem);
+ ret = ipa_filter_reset_table(ipa, IPA_MEM_V4_FILTER_HASHED, modem);
if (ret)
return ret;
- ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V6_FILTER], modem);
+ ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER, modem);
if (ret)
return ret;
- ret = ipa_filter_reset_table(ipa, &ipa->mem[IPA_MEM_V6_FILTER_HASHED],
- modem);
+ ret = ipa_filter_reset_table(ipa, IPA_MEM_V6_FILTER_HASHED, modem);
return ret;
}
@@ -371,15 +353,13 @@ static int ipa_route_reset(struct ipa *ipa, bool modem)
count = IPA_ROUTE_AP_COUNT;
}
+ ipa_table_reset_add(trans, false, first, count, IPA_MEM_V4_ROUTE);
ipa_table_reset_add(trans, false, first, count,
- &ipa->mem[IPA_MEM_V4_ROUTE]);
- ipa_table_reset_add(trans, false, first, count,
- &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]);
+ IPA_MEM_V4_ROUTE_HASHED);
+ ipa_table_reset_add(trans, false, first, count, IPA_MEM_V6_ROUTE);
ipa_table_reset_add(trans, false, first, count,
- &ipa->mem[IPA_MEM_V6_ROUTE]);
- ipa_table_reset_add(trans, false, first, count,
- &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]);
+ IPA_MEM_V6_ROUTE_HASHED);
gsi_trans_commit_wait(trans);
@@ -433,10 +413,12 @@ int ipa_table_hash_flush(struct ipa *ipa)
static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
enum ipa_cmd_opcode opcode,
- const struct ipa_mem *mem,
- const struct ipa_mem *hash_mem)
+ enum ipa_mem_id mem_id,
+ enum ipa_mem_id hash_mem_id)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
+ const struct ipa_mem *hash_mem = ipa_mem_find(ipa, hash_mem_id);
+ const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
dma_addr_t hash_addr;
dma_addr_t addr;
u16 hash_count;
@@ -477,20 +459,16 @@ int ipa_table_setup(struct ipa *ipa)
}
ipa_table_init_add(trans, false, IPA_CMD_IP_V4_ROUTING_INIT,
- &ipa->mem[IPA_MEM_V4_ROUTE],
- &ipa->mem[IPA_MEM_V4_ROUTE_HASHED]);
+ IPA_MEM_V4_ROUTE, IPA_MEM_V4_ROUTE_HASHED);
ipa_table_init_add(trans, false, IPA_CMD_IP_V6_ROUTING_INIT,
- &ipa->mem[IPA_MEM_V6_ROUTE],
- &ipa->mem[IPA_MEM_V6_ROUTE_HASHED]);
+ IPA_MEM_V6_ROUTE, IPA_MEM_V6_ROUTE_HASHED);
ipa_table_init_add(trans, true, IPA_CMD_IP_V4_FILTER_INIT,
- &ipa->mem[IPA_MEM_V4_FILTER],
- &ipa->mem[IPA_MEM_V4_FILTER_HASHED]);
+ IPA_MEM_V4_FILTER, IPA_MEM_V4_FILTER_HASHED);
ipa_table_init_add(trans, true, IPA_CMD_IP_V6_FILTER_INIT,
- &ipa->mem[IPA_MEM_V6_FILTER],
- &ipa->mem[IPA_MEM_V6_FILTER_HASHED]);
+ IPA_MEM_V6_FILTER, IPA_MEM_V6_FILTER_HASHED);
gsi_trans_commit_wait(trans);
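The !mem->size early return dropped from ipa_filter_reset_table() is not lost behavior: ipa_table_reset_add(), earlier in this file, already returns immediately for an empty region, so resetting an undefined table remains a no-op.

    /* E.g. on a platform without hashed table support,
     * ipa_filter_reset_table(ipa, IPA_MEM_V4_FILTER_HASHED, modem)
     * still allocates and commits a transaction, but every
     * ipa_table_reset_add() call bails out on !mem->size, so no
     * commands are actually added to it.
     */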
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index 2756363e6938..fd9219863234 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -116,7 +116,8 @@ enum ipa_uc_event {
static struct ipa_uc_mem_area *ipa_uc_shared(struct ipa *ipa)
{
- u32 offset = ipa->mem_offset + ipa->mem[IPA_MEM_UC_SHARED].offset;
+ const struct ipa_mem *mem = ipa_mem_find(ipa, IPA_MEM_UC_SHARED);
+ u32 offset = ipa->mem_offset + mem->offset;
return ipa->mem_virt + offset;
}
diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h
index ee2b3d02f3cd..6c16c895d842 100644
--- a/drivers/net/ipa/ipa_version.h
+++ b/drivers/net/ipa/ipa_version.h
@@ -21,6 +21,8 @@
* @IPA_VERSION_4_11: IPA version 4.11/GSI version 2.11 (2.1.1)
*
* Defines the version of IPA (and GSI) hardware present on the platform.
+ * Please update ipa_version_valid() and ipa_version_string() whenever a
+ * new version is added.
*/
enum ipa_version {
IPA_VERSION_3_0,
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 1b998aa481f8..80de9768ecd4 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1781,7 +1781,7 @@ static int macvlan_device_event(struct notifier_block *unused,
unregister_netdevice_many(&list_kill);
break;
case NETDEV_PRE_TYPE_CHANGE:
- /* Forbid underlaying device to change its type. */
+ /* Forbid underlying device to change its type. */
return NOTIFY_BAD;
case NETDEV_NOTIFY_PEERS:
diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig
index d06e06f5e31a..99a6c13a11af 100644
--- a/drivers/net/mdio/Kconfig
+++ b/drivers/net/mdio/Kconfig
@@ -19,6 +19,13 @@ config MDIO_BUS
reflects whether the mdio_bus/mdio_device code is built as a
loadable module or built-in.
+config FWNODE_MDIO
+ def_tristate PHYLIB
+ depends on (ACPI || OF) || COMPILE_TEST
+ select FIXED_PHY
+ help
+ FWNODE MDIO bus (Ethernet PHY) accessors
+
config OF_MDIO
def_tristate PHYLIB
depends on OF
@@ -27,6 +34,13 @@ config OF_MDIO
help
OpenFirmware MDIO bus (Ethernet PHY) accessors
+config ACPI_MDIO
+ def_tristate PHYLIB
+ depends on ACPI
+ depends on PHYLIB
+ help
+ ACPI MDIO bus (Ethernet PHY) accessors
+
if MDIO_BUS
config MDIO_DEVRES
diff --git a/drivers/net/mdio/Makefile b/drivers/net/mdio/Makefile
index c3ec0ef989df..15f8dc4042ce 100644
--- a/drivers/net/mdio/Makefile
+++ b/drivers/net/mdio/Makefile
@@ -1,7 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for Linux MDIO bus drivers
-obj-$(CONFIG_OF_MDIO) += of_mdio.o
+obj-$(CONFIG_ACPI_MDIO) += acpi_mdio.o
+obj-$(CONFIG_FWNODE_MDIO) += fwnode_mdio.o
+obj-$(CONFIG_OF_MDIO) += of_mdio.o
obj-$(CONFIG_MDIO_ASPEED) += mdio-aspeed.o
obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o
diff --git a/drivers/net/mdio/acpi_mdio.c b/drivers/net/mdio/acpi_mdio.c
new file mode 100644
index 000000000000..d77c987fda9c
--- /dev/null
+++ b/drivers/net/mdio/acpi_mdio.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ACPI helpers for the MDIO (Ethernet PHY) API
+ *
+ * This file provides helper functions for extracting PHY device information
+ * out of the ACPI ASL and using it to populate an mii_bus.
+ */
+
+#include <linux/acpi.h>
+#include <linux/acpi_mdio.h>
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
+#include <linux/fwnode_mdio.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+MODULE_AUTHOR("Calvin Johnson <calvin.johnson@oss.nxp.com>");
+MODULE_LICENSE("GPL");
+
+/**
+ * acpi_mdiobus_register - Register mii_bus and create PHYs from the ACPI ASL.
+ * @mdio: pointer to mii_bus structure
+ * @fwnode: pointer to fwnode of MDIO bus. This fwnode is expected to represent
+ * an ACPI device object corresponding to the MDIO bus and its children are
+ * expected to correspond to the PHY devices on that bus.
+ *
+ * This function registers the mii_bus structure and registers a phy_device
+ * for each child node of @fwnode.
+ */
+int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *child;
+ u32 addr;
+ int ret;
+
+ /* Mask out all PHYs from auto probing. */
+ mdio->phy_mask = GENMASK(31, 0);
+ ret = mdiobus_register(mdio);
+ if (ret)
+ return ret;
+
+ ACPI_COMPANION_SET(&mdio->dev, to_acpi_device_node(fwnode));
+
+ /* Loop over the child nodes and register a phy_device for each PHY */
+ fwnode_for_each_child_node(fwnode, child) {
+ ret = acpi_get_local_address(ACPI_HANDLE_FWNODE(child), &addr);
+ if (ret || addr >= PHY_MAX_ADDR)
+ continue;
+
+ ret = fwnode_mdiobus_register_phy(mdio, child, addr);
+ if (ret == -ENODEV)
+ dev_err(&mdio->dev,
+ "MDIO device at address %d is missing.\n",
+ addr);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(acpi_mdiobus_register);
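A hypothetical caller sketch for acpi_mdiobus_register(): an Ethernet MAC driver registering its MDIO bus from an ACPI description. Everything except acpi_mdiobus_register() itself (the accessor callbacks, names) is illustrative:

    struct fwnode_handle *fwnode = dev_fwnode(dev);
    struct mii_bus *bus;
    int err;

    bus = devm_mdiobus_alloc(dev);
    if (!bus)
        return -ENOMEM;

    bus->name = "example-mii";
    bus->read = example_mdio_read;   /* assumed register accessors */
    bus->write = example_mdio_write;
    snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(dev));
    bus->parent = dev;

    err = acpi_mdiobus_register(bus, fwnode);
    if (err)
        return err;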
diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
new file mode 100644
index 000000000000..e96766da8de4
--- /dev/null
+++ b/drivers/net/mdio/fwnode_mdio.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * fwnode helpers for the MDIO (Ethernet PHY) API
+ *
+ * This file provides helper functions for extracting PHY device information
+ * out of the fwnode and using it to populate an mii_bus.
+ */
+
+#include <linux/acpi.h>
+#include <linux/fwnode_mdio.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+
+MODULE_AUTHOR("Calvin Johnson <calvin.johnson@oss.nxp.com>");
+MODULE_LICENSE("GPL");
+
+static struct mii_timestamper *
+fwnode_find_mii_timestamper(struct fwnode_handle *fwnode)
+{
+ struct of_phandle_args arg;
+ int err;
+
+ if (is_acpi_node(fwnode))
+ return NULL;
+
+ err = of_parse_phandle_with_fixed_args(to_of_node(fwnode),
+ "timestamper", 1, 0, &arg);
+ if (err == -ENOENT)
+ return NULL;
+ else if (err)
+ return ERR_PTR(err);
+
+ if (arg.args_count != 1)
+ return ERR_PTR(-EINVAL);
+
+ return register_mii_timestamper(arg.np, arg.args[0]);
+}
+
+int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
+ struct phy_device *phy,
+ struct fwnode_handle *child, u32 addr)
+{
+ int rc;
+
+ rc = fwnode_irq_get(child, 0);
+ if (rc == -EPROBE_DEFER)
+ return rc;
+
+ if (rc > 0) {
+ phy->irq = rc;
+ mdio->irq[addr] = rc;
+ } else {
+ phy->irq = mdio->irq[addr];
+ }
+
+ if (fwnode_property_read_bool(child, "broken-turn-around"))
+ mdio->phy_ignore_ta_mask |= 1 << addr;
+
+ fwnode_property_read_u32(child, "reset-assert-us",
+ &phy->mdio.reset_assert_delay);
+ fwnode_property_read_u32(child, "reset-deassert-us",
+ &phy->mdio.reset_deassert_delay);
+
+ /* Associate the fwnode with the device structure so it
+ * can be looked up later
+ */
+ fwnode_handle_get(child);
+ phy->mdio.dev.fwnode = child;
+
+ /* All data is now stored in the phy struct;
+ * register it
+ */
+ rc = phy_device_register(phy);
+ if (rc) {
+ fwnode_handle_put(child);
+ return rc;
+ }
+
+ dev_dbg(&mdio->dev, "registered phy %p fwnode at address %i\n",
+ child, addr);
+ return 0;
+}
+EXPORT_SYMBOL(fwnode_mdiobus_phy_device_register);
+
+int fwnode_mdiobus_register_phy(struct mii_bus *bus,
+ struct fwnode_handle *child, u32 addr)
+{
+ struct mii_timestamper *mii_ts = NULL;
+ struct phy_device *phy;
+ bool is_c45 = false;
+ u32 phy_id;
+ int rc;
+
+ mii_ts = fwnode_find_mii_timestamper(child);
+ if (IS_ERR(mii_ts))
+ return PTR_ERR(mii_ts);
+
+ rc = fwnode_property_match_string(child, "compatible",
+ "ethernet-phy-ieee802.3-c45");
+ if (rc >= 0)
+ is_c45 = true;
+
+ if (is_c45 || fwnode_get_phy_id(child, &phy_id))
+ phy = get_phy_device(bus, addr, is_c45);
+ else
+ phy = phy_device_create(bus, addr, phy_id, 0, NULL);
+ if (IS_ERR(phy)) {
+ unregister_mii_timestamper(mii_ts);
+ return PTR_ERR(phy);
+ }
+
+ if (is_acpi_node(child)) {
+ phy->irq = bus->irq[addr];
+
+ /* Associate the fwnode with the device structure so it
+ * can be looked up later.
+ */
+ phy->mdio.dev.fwnode = child;
+
+ /* All data is now stored in the phy struct, so register it */
+ rc = phy_device_register(phy);
+ if (rc) {
+ phy_device_free(phy);
+ fwnode_handle_put(phy->mdio.dev.fwnode);
+ return rc;
+ }
+ } else if (is_of_node(child)) {
+ rc = fwnode_mdiobus_phy_device_register(bus, phy, child, addr);
+ if (rc) {
+ unregister_mii_timestamper(mii_ts);
+ phy_device_free(phy);
+ return rc;
+ }
+ }
+
+ /* phy->mii_ts may already be defined by the PHY driver. A
+ * mii_timestamper probed via the device tree will still have
+ * precedence.
+ */
+ if (mii_ts)
+ phy->mii_ts = mii_ts;
+ return 0;
+}
+EXPORT_SYMBOL(fwnode_mdiobus_register_phy);
diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c
index 5d171e7f118d..bfc9be23c973 100644
--- a/drivers/net/mdio/mdio-bcm-unimac.c
+++ b/drivers/net/mdio/mdio-bcm-unimac.c
@@ -203,7 +203,7 @@ static void unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
return;
}
- /* The MDIO clock is the reference clock (typicaly 250Mhz) divided by
+ /* The MDIO clock is the reference clock (typically 250MHz) divided by
* 2 x (MDIO_CLK_DIV + 1)
*/
reg = unimac_mdio_readl(priv, MDIO_CFG);
diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index b36e5ea04ddf..2d67e12c8262 100644
--- a/drivers/net/mdio/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
@@ -139,10 +139,6 @@ static int mscc_miim_probe(struct platform_device *pdev)
struct mscc_miim_dev *dev;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*dev));
if (!bus)
return -ENOMEM;
@@ -155,7 +151,7 @@ static int mscc_miim_probe(struct platform_device *pdev)
bus->parent = &pdev->dev;
dev = bus->priv;
- dev->regs = devm_ioremap_resource(&pdev->dev, res);
+ dev->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(dev->regs)) {
dev_err(&pdev->dev, "Unable to map MIIM registers\n");
return PTR_ERR(dev->regs);
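devm_platform_get_and_ioremap_resource() folds platform_get_resource() and devm_ioremap_resource() into one call; the mdio-mux-bcm-iproc conversion below follows the same pattern. A sketch of its two forms:

    struct resource *res;
    void __iomem *regs;

    /* NULL third argument: only the mapping is needed (mdio-mscc-miim) */
    regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
    if (IS_ERR(regs))
        return PTR_ERR(regs);

    /* Non-NULL third argument: the caller also gets the resource, as
     * mdio-mux-bcm-iproc needs for its legacy base-address fixup
     */
    regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
    if (IS_ERR(regs))
        return PTR_ERR(regs);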
diff --git a/drivers/net/mdio/mdio-mux-bcm-iproc.c b/drivers/net/mdio/mdio-mux-bcm-iproc.c
index 03261e6b9ceb..014c0baedbd2 100644
--- a/drivers/net/mdio/mdio-mux-bcm-iproc.c
+++ b/drivers/net/mdio/mdio-mux-bcm-iproc.c
@@ -65,7 +65,7 @@ static void mdio_mux_iproc_config(struct iproc_mdiomux_desc *md)
writel(val, md->base + MDIO_SCAN_CTRL_OFFSET);
if (md->core_clk) {
- /* use rate adjust regs to derrive the mdio's operating
+ /* use rate adjust regs to derive the mdio's operating
* frequency from the specified core clock
*/
divisor = clk_get_rate(md->core_clk) / MDIO_OPERATING_FREQUENCY;
@@ -187,7 +187,9 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
return -ENOMEM;
md->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ md->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(md->base))
+ return PTR_ERR(md->base);
if (res->start & 0xfff) {
/* For backward compatibility in case the
* base address is specified with an offset.
@@ -196,9 +198,6 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
res->start &= ~0xfff;
res->end = res->start + MDIO_REG_ADDR_SPACE_SIZE - 1;
}
- md->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(md->base))
- return PTR_ERR(md->base);
md->mii_bus = devm_mdiobus_alloc(&pdev->dev);
if (!md->mii_bus) {
diff --git a/drivers/net/mdio/mdio-mux-meson-g12a.c b/drivers/net/mdio/mdio-mux-meson-g12a.c
index bf86c9c7a288..b8866bc3f2e8 100644
--- a/drivers/net/mdio/mdio-mux-meson-g12a.c
+++ b/drivers/net/mdio/mdio-mux-meson-g12a.c
@@ -95,7 +95,7 @@ static int g12a_ephy_pll_enable(struct clk_hw *hw)
/* Poll on the digital lock instead of the usual analog lock
* This is done because bit 31 is unreliable on some SoC. Bit
- * 31 may indicate that the PLL is not lock eventhough the clock
+ * 31 may indicate that the PLL is not locked even though the clock
* is actually running
*/
return readl_poll_timeout(pll->base + ETH_PLL_CTL0, val,
diff --git a/drivers/net/mdio/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c
index 8ce99c4888e1..e096e68ac667 100644
--- a/drivers/net/mdio/mdio-octeon.c
+++ b/drivers/net/mdio/mdio-octeon.c
@@ -71,7 +71,6 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
return 0;
fail_register:
- mdiobus_free(bus->mii_bus);
smi_en.u64 = 0;
oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
return err;
@@ -85,7 +84,6 @@ static int octeon_mdiobus_remove(struct platform_device *pdev)
bus = platform_get_drvdata(pdev);
mdiobus_unregister(bus->mii_bus);
- mdiobus_free(bus->mii_bus);
smi_en.u64 = 0;
oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
return 0;
diff --git a/drivers/net/mdio/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c
index cb1761693b69..822d2cdd2f35 100644
--- a/drivers/net/mdio/mdio-thunder.c
+++ b/drivers/net/mdio/mdio-thunder.c
@@ -126,7 +126,6 @@ static void thunder_mdiobus_pci_remove(struct pci_dev *pdev)
continue;
mdiobus_unregister(bus->mii_bus);
- mdiobus_free(bus->mii_bus);
oct_mdio_writeq(0, bus->register_base + SMI_EN);
}
pci_release_regions(pdev);
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 094494a68ddf..8744b1e1c2b1 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -10,6 +10,7 @@
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/fwnode_mdio.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
@@ -29,123 +30,22 @@ MODULE_LICENSE("GPL");
* ethernet-phy-idAAAA.BBBB */
static int of_get_phy_id(struct device_node *device, u32 *phy_id)
{
- struct property *prop;
- const char *cp;
- unsigned int upper, lower;
-
- of_property_for_each_string(device, "compatible", prop, cp) {
- if (sscanf(cp, "ethernet-phy-id%4x.%4x", &upper, &lower) == 2) {
- *phy_id = ((upper & 0xFFFF) << 16) | (lower & 0xFFFF);
- return 0;
- }
- }
- return -EINVAL;
-}
-
-static struct mii_timestamper *of_find_mii_timestamper(struct device_node *node)
-{
- struct of_phandle_args arg;
- int err;
-
- err = of_parse_phandle_with_fixed_args(node, "timestamper", 1, 0, &arg);
-
- if (err == -ENOENT)
- return NULL;
- else if (err)
- return ERR_PTR(err);
-
- if (arg.args_count != 1)
- return ERR_PTR(-EINVAL);
-
- return register_mii_timestamper(arg.np, arg.args[0]);
+ return fwnode_get_phy_id(of_fwnode_handle(device), phy_id);
}
int of_mdiobus_phy_device_register(struct mii_bus *mdio, struct phy_device *phy,
- struct device_node *child, u32 addr)
+ struct device_node *child, u32 addr)
{
- int rc;
-
- rc = of_irq_get(child, 0);
- if (rc == -EPROBE_DEFER)
- return rc;
-
- if (rc > 0) {
- phy->irq = rc;
- mdio->irq[addr] = rc;
- } else {
- phy->irq = mdio->irq[addr];
- }
-
- if (of_property_read_bool(child, "broken-turn-around"))
- mdio->phy_ignore_ta_mask |= 1 << addr;
-
- of_property_read_u32(child, "reset-assert-us",
- &phy->mdio.reset_assert_delay);
- of_property_read_u32(child, "reset-deassert-us",
- &phy->mdio.reset_deassert_delay);
-
- /* Associate the OF node with the device structure so it
- * can be looked up later */
- of_node_get(child);
- phy->mdio.dev.of_node = child;
- phy->mdio.dev.fwnode = of_fwnode_handle(child);
-
- /* All data is now stored in the phy struct;
- * register it */
- rc = phy_device_register(phy);
- if (rc) {
- of_node_put(child);
- return rc;
- }
-
- dev_dbg(&mdio->dev, "registered phy %pOFn at address %i\n",
- child, addr);
- return 0;
+ return fwnode_mdiobus_phy_device_register(mdio, phy,
+ of_fwnode_handle(child),
+ addr);
}
EXPORT_SYMBOL(of_mdiobus_phy_device_register);
static int of_mdiobus_register_phy(struct mii_bus *mdio,
struct device_node *child, u32 addr)
{
- struct mii_timestamper *mii_ts;
- struct phy_device *phy;
- bool is_c45;
- int rc;
- u32 phy_id;
-
- mii_ts = of_find_mii_timestamper(child);
- if (IS_ERR(mii_ts))
- return PTR_ERR(mii_ts);
-
- is_c45 = of_device_is_compatible(child,
- "ethernet-phy-ieee802.3-c45");
-
- if (!is_c45 && !of_get_phy_id(child, &phy_id))
- phy = phy_device_create(mdio, addr, phy_id, 0, NULL);
- else
- phy = get_phy_device(mdio, addr, is_c45);
- if (IS_ERR(phy)) {
- if (mii_ts)
- unregister_mii_timestamper(mii_ts);
- return PTR_ERR(phy);
- }
-
- rc = of_mdiobus_phy_device_register(mdio, phy, child, addr);
- if (rc) {
- if (mii_ts)
- unregister_mii_timestamper(mii_ts);
- phy_device_free(phy);
- return rc;
- }
-
- /* phy->mii_ts may already be defined by the PHY driver. A
- * mii_timestamper probed via the device tree will still have
- * precedence.
- */
- if (mii_ts)
- phy->mii_ts = mii_ts;
-
- return 0;
+ return fwnode_mdiobus_register_phy(mdio, of_fwnode_handle(child), addr);
}
static int of_mdiobus_register_device(struct mii_bus *mdio,
@@ -347,16 +247,7 @@ EXPORT_SYMBOL(of_mdiobus_register);
*/
struct mdio_device *of_mdio_find_device(struct device_node *np)
{
- struct device *d;
-
- if (!np)
- return NULL;
-
- d = bus_find_device_by_of_node(&mdio_bus_type, np);
- if (!d)
- return NULL;
-
- return to_mdio_device(d);
+ return fwnode_mdio_find_device(of_fwnode_handle(np));
}
EXPORT_SYMBOL(of_mdio_find_device);
@@ -369,18 +260,7 @@ EXPORT_SYMBOL(of_mdio_find_device);
*/
struct phy_device *of_phy_find_device(struct device_node *phy_np)
{
- struct mdio_device *mdiodev;
-
- mdiodev = of_mdio_find_device(phy_np);
- if (!mdiodev)
- return NULL;
-
- if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY)
- return to_phy_device(&mdiodev->dev);
-
- put_device(&mdiodev->dev);
-
- return NULL;
+ return fwnode_phy_find_device(of_fwnode_handle(phy_np));
}
EXPORT_SYMBOL(of_phy_find_device);
@@ -466,7 +346,7 @@ EXPORT_SYMBOL(of_phy_get_and_connect);
* of_phy_is_fixed_link() and of_phy_register_fixed_link() must
* support two DT bindings:
* - the old DT binding, where 'fixed-link' was a property with 5
- * cells encoding various informations about the fixed PHY
+ * cells encoding various information about the fixed PHY
* - the new DT binding, where 'fixed-link' is a sub-node of the
* Ethernet device.
*/
diff --git a/drivers/net/mhi/net.c b/drivers/net/mhi/net.c
index 0d8293a47a56..832d9de42f62 100644
--- a/drivers/net/mhi/net.c
+++ b/drivers/net/mhi/net.c
@@ -11,6 +11,7 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>
+#include <linux/wwan.h>
#include "mhi.h"
@@ -18,6 +19,12 @@
#define MHI_NET_MAX_MTU 0xffff
#define MHI_NET_DEFAULT_MTU 0x4000
+/* When set to false, the default netdev (link 0) is not created, and it's up
+ * to the user to create the link (via wwan rtnetlink).
+ */
+static bool create_default_iface = true;
+module_param(create_default_iface, bool, 0);
+
struct mhi_device_info {
const char *netname;
const struct mhi_net_proto *proto;
@@ -295,32 +302,33 @@ static void mhi_net_rx_refill_work(struct work_struct *work)
schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
}
-static struct device_type wwan_type = {
- .name = "wwan",
-};
-
-static int mhi_net_probe(struct mhi_device *mhi_dev,
- const struct mhi_device_id *id)
+static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
+ struct netlink_ext_ack *extack)
{
- const struct mhi_device_info *info = (struct mhi_device_info *)id->driver_data;
- struct device *dev = &mhi_dev->dev;
+ const struct mhi_device_info *info;
+ struct mhi_device *mhi_dev = ctxt;
struct mhi_net_dev *mhi_netdev;
- struct net_device *ndev;
int err;
- ndev = alloc_netdev(sizeof(*mhi_netdev), info->netname,
- NET_NAME_PREDICTABLE, mhi_net_setup);
- if (!ndev)
- return -ENOMEM;
+ info = (struct mhi_device_info *)mhi_dev->id->driver_data;
+
+ /* For now we only support one link (link context 0); the driver must
+ * be reworked to break the 1:1 relationship for net MBIM, and to
+ * forward the setup call to rmnet (QMAP) otherwise.
+ */
+ if (if_id != 0)
+ return -EINVAL;
+
+ if (dev_get_drvdata(&mhi_dev->dev))
+ return -EBUSY;
mhi_netdev = netdev_priv(ndev);
- dev_set_drvdata(dev, mhi_netdev);
+
+ dev_set_drvdata(&mhi_dev->dev, mhi_netdev);
mhi_netdev->ndev = ndev;
mhi_netdev->mdev = mhi_dev;
mhi_netdev->skbagg_head = NULL;
mhi_netdev->proto = info->proto;
- SET_NETDEV_DEV(ndev, &mhi_dev->dev);
- SET_NETDEV_DEVTYPE(ndev, &wwan_type);
INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
u64_stats_init(&mhi_netdev->stats.rx_syncp);
@@ -334,7 +342,10 @@ static int mhi_net_probe(struct mhi_device *mhi_dev,
/* Number of transfer descriptors determines size of the queue */
mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
- err = register_netdev(ndev);
+ if (extack)
+ err = register_netdevice(ndev);
+ else
+ err = register_netdev(ndev);
if (err)
goto out_err;
@@ -347,23 +358,89 @@ static int mhi_net_probe(struct mhi_device *mhi_dev,
return 0;
out_err_proto:
- unregister_netdev(ndev);
+ unregister_netdevice(ndev);
out_err:
free_netdev(ndev);
return err;
}
-static void mhi_net_remove(struct mhi_device *mhi_dev)
+static void mhi_net_dellink(void *ctxt, struct net_device *ndev,
+ struct list_head *head)
{
- struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
+ struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
+ struct mhi_device *mhi_dev = ctxt;
- unregister_netdev(mhi_netdev->ndev);
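+ /* A non-NULL list head means rtnetlink is batching removals with the
+ * RTNL already held; otherwise take and release the lock ourselves.
+ */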
+ if (head)
+ unregister_netdevice_queue(ndev, head);
+ else
+ unregister_netdev(ndev);
- mhi_unprepare_from_transfer(mhi_netdev->mdev);
+ mhi_unprepare_from_transfer(mhi_dev);
kfree_skb(mhi_netdev->skbagg_head);
- free_netdev(mhi_netdev->ndev);
+ dev_set_drvdata(&mhi_dev->dev, NULL);
+}
+
+static const struct wwan_ops mhi_wwan_ops = {
+ .owner = THIS_MODULE,
+ .priv_size = sizeof(struct mhi_net_dev),
+ .setup = mhi_net_setup,
+ .newlink = mhi_net_newlink,
+ .dellink = mhi_net_dellink,
+};
+
+static int mhi_net_probe(struct mhi_device *mhi_dev,
+ const struct mhi_device_id *id)
+{
+ const struct mhi_device_info *info = (struct mhi_device_info *)id->driver_data;
+ struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
+ struct net_device *ndev;
+ int err;
+
+ err = wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_wwan_ops, mhi_dev);
+ if (err)
+ return err;
+
+ if (!create_default_iface)
+ return 0;
+
+ /* Create a default interface, which is used as either the RMNET
+ * real-dev, MBIM link 0, or ip link 0.
+ */
+ ndev = alloc_netdev(sizeof(struct mhi_net_dev), info->netname,
+ NET_NAME_PREDICTABLE, mhi_net_setup);
+ if (!ndev) {
+ err = -ENOMEM;
+ goto err_unregister;
+ }
+
+ SET_NETDEV_DEV(ndev, &mhi_dev->dev);
+
+ err = mhi_net_newlink(mhi_dev, ndev, 0, NULL);
+ if (err)
+ goto err_release;
+
+ return 0;
+
+err_release:
+ free_netdev(ndev);
+err_unregister:
+ wwan_unregister_ops(&cntrl->mhi_dev->dev);
+
+ return err;
+}
+
+static void mhi_net_remove(struct mhi_device *mhi_dev)
+{
+ struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
+ struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
+
+ /* rtnetlink takes care of removing remaining links */
+ wwan_unregister_ops(&cntrl->mhi_dev->dev);
+
+ if (create_default_iface)
+ mhi_net_dellink(mhi_dev, mhi_netdev->ndev, NULL);
}
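
The extack pointer doubles as a locking hint in mhi_net_newlink() above: the wwan rtnetlink core calls it with the RTNL held and a non-NULL extack, while the probe path passes NULL. A minimal sketch of that convention in isolation:

/* Illustration only; mirrors the branch in mhi_net_newlink() */
static int demo_register_ndev(struct net_device *ndev,
			      struct netlink_ext_ack *extack)
{
	if (extack) /* rtnetlink caller: RTNL is already held */
		return register_netdevice(ndev);

	return register_netdev(ndev); /* probe path: takes the RTNL itself */
}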
static const struct mhi_device_info mhi_hwip0 = {
diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
index 0e9511661601..ccec29970d5b 100644
--- a/drivers/net/netdevsim/bus.c
+++ b/drivers/net/netdevsim/bus.c
@@ -27,21 +27,34 @@ static struct nsim_bus_dev *to_nsim_bus_dev(struct device *dev)
static int nsim_bus_dev_vfs_enable(struct nsim_bus_dev *nsim_bus_dev,
unsigned int num_vfs)
{
- nsim_bus_dev->vfconfigs = kcalloc(num_vfs,
- sizeof(struct nsim_vf_config),
- GFP_KERNEL | __GFP_NOWARN);
+ struct nsim_dev *nsim_dev;
+ int err = 0;
+
+ if (nsim_bus_dev->max_vfs < num_vfs)
+ return -ENOMEM;
+
if (!nsim_bus_dev->vfconfigs)
return -ENOMEM;
nsim_bus_dev->num_vfs = num_vfs;
- return 0;
+ nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
+ if (nsim_esw_mode_is_switchdev(nsim_dev)) {
+ err = nsim_esw_switchdev_enable(nsim_dev, NULL);
+ if (err)
+ nsim_bus_dev->num_vfs = 0;
+ }
+
+ return err;
}
-static void nsim_bus_dev_vfs_disable(struct nsim_bus_dev *nsim_bus_dev)
+void nsim_bus_dev_vfs_disable(struct nsim_bus_dev *nsim_bus_dev)
{
- kfree(nsim_bus_dev->vfconfigs);
- nsim_bus_dev->vfconfigs = NULL;
+ struct nsim_dev *nsim_dev;
+
nsim_bus_dev->num_vfs = 0;
+ nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
+ if (nsim_esw_mode_is_switchdev(nsim_dev))
+ nsim_esw_legacy_enable(nsim_dev, NULL);
}
static ssize_t
@@ -56,7 +69,7 @@ nsim_bus_dev_numvfs_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- rtnl_lock();
+ mutex_lock(&nsim_bus_dev->vfs_lock);
if (nsim_bus_dev->num_vfs == num_vfs)
goto exit_good;
if (nsim_bus_dev->num_vfs && num_vfs) {
@@ -74,7 +87,7 @@ nsim_bus_dev_numvfs_store(struct device *dev, struct device_attribute *attr,
exit_good:
ret = count;
exit_unlock:
- rtnl_unlock();
+ mutex_unlock(&nsim_bus_dev->vfs_lock);
return ret;
}
@@ -92,6 +105,79 @@ static struct device_attribute nsim_bus_dev_numvfs_attr =
__ATTR(sriov_numvfs, 0664, nsim_bus_dev_numvfs_show,
nsim_bus_dev_numvfs_store);
+ssize_t nsim_bus_dev_max_vfs_read(struct file *file,
+ char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct nsim_bus_dev *nsim_bus_dev = file->private_data;
+ char buf[11];
+ ssize_t len;
+
+ len = snprintf(buf, sizeof(buf), "%u\n", nsim_bus_dev->max_vfs);
+ if (len < 0)
+ return len;
+
+ return simple_read_from_buffer(data, count, ppos, buf, len);
+}
+
+ssize_t nsim_bus_dev_max_vfs_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct nsim_bus_dev *nsim_bus_dev = file->private_data;
+ struct nsim_vf_config *vfconfigs;
+ ssize_t ret;
+ char buf[10];
+ u32 val;
+
+ if (*ppos != 0)
+ return 0;
+
+ if (count >= sizeof(buf))
+ return -ENOSPC;
+
+ mutex_lock(&nsim_bus_dev->vfs_lock);
+ /* Reject if VFs are configured */
+ if (nsim_bus_dev->num_vfs) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ ret = copy_from_user(buf, data, count);
+ if (ret) {
+ ret = -EFAULT;
+ goto unlock;
+ }
+
+ buf[count] = '\0';
+ ret = kstrtouint(buf, 10, &val);
+ if (ret) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ /* max_vfs limited by the maximum number of provided port indexes */
+ if (val > NSIM_DEV_VF_PORT_INDEX_MAX - NSIM_DEV_VF_PORT_INDEX_BASE) {
+ ret = -ERANGE;
+ goto unlock;
+ }
+
+ vfconfigs = kcalloc(val, sizeof(struct nsim_vf_config), GFP_KERNEL | __GFP_NOWARN);
+ if (!vfconfigs) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ kfree(nsim_bus_dev->vfconfigs);
+ nsim_bus_dev->vfconfigs = vfconfigs;
+ nsim_bus_dev->max_vfs = val;
+ *ppos += count;
+ ret = count;
+unlock:
+ mutex_unlock(&nsim_bus_dev->vfs_lock);
+ return ret;
+}
+
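
The file accepts a plain decimal value and swaps the whole vfconfigs array under vfs_lock. A small userspace sketch exercising it (the debugfs mount point and device name are assumptions):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/netdevsim/netdevsim0/max_vfs";
	char buf[16];
	ssize_t n;
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return 1;
	/* Resize the VF config table; fails with EBUSY while VFs are enabled */
	if (write(fd, "8\n", 2) < 0)
		perror("write max_vfs");
	n = pread(fd, buf, sizeof(buf) - 1, 0);
	if (n > 0) {
		buf[n] = '\0';
		printf("max_vfs: %s", buf);
	}
	close(fd);
	return 0;
}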
static ssize_t
new_port_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -113,7 +199,7 @@ new_port_store(struct device *dev, struct device_attribute *attr,
mutex_lock(&nsim_bus_dev->nsim_bus_reload_lock);
devlink_reload_disable(devlink);
- ret = nsim_dev_port_add(nsim_bus_dev, port_index);
+ ret = nsim_dev_port_add(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index);
devlink_reload_enable(devlink);
mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return ret ? ret : count;
@@ -142,7 +228,7 @@ del_port_store(struct device *dev, struct device_attribute *attr,
mutex_lock(&nsim_bus_dev->nsim_bus_reload_lock);
devlink_reload_disable(devlink);
- ret = nsim_dev_port_del(nsim_bus_dev, port_index);
+ ret = nsim_dev_port_del(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index);
devlink_reload_enable(devlink);
mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return ret ? ret : count;
@@ -168,9 +254,6 @@ static const struct attribute_group *nsim_bus_dev_attr_groups[] = {
static void nsim_bus_dev_release(struct device *dev)
{
- struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
-
- nsim_bus_dev_vfs_disable(nsim_bus_dev);
}
static struct device_type nsim_bus_dev_type = {
@@ -311,6 +394,8 @@ static struct bus_type nsim_bus = {
.num_vf = nsim_num_vf,
};
+#define NSIM_BUS_DEV_MAX_VFS 4
+
static struct nsim_bus_dev *
nsim_bus_dev_new(unsigned int id, unsigned int port_count)
{
@@ -329,15 +414,28 @@ nsim_bus_dev_new(unsigned int id, unsigned int port_count)
nsim_bus_dev->dev.type = &nsim_bus_dev_type;
nsim_bus_dev->port_count = port_count;
nsim_bus_dev->initial_net = current->nsproxy->net_ns;
+ nsim_bus_dev->max_vfs = NSIM_BUS_DEV_MAX_VFS;
mutex_init(&nsim_bus_dev->nsim_bus_reload_lock);
+ mutex_init(&nsim_bus_dev->vfs_lock);
/* Disallow using nsim_bus_dev */
smp_store_release(&nsim_bus_dev->init, false);
+ nsim_bus_dev->vfconfigs = kcalloc(nsim_bus_dev->max_vfs,
+ sizeof(struct nsim_vf_config),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!nsim_bus_dev->vfconfigs) {
+ err = -ENOMEM;
+ goto err_nsim_bus_dev_id_free;
+ }
+
err = device_register(&nsim_bus_dev->dev);
if (err)
- goto err_nsim_bus_dev_id_free;
+ goto err_nsim_vfs_free;
+
return nsim_bus_dev;
+err_nsim_vfs_free:
+ kfree(nsim_bus_dev->vfconfigs);
err_nsim_bus_dev_id_free:
ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
err_nsim_bus_dev_free:
@@ -351,6 +449,7 @@ static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev)
smp_store_release(&nsim_bus_dev->init, false);
device_unregister(&nsim_bus_dev->dev);
ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
+ kfree(nsim_bus_dev->vfconfigs);
kfree(nsim_bus_dev);
}
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 6189a4c0d39e..d85521989753 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -35,6 +35,25 @@
#include "netdevsim.h"
+static unsigned int
+nsim_dev_port_index(enum nsim_dev_port_type type, unsigned int port_index)
+{
+ switch (type) {
+ case NSIM_DEV_PORT_TYPE_VF:
+ port_index = NSIM_DEV_VF_PORT_INDEX_BASE + port_index;
+ break;
+ case NSIM_DEV_PORT_TYPE_PF:
+ break;
+ }
+
+ return port_index;
+}
+
+static inline unsigned int nsim_dev_port_index_to_vf_index(unsigned int port_index)
+{
+ return port_index - NSIM_DEV_VF_PORT_INDEX_BASE;
+}
+
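A worked example of the mapping above, with NSIM_DEV_VF_PORT_INDEX_BASE of 128 (sketch only):

static void demo_port_index(void)
{
	/* PF ports keep their index; VF ports are offset by the base */
	unsigned int pf = nsim_dev_port_index(NSIM_DEV_PORT_TYPE_PF, 3); /* 3 */
	unsigned int vf = nsim_dev_port_index(NSIM_DEV_PORT_TYPE_VF, 3); /* 131 */

	WARN_ON(pf != 3 || nsim_dev_port_index_to_vf_index(vf) != 3);
}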
static struct dentry *nsim_dev_ddir;
#define NSIM_DEV_DUMMY_REGION_SIZE (1024 * 32)
@@ -192,9 +211,18 @@ static const struct file_operations nsim_dev_trap_fa_cookie_fops = {
.owner = THIS_MODULE,
};
+static const struct file_operations nsim_dev_max_vfs_fops = {
+ .open = simple_open,
+ .read = nsim_bus_dev_max_vfs_read,
+ .write = nsim_bus_dev_max_vfs_write,
+ .llseek = generic_file_llseek,
+ .owner = THIS_MODULE,
+};
+
static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
{
char dev_ddir_name[sizeof(DRV_NAME) + 10];
+ int err;
sprintf(dev_ddir_name, DRV_NAME "%u", nsim_dev->nsim_bus_dev->dev.id);
nsim_dev->ddir = debugfs_create_dir(dev_ddir_name, nsim_dev_ddir);
@@ -231,30 +259,84 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
debugfs_create_bool("fail_trap_policer_counter_get", 0600,
nsim_dev->ddir,
&nsim_dev->fail_trap_policer_counter_get);
+ nsim_dev->max_vfs = debugfs_create_file("max_vfs",
+ 0600,
+ nsim_dev->ddir,
+ nsim_dev->nsim_bus_dev,
+ &nsim_dev_max_vfs_fops);
+ nsim_dev->nodes_ddir = debugfs_create_dir("rate_nodes", nsim_dev->ddir);
+ if (IS_ERR(nsim_dev->nodes_ddir)) {
+ err = PTR_ERR(nsim_dev->nodes_ddir);
+ goto err_out;
+ }
+ debugfs_create_bool("fail_trap_counter_get", 0600,
+ nsim_dev->ddir,
+ &nsim_dev->fail_trap_counter_get);
nsim_udp_tunnels_debugfs_create(nsim_dev);
return 0;
+
+err_out:
+ debugfs_remove_recursive(nsim_dev->ports_ddir);
+ debugfs_remove_recursive(nsim_dev->ddir);
+ return err;
}
static void nsim_dev_debugfs_exit(struct nsim_dev *nsim_dev)
{
+ debugfs_remove_recursive(nsim_dev->nodes_ddir);
debugfs_remove_recursive(nsim_dev->ports_ddir);
debugfs_remove_recursive(nsim_dev->ddir);
}
+static ssize_t nsim_dev_rate_parent_read(struct file *file,
+ char __user *data,
+ size_t count, loff_t *ppos)
+{
+ char **name_ptr = file->private_data;
+ size_t len;
+
+ if (!*name_ptr)
+ return 0;
+
+ len = strlen(*name_ptr);
+ return simple_read_from_buffer(data, count, ppos, *name_ptr, len);
+}
+
+static const struct file_operations nsim_dev_rate_parent_fops = {
+ .open = simple_open,
+ .read = nsim_dev_rate_parent_read,
+ .llseek = generic_file_llseek,
+ .owner = THIS_MODULE,
+};
+
static int nsim_dev_port_debugfs_init(struct nsim_dev *nsim_dev,
struct nsim_dev_port *nsim_dev_port)
{
+ struct nsim_bus_dev *nsim_bus_dev = nsim_dev->nsim_bus_dev;
+ unsigned int port_index = nsim_dev_port->port_index;
char port_ddir_name[16];
char dev_link_name[32];
- sprintf(port_ddir_name, "%u", nsim_dev_port->port_index);
+ sprintf(port_ddir_name, "%u", port_index);
nsim_dev_port->ddir = debugfs_create_dir(port_ddir_name,
nsim_dev->ports_ddir);
if (IS_ERR(nsim_dev_port->ddir))
return PTR_ERR(nsim_dev_port->ddir);
- sprintf(dev_link_name, "../../../" DRV_NAME "%u",
- nsim_dev->nsim_bus_dev->dev.id);
+ sprintf(dev_link_name, "../../../" DRV_NAME "%u", nsim_bus_dev->dev.id);
+ if (nsim_dev_port_is_vf(nsim_dev_port)) {
+ unsigned int vf_id = nsim_dev_port_index_to_vf_index(port_index);
+
+ debugfs_create_u16("tx_share", 0400, nsim_dev_port->ddir,
+ &nsim_bus_dev->vfconfigs[vf_id].min_tx_rate);
+ debugfs_create_u16("tx_max", 0400, nsim_dev_port->ddir,
+ &nsim_bus_dev->vfconfigs[vf_id].max_tx_rate);
+ nsim_dev_port->rate_parent = debugfs_create_file("rate_parent",
+ 0400,
+ nsim_dev_port->ddir,
+ &nsim_dev_port->parent_name,
+ &nsim_dev_rate_parent_fops);
+ }
debugfs_create_symlink("dev", nsim_dev_port->ddir, dev_link_name);
return 0;
@@ -407,6 +489,74 @@ static void nsim_dev_dummy_region_exit(struct nsim_dev *nsim_dev)
devlink_region_destroy(nsim_dev->dummy_region);
}
+static void __nsim_dev_port_del(struct nsim_dev_port *nsim_dev_port);
+int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev, struct netlink_ext_ack *extack)
+{
+ struct devlink *devlink = priv_to_devlink(nsim_dev);
+ struct nsim_dev_port *nsim_dev_port, *tmp;
+
+ devlink_rate_nodes_destroy(devlink);
+ mutex_lock(&nsim_dev->port_list_lock);
+ list_for_each_entry_safe(nsim_dev_port, tmp, &nsim_dev->port_list, list)
+ if (nsim_dev_port_is_vf(nsim_dev_port))
+ __nsim_dev_port_del(nsim_dev_port);
+ mutex_unlock(&nsim_dev->port_list_lock);
+ nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+ return 0;
+}
+
+int nsim_esw_switchdev_enable(struct nsim_dev *nsim_dev, struct netlink_ext_ack *extack)
+{
+ struct nsim_bus_dev *nsim_bus_dev = nsim_dev->nsim_bus_dev;
+ int i, err;
+
+ for (i = 0; i < nsim_bus_dev->num_vfs; i++) {
+ err = nsim_dev_port_add(nsim_bus_dev, NSIM_DEV_PORT_TYPE_VF, i);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to initialize VFs' netdevsim ports");
+ pr_err("Failed to initialize VF id=%d. %d.\n", i, err);
+ goto err_port_add_vfs;
+ }
+ }
+ nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
+ return 0;
+
+err_port_add_vfs:
+ for (i--; i >= 0; i--)
+ nsim_dev_port_del(nsim_bus_dev, NSIM_DEV_PORT_TYPE_VF, i);
+ return err;
+}
+
+static int nsim_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ int err = 0;
+
+ mutex_lock(&nsim_dev->nsim_bus_dev->vfs_lock);
+ if (mode == nsim_dev->esw_mode)
+ goto unlock;
+
+ if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
+ err = nsim_esw_legacy_enable(nsim_dev, extack);
+ else if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ err = nsim_esw_switchdev_enable(nsim_dev, extack);
+ else
+ err = -EINVAL;
+
+unlock:
+ mutex_unlock(&nsim_dev->nsim_bus_dev->vfs_lock);
+ return err;
+}
+
+static int nsim_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+
+ *mode = nsim_dev->esw_mode;
+ return 0;
+}
+
struct nsim_trap_item {
void *trap_ctx;
enum devlink_trap_action action;
@@ -416,6 +566,7 @@ struct nsim_trap_data {
struct delayed_work trap_report_dw;
struct nsim_trap_item *trap_items_arr;
u64 *trap_policers_cnt_arr;
+ u64 trap_pkt_cnt;
struct nsim_dev *nsim_dev;
spinlock_t trap_lock; /* Protects trap_items_arr */
};
@@ -892,7 +1043,190 @@ nsim_dev_devlink_trap_policer_counter_get(struct devlink *devlink,
return 0;
}
+#define NSIM_LINK_SPEED_MAX 5000 /* Mbps */
+#define NSIM_LINK_SPEED_UNIT 125000 /* 1 Mbps given in bytes/sec to avoid
+ * u64 overflow during conversion from
+ * bytes to bits.
+ */
+
+static int nsim_rate_bytes_to_units(char *name, u64 *rate, struct netlink_ext_ack *extack)
+{
+ u64 val;
+ u32 rem;
+
+ val = div_u64_rem(*rate, NSIM_LINK_SPEED_UNIT, &rem);
+ if (rem) {
+ pr_err("%s rate value %lluBps not in link speed units of 1Mbps.\n",
+ name, *rate);
+ NL_SET_ERR_MSG_MOD(extack, "TX rate value not in link speed units of 1Mbps.");
+ return -EINVAL;
+ }
+
+ if (val > NSIM_LINK_SPEED_MAX) {
+ pr_err("%s rate value %lluMbps exceed link maximum speed 5000Mbps.\n",
+ name, val);
+ NL_SET_ERR_MSG_MOD(extack, "TX rate value exceed link maximum speed 5000Mbps.");
+ return -EINVAL;
+ }
+ *rate = val;
+ return 0;
+}
+
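
A worked example of the conversion above: devlink hands rates over in bytes/sec, and 1 Mbps corresponds to 125000 bytes/sec, so the 5000 Mbps cap is 625000000 bytes/sec (sketch only):

static void demo_rate_units(void)
{
	u64 rate = 1250000; /* 10 Mbps expressed in bytes/sec */

	if (!nsim_rate_bytes_to_units("tx_max", &rate, NULL))
		pr_info("tx_max = %llu Mbps\n", rate); /* prints 10 */

	/* 1250001 would fail: not a whole number of Mbps (-EINVAL) */
}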
+static int nsim_leaf_tx_share_set(struct devlink_rate *devlink_rate, void *priv,
+ u64 tx_share, struct netlink_ext_ack *extack)
+{
+ struct nsim_dev_port *nsim_dev_port = priv;
+ struct nsim_bus_dev *nsim_bus_dev = nsim_dev_port->ns->nsim_bus_dev;
+ int vf_id = nsim_dev_port_index_to_vf_index(nsim_dev_port->port_index);
+ int err;
+
+ err = nsim_rate_bytes_to_units("tx_share", &tx_share, extack);
+ if (err)
+ return err;
+
+ nsim_bus_dev->vfconfigs[vf_id].min_tx_rate = tx_share;
+ return 0;
+}
+
+static int nsim_leaf_tx_max_set(struct devlink_rate *devlink_rate, void *priv,
+ u64 tx_max, struct netlink_ext_ack *extack)
+{
+ struct nsim_dev_port *nsim_dev_port = priv;
+ struct nsim_bus_dev *nsim_bus_dev = nsim_dev_port->ns->nsim_bus_dev;
+ int vf_id = nsim_dev_port_index_to_vf_index(nsim_dev_port->port_index);
+ int err;
+
+ err = nsim_rate_bytes_to_units("tx_max", &tx_max, extack);
+ if (err)
+ return err;
+
+ nsim_bus_dev->vfconfigs[vf_id].max_tx_rate = tx_max;
+ return 0;
+}
+
+struct nsim_rate_node {
+ struct dentry *ddir;
+ struct dentry *rate_parent;
+ char *parent_name;
+ u16 tx_share;
+ u16 tx_max;
+};
+
+static int nsim_node_tx_share_set(struct devlink_rate *devlink_rate, void *priv,
+ u64 tx_share, struct netlink_ext_ack *extack)
+{
+ struct nsim_rate_node *nsim_node = priv;
+ int err;
+
+ err = nsim_rate_bytes_to_units("tx_share", &tx_share, extack);
+ if (err)
+ return err;
+
+ nsim_node->tx_share = tx_share;
+ return 0;
+}
+
+static int nsim_node_tx_max_set(struct devlink_rate *devlink_rate, void *priv,
+ u64 tx_max, struct netlink_ext_ack *extack)
+{
+ struct nsim_rate_node *nsim_node = priv;
+ int err;
+
+ err = nsim_rate_bytes_to_units("tx_max", &tx_max, extack);
+ if (err)
+ return err;
+
+ nsim_node->tx_max = tx_max;
+ return 0;
+}
+
+static int nsim_rate_node_new(struct devlink_rate *node, void **priv,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(node->devlink);
+ struct nsim_rate_node *nsim_node;
+
+ if (!nsim_esw_mode_is_switchdev(nsim_dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Node creation allowed only in switchdev mode.");
+ return -EOPNOTSUPP;
+ }
+
+ nsim_node = kzalloc(sizeof(*nsim_node), GFP_KERNEL);
+ if (!nsim_node)
+ return -ENOMEM;
+
+ nsim_node->ddir = debugfs_create_dir(node->name, nsim_dev->nodes_ddir);
+
+ debugfs_create_u16("tx_share", 0400, nsim_node->ddir, &nsim_node->tx_share);
+ debugfs_create_u16("tx_max", 0400, nsim_node->ddir, &nsim_node->tx_max);
+ nsim_node->rate_parent = debugfs_create_file("rate_parent", 0400,
+ nsim_node->ddir,
+ &nsim_node->parent_name,
+ &nsim_dev_rate_parent_fops);
+
+ *priv = nsim_node;
+ return 0;
+}
+
+static int nsim_rate_node_del(struct devlink_rate *node, void *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_rate_node *nsim_node = priv;
+
+ debugfs_remove(nsim_node->rate_parent);
+ debugfs_remove_recursive(nsim_node->ddir);
+ kfree(nsim_node);
+ return 0;
+}
+
+static int nsim_rate_leaf_parent_set(struct devlink_rate *child,
+ struct devlink_rate *parent,
+ void *priv_child, void *priv_parent,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev_port *nsim_dev_port = priv_child;
+
+ if (parent)
+ nsim_dev_port->parent_name = parent->name;
+ else
+ nsim_dev_port->parent_name = NULL;
+ return 0;
+}
+
+static int nsim_rate_node_parent_set(struct devlink_rate *child,
+ struct devlink_rate *parent,
+ void *priv_child, void *priv_parent,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_rate_node *nsim_node = priv_child;
+
+ if (parent)
+ nsim_node->parent_name = parent->name;
+ else
+ nsim_node->parent_name = NULL;
+ return 0;
+}
+
+static int
+nsim_dev_devlink_trap_hw_counter_get(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ u64 *p_drops)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ u64 *cnt;
+
+ if (nsim_dev->fail_trap_counter_get)
+ return -EINVAL;
+
+ cnt = &nsim_dev->trap_data->trap_pkt_cnt;
+ *p_drops = (*cnt)++;
+
+ return 0;
+}
+
static const struct devlink_ops nsim_dev_devlink_ops = {
+ .eswitch_mode_set = nsim_devlink_eswitch_mode_set,
+ .eswitch_mode_get = nsim_devlink_eswitch_mode_get,
.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT |
DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
@@ -905,32 +1239,52 @@ static const struct devlink_ops nsim_dev_devlink_ops = {
.trap_group_set = nsim_dev_devlink_trap_group_set,
.trap_policer_set = nsim_dev_devlink_trap_policer_set,
.trap_policer_counter_get = nsim_dev_devlink_trap_policer_counter_get,
+ .rate_leaf_tx_share_set = nsim_leaf_tx_share_set,
+ .rate_leaf_tx_max_set = nsim_leaf_tx_max_set,
+ .rate_node_tx_share_set = nsim_node_tx_share_set,
+ .rate_node_tx_max_set = nsim_node_tx_max_set,
+ .rate_node_new = nsim_rate_node_new,
+ .rate_node_del = nsim_rate_node_del,
+ .rate_leaf_parent_set = nsim_rate_leaf_parent_set,
+ .rate_node_parent_set = nsim_rate_node_parent_set,
+ .trap_drop_counter_get = nsim_dev_devlink_trap_hw_counter_get,
};
#define NSIM_DEV_MAX_MACS_DEFAULT 32
#define NSIM_DEV_TEST1_DEFAULT true
-static int __nsim_dev_port_add(struct nsim_dev *nsim_dev,
+static int __nsim_dev_port_add(struct nsim_dev *nsim_dev, enum nsim_dev_port_type type,
unsigned int port_index)
{
+ struct nsim_bus_dev *nsim_bus_dev = nsim_dev->nsim_bus_dev;
struct devlink_port_attrs attrs = {};
struct nsim_dev_port *nsim_dev_port;
struct devlink_port *devlink_port;
int err;
+ if (type == NSIM_DEV_PORT_TYPE_VF && !nsim_bus_dev->num_vfs)
+ return -EINVAL;
+
nsim_dev_port = kzalloc(sizeof(*nsim_dev_port), GFP_KERNEL);
if (!nsim_dev_port)
return -ENOMEM;
- nsim_dev_port->port_index = port_index;
+ nsim_dev_port->port_index = nsim_dev_port_index(type, port_index);
+ nsim_dev_port->port_type = type;
devlink_port = &nsim_dev_port->devlink_port;
- attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = port_index + 1;
+ if (nsim_dev_port_is_pf(nsim_dev_port)) {
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = port_index + 1;
+ } else {
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
+ attrs.pci_vf.pf = 0;
+ attrs.pci_vf.vf = port_index;
+ }
memcpy(attrs.switch_id.id, nsim_dev->switch_id.id, nsim_dev->switch_id.id_len);
attrs.switch_id.id_len = nsim_dev->switch_id.id_len;
devlink_port_attrs_set(devlink_port, &attrs);
err = devlink_port_register(priv_to_devlink(nsim_dev), devlink_port,
- port_index);
+ nsim_dev_port->port_index);
if (err)
goto err_port_free;
@@ -944,11 +1298,20 @@ static int __nsim_dev_port_add(struct nsim_dev *nsim_dev,
goto err_port_debugfs_exit;
}
+ if (nsim_dev_port_is_vf(nsim_dev_port)) {
+ err = devlink_rate_leaf_create(&nsim_dev_port->devlink_port,
+ nsim_dev_port);
+ if (err)
+ goto err_nsim_destroy;
+ }
+
devlink_port_type_eth_set(devlink_port, nsim_dev_port->ns->netdev);
list_add(&nsim_dev_port->list, &nsim_dev->port_list);
return 0;
+err_nsim_destroy:
+ nsim_destroy(nsim_dev_port->ns);
err_port_debugfs_exit:
nsim_dev_port_debugfs_exit(nsim_dev_port);
err_dl_port_unregister:
@@ -963,6 +1326,8 @@ static void __nsim_dev_port_del(struct nsim_dev_port *nsim_dev_port)
struct devlink_port *devlink_port = &nsim_dev_port->devlink_port;
list_del(&nsim_dev_port->list);
+ if (nsim_dev_port_is_vf(nsim_dev_port))
+ devlink_rate_leaf_destroy(&nsim_dev_port->devlink_port);
devlink_port_type_clear(devlink_port);
nsim_destroy(nsim_dev_port->ns);
nsim_dev_port_debugfs_exit(nsim_dev_port);
@@ -987,7 +1352,7 @@ static int nsim_dev_port_add_all(struct nsim_dev *nsim_dev,
int i, err;
for (i = 0; i < port_count; i++) {
- err = __nsim_dev_port_add(nsim_dev, i);
+ err = __nsim_dev_port_add(nsim_dev, NSIM_DEV_PORT_TYPE_PF, i);
if (err)
goto err_port_del_all;
}
@@ -1134,6 +1499,7 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
devlink_params_publish(devlink);
devlink_reload_enable(devlink);
+ nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_LEGACY;
return 0;
err_psample_exit:
@@ -1169,6 +1535,12 @@ static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
if (devlink_is_reload_failed(devlink))
return;
debugfs_remove(nsim_dev->take_snapshot);
+
+ mutex_lock(&nsim_dev->nsim_bus_dev->vfs_lock);
+ if (nsim_dev->nsim_bus_dev->num_vfs)
+ nsim_bus_dev_vfs_disable(nsim_dev->nsim_bus_dev);
+ mutex_unlock(&nsim_dev->nsim_bus_dev->vfs_lock);
+
nsim_dev_port_del_all(nsim_dev);
nsim_dev_psample_exit(nsim_dev);
nsim_dev_health_exit(nsim_dev);
@@ -1197,32 +1569,34 @@ void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev)
}
static struct nsim_dev_port *
-__nsim_dev_port_lookup(struct nsim_dev *nsim_dev, unsigned int port_index)
+__nsim_dev_port_lookup(struct nsim_dev *nsim_dev, enum nsim_dev_port_type type,
+ unsigned int port_index)
{
struct nsim_dev_port *nsim_dev_port;
+ port_index = nsim_dev_port_index(type, port_index);
list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list)
if (nsim_dev_port->port_index == port_index)
return nsim_dev_port;
return NULL;
}
-int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev,
+int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type type,
unsigned int port_index)
{
struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
int err;
mutex_lock(&nsim_dev->port_list_lock);
- if (__nsim_dev_port_lookup(nsim_dev, port_index))
+ if (__nsim_dev_port_lookup(nsim_dev, type, port_index))
err = -EEXIST;
else
- err = __nsim_dev_port_add(nsim_dev, port_index);
+ err = __nsim_dev_port_add(nsim_dev, type, port_index);
mutex_unlock(&nsim_dev->port_list_lock);
return err;
}
-int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
+int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type type,
unsigned int port_index)
{
struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
@@ -1230,7 +1604,7 @@ int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
int err = 0;
mutex_lock(&nsim_dev->port_list_lock);
- nsim_dev_port = __nsim_dev_port_lookup(nsim_dev, port_index);
+ nsim_dev_port = __nsim_dev_port_lookup(nsim_dev, type, port_index);
if (!nsim_dev_port)
err = -ENOENT;
else
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 659d3dceb687..c3aeb15843e2 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -113,6 +113,11 @@ static int nsim_set_vf_rate(struct net_device *dev, int vf, int min, int max)
struct netdevsim *ns = netdev_priv(dev);
struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
+ if (nsim_esw_mode_is_switchdev(ns->nsim_dev)) {
+ pr_err("Not supported in switchdev mode. Please use devlink API.\n");
+ return -EOPNOTSUPP;
+ }
+
if (vf >= nsim_bus_dev->num_vfs)
return -EINVAL;
@@ -261,6 +266,18 @@ static const struct net_device_ops nsim_netdev_ops = {
.ndo_get_devlink_port = nsim_get_devlink_port,
};
+static const struct net_device_ops nsim_vf_netdev_ops = {
+ .ndo_start_xmit = nsim_start_xmit,
+ .ndo_set_rx_mode = nsim_set_rx_mode,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = nsim_change_mtu,
+ .ndo_get_stats64 = nsim_get_stats64,
+ .ndo_setup_tc = nsim_setup_tc,
+ .ndo_set_features = nsim_set_features,
+ .ndo_get_devlink_port = nsim_get_devlink_port,
+};
+
static void nsim_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -280,6 +297,49 @@ static void nsim_setup(struct net_device *dev)
dev->max_mtu = ETH_MAX_MTU;
}
+static int nsim_init_netdevsim(struct netdevsim *ns)
+{
+ int err;
+
+ ns->netdev->netdev_ops = &nsim_netdev_ops;
+
+ err = nsim_udp_tunnels_info_create(ns->nsim_dev, ns->netdev);
+ if (err)
+ return err;
+
+ rtnl_lock();
+ err = nsim_bpf_init(ns);
+ if (err)
+ goto err_utn_destroy;
+
+ nsim_ipsec_init(ns);
+
+ err = register_netdevice(ns->netdev);
+ if (err)
+ goto err_ipsec_teardown;
+ rtnl_unlock();
+ return 0;
+
+err_ipsec_teardown:
+ nsim_ipsec_teardown(ns);
+ nsim_bpf_uninit(ns);
+err_utn_destroy:
+ rtnl_unlock();
+ nsim_udp_tunnels_info_destroy(ns->netdev);
+ return err;
+}
+
+static int nsim_init_netdevsim_vf(struct netdevsim *ns)
+{
+ int err;
+
+ ns->netdev->netdev_ops = &nsim_vf_netdev_ops;
+ rtnl_lock();
+ err = register_netdevice(ns->netdev);
+ rtnl_unlock();
+ return err;
+}
+
struct netdevsim *
nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
{
@@ -299,33 +359,15 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
ns->nsim_dev_port = nsim_dev_port;
ns->nsim_bus_dev = nsim_dev->nsim_bus_dev;
SET_NETDEV_DEV(dev, &ns->nsim_bus_dev->dev);
- dev->netdev_ops = &nsim_netdev_ops;
nsim_ethtool_init(ns);
-
- err = nsim_udp_tunnels_info_create(nsim_dev, dev);
+ if (nsim_dev_port_is_pf(nsim_dev_port))
+ err = nsim_init_netdevsim(ns);
+ else
+ err = nsim_init_netdevsim_vf(ns);
if (err)
goto err_free_netdev;
-
- rtnl_lock();
- err = nsim_bpf_init(ns);
- if (err)
- goto err_utn_destroy;
-
- nsim_ipsec_init(ns);
-
- err = register_netdevice(dev);
- if (err)
- goto err_ipsec_teardown;
- rtnl_unlock();
-
return ns;
-err_ipsec_teardown:
- nsim_ipsec_teardown(ns);
- nsim_bpf_uninit(ns);
-err_utn_destroy:
- rtnl_unlock();
- nsim_udp_tunnels_info_destroy(dev);
err_free_netdev:
free_netdev(dev);
return ERR_PTR(err);
@@ -337,10 +379,13 @@ void nsim_destroy(struct netdevsim *ns)
rtnl_lock();
unregister_netdevice(dev);
- nsim_ipsec_teardown(ns);
- nsim_bpf_uninit(ns);
+ if (nsim_dev_port_is_pf(ns->nsim_dev_port)) {
+ nsim_ipsec_teardown(ns);
+ nsim_bpf_uninit(ns);
+ }
rtnl_unlock();
- nsim_udp_tunnels_info_destroy(dev);
+ if (nsim_dev_port_is_pf(ns->nsim_dev_port))
+ nsim_udp_tunnels_info_destroy(dev);
free_netdev(dev);
}
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 7ff24e03577b..f2304e61919a 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -197,11 +197,22 @@ static inline void nsim_dev_psample_exit(struct nsim_dev *nsim_dev)
}
#endif
+enum nsim_dev_port_type {
+ NSIM_DEV_PORT_TYPE_PF,
+ NSIM_DEV_PORT_TYPE_VF,
+};
+
+#define NSIM_DEV_VF_PORT_INDEX_BASE 128
+#define NSIM_DEV_VF_PORT_INDEX_MAX UINT_MAX
+
struct nsim_dev_port {
struct list_head list;
struct devlink_port devlink_port;
unsigned int port_index;
+ enum nsim_dev_port_type port_type;
struct dentry *ddir;
+ struct dentry *rate_parent;
+ char *parent_name;
struct netdevsim *ns;
};
@@ -212,6 +223,8 @@ struct nsim_dev {
struct dentry *ddir;
struct dentry *ports_ddir;
struct dentry *take_snapshot;
+ struct dentry *max_vfs;
+ struct dentry *nodes_ddir;
struct bpf_offload_dev *bpf_dev;
bool bpf_bind_accept;
bool bpf_bind_verifier_accept;
@@ -236,6 +249,7 @@ struct nsim_dev {
bool fail_trap_group_set;
bool fail_trap_policer_set;
bool fail_trap_policer_counter_get;
+ bool fail_trap_counter_get;
struct {
struct udp_tunnel_nic_shared utn_shared;
u32 __ports[2][NSIM_UDP_TUNNEL_N_PORTS];
@@ -247,8 +261,22 @@ struct nsim_dev {
u32 sleep;
} udp_ports;
struct nsim_dev_psample *psample;
+ u16 esw_mode;
};
+int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev, struct netlink_ext_ack *extack);
+int nsim_esw_switchdev_enable(struct nsim_dev *nsim_dev, struct netlink_ext_ack *extack);
+
+static inline bool nsim_esw_mode_is_legacy(struct nsim_dev *nsim_dev)
+{
+ return nsim_dev->esw_mode == DEVLINK_ESWITCH_MODE_LEGACY;
+}
+
+static inline bool nsim_esw_mode_is_switchdev(struct nsim_dev *nsim_dev)
+{
+ return nsim_dev->esw_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV;
+}
+
static inline struct net *nsim_dev_net(struct nsim_dev *nsim_dev)
{
return devlink_net(priv_to_devlink(nsim_dev));
@@ -259,8 +287,10 @@ void nsim_dev_exit(void);
int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev);
void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev);
int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev,
+ enum nsim_dev_port_type type,
unsigned int port_index);
int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
+ enum nsim_dev_port_type type,
unsigned int port_index);
struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
@@ -269,6 +299,23 @@ void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *fib_data);
u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
enum nsim_resource_id res_id, bool max);
+ssize_t nsim_bus_dev_max_vfs_read(struct file *file,
+ char __user *data,
+ size_t count, loff_t *ppos);
+ssize_t nsim_bus_dev_max_vfs_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos);
+void nsim_bus_dev_vfs_disable(struct nsim_bus_dev *nsim_bus_dev);
+
+static inline bool nsim_dev_port_is_pf(struct nsim_dev_port *nsim_dev_port)
+{
+ return nsim_dev_port->port_type == NSIM_DEV_PORT_TYPE_PF;
+}
+
+static inline bool nsim_dev_port_is_vf(struct nsim_dev_port *nsim_dev_port)
+{
+ return nsim_dev_port->port_type == NSIM_DEV_PORT_TYPE_VF;
+}
#if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
void nsim_ipsec_init(struct netdevsim *ns);
void nsim_ipsec_teardown(struct netdevsim *ns);
@@ -308,7 +355,9 @@ struct nsim_bus_dev {
struct net *initial_net; /* Purpose of this is to carry net pointer
* during the probe time only.
*/
+ unsigned int max_vfs;
unsigned int num_vfs;
+ struct mutex vfs_lock; /* Protects vfconfigs */
struct nsim_vf_config *vfconfigs;
/* Lock for devlink->reload_enabled in netdevsim module */
struct mutex nsim_bus_reload_lock;
diff --git a/drivers/net/pcs/Makefile b/drivers/net/pcs/Makefile
index c23146755972..0603d469bd57 100644
--- a/drivers/net/pcs/Makefile
+++ b/drivers/net/pcs/Makefile
@@ -1,5 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for Linux PCS drivers
-obj-$(CONFIG_PCS_XPCS) += pcs-xpcs.o
+pcs_xpcs-$(CONFIG_PCS_XPCS) := pcs-xpcs.o pcs-xpcs-nxp.o
+
+obj-$(CONFIG_PCS_XPCS) += pcs_xpcs.o
obj-$(CONFIG_PCS_LYNX) += pcs-lynx.o
diff --git a/drivers/net/pcs/pcs-xpcs-nxp.c b/drivers/net/pcs/pcs-xpcs-nxp.c
new file mode 100644
index 000000000000..984c9f7f16a8
--- /dev/null
+++ b/drivers/net/pcs/pcs-xpcs-nxp.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2021 NXP Semiconductors
+ */
+#include <linux/pcs/pcs-xpcs.h>
+#include "pcs-xpcs.h"
+
+/* LANE_DRIVER1_0 register */
+#define SJA1110_LANE_DRIVER1_0 0x8038
+#define SJA1110_TXDRV(x) (((x) << 12) & GENMASK(14, 12))
+
+/* LANE_DRIVER2_0 register */
+#define SJA1110_LANE_DRIVER2_0 0x803a
+#define SJA1110_TXDRVTRIM_LSB(x) ((x) & GENMASK_ULL(15, 0))
+
+/* LANE_DRIVER2_1 register */
+#define SJA1110_LANE_DRIVER2_1 0x803b
+#define SJA1110_LANE_DRIVER2_1_RSV BIT(9)
+#define SJA1110_TXDRVTRIM_MSB(x) (((x) & GENMASK_ULL(23, 16)) >> 16)
+
+/* LANE_TRIM register */
+#define SJA1110_LANE_TRIM 0x8040
+#define SJA1110_TXTEN BIT(11)
+#define SJA1110_TXRTRIM(x) (((x) << 8) & GENMASK(10, 8))
+#define SJA1110_TXPLL_BWSEL BIT(7)
+#define SJA1110_RXTEN BIT(6)
+#define SJA1110_RXRTRIM(x) (((x) << 3) & GENMASK(5, 3))
+#define SJA1110_CDR_GAIN BIT(2)
+#define SJA1110_ACCOUPLE_RXVCM_EN BIT(0)
+
+/* LANE_DATAPATH_1 register */
+#define SJA1110_LANE_DATAPATH_1 0x8037
+
+/* POWERDOWN_ENABLE register */
+#define SJA1110_POWERDOWN_ENABLE 0x8041
+#define SJA1110_TXPLL_PD BIT(12)
+#define SJA1110_TXPD BIT(11)
+#define SJA1110_RXPKDETEN BIT(10)
+#define SJA1110_RXCH_PD BIT(9)
+#define SJA1110_RXBIAS_PD BIT(8)
+#define SJA1110_RESET_SER_EN BIT(7)
+#define SJA1110_RESET_SER BIT(6)
+#define SJA1110_RESET_DES BIT(5)
+#define SJA1110_RCVEN BIT(4)
+
+/* RXPLL_CTRL0 register */
+#define SJA1110_RXPLL_CTRL0 0x8065
+#define SJA1110_RXPLL_FBDIV(x) (((x) << 2) & GENMASK(9, 2))
+
+/* RXPLL_CTRL1 register */
+#define SJA1110_RXPLL_CTRL1 0x8066
+#define SJA1110_RXPLL_REFDIV(x) ((x) & GENMASK(4, 0))
+
+/* TXPLL_CTRL0 register */
+#define SJA1110_TXPLL_CTRL0 0x806d
+#define SJA1110_TXPLL_FBDIV(x) ((x) & GENMASK(11, 0))
+
+/* TXPLL_CTRL1 register */
+#define SJA1110_TXPLL_CTRL1 0x806e
+#define SJA1110_TXPLL_REFDIV(x) ((x) & GENMASK(5, 0))
+
+/* RX_DATA_DETECT register */
+#define SJA1110_RX_DATA_DETECT 0x8045
+
+/* RX_CDR_CTLE register */
+#define SJA1110_RX_CDR_CTLE 0x8042
+
+/* In NXP SJA1105, the PCS is integrated with a PMA that has the TX lane
+ * polarity inverted by default (PLUS is MINUS, MINUS is PLUS). To obtain
+ * normal non-inverted behavior, the TX lane polarity must be inverted in the
+ * PCS, via the DIGITAL_CONTROL_2 register.
+ */
+int nxp_sja1105_sgmii_pma_config(struct dw_xpcs *xpcs)
+{
+ return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL2,
+ DW_VR_MII_DIG_CTRL2_TX_POL_INV);
+}
+
+static int nxp_sja1110_pma_config(struct dw_xpcs *xpcs,
+ u16 txpll_fbdiv, u16 txpll_refdiv,
+ u16 rxpll_fbdiv, u16 rxpll_refdiv,
+ u16 rx_cdr_ctle)
+{
+ u16 val;
+ int ret;
+
+ /* Program TX PLL feedback divider and reference divider settings for
+ * correct oscillation frequency.
+ */
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_TXPLL_CTRL0,
+ SJA1110_TXPLL_FBDIV(txpll_fbdiv));
+ if (ret < 0)
+ return ret;
+
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_TXPLL_CTRL1,
+ SJA1110_TXPLL_REFDIV(txpll_refdiv));
+ if (ret < 0)
+ return ret;
+
+ /* Program transmitter amplitude and disable amplitude trimming */
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_LANE_DRIVER1_0,
+ SJA1110_TXDRV(0x5));
+ if (ret < 0)
+ return ret;
+
+ val = SJA1110_TXDRVTRIM_LSB(0xffffffull);
+
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_LANE_DRIVER2_0, val);
+ if (ret < 0)
+ return ret;
+
+ val = SJA1110_TXDRVTRIM_MSB(0xffffffull) | SJA1110_LANE_DRIVER2_1_RSV;
+
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_LANE_DRIVER2_1, val);
+ if (ret < 0)
+ return ret;
+
+ /* Enable input and output resistor terminations for low BER. */
+ val = SJA1110_ACCOUPLE_RXVCM_EN | SJA1110_CDR_GAIN |
+ SJA1110_RXRTRIM(4) | SJA1110_RXTEN | SJA1110_TXPLL_BWSEL |
+ SJA1110_TXRTRIM(3) | SJA1110_TXTEN;
+
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_LANE_TRIM, val);
+ if (ret < 0)
+ return ret;
+
+ /* Select PCS as transmitter data source. */
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_LANE_DATAPATH_1, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Program RX PLL feedback divider and reference divider for correct
+ * oscillation frequency.
+ */
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_RXPLL_CTRL0,
+ SJA1110_RXPLL_FBDIV(rxpll_fbdiv));
+ if (ret < 0)
+ return ret;
+
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_RXPLL_CTRL1,
+ SJA1110_RXPLL_REFDIV(rxpll_refdiv));
+ if (ret < 0)
+ return ret;
+
+ /* Program the threshold for the receiver signal detector.
+ * Enable control of the RXPLL by the receiver signal detector so that
+ * the RXPLL is disabled when no input signal is present.
+ */
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_RX_DATA_DETECT, 0x0005);
+ if (ret < 0)
+ return ret;
+
+ /* Enable TX and RX PLLs and circuits.
+ * Release reset of PMA to enable data flow to/from PCS.
+ */
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, SJA1110_POWERDOWN_ENABLE);
+ if (ret < 0)
+ return ret;
+
+ val = ret & ~(SJA1110_TXPLL_PD | SJA1110_TXPD | SJA1110_RXCH_PD |
+ SJA1110_RXBIAS_PD | SJA1110_RESET_SER_EN |
+ SJA1110_RESET_SER | SJA1110_RESET_DES);
+ val |= SJA1110_RXPKDETEN | SJA1110_RCVEN;
+
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_POWERDOWN_ENABLE, val);
+ if (ret < 0)
+ return ret;
+
+ /* Program continuous-time linear equalizer (CTLE) settings. */
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, SJA1110_RX_CDR_CTLE,
+ rx_cdr_ctle);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int nxp_sja1110_sgmii_pma_config(struct dw_xpcs *xpcs)
+{
+ return nxp_sja1110_pma_config(xpcs, 0x19, 0x1, 0x19, 0x1, 0x212a);
+}
+
+int nxp_sja1110_2500basex_pma_config(struct dw_xpcs *xpcs)
+{
+ return nxp_sja1110_pma_config(xpcs, 0x7d, 0x2, 0x7d, 0x2, 0x732a);
+}
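
A sanity check on the divider pairs above, assuming the usual 2.5x line-rate step from SGMII (1.25 GBd) to 2500base-x (3.125 GBd):

/* SGMII:       fbdiv/refdiv = 0x19/0x1 = 25/1  = 25.0
 * 2500base-x:  fbdiv/refdiv = 0x7d/0x2 = 125/2 = 62.5
 *
 * 62.5 / 25.0 = 2.5, matching 3.125 GBd / 1.25 GBd.
 */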
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index aa985a5aae8d..63fda3fc40aa 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -11,97 +11,10 @@
#include <linux/mdio.h>
#include <linux/phylink.h>
#include <linux/workqueue.h>
+#include "pcs-xpcs.h"
-#define SYNOPSYS_XPCS_USXGMII_ID 0x7996ced0
-#define SYNOPSYS_XPCS_10GKR_ID 0x7996ced0
-#define SYNOPSYS_XPCS_XLGMII_ID 0x7996ced0
-#define SYNOPSYS_XPCS_SGMII_ID 0x7996ced0
-#define SYNOPSYS_XPCS_MASK 0xffffffff
-
-/* Vendor regs access */
-#define DW_VENDOR BIT(15)
-
-/* VR_XS_PCS */
-#define DW_USXGMII_RST BIT(10)
-#define DW_USXGMII_EN BIT(9)
-#define DW_VR_XS_PCS_DIG_STS 0x0010
-#define DW_RXFIFO_ERR GENMASK(6, 5)
-
-/* SR_MII */
-#define DW_USXGMII_FULL BIT(8)
-#define DW_USXGMII_SS_MASK (BIT(13) | BIT(6) | BIT(5))
-#define DW_USXGMII_10000 (BIT(13) | BIT(6))
-#define DW_USXGMII_5000 (BIT(13) | BIT(5))
-#define DW_USXGMII_2500 (BIT(5))
-#define DW_USXGMII_1000 (BIT(6))
-#define DW_USXGMII_100 (BIT(13))
-#define DW_USXGMII_10 (0)
-
-/* SR_AN */
-#define DW_SR_AN_ADV1 0x10
-#define DW_SR_AN_ADV2 0x11
-#define DW_SR_AN_ADV3 0x12
-#define DW_SR_AN_LP_ABL1 0x13
-#define DW_SR_AN_LP_ABL2 0x14
-#define DW_SR_AN_LP_ABL3 0x15
-
-/* Clause 73 Defines */
-/* AN_LP_ABL1 */
-#define DW_C73_PAUSE BIT(10)
-#define DW_C73_ASYM_PAUSE BIT(11)
-#define DW_C73_AN_ADV_SF 0x1
-/* AN_LP_ABL2 */
-#define DW_C73_1000KX BIT(5)
-#define DW_C73_10000KX4 BIT(6)
-#define DW_C73_10000KR BIT(7)
-/* AN_LP_ABL3 */
-#define DW_C73_2500KX BIT(0)
-#define DW_C73_5000KR BIT(1)
-
-/* Clause 37 Defines */
-/* VR MII MMD registers offsets */
-#define DW_VR_MII_DIG_CTRL1 0x8000
-#define DW_VR_MII_AN_CTRL 0x8001
-#define DW_VR_MII_AN_INTR_STS 0x8002
-/* EEE Mode Control Register */
-#define DW_VR_MII_EEE_MCTRL0 0x8006
-#define DW_VR_MII_EEE_MCTRL1 0x800b
-
-/* VR_MII_DIG_CTRL1 */
-#define DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW BIT(9)
-
-/* VR_MII_AN_CTRL */
-#define DW_VR_MII_AN_CTRL_TX_CONFIG_SHIFT 3
-#define DW_VR_MII_TX_CONFIG_MASK BIT(3)
-#define DW_VR_MII_TX_CONFIG_PHY_SIDE_SGMII 0x1
-#define DW_VR_MII_TX_CONFIG_MAC_SIDE_SGMII 0x0
-#define DW_VR_MII_AN_CTRL_PCS_MODE_SHIFT 1
-#define DW_VR_MII_PCS_MODE_MASK GENMASK(2, 1)
-#define DW_VR_MII_PCS_MODE_C37_1000BASEX 0x0
-#define DW_VR_MII_PCS_MODE_C37_SGMII 0x2
-
-/* VR_MII_AN_INTR_STS */
-#define DW_VR_MII_AN_STS_C37_ANSGM_FD BIT(1)
-#define DW_VR_MII_AN_STS_C37_ANSGM_SP_SHIFT 2
-#define DW_VR_MII_AN_STS_C37_ANSGM_SP GENMASK(3, 2)
-#define DW_VR_MII_C37_ANSGM_SP_10 0x0
-#define DW_VR_MII_C37_ANSGM_SP_100 0x1
-#define DW_VR_MII_C37_ANSGM_SP_1000 0x2
-#define DW_VR_MII_C37_ANSGM_SP_LNKSTS BIT(4)
-
-/* VR MII EEE Control 0 defines */
-#define DW_VR_MII_EEE_LTX_EN BIT(0) /* LPI Tx Enable */
-#define DW_VR_MII_EEE_LRX_EN BIT(1) /* LPI Rx Enable */
-#define DW_VR_MII_EEE_TX_QUIET_EN BIT(2) /* Tx Quiet Enable */
-#define DW_VR_MII_EEE_RX_QUIET_EN BIT(3) /* Rx Quiet Enable */
-#define DW_VR_MII_EEE_TX_EN_CTRL BIT(4) /* Tx Control Enable */
-#define DW_VR_MII_EEE_RX_EN_CTRL BIT(7) /* Rx Control Enable */
-
-#define DW_VR_MII_EEE_MULT_FACT_100NS_SHIFT 8
-#define DW_VR_MII_EEE_MULT_FACT_100NS GENMASK(11, 8)
-
-/* VR MII EEE Control 1 defines */
-#define DW_VR_MII_EEE_TRN_LPI BIT(0) /* Transparent Mode Enable */
+#define phylink_pcs_to_xpcs(pl_pcs) \
+ container_of((pl_pcs), struct dw_xpcs, pcs)
static const int xpcs_usxgmii_features[] = {
ETHTOOL_LINK_MODE_Pause_BIT,
@@ -161,96 +74,141 @@ static const int xpcs_sgmii_features[] = {
__ETHTOOL_LINK_MODE_MASK_NBITS,
};
+static const int xpcs_2500basex_features[] = {
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_Autoneg_BIT,
+ ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+ ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+};
+
static const phy_interface_t xpcs_usxgmii_interfaces[] = {
PHY_INTERFACE_MODE_USXGMII,
- PHY_INTERFACE_MODE_MAX,
};
static const phy_interface_t xpcs_10gkr_interfaces[] = {
PHY_INTERFACE_MODE_10GKR,
- PHY_INTERFACE_MODE_MAX,
};
static const phy_interface_t xpcs_xlgmii_interfaces[] = {
PHY_INTERFACE_MODE_XLGMII,
- PHY_INTERFACE_MODE_MAX,
};
static const phy_interface_t xpcs_sgmii_interfaces[] = {
PHY_INTERFACE_MODE_SGMII,
+};
+
+static const phy_interface_t xpcs_2500basex_interfaces[] = {
+ PHY_INTERFACE_MODE_2500BASEX,
PHY_INTERFACE_MODE_MAX,
};
-static struct xpcs_id {
- u32 id;
- u32 mask;
+enum {
+ DW_XPCS_USXGMII,
+ DW_XPCS_10GKR,
+ DW_XPCS_XLGMII,
+ DW_XPCS_SGMII,
+ DW_XPCS_2500BASEX,
+ DW_XPCS_INTERFACE_MAX,
+};
+
+struct xpcs_compat {
const int *supported;
const phy_interface_t *interface;
+ int num_interfaces;
int an_mode;
-} xpcs_id_list[] = {
- {
- .id = SYNOPSYS_XPCS_USXGMII_ID,
- .mask = SYNOPSYS_XPCS_MASK,
- .supported = xpcs_usxgmii_features,
- .interface = xpcs_usxgmii_interfaces,
- .an_mode = DW_AN_C73,
- }, {
- .id = SYNOPSYS_XPCS_10GKR_ID,
- .mask = SYNOPSYS_XPCS_MASK,
- .supported = xpcs_10gkr_features,
- .interface = xpcs_10gkr_interfaces,
- .an_mode = DW_AN_C73,
- }, {
- .id = SYNOPSYS_XPCS_XLGMII_ID,
- .mask = SYNOPSYS_XPCS_MASK,
- .supported = xpcs_xlgmii_features,
- .interface = xpcs_xlgmii_interfaces,
- .an_mode = DW_AN_C73,
- }, {
- .id = SYNOPSYS_XPCS_SGMII_ID,
- .mask = SYNOPSYS_XPCS_MASK,
- .supported = xpcs_sgmii_features,
- .interface = xpcs_sgmii_interfaces,
- .an_mode = DW_AN_C37_SGMII,
- },
+ int (*pma_config)(struct dw_xpcs *xpcs);
};
-static int xpcs_read(struct mdio_xpcs_args *xpcs, int dev, u32 reg)
+struct xpcs_id {
+ u32 id;
+ u32 mask;
+ const struct xpcs_compat *compat;
+};
+
+static const struct xpcs_compat *xpcs_find_compat(const struct xpcs_id *id,
+ phy_interface_t interface)
{
- u32 reg_addr = MII_ADDR_C45 | dev << 16 | reg;
+ int i, j;
+
+ for (i = 0; i < DW_XPCS_INTERFACE_MAX; i++) {
+ const struct xpcs_compat *compat = &id->compat[i];
+
+ for (j = 0; j < compat->num_interfaces; j++)
+ if (compat->interface[j] == interface)
+ return compat;
+ }
- return mdiobus_read(xpcs->bus, xpcs->addr, reg_addr);
+ return NULL;
}
-static int xpcs_write(struct mdio_xpcs_args *xpcs, int dev, u32 reg, u16 val)
+int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface)
{
- u32 reg_addr = MII_ADDR_C45 | dev << 16 | reg;
+ const struct xpcs_compat *compat;
+
+ compat = xpcs_find_compat(xpcs->id, interface);
+ if (!compat)
+ return -ENODEV;
- return mdiobus_write(xpcs->bus, xpcs->addr, reg_addr, val);
+ return compat->an_mode;
}
+EXPORT_SYMBOL_GPL(xpcs_get_an_mode);
-static int xpcs_read_vendor(struct mdio_xpcs_args *xpcs, int dev, u32 reg)
+static bool __xpcs_linkmode_supported(const struct xpcs_compat *compat,
+ enum ethtool_link_mode_bit_indices linkmode)
+{
+ int i;
+
+ for (i = 0; compat->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++)
+ if (compat->supported[i] == linkmode)
+ return true;
+
+ return false;
+}
+
+#define xpcs_linkmode_supported(compat, mode) \
+ __xpcs_linkmode_supported(compat, ETHTOOL_LINK_MODE_ ## mode ## _BIT)
+
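The token-pasting helper above keeps the supported[] tables readable; for example:

/* xpcs_linkmode_supported(compat, 2500baseX_Full) expands to
 * __xpcs_linkmode_supported(compat, ETHTOOL_LINK_MODE_2500baseX_Full_BIT)
 */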
+int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg)
+{
+ u32 reg_addr = mdiobus_c45_addr(dev, reg);
+ struct mii_bus *bus = xpcs->mdiodev->bus;
+ int addr = xpcs->mdiodev->addr;
+
+ return mdiobus_read(bus, addr, reg_addr);
+}
+
+int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val)
+{
+ u32 reg_addr = mdiobus_c45_addr(dev, reg);
+ struct mii_bus *bus = xpcs->mdiodev->bus;
+ int addr = xpcs->mdiodev->addr;
+
+ return mdiobus_write(bus, addr, reg_addr, val);
+}
+
+static int xpcs_read_vendor(struct dw_xpcs *xpcs, int dev, u32 reg)
{
return xpcs_read(xpcs, dev, DW_VENDOR | reg);
}
-static int xpcs_write_vendor(struct mdio_xpcs_args *xpcs, int dev, int reg,
+static int xpcs_write_vendor(struct dw_xpcs *xpcs, int dev, int reg,
u16 val)
{
return xpcs_write(xpcs, dev, DW_VENDOR | reg, val);
}
-static int xpcs_read_vpcs(struct mdio_xpcs_args *xpcs, int reg)
+static int xpcs_read_vpcs(struct dw_xpcs *xpcs, int reg)
{
return xpcs_read_vendor(xpcs, MDIO_MMD_PCS, reg);
}
-static int xpcs_write_vpcs(struct mdio_xpcs_args *xpcs, int reg, u16 val)
+static int xpcs_write_vpcs(struct dw_xpcs *xpcs, int reg, u16 val)
{
return xpcs_write_vendor(xpcs, MDIO_MMD_PCS, reg, val);
}
-static int xpcs_poll_reset(struct mdio_xpcs_args *xpcs, int dev)
+static int xpcs_poll_reset(struct dw_xpcs *xpcs, int dev)
{
/* Poll until the reset bit clears (50ms per retry == 0.6 sec) */
unsigned int retries = 12;
@@ -266,15 +224,17 @@ static int xpcs_poll_reset(struct mdio_xpcs_args *xpcs, int dev)
return (ret & MDIO_CTRL1_RESET) ? -ETIMEDOUT : 0;
}
-static int xpcs_soft_reset(struct mdio_xpcs_args *xpcs)
+static int xpcs_soft_reset(struct dw_xpcs *xpcs,
+ const struct xpcs_compat *compat)
{
int ret, dev;
- switch (xpcs->an_mode) {
+ switch (compat->an_mode) {
case DW_AN_C73:
dev = MDIO_MMD_PCS;
break;
case DW_AN_C37_SGMII:
+ case DW_2500BASEX:
dev = MDIO_MMD_VEND2;
break;
default:
@@ -291,10 +251,10 @@ static int xpcs_soft_reset(struct mdio_xpcs_args *xpcs)
#define xpcs_warn(__xpcs, __state, __args...) \
({ \
if ((__state)->link) \
- dev_warn(&(__xpcs)->bus->dev, ##__args); \
+ dev_warn(&(__xpcs)->mdiodev->dev, ##__args); \
})
-static int xpcs_read_fault_c73(struct mdio_xpcs_args *xpcs,
+static int xpcs_read_fault_c73(struct dw_xpcs *xpcs,
struct phylink_link_state *state)
{
int ret;
@@ -345,7 +305,7 @@ static int xpcs_read_fault_c73(struct mdio_xpcs_args *xpcs,
return 0;
}
-static int xpcs_read_link_c73(struct mdio_xpcs_args *xpcs, bool an)
+static int xpcs_read_link_c73(struct dw_xpcs *xpcs, bool an)
{
bool link = true;
int ret;
@@ -385,7 +345,7 @@ static int xpcs_get_max_usxgmii_speed(const unsigned long *supported)
return max;
}
-static int xpcs_config_usxgmii(struct mdio_xpcs_args *xpcs, int speed)
+static void xpcs_config_usxgmii(struct dw_xpcs *xpcs, int speed)
{
int ret, speed_sel;
@@ -410,36 +370,44 @@ static int xpcs_config_usxgmii(struct mdio_xpcs_args *xpcs, int speed)
break;
default:
/* Nothing to do here */
- return -EINVAL;
+ return;
}
ret = xpcs_read_vpcs(xpcs, MDIO_CTRL1);
if (ret < 0)
- return ret;
+ goto out;
ret = xpcs_write_vpcs(xpcs, MDIO_CTRL1, ret | DW_USXGMII_EN);
if (ret < 0)
- return ret;
+ goto out;
ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1);
if (ret < 0)
- return ret;
+ goto out;
ret &= ~DW_USXGMII_SS_MASK;
ret |= speed_sel | DW_USXGMII_FULL;
ret = xpcs_write(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1, ret);
if (ret < 0)
- return ret;
+ goto out;
ret = xpcs_read_vpcs(xpcs, MDIO_CTRL1);
if (ret < 0)
- return ret;
+ goto out;
+
+ ret = xpcs_write_vpcs(xpcs, MDIO_CTRL1, ret | DW_USXGMII_RST);
+ if (ret < 0)
+ goto out;
- return xpcs_write_vpcs(xpcs, MDIO_CTRL1, ret | DW_USXGMII_RST);
+ return;
+
+out:
+ pr_err("%s: XPCS access returned %pe\n", __func__, ERR_PTR(ret));
}
-static int _xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs)
+static int _xpcs_config_aneg_c73(struct dw_xpcs *xpcs,
+ const struct xpcs_compat *compat)
{
int ret, adv;
@@ -451,7 +419,7 @@ static int _xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs)
/* SR_AN_ADV3 */
adv = 0;
- if (phylink_test(xpcs->supported, 2500baseX_Full))
+ if (xpcs_linkmode_supported(compat, 2500baseX_Full))
adv |= DW_C73_2500KX;
/* TODO: 5000baseKR */
@@ -462,11 +430,11 @@ static int _xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs)
/* SR_AN_ADV2 */
adv = 0;
- if (phylink_test(xpcs->supported, 1000baseKX_Full))
+ if (xpcs_linkmode_supported(compat, 1000baseKX_Full))
adv |= DW_C73_1000KX;
- if (phylink_test(xpcs->supported, 10000baseKX4_Full))
+ if (xpcs_linkmode_supported(compat, 10000baseKX4_Full))
adv |= DW_C73_10000KX4;
- if (phylink_test(xpcs->supported, 10000baseKR_Full))
+ if (xpcs_linkmode_supported(compat, 10000baseKR_Full))
adv |= DW_C73_10000KR;
ret = xpcs_write(xpcs, MDIO_MMD_AN, DW_SR_AN_ADV2, adv);
@@ -475,19 +443,20 @@ static int _xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs)
/* SR_AN_ADV1 */
adv = DW_C73_AN_ADV_SF;
- if (phylink_test(xpcs->supported, Pause))
+ if (xpcs_linkmode_supported(compat, Pause))
adv |= DW_C73_PAUSE;
- if (phylink_test(xpcs->supported, Asym_Pause))
+ if (xpcs_linkmode_supported(compat, Asym_Pause))
adv |= DW_C73_ASYM_PAUSE;
return xpcs_write(xpcs, MDIO_MMD_AN, DW_SR_AN_ADV1, adv);
}
-static int xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs)
+static int xpcs_config_aneg_c73(struct dw_xpcs *xpcs,
+ const struct xpcs_compat *compat)
{
int ret;
- ret = _xpcs_config_aneg_c73(xpcs);
+ ret = _xpcs_config_aneg_c73(xpcs, compat);
if (ret < 0)
return ret;
@@ -500,8 +469,9 @@ static int xpcs_config_aneg_c73(struct mdio_xpcs_args *xpcs)
return xpcs_write(xpcs, MDIO_MMD_AN, MDIO_CTRL1, ret);
}
-static int xpcs_aneg_done_c73(struct mdio_xpcs_args *xpcs,
- struct phylink_link_state *state)
+static int xpcs_aneg_done_c73(struct dw_xpcs *xpcs,
+ struct phylink_link_state *state,
+ const struct xpcs_compat *compat)
{
int ret;
@@ -516,7 +486,7 @@ static int xpcs_aneg_done_c73(struct mdio_xpcs_args *xpcs,
/* Check if Aneg outcome is valid */
if (!(ret & DW_C73_AN_ADV_SF)) {
- xpcs_config_aneg_c73(xpcs);
+ xpcs_config_aneg_c73(xpcs, compat);
return 0;
}
@@ -526,7 +496,7 @@ static int xpcs_aneg_done_c73(struct mdio_xpcs_args *xpcs,
return 0;
}
-static int xpcs_read_lpa_c73(struct mdio_xpcs_args *xpcs,
+static int xpcs_read_lpa_c73(struct dw_xpcs *xpcs,
struct phylink_link_state *state)
{
int ret;
@@ -575,7 +545,7 @@ static int xpcs_read_lpa_c73(struct mdio_xpcs_args *xpcs,
return 0;
}
-static void xpcs_resolve_lpa_c73(struct mdio_xpcs_args *xpcs,
+static void xpcs_resolve_lpa_c73(struct dw_xpcs *xpcs,
struct phylink_link_state *state)
{
int max_speed = xpcs_get_max_usxgmii_speed(state->lp_advertising);
@@ -585,7 +555,7 @@ static void xpcs_resolve_lpa_c73(struct mdio_xpcs_args *xpcs,
state->duplex = DUPLEX_FULL;
}
-static int xpcs_get_max_xlgmii_speed(struct mdio_xpcs_args *xpcs,
+static int xpcs_get_max_xlgmii_speed(struct dw_xpcs *xpcs,
struct phylink_link_state *state)
{
unsigned long *adv = state->advertising;
@@ -639,7 +609,7 @@ static int xpcs_get_max_xlgmii_speed(struct mdio_xpcs_args *xpcs,
return speed;
}
-static void xpcs_resolve_pma(struct mdio_xpcs_args *xpcs,
+static void xpcs_resolve_pma(struct dw_xpcs *xpcs,
struct phylink_link_state *state)
{
state->pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
@@ -658,17 +628,37 @@ static void xpcs_resolve_pma(struct mdio_xpcs_args *xpcs,
}
}
-static int xpcs_validate(struct mdio_xpcs_args *xpcs,
- unsigned long *supported,
- struct phylink_link_state *state)
+void xpcs_validate(struct dw_xpcs *xpcs, unsigned long *supported,
+ struct phylink_link_state *state)
{
- linkmode_and(supported, supported, xpcs->supported);
- linkmode_and(state->advertising, state->advertising, xpcs->supported);
- return 0;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(xpcs_supported);
+ const struct xpcs_compat *compat;
+ int i;
+
+ /* phylink expects us to report all supported modes with
+ * PHY_INTERFACE_MODE_NA, so don't limit the supported and
+ * advertising masks; just exit.
+ */
+ if (state->interface == PHY_INTERFACE_MODE_NA)
+ return;
+
+ bitmap_zero(xpcs_supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ compat = xpcs_find_compat(xpcs->id, state->interface);
+
+ /* Populate the supported link modes for this
+ * PHY interface type.
+ */
+ if (compat)
+ for (i = 0; compat->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++)
+ set_bit(compat->supported[i], xpcs_supported);
+
+ linkmode_and(supported, supported, xpcs_supported);
+ linkmode_and(state->advertising, state->advertising, xpcs_supported);
}
+EXPORT_SYMBOL_GPL(xpcs_validate);
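
Since xpcs_validate() is now exported, a MAC driver can delegate to it from its phylink validate callback. A hedged sketch (struct demo_priv and its fields are illustrative, not an API from this patch):

static void demo_mac_validate(struct phylink_config *config,
			      unsigned long *supported,
			      struct phylink_link_state *state)
{
	struct demo_priv *priv = container_of(config, struct demo_priv,
					      phylink_config);

	/* Fill in MAC capabilities first, then let the PCS trim the masks */
	xpcs_validate(priv->xpcs, supported, state);
}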
-static int xpcs_config_eee(struct mdio_xpcs_args *xpcs, int mult_fact_100ns,
- int enable)
+int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable)
{
int ret;
@@ -699,8 +689,9 @@ static int xpcs_config_eee(struct mdio_xpcs_args *xpcs, int mult_fact_100ns,
ret |= DW_VR_MII_EEE_TRN_LPI;
return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_EEE_MCTRL1, ret);
}
+EXPORT_SYMBOL_GPL(xpcs_config_eee);
-static int xpcs_config_aneg_c37_sgmii(struct mdio_xpcs_args *xpcs)
+static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, unsigned int mode)
{
int ret;
@@ -736,26 +727,61 @@ static int xpcs_config_aneg_c37_sgmii(struct mdio_xpcs_args *xpcs)
if (ret < 0)
return ret;
- ret |= DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
+ if (phylink_autoneg_inband(mode))
+ ret |= DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
+ else
+ ret &= ~DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret);
}
-static int xpcs_config(struct mdio_xpcs_args *xpcs,
- const struct phylink_link_state *state)
+static int xpcs_config_2500basex(struct dw_xpcs *xpcs)
{
int ret;
- switch (xpcs->an_mode) {
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1);
+ if (ret < 0)
+ return ret;
+ ret |= DW_VR_MII_DIG_CTRL1_2G5_EN;
+ ret &= ~DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret);
+ if (ret < 0)
+ return ret;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL);
+ if (ret < 0)
+ return ret;
+ ret &= ~AN_CL37_EN;
+ ret |= SGMII_SPEED_SS6;
+ ret &= ~SGMII_SPEED_SS13;
+ return xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_MMD_CTRL, ret);
+}
+
+int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
+ unsigned int mode)
+{
+ const struct xpcs_compat *compat;
+ int ret;
+
+ compat = xpcs_find_compat(xpcs->id, interface);
+ if (!compat)
+ return -ENODEV;
+
+ switch (compat->an_mode) {
case DW_AN_C73:
- if (state->an_enabled) {
- ret = xpcs_config_aneg_c73(xpcs);
+ if (phylink_autoneg_inband(mode)) {
+ ret = xpcs_config_aneg_c73(xpcs, compat);
if (ret)
return ret;
}
break;
case DW_AN_C37_SGMII:
- ret = xpcs_config_aneg_c37_sgmii(xpcs);
+ ret = xpcs_config_aneg_c37_sgmii(xpcs, mode);
+ if (ret)
+ return ret;
+ break;
+ case DW_2500BASEX:
+ ret = xpcs_config_2500basex(xpcs);
if (ret)
return ret;
break;
@@ -763,11 +789,29 @@ static int xpcs_config(struct mdio_xpcs_args *xpcs,
return -1;
}
+ if (compat->pma_config) {
+ ret = compat->pma_config(xpcs);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
+EXPORT_SYMBOL_GPL(xpcs_do_config);
+
+static int xpcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
+
+ return xpcs_do_config(xpcs, interface, mode);
+}
-static int xpcs_get_state_c73(struct mdio_xpcs_args *xpcs,
- struct phylink_link_state *state)
+static int xpcs_get_state_c73(struct dw_xpcs *xpcs,
+ struct phylink_link_state *state,
+ const struct xpcs_compat *compat)
{
int ret;
@@ -777,16 +821,16 @@ static int xpcs_get_state_c73(struct mdio_xpcs_args *xpcs,
/* ... and then we check the faults. */
ret = xpcs_read_fault_c73(xpcs, state);
if (ret) {
- ret = xpcs_soft_reset(xpcs);
+ ret = xpcs_soft_reset(xpcs, compat);
if (ret)
return ret;
state->link = 0;
- return xpcs_config(xpcs, state);
+ return xpcs_do_config(xpcs, state->interface, MLO_AN_INBAND);
}
- if (state->an_enabled && xpcs_aneg_done_c73(xpcs, state)) {
+ if (state->an_enabled && xpcs_aneg_done_c73(xpcs, state, compat)) {
state->an_complete = true;
xpcs_read_lpa_c73(xpcs, state);
xpcs_resolve_lpa_c73(xpcs, state);
@@ -799,7 +843,7 @@ static int xpcs_get_state_c73(struct mdio_xpcs_args *xpcs,
return 0;
}
-static int xpcs_get_state_c37_sgmii(struct mdio_xpcs_args *xpcs,
+static int xpcs_get_state_c37_sgmii(struct dw_xpcs *xpcs,
struct phylink_link_state *state)
{
int ret;
@@ -840,39 +884,81 @@ static int xpcs_get_state_c37_sgmii(struct mdio_xpcs_args *xpcs,
return 0;
}
-static int xpcs_get_state(struct mdio_xpcs_args *xpcs,
- struct phylink_link_state *state)
+static void xpcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
{
+ struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
+ const struct xpcs_compat *compat;
int ret;
- switch (xpcs->an_mode) {
+ compat = xpcs_find_compat(xpcs->id, state->interface);
+ if (!compat)
+ return;
+
+ switch (compat->an_mode) {
case DW_AN_C73:
- ret = xpcs_get_state_c73(xpcs, state);
- if (ret)
- return ret;
+ ret = xpcs_get_state_c73(xpcs, state, compat);
+ if (ret) {
+ pr_err("xpcs_get_state_c73 returned %pe\n",
+ ERR_PTR(ret));
+ return;
+ }
break;
case DW_AN_C37_SGMII:
ret = xpcs_get_state_c37_sgmii(xpcs, state);
- if (ret)
- return ret;
+ if (ret) {
+ pr_err("xpcs_get_state_c37_sgmii returned %pe\n",
+ ERR_PTR(ret));
+ }
break;
default:
- return -1;
+ return;
}
+}
- return 0;
+static void xpcs_link_up_sgmii(struct dw_xpcs *xpcs, unsigned int mode,
+ int speed, int duplex)
+{
+ int val, ret;
+
+ if (phylink_autoneg_inband(mode))
+ return;
+
+ switch (speed) {
+ case SPEED_1000:
+ val = BMCR_SPEED1000;
+ break;
+ case SPEED_100:
+ val = BMCR_SPEED100;
+ break;
+ case SPEED_10:
+ val = BMCR_SPEED10;
+ break;
+ default:
+ return;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ val |= BMCR_FULLDPLX;
+
+ ret = xpcs_write(xpcs, MDIO_MMD_VEND2, MDIO_CTRL1, val);
+ if (ret)
+ pr_err("%s: xpcs_write returned %pe\n", __func__, ERR_PTR(ret));
}
-static int xpcs_link_up(struct mdio_xpcs_args *xpcs, int speed,
- phy_interface_t interface)
+void xpcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex)
{
+ struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
+
if (interface == PHY_INTERFACE_MODE_USXGMII)
return xpcs_config_usxgmii(xpcs, speed);
-
- return 0;
+ if (interface == PHY_INTERFACE_MODE_SGMII)
+ return xpcs_link_up_sgmii(xpcs, mode, speed, duplex);
}
+EXPORT_SYMBOL_GPL(xpcs_link_up);
-static u32 xpcs_get_id(struct mdio_xpcs_args *xpcs)
+static u32 xpcs_get_id(struct dw_xpcs *xpcs)
{
int ret;
u32 id;
@@ -888,8 +974,10 @@ static u32 xpcs_get_id(struct mdio_xpcs_args *xpcs)
if (ret < 0)
return 0xffffffff;
- /* If Device IDs are not all zeros, we found C73 AN-type device */
- if (id | ret)
+ /* If the Device IDs are neither all zeros nor all ones,
+ * we have found a C73 AN-type device
+ */
+ if ((id | ret) && (id | ret) != 0xffffffff)
return id | ret;
/* Next, search C37 PCS using Vendor-Specific MII MMD */
@@ -910,61 +998,141 @@ static u32 xpcs_get_id(struct mdio_xpcs_args *xpcs)
return 0xffffffff;
}
-static bool xpcs_check_features(struct mdio_xpcs_args *xpcs,
- struct xpcs_id *match,
- phy_interface_t interface)
-{
- int i;
-
- for (i = 0; match->interface[i] != PHY_INTERFACE_MODE_MAX; i++) {
- if (match->interface[i] == interface)
- break;
- }
+static const struct xpcs_compat synopsys_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
+ [DW_XPCS_USXGMII] = {
+ .supported = xpcs_usxgmii_features,
+ .interface = xpcs_usxgmii_interfaces,
+ .num_interfaces = ARRAY_SIZE(xpcs_usxgmii_interfaces),
+ .an_mode = DW_AN_C73,
+ },
+ [DW_XPCS_10GKR] = {
+ .supported = xpcs_10gkr_features,
+ .interface = xpcs_10gkr_interfaces,
+ .num_interfaces = ARRAY_SIZE(xpcs_10gkr_interfaces),
+ .an_mode = DW_AN_C73,
+ },
+ [DW_XPCS_XLGMII] = {
+ .supported = xpcs_xlgmii_features,
+ .interface = xpcs_xlgmii_interfaces,
+ .num_interfaces = ARRAY_SIZE(xpcs_xlgmii_interfaces),
+ .an_mode = DW_AN_C73,
+ },
+ [DW_XPCS_SGMII] = {
+ .supported = xpcs_sgmii_features,
+ .interface = xpcs_sgmii_interfaces,
+ .num_interfaces = ARRAY_SIZE(xpcs_sgmii_interfaces),
+ .an_mode = DW_AN_C37_SGMII,
+ },
+ [DW_XPCS_2500BASEX] = {
+ .supported = xpcs_2500basex_features,
+ .interface = xpcs_2500basex_interfaces,
+ .num_interfaces = ARRAY_SIZE(xpcs_2500basex_interfaces),
+ .an_mode = DW_2500BASEX,
+ },
+};
- if (match->interface[i] == PHY_INTERFACE_MODE_MAX)
- return false;
+static const struct xpcs_compat nxp_sja1105_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
+ [DW_XPCS_SGMII] = {
+ .supported = xpcs_sgmii_features,
+ .interface = xpcs_sgmii_interfaces,
+ .num_interfaces = ARRAY_SIZE(xpcs_sgmii_interfaces),
+ .an_mode = DW_AN_C37_SGMII,
+ .pma_config = nxp_sja1105_sgmii_pma_config,
+ },
+};
- for (i = 0; match->supported[i] != __ETHTOOL_LINK_MODE_MASK_NBITS; i++)
- set_bit(match->supported[i], xpcs->supported);
+static const struct xpcs_compat nxp_sja1110_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
+ [DW_XPCS_SGMII] = {
+ .supported = xpcs_sgmii_features,
+ .interface = xpcs_sgmii_interfaces,
+ .num_interfaces = ARRAY_SIZE(xpcs_sgmii_interfaces),
+ .an_mode = DW_AN_C37_SGMII,
+ .pma_config = nxp_sja1110_sgmii_pma_config,
+ },
+ [DW_XPCS_2500BASEX] = {
+ .supported = xpcs_2500basex_features,
+ .interface = xpcs_2500basex_interfaces,
+ .num_interfaces = ARRAY_SIZE(xpcs_2500basex_interfaces),
+ .an_mode = DW_2500BASEX,
+ .pma_config = nxp_sja1110_2500basex_pma_config,
+ },
+};
- xpcs->an_mode = match->an_mode;
+static const struct xpcs_id xpcs_id_list[] = {
+ {
+ .id = SYNOPSYS_XPCS_ID,
+ .mask = SYNOPSYS_XPCS_MASK,
+ .compat = synopsys_xpcs_compat,
+ }, {
+ .id = NXP_SJA1105_XPCS_ID,
+ .mask = SYNOPSYS_XPCS_MASK,
+ .compat = nxp_sja1105_xpcs_compat,
+ }, {
+ .id = NXP_SJA1110_XPCS_ID,
+ .mask = SYNOPSYS_XPCS_MASK,
+ .compat = nxp_sja1110_xpcs_compat,
+ },
+};
- return true;
-}
+static const struct phylink_pcs_ops xpcs_phylink_ops = {
+ .pcs_config = xpcs_config,
+ .pcs_get_state = xpcs_get_state,
+ .pcs_link_up = xpcs_link_up,
+};
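
With the pcs_config/pcs_get_state/pcs_link_up ops wired up above, a MAC driver no longer calls these helpers directly; it hands the embedded struct phylink_pcs to phylink and lets the ops table do the rest. A minimal sketch (the priv layout is hypothetical):

	/* after both phylink_create() and xpcs_create() have succeeded */
	phylink_set_pcs(priv->phylink, &priv->xpcs->pcs);

From then on phylink invokes xpcs_config(), xpcs_get_state() and xpcs_link_up() through xpcs_phylink_ops as the link comes up.
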
-static int xpcs_probe(struct mdio_xpcs_args *xpcs, phy_interface_t interface)
+struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev,
+ phy_interface_t interface)
{
- u32 xpcs_id = xpcs_get_id(xpcs);
- struct xpcs_id *match = NULL;
- int i;
+ struct dw_xpcs *xpcs;
+ u32 xpcs_id;
+ int i, ret;
+
+ xpcs = kzalloc(sizeof(*xpcs), GFP_KERNEL);
+ if (!xpcs)
+ return NULL;
+
+ xpcs->mdiodev = mdiodev;
+
+ xpcs_id = xpcs_get_id(xpcs);
for (i = 0; i < ARRAY_SIZE(xpcs_id_list); i++) {
- struct xpcs_id *entry = &xpcs_id_list[i];
+ const struct xpcs_id *entry = &xpcs_id_list[i];
+ const struct xpcs_compat *compat;
+
+ if ((xpcs_id & entry->mask) != entry->id)
+ continue;
- if ((xpcs_id & entry->mask) == entry->id) {
- match = entry;
+ xpcs->id = entry;
- if (xpcs_check_features(xpcs, match, interface))
- return xpcs_soft_reset(xpcs);
+ compat = xpcs_find_compat(entry, interface);
+ if (!compat) {
+ ret = -ENODEV;
+ goto out;
}
+
+ xpcs->pcs.ops = &xpcs_phylink_ops;
+ xpcs->pcs.poll = true;
+
+ ret = xpcs_soft_reset(xpcs, compat);
+ if (ret)
+ goto out;
+
+ return xpcs;
}
- return -ENODEV;
-}
+ ret = -ENODEV;
-static struct mdio_xpcs_ops xpcs_ops = {
- .validate = xpcs_validate,
- .config = xpcs_config,
- .get_state = xpcs_get_state,
- .link_up = xpcs_link_up,
- .probe = xpcs_probe,
- .config_eee = xpcs_config_eee,
-};
+out:
+ kfree(xpcs);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(xpcs_create);
-struct mdio_xpcs_ops *mdio_xpcs_get_ops(void)
+void xpcs_destroy(struct dw_xpcs *xpcs)
{
- return &xpcs_ops;
+ kfree(xpcs);
}
-EXPORT_SYMBOL_GPL(mdio_xpcs_get_ops);
+EXPORT_SYMBOL_GPL(xpcs_destroy);
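
Taken together, xpcs_create()/xpcs_destroy() give callers the following lifecycle, sketched with error handling (the interface value is illustrative). Note that xpcs_create() returns NULL on allocation failure but an ERR_PTR() for every other error, so a caller must check both:

	struct dw_xpcs *xpcs;

	xpcs = xpcs_create(mdiodev, PHY_INTERFACE_MODE_SGMII);
	if (IS_ERR_OR_NULL(xpcs))
		return xpcs ? PTR_ERR(xpcs) : -ENOMEM;

	/* ... drive the PCS through phylink ... */

	xpcs_destroy(xpcs);	/* frees only the wrapper; the caller still
				 * owns the underlying mdio_device
				 */
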
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/pcs/pcs-xpcs.h b/drivers/net/pcs/pcs-xpcs.h
new file mode 100644
index 000000000000..35651d32a224
--- /dev/null
+++ b/drivers/net/pcs/pcs-xpcs.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare XPCS helpers
+ *
+ * Author: Jose Abreu <Jose.Abreu@synopsys.com>
+ */
+
+#define SYNOPSYS_XPCS_ID 0x7996ced0
+#define SYNOPSYS_XPCS_MASK 0xffffffff
+
+/* Vendor regs access */
+#define DW_VENDOR BIT(15)
+
+/* VR_XS_PCS */
+#define DW_USXGMII_RST BIT(10)
+#define DW_USXGMII_EN BIT(9)
+#define DW_VR_XS_PCS_DIG_STS 0x0010
+#define DW_RXFIFO_ERR GENMASK(6, 5)
+
+/* SR_MII */
+#define DW_USXGMII_FULL BIT(8)
+#define DW_USXGMII_SS_MASK (BIT(13) | BIT(6) | BIT(5))
+#define DW_USXGMII_10000 (BIT(13) | BIT(6))
+#define DW_USXGMII_5000 (BIT(13) | BIT(5))
+#define DW_USXGMII_2500 (BIT(5))
+#define DW_USXGMII_1000 (BIT(6))
+#define DW_USXGMII_100 (BIT(13))
+#define DW_USXGMII_10 (0)
+
+/* SR_AN */
+#define DW_SR_AN_ADV1 0x10
+#define DW_SR_AN_ADV2 0x11
+#define DW_SR_AN_ADV3 0x12
+#define DW_SR_AN_LP_ABL1 0x13
+#define DW_SR_AN_LP_ABL2 0x14
+#define DW_SR_AN_LP_ABL3 0x15
+
+/* Clause 73 Defines */
+/* AN_LP_ABL1 */
+#define DW_C73_PAUSE BIT(10)
+#define DW_C73_ASYM_PAUSE BIT(11)
+#define DW_C73_AN_ADV_SF 0x1
+/* AN_LP_ABL2 */
+#define DW_C73_1000KX BIT(5)
+#define DW_C73_10000KX4 BIT(6)
+#define DW_C73_10000KR BIT(7)
+/* AN_LP_ABL3 */
+#define DW_C73_2500KX BIT(0)
+#define DW_C73_5000KR BIT(1)
+
+/* Clause 37 Defines */
+/* VR MII MMD registers offsets */
+#define DW_VR_MII_MMD_CTRL 0x0000
+#define DW_VR_MII_DIG_CTRL1 0x8000
+#define DW_VR_MII_AN_CTRL 0x8001
+#define DW_VR_MII_AN_INTR_STS 0x8002
+/* Enable 2.5G Mode */
+#define DW_VR_MII_DIG_CTRL1_2G5_EN BIT(2)
+/* EEE Mode Control Register */
+#define DW_VR_MII_EEE_MCTRL0 0x8006
+#define DW_VR_MII_EEE_MCTRL1 0x800b
+#define DW_VR_MII_DIG_CTRL2 0x80e1
+
+/* VR_MII_DIG_CTRL1 */
+#define DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW BIT(9)
+
+/* VR_MII_DIG_CTRL2 */
+#define DW_VR_MII_DIG_CTRL2_TX_POL_INV BIT(4)
+#define DW_VR_MII_DIG_CTRL2_RX_POL_INV BIT(0)
+
+/* VR_MII_AN_CTRL */
+#define DW_VR_MII_AN_CTRL_TX_CONFIG_SHIFT 3
+#define DW_VR_MII_TX_CONFIG_MASK BIT(3)
+#define DW_VR_MII_TX_CONFIG_PHY_SIDE_SGMII 0x1
+#define DW_VR_MII_TX_CONFIG_MAC_SIDE_SGMII 0x0
+#define DW_VR_MII_AN_CTRL_PCS_MODE_SHIFT 1
+#define DW_VR_MII_PCS_MODE_MASK GENMASK(2, 1)
+#define DW_VR_MII_PCS_MODE_C37_1000BASEX 0x0
+#define DW_VR_MII_PCS_MODE_C37_SGMII 0x2
+
+/* VR_MII_AN_INTR_STS */
+#define DW_VR_MII_AN_STS_C37_ANSGM_FD BIT(1)
+#define DW_VR_MII_AN_STS_C37_ANSGM_SP_SHIFT 2
+#define DW_VR_MII_AN_STS_C37_ANSGM_SP GENMASK(3, 2)
+#define DW_VR_MII_C37_ANSGM_SP_10 0x0
+#define DW_VR_MII_C37_ANSGM_SP_100 0x1
+#define DW_VR_MII_C37_ANSGM_SP_1000 0x2
+#define DW_VR_MII_C37_ANSGM_SP_LNKSTS BIT(4)
+
+/* SR MII MMD Control defines */
+#define AN_CL37_EN BIT(12) /* Enable Clause 37 auto-nego */
+#define SGMII_SPEED_SS13 BIT(13) /* SGMII speed along with SS6 */
+#define SGMII_SPEED_SS6 BIT(6) /* SGMII speed along with SS13 */
+
+/* VR MII EEE Control 0 defines */
+#define DW_VR_MII_EEE_LTX_EN BIT(0) /* LPI Tx Enable */
+#define DW_VR_MII_EEE_LRX_EN BIT(1) /* LPI Rx Enable */
+#define DW_VR_MII_EEE_TX_QUIET_EN BIT(2) /* Tx Quiet Enable */
+#define DW_VR_MII_EEE_RX_QUIET_EN BIT(3) /* Rx Quiet Enable */
+#define DW_VR_MII_EEE_TX_EN_CTRL BIT(4) /* Tx Control Enable */
+#define DW_VR_MII_EEE_RX_EN_CTRL BIT(7) /* Rx Control Enable */
+
+#define DW_VR_MII_EEE_MULT_FACT_100NS_SHIFT 8
+#define DW_VR_MII_EEE_MULT_FACT_100NS GENMASK(11, 8)
+
+/* VR MII EEE Control 1 defines */
+#define DW_VR_MII_EEE_TRN_LPI BIT(0) /* Transparent Mode Enable */
+
+int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg);
+int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val);
+
+int nxp_sja1105_sgmii_pma_config(struct dw_xpcs *xpcs);
+int nxp_sja1110_sgmii_pma_config(struct dw_xpcs *xpcs);
+int nxp_sja1110_2500basex_pma_config(struct dw_xpcs *xpcs);
diff --git a/drivers/net/phy/ax88796b.c b/drivers/net/phy/ax88796b.c
index 79bf7ef1fcfd..457896337505 100644
--- a/drivers/net/phy/ax88796b.c
+++ b/drivers/net/phy/ax88796b.c
@@ -10,6 +10,8 @@
#include <linux/mii.h>
#include <linux/phy.h>
+#define PHY_ID_ASIX_AX88772A 0x003b1861
+#define PHY_ID_ASIX_AX88772C 0x003b1881
#define PHY_ID_ASIX_AX88796B 0x003b1841
MODULE_DESCRIPTION("Asix PHY driver");
@@ -39,7 +41,75 @@ static int asix_soft_reset(struct phy_device *phydev)
return genphy_soft_reset(phydev);
}
-static struct phy_driver asix_driver[] = { {
+/* The AX88772A does not work properly with some old switches (NETGEAR EN 108TP):
+ * after autoneg completes and the link status is reported as active, the MII_LPA
+ * register reads 0. This issue is not reproducible on the AX88772C.
+ */
+static int asix_ax88772a_read_status(struct phy_device *phydev)
+{
+ int ret, val;
+
+ ret = genphy_update_link(phydev);
+ if (ret)
+ return ret;
+
+ if (!phydev->link)
+ return 0;
+
+ /* If MII_LPA is 0, phy_resolve_aneg_linkmode() will fail to resolve
+ * the link mode, so fall back to the values in MII_BMCR.
+ */
+ val = phy_read(phydev, MII_BMCR);
+ if (val < 0)
+ return val;
+
+ if (val & BMCR_SPEED100)
+ phydev->speed = SPEED_100;
+ else
+ phydev->speed = SPEED_10;
+
+ if (val & BMCR_FULLDPLX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ ret = genphy_read_lpa(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
+ phy_resolve_aneg_linkmode(phydev);
+
+ return 0;
+}
+
+static void asix_ax88772a_link_change_notify(struct phy_device *phydev)
+{
+ /* Reset PHY, otherwise MII_LPA will provide outdated information.
+ * This issue is reproducible only with some link partner PHYs.
+ */
+ if (phydev->state == PHY_NOLINK && phydev->drv->soft_reset)
+ phydev->drv->soft_reset(phydev);
+}
+
+static struct phy_driver asix_driver[] = {
+{
+ PHY_ID_MATCH_EXACT(PHY_ID_ASIX_AX88772A),
+ .name = "Asix Electronics AX88772A",
+ .flags = PHY_IS_INTERNAL,
+ .read_status = asix_ax88772a_read_status,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .soft_reset = asix_soft_reset,
+ .link_change_notify = asix_ax88772a_link_change_notify,
+}, {
+ PHY_ID_MATCH_EXACT(PHY_ID_ASIX_AX88772C),
+ .name = "Asix Electronics AX88772C",
+ .flags = PHY_IS_INTERNAL,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .soft_reset = asix_soft_reset,
+}, {
.phy_id = PHY_ID_ASIX_AX88796B,
.name = "Asix Electronics AX88796B",
.phy_id_mask = 0xfffffff0,
@@ -50,6 +120,8 @@ static struct phy_driver asix_driver[] = { {
module_phy_driver(asix_driver);
static struct mdio_device_id __maybe_unused asix_tbl[] = {
+ { PHY_ID_MATCH_EXACT(PHY_ID_ASIX_AX88772A) },
+ { PHY_ID_MATCH_EXACT(PHY_ID_ASIX_AX88772C) },
{ PHY_ID_ASIX_AX88796B, 0xfffffff0 },
{ }
};
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index 4ac8fd190e9d..313563482690 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -54,9 +54,9 @@ static int bcm87xx_of_reg_init(struct phy_device *phydev)
u16 reg = be32_to_cpup(paddr++);
u16 mask = be32_to_cpup(paddr++);
u16 val_bits = be32_to_cpup(paddr++);
- int val;
u32 regnum = mdiobus_c45_addr(devid, reg);
- val = 0;
+ int val = 0;
+
if (mask) {
val = phy_read(phydev, regnum);
if (val < 0) {
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index a3b3842c67e5..4ac4bce1bf32 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -43,10 +43,10 @@
#define MII_DM9161_INTR_DPLX_CHANGE 0x0010
#define MII_DM9161_INTR_SPD_CHANGE 0x0008
#define MII_DM9161_INTR_LINK_CHANGE 0x0004
-#define MII_DM9161_INTR_INIT 0x0000
+#define MII_DM9161_INTR_INIT 0x0000
#define MII_DM9161_INTR_STOP \
-(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \
- | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
+ (MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK | \
+ MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
#define MII_DM9161_INTR_CHANGE \
(MII_DM9161_INTR_DPLX_CHANGE | \
MII_DM9161_INTR_SPD_CHANGE | \
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 0d79f68f301c..705c16675b80 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -170,9 +170,9 @@ static ushort gpio_tab[GPIO_TABLE_SIZE] = {
module_param(chosen_phy, int, 0444);
module_param_array(gpio_tab, ushort, NULL, 0444);
-MODULE_PARM_DESC(chosen_phy, \
+MODULE_PARM_DESC(chosen_phy,
"The address of the PHY to use for the ancillary clock features");
-MODULE_PARM_DESC(gpio_tab, \
+MODULE_PARM_DESC(gpio_tab,
"Which GPIO line to use for which purpose: cal,perout,extts1,...,extts6");
static void dp83640_gpio_defaults(struct ptp_pin_desc *pd)
@@ -615,6 +615,7 @@ static void prune_rx_ts(struct dp83640_private *dp83640)
static void enable_broadcast(struct phy_device *phydev, int init_page, int on)
{
int val;
+
phy_write(phydev, PAGESEL, 0);
val = phy_read(phydev, PHYCR2);
if (on)
diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c
index 09e07b902d3a..be1b71d7cab7 100644
--- a/drivers/net/phy/et1011c.c
+++ b/drivers/net/phy/et1011c.c
@@ -46,8 +46,8 @@ MODULE_LICENSE("GPL");
static int et1011c_config_aneg(struct phy_device *phydev)
{
- int ctl = 0;
- ctl = phy_read(phydev, MII_BMCR);
+ int ctl = phy_read(phydev, MII_BMCR);
+
if (ctl < 0)
return ctl;
ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 |
@@ -60,9 +60,10 @@ static int et1011c_config_aneg(struct phy_device *phydev)
static int et1011c_read_status(struct phy_device *phydev)
{
+ static int speed;
int ret;
u32 val;
- static int speed;
+
ret = genphy_read_status(phydev);
if (speed != phydev->speed) {
@@ -72,10 +73,10 @@ static int et1011c_read_status(struct phy_device *phydev)
ET1011C_GIGABIT_SPEED) {
val = phy_read(phydev, ET1011C_CONFIG_REG);
val &= ~ET1011C_TX_FIFO_MASK;
- phy_write(phydev, ET1011C_CONFIG_REG, val\
- | ET1011C_GMII_INTERFACE\
- | ET1011C_SYS_CLK_EN\
- | ET1011C_TX_FIFO_DEPTH_16);
+ phy_write(phydev, ET1011C_CONFIG_REG, val |
+ ET1011C_GMII_INTERFACE |
+ ET1011C_SYS_CLK_EN |
+ ET1011C_TX_FIFO_DEPTH_16);
}
}
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 18d81f43f2a8..c65fb5f5d2dc 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -161,8 +161,8 @@ static int fixed_phy_add_gpiod(unsigned int irq, int phy_addr,
}
int fixed_phy_add(unsigned int irq, int phy_addr,
- struct fixed_phy_status *status) {
-
+ struct fixed_phy_status *status)
+{
return fixed_phy_add_gpiod(irq, phy_addr, status, NULL);
}
EXPORT_SYMBOL_GPL(fixed_phy_add);
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index bde3356a2f86..e3bf827b7959 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -242,8 +242,8 @@ static int lxt973a2_read_status(struct phy_device *phydev)
return lpa;
/* If both registers are equal, it is suspect but not
- * impossible, hence a new try
- */
+ * impossible, hence a new try
+ */
} while (lpa == adv && retry--);
mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e6721c1c26c2..3de93c9f2744 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -367,39 +367,24 @@ static irqreturn_t marvell_handle_interrupt(struct phy_device *phydev)
static int marvell_set_polarity(struct phy_device *phydev, int polarity)
{
- int reg;
- int err;
- int val;
-
- /* get the current settings */
- reg = phy_read(phydev, MII_M1011_PHY_SCR);
- if (reg < 0)
- return reg;
+ u16 val;
- val = reg;
- val &= ~MII_M1011_PHY_SCR_AUTO_CROSS;
switch (polarity) {
case ETH_TP_MDI:
- val |= MII_M1011_PHY_SCR_MDI;
+ val = MII_M1011_PHY_SCR_MDI;
break;
case ETH_TP_MDI_X:
- val |= MII_M1011_PHY_SCR_MDI_X;
+ val = MII_M1011_PHY_SCR_MDI_X;
break;
case ETH_TP_MDI_AUTO:
case ETH_TP_MDI_INVALID:
default:
- val |= MII_M1011_PHY_SCR_AUTO_CROSS;
+ val = MII_M1011_PHY_SCR_AUTO_CROSS;
break;
}
- if (val != reg) {
- /* Set the new polarity value in the register */
- err = phy_write(phydev, MII_M1011_PHY_SCR, val);
- if (err)
- return err;
- }
-
- return val != reg;
+ return phy_modify_changed(phydev, MII_M1011_PHY_SCR,
+ MII_M1011_PHY_SCR_AUTO_CROSS, val);
}
static int marvell_config_aneg(struct phy_device *phydev)
@@ -824,14 +809,19 @@ static int m88e1111_config_init_rgmii_delays(struct phy_device *phydev)
{
int delay;
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) {
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_RGMII_ID:
delay = MII_M1111_RGMII_RX_DELAY | MII_M1111_RGMII_TX_DELAY;
- } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
delay = MII_M1111_RGMII_RX_DELAY;
- } else if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
delay = MII_M1111_RGMII_TX_DELAY;
- } else {
+ break;
+ default:
delay = 0;
+ break;
}
return phy_modify(phydev, MII_M1111_PHY_EXT_CR,
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index dadf75ff3ab9..24665670a89a 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -175,6 +175,7 @@ EXPORT_SYMBOL(mdiobus_alloc_size);
static void mdiobus_release(struct device *d)
{
struct mii_bus *bus = to_mii_bus(d);
+
BUG_ON(bus->state != MDIOBUS_RELEASED &&
/* for compatibility with error handling in drivers */
bus->state != MDIOBUS_ALLOCATED);
@@ -607,7 +608,8 @@ void mdiobus_unregister(struct mii_bus *bus)
struct mdio_device *mdiodev;
int i;
- BUG_ON(bus->state != MDIOBUS_REGISTERED);
+ if (WARN_ON_ONCE(bus->state != MDIOBUS_REGISTERED))
+ return;
bus->state = MDIOBUS_UNREGISTERED;
for (i = 0; i < PHY_MAX_ADDR; i++) {
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index 0837319a52d7..c94cb5382dc9 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -77,7 +77,7 @@ int mdio_device_register(struct mdio_device *mdiodev)
{
int err;
- dev_dbg(&mdiodev->dev, "mdio_device_register\n");
+ dev_dbg(&mdiodev->dev, "%s\n", __func__);
err = mdiobus_register_device(mdiodev);
if (err)
@@ -188,7 +188,7 @@ int mdio_driver_register(struct mdio_driver *drv)
struct mdio_driver_common *mdiodrv = &drv->mdiodrv;
int retval;
- pr_debug("mdio_driver_register: %s\n", mdiodrv->driver.name);
+ pr_debug("%s: %s\n", __func__, mdiodrv->driver.name);
mdiodrv->driver.bus = &mdio_bus_type;
mdiodrv->driver.probe = mdio_probe;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index a14a00328fa3..4d53886f7d51 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -20,6 +20,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/ethtool_netlink.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>
@@ -38,42 +39,60 @@
/* general Interrupt control/status reg in vendor specific block. */
#define MII_KSZPHY_INTCS 0x1B
-#define KSZPHY_INTCS_JABBER BIT(15)
-#define KSZPHY_INTCS_RECEIVE_ERR BIT(14)
-#define KSZPHY_INTCS_PAGE_RECEIVE BIT(13)
-#define KSZPHY_INTCS_PARELLEL BIT(12)
-#define KSZPHY_INTCS_LINK_PARTNER_ACK BIT(11)
-#define KSZPHY_INTCS_LINK_DOWN BIT(10)
-#define KSZPHY_INTCS_REMOTE_FAULT BIT(9)
-#define KSZPHY_INTCS_LINK_UP BIT(8)
-#define KSZPHY_INTCS_ALL (KSZPHY_INTCS_LINK_UP |\
+#define KSZPHY_INTCS_JABBER BIT(15)
+#define KSZPHY_INTCS_RECEIVE_ERR BIT(14)
+#define KSZPHY_INTCS_PAGE_RECEIVE BIT(13)
+#define KSZPHY_INTCS_PARELLEL BIT(12)
+#define KSZPHY_INTCS_LINK_PARTNER_ACK BIT(11)
+#define KSZPHY_INTCS_LINK_DOWN BIT(10)
+#define KSZPHY_INTCS_REMOTE_FAULT BIT(9)
+#define KSZPHY_INTCS_LINK_UP BIT(8)
+#define KSZPHY_INTCS_ALL (KSZPHY_INTCS_LINK_UP |\
KSZPHY_INTCS_LINK_DOWN)
-#define KSZPHY_INTCS_LINK_DOWN_STATUS BIT(2)
-#define KSZPHY_INTCS_LINK_UP_STATUS BIT(0)
-#define KSZPHY_INTCS_STATUS (KSZPHY_INTCS_LINK_DOWN_STATUS |\
+#define KSZPHY_INTCS_LINK_DOWN_STATUS BIT(2)
+#define KSZPHY_INTCS_LINK_UP_STATUS BIT(0)
+#define KSZPHY_INTCS_STATUS (KSZPHY_INTCS_LINK_DOWN_STATUS |\
KSZPHY_INTCS_LINK_UP_STATUS)
+/* LinkMD Control/Status */
+#define KSZ8081_LMD 0x1d
+#define KSZ8081_LMD_ENABLE_TEST BIT(15)
+#define KSZ8081_LMD_STAT_NORMAL 0
+#define KSZ8081_LMD_STAT_OPEN 1
+#define KSZ8081_LMD_STAT_SHORT 2
+#define KSZ8081_LMD_STAT_FAIL 3
+#define KSZ8081_LMD_STAT_MASK GENMASK(14, 13)
+/* Short cable (<10 meters) has been detected by LinkMD */
+#define KSZ8081_LMD_SHORT_INDICATOR BIT(12)
+#define KSZ8081_LMD_DELTA_TIME_MASK GENMASK(8, 0)
+
/* PHY Control 1 */
-#define MII_KSZPHY_CTRL_1 0x1e
+#define MII_KSZPHY_CTRL_1 0x1e
+#define KSZ8081_CTRL1_MDIX_STAT BIT(4)
/* PHY Control 2 / PHY Control (if no PHY Control 1) */
-#define MII_KSZPHY_CTRL_2 0x1f
-#define MII_KSZPHY_CTRL MII_KSZPHY_CTRL_2
+#define MII_KSZPHY_CTRL_2 0x1f
+#define MII_KSZPHY_CTRL MII_KSZPHY_CTRL_2
/* bitmap of PHY register to set interrupt mode */
+#define KSZ8081_CTRL2_HP_MDIX BIT(15)
+#define KSZ8081_CTRL2_MDI_MDI_X_SELECT BIT(14)
+#define KSZ8081_CTRL2_DISABLE_AUTO_MDIX BIT(13)
+#define KSZ8081_CTRL2_FORCE_LINK BIT(11)
+#define KSZ8081_CTRL2_POWER_SAVING BIT(10)
#define KSZPHY_CTRL_INT_ACTIVE_HIGH BIT(9)
#define KSZPHY_RMII_REF_CLK_SEL BIT(7)
/* Write/read to/from extended registers */
-#define MII_KSZPHY_EXTREG 0x0b
-#define KSZPHY_EXTREG_WRITE 0x8000
+#define MII_KSZPHY_EXTREG 0x0b
+#define KSZPHY_EXTREG_WRITE 0x8000
-#define MII_KSZPHY_EXTREG_WRITE 0x0c
-#define MII_KSZPHY_EXTREG_READ 0x0d
+#define MII_KSZPHY_EXTREG_WRITE 0x0c
+#define MII_KSZPHY_EXTREG_READ 0x0d
/* Extended registers */
-#define MII_KSZPHY_CLK_CONTROL_PAD_SKEW 0x104
-#define MII_KSZPHY_RX_DATA_PAD_SKEW 0x105
-#define MII_KSZPHY_TX_DATA_PAD_SKEW 0x106
+#define MII_KSZPHY_CLK_CONTROL_PAD_SKEW 0x104
+#define MII_KSZPHY_RX_DATA_PAD_SKEW 0x105
+#define MII_KSZPHY_TX_DATA_PAD_SKEW 0x106
#define PS_TO_REG 200
@@ -422,6 +441,87 @@ static int ksz8081_config_init(struct phy_device *phydev)
return kszphy_config_init(phydev);
}
+static int ksz8081_config_mdix(struct phy_device *phydev, u8 ctrl)
+{
+ u16 val;
+
+ switch (ctrl) {
+ case ETH_TP_MDI:
+ val = KSZ8081_CTRL2_DISABLE_AUTO_MDIX;
+ break;
+ case ETH_TP_MDI_X:
+ val = KSZ8081_CTRL2_DISABLE_AUTO_MDIX |
+ KSZ8081_CTRL2_MDI_MDI_X_SELECT;
+ break;
+ case ETH_TP_MDI_AUTO:
+ val = 0;
+ break;
+ default:
+ return 0;
+ }
+
+ return phy_modify(phydev, MII_KSZPHY_CTRL_2,
+ KSZ8081_CTRL2_HP_MDIX |
+ KSZ8081_CTRL2_MDI_MDI_X_SELECT |
+ KSZ8081_CTRL2_DISABLE_AUTO_MDIX,
+ KSZ8081_CTRL2_HP_MDIX | val);
+}
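
The helper relies on phy_modify()'s mask/set semantics; ignoring the MDIO bus locking, the call above behaves like this read-modify-write sketch:

	int ret = phy_read(phydev, MII_KSZPHY_CTRL_2);

	if (ret < 0)
		return ret;
	ret &= ~(KSZ8081_CTRL2_HP_MDIX | KSZ8081_CTRL2_MDI_MDI_X_SELECT |
		 KSZ8081_CTRL2_DISABLE_AUTO_MDIX);	/* clear the mask */
	ret |= KSZ8081_CTRL2_HP_MDIX | val;		/* apply the new bits */
	return phy_write(phydev, MII_KSZPHY_CTRL_2, ret);
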
+
+static int ksz8081_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_config_aneg(phydev);
+ if (ret)
+ return ret;
+
+ /* The MDI-X configuration is automatically changed by the PHY after
+ * switching from autoneg off to on, so take the MDI-X configuration
+ * under our own control and set it after autoneg has been configured.
+ */
+ return ksz8081_config_mdix(phydev, phydev->mdix_ctrl);
+}
+
+static int ksz8081_mdix_update(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read(phydev, MII_KSZPHY_CTRL_2);
+ if (ret < 0)
+ return ret;
+
+ if (ret & KSZ8081_CTRL2_DISABLE_AUTO_MDIX) {
+ if (ret & KSZ8081_CTRL2_MDI_MDI_X_SELECT)
+ phydev->mdix_ctrl = ETH_TP_MDI_X;
+ else
+ phydev->mdix_ctrl = ETH_TP_MDI;
+ } else {
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+ }
+
+ ret = phy_read(phydev, MII_KSZPHY_CTRL_1);
+ if (ret < 0)
+ return ret;
+
+ if (ret & KSZ8081_CTRL1_MDIX_STAT)
+ phydev->mdix = ETH_TP_MDI;
+ else
+ phydev->mdix = ETH_TP_MDI_X;
+
+ return 0;
+}
+
+static int ksz8081_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = ksz8081_mdix_update(phydev);
+ if (ret < 0)
+ return ret;
+
+ return genphy_read_status(phydev);
+}
+
static int ksz8061_config_init(struct phy_device *phydev)
{
int ret;
@@ -488,8 +588,7 @@ static int ksz9021_load_values_from_of(struct phy_device *phydev,
static int ksz9021_config_init(struct phy_device *phydev)
{
- const struct device *dev = &phydev->mdio.dev;
- const struct device_node *of_node = dev->of_node;
+ const struct device_node *of_node;
const struct device *dev_walker;
/* The Micrel driver has a deprecated option to place phy OF
@@ -711,8 +810,7 @@ static int ksz9031_config_rgmii_delay(struct phy_device *phydev)
static int ksz9031_config_init(struct phy_device *phydev)
{
- const struct device *dev = &phydev->mdio.dev;
- const struct device_node *of_node = dev->of_node;
+ const struct device_node *of_node;
static const char *clk_skews[2] = {"rxc-skew-ps", "txc-skew-ps"};
static const char *rx_data_skews[4] = {
"rxd0-skew-ps", "rxd1-skew-ps",
@@ -907,8 +1005,7 @@ static int ksz9131_config_rgmii_delay(struct phy_device *phydev)
static int ksz9131_config_init(struct phy_device *phydev)
{
- const struct device *dev = &phydev->mdio.dev;
- struct device_node *of_node = dev->of_node;
+ struct device_node *of_node;
char *clk_skews[2] = {"rxc-skew-psec", "txc-skew-psec"};
char *rx_data_skews[4] = {
"rxd0-skew-psec", "rxd1-skew-psec",
@@ -1048,6 +1145,92 @@ static int ksz8873mll_config_aneg(struct phy_device *phydev)
return 0;
}
+static int ksz886x_config_mdix(struct phy_device *phydev, u8 ctrl)
+{
+ u16 val;
+
+ switch (ctrl) {
+ case ETH_TP_MDI:
+ val = KSZ886X_BMCR_DISABLE_AUTO_MDIX;
+ break;
+ case ETH_TP_MDI_X:
+ /* Note: The naming of the bit KSZ886X_BMCR_FORCE_MDI is a bit
+ * counterintuitive; the "-X" in "1 = Force MDI" in the data
+ * sheet seems to be missing:
+ * 1 = Force MDI (sic!) (transmit on RX+/RX- pins)
+ * 0 = Normal operation (transmit on TX+/TX- pins)
+ */
+ val = KSZ886X_BMCR_DISABLE_AUTO_MDIX | KSZ886X_BMCR_FORCE_MDI;
+ break;
+ case ETH_TP_MDI_AUTO:
+ val = 0;
+ break;
+ default:
+ return 0;
+ }
+
+ return phy_modify(phydev, MII_BMCR,
+ KSZ886X_BMCR_HP_MDIX | KSZ886X_BMCR_FORCE_MDI |
+ KSZ886X_BMCR_DISABLE_AUTO_MDIX,
+ KSZ886X_BMCR_HP_MDIX | val);
+}
+
+static int ksz886x_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_config_aneg(phydev);
+ if (ret)
+ return ret;
+
+ /* The MDI-X configuration is automatically changed by the PHY after
+ * switching from autoneg off to on, so take the MDI-X configuration
+ * under our own control and set it after autoneg has been configured.
+ */
+ return ksz886x_config_mdix(phydev, phydev->mdix_ctrl);
+}
+
+static int ksz886x_mdix_update(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read(phydev, MII_BMCR);
+ if (ret < 0)
+ return ret;
+
+ if (ret & KSZ886X_BMCR_DISABLE_AUTO_MDIX) {
+ if (ret & KSZ886X_BMCR_FORCE_MDI)
+ phydev->mdix_ctrl = ETH_TP_MDI_X;
+ else
+ phydev->mdix_ctrl = ETH_TP_MDI;
+ } else {
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+ }
+
+ ret = phy_read(phydev, MII_KSZPHY_CTRL);
+ if (ret < 0)
+ return ret;
+
+ /* Same reverse logic as KSZ886X_BMCR_FORCE_MDI */
+ if (ret & KSZ886X_CTRL_MDIX_STAT)
+ phydev->mdix = ETH_TP_MDI_X;
+ else
+ phydev->mdix = ETH_TP_MDI;
+
+ return 0;
+}
+
+static int ksz886x_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = ksz886x_mdix_update(phydev);
+ if (ret < 0)
+ return ret;
+
+ return genphy_read_status(phydev);
+}
+
static int kszphy_get_sset_count(struct phy_device *phydev)
{
return ARRAY_SIZE(kszphy_hw_stats);
@@ -1193,6 +1376,167 @@ static int kszphy_probe(struct phy_device *phydev)
return 0;
}
+static int ksz886x_cable_test_start(struct phy_device *phydev)
+{
+ if (phydev->dev_flags & MICREL_KSZ8_P1_ERRATA)
+ return -EOPNOTSUPP;
+
+ /* If autoneg is enabled, we won't be able to test cross-pair
+ * shorts: the PHY would "detect" a link and confuse its internal
+ * state machine. Disable autoneg here and, with autoneg disabled,
+ * force the speed down to 10 Mbit/s.
+ */
+ return phy_clear_bits(phydev, MII_BMCR, BMCR_ANENABLE | BMCR_SPEED100);
+}
+
+static int ksz886x_cable_test_result_trans(u16 status)
+{
+ switch (FIELD_GET(KSZ8081_LMD_STAT_MASK, status)) {
+ case KSZ8081_LMD_STAT_NORMAL:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case KSZ8081_LMD_STAT_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case KSZ8081_LMD_STAT_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case KSZ8081_LMD_STAT_FAIL:
+ fallthrough;
+ default:
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static bool ksz886x_cable_test_failed(u16 status)
+{
+ return FIELD_GET(KSZ8081_LMD_STAT_MASK, status) ==
+ KSZ8081_LMD_STAT_FAIL;
+}
+
+static bool ksz886x_cable_test_fault_length_valid(u16 status)
+{
+ switch (FIELD_GET(KSZ8081_LMD_STAT_MASK, status)) {
+ case KSZ8081_LMD_STAT_OPEN:
+ fallthrough;
+ case KSZ8081_LMD_STAT_SHORT:
+ return true;
+ }
+ return false;
+}
+
+static int ksz886x_cable_test_fault_length(u16 status)
+{
+ int dt;
+
+ /* According to the data sheet, the distance to the fault is
+ * DELTA_TIME * 0.4 meters.
+ */
+ dt = FIELD_GET(KSZ8081_LMD_DELTA_TIME_MASK, status);
+
+ return (dt * 400) / 10;
+}
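
A worked example of the conversion above (the ethtool netlink helper that consumes this value expects centimeters): a DELTA_TIME reading of 50 corresponds to 50 * 0.4 m = 20 m to the fault, which the integer math reports as (50 * 400) / 10 = 2000 cm.
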
+
+static int ksz886x_cable_test_wait_for_completion(struct phy_device *phydev)
+{
+ int val, ret;
+
+ ret = phy_read_poll_timeout(phydev, KSZ8081_LMD, val,
+ !(val & KSZ8081_LMD_ENABLE_TEST),
+ 30000, 100000, true);
+
+ return ret < 0 ? ret : 0;
+}
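
With these arguments, phy_read_poll_timeout() sleeps 30 ms between reads of KSZ8081_LMD, gives up after 100 ms, and (last argument true) sleeps once before the first read. A hand-rolled equivalent would look roughly like this sketch:

	unsigned long timeout = jiffies + msecs_to_jiffies(100);
	int val;

	do {
		msleep(30);
		val = phy_read(phydev, KSZ8081_LMD);
		if (val < 0)
			return val;
		if (!(val & KSZ8081_LMD_ENABLE_TEST))
			return 0;	/* test pulse completed */
	} while (time_before(jiffies, timeout));

	return -ETIMEDOUT;
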
+
+static int ksz886x_cable_test_one_pair(struct phy_device *phydev, int pair)
+{
+ static const int ethtool_pair[] = {
+ ETHTOOL_A_CABLE_PAIR_A,
+ ETHTOOL_A_CABLE_PAIR_B,
+ };
+ int ret, val, mdix;
+
+ /* There is no way to choose the pair, as we do on the KSZ9031.
+ * We can work around this limitation by using the MDI-X functionality.
+ */
+ if (pair == 0)
+ mdix = ETH_TP_MDI;
+ else
+ mdix = ETH_TP_MDI_X;
+
+ switch (phydev->phy_id & MICREL_PHY_ID_MASK) {
+ case PHY_ID_KSZ8081:
+ ret = ksz8081_config_mdix(phydev, mdix);
+ break;
+ case PHY_ID_KSZ886X:
+ ret = ksz886x_config_mdix(phydev, mdix);
+ break;
+ default:
+ ret = -ENODEV;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Now we are ready to fire. This command will send a 100ns pulse
+ * to the pair.
+ */
+ ret = phy_write(phydev, KSZ8081_LMD, KSZ8081_LMD_ENABLE_TEST);
+ if (ret)
+ return ret;
+
+ ret = ksz886x_cable_test_wait_for_completion(phydev);
+ if (ret)
+ return ret;
+
+ val = phy_read(phydev, KSZ8081_LMD);
+ if (val < 0)
+ return val;
+
+ if (ksz886x_cable_test_failed(val))
+ return -EAGAIN;
+
+ ret = ethnl_cable_test_result(phydev, ethtool_pair[pair],
+ ksz886x_cable_test_result_trans(val));
+ if (ret)
+ return ret;
+
+ if (!ksz886x_cable_test_fault_length_valid(val))
+ return 0;
+
+ return ethnl_cable_test_fault_length(phydev, ethtool_pair[pair],
+ ksz886x_cable_test_fault_length(val));
+}
+
+static int ksz886x_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ unsigned long pair_mask = 0x3;
+ int retries = 20;
+ int pair, ret;
+
+ *finished = false;
+
+ /* Try harder if link partner is active */
+ while (pair_mask && retries--) {
+ for_each_set_bit(pair, &pair_mask, 4) {
+ ret = ksz886x_cable_test_one_pair(phydev, pair);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ return ret;
+ clear_bit(pair, &pair_mask);
+ }
+ /* If the link partner is in autonegotiation mode, it will send 2ms
+ * bursts of FLPs separated by at least 6ms of silence.
+ * Sleep for 2ms to improve the chances of hitting this silence.
+ */
+ if (pair_mask)
+ msleep(2);
+ }
+
+ *finished = true;
+
+ return ret;
+}
+
static struct phy_driver ksphy_driver[] = {
{
.phy_id = PHY_ID_KS8737,
@@ -1299,11 +1643,14 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8081,
.name = "Micrel KSZ8081 or KSZ8091",
.phy_id_mask = MICREL_PHY_ID_MASK,
+ .flags = PHY_POLL_CABLE_TEST,
/* PHY_BASIC_FEATURES */
.driver_data = &ksz8081_type,
.probe = kszphy_probe,
.config_init = ksz8081_config_init,
.soft_reset = genphy_soft_reset,
+ .config_aneg = ksz8081_config_aneg,
+ .read_status = ksz8081_read_status,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
.get_sset_count = kszphy_get_sset_count,
@@ -1311,6 +1658,8 @@ static struct phy_driver ksphy_driver[] = {
.get_stats = kszphy_get_stats,
.suspend = kszphy_suspend,
.resume = kszphy_resume,
+ .cable_test_start = ksz886x_cable_test_start,
+ .cable_test_get_status = ksz886x_cable_test_get_status,
}, {
.phy_id = PHY_ID_KSZ8061,
.name = "Micrel KSZ8061",
@@ -1399,9 +1748,14 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8851 Ethernet MAC or KSZ886X Switch",
/* PHY_BASIC_FEATURES */
+ .flags = PHY_POLL_CABLE_TEST,
.config_init = kszphy_config_init,
+ .config_aneg = ksz886x_config_aneg,
+ .read_status = ksz886x_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .cable_test_start = ksz886x_cable_test_start,
+ .cable_test_get_status = ksz886x_cable_test_get_status,
}, {
.name = "Micrel KSZ87XX Switch",
/* PHY_BASIC_FEATURES */
diff --git a/drivers/net/phy/mii_timestamper.c b/drivers/net/phy/mii_timestamper.c
index b71b7456462d..51ae0593a04f 100644
--- a/drivers/net/phy/mii_timestamper.c
+++ b/drivers/net/phy/mii_timestamper.c
@@ -111,6 +111,9 @@ void unregister_mii_timestamper(struct mii_timestamper *mii_ts)
struct mii_timestamping_desc *desc;
struct list_head *this;
+ if (!mii_ts)
+ return;
+
/* mii_timestamper statically registered by the PHY driver won't use the
* register_mii_timestamper() and thus don't have ->device set. Don't
* try to unregister these.
diff --git a/drivers/net/phy/motorcomm.c b/drivers/net/phy/motorcomm.c
index 796b68f4b499..7e6ac2c5e27e 100644
--- a/drivers/net/phy/motorcomm.c
+++ b/drivers/net/phy/motorcomm.c
@@ -50,18 +50,13 @@ static int yt8511_write_page(struct phy_device *phydev, int page)
static int yt8511_config_init(struct phy_device *phydev)
{
+ int oldpage, ret = 0;
unsigned int ge, fe;
- int ret, oldpage;
- /* set clock mode to 125mhz */
oldpage = phy_select_page(phydev, YT8511_EXT_CLK_GATE);
if (oldpage < 0)
goto err_restore_page;
- ret = __phy_modify(phydev, YT8511_PAGE, 0, YT8511_CLK_125M);
- if (ret < 0)
- goto err_restore_page;
-
/* set rgmii delay mode */
switch (phydev->interface) {
case PHY_INTERFACE_MODE_RGMII:
@@ -80,14 +75,20 @@ static int yt8511_config_init(struct phy_device *phydev)
ge = YT8511_DELAY_RX | YT8511_DELAY_GE_TX_EN;
fe = YT8511_DELAY_FE_TX_EN;
break;
- default: /* leave everything alone in other modes */
- break;
+ default: /* other modes are not supported */
+ ret = -EOPNOTSUPP;
+ goto err_restore_page;
}
ret = __phy_modify(phydev, YT8511_PAGE, (YT8511_DELAY_RX | YT8511_DELAY_GE_TX_EN), ge);
if (ret < 0)
goto err_restore_page;
+ /* set clock mode to 125mhz */
+ ret = __phy_modify(phydev, YT8511_PAGE, 0, YT8511_CLK_125M);
+ if (ret < 0)
+ goto err_restore_page;
+
/* fast ethernet delay is in a separate page */
ret = __phy_write(phydev, YT8511_PAGE_SELECT, YT8511_EXT_DELAY_DRIVE);
if (ret < 0)
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 46160baaafe3..9ae9cc6b23c2 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -68,7 +68,8 @@ static int ns_ack_interrupt(struct phy_device *phydev)
return ret;
/* Clear the interrupt status bit by writing a “1”
- * to the corresponding bit in INT_CLEAR (2:0 are reserved) */
+ * to the corresponding bit in INT_CLEAR (2:0 are reserved)
+ */
ret = phy_write(phydev, DP83865_INT_CLEAR, ret & ~0x7);
return ret;
@@ -150,7 +151,8 @@ static int ns_config_init(struct phy_device *phydev)
{
ns_giga_speed_fallback(phydev, ALL_FALLBACK_ON);
/* In the latest MAC or switches design, the 10 Mbps loopback
- is desired to be turned off. */
+ * is desired to be turned off.
+ */
ns_10_base_t_hdx_loopack(phydev, hdx_loopback_off);
return ns_ack_interrupt(phydev);
}
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index 512e4cb5d2c2..91a327f67a42 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -325,7 +325,7 @@ static void nxp_c45_reconstruct_ts(struct timespec64 *ts,
{
ts->tv_nsec = hwts->nsec;
if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
- ts->tv_sec -= BIT(2);
+ ts->tv_sec -= TS_SEC_MASK + 1;
ts->tv_sec &= ~TS_SEC_MASK;
ts->tv_sec |= hwts->sec & TS_SEC_MASK;
}
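
Assuming TS_SEC_MASK covers the two low bits of the seconds counter (a modulus of 4), the fix handles a wrap as follows: a packet stamped at second 103 carries hardware low bits 3; if the free-running time read back is already 104 (low bits 0), then 0 < 3 triggers the subtraction, 104 - 4 = 100, and splicing the hardware bits back in yields (100 & ~3) | 3 = 103, the correct second. The old hard-coded "-= BIT(2)" equalled TS_SEC_MASK + 1 only while the mask was exactly two bits wide; the new form tracks the mask definition.
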
@@ -427,8 +427,8 @@ static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
nxp_c45_process_txts(priv, &hwts);
}
- nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
+ nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
hwts.sec = ts_raw >> 30;
hwts.nsec = ts_raw & GENMASK(29, 0);
@@ -1035,6 +1035,12 @@ static int nxp_c45_config_init(struct phy_device *phydev)
return ret;
}
+ /* Bug workaround for SJA1110 rev B: enable write access
+ * to MDIO_MMD_PMAPMD
+ */
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
+ phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);
+
phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
PHY_CONFIG_AUTO);
@@ -1090,7 +1096,7 @@ static int nxp_c45_probe(struct phy_device *phydev)
VEND1_PORT_ABILITIES);
ptp_ability = !!(ptp_ability & PTP_ABILITY);
if (!ptp_ability) {
- phydev_info(phydev, "the phy does not support PTP");
+ phydev_dbg(phydev, "the phy does not support PTP\n");
goto no_ptp_support;
}
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index f4816b7d31b3..c617dbcad6ea 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -172,7 +172,7 @@ EXPORT_SYMBOL_GPL(genphy_c45_an_config_aneg);
* @phydev: target phy_device struct
*
* Disable auto-negotiation in the Clause 45 PHY. The link parameters
- * parameters are controlled through the PMA/PMD MMD registers.
+ * are controlled through the PMA/PMD MMD registers.
*
* Returns zero on success, negative errno code on failure.
*/
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 8d333d3084ed..2870c33b8975 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -76,7 +76,8 @@ EXPORT_SYMBOL_GPL(phy_duplex_to_str);
/* A mapping of all SUPPORTED settings to speed/duplex. This table
* must be grouped by speed and sorted in descending match priority
- * - iow, descending speed. */
+ * - iow, descending speed.
+ */
#define PHY_SETTING(s, d, b) { .speed = SPEED_ ## s, .duplex = DUPLEX_ ## d, \
.bit = ETHTOOL_LINK_MODE_ ## b ## _BIT}
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1f0512e39c65..8eeb26d8aeb7 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -380,8 +380,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
else if (val & BMCR_SPEED100)
phydev->speed = SPEED_100;
else phydev->speed = SPEED_10;
- }
- else {
+ } else {
if (phydev->autoneg == AUTONEG_DISABLE)
change_autoneg = true;
phydev->autoneg = AUTONEG_ENABLE;
@@ -1136,6 +1135,9 @@ void phy_state_machine(struct work_struct *work)
else if (do_suspend)
phy_suspend(phydev);
+ if (err == -ENODEV)
+ return;
+
if (err < 0)
phy_error(phydev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 1539ea021ac0..5d5f9a9ee768 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/acpi.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/errno.h>
@@ -833,6 +834,27 @@ static int get_phy_c22_id(struct mii_bus *bus, int addr, u32 *phy_id)
return 0;
}
+/* Extract the phy ID from the compatible string of the form
+ * ethernet-phy-idAAAA.BBBB.
+ */
+int fwnode_get_phy_id(struct fwnode_handle *fwnode, u32 *phy_id)
+{
+ unsigned int upper, lower;
+ const char *cp;
+ int ret;
+
+ ret = fwnode_property_read_string(fwnode, "compatible", &cp);
+ if (ret)
+ return ret;
+
+ if (sscanf(cp, "ethernet-phy-id%4x.%4x", &upper, &lower) != 2)
+ return -EINVAL;
+
+ *phy_id = ((upper & GENMASK(15, 0)) << 16) | (lower & GENMASK(15, 0));
+ return 0;
+}
+EXPORT_SYMBOL(fwnode_get_phy_id);
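
For example, a firmware node carrying compatible = "ethernet-phy-id004d.d072" (values illustrative) parses as follows:

	u32 phy_id;
	int err;

	err = fwnode_get_phy_id(fwnode, &phy_id);
	/* err == 0; phy_id == 0x004dd072, i.e. "004d" becomes the
	 * upper 16 bits and "d072" the lower 16 bits
	 */
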
+
/**
* get_phy_device - reads the specified PHY device and returns its @phy_device
* struct
@@ -870,6 +892,18 @@ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
if (r)
return ERR_PTR(r);
+ /* A PHY device such as the Marvell Alaska 88E2110 will return a PHY ID
+ * of 0, with no error, when probed using get_phy_c22_id(). Probe again
+ * with C45 to see if we can get a valid PHY ID in the C45 space and,
+ * if successful, create a C45 PHY device.
+ */
+ if (!is_c45 && phy_id == 0 && bus->probe_capabilities >= MDIOBUS_C45) {
+ r = get_phy_c45_ids(bus, addr, &c45_ids);
+ if (!r)
+ return phy_device_create(bus, addr, phy_id,
+ true, &c45_ids);
+ }
+
return phy_device_create(bus, addr, phy_id, is_c45, &c45_ids);
}
EXPORT_SYMBOL(get_phy_device);
@@ -923,8 +957,7 @@ EXPORT_SYMBOL(phy_device_register);
*/
void phy_device_remove(struct phy_device *phydev)
{
- if (phydev->mii_ts)
- unregister_mii_timestamper(phydev->mii_ts);
+ unregister_mii_timestamper(phydev->mii_ts);
device_del(&phydev->mdio.dev);
@@ -2864,6 +2897,90 @@ static bool phy_drv_supports_irq(struct phy_driver *phydrv)
}
/**
+ * fwnode_mdio_find_device - Given a fwnode, find the mdio_device
+ * @fwnode: pointer to the mdio_device's fwnode
+ *
+ * If successful, returns a pointer to the mdio_device with the embedded
+ * struct device refcount incremented by one, or NULL on failure.
+ * The caller should call put_device() on the mdio_device after its use.
+ */
+struct mdio_device *fwnode_mdio_find_device(struct fwnode_handle *fwnode)
+{
+ struct device *d;
+
+ if (!fwnode)
+ return NULL;
+
+ d = bus_find_device_by_fwnode(&mdio_bus_type, fwnode);
+ if (!d)
+ return NULL;
+
+ return to_mdio_device(d);
+}
+EXPORT_SYMBOL(fwnode_mdio_find_device);
+
+/**
+ * fwnode_phy_find_device - For provided phy_fwnode, find phy_device.
+ *
+ * @phy_fwnode: Pointer to the phy's fwnode.
+ *
+ * If successful, returns a pointer to the phy_device with the embedded
+ * struct device refcount incremented by one, or NULL on failure.
+ */
+struct phy_device *fwnode_phy_find_device(struct fwnode_handle *phy_fwnode)
+{
+ struct mdio_device *mdiodev;
+
+ mdiodev = fwnode_mdio_find_device(phy_fwnode);
+ if (!mdiodev)
+ return NULL;
+
+ if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY)
+ return to_phy_device(&mdiodev->dev);
+
+ put_device(&mdiodev->dev);
+
+ return NULL;
+}
+EXPORT_SYMBOL(fwnode_phy_find_device);
+
+/**
+ * device_phy_find_device - For the given device, get the phy_device
+ * @dev: Pointer to the given device
+ *
+ * Refer return conditions of fwnode_phy_find_device().
+ */
+struct phy_device *device_phy_find_device(struct device *dev)
+{
+ return fwnode_phy_find_device(dev_fwnode(dev));
+}
+EXPORT_SYMBOL_GPL(device_phy_find_device);
+
+/**
+ * fwnode_get_phy_node - Get the phy_node using the named reference.
+ * @fwnode: Pointer to fwnode from which phy_node has to be obtained.
+ *
+ * Refer return conditions of fwnode_find_reference().
+ * For ACPI, only "phy-handle" is supported. The legacy DT properties "phy"
+ * and "phy-device" are not supported in ACPI. DT supports all three
+ * named references to the phy node.
+ */
+struct fwnode_handle *fwnode_get_phy_node(struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *phy_node;
+
+ /* Only phy-handle is used for ACPI */
+ phy_node = fwnode_find_reference(fwnode, "phy-handle", 0);
+ if (is_acpi_node(fwnode) || !IS_ERR(phy_node))
+ return phy_node;
+ phy_node = fwnode_find_reference(fwnode, "phy", 0);
+ if (IS_ERR(phy_node))
+ phy_node = fwnode_find_reference(fwnode, "phy-device", 0);
+ return phy_node;
+}
+EXPORT_SYMBOL_GPL(fwnode_get_phy_node);
+
+/**
* phy_probe - probe and init a PHY device
* @dev: device to probe and init
*
@@ -2904,15 +3021,14 @@ static int phy_probe(struct device *dev)
* a controller will attach, and may modify one
* or both of these values
*/
- if (phydrv->features) {
+ if (phydrv->features)
linkmode_copy(phydev->supported, phydrv->features);
- } else if (phydrv->get_features) {
+ else if (phydrv->get_features)
err = phydrv->get_features(phydev);
- } else if (phydev->is_c45) {
+ else if (phydev->is_c45)
err = genphy_c45_pma_read_abilities(phydev);
- } else {
+ else
err = genphy_read_abilities(phydev);
- }
if (err)
goto out;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 96d8e88b4e46..eb29ef53d971 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -5,6 +5,7 @@
*
* Copyright (C) 2015 Russell King
*/
+#include <linux/acpi.h>
#include <linux/ethtool.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
@@ -181,7 +182,8 @@ static int phylink_parse_fixedlink(struct phylink *pl,
pl->link_config.duplex = DUPLEX_FULL;
/* We treat the "pause" and "asym-pause" terminology as
- * defining the link partner's ability. */
+ * defining the link partner's ability.
+ */
if (fwnode_property_read_bool(fixed_node, "pause"))
__set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
pl->link_config.lp_advertising);
@@ -311,6 +313,11 @@ static int phylink_parse_mode(struct phylink *pl, struct fwnode_handle *fwnode)
phylink_set(pl->supported, 5000baseT_Full);
break;
+ case PHY_INTERFACE_MODE_25GBASER:
+ phylink_set(pl->supported, 25000baseCR_Full);
+ phylink_set(pl->supported, 25000baseKR_Full);
+ phylink_set(pl->supported, 25000baseSR_Full);
+ fallthrough;
case PHY_INTERFACE_MODE_USXGMII:
case PHY_INTERFACE_MODE_10GKR:
case PHY_INTERFACE_MODE_10GBASER:
@@ -679,7 +686,8 @@ static void phylink_resolve(struct work_struct *w)
phylink_mac_pcs_get_state(pl, &link_state);
/* If we have a phy, the "up" state is the union of
- * both the PHY and the MAC */
+ * both the PHY and the MAC
+ */
if (pl->phydev)
link_state.link &= pl->phy_state.link;
@@ -688,7 +696,8 @@ static void phylink_resolve(struct work_struct *w)
link_state.interface = pl->phy_state.interface;
/* If we have a PHY, we need to update with
- * the PHY flow control bits. */
+ * the PHY flow control bits.
+ */
link_state.pause = pl->phy_state.pause;
mac_config = true;
}
@@ -1084,7 +1093,26 @@ EXPORT_SYMBOL_GPL(phylink_connect_phy);
int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn,
u32 flags)
{
- struct device_node *phy_node;
+ return phylink_fwnode_phy_connect(pl, of_fwnode_handle(dn), flags);
+}
+EXPORT_SYMBOL_GPL(phylink_of_phy_connect);
+
+/**
+ * phylink_fwnode_phy_connect() - connect the PHY specified in the fwnode.
+ * @pl: a pointer to a &struct phylink returned from phylink_create()
+ * @fwnode: a pointer to a &struct fwnode_handle.
+ * @flags: PHY-specific flags to communicate to the PHY device driver
+ *
+ * Connect the phy specified @fwnode to the phylink instance specified
+ * by @pl.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int phylink_fwnode_phy_connect(struct phylink *pl,
+ struct fwnode_handle *fwnode,
+ u32 flags)
+{
+ struct fwnode_handle *phy_fwnode;
struct phy_device *phy_dev;
int ret;
@@ -1094,28 +1122,25 @@ int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn,
phy_interface_mode_is_8023z(pl->link_interface)))
return 0;
- phy_node = of_parse_phandle(dn, "phy-handle", 0);
- if (!phy_node)
- phy_node = of_parse_phandle(dn, "phy", 0);
- if (!phy_node)
- phy_node = of_parse_phandle(dn, "phy-device", 0);
-
- if (!phy_node) {
+ phy_fwnode = fwnode_get_phy_node(fwnode);
+ if (IS_ERR(phy_fwnode)) {
if (pl->cfg_link_an_mode == MLO_AN_PHY)
return -ENODEV;
return 0;
}
- phy_dev = of_phy_find_device(phy_node);
+ phy_dev = fwnode_phy_find_device(phy_fwnode);
/* We're done with the phy_node handle */
- of_node_put(phy_node);
+ fwnode_handle_put(phy_fwnode);
if (!phy_dev)
return -ENODEV;
ret = phy_attach_direct(pl->netdev, phy_dev, flags,
pl->link_interface);
- if (ret)
+ if (ret) {
+ phy_device_free(phy_dev);
return ret;
+ }
ret = phylink_bringup_phy(pl, phy_dev, pl->link_config.interface);
if (ret)
@@ -1123,7 +1148,7 @@ int phylink_of_phy_connect(struct phylink *pl, struct device_node *dn,
return ret;
}
-EXPORT_SYMBOL_GPL(phylink_of_phy_connect);
+EXPORT_SYMBOL_GPL(phylink_fwnode_phy_connect);
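
A minimal sketch of a MAC driver probe path using the new fwnode variant (the priv layout is hypothetical); DT-only callers can keep using phylink_of_phy_connect(), which is now a thin wrapper around it:

	int err;

	err = phylink_fwnode_phy_connect(priv->phylink,
					 dev_fwnode(priv->dev), 0);
	if (err) {
		dev_err(priv->dev, "could not attach PHY: %d\n", err);
		return err;
	}
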
/**
* phylink_disconnect_phy() - disconnect any PHY attached to the phylink
@@ -1358,11 +1383,10 @@ int phylink_ethtool_ksettings_get(struct phylink *pl,
ASSERT_RTNL();
- if (pl->phydev) {
+ if (pl->phydev)
phy_ethtool_ksettings_get(pl->phydev, kset);
- } else {
+ else
kset->base.port = pl->link_port;
- }
linkmode_copy(kset->link_modes.supported, pl->supported);
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index d5c1aaa8236a..30d15f7c9b03 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -100,6 +100,7 @@ static int qs6612_ack_interrupt(struct phy_device *phydev)
static int qs6612_config_intr(struct phy_device *phydev)
{
int err;
+
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
/* clear any interrupts before enabling them */
err = qs6612_ack_interrupt(phydev);
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 821e85a97367..11be60333fa8 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -8,6 +8,7 @@
* Copyright (c) 2004 Freescale Semiconductor, Inc.
*/
#include <linux/bitops.h>
+#include <linux/of.h>
#include <linux/phy.h>
#include <linux/module.h>
#include <linux/delay.h>
@@ -27,6 +28,7 @@
#define RTL821x_PAGE_SELECT 0x1f
#define RTL8211F_PHYCR1 0x18
+#define RTL8211F_PHYCR2 0x19
#define RTL8211F_INSR 0x1d
#define RTL8211F_TX_DELAY BIT(8)
@@ -40,6 +42,8 @@
#define RTL8211E_TX_DELAY BIT(12)
#define RTL8211E_RX_DELAY BIT(11)
+#define RTL8211F_CLKOUT_EN BIT(0)
+
#define RTL8201F_ISR 0x1e
#define RTL8201F_ISR_ANERR BIT(15)
#define RTL8201F_ISR_DUPLEX BIT(13)
@@ -71,6 +75,11 @@ MODULE_DESCRIPTION("Realtek PHY driver");
MODULE_AUTHOR("Johnson Leung");
MODULE_LICENSE("GPL");
+struct rtl821x_priv {
+ u16 phycr1;
+ u16 phycr2;
+};
+
static int rtl821x_read_page(struct phy_device *phydev)
{
return __phy_read(phydev, RTL821x_PAGE_SELECT);
@@ -81,6 +90,37 @@ static int rtl821x_write_page(struct phy_device *phydev, int page)
return __phy_write(phydev, RTL821x_PAGE_SELECT, page);
}
+static int rtl821x_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct rtl821x_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = phy_read_paged(phydev, 0xa43, RTL8211F_PHYCR1);
+ if (ret < 0)
+ return ret;
+
+ priv->phycr1 = ret & (RTL8211F_ALDPS_PLL_OFF | RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_XTAL_OFF);
+ if (of_property_read_bool(dev->of_node, "realtek,aldps-enable"))
+ priv->phycr1 |= RTL8211F_ALDPS_PLL_OFF | RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_XTAL_OFF;
+
+ ret = phy_read_paged(phydev, 0xa43, RTL8211F_PHYCR2);
+ if (ret < 0)
+ return ret;
+
+ priv->phycr2 = ret & RTL8211F_CLKOUT_EN;
+ if (of_property_read_bool(dev->of_node, "realtek,clkout-disable"))
+ priv->phycr2 &= ~RTL8211F_CLKOUT_EN;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
static int rtl8201_ack_interrupt(struct phy_device *phydev)
{
int err;
@@ -291,13 +331,19 @@ static int rtl8211c_config_init(struct phy_device *phydev)
static int rtl8211f_config_init(struct phy_device *phydev)
{
+ struct rtl821x_priv *priv = phydev->priv;
struct device *dev = &phydev->mdio.dev;
u16 val_txdly, val_rxdly;
- u16 val;
int ret;
- val = RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_PLL_OFF | RTL8211F_ALDPS_XTAL_OFF;
- phy_modify_paged_changed(phydev, 0xa43, RTL8211F_PHYCR1, val, val);
+ ret = phy_modify_paged_changed(phydev, 0xa43, RTL8211F_PHYCR1,
+ RTL8211F_ALDPS_PLL_OFF | RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_XTAL_OFF,
+ priv->phycr1);
+ if (ret < 0) {
+ dev_err(dev, "aldps mode configuration failed: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
switch (phydev->interface) {
case PHY_INTERFACE_MODE_RGMII:
@@ -354,6 +400,27 @@ static int rtl8211f_config_init(struct phy_device *phydev)
val_rxdly ? "enabled" : "disabled");
}
+ ret = phy_modify_paged(phydev, 0xa43, RTL8211F_PHYCR2,
+ RTL8211F_CLKOUT_EN, priv->phycr2);
+ if (ret < 0) {
+ dev_err(dev, "clkout configuration failed: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ return genphy_soft_reset(phydev);
+}
+
+static int rtl821x_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_resume(phydev);
+ if (ret < 0)
+ return ret;
+
+ msleep(20);
+
return 0;
}
@@ -847,12 +914,13 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc916),
.name = "RTL8211F Gigabit Ethernet",
+ .probe = rtl821x_probe,
.config_init = &rtl8211f_config_init,
.read_status = rtlgen_read_status,
.config_intr = &rtl8211f_config_intr,
.handle_interrupt = rtl8211f_handle_interrupt,
.suspend = genphy_suspend,
- .resume = genphy_resume,
+ .resume = rtl821x_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
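The probe/config_init split above works because the paged helpers are read-modify-write: rtl821x_probe() caches the desired PHYCR1/PHYCR2 bits (honouring the optional realtek,aldps-enable and realtek,clkout-disable properties), and rtl8211f_config_init() later hands them to phy_modify_paged() together with the full bit mask. A minimal userspace sketch of that update step, assuming the usual (old & ~mask) | (set & mask) semantics; the register value is made up:

#include <stdint.h>
#include <stdio.h>

#define RTL8211F_CLKOUT_EN 0x0001	/* BIT(0), as defined in the patch */

/* Sketch of the update performed by phy_modify_paged(): every bit in
 * 'mask' is first cleared, then re-set from 'set'. Passing set == 0
 * with mask == RTL8211F_CLKOUT_EN therefore disables CLKOUT, while
 * set == RTL8211F_CLKOUT_EN leaves it enabled.
 */
static uint16_t rmw(uint16_t old, uint16_t mask, uint16_t set)
{
	return (old & ~mask) | (set & mask);
}

int main(void)
{
	uint16_t phycr2 = 0x0041;	/* hypothetical register content */

	/* realtek,clkout-disable present: priv->phycr2 has the bit clear */
	printf("0x%04x\n", rmw(phycr2, RTL8211F_CLKOUT_EN, 0x0000)); /* 0x0040 */
	return 0;
}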
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index e61de66e973b..7362f8c3271c 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -392,6 +392,11 @@ EXPORT_SYMBOL_GPL(sfp_parse_support);
phy_interface_t sfp_select_interface(struct sfp_bus *bus,
unsigned long *link_modes)
{
+ if (phylink_test(link_modes, 25000baseCR_Full) ||
+ phylink_test(link_modes, 25000baseKR_Full) ||
+ phylink_test(link_modes, 25000baseSR_Full))
+ return PHY_INTERFACE_MODE_25GBASER;
+
if (phylink_test(link_modes, 10000baseCR_Full) ||
phylink_test(link_modes, 10000baseSR_Full) ||
phylink_test(link_modes, 10000baseLR_Full) ||
@@ -624,14 +629,14 @@ static void sfp_upstream_clear(struct sfp_bus *bus)
* be put via sfp_bus_put() when done.
*
* Returns:
- * - on success, a pointer to the sfp_bus structure,
- * - %NULL if no SFP is specified,
- * - on failure, an error pointer value:
+ * - on success, a pointer to the sfp_bus structure,
+ * - %NULL if no SFP is specified,
+ * - on failure, an error pointer value:
*
- * - corresponding to the errors detailed for
- * fwnode_property_get_reference_args().
- * - %-ENOMEM if we failed to allocate the bus.
- * - an error from the upstream's connect_phy() method.
+ * - corresponding to the errors detailed for
+ * fwnode_property_get_reference_args().
+ * - %-ENOMEM if we failed to allocate the bus.
+ * - an error from the upstream's connect_phy() method.
*/
struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode)
{
@@ -666,14 +671,14 @@ EXPORT_SYMBOL_GPL(sfp_bus_find_fwnode);
* bus, so it is safe to put the bus after this call.
*
* Returns:
- * - on success, a pointer to the sfp_bus structure,
- * - %NULL if no SFP is specified,
- * - on failure, an error pointer value:
+ * - on success, a pointer to the sfp_bus structure,
+ * - %NULL if no SFP is specified,
+ * - on failure, an error pointer value:
*
- * - corresponding to the errors detailed for
- * fwnode_property_get_reference_args().
- * - %-ENOMEM if we failed to allocate the bus.
- * - an error from the upstream's connect_phy() method.
+ * - corresponding to the errors detailed for
+ * fwnode_property_get_reference_args().
+ * - %-ENOMEM if we failed to allocate the bus.
+ * - an error from the upstream's connect_phy() method.
*/
int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
const struct sfp_upstream_ops *ops)
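The new 25G test in sfp_select_interface() sits deliberately ahead of the existing 10G block: the checks run fastest-first, so a module whose EEPROM advertises both 25G and 10G link modes resolves to PHY_INTERFACE_MODE_25GBASER rather than a slower interface. A toy model of that ordering; the mode bits here are invented for the sketch and are not the kernel's linkmode bit numbers:

#include <stdio.h>

/* Toy model of sfp_select_interface()'s ordering: test the fastest
 * link modes first, so a module advertising both resolves to the
 * faster interface.
 */
enum { MODE_10G = 1 << 0, MODE_25G = 1 << 1 };

static const char *select_interface(unsigned int modes)
{
	if (modes & MODE_25G)
		return "25gbase-r";
	if (modes & MODE_10G)
		return "10gbase-r";
	return "unknown";
}

int main(void)
{
	printf("%s\n", select_interface(MODE_10G | MODE_25G)); /* 25gbase-r */
	return 0;
}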
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 37f722c763d7..34e90216bd2c 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -2153,7 +2153,7 @@ static void sfp_sm_main(struct sfp *sfp, unsigned int event)
case SFP_S_INIT:
if (event == SFP_E_TIMEOUT && sfp->state & SFP_F_TX_FAULT) {
- /* TX_FAULT is still asserted after t_init or
+ /* TX_FAULT is still asserted after t_init
* or t_start_up, so assume there is a fault.
*/
sfp_sm_fault(sfp, SFP_S_INIT_TX_FAULT,
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index ca49c1ad3efc..8b5445a724ce 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -160,11 +160,11 @@ static const struct spi_device_id ks8995_id[] = {
MODULE_DEVICE_TABLE(spi, ks8995_id);
static const struct of_device_id ks8895_spi_of_match[] = {
- { .compatible = "micrel,ks8995" },
- { .compatible = "micrel,ksz8864" },
- { .compatible = "micrel,ksz8795" },
- { },
- };
+ { .compatible = "micrel,ks8995" },
+ { .compatible = "micrel,ksz8864" },
+ { .compatible = "micrel,ksz8795" },
+ { },
+};
MODULE_DEVICE_TABLE(of, ks8895_spi_of_match);
static inline u8 get_chip_id(u8 val)
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 431fe5e0ce31..309e4c3496c4 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -20,12 +20,12 @@
#include <linux/mii.h>
#include <linux/phy.h>
-#define MII_XCIIS 0x11 /* Configuration Info IRQ & Status Reg */
-#define MII_XIE 0x12 /* Interrupt Enable Register */
+#define MII_XCIIS 0x11 /* Configuration Info IRQ & Status Reg */
+#define MII_XIE 0x12 /* Interrupt Enable Register */
#define MII_XIE_DEFAULT_MASK 0x0070 /* ANE complete, Remote Fault, Link Down */
#define STE101P_PHY_ID 0x00061c50
-#define STE100P_PHY_ID 0x1c040011
+#define STE100P_PHY_ID 0x1c040011
static int ste10Xp_config_init(struct phy_device *phydev)
{
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 16704e243162..897b979ec03c 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -249,7 +249,8 @@ static int vsc73xx_config_aneg(struct phy_device *phydev)
/* This adds a skew for both TX and RX clocks, so the skew should only be
* applied to "rgmii-id" interfaces. It may not work as expected
- * on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces. */
+ * on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces.
+ */
static int vsc8601_add_skew(struct phy_device *phydev)
{
int ret;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 179308782888..4c5d69732a7e 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -164,6 +164,8 @@ config USB_NET_AX8817X
depends on USB_USBNET
select CRC32
select PHYLIB
+ select AX88796B_PHY
+ imply NET_SELFTESTS
default y
help
This option adds support for ASIX AX88xxx based USB 2.0
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index 3b53685301de..e1994a246122 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -25,6 +25,8 @@
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
+#include <linux/phy.h>
+#include <net/selftests.h>
#define DRIVER_VERSION "22-Dec-2011"
#define DRIVER_NAME "asix"
@@ -178,6 +180,10 @@ struct asix_common_private {
u16 presvd_phy_advertise;
u16 presvd_phy_bmcr;
struct asix_rx_fixup_info rx_fixup_info;
+ struct mii_bus *mdio;
+ struct phy_device *phydev;
+ u16 phy_addr;
+ char phy_name[20];
};
extern const struct driver_info ax88172a_info;
@@ -205,8 +211,7 @@ struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
int asix_set_sw_mii(struct usbnet *dev, int in_pm);
int asix_set_hw_mii(struct usbnet *dev, int in_pm);
-int asix_read_phy_addr(struct usbnet *dev, int internal);
-int asix_get_phy_addr(struct usbnet *dev);
+int asix_read_phy_addr(struct usbnet *dev, bool internal);
int asix_sw_reset(struct usbnet *dev, u8 flags, int in_pm);
@@ -215,6 +220,7 @@ int asix_write_rx_ctl(struct usbnet *dev, u16 mode, int in_pm);
u16 asix_read_medium_status(struct usbnet *dev, int in_pm);
int asix_write_medium_mode(struct usbnet *dev, u16 mode, int in_pm);
+void asix_adjust_link(struct net_device *netdev);
int asix_write_gpio(struct usbnet *dev, u16 value, int sleep, int in_pm);
@@ -223,6 +229,9 @@ void asix_set_multicast(struct net_device *net);
int asix_mdio_read(struct net_device *netdev, int phy_id, int loc);
void asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val);
+int asix_mdio_bus_read(struct mii_bus *bus, int phy_id, int regnum);
+int asix_mdio_bus_write(struct mii_bus *bus, int phy_id, int regnum, u16 val);
+
int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc);
void asix_mdio_write_nopm(struct net_device *netdev, int phy_id, int loc,
int val);
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 7bc6e8f856fe..ac92bc52a85e 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -288,32 +288,33 @@ int asix_set_hw_mii(struct usbnet *dev, int in_pm)
return ret;
}
-int asix_read_phy_addr(struct usbnet *dev, int internal)
+int asix_read_phy_addr(struct usbnet *dev, bool internal)
{
- int offset = (internal ? 1 : 0);
+ int ret, offset;
u8 buf[2];
- int ret = asix_read_cmd(dev, AX_CMD_READ_PHY_ID, 0, 0, 2, buf, 0);
- netdev_dbg(dev->net, "asix_get_phy_addr()\n");
+ ret = asix_read_cmd(dev, AX_CMD_READ_PHY_ID, 0, 0, 2, buf, 0);
+ if (ret < 0)
+ goto error;
if (ret < 2) {
- netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret);
- goto out;
+ ret = -EIO;
+ goto error;
}
- netdev_dbg(dev->net, "asix_get_phy_addr() returning 0x%04x\n",
- *((__le16 *)buf));
+
+ offset = (internal ? 1 : 0);
ret = buf[offset];
-out:
+ netdev_dbg(dev->net, "%s PHY address 0x%x\n",
+ internal ? "internal" : "external", ret);
+
return ret;
-}
-int asix_get_phy_addr(struct usbnet *dev)
-{
- /* return the address of the internal phy */
- return asix_read_phy_addr(dev, 1);
-}
+error:
+ netdev_err(dev->net, "Error reading PHY_ID register: %02x\n", ret);
+ return ret;
+}
int asix_sw_reset(struct usbnet *dev, u8 flags, int in_pm)
{
@@ -383,6 +384,27 @@ int asix_write_medium_mode(struct usbnet *dev, u16 mode, int in_pm)
return ret;
}
+/* set MAC link settings according to information from phylib */
+void asix_adjust_link(struct net_device *netdev)
+{
+ struct phy_device *phydev = netdev->phydev;
+ struct usbnet *dev = netdev_priv(netdev);
+ u16 mode = 0;
+
+ if (phydev->link) {
+ mode = AX88772_MEDIUM_DEFAULT;
+
+ if (phydev->duplex == DUPLEX_HALF)
+ mode &= ~AX_MEDIUM_FD;
+
+ if (phydev->speed != SPEED_100)
+ mode &= ~AX_MEDIUM_PS;
+ }
+
+ asix_write_medium_mode(dev, mode, 0);
+ phy_print_status(phydev);
+}
+
int asix_write_gpio(struct usbnet *dev, u16 value, int sleep, int in_pm)
{
int ret;
@@ -463,18 +485,23 @@ int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
return ret;
}
- asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id,
- (__u16)loc, 2, &res, 0);
- asix_set_hw_mii(dev, 0);
+ ret = asix_read_cmd(dev, AX_CMD_READ_MII_REG, phy_id, (__u16)loc, 2,
+ &res, 0);
+ if (ret < 0)
+ goto out;
+
+ ret = asix_set_hw_mii(dev, 0);
+out:
mutex_unlock(&dev->phy_mutex);
netdev_dbg(dev->net, "asix_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
phy_id, loc, le16_to_cpu(res));
- return le16_to_cpu(res);
+ return ret < 0 ? ret : le16_to_cpu(res);
}
-void asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val)
+static int __asix_mdio_write(struct net_device *netdev, int phy_id, int loc,
+ int val)
{
struct usbnet *dev = netdev_priv(netdev);
__le16 res = cpu_to_le16(val);
@@ -494,15 +521,40 @@ void asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val)
ret = asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG,
0, 0, 1, &smsr, 0);
} while (!(smsr & AX_HOST_EN) && (i++ < 30) && (ret != -ENODEV));
- if (ret == -ENODEV) {
- mutex_unlock(&dev->phy_mutex);
- return;
- }
- asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id,
- (__u16)loc, 2, &res, 0);
- asix_set_hw_mii(dev, 0);
+ if (ret == -ENODEV)
+ goto out;
+
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2,
+ &res, 0);
+ if (ret < 0)
+ goto out;
+
+ ret = asix_set_hw_mii(dev, 0);
+out:
mutex_unlock(&dev->phy_mutex);
+
+ return ret < 0 ? ret : 0;
+}
+
+void asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val)
+{
+ __asix_mdio_write(netdev, phy_id, loc, val);
+}
+
+/* MDIO read and write wrappers for phylib */
+int asix_mdio_bus_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+ struct usbnet *priv = bus->priv;
+
+ return asix_mdio_read(priv->net, phy_id, regnum);
+}
+
+int asix_mdio_bus_write(struct mii_bus *bus, int phy_id, int regnum, u16 val)
+{
+ struct usbnet *priv = bus->priv;
+
+ return __asix_mdio_write(priv->net, phy_id, regnum, val);
}
int asix_mdio_read_nopm(struct net_device *netdev, int phy_id, int loc)
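asix_adjust_link() replaces the old mii-based link_reset path: it starts from the full-featured AX88772_MEDIUM_DEFAULT and strips the duplex/speed bits that phylib's resolved link does not have, writing 0 when the link is down. A standalone sketch of that computation; the mask values are illustrative, the real ones live in asix.h:

#include <stdio.h>

/* Illustrative values only - the real AX_MEDIUM_* masks live in asix.h */
#define AX_MEDIUM_FD	0x0002	/* full duplex */
#define AX_MEDIUM_PS	0x0200	/* port speed 100M */
#define AX88772_MEDIUM_DEFAULT	(AX_MEDIUM_FD | AX_MEDIUM_PS | 0x0004)

/* Model of asix_adjust_link(): start from the full-featured default
 * and clear the bits the resolved link does not have; link down
 * writes 0, leaving the MAC idle.
 */
static unsigned short medium_mode(int link, int duplex_full, int speed)
{
	unsigned short mode = 0;

	if (link) {
		mode = AX88772_MEDIUM_DEFAULT;
		if (!duplex_full)
			mode &= ~AX_MEDIUM_FD;
		if (speed != 100)
			mode &= ~AX_MEDIUM_PS;
	}
	return mode;
}

int main(void)
{
	printf("0x%04x\n", medium_mode(1, 1, 100));	/* full default */
	printf("0x%04x\n", medium_mode(1, 0, 10));	/* 10M half duplex */
	return 0;
}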
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 19a8fafb8f04..aec97b021a73 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -262,7 +262,10 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
dev->mii.mdio_write = asix_mdio_write;
dev->mii.phy_id_mask = 0x3f;
dev->mii.reg_num_mask = 0x1f;
- dev->mii.phy_id = asix_get_phy_addr(dev);
+
+ dev->mii.phy_id = asix_read_phy_addr(dev, true);
+ if (dev->mii.phy_id < 0)
+ return dev->mii.phy_id;
dev->net->netdev_ops = &ax88172_netdev_ops;
dev->net->ethtool_ops = &ax88172_ethtool_ops;
@@ -280,9 +283,29 @@ out:
return ret;
}
+static void ax88772_ethtool_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ net_selftest_get_strings(data);
+ break;
+ }
+}
+
+static int ax88772_ethtool_get_sset_count(struct net_device *ndev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return net_selftest_get_count();
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct ethtool_ops ax88772_ethtool_ops = {
.get_drvinfo = asix_get_drvinfo,
- .get_link = asix_get_link,
+ .get_link = usbnet_get_link,
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
.get_wol = asix_get_wol,
@@ -290,37 +313,18 @@ static const struct ethtool_ops ax88772_ethtool_ops = {
.get_eeprom_len = asix_get_eeprom_len,
.get_eeprom = asix_get_eeprom,
.set_eeprom = asix_set_eeprom,
- .nway_reset = usbnet_nway_reset,
- .get_link_ksettings = usbnet_get_link_ksettings_mii,
- .set_link_ksettings = usbnet_set_link_ksettings_mii,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .self_test = net_selftest,
+ .get_strings = ax88772_ethtool_get_strings,
+ .get_sset_count = ax88772_ethtool_get_sset_count,
};
-static int ax88772_link_reset(struct usbnet *dev)
-{
- u16 mode;
- struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
-
- mii_check_media(&dev->mii, 1, 1);
- mii_ethtool_gset(&dev->mii, &ecmd);
- mode = AX88772_MEDIUM_DEFAULT;
-
- if (ethtool_cmd_speed(&ecmd) != SPEED_100)
- mode &= ~AX_MEDIUM_PS;
-
- if (ecmd.duplex != DUPLEX_FULL)
- mode &= ~AX_MEDIUM_FD;
-
- netdev_dbg(dev->net, "ax88772_link_reset() speed: %u duplex: %d setting mode to 0x%04x\n",
- ethtool_cmd_speed(&ecmd), ecmd.duplex, mode);
-
- asix_write_medium_mode(dev, mode, 0);
-
- return 0;
-}
-
static int ax88772_reset(struct usbnet *dev)
{
struct asix_data *data = (struct asix_data *)&dev->data;
+ struct asix_common_private *priv = dev->driver_priv;
int ret;
/* Rewrite MAC address */
@@ -339,6 +343,8 @@ static int ax88772_reset(struct usbnet *dev)
if (ret < 0)
goto out;
+ phy_start(priv->phydev);
+
return 0;
out:
@@ -583,7 +589,7 @@ static const struct net_device_ops ax88772_netdev_ops = {
.ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = asix_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = asix_ioctl,
+ .ndo_do_ioctl = phy_do_ioctl_running,
.ndo_set_rx_mode = asix_set_multicast,
};
@@ -592,6 +598,9 @@ static void ax88772_suspend(struct usbnet *dev)
struct asix_common_private *priv = dev->driver_priv;
u16 medium;
+ if (netif_running(dev->net))
+ phy_stop(priv->phydev);
+
/* Stop MAC operation */
medium = asix_read_medium_status(dev, 1);
medium &= ~AX_MEDIUM_RE;
@@ -599,14 +608,6 @@ static void ax88772_suspend(struct usbnet *dev)
netdev_dbg(dev->net, "ax88772_suspend: medium=0x%04x\n",
asix_read_medium_status(dev, 1));
-
- /* Preserve BMCR for restoring */
- priv->presvd_phy_bmcr =
- asix_mdio_read_nopm(dev->net, dev->mii.phy_id, MII_BMCR);
-
- /* Preserve ANAR for restoring */
- priv->presvd_phy_advertise =
- asix_mdio_read_nopm(dev->net, dev->mii.phy_id, MII_ADVERTISE);
}
static int asix_suspend(struct usb_interface *intf, pm_message_t message)
@@ -620,39 +621,22 @@ static int asix_suspend(struct usb_interface *intf, pm_message_t message)
return usbnet_suspend(intf, message);
}
-static void ax88772_restore_phy(struct usbnet *dev)
-{
- struct asix_common_private *priv = dev->driver_priv;
-
- if (priv->presvd_phy_advertise) {
- /* Restore Advertisement control reg */
- asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_ADVERTISE,
- priv->presvd_phy_advertise);
-
- /* Restore BMCR */
- if (priv->presvd_phy_bmcr & BMCR_ANENABLE)
- priv->presvd_phy_bmcr |= BMCR_ANRESTART;
-
- asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_BMCR,
- priv->presvd_phy_bmcr);
-
- priv->presvd_phy_advertise = 0;
- priv->presvd_phy_bmcr = 0;
- }
-}
-
static void ax88772_resume(struct usbnet *dev)
{
+ struct asix_common_private *priv = dev->driver_priv;
int i;
for (i = 0; i < 3; i++)
if (!ax88772_hw_reset(dev, 1))
break;
- ax88772_restore_phy(dev);
+
+ if (netif_running(dev->net))
+ phy_start(priv->phydev);
}
static void ax88772a_resume(struct usbnet *dev)
{
+ struct asix_common_private *priv = dev->driver_priv;
int i;
for (i = 0; i < 3; i++) {
@@ -660,7 +644,8 @@ static void ax88772a_resume(struct usbnet *dev)
break;
}
- ax88772_restore_phy(dev);
+ if (netif_running(dev->net))
+ phy_start(priv->phydev);
}
static int asix_resume(struct usb_interface *intf)
@@ -674,12 +659,61 @@ static int asix_resume(struct usb_interface *intf)
return usbnet_resume(intf);
}
+static int ax88772_init_mdio(struct usbnet *dev)
+{
+ struct asix_common_private *priv = dev->driver_priv;
+
+ priv->mdio = devm_mdiobus_alloc(&dev->udev->dev);
+ if (!priv->mdio)
+ return -ENOMEM;
+
+ priv->mdio->priv = dev;
+ priv->mdio->read = &asix_mdio_bus_read;
+ priv->mdio->write = &asix_mdio_bus_write;
+ priv->mdio->name = "Asix MDIO Bus";
+ /* mii bus name is usb-<usb bus number>-<usb device number> */
+ snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
+ dev->udev->bus->busnum, dev->udev->devnum);
+
+ return devm_mdiobus_register(&dev->udev->dev, priv->mdio);
+}
+
+static int ax88772_init_phy(struct usbnet *dev)
+{
+ struct asix_common_private *priv = dev->driver_priv;
+ int ret;
+
+ ret = asix_read_phy_addr(dev, true);
+ if (ret < 0)
+ return ret;
+
+ priv->phy_addr = ret;
+
+ snprintf(priv->phy_name, sizeof(priv->phy_name), PHY_ID_FMT,
+ priv->mdio->id, priv->phy_addr);
+
+ priv->phydev = phy_connect(dev->net, priv->phy_name, &asix_adjust_link,
+ PHY_INTERFACE_MODE_INTERNAL);
+ if (IS_ERR(priv->phydev)) {
+ netdev_err(dev->net, "Could not connect to PHY device %s\n",
+ priv->phy_name);
+ ret = PTR_ERR(priv->phydev);
+ return ret;
+ }
+
+ priv->phydev->mac_managed_pm = 1;
+
+ phy_attached_info(priv->phydev);
+
+ return 0;
+}
+
static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
{
- int ret, i;
u8 buf[ETH_ALEN] = {0}, chipcode = 0;
- u32 phyid;
struct asix_common_private *priv;
+ int ret, i;
+ u32 phyid;
usbnet_get_endpoints(dev, intf);
@@ -711,14 +745,6 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
asix_set_netdev_dev_addr(dev, buf);
- /* Initialize MII structure */
- dev->mii.dev = dev->net;
- dev->mii.mdio_read = asix_mdio_read;
- dev->mii.mdio_write = asix_mdio_write;
- dev->mii.phy_id_mask = 0x1f;
- dev->mii.reg_num_mask = 0x1f;
- dev->mii.phy_id = asix_get_phy_addr(dev);
-
dev->net->netdev_ops = &ax88772_netdev_ops;
dev->net->ethtool_ops = &ax88772_ethtool_ops;
dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */
@@ -746,11 +772,11 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
dev->rx_urb_size = 2048;
}
- dev->driver_priv = kzalloc(sizeof(struct asix_common_private), GFP_KERNEL);
- if (!dev->driver_priv)
+ priv = devm_kzalloc(&dev->udev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- priv = dev->driver_priv;
+ dev->driver_priv = priv;
priv->presvd_phy_bmcr = 0;
priv->presvd_phy_advertise = 0;
@@ -762,13 +788,32 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
priv->suspend = ax88772_suspend;
}
+ ret = ax88772_init_mdio(dev);
+ if (ret)
+ return ret;
+
+ return ax88772_init_phy(dev);
+}
+
+static int ax88772_stop(struct usbnet *dev)
+{
+ struct asix_common_private *priv = dev->driver_priv;
+
+ /* On unplugged USB, we will get MDIO communication errors and the
+	 * PHY will be put into the PHY_HALTED state.
+ */
+ if (priv->phydev->state != PHY_HALTED)
+ phy_stop(priv->phydev);
+
return 0;
}
static void ax88772_unbind(struct usbnet *dev, struct usb_interface *intf)
{
+ struct asix_common_private *priv = dev->driver_priv;
+
+ phy_disconnect(priv->phydev);
asix_rx_fixup_common_free(dev->driver_priv);
- kfree(dev->driver_priv);
}
static const struct ethtool_ops ax88178_ethtool_ops = {
@@ -1081,7 +1126,10 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
dev->mii.phy_id_mask = 0x1f;
dev->mii.reg_num_mask = 0xff;
dev->mii.supports_gmii = 1;
- dev->mii.phy_id = asix_get_phy_addr(dev);
+
+ dev->mii.phy_id = asix_read_phy_addr(dev, true);
+ if (dev->mii.phy_id < 0)
+ return dev->mii.phy_id;
dev->net->netdev_ops = &ax88178_netdev_ops;
dev->net->ethtool_ops = &ax88178_ethtool_ops;
@@ -1153,8 +1201,8 @@ static const struct driver_info ax88772_info = {
.bind = ax88772_bind,
.unbind = ax88772_unbind,
.status = asix_status,
- .link_reset = ax88772_link_reset,
.reset = ax88772_reset,
+ .stop = ax88772_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET,
.rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
@@ -1165,7 +1213,6 @@ static const struct driver_info ax88772b_info = {
.bind = ax88772_bind,
.unbind = ax88772_unbind,
.status = asix_status,
- .link_reset = ax88772_link_reset,
.reset = ax88772_reset,
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
FLAG_MULTI_PACKET,
@@ -1201,7 +1248,6 @@ static const struct driver_info hg20f9_info = {
.bind = ax88772_bind,
.unbind = ax88772_unbind,
.status = asix_status,
- .link_reset = ax88772_link_reset,
.reset = ax88772_reset,
.flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
FLAG_MULTI_PACKET,
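ax88772_init_phy() builds the phylib device name from the MDIO bus id plus the PHY address via PHY_ID_FMT ("%s:%02x" in <linux/phy.h>), so phy_connect() ends up looking for something like "usb-001:003:10". A small sketch of that formatting with made-up bus/device/address values:

#include <stdio.h>

#define MII_BUS_ID_SIZE	61
#define PHY_ID_FMT	"%s:%02x"	/* as in <linux/phy.h> */

int main(void)
{
	char bus_id[MII_BUS_ID_SIZE];
	char phy_name[20];
	int busnum = 1, devnum = 3, phy_addr = 0x10;	/* made-up values */

	/* mii bus name is usb-<usb bus number>-<usb device number> */
	snprintf(bus_id, sizeof(bus_id), "usb-%03d:%03d", busnum, devnum);
	snprintf(phy_name, sizeof(phy_name), PHY_ID_FMT, bus_id, phy_addr);
	printf("%s\n", phy_name);	/* usb-001:003:10 */
	return 0;
}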
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index b404c9462dce..530947d7477b 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -25,20 +25,6 @@ struct ax88172a_private {
struct asix_rx_fixup_info rx_fixup_info;
};
-/* MDIO read and write wrappers for phylib */
-static int asix_mdio_bus_read(struct mii_bus *bus, int phy_id, int regnum)
-{
- return asix_mdio_read(((struct usbnet *)bus->priv)->net, phy_id,
- regnum);
-}
-
-static int asix_mdio_bus_write(struct mii_bus *bus, int phy_id, int regnum,
- u16 val)
-{
- asix_mdio_write(((struct usbnet *)bus->priv)->net, phy_id, regnum, val);
- return 0;
-}
-
/* set MAC link settings according to information from phylib */
static void ax88172a_adjust_link(struct net_device *netdev)
{
@@ -219,7 +205,12 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
goto free;
}
- priv->phy_addr = asix_read_phy_addr(dev, priv->use_embdphy);
+ ret = asix_read_phy_addr(dev, priv->use_embdphy);
+ if (ret < 0)
+ goto free;
+
+ priv->phy_addr = ret;
+
ax88172a_reset_phy(dev, priv->use_embdphy);
/* Asix framing packs multiple eth frames into a 2K usb bulk transfer */
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 0eeec80bec31..2e60bc1b9a6b 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -26,7 +26,7 @@
* for transport over USB using a simpler USB device model than the
* previous CDC "Ethernet Control Model" (ECM, or "CDC Ethernet").
*
- * For details, see www.usb.org/developers/devclass_docs/CDC_EEM10.pdf
+ * For details, see https://usb.org/sites/default/files/CDC_EEM10.pdf
*
* This version has been tested with GIGAntIC WuaoW SIM Smart Card on 2.6.24,
* 2.6.27 and 2.6.30rc2 kernel.
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 7eb0109e9baa..eb3817d70f2b 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -217,7 +217,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
goto bad_desc;
}
skip:
- /* Communcation class functions with bmCapabilities are not
+ /* Communication class functions with bmCapabilities are not
* RNDIS. But some Wireless class RNDIS functions use
* bmCapabilities for their own purpose. The failsafe is
* therefore applied only to Communication class RNDIS
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 42fb75057c15..4c4ab7b38d78 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -301,8 +301,8 @@ error:
return NULL;
}
-/* Some devices are known to send Neigbor Solicitation messages and
- * require Neigbor Advertisement replies. The IPv6 core will not
+/* Some devices are known to send Neighbor Solicitation messages and
+ * require Neighbor Advertisement replies. The IPv6 core will not
* respond since IFF_NOARP is set, so we must handle them ourselves.
*/
static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
@@ -589,7 +589,7 @@ static const struct driver_info cdc_mbim_info_zlp = {
*
* Note: The current implementation of this feature restricts each NTB
* to a single NDP, implying that multiplexed sessions cannot share an
- * NTB. This might affect performace for multiplexed sessions.
+ * NTB. This might affect performance for multiplexed sessions.
*/
static const struct driver_info cdc_mbim_info_ndp_to_end = {
.description = "CDC MBIM",
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 783d6139fdfa..c67f11e0e9a7 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -640,7 +640,7 @@ out:
/* set MTU to max supported by the device if necessary */
dev->net->mtu = min_t(int, dev->net->mtu, ctx->max_datagram_size - cdc_ncm_eth_hlen(dev));
- /* do not exceed operater preferred MTU */
+ /* do not exceed operator preferred MTU */
if (ctx->mbim_extended_desc) {
mbim_mtu = le16_to_cpu(ctx->mbim_extended_desc->wMTU);
if (mbim_mtu != 0 && mbim_mtu < dev->net->mtu)
@@ -697,7 +697,7 @@ static int cdc_ncm_setup(struct usbnet *dev)
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
u32 def_rx, def_tx;
- /* be conservative when selecting intial buffer size to
+ /* be conservative when selecting initial buffer size to
* increase the number of hosts this will work for
*/
def_rx = min_t(u32, CDC_NCM_NTB_DEF_SIZE_RX,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index fa30e78c6e49..54ef8492ca01 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1688,7 +1688,7 @@ static int hso_serial_tiocmset(struct tty_struct *tty,
spin_unlock_irqrestore(&serial->serial_lock, flags);
return usb_control_msg(serial->parent->usb,
- usb_rcvctrlpipe(serial->parent->usb, 0), 0x22,
+ usb_sndctrlpipe(serial->parent->usb, 0), 0x22,
0x21, val, if_num, NULL, 0,
USB_CTRL_SET_TIMEOUT);
}
@@ -2435,7 +2435,7 @@ static int hso_rfkill_set_block(void *data, bool blocked)
if (hso_dev->usb_gone)
rv = 0;
else
- rv = usb_control_msg(hso_dev->usb, usb_rcvctrlpipe(hso_dev->usb, 0),
+ rv = usb_control_msg(hso_dev->usb, usb_sndctrlpipe(hso_dev->usb, 0),
enabled ? 0x82 : 0x81, 0x40, 0, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
mutex_unlock(&hso_dev->mutex);
@@ -2617,32 +2617,31 @@ static struct hso_device *hso_create_bulk_serial_device(
num_urbs = 2;
serial->tiocmget = kzalloc(sizeof(struct hso_tiocmget),
GFP_KERNEL);
+ if (!serial->tiocmget)
+ goto exit;
serial->tiocmget->serial_state_notification
= kzalloc(sizeof(struct hso_serial_state_notification),
GFP_KERNEL);
- /* it isn't going to break our heart if serial->tiocmget
- * allocation fails don't bother checking this.
- */
- if (serial->tiocmget && serial->tiocmget->serial_state_notification) {
- tiocmget = serial->tiocmget;
- tiocmget->endp = hso_get_ep(interface,
- USB_ENDPOINT_XFER_INT,
- USB_DIR_IN);
- if (!tiocmget->endp) {
- dev_err(&interface->dev, "Failed to find INT IN ep\n");
- goto exit;
- }
-
- tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (tiocmget->urb) {
- mutex_init(&tiocmget->mutex);
- init_waitqueue_head(&tiocmget->waitq);
- } else
- hso_free_tiomget(serial);
+ if (!serial->tiocmget->serial_state_notification)
+ goto exit;
+ tiocmget = serial->tiocmget;
+ tiocmget->endp = hso_get_ep(interface,
+ USB_ENDPOINT_XFER_INT,
+ USB_DIR_IN);
+ if (!tiocmget->endp) {
+ dev_err(&interface->dev, "Failed to find INT IN ep\n");
+ goto exit;
}
- }
- else
+
+ tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!tiocmget->urb)
+ goto exit;
+
+ mutex_init(&tiocmget->mutex);
+ init_waitqueue_head(&tiocmget->waitq);
+ } else {
num_urbs = 1;
+ }
if (hso_serial_common_create(serial, num_urbs, BULK_URB_RX_SIZE,
BULK_URB_TX_SIZE))
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
index ed05f992c612..6fde41550de1 100644
--- a/drivers/net/usb/int51x1.c
+++ b/drivers/net/usb/int51x1.c
@@ -61,7 +61,7 @@ static struct sk_buff *int51x1_tx_fixup(struct usbnet *dev,
int need_tail = 0;
__le16 *len;
- /* if packet and our header is smaler than 64 pad to 64 (+ ZLP) */
+ /* if packet and our header is smaller than 64 pad to 64 (+ ZLP) */
if ((pack_with_header_len) < dev->maxpacket)
need_tail = dev->maxpacket - pack_with_header_len + 1;
/*
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 6acc5e904518..25489389ea49 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -298,7 +298,7 @@ struct lan78xx_net;
struct lan78xx_priv {
struct lan78xx_net *dev;
u32 rfe_ctl;
- u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicat hash table */
+ u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
struct mutex dataport_mutex; /* for dataport access */
@@ -1645,6 +1645,7 @@ static const struct ethtool_ops lan78xx_ethtool_ops = {
.get_strings = lan78xx_get_strings,
.get_wol = lan78xx_get_wol,
.set_wol = lan78xx_set_wol,
+ .get_ts_info = ethtool_op_get_ts_info,
.get_eee = lan78xx_get_eee,
.set_eee = lan78xx_set_eee,
.get_pauseparam = lan78xx_get_pause,
diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c
index 217a2d8fa47b..b2495fa80171 100644
--- a/drivers/net/usb/lg-vl600.c
+++ b/drivers/net/usb/lg-vl600.c
@@ -31,7 +31,7 @@
* Windows/Mac drivers do send a couple of such frames to the device
* during initialisation, with protocol set to 0x0906 or 0x0b06 and (what
* seems to be) a flag in the .dummy_flags. This doesn't seem necessary
- * for modem operation but can possibly be used for GPS or other funcitons.
+ * for modem operation but can possibly be used for GPS or other functions.
*/
struct vl600_frame_hdr {
@@ -72,7 +72,7 @@ static int vl600_bind(struct usbnet *dev, struct usb_interface *intf)
/* ARP packets don't go through, but they're also of no use. The
* subnet has only two hosts anyway: us and the gateway / DHCP
* server (probably simulated by modem firmware or network operator)
- * whose address changes everytime we connect to the intarwebz and
+ * whose address changes every time we connect to the intarwebz and
* who doesn't bother answering ARP requests either. So hardware
* addresses have no meaning, the destination and the source of every
* packet depend only on whether it is on the IN or OUT endpoint. */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 136ea06540ff..85039e17f4cd 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -2445,7 +2445,7 @@ static int rx_bottom(struct r8152 *tp, int budget)
unsigned int pkt_len, rx_frag_head_sz;
struct sk_buff *skb;
- /* limite the skb numbers for rx_queue */
+ /* limit the skb numbers for rx_queue */
if (unlikely(skb_queue_len(&tp->rx_queue) >= 1000))
break;
@@ -8107,6 +8107,37 @@ static void r8156b_init(struct r8152 *tp)
tp->coalesce = 15000; /* 15 us */
}
+static bool rtl_check_vendor_ok(struct usb_interface *intf)
+{
+ struct usb_host_interface *alt = intf->cur_altsetting;
+ struct usb_endpoint_descriptor *in, *out, *intr;
+
+ if (usb_find_common_endpoints(alt, &in, &out, &intr, NULL) < 0) {
+ dev_err(&intf->dev, "Expected endpoints are not found\n");
+ return false;
+ }
+
+ /* Check Rx endpoint address */
+ if (usb_endpoint_num(in) != 1) {
+ dev_err(&intf->dev, "Invalid Rx endpoint address\n");
+ return false;
+ }
+
+ /* Check Tx endpoint address */
+ if (usb_endpoint_num(out) != 2) {
+ dev_err(&intf->dev, "Invalid Tx endpoint address\n");
+ return false;
+ }
+
+ /* Check interrupt endpoint address */
+ if (usb_endpoint_num(intr) != 3) {
+ dev_err(&intf->dev, "Invalid interrupt endpoint address\n");
+ return false;
+ }
+
+ return true;
+}
+
static bool rtl_vendor_mode(struct usb_interface *intf)
{
struct usb_host_interface *alt = intf->cur_altsetting;
@@ -8115,12 +8146,15 @@ static bool rtl_vendor_mode(struct usb_interface *intf)
int i, num_configs;
if (alt->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC)
- return true;
+ return rtl_check_vendor_ok(intf);
/* The vendor mode is not always config #1, so to find it out. */
udev = interface_to_usbdev(intf);
c = udev->config;
num_configs = udev->descriptor.bNumConfigurations;
+ if (num_configs < 2)
+ return false;
+
for (i = 0; i < num_configs; (i++, c++)) {
struct usb_interface_descriptor *desc = NULL;
@@ -8135,7 +8169,8 @@ static bool rtl_vendor_mode(struct usb_interface *intf)
}
}
- WARN_ON_ONCE(i == num_configs);
+ if (i == num_configs)
+ dev_err(&intf->dev, "Unexpected Device\n");
return false;
}
@@ -8176,7 +8211,7 @@ static int rtl8152_post_reset(struct usb_interface *intf)
if (!tp)
return 0;
- /* reset the MAC adddress in case of policy change */
+ /* reset the MAC address in case of policy change */
if (determine_ethernet_addr(tp, &sa) >= 0) {
rtnl_lock();
dev_set_mac_address (tp->netdev, &sa, NULL);
@@ -8932,6 +8967,79 @@ static int rtl8152_set_ringparam(struct net_device *netdev,
return 0;
}
+static void rtl8152_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+ u16 bmcr, lcladv, rmtadv;
+ u8 cap;
+
+ if (usb_autopm_get_interface(tp->intf) < 0)
+ return;
+
+ mutex_lock(&tp->control);
+
+ bmcr = r8152_mdio_read(tp, MII_BMCR);
+ lcladv = r8152_mdio_read(tp, MII_ADVERTISE);
+ rmtadv = r8152_mdio_read(tp, MII_LPA);
+
+ mutex_unlock(&tp->control);
+
+ usb_autopm_put_interface(tp->intf);
+
+ if (!(bmcr & BMCR_ANENABLE)) {
+ pause->autoneg = 0;
+ pause->rx_pause = 0;
+ pause->tx_pause = 0;
+ return;
+ }
+
+ pause->autoneg = 1;
+
+ cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+
+ if (cap & FLOW_CTRL_RX)
+ pause->rx_pause = 1;
+
+ if (cap & FLOW_CTRL_TX)
+ pause->tx_pause = 1;
+}
+
+static int rtl8152_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
+{
+ struct r8152 *tp = netdev_priv(netdev);
+ u16 old, new1;
+ u8 cap = 0;
+ int ret;
+
+ ret = usb_autopm_get_interface(tp->intf);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&tp->control);
+
+ if (pause->autoneg && !(r8152_mdio_read(tp, MII_BMCR) & BMCR_ANENABLE)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (pause->rx_pause)
+ cap |= FLOW_CTRL_RX;
+
+ if (pause->tx_pause)
+ cap |= FLOW_CTRL_TX;
+
+ old = r8152_mdio_read(tp, MII_ADVERTISE);
+ new1 = (old & ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) | mii_advertise_flowctrl(cap);
+ if (old != new1)
+ r8152_mdio_write(tp, MII_ADVERTISE, new1);
+
+out:
+ mutex_unlock(&tp->control);
+ usb_autopm_put_interface(tp->intf);
+
+ return ret;
+}
+
static const struct ethtool_ops ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.get_drvinfo = rtl8152_get_drvinfo,
@@ -8954,6 +9062,8 @@ static const struct ethtool_ops ops = {
.set_tunable = rtl8152_set_tunable,
.get_ringparam = rtl8152_get_ringparam,
.set_ringparam = rtl8152_set_ringparam,
+ .get_pauseparam = rtl8152_get_pauseparam,
+ .set_pauseparam = rtl8152_set_pauseparam,
};
static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -9381,9 +9491,6 @@ static int rtl8152_probe(struct usb_interface *intf,
if (!rtl_vendor_mode(intf))
return -ENODEV;
- if (intf->cur_altsetting->desc.bNumEndpoints < 3)
- return -ENODEV;
-
usb_reset_device(udev);
netdev = alloc_etherdev(sizeof(struct r8152));
if (!netdev) {
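rtl8152_get_pauseparam() leans on mii_resolve_flowctrl_fdx() to turn the local and link-partner advertisements into FLOW_CTRL_RX/TX capabilities. A standalone sketch of that resolution, modelled on the helper in <linux/mii.h>: symmetric pause wins when both ends advertise it, otherwise asymmetric pause yields a single direction:

#include <stdio.h>

#define ADVERTISE_PAUSE_CAP	0x0400	/* symmetric pause */
#define ADVERTISE_PAUSE_ASYM	0x0800	/* asymmetric pause */
#define FLOW_CTRL_TX		0x01
#define FLOW_CTRL_RX		0x02

/* Sketch of mii_resolve_flowctrl_fdx(): both sides advertising
 * symmetric pause gives flow control in both directions; otherwise
 * the asymmetric bits decide which single direction (if any) is used.
 */
static unsigned char resolve(unsigned short lcladv, unsigned short rmtadv)
{
	unsigned char cap = 0;

	if (lcladv & rmtadv & ADVERTISE_PAUSE_CAP) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_PAUSE_ASYM) {
		if (lcladv & ADVERTISE_PAUSE_CAP)
			cap = FLOW_CTRL_RX;
		else if (rmtadv & ADVERTISE_PAUSE_CAP)
			cap = FLOW_CTRL_TX;
	}
	return cap;
}

int main(void)
{
	printf("%d\n", resolve(ADVERTISE_PAUSE_CAP, ADVERTISE_PAUSE_CAP)); /* 3 */
	return 0;
}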
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index f813ca9dec53..85a8b96e39a6 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -324,7 +324,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
* For RX we handle drivers that zero-pad to end-of-packet.
* Don't let userspace change these settings.
*
- * NOTE: there still seems to be wierdness here, as if we need
+ * NOTE: there still seems to be weirdness here, as if we need
* to do some more things to make sure WinCE targets accept this.
* They default to jumbograms of 8KB or 16KB, which is absurd
* for such low data rates and which is also more than Linux
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index f8cdabb9ef5a..b286993da67c 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -1483,7 +1483,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
ret = smsc75xx_wait_ready(dev, 0);
if (ret < 0) {
netdev_warn(dev->net, "device not ready in smsc75xx_bind\n");
- return ret;
+ goto err;
}
smsc75xx_init_mac_address(dev);
@@ -1492,7 +1492,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
ret = smsc75xx_reset(dev);
if (ret < 0) {
netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret);
- return ret;
+ goto err;
}
dev->net->netdev_ops = &smsc75xx_netdev_ops;
@@ -1502,6 +1502,10 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
dev->net->max_mtu = MAX_SINGLE_PACKET_SIZE;
return 0;
+
+err:
+ kfree(pdata);
+ return ret;
}
static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ecf62849f4c1..57a5a025255c 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1597,6 +1597,9 @@ void usbnet_disconnect (struct usb_interface *intf)
xdev->bus->bus_name, xdev->devpath,
dev->driver_info->description);
+ if (dev->driver_info->unbind)
+ dev->driver_info->unbind(dev, intf);
+
net = dev->net;
unregister_netdev (net);
@@ -1604,9 +1607,6 @@ void usbnet_disconnect (struct usb_interface *intf)
usb_scuttle_anchored_urbs(&dev->deferred);
- if (dev->driver_info->unbind)
- dev->driver_info->unbind (dev, intf);
-
usb_kill_urb(dev->interrupt);
usb_free_urb(dev->interrupt);
kfree(dev->padding_pkt);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 073fec4c0df1..0416a7e00914 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -401,6 +401,9 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
/* If whole_page, there is an offset between the beginning of the
* data and the allocated space, otherwise the data and the allocated
* space are aligned.
+ *
+ * Buffers with headroom use PAGE_SIZE as alloc size, see
+ * add_recvbuf_mergeable() + get_mergeable_buf_len()
*/
if (whole_page) {
/* Buffers with whole_page use PAGE_SIZE as alloc size,
@@ -730,6 +733,12 @@ static struct sk_buff *receive_small(struct net_device *dev,
len -= vi->hdr_len;
stats->bytes += len;
+ if (unlikely(len > GOOD_PACKET_LEN)) {
+ pr_debug("%s: rx error: len %u exceeds max size %d\n",
+ dev->name, len, GOOD_PACKET_LEN);
+ dev->stats.rx_length_errors++;
+ goto err_len;
+ }
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (xdp_prog) {
@@ -833,6 +842,7 @@ err:
err_xdp:
rcu_read_unlock();
stats->xdp_drops++;
+err_len:
stats->drops++;
put_page(page);
xdp_xmit:
@@ -886,6 +896,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
head_skb = NULL;
stats->bytes += len - vi->hdr_len;
+ if (unlikely(len > truesize)) {
+ pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
+ dev->name, len, (unsigned long)ctx);
+ dev->stats.rx_length_errors++;
+ goto err_skb;
+ }
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (xdp_prog) {
@@ -1012,13 +1028,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
}
rcu_read_unlock();
- if (unlikely(len > truesize)) {
- pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
- dev->name, len, (unsigned long)ctx);
- dev->stats.rx_length_errors++;
- goto err_skb;
- }
-
head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
metasize, !!headroom);
curr_skb = head_skb;
@@ -1627,7 +1636,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
virtio_is_little_endian(vi->vdev), false,
0))
- BUG();
+ return -EPROTO;
if (vi->mergeable_rx_bufs)
hdr->num_buffers = 0;
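Both receive paths above now validate the device-reported length before any XDP processing instead of after, so an oversized descriptor can no longer reach an XDP program. The check itself is a plain bound test; a sketch of the pattern, with the bound standing in for GOOD_PACKET_LEN or the truesize decoded from ctx:

#include <stdbool.h>
#include <stdio.h>

/* Sketch: reject device-reported lengths that exceed what was actually
 * allocated for the buffer, before handing the data to anything else.
 */
static bool rx_len_ok(unsigned int len, unsigned int bound)
{
	if (len > bound) {
		fprintf(stderr, "rx error: len %u exceeds max %u\n", len, bound);
		return false;	/* caller bumps rx_length_errors and drops */
	}
	return true;
}

int main(void)
{
	printf("%d\n", rx_len_ok(1500, 1514));	/* 1 */
	printf("%d\n", rx_len_ok(4000, 1514));	/* 0 */
	return 0;
}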
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 503e2fd7ce51..07eaef5e73c2 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -274,7 +274,7 @@ vrf_map_register_dev(struct net_device *dev, struct netlink_ext_ack *extack)
int res;
/* we pre-allocate elements used in the spin-locked section (so that we
- * keep the spinlock as short as possibile).
+ * keep the spinlock as short as possible).
*/
new_me = vrf_map_elem_alloc(GFP_KERNEL);
if (!new_me)
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 2369ca250cd6..43caab0b7dee 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -1,13 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* $Id: cosa.c,v 1.31 2000/03/08 17:47:16 kas Exp $ */
-/*
- * Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz>
+/* Copyright (C) 1995-1997 Jan "Yenya" Kasprzak <kas@fi.muni.cz>
* Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
*/
-/*
- * The driver for the SRP and COSA synchronous serial cards.
+/* The driver for the SRP and COSA synchronous serial cards.
*
* HARDWARE INFO
*
@@ -90,7 +88,7 @@
#define COSA_MAX_ID_STRING 128
/* Maximum length of the channel name */
-#define COSA_MAX_NAME (sizeof("cosaXXXcXXX")+1)
+#define COSA_MAX_NAME (sizeof("cosaXXXcXXX") + 1)
/* Per-channel data structure */
@@ -124,9 +122,9 @@ struct channel_data {
};
/* cosa->firmware_status bits */
-#define COSA_FW_RESET (1<<0) /* Is the ROM monitor active? */
-#define COSA_FW_DOWNLOAD (1<<1) /* Is the microcode downloaded? */
-#define COSA_FW_START (1<<2) /* Is the microcode running? */
+#define COSA_FW_RESET BIT(0) /* Is the ROM monitor active? */
+#define COSA_FW_DOWNLOAD BIT(1) /* Is the microcode downloaded? */
+#define COSA_FW_START BIT(2) /* Is the microcode running? */
struct cosa_data {
int num; /* Card number */
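The firmware-status flags are converted from open-coded shifts to the BIT() macro; the two spellings are equivalent, BIT(n) being (in simplified form) an unsigned-long 1 shifted left by n:

#include <stdio.h>

#define BIT(nr)	(1UL << (nr))	/* simplified form of the kernel macro */

int main(void)
{
	/* COSA_FW_RESET / COSA_FW_DOWNLOAD / COSA_FW_START as rewritten */
	printf("%lu %lu %lu\n", BIT(0), BIT(1), BIT(2));	/* 1 2 4 */
	return 0;
}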
@@ -152,28 +150,25 @@ struct cosa_data {
char *type; /* card type */
};
-/*
- * Define this if you want all the possible ports to be autoprobed.
+/* Define this if you want all the possible ports to be autoprobed.
* It is here but it probably is not a good idea to use this.
*/
-/* #define COSA_ISA_AUTOPROBE 1 */
+/* #define COSA_ISA_AUTOPROBE 1 */
-/*
- * Character device major number. 117 was allocated for us.
+/* Character device major number. 117 was allocated for us.
* The value of 0 means to allocate a first free one.
*/
static DEFINE_MUTEX(cosa_chardev_mutex);
static int cosa_major = 117;
-/*
- * Encoding of the minor numbers:
+/* Encoding of the minor numbers:
* The lowest CARD_MINOR_BITS bits means the channel on the single card,
* the highest bits means the card number.
*/
#define CARD_MINOR_BITS 4 /* How many bits in minor number are reserved
- * for the single card */
-/*
- * The following depends on CARD_MINOR_BITS. Unfortunately, the "MODULE_STRING"
+ * for the single card
+ */
+/* The following depends on CARD_MINOR_BITS. Unfortunately, the "MODULE_STRING"
* macro doesn't like anything other than the raw number as an argument :-(
*/
#define MAX_CARDS 16
@@ -184,8 +179,7 @@ static int cosa_major = 117;
#define DRIVER_TXMAP_SHIFT 2
#define DRIVER_TXMAP_MASK 0x0c /* FIXME: 0xfc for 8-channel version */
-/*
- * for cosa->rxtx - indicates whether either transmit or receive is
+/* for cosa->rxtx - indicates whether either transmit or receive is
* in progress. These values are mean number of the bit.
*/
#define TXBIT 0
@@ -198,22 +192,22 @@ static int cosa_major = 117;
#undef DEBUG_IRQS //1 /* Print the message when the IRQ is received */
#undef DEBUG_IO //1 /* Dump the I/O traffic */
-#define TX_TIMEOUT (5*HZ)
+#define TX_TIMEOUT (5 * HZ)
/* Maybe the following should be allocated dynamically */
static struct cosa_data cosa_cards[MAX_CARDS];
static int nr_cards;
#ifdef COSA_ISA_AUTOPROBE
-static int io[MAX_CARDS+1] = { 0x220, 0x228, 0x210, 0x218, 0, };
+static int io[MAX_CARDS + 1] = {0x220, 0x228, 0x210, 0x218, 0,};
/* NOTE: DMA is not autoprobed!!! */
-static int dma[MAX_CARDS+1] = { 1, 7, 1, 7, 1, 7, 1, 7, 0, };
+static int dma[MAX_CARDS + 1] = {1, 7, 1, 7, 1, 7, 1, 7, 0,};
#else
-static int io[MAX_CARDS+1];
-static int dma[MAX_CARDS+1];
+static int io[MAX_CARDS + 1];
+static int dma[MAX_CARDS + 1];
#endif
/* IRQ can be safely autoprobed */
-static int irq[MAX_CARDS+1] = { -1, -1, -1, -1, -1, -1, 0, };
+static int irq[MAX_CARDS + 1] = {-1, -1, -1, -1, -1, -1, 0,};
/* for class stuff*/
static struct class *cosa_class;
@@ -244,14 +238,14 @@ MODULE_LICENSE("GPL");
#define cosa_inw inw
#endif
-#define is_8bit(cosa) (!(cosa->datareg & 0x08))
+#define is_8bit(cosa) (!((cosa)->datareg & 0x08))
-#define cosa_getstatus(cosa) (cosa_inb(cosa->statusreg))
-#define cosa_putstatus(cosa, stat) (cosa_outb(stat, cosa->statusreg))
-#define cosa_getdata16(cosa) (cosa_inw(cosa->datareg))
-#define cosa_getdata8(cosa) (cosa_inb(cosa->datareg))
-#define cosa_putdata16(cosa, dt) (cosa_outw(dt, cosa->datareg))
-#define cosa_putdata8(cosa, dt) (cosa_outb(dt, cosa->datareg))
+#define cosa_getstatus(cosa) (cosa_inb((cosa)->statusreg))
+#define cosa_putstatus(cosa, stat) (cosa_outb(stat, (cosa)->statusreg))
+#define cosa_getdata16(cosa) (cosa_inw((cosa)->datareg))
+#define cosa_getdata8(cosa) (cosa_inb((cosa)->datareg))
+#define cosa_putdata16(cosa, dt) (cosa_outw(dt, (cosa)->datareg))
+#define cosa_putdata8(cosa, dt) (cosa_outb(dt, (cosa)->datareg))
/* Initialization stuff */
static int cosa_probe(int ioaddr, int irq, int dma);
@@ -280,14 +274,14 @@ static char *chrdev_setup_rx(struct channel_data *channel, int size);
static int chrdev_rx_done(struct channel_data *channel);
static int chrdev_tx_done(struct channel_data *channel, int size);
static ssize_t cosa_read(struct file *file,
- char __user *buf, size_t count, loff_t *ppos);
+ char __user *buf, size_t count, loff_t *ppos);
static ssize_t cosa_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos);
+ const char __user *buf, size_t count, loff_t *ppos);
static unsigned int cosa_poll(struct file *file, poll_table *poll);
static int cosa_open(struct inode *inode, struct file *file);
static int cosa_release(struct inode *inode, struct file *file);
static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg);
+ unsigned long arg);
#ifdef COSA_FASYNC_WORKING
static int cosa_fasync(struct inode *inode, struct file *file, int on);
#endif
@@ -337,7 +331,7 @@ static void debug_status_in(struct cosa_data *cosa, int status);
static void debug_status_out(struct cosa_data *cosa, int status);
#endif
-static inline struct channel_data* dev_to_chan(struct net_device *dev)
+static inline struct channel_data *dev_to_chan(struct net_device *dev)
{
return (struct channel_data *)dev_to_hdlc(dev)->priv;
}
@@ -355,15 +349,16 @@ static int __init cosa_init(void)
goto out;
}
} else {
- if (!(cosa_major=register_chrdev(0, "cosa", &cosa_fops))) {
+ cosa_major = register_chrdev(0, "cosa", &cosa_fops);
+ if (!cosa_major) {
pr_warn("unable to register chardev\n");
err = -EIO;
goto out;
}
}
- for (i=0; i<MAX_CARDS; i++)
+ for (i = 0; i < MAX_CARDS; i++)
cosa_cards[i].num = -1;
- for (i=0; io[i] != 0 && i < MAX_CARDS; i++)
+ for (i = 0; io[i] != 0 && i < MAX_CARDS; i++)
cosa_probe(io[i], irq[i], dma[i]);
if (!nr_cards) {
pr_warn("no devices found\n");
@@ -426,7 +421,7 @@ static const struct net_device_ops cosa_ops = {
static int cosa_probe(int base, int irq, int dma)
{
- struct cosa_data *cosa = cosa_cards+nr_cards;
+ struct cosa_data *cosa = cosa_cards + nr_cards;
int i, err = 0;
memset(cosa, 0, sizeof(struct cosa_data));
@@ -438,7 +433,8 @@ static int cosa_probe(int base, int irq, int dma)
return -1;
}
/* I/O address should be between 0x100 and 0x3ff and should be
- * multiple of 8. */
+ * multiple of 8.
+ */
if (base < 0x100 || base > 0x3ff || base & 0x7) {
pr_info("invalid I/O address 0x%x\n", base);
return -1;
@@ -448,8 +444,9 @@ static int cosa_probe(int base, int irq, int dma)
pr_info("invalid DMA %d\n", dma);
return -1;
}
- /* and finally, on 16-bit COSA DMA should be 4-7 and
- * I/O base should not be multiple of 0x10 */
+ /* and finally, on 16-bit COSA DMA should be 4-7 and
+ * I/O base should not be multiple of 0x10
+ */
if (((base & 0x8) && dma < 4) || (!(base & 0x8) && dma > 3)) {
pr_info("8/16 bit base and DMA mismatch (base=0x%x, dma=%d)\n",
base, dma);
@@ -458,12 +455,12 @@ static int cosa_probe(int base, int irq, int dma)
cosa->dma = dma;
cosa->datareg = base;
- cosa->statusreg = is_8bit(cosa)?base+1:base+2;
+ cosa->statusreg = is_8bit(cosa) ? base + 1 : base + 2;
spin_lock_init(&cosa->lock);
- if (!request_region(base, is_8bit(cosa)?2:4,"cosa"))
+ if (!request_region(base, is_8bit(cosa) ? 2 : 4, "cosa"))
return -1;
-
+
if (cosa_reset_and_read_id(cosa, cosa->id_string) < 0) {
printk(KERN_DEBUG "probe at 0x%x failed.\n", base);
err = -1;
@@ -471,11 +468,11 @@ static int cosa_probe(int base, int irq, int dma)
}
/* Test the validity of identification string */
- if (!strncmp(cosa->id_string, "SRP", 3))
+ if (!strncmp(cosa->id_string, "SRP", 3)) {
cosa->type = "srp";
- else if (!strncmp(cosa->id_string, "COSA", 4))
- cosa->type = is_8bit(cosa)? "cosa8": "cosa16";
- else {
+ } else if (!strncmp(cosa->id_string, "COSA", 4)) {
+ cosa->type = is_8bit(cosa) ? "cosa8" : "cosa16";
+ } else {
/* Print a warning only if we are not autoprobing */
#ifndef COSA_ISA_AUTOPROBE
pr_info("valid signature not found at 0x%x\n", base);
@@ -483,9 +480,9 @@ static int cosa_probe(int base, int irq, int dma)
err = -1;
goto err_out;
}
- /* Update the name of the region now we know the type of card */
- release_region(base, is_8bit(cosa)?2:4);
- if (!request_region(base, is_8bit(cosa)?2:4, cosa->type)) {
+ /* Update the name of the region now we know the type of card */
+ release_region(base, is_8bit(cosa) ? 2 : 4);
+ if (!request_region(base, is_8bit(cosa) ? 2 : 4, cosa->type)) {
printk(KERN_DEBUG "changing name at 0x%x failed.\n", base);
return -1;
}
@@ -495,8 +492,7 @@ static int cosa_probe(int base, int irq, int dma)
unsigned long irqs;
/* pr_info("IRQ autoprobe\n"); */
irqs = probe_irq_on();
- /*
- * Enable interrupt on tx buffer empty (it sure is)
+ /* Enable interrupt on tx buffer empty (it sure is)
* really sure ?
* FIXME: When this code is not used as module, we should
* probably call udelay() instead of the interruptible sleep.
@@ -536,8 +532,8 @@ static int cosa_probe(int base, int irq, int dma)
err = -1;
goto err_out1;
}
-
- cosa->bouncebuf = kmalloc(COSA_MTU, GFP_KERNEL|GFP_DMA);
+
+ cosa->bouncebuf = kmalloc(COSA_MTU, GFP_KERNEL | GFP_DMA);
if (!cosa->bouncebuf) {
err = -ENOMEM;
goto err_out2;
@@ -563,7 +559,8 @@ static int cosa_probe(int base, int irq, int dma)
sema_init(&chan->wsem, 1);
/* Register the network interface */
- if (!(chan->netdev = alloc_hdlcdev(chan))) {
+ chan->netdev = alloc_hdlcdev(chan);
+ if (!chan->netdev) {
pr_warn("%s: alloc_hdlcdev failed\n", chan->name);
err = -ENOMEM;
goto err_hdlcdev;
@@ -603,12 +600,11 @@ err_out2:
err_out1:
free_irq(cosa->irq, cosa);
err_out:
- release_region(cosa->datareg,is_8bit(cosa)?2:4);
+ release_region(cosa->datareg, is_8bit(cosa) ? 2 : 4);
pr_notice("cosa%d: allocating resources failed\n", cosa->num);
return err;
}
-
/*---------- network device ---------- */
static int cosa_net_attach(struct net_device *dev, unsigned short encoding,
@@ -659,7 +655,7 @@ static int cosa_net_open(struct net_device *dev)
}
static netdev_tx_t cosa_net_tx(struct sk_buff *skb,
- struct net_device *dev)
+ struct net_device *dev)
{
struct channel_data *chan = dev_to_chan(dev);
@@ -714,13 +710,12 @@ static int cosa_net_close(struct net_device *dev)
static char *cosa_net_setup_rx(struct channel_data *chan, int size)
{
- /*
- * We can safely fall back to non-dma-able memory, because we have
+ /* We can safely fall back to non-dma-able memory, because we have
* the cosa->bouncebuf pre-allocated.
*/
kfree_skb(chan->rx_skb);
chan->rx_skb = dev_alloc_skb(size);
- if (chan->rx_skb == NULL) {
+ if (!chan->rx_skb) {
pr_notice("%s: Memory squeeze, dropping packet\n", chan->name);
chan->netdev->stats.rx_dropped++;
return NULL;
@@ -767,7 +762,7 @@ static int cosa_net_tx_done(struct channel_data *chan, int size)
/*---------- Character device ---------- */
static ssize_t cosa_read(struct file *file,
- char __user *buf, size_t count, loff_t *ppos)
+ char __user *buf, size_t count, loff_t *ppos)
{
DECLARE_WAITQUEUE(wait, current);
unsigned long flags;
@@ -782,9 +777,9 @@ static ssize_t cosa_read(struct file *file,
}
if (mutex_lock_interruptible(&chan->rlock))
return -ERESTARTSYS;
-
- chan->rxdata = kmalloc(COSA_MTU, GFP_DMA|GFP_KERNEL);
- if (chan->rxdata == NULL) {
+
+ chan->rxdata = kmalloc(COSA_MTU, GFP_DMA | GFP_KERNEL);
+ if (!chan->rxdata) {
mutex_unlock(&chan->rlock);
return -ENOMEM;
}
@@ -840,9 +835,8 @@ static int chrdev_rx_done(struct channel_data *chan)
return 1;
}
-
static ssize_t cosa_write(struct file *file,
- const char __user *buf, size_t count, loff_t *ppos)
+ const char __user *buf, size_t count, loff_t *ppos)
{
DECLARE_WAITQUEUE(wait, current);
struct channel_data *chan = file->private_data;
@@ -860,10 +854,10 @@ static ssize_t cosa_write(struct file *file,
if (count > COSA_MTU)
count = COSA_MTU;
-
+
/* Allocate the buffer */
- kbuf = kmalloc(count, GFP_KERNEL|GFP_DMA);
- if (kbuf == NULL) {
+ kbuf = kmalloc(count, GFP_KERNEL | GFP_DMA);
+ if (!kbuf) {
up(&chan->wsem);
return -ENOMEM;
}
@@ -872,7 +866,7 @@ static ssize_t cosa_write(struct file *file,
kfree(kbuf);
return -EFAULT;
}
- chan->tx_status=0;
+ chan->tx_status = 0;
cosa_start_tx(chan, kbuf, count);
spin_lock_irqsave(&cosa->lock, flags);
@@ -927,20 +921,20 @@ static int cosa_open(struct inode *inode, struct file *file)
int ret = 0;
mutex_lock(&cosa_chardev_mutex);
- if ((n=iminor(file_inode(file))>>CARD_MINOR_BITS)
- >= nr_cards) {
+ n = iminor(file_inode(file)) >> CARD_MINOR_BITS;
+ if (n >= nr_cards) {
ret = -ENODEV;
goto out;
}
- cosa = cosa_cards+n;
+ cosa = cosa_cards + n;
- if ((n=iminor(file_inode(file))
- & ((1<<CARD_MINOR_BITS)-1)) >= cosa->nchannels) {
+ n = iminor(file_inode(file)) & ((1 << CARD_MINOR_BITS) - 1);
+ if (n >= cosa->nchannels) {
ret = -ENODEV;
goto out;
}
chan = cosa->chan + n;
-
+
file->private_data = chan;
spin_lock_irqsave(&cosa->lock, flags);
@@ -982,26 +976,25 @@ static struct fasync_struct *fasync[256] = { NULL, };
/* To be done ... */
static int cosa_fasync(struct inode *inode, struct file *file, int on)
{
- int port = iminor(inode);
+ int port = iminor(inode);
return fasync_helper(inode, file, on, &fasync[port]);
}
#endif
-
/* ---------- Ioctls ---------- */
-/*
- * Ioctl subroutines can safely be made inline, because they are called
+/* Ioctl subroutines can safely be made inline, because they are called
* only from cosa_ioctl().
*/
static inline int cosa_reset(struct cosa_data *cosa)
{
char idstring[COSA_MAX_ID_STRING];
+
if (cosa->usage > 1)
pr_info("cosa%d: WARNING: reset requested with cosa->usage > 1 (%d). Odd things may happen.\n",
cosa->num, cosa->usage);
- cosa->firmware_status &= ~(COSA_FW_RESET|COSA_FW_START);
+ cosa->firmware_status &= ~(COSA_FW_RESET | COSA_FW_START);
if (cosa_reset_and_read_id(cosa, idstring) < 0) {
pr_notice("cosa%d: reset failed\n", cosa->num);
return -EIO;
@@ -1025,7 +1018,7 @@ static inline int cosa_download(struct cosa_data *cosa, void __user *arg)
cosa->name, cosa->firmware_status);
return -EPERM;
}
-
+
if (copy_from_user(&d, arg, sizeof(d)))
return -EFAULT;
@@ -1034,9 +1027,8 @@ static inline int cosa_download(struct cosa_data *cosa, void __user *arg)
if (d.len < 0 || d.len > COSA_MAX_FIRMWARE_SIZE)
return -EINVAL;
-
/* If something fails, force the user to reset the card */
- cosa->firmware_status &= ~(COSA_FW_RESET|COSA_FW_DOWNLOAD);
+ cosa->firmware_status &= ~(COSA_FW_RESET | COSA_FW_DOWNLOAD);
i = download(cosa, d.code, d.len, d.addr);
if (i < 0) {
@@ -1046,7 +1038,7 @@ static inline int cosa_download(struct cosa_data *cosa, void __user *arg)
}
pr_info("cosa%d: downloading microcode - 0x%04x bytes at 0x%04x\n",
cosa->num, d.len, d.addr);
- cosa->firmware_status |= COSA_FW_RESET|COSA_FW_DOWNLOAD;
+ cosa->firmware_status |= COSA_FW_RESET | COSA_FW_DOWNLOAD;
return 0;
}
@@ -1091,14 +1083,15 @@ static inline int cosa_start(struct cosa_data *cosa, int address)
pr_info("cosa%d: WARNING: start microcode requested with cosa->usage > 1 (%d). Odd things may happen.\n",
cosa->num, cosa->usage);
- if ((cosa->firmware_status & (COSA_FW_RESET|COSA_FW_DOWNLOAD))
- != (COSA_FW_RESET|COSA_FW_DOWNLOAD)) {
+ if ((cosa->firmware_status & (COSA_FW_RESET | COSA_FW_DOWNLOAD))
+ != (COSA_FW_RESET | COSA_FW_DOWNLOAD)) {
pr_notice("%s: download the microcode and/or reset the card first (status %d)\n",
cosa->name, cosa->firmware_status);
return -EPERM;
}
cosa->firmware_status &= ~COSA_FW_RESET;
- if ((i=startmicrocode(cosa, address)) < 0) {
+ i = startmicrocode(cosa, address);
+ if (i < 0) {
pr_notice("cosa%d: start microcode at 0x%04x failed: %d\n",
cosa->num, address, i);
return -EIO;
@@ -1108,11 +1101,12 @@ static inline int cosa_start(struct cosa_data *cosa, int address)
cosa->firmware_status |= COSA_FW_START;
return 0;
}
-
+
/* Buffer of size at least COSA_MAX_ID_STRING is expected */
static inline int cosa_getidstr(struct cosa_data *cosa, char __user *string)
{
- int l = strlen(cosa->id_string)+1;
+ int l = strlen(cosa->id_string) + 1;
+
if (copy_to_user(string, cosa->id_string, l))
return -EFAULT;
return l;
@@ -1121,16 +1115,19 @@ static inline int cosa_getidstr(struct cosa_data *cosa, char __user *string)
/* Buffer of size at least COSA_MAX_ID_STRING is expected */
static inline int cosa_gettype(struct cosa_data *cosa, char __user *string)
{
- int l = strlen(cosa->type)+1;
+ int l = strlen(cosa->type) + 1;
+
if (copy_to_user(string, cosa->type, l))
return -EFAULT;
return l;
}
static int cosa_ioctl_common(struct cosa_data *cosa,
- struct channel_data *channel, unsigned int cmd, unsigned long arg)
+ struct channel_data *channel, unsigned int cmd,
+ unsigned long arg)
{
void __user *argp = (void __user *)arg;
+
switch (cmd) {
case COSAIORSET: /* Reset the device */
if (!capable(CAP_NET_ADMIN))
@@ -1143,7 +1140,7 @@ static int cosa_ioctl_common(struct cosa_data *cosa,
case COSAIODOWNLD: /* Download the firmware */
if (!capable(CAP_SYS_RAWIO))
return -EACCES;
-
+
return cosa_download(cosa, argp);
case COSAIORMEM:
if (!capable(CAP_SYS_RAWIO))
@@ -1176,6 +1173,7 @@ static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
int rv;
struct channel_data *chan = dev_to_chan(dev);
+
rv = cosa_ioctl_common(chan->cosa, chan, cmd,
(unsigned long)ifr->ifr_data);
if (rv != -ENOIOCTLCMD)
@@ -1184,7 +1182,7 @@ static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
+ unsigned long arg)
{
struct channel_data *channel = file->private_data;
struct cosa_data *cosa;
@@ -1197,11 +1195,9 @@ static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
return ret;
}
-
/*---------- HW layer interface ---------- */
-/*
- * The higher layer can bind itself to the HW layer by setting the callbacks
+/* The higher layer can bind itself to the HW layer by setting the callbacks
* in the channel_data structure and by using these routines.
*/
static void cosa_enable_rx(struct channel_data *chan)
@@ -1220,8 +1216,7 @@ static void cosa_disable_rx(struct channel_data *chan)
put_driver_status(cosa);
}
-/*
- * FIXME: This routine probably should check for cosa_start_tx() called when
+/* FIXME: This routine probably should check for cosa_start_tx() called when
* the previous transmit is still unfinished. In this case the non-zero
 * return value should indicate to the caller that queueing up
 * the transmit has failed.
@@ -1235,7 +1230,7 @@ static int cosa_start_tx(struct channel_data *chan, char *buf, int len)
pr_info("cosa%dc%d: starting tx(0x%x)",
chan->cosa->num, chan->num, len);
- for (i=0; i<len; i++)
+ for (i = 0; i < len; i++)
pr_cont(" %02x", buf[i]&0xff);
pr_cont("\n");
#endif
@@ -1262,10 +1257,10 @@ static void put_driver_status(struct cosa_data *cosa)
status = (cosa->rxbitmap ? DRIVER_RX_READY : 0)
| (cosa->txbitmap ? DRIVER_TX_READY : 0)
- | (cosa->txbitmap? ~(cosa->txbitmap<<DRIVER_TXMAP_SHIFT)
- &DRIVER_TXMAP_MASK : 0);
+ | (cosa->txbitmap ? ~(cosa->txbitmap << DRIVER_TXMAP_SHIFT)
+ & DRIVER_TXMAP_MASK : 0);
if (!cosa->rxtx) {
- if (cosa->rxbitmap|cosa->txbitmap) {
+ if (cosa->rxbitmap | cosa->txbitmap) {
if (!cosa->enabled) {
cosa_putstatus(cosa, SR_RX_INT_ENA);
#ifdef DEBUG_IO
@@ -1294,10 +1289,10 @@ static void put_driver_status_nolock(struct cosa_data *cosa)
status = (cosa->rxbitmap ? DRIVER_RX_READY : 0)
| (cosa->txbitmap ? DRIVER_TX_READY : 0)
- | (cosa->txbitmap? ~(cosa->txbitmap<<DRIVER_TXMAP_SHIFT)
- &DRIVER_TXMAP_MASK : 0);
+ | (cosa->txbitmap ? ~(cosa->txbitmap << DRIVER_TXMAP_SHIFT)
+ & DRIVER_TXMAP_MASK : 0);
- if (cosa->rxbitmap|cosa->txbitmap) {
+ if (cosa->rxbitmap | cosa->txbitmap) {
cosa_putstatus(cosa, SR_RX_INT_ENA);
#ifdef DEBUG_IO
debug_status_out(cosa, SR_RX_INT_ENA);
@@ -1316,8 +1311,7 @@ static void put_driver_status_nolock(struct cosa_data *cosa)
#endif
}
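Both put_driver_status() variants build the same status byte: an RX-ready flag, a TX-ready flag, and the inverted per-channel TX map shifted into place. A worked example, with DRIVER_TXMAP_SHIFT = 2 and DRIVER_TXMAP_MASK = 0x7c as assumed illustrative values (the real constants live in the driver header):

	/* Assumed for illustration: DRIVER_TX_READY = 0x02,
	 * DRIVER_TXMAP_SHIFT = 2, DRIVER_TXMAP_MASK = 0x7c.
	 * With txbitmap = 0x1 (channel 0 has data) and rxbitmap = 0:
	 *   status = 0x02 | (~(0x1 << 2) & 0x7c)
	 *          = 0x02 | 0x78
	 * i.e. "TX ready", plus an inverted map telling the card which
	 * channel has the data.
	 */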
-/*
- * The "kickme" function: When the DMA times out, this is called to
+/* The "kickme" function: When the DMA times out, this is called to
* clean up the driver status.
* FIXME: Preliminary support, the interface is probably wrong.
*/
@@ -1344,7 +1338,7 @@ static void cosa_kick(struct cosa_data *cosa)
udelay(100);
cosa_putstatus(cosa, 0);
udelay(100);
- (void) cosa_getdata8(cosa);
+ (void)cosa_getdata8(cosa);
udelay(100);
cosa_putdata8(cosa, 0);
udelay(100);
@@ -1352,8 +1346,7 @@ static void cosa_kick(struct cosa_data *cosa)
spin_unlock_irqrestore(&cosa->lock, flags);
}
-/*
- * Check if the whole buffer is DMA-able. It means it is below the 16M of
+/* Check if the whole buffer is DMA-able. It means it is below the 16M of
* physical memory and doesn't span the 64k boundary. For now it seems
* SKB's never do this, but we'll check this anyway.
*/
@@ -1361,9 +1354,10 @@ static int cosa_dma_able(struct channel_data *chan, char *buf, int len)
{
static int count;
unsigned long b = (unsigned long)buf;
- if (b+len >= MAX_DMA_ADDRESS)
+
+ if (b + len >= MAX_DMA_ADDRESS)
return 0;
- if ((b^ (b+len)) & 0x10000) {
+ if ((b ^ (b + len)) & 0x10000) {
if (count++ < 5)
pr_info("%s: packet spanning a 64k boundary\n",
chan->name);
@@ -1372,11 +1366,9 @@ static int cosa_dma_able(struct channel_data *chan, char *buf, int len)
return 1;
}
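The two tests in cosa_dma_able() are the classic ISA DMA constraints. Restated standalone (a sketch, not driver code; MAX_DMA_ADDRESS comes from <asm/dma.h>):

	#include <asm/dma.h>	/* MAX_DMA_ADDRESS, the 16M ISA limit */

	/* Sketch: a buffer is ISA-DMA-able iff it ends below MAX_DMA_ADDRESS
	 * and does not cross a 64 KiB boundary (bit 16 of start and end match).
	 */
	static int isa_dma_able(unsigned long b, int len)
	{
		if (b + len >= MAX_DMA_ADDRESS)
			return 0;		/* beyond the 16M window */
		if ((b ^ (b + len)) & 0x10000)
			return 0;		/* 64k boundary crossed */
		return 1;
	}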
-
/* ---------- The SRP/COSA ROM monitor functions ---------- */
-/*
- * Downloading SRP microcode: say "w" to SRP monitor, it answers by "w=",
+/* Downloading SRP microcode: say "w" to SRP monitor, it answers by "w=",
 * the driver needs to send a 4-digit hex number meaning the start address of the
 * microcode, followed by a single space. The monitor replies by saying " =". Now the
 * driver has to write a 4-digit hex number meaning the last byte address ended
@@ -1387,18 +1379,27 @@ static int download(struct cosa_data *cosa, const char __user *microcode, int le
{
int i;
- if (put_wait_data(cosa, 'w') == -1) return -1;
+ if (put_wait_data(cosa, 'w') == -1)
+ return -1;
if ((i=get_wait_data(cosa)) != 'w') { printk("dnld: 0x%04x\n",i); return -2;}
- if (get_wait_data(cosa) != '=') return -3;
-
- if (puthexnumber(cosa, address) < 0) return -4;
- if (put_wait_data(cosa, ' ') == -1) return -10;
- if (get_wait_data(cosa) != ' ') return -11;
- if (get_wait_data(cosa) != '=') return -12;
-
- if (puthexnumber(cosa, address+length-1) < 0) return -13;
- if (put_wait_data(cosa, ' ') == -1) return -18;
- if (get_wait_data(cosa) != ' ') return -19;
+ if (get_wait_data(cosa) != '=')
+ return -3;
+
+ if (puthexnumber(cosa, address) < 0)
+ return -4;
+ if (put_wait_data(cosa, ' ') == -1)
+ return -10;
+ if (get_wait_data(cosa) != ' ')
+ return -11;
+ if (get_wait_data(cosa) != '=')
+ return -12;
+
+ if (puthexnumber(cosa, address + length - 1) < 0)
+ return -13;
+ if (put_wait_data(cosa, ' ') == -1)
+ return -18;
+ if (get_wait_data(cosa) != ' ')
+ return -19;
while (length--) {
char c;
@@ -1413,43 +1414,53 @@ static int download(struct cosa_data *cosa, const char __user *microcode, int le
microcode++;
}
- if (get_wait_data(cosa) != '\r') return -21;
- if (get_wait_data(cosa) != '\n') return -22;
- if (get_wait_data(cosa) != '.') return -23;
+ if (get_wait_data(cosa) != '\r')
+ return -21;
+ if (get_wait_data(cosa) != '\n')
+ return -22;
+ if (get_wait_data(cosa) != '.')
+ return -23;
#if 0
printk(KERN_DEBUG "cosa%d: download completed.\n", cosa->num);
#endif
return 0;
}
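Laid out end to end, the conversation download() implements is the following transcript, reconstructed from the checks above (XXXX and YYYY are placeholders for the start and last-byte addresses):

	/* Reconstructed "w" chat, host on the left:
	 *   host: "w"              monitor: "w="
	 *   host: "XXXX "          monitor: " ="
	 *   host: "YYYY "          monitor: " "
	 *   host: <data bytes>     monitor: echo of each byte
	 *                          monitor: "\r\n."
	 * Each numbered negative return above identifies the step that failed.
	 */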
-
-/*
- * Starting microcode is done via the "g" command of the SRP monitor.
+/* Starting microcode is done via the "g" command of the SRP monitor.
* The chat should be the following: "g" "g=" "<addr><CR>"
* "<CR><CR><LF><CR><LF>".
*/
static int startmicrocode(struct cosa_data *cosa, int address)
{
- if (put_wait_data(cosa, 'g') == -1) return -1;
- if (get_wait_data(cosa) != 'g') return -2;
- if (get_wait_data(cosa) != '=') return -3;
-
- if (puthexnumber(cosa, address) < 0) return -4;
- if (put_wait_data(cosa, '\r') == -1) return -5;
-
- if (get_wait_data(cosa) != '\r') return -6;
- if (get_wait_data(cosa) != '\r') return -7;
- if (get_wait_data(cosa) != '\n') return -8;
- if (get_wait_data(cosa) != '\r') return -9;
- if (get_wait_data(cosa) != '\n') return -10;
+ if (put_wait_data(cosa, 'g') == -1)
+ return -1;
+ if (get_wait_data(cosa) != 'g')
+ return -2;
+ if (get_wait_data(cosa) != '=')
+ return -3;
+
+ if (puthexnumber(cosa, address) < 0)
+ return -4;
+ if (put_wait_data(cosa, '\r') == -1)
+ return -5;
+
+ if (get_wait_data(cosa) != '\r')
+ return -6;
+ if (get_wait_data(cosa) != '\r')
+ return -7;
+ if (get_wait_data(cosa) != '\n')
+ return -8;
+ if (get_wait_data(cosa) != '\r')
+ return -9;
+ if (get_wait_data(cosa) != '\n')
+ return -10;
#if 0
printk(KERN_DEBUG "cosa%d: microcode started\n", cosa->num);
#endif
return 0;
}
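The same chat style, reconstructed for the "g" command (ADDR is a placeholder for the 4-digit hex start address):

	/* Reconstructed "g" chat:
	 *   host: "g"          monitor: "g="
	 *   host: "ADDR\r"     monitor: "\r" "\r" "\n" "\r" "\n"
	 * matching the five get_wait_data() checks (-6 .. -10) above.
	 */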
-/*
- * Reading memory is done via the "r" command of the SRP monitor.
+/* Reading memory is done via the "r" command of the SRP monitor.
* The chat is the following "r" "r=" "<addr> " " =" "<last_byte> " " "
 * Then the driver can read the data and the conversation is finished
* by SRP monitor sending "<CR><LF>." (dot at the end).
@@ -1459,27 +1470,39 @@ static int startmicrocode(struct cosa_data *cosa, int address)
*/
static int readmem(struct cosa_data *cosa, char __user *microcode, int length, int address)
{
- if (put_wait_data(cosa, 'r') == -1) return -1;
- if ((get_wait_data(cosa)) != 'r') return -2;
- if ((get_wait_data(cosa)) != '=') return -3;
-
- if (puthexnumber(cosa, address) < 0) return -4;
- if (put_wait_data(cosa, ' ') == -1) return -5;
- if (get_wait_data(cosa) != ' ') return -6;
- if (get_wait_data(cosa) != '=') return -7;
-
- if (puthexnumber(cosa, address+length-1) < 0) return -8;
- if (put_wait_data(cosa, ' ') == -1) return -9;
- if (get_wait_data(cosa) != ' ') return -10;
+ if (put_wait_data(cosa, 'r') == -1)
+ return -1;
+ if ((get_wait_data(cosa)) != 'r')
+ return -2;
+ if ((get_wait_data(cosa)) != '=')
+ return -3;
+
+ if (puthexnumber(cosa, address) < 0)
+ return -4;
+ if (put_wait_data(cosa, ' ') == -1)
+ return -5;
+ if (get_wait_data(cosa) != ' ')
+ return -6;
+ if (get_wait_data(cosa) != '=')
+ return -7;
+
+ if (puthexnumber(cosa, address + length - 1) < 0)
+ return -8;
+ if (put_wait_data(cosa, ' ') == -1)
+ return -9;
+ if (get_wait_data(cosa) != ' ')
+ return -10;
while (length--) {
char c;
int i;
- if ((i=get_wait_data(cosa)) == -1) {
+
+ i = get_wait_data(cosa);
+ if (i == -1) {
pr_info("0x%04x bytes remaining\n", length);
return -11;
}
- c=i;
+ c = i;
#if 1
if (put_user(c, microcode))
return -23; /* ??? */
@@ -1489,22 +1512,24 @@ static int readmem(struct cosa_data *cosa, char __user *microcode, int length, i
microcode++;
}
- if (get_wait_data(cosa) != '\r') return -21;
- if (get_wait_data(cosa) != '\n') return -22;
- if (get_wait_data(cosa) != '.') return -23;
+ if (get_wait_data(cosa) != '\r')
+ return -21;
+ if (get_wait_data(cosa) != '\n')
+ return -22;
+ if (get_wait_data(cosa) != '.')
+ return -23;
#if 0
printk(KERN_DEBUG "cosa%d: readmem completed.\n", cosa->num);
#endif
return 0;
}
-/*
- * This function resets the device and reads the initial prompt
+/* This function resets the device and reads the initial prompt
* of the device's ROM monitor.
*/
static int cosa_reset_and_read_id(struct cosa_data *cosa, char *idstring)
{
- int i=0, id=0, prev=0, curr=0;
+ int i = 0, id = 0, prev = 0, curr = 0;
/* Reset the card ... */
cosa_putstatus(cosa, 0);
@@ -1514,18 +1539,18 @@ static int cosa_reset_and_read_id(struct cosa_data *cosa, char *idstring)
/* Disable all IRQs from the card */
cosa_putstatus(cosa, 0);
- /*
- * Try to read the ID string. The card then prints out the
+ /* Try to read the ID string. The card then prints out the
* identification string ended by the "\n\x2e".
*
* The following loop is indexed through i (instead of id)
 * to avoid looping forever when, for any reason,
* the port returns '\r', '\n' or '\x2e' permanently.
*/
- for (i=0; i<COSA_MAX_ID_STRING-1; i++, prev=curr) {
- if ((curr = get_wait_data(cosa)) == -1) {
+ for (i = 0; i < COSA_MAX_ID_STRING - 1; i++, prev = curr) {
+ curr = get_wait_data(cosa);
+ if (curr == -1)
return -1;
- }
+
curr &= 0xff;
if (curr != '\r' && curr != '\n' && curr != 0x2e)
idstring[id++] = curr;
@@ -1537,11 +1562,9 @@ static int cosa_reset_and_read_id(struct cosa_data *cosa, char *idstring)
return id;
}
-
/* ---------- Auxiliary routines for COSA/SRP monitor ---------- */
-/*
- * This routine gets the data byte from the card waiting for the SR_RX_RDY
+/* This routine gets the data byte from the card waiting for the SR_RX_RDY
* bit to be set in a loop. It should be used in the exceptional cases
 * only (for example when resetting the card or downloading the firmware).
*/
@@ -1553,10 +1576,11 @@ static int get_wait_data(struct cosa_data *cosa)
/* read data and return them */
if (cosa_getstatus(cosa) & SR_RX_RDY) {
short r;
+
r = cosa_getdata8(cosa);
#if 0
pr_info("get_wait_data returning after %d retries\n",
- 999-retries);
+ 999 - retries);
#endif
return r;
}
@@ -1568,20 +1592,20 @@ static int get_wait_data(struct cosa_data *cosa)
return -1;
}
-/*
- * This routine puts the data byte to the card waiting for the SR_TX_RDY
+/* This routine puts the data byte to the card waiting for the SR_TX_RDY
* bit to be set in a loop. It should be used in the exceptional cases
* only (for example when resetting the card or downloading the firmware).
*/
static int put_wait_data(struct cosa_data *cosa, int data)
{
int retries = 1000;
+
while (--retries) {
/* read data and return them */
if (cosa_getstatus(cosa) & SR_TX_RDY) {
cosa_putdata8(cosa, data);
#if 0
- pr_info("Putdata: %d retries\n", 999-retries);
+ pr_info("Putdata: %d retries\n", 999 - retries);
#endif
return 0;
}
@@ -1594,9 +1618,8 @@ static int put_wait_data(struct cosa_data *cosa, int data)
cosa->num, cosa_getstatus(cosa));
return -1;
}
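get_wait_data() and put_wait_data() are two instances of one bounded busy-wait. Its common shape, distilled into a sketch (the udelay() back-off is an assumption; the functions' actual inter-poll behaviour is not changed by this patch):

	#include <linux/delay.h>

	/* Sketch of the shared poll pattern: test a status bit up to
	 * 1000 times, then give up with -1.
	 */
	static int poll_status_bit(struct cosa_data *cosa, int bit)
	{
		int retries = 1000;

		while (--retries) {
			if (cosa_getstatus(cosa) & bit)
				return 0;	/* ready */
			udelay(10);		/* assumed back-off */
		}
		return -1;			/* timed out */
	}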
-
-/*
- * The following routine puts the hexadecimal number into the SRP monitor
+
+/* The following routine puts the hexadecimal number into the SRP monitor
* and verifies the proper echo of the sent bytes. Returns 0 on success,
 * or a negative number on failure: (-1,-3,-5,-7) means that put_wait_data() failed,
* (-2,-4,-6,-8) means that reading echo failed.
@@ -1608,26 +1631,24 @@ static int puthexnumber(struct cosa_data *cosa, int number)
/* Well, I should probably replace this by something faster. */
sprintf(temp, "%04X", number);
- for (i=0; i<4; i++) {
+ for (i = 0; i < 4; i++) {
if (put_wait_data(cosa, temp[i]) == -1) {
pr_notice("cosa%d: puthexnumber failed to write byte %d\n",
cosa->num, i);
- return -1-2*i;
+ return -1 - 2 * i;
}
if (get_wait_data(cosa) != temp[i]) {
pr_notice("cosa%d: puthexhumber failed to read echo of byte %d\n",
cosa->num, i);
- return -2-2*i;
+ return -2 - 2 * i;
}
}
return 0;
}
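The error-code convention described above can be inverted mechanically; a sketch of the decode (not driver code):

	/* Sketch: recover which hex digit puthexnumber() failed on.
	 * Odd returns (-1,-3,-5,-7) are put_wait_data() failures,
	 * even returns (-2,-4,-6,-8) are echo mismatches.
	 */
	static int puthex_failed_digit(int ret)
	{
		return (ret & 1) ? (-ret - 1) / 2 : (-ret - 2) / 2;
	}

For example, -5 decodes to digit 2 of the four sent.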
-
/* ---------- Interrupt routines ---------- */
-/*
- * There are three types of interrupt:
+/* There are three types of interrupt:
 * At the beginning of transmit - this is handled in tx_interrupt(),
* at the beginning of receive - it is in rx_interrupt() and
* at the end of transmit/receive - it is the eot_interrupt() function.
@@ -1635,14 +1656,13 @@ static int puthexnumber(struct cosa_data *cosa, int number)
* COSA status byte. I have moved the rx/tx/eot interrupt handling into
* separate functions to make it more readable. These functions are inline,
* so there should be no overhead of function call.
- *
+ *
* In the COSA bus-master mode, we need to tell the card the address of a
* buffer. Unfortunately, COSA may be too slow for us, so we must busy-wait.
* It's time to use the bottom half :-(
*/
-/*
- * Transmit interrupt routine - called when COSA is willing to obtain
+/* Transmit interrupt routine - called when COSA is willing to obtain
* data from the OS. The most tricky part of the routine is selection
* of channel we (OS) want to send packet for. For SRP we should probably
* use the round-robin approach. The newer COSA firmwares have a simple
@@ -1667,7 +1687,8 @@ static inline void tx_interrupt(struct cosa_data *cosa, int status)
set_bit(TXBIT, &cosa->rxtx);
if (!test_bit(IRQBIT, &cosa->rxtx)) {
/* flow control, see the comment above */
- int i=0;
+ int i = 0;
+
if (!cosa->txbitmap) {
pr_warn("%s: No channel wants data in TX IRQ. Expect DMA timeout.\n",
cosa->name);
@@ -1681,9 +1702,10 @@ static inline void tx_interrupt(struct cosa_data *cosa, int status)
i++;
if (cosa->txchan >= cosa->nchannels)
cosa->txchan = 0;
- if (!(cosa->txbitmap & (1<<cosa->txchan)))
+ if (!(cosa->txbitmap & (1 << cosa->txchan)))
continue;
- if (~status & (1 << (cosa->txchan+DRIVER_TXMAP_SHIFT)))
+ if (~status &
+ (1 << (cosa->txchan + DRIVER_TXMAP_SHIFT)))
break;
/* in second pass, accept first ready-to-TX channel */
if (i > cosa->nchannels) {
@@ -1698,12 +1720,13 @@ static inline void tx_interrupt(struct cosa_data *cosa, int status)
}
cosa->txsize = cosa->chan[cosa->txchan].txsize;
- if (cosa_dma_able(cosa->chan+cosa->txchan,
- cosa->chan[cosa->txchan].txbuf, cosa->txsize)) {
+ if (cosa_dma_able(cosa->chan + cosa->txchan,
+ cosa->chan[cosa->txchan].txbuf,
+ cosa->txsize)) {
cosa->txbuf = cosa->chan[cosa->txchan].txbuf;
} else {
memcpy(cosa->bouncebuf, cosa->chan[cosa->txchan].txbuf,
- cosa->txsize);
+ cosa->txsize);
cosa->txbuf = cosa->bouncebuf;
}
}
@@ -1711,12 +1734,12 @@ static inline void tx_interrupt(struct cosa_data *cosa, int status)
if (is_8bit(cosa)) {
if (!test_bit(IRQBIT, &cosa->rxtx)) {
cosa_putstatus(cosa, SR_TX_INT_ENA);
- cosa_putdata8(cosa, ((cosa->txchan << 5) & 0xe0)|
+ cosa_putdata8(cosa, ((cosa->txchan << 5) & 0xe0) |
((cosa->txsize >> 8) & 0x1f));
#ifdef DEBUG_IO
debug_status_out(cosa, SR_TX_INT_ENA);
- debug_data_out(cosa, ((cosa->txchan << 5) & 0xe0)|
- ((cosa->txsize >> 8) & 0x1f));
+ debug_data_out(cosa, ((cosa->txchan << 5) & 0xe0) |
+ ((cosa->txsize >> 8) & 0x1f));
debug_data_in(cosa, cosa_getdata8(cosa));
#else
cosa_getdata8(cosa);
@@ -1727,20 +1750,20 @@ static inline void tx_interrupt(struct cosa_data *cosa, int status)
} else {
clear_bit(IRQBIT, &cosa->rxtx);
cosa_putstatus(cosa, 0);
- cosa_putdata8(cosa, cosa->txsize&0xff);
+ cosa_putdata8(cosa, cosa->txsize & 0xff);
#ifdef DEBUG_IO
debug_status_out(cosa, 0);
- debug_data_out(cosa, cosa->txsize&0xff);
+ debug_data_out(cosa, cosa->txsize & 0xff);
#endif
}
} else {
cosa_putstatus(cosa, SR_TX_INT_ENA);
- cosa_putdata16(cosa, ((cosa->txchan<<13) & 0xe000)
+ cosa_putdata16(cosa, ((cosa->txchan << 13) & 0xe000)
| (cosa->txsize & 0x1fff));
#ifdef DEBUG_IO
debug_status_out(cosa, SR_TX_INT_ENA);
- debug_data_out(cosa, ((cosa->txchan<<13) & 0xe000)
- | (cosa->txsize & 0x1fff));
+ debug_data_out(cosa, ((cosa->txchan << 13) & 0xe000) |
+ (cosa->txsize & 0x1fff));
debug_data_in(cosa, cosa_getdata8(cosa));
debug_status_out(cosa, 0);
#else
@@ -1751,25 +1774,28 @@ static inline void tx_interrupt(struct cosa_data *cosa, int status)
if (cosa->busmaster) {
unsigned long addr = virt_to_bus(cosa->txbuf);
- int count=0;
+ int count = 0;
+
pr_info("busmaster IRQ\n");
- while (!(cosa_getstatus(cosa)&SR_TX_RDY)) {
+ while (!(cosa_getstatus(cosa) & SR_TX_RDY)) {
count++;
udelay(10);
- if (count > 1000) break;
+ if (count > 1000)
+ break;
}
pr_info("status %x\n", cosa_getstatus(cosa));
pr_info("ready after %d loops\n", count);
- cosa_putdata16(cosa, (addr >> 16)&0xffff);
+ cosa_putdata16(cosa, (addr >> 16) & 0xffff);
count = 0;
- while (!(cosa_getstatus(cosa)&SR_TX_RDY)) {
+ while (!(cosa_getstatus(cosa) & SR_TX_RDY)) {
count++;
- if (count > 1000) break;
+ if (count > 1000)
+ break;
udelay(10);
}
pr_info("ready after %d loops\n", count);
- cosa_putdata16(cosa, addr &0xffff);
+ cosa_putdata16(cosa, addr & 0xffff);
flags1 = claim_dma_lock();
set_dma_mode(cosa->dma, DMA_MODE_CASCADE);
enable_dma(cosa->dma);
@@ -1785,9 +1811,9 @@ static inline void tx_interrupt(struct cosa_data *cosa, int status)
enable_dma(cosa->dma);
release_dma_lock(flags1);
}
- cosa_putstatus(cosa, SR_TX_DMA_ENA|SR_USR_INT_ENA);
+ cosa_putstatus(cosa, SR_TX_DMA_ENA | SR_USR_INT_ENA);
#ifdef DEBUG_IO
- debug_status_out(cosa, SR_TX_DMA_ENA|SR_USR_INT_ENA);
+ debug_status_out(cosa, SR_TX_DMA_ENA | SR_USR_INT_ENA);
#endif
spin_unlock_irqrestore(&cosa->lock, flags);
}
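The selection loop reindented above is a two-pass round robin. Its skeleton, pulled out of context (a sketch; plain parameters stand in for the cosa->txchan/txbitmap fields, the caller is assumed to have checked txbitmap != 0 as the warning above does, and it assumes, per put_driver_status() earlier, that a clear bit in the status TX map marks a ready channel):

	/* Two-pass round robin (sketch). Pass one prefers a channel the
	 * card's status map flags as ready; once i exceeds nchannels the
	 * second pass accepts any channel that has data queued.
	 */
	static int pick_tx_channel(int txchan, int nchannels,
				   unsigned long txbitmap, int status)
	{
		int i;

		for (i = 0; ; i++) {
			if (++txchan >= nchannels)
				txchan = 0;
			if (!(txbitmap & (1 << txchan)))
				continue;	/* nothing queued here */
			if (~status & (1 << (txchan + DRIVER_TXMAP_SHIFT)))
				break;		/* ready per status map */
			if (i > nchannels)
				break;		/* 2nd pass: take it anyway */
		}
		return txchan;
	}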
@@ -1806,7 +1832,7 @@ static inline void rx_interrupt(struct cosa_data *cosa, int status)
if (!test_bit(IRQBIT, &cosa->rxtx)) {
set_bit(IRQBIT, &cosa->rxtx);
put_driver_status_nolock(cosa);
- cosa->rxsize = cosa_getdata8(cosa) <<8;
+ cosa->rxsize = cosa_getdata8(cosa) << 8;
#ifdef DEBUG_IO
debug_data_in(cosa, cosa->rxsize >> 8);
#endif
@@ -1859,20 +1885,20 @@ reject: /* Reject the packet */
disable_dma(cosa->dma);
clear_dma_ff(cosa->dma);
set_dma_mode(cosa->dma, DMA_MODE_READ);
- if (cosa_dma_able(cosa->rxchan, cosa->rxbuf, cosa->rxsize & 0x1fff)) {
+ if (cosa_dma_able(cosa->rxchan, cosa->rxbuf, cosa->rxsize & 0x1fff))
set_dma_addr(cosa->dma, virt_to_bus(cosa->rxbuf));
- } else {
+ else
set_dma_addr(cosa->dma, virt_to_bus(cosa->bouncebuf));
- }
- set_dma_count(cosa->dma, (cosa->rxsize&0x1fff));
+
+ set_dma_count(cosa->dma, (cosa->rxsize & 0x1fff));
enable_dma(cosa->dma);
release_dma_lock(flags);
spin_lock_irqsave(&cosa->lock, flags);
- cosa_putstatus(cosa, SR_RX_DMA_ENA|SR_USR_INT_ENA);
+ cosa_putstatus(cosa, SR_RX_DMA_ENA | SR_USR_INT_ENA);
if (!is_8bit(cosa) && (status & SR_TX_RDY))
cosa_putdata8(cosa, DRIVER_RX_READY);
#ifdef DEBUG_IO
- debug_status_out(cosa, SR_RX_DMA_ENA|SR_USR_INT_ENA);
+ debug_status_out(cosa, SR_RX_DMA_ENA | SR_USR_INT_ENA);
if (!is_8bit(cosa) && (status & SR_TX_RDY))
debug_data_cmd(cosa, DRIVER_RX_READY);
#endif
@@ -1882,13 +1908,15 @@ reject: /* Reject the packet */
static inline void eot_interrupt(struct cosa_data *cosa, int status)
{
unsigned long flags, flags1;
+
spin_lock_irqsave(&cosa->lock, flags);
flags1 = claim_dma_lock();
disable_dma(cosa->dma);
clear_dma_ff(cosa->dma);
release_dma_lock(flags1);
if (test_bit(TXBIT, &cosa->rxtx)) {
- struct channel_data *chan = cosa->chan+cosa->txchan;
+ struct channel_data *chan = cosa->chan + cosa->txchan;
+
if (chan->tx_done)
if (chan->tx_done(chan, cosa->txsize))
clear_bit(chan->num, &cosa->txbitmap);
@@ -1896,9 +1924,10 @@ static inline void eot_interrupt(struct cosa_data *cosa, int status)
#ifdef DEBUG_DATA
{
int i;
+
pr_info("cosa%dc%d: done rx(0x%x)",
cosa->num, cosa->rxchan->num, cosa->rxsize);
- for (i=0; i<cosa->rxsize; i++)
+ for (i = 0; i < cosa->rxsize; i++)
pr_cont(" %02x", cosa->rxbuf[i]&0xff);
pr_cont("\n");
}
@@ -1914,8 +1943,7 @@ static inline void eot_interrupt(struct cosa_data *cosa, int status)
} else {
pr_notice("cosa%d: unexpected EOT interrupt\n", cosa->num);
}
- /*
- * Clear the RXBIT, TXBIT and IRQBIT (the latest should be
+ /* Clear the RXBIT, TXBIT and IRQBIT (the latest should be
* cleared anyway). We should do it as soon as possible
* so that we can tell the COSA we are done and to give it a time
* for recovery.
@@ -1968,10 +1996,8 @@ again:
return IRQ_HANDLED;
}
-
/* ---------- I/O debugging routines ---------- */
-/*
- * These routines can be used to monitor COSA/SRP I/O and to printk()
+/* These routines can be used to monitor COSA/SRP I/O and to printk()
* the data being transferred on the data and status I/O port in a
* readable way.
*/
@@ -1980,6 +2006,7 @@ again:
static void debug_status_in(struct cosa_data *cosa, int status)
{
char *s;
+
switch (status & SR_CMD_FROM_SRP_MASK) {
case SR_UP_REQUEST:
s = "RX_REQ";
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 5de71e44fc5a..b3466e084e84 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * FarSync WAN driver for Linux (2.6.x kernel version)
+/* FarSync WAN driver for Linux (2.6.x kernel version)
*
* Actually sync driver for X.21, V.35 and V.24 on FarSync T-series cards
*
@@ -30,8 +29,7 @@
#include "farsync.h"
-/*
- * Module info
+/* Module info
*/
MODULE_AUTHOR("R.J.Dunlop <bob.dunlop@farsite.co.uk>");
MODULE_DESCRIPTION("FarSync T-Series WAN driver. FarSite Communications Ltd.");
@@ -49,20 +47,23 @@ MODULE_LICENSE("GPL");
/* Default parameters for the link
*/
#define FST_TX_QUEUE_LEN 100 /* At 8Mbps a longer queue length is
- * useful */
+ * useful
+ */
#define FST_TXQ_DEPTH 16 /* This one is for the buffering
* of frames on the way down to the card
* so that we can keep the card busy
* and maximise throughput
*/
#define FST_HIGH_WATER_MARK 12 /* Point at which we flow control
- * network layer */
+ * network layer
+ */
#define FST_LOW_WATER_MARK 8 /* Point at which we remove flow
- * control from network layer */
+ * control from network layer
+ */
#define FST_MAX_MTU 8000 /* Huge but possible */
#define FST_DEF_MTU 1500 /* Common sane value */
-#define FST_TX_TIMEOUT (2*HZ)
+#define FST_TX_TIMEOUT (2 * HZ)
#ifdef ARPHRD_RAWHDLC
#define ARPHRD_MYTYPE ARPHRD_RAWHDLC /* Raw frames */
@@ -70,13 +71,12 @@ MODULE_LICENSE("GPL");
#define ARPHRD_MYTYPE ARPHRD_HDLC /* Cisco-HDLC (keepalives etc) */
#endif
-/*
- * Modules parameters and associated variables
+/* Modules parameters and associated variables
*/
static int fst_txq_low = FST_LOW_WATER_MARK;
static int fst_txq_high = FST_HIGH_WATER_MARK;
static int fst_max_reads = 7;
-static int fst_excluded_cards = 0;
+static int fst_excluded_cards;
static int fst_excluded_list[FST_MAX_CARDS];
module_param(fst_txq_low, int, 0);
@@ -105,9 +105,11 @@ module_param_array(fst_excluded_list, int, NULL, 0);
#define FST_MEMSIZE 0x100000 /* Size of card memory (1Mb) */
#define SMC_BASE 0x00002000L /* Base offset of the shared memory window main
- * configuration structure */
+ * configuration structure
+ */
#define BFM_BASE 0x00010000L /* Base offset of the shared memory window DMA
- * buffers */
+ * buffers
+ */
#define LEN_TX_BUFFER 8192 /* Size of packet buffers */
#define LEN_RX_BUFFER 8192
@@ -377,8 +379,7 @@ struct fst_shared {
#define INTCSR_9054 0x68 /* Interrupt control/status register */
/* 9054 DMA Registers */
-/*
- * Note that we will be using DMA Channel 0 for copying rx data
+/* Note that we will be using DMA Channel 0 for copying rx data
* and Channel 1 for copying tx data
*/
#define DMAMODE0 0x80
@@ -421,7 +422,7 @@ struct buf_window {
/* Per port (line or channel) information
*/
struct fst_port_info {
- struct net_device *dev; /* Device struct - must be first */
+ struct net_device *dev; /* Device struct - must be first */
struct fst_card_info *card; /* Card we're associated with */
int index; /* Port index on the card */
int hwif; /* Line hardware (lineInterface copy) */
@@ -431,8 +432,7 @@ struct fst_port_info {
int txpos; /* Next Tx buffer to use */
int txipos; /* Next Tx buffer to check for free */
int start; /* Indication of start/stop to network */
- /*
- * A sixteen entry transmit queue
+ /* A sixteen entry transmit queue
*/
int txqs; /* index to get next buffer to tx */
int txqe; /* index to queue next packet */
@@ -479,9 +479,7 @@ struct fst_card_info {
#define dev_to_port(D) (dev_to_hdlc(D)->priv)
#define port_to_dev(P) ((P)->dev)
-
-/*
- * Shared memory window access macros
+/* Shared memory window access macros
*
* We have a nice memory based structure above, which could be directly
* mapped on i386 but might not work on other architectures unless we use
@@ -491,16 +489,15 @@ struct fst_card_info {
*/
#define WIN_OFFSET(X) ((long)&(((struct fst_shared *)SMC_BASE)->X))
-#define FST_RDB(C,E) readb ((C)->mem + WIN_OFFSET(E))
-#define FST_RDW(C,E) readw ((C)->mem + WIN_OFFSET(E))
-#define FST_RDL(C,E) readl ((C)->mem + WIN_OFFSET(E))
+#define FST_RDB(C, E) (readb((C)->mem + WIN_OFFSET(E)))
+#define FST_RDW(C, E) (readw((C)->mem + WIN_OFFSET(E)))
+#define FST_RDL(C, E) (readl((C)->mem + WIN_OFFSET(E)))
-#define FST_WRB(C,E,B) writeb ((B), (C)->mem + WIN_OFFSET(E))
-#define FST_WRW(C,E,W) writew ((W), (C)->mem + WIN_OFFSET(E))
-#define FST_WRL(C,E,L) writel ((L), (C)->mem + WIN_OFFSET(E))
+#define FST_WRB(C, E, B) (writeb((B), (C)->mem + WIN_OFFSET(E)))
+#define FST_WRW(C, E, W) (writew((W), (C)->mem + WIN_OFFSET(E)))
+#define FST_WRL(C, E, L) (writel((L), (C)->mem + WIN_OFFSET(E)))
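With the reworked macros every card access still funnels through the shared-memory window; a typical pair of accesses looks like this (a sketch, but taskStatus and interruptHandshake are real fst_shared fields used later in this patch, in check_started_ok() and fst_intr()):

	/* Usage sketch (assumes a valid struct fst_card_info *card). */
	static void fst_window_example(struct fst_card_info *card)
	{
		u8 fw_state = FST_RDB(card, taskStatus); /* 0x01 => running */

		(void)fw_state;
		FST_WRB(card, interruptHandshake, 0xEE); /* answer handshake */
	}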
-/*
- * Debug support
+/* Debug support
*/
#if FST_DEBUG
@@ -524,43 +521,41 @@ do { \
} while (0)
#endif
-/*
- * PCI ID lookup table
+/* PCI ID lookup table
*/
static const struct pci_device_id fst_pci_dev_id[] = {
- {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2P, PCI_ANY_ID,
+ {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2P, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_T2P},
- {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T4P, PCI_ANY_ID,
+ {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T4P, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_T4P},
- {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T1U, PCI_ANY_ID,
+ {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T1U, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_T1U},
- {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2U, PCI_ANY_ID,
+ {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2U, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_T2U},
- {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T4U, PCI_ANY_ID,
+ {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T4U, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_T4U},
- {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_TE1, PCI_ANY_ID,
+ {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_TE1, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_TE1},
- {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_TE1C, PCI_ANY_ID,
+ {PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_TE1C, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_TE1},
{0,} /* End */
};
MODULE_DEVICE_TABLE(pci, fst_pci_dev_id);
-/*
- * Device Driver Work Queues
+/* Device Driver Work Queues
*
- * So that we don't spend too much time processing events in the
- * Interrupt Service routine, we will declare a work queue per Card
+ * So that we don't spend too much time processing events in the
+ * Interrupt Service routine, we will declare a work queue per Card
* and make the ISR schedule a task in the queue for later execution.
 * In the 2.4 Kernel we used to use the immediate queue for BH's.
- * Now that they are gone, tasklets seem to be much better than work
+ * Now that they are gone, tasklets seem to be much better than work
* queues.
*/
@@ -578,18 +573,16 @@ static u64 fst_work_txq;
static u64 fst_work_intq;
static void
-fst_q_work_item(u64 * queue, int card_index)
+fst_q_work_item(u64 *queue, int card_index)
{
unsigned long flags;
u64 mask;
- /*
- * Grab the queue exclusively
+ /* Grab the queue exclusively
*/
spin_lock_irqsave(&fst_work_q_lock, flags);
- /*
- * Making an entry in the queue is simply a matter of setting
+ /* Making an entry in the queue is simply a matter of setting
* a bit for the card indicating that there is work to do in the
* bottom half for the card. Note the limitation of 64 cards.
 * That ought to be enough.
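The queue entry itself is just the bit-set the comment promises; the essence of the (elided) body, as a sketch using the mask and queue declared above:

	/* One bit per card in a u64, hence the 64-card ceiling noted above. */
	mask = 1ULL << card_index;
	*queue |= mask;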
@@ -606,8 +599,7 @@ fst_process_tx_work_q(struct tasklet_struct *unused)
u64 work_txq;
int i;
- /*
- * Grab the queue exclusively
+ /* Grab the queue exclusively
*/
dbg(DBG_TX, "fst_process_tx_work_q\n");
spin_lock_irqsave(&fst_work_q_lock, flags);
@@ -615,12 +607,11 @@ fst_process_tx_work_q(struct tasklet_struct *unused)
fst_work_txq = 0;
spin_unlock_irqrestore(&fst_work_q_lock, flags);
- /*
- * Call the bottom half for each card with work waiting
+ /* Call the bottom half for each card with work waiting
*/
for (i = 0; i < FST_MAX_CARDS; i++) {
if (work_txq & 0x01) {
- if (fst_card_array[i] != NULL) {
+ if (fst_card_array[i]) {
dbg(DBG_TX, "Calling tx bh for card %d\n", i);
do_bottom_half_tx(fst_card_array[i]);
}
@@ -636,8 +627,7 @@ fst_process_int_work_q(struct tasklet_struct *unused)
u64 work_intq;
int i;
- /*
- * Grab the queue exclusively
+ /* Grab the queue exclusively
*/
dbg(DBG_INTR, "fst_process_int_work_q\n");
spin_lock_irqsave(&fst_work_q_lock, flags);
@@ -645,12 +635,11 @@ fst_process_int_work_q(struct tasklet_struct *unused)
fst_work_intq = 0;
spin_unlock_irqrestore(&fst_work_q_lock, flags);
- /*
- * Call the bottom half for each card with work waiting
+ /* Call the bottom half for each card with work waiting
*/
for (i = 0; i < FST_MAX_CARDS; i++) {
if (work_intq & 0x01) {
- if (fst_card_array[i] != NULL) {
+ if (fst_card_array[i]) {
dbg(DBG_INTR,
"Calling rx & tx bh for card %d\n", i);
do_bottom_half_rx(fst_card_array[i]);
@@ -683,19 +672,16 @@ fst_cpureset(struct fst_card_info *card)
dbg(DBG_ASS,
"Error in reading interrupt line register\n");
}
- /*
- * Assert PLX software reset and Am186 hardware reset
+ /* Assert PLX software reset and Am186 hardware reset
* and then deassert the PLX software reset but 186 still in reset
*/
outw(0x440f, card->pci_conf + CNTRL_9054 + 2);
outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
- /*
- * We are delaying here to allow the 9054 to reset itself
+ /* We are delaying here to allow the 9054 to reset itself
*/
usleep_range(10, 20);
outw(0x240f, card->pci_conf + CNTRL_9054 + 2);
- /*
- * We are delaying here to allow the 9054 to reload its eeprom
+ /* We are delaying here to allow the 9054 to reload its eeprom
*/
usleep_range(10, 20);
outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
@@ -720,19 +706,17 @@ static inline void
fst_cpurelease(struct fst_card_info *card)
{
if (card->family == FST_FAMILY_TXU) {
- /*
- * Force posted writes to complete
+ /* Force posted writes to complete
*/
- (void) readb(card->mem);
+ (void)readb(card->mem);
- /*
- * Release LRESET DO = 1
+ /* Release LRESET DO = 1
* Then release Local Hold, DO = 1
*/
outw(0x040e, card->pci_conf + CNTRL_9054 + 2);
outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
} else {
- (void) readb(card->ctlmem);
+ (void)readb(card->ctlmem);
}
}
@@ -742,7 +726,7 @@ static inline void
fst_clear_intr(struct fst_card_info *card)
{
if (card->family == FST_FAMILY_TXU) {
- (void) readb(card->ctlmem);
+ (void)readb(card->ctlmem);
} else {
/* Poke the appropriate PLX chip register (same as enabling interrupts)
*/
@@ -755,11 +739,10 @@ fst_clear_intr(struct fst_card_info *card)
static inline void
fst_enable_intr(struct fst_card_info *card)
{
- if (card->family == FST_FAMILY_TXU) {
+ if (card->family == FST_FAMILY_TXU)
outl(0x0f0c0900, card->pci_conf + INTCSR_9054);
- } else {
+ else
outw(0x0543, card->pci_conf + INTCSR_9052);
- }
}
/* Disable card interrupts
@@ -767,11 +750,10 @@ fst_enable_intr(struct fst_card_info *card)
static inline void
fst_disable_intr(struct fst_card_info *card)
{
- if (card->family == FST_FAMILY_TXU) {
+ if (card->family == FST_FAMILY_TXU)
outl(0x00000000, card->pci_conf + INTCSR_9054);
- } else {
+ else
outw(0x0000, card->pci_conf + INTCSR_9052);
- }
}
/* Process the result of trying to pass a received frame up the stack
@@ -782,8 +764,7 @@ fst_process_rx_status(int rx_status, char *name)
switch (rx_status) {
case NET_RX_SUCCESS:
{
- /*
- * Nothing to do here
+ /* Nothing to do here
*/
break;
}
@@ -800,11 +781,10 @@ fst_process_rx_status(int rx_status, char *name)
static inline void
fst_init_dma(struct fst_card_info *card)
{
- /*
- * This is only required for the PLX 9054
+ /* This is only required for the PLX 9054
*/
if (card->family == FST_FAMILY_TXU) {
- pci_set_master(card->device);
+ pci_set_master(card->device);
outl(0x00020441, card->pci_conf + DMAMODE0);
outl(0x00020441, card->pci_conf + DMAMODE1);
outl(0x0, card->pci_conf + DMATHR);
@@ -819,8 +799,7 @@ fst_tx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
{
struct net_device *dev = port_to_dev(port);
- /*
- * Everything is now set, just tell the card to go
+ /* Everything is now set, just tell the card to go
*/
dbg(DBG_TX, "fst_tx_dma_complete\n");
FST_WRB(card, txDescrRing[port->index][txpos].bits,
@@ -830,8 +809,7 @@ fst_tx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
netif_trans_update(dev);
}
-/*
- * Mark it for our own raw sockets interface
+/* Mark it for our own raw sockets interface
*/
static __be16 farsync_type_trans(struct sk_buff *skb, struct net_device *dev)
{
@@ -874,55 +852,47 @@ fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
dev->stats.rx_dropped++;
}
-/*
- * Receive a frame through the DMA
+/* Receive a frame through the DMA
*/
static inline void
fst_rx_dma(struct fst_card_info *card, dma_addr_t dma, u32 mem, int len)
{
- /*
- * This routine will setup the DMA and start it
+ /* This routine will setup the DMA and start it
*/
dbg(DBG_RX, "In fst_rx_dma %x %x %d\n", (u32)dma, mem, len);
- if (card->dmarx_in_progress) {
+ if (card->dmarx_in_progress)
dbg(DBG_ASS, "In fst_rx_dma while dma in progress\n");
- }
outl(dma, card->pci_conf + DMAPADR0); /* Copy to here */
outl(mem, card->pci_conf + DMALADR0); /* from here */
outl(len, card->pci_conf + DMASIZ0); /* for this length */
outl(0x00000000c, card->pci_conf + DMADPR0); /* In this direction */
- /*
- * We use the dmarx_in_progress flag to flag the channel as busy
+ /* We use the dmarx_in_progress flag to flag the channel as busy
*/
card->dmarx_in_progress = 1;
outb(0x03, card->pci_conf + DMACSR0); /* Start the transfer */
}
-/*
- * Send a frame through the DMA
+/* Send a frame through the DMA
*/
static inline void
fst_tx_dma(struct fst_card_info *card, dma_addr_t dma, u32 mem, int len)
{
- /*
- * This routine will setup the DMA and start it.
+ /* This routine will setup the DMA and start it.
*/
dbg(DBG_TX, "In fst_tx_dma %x %x %d\n", (u32)dma, mem, len);
- if (card->dmatx_in_progress) {
+ if (card->dmatx_in_progress)
dbg(DBG_ASS, "In fst_tx_dma while dma in progress\n");
- }
outl(dma, card->pci_conf + DMAPADR1); /* Copy from here */
outl(mem, card->pci_conf + DMALADR1); /* to here */
outl(len, card->pci_conf + DMASIZ1); /* for this length */
outl(0x000000004, card->pci_conf + DMADPR1); /* In this direction */
- /*
- * We use the dmatx_in_progress to flag the channel as busy
+ /* We use the dmatx_in_progress to flag the channel as busy
*/
card->dmatx_in_progress = 1;
outb(0x03, card->pci_conf + DMACSR1); /* Start the transfer */
@@ -958,12 +928,11 @@ fst_issue_cmd(struct fst_port_info *port, unsigned short cmd)
mbval = FST_RDW(card, portMailbox[port->index][0]);
}
- if (safety > 0) {
+ if (safety > 0)
dbg(DBG_CMD, "Mailbox clear after %d jiffies\n", safety);
- }
- if (mbval == NAK) {
+
+ if (mbval == NAK)
dbg(DBG_CMD, "issue_cmd: previous command was NAK'd\n");
- }
FST_WRW(card, portMailbox[port->index][0], cmd);
@@ -998,8 +967,7 @@ fst_op_lower(struct fst_port_info *port, unsigned int outputs)
fst_issue_cmd(port, SETV24O);
}
-/*
- * Setup port Rx buffers
+/* Setup port Rx buffers
*/
static void
fst_rx_config(struct fst_port_info *port)
@@ -1016,8 +984,8 @@ fst_rx_config(struct fst_port_info *port)
for (i = 0; i < NUM_RX_BUFFER; i++) {
offset = BUF_OFFSET(rxBuffer[pi][i][0]);
- FST_WRW(card, rxDescrRing[pi][i].ladr, (u16) offset);
- FST_WRB(card, rxDescrRing[pi][i].hadr, (u8) (offset >> 16));
+ FST_WRW(card, rxDescrRing[pi][i].ladr, (u16)offset);
+ FST_WRB(card, rxDescrRing[pi][i].hadr, (u8)(offset >> 16));
FST_WRW(card, rxDescrRing[pi][i].bcnt, cnv_bcnt(LEN_RX_BUFFER));
FST_WRW(card, rxDescrRing[pi][i].mcnt, LEN_RX_BUFFER);
FST_WRB(card, rxDescrRing[pi][i].bits, DMA_OWN);
@@ -1026,8 +994,7 @@ fst_rx_config(struct fst_port_info *port)
spin_unlock_irqrestore(&card->card_lock, flags);
}
-/*
- * Setup port Tx buffers
+/* Setup port Tx buffers
*/
static void
fst_tx_config(struct fst_port_info *port)
@@ -1044,8 +1011,8 @@ fst_tx_config(struct fst_port_info *port)
for (i = 0; i < NUM_TX_BUFFER; i++) {
offset = BUF_OFFSET(txBuffer[pi][i][0]);
- FST_WRW(card, txDescrRing[pi][i].ladr, (u16) offset);
- FST_WRB(card, txDescrRing[pi][i].hadr, (u8) (offset >> 16));
+ FST_WRW(card, txDescrRing[pi][i].ladr, (u16)offset);
+ FST_WRB(card, txDescrRing[pi][i].hadr, (u8)(offset >> 16));
FST_WRW(card, txDescrRing[pi][i].bcnt, 0);
FST_WRB(card, txDescrRing[pi][i].bits, 0);
}
@@ -1069,16 +1036,14 @@ fst_intr_te1_alarm(struct fst_card_info *card, struct fst_port_info *port)
ais = FST_RDB(card, suStatus.alarmIndicationSignal);
if (los) {
- /*
- * Lost the link
+ /* Lost the link
*/
if (netif_carrier_ok(port_to_dev(port))) {
dbg(DBG_INTR, "Net carrier off\n");
netif_carrier_off(port_to_dev(port));
}
} else {
- /*
- * Link available
+ /* Link available
*/
if (!netif_carrier_ok(port_to_dev(port))) {
dbg(DBG_INTR, "Net carrier on\n");
@@ -1110,7 +1075,7 @@ fst_intr_ctlchg(struct fst_card_info *card, struct fst_port_info *port)
signals = FST_RDL(card, v24DebouncedSts[port->index]);
- if (signals & (((port->hwif == X21) || (port->hwif == X21D))
+ if (signals & ((port->hwif == X21 || port->hwif == X21D)
? IPSTS_INDICATE : IPSTS_DCD)) {
if (!netif_carrier_ok(port_to_dev(port))) {
dbg(DBG_INTR, "DCD active\n");
@@ -1132,8 +1097,7 @@ fst_log_rx_error(struct fst_card_info *card, struct fst_port_info *port,
{
struct net_device *dev = port_to_dev(port);
- /*
- * Increment the appropriate error counter
+ /* Increment the appropriate error counter
*/
dev->stats.rx_errors++;
if (dmabits & RX_OFLO) {
@@ -1168,15 +1132,14 @@ fst_recover_rx_error(struct fst_card_info *card, struct fst_port_info *port,
int pi;
pi = port->index;
- /*
- * Discard buffer descriptors until we see the start of the
+ /* Discard buffer descriptors until we see the start of the
* next frame. Note that for long frames this could be in
- * a subsequent interrupt.
+ * a subsequent interrupt.
*/
i = 0;
while ((dmabits & (DMA_OWN | RX_STP)) == 0) {
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
- rxp = (rxp+1) % NUM_RX_BUFFER;
+ rxp = (rxp + 1) % NUM_RX_BUFFER;
if (++i > NUM_RX_BUFFER) {
dbg(DBG_ASS, "intr_rx: Discarding more bufs"
" than we have\n");
@@ -1190,11 +1153,9 @@ fst_recover_rx_error(struct fst_card_info *card, struct fst_port_info *port,
/* Discard the terminal buffer */
if (!(dmabits & DMA_OWN)) {
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
- rxp = (rxp+1) % NUM_RX_BUFFER;
+ rxp = (rxp + 1) % NUM_RX_BUFFER;
}
port->rxpos = rxp;
- return;
-
}
/* Rx complete interrupt
@@ -1219,17 +1180,15 @@ fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
pi, rxp);
return;
}
- if (card->dmarx_in_progress) {
+ if (card->dmarx_in_progress)
return;
- }
/* Get buffer length */
len = FST_RDW(card, rxDescrRing[pi][rxp].mcnt);
/* Discard the CRC */
len -= 2;
if (len == 0) {
- /*
- * This seems to happen on the TE1 interface sometimes
+ /* This seems to happen on the TE1 interface sometimes
* so throw the frame away and log the event.
*/
pr_err("Frame received with 0 length. Card %d Port %d\n",
@@ -1237,7 +1196,7 @@ fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
/* Return descriptor to card */
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
- rxp = (rxp+1) % NUM_RX_BUFFER;
+ rxp = (rxp + 1) % NUM_RX_BUFFER;
port->rxpos = rxp;
return;
}
@@ -1254,7 +1213,8 @@ fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
}
/* Allocate SKB */
- if ((skb = dev_alloc_skb(len)) == NULL) {
+ skb = dev_alloc_skb(len);
+ if (!skb) {
dbg(DBG_RX, "intr_rx: can't allocate buffer\n");
dev->stats.rx_dropped++;
@@ -1262,18 +1222,17 @@ fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
/* Return descriptor to card */
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
- rxp = (rxp+1) % NUM_RX_BUFFER;
+ rxp = (rxp + 1) % NUM_RX_BUFFER;
port->rxpos = rxp;
return;
}
- /*
- * We know the length we need to receive, len.
+ /* We know the length we need to receive, len.
* It's not worth using the DMA for reads of less than
* FST_MIN_DMA_LEN
*/
- if ((len < FST_MIN_DMA_LEN) || (card->family == FST_FAMILY_TXP)) {
+ if (len < FST_MIN_DMA_LEN || card->family == FST_FAMILY_TXP) {
memcpy_fromio(skb_put(skb, len),
card->mem + BUF_OFFSET(rxBuffer[pi][rxp][0]),
len);
@@ -1307,12 +1266,11 @@ fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
dbg(DBG_ASS, "About to increment rxpos by more than 1\n");
dbg(DBG_ASS, "rxp = %d rxpos = %d\n", rxp, port->rxpos);
}
- rxp = (rxp+1) % NUM_RX_BUFFER;
+ rxp = (rxp + 1) % NUM_RX_BUFFER;
port->rxpos = rxp;
}
-/*
- * The bottom halfs to the ISR
+/* The bottom half to the ISR
*
*/
@@ -1326,8 +1284,7 @@ do_bottom_half_tx(struct fst_card_info *card)
unsigned long flags;
struct net_device *dev;
- /*
- * Find a free buffer for the transmit
+ /* Find a free buffer for the transmit
* Step through each port on this card
*/
@@ -1340,39 +1297,36 @@ do_bottom_half_tx(struct fst_card_info *card)
while (!(FST_RDB(card, txDescrRing[pi][port->txpos].bits) &
DMA_OWN) &&
!(card->dmatx_in_progress)) {
- /*
- * There doesn't seem to be a txdone event per-se
+ /* There doesn't seem to be a txdone event per-se
* We seem to have to deduce it, by checking the DMA_OWN
* bit on the next buffer we think we can use
*/
spin_lock_irqsave(&card->card_lock, flags);
- if ((txq_length = port->txqe - port->txqs) < 0) {
- /*
- * This is the case where one has wrapped and the
+ txq_length = port->txqe - port->txqs;
+ if (txq_length < 0) {
+ /* This is the case where one has wrapped and the
* maths gives us a negative number
*/
txq_length = txq_length + FST_TXQ_DEPTH;
}
spin_unlock_irqrestore(&card->card_lock, flags);
if (txq_length > 0) {
- /*
- * There is something to send
+ /* There is something to send
*/
spin_lock_irqsave(&card->card_lock, flags);
skb = port->txq[port->txqs];
port->txqs++;
- if (port->txqs == FST_TXQ_DEPTH) {
+ if (port->txqs == FST_TXQ_DEPTH)
port->txqs = 0;
- }
+
spin_unlock_irqrestore(&card->card_lock, flags);
- /*
- * copy the data and set the required indicators on the
+ /* copy the data and set the required indicators on the
* card.
*/
FST_WRW(card, txDescrRing[pi][port->txpos].bcnt,
cnv_bcnt(skb->len));
- if ((skb->len < FST_MIN_DMA_LEN) ||
- (card->family == FST_FAMILY_TXP)) {
+ if (skb->len < FST_MIN_DMA_LEN ||
+ card->family == FST_FAMILY_TXP) {
/* Enqueue the packet with normal io */
memcpy_toio(card->mem +
BUF_OFFSET(txBuffer[pi]
@@ -1401,8 +1355,7 @@ do_bottom_half_tx(struct fst_card_info *card)
}
if (++port->txpos >= NUM_TX_BUFFER)
port->txpos = 0;
- /*
- * If we have flow control on, can we now release it?
+ /* If we have flow control on, can we now release it?
*/
if (port->start) {
if (txq_length < fst_txq_low) {
@@ -1413,8 +1366,7 @@ do_bottom_half_tx(struct fst_card_info *card)
}
dev_kfree_skb(skb);
} else {
- /*
- * Nothing to send so break out of the while loop
+ /* Nothing to send so break out of the while loop
*/
break;
}
@@ -1438,8 +1390,7 @@ do_bottom_half_rx(struct fst_card_info *card)
while (!(FST_RDB(card, rxDescrRing[pi][port->rxpos].bits)
& DMA_OWN) && !(card->dmarx_in_progress)) {
if (rx_count > fst_max_reads) {
- /*
- * Don't spend forever in receive processing
+ /* Don't spend forever in receive processing
* Schedule another event
*/
fst_q_work_item(&fst_work_intq, card->card_no);
@@ -1452,8 +1403,7 @@ do_bottom_half_rx(struct fst_card_info *card)
}
}
-/*
- * The interrupt service routine
+/* The interrupt service routine
* Dev_id is our fst_card_info pointer
*/
static irqreturn_t
@@ -1468,8 +1418,7 @@ fst_intr(int dummy, void *dev_id)
unsigned int do_card_interrupt;
unsigned int int_retry_count;
- /*
- * Check to see if the interrupt was for this card
+ /* Check to see if the interrupt was for this card
* return if not
* Note that the call to clear the interrupt is important
*/
@@ -1478,10 +1427,9 @@ fst_intr(int dummy, void *dev_id)
pr_err("Interrupt received for card %d in a non running state (%d)\n",
card->card_no, card->state);
- /*
- * It is possible to really be running, i.e. we have re-loaded
+ /* It is possible to really be running, i.e. we have re-loaded
* a running card
- * Clear and reprime the interrupt source
+ * Clear and reprime the interrupt source
*/
fst_clear_intr(card);
return IRQ_HANDLED;
@@ -1490,8 +1438,7 @@ fst_intr(int dummy, void *dev_id)
/* Clear and reprime the interrupt source */
fst_clear_intr(card);
- /*
- * Is the interrupt for this card (handshake == 1)
+ /* Is the interrupt for this card (handshake == 1)
*/
do_card_interrupt = 0;
if (FST_RDB(card, interruptHandshake) == 1) {
@@ -1500,13 +1447,11 @@ fst_intr(int dummy, void *dev_id)
FST_WRB(card, interruptHandshake, 0xEE);
}
if (card->family == FST_FAMILY_TXU) {
- /*
- * Is it a DMA Interrupt
+ /* Is it a DMA Interrupt
*/
dma_intcsr = inl(card->pci_conf + INTCSR_9054);
if (dma_intcsr & 0x00200000) {
- /*
- * DMA Channel 0 (Rx transfer complete)
+ /* DMA Channel 0 (Rx transfer complete)
*/
dbg(DBG_RX, "DMA Rx xfer complete\n");
outb(0x8, card->pci_conf + DMACSR0);
@@ -1517,8 +1462,7 @@ fst_intr(int dummy, void *dev_id)
do_card_interrupt += FST_RX_DMA_INT;
}
if (dma_intcsr & 0x00400000) {
- /*
- * DMA Channel 1 (Tx transfer complete)
+ /* DMA Channel 1 (Tx transfer complete)
*/
dbg(DBG_TX, "DMA Tx xfer complete\n");
outb(0x8, card->pci_conf + DMACSR1);
@@ -1529,8 +1473,7 @@ fst_intr(int dummy, void *dev_id)
}
}
- /*
- * Have we been missing Interrupts
+ /* Have we been missing Interrupts
*/
int_retry_count = FST_RDL(card, interruptRetryCount);
if (int_retry_count) {
@@ -1539,9 +1482,8 @@ fst_intr(int dummy, void *dev_id)
FST_WRL(card, interruptRetryCount, 0);
}
- if (!do_card_interrupt) {
+ if (!do_card_interrupt)
return IRQ_HANDLED;
- }
 /* Schedule the bottom half of the ISR */
fst_q_work_item(&fst_work_intq, card->card_no);
@@ -1611,7 +1553,7 @@ fst_intr(int dummy, void *dev_id)
rdidx = 0;
}
FST_WRB(card, interruptEvent.rdindex, rdidx);
- return IRQ_HANDLED;
+ return IRQ_HANDLED;
}
/* Check that the shared memory configuration is one that we can handle
@@ -1635,7 +1577,8 @@ check_started_ok(struct fst_card_info *card)
return;
}
/* Firmware status flag, 0x00 = initialising, 0x01 = OK, 0xFF = fail */
- if ((i = FST_RDB(card, taskStatus)) == 0x01) {
+ i = FST_RDB(card, taskStatus);
+ if (i == 0x01) {
card->state = FST_RUNNING;
} else if (i == 0xFF) {
pr_err("Firmware initialisation failed. Card halted\n");
@@ -1665,8 +1608,8 @@ set_conf_from_info(struct fst_card_info *card, struct fst_port_info *port,
int err;
unsigned char my_framing;
- /* Set things according to the user set valid flags
- * Several of the old options have been invalidated/replaced by the
+ /* Set things according to the user set valid flags
+ * Several of the old options have been invalidated/replaced by the
* generic hdlc package.
*/
err = 0;
@@ -1740,9 +1683,8 @@ set_conf_from_info(struct fst_card_info *card, struct fst_port_info *port,
#endif
}
#if FST_DEBUG
- if (info->valid & FSTVAL_DEBUG) {
+ if (info->valid & FSTVAL_DEBUG)
fst_debug_mask = info->debug;
- }
#endif
return err;
@@ -1754,7 +1696,7 @@ gather_conf_info(struct fst_card_info *card, struct fst_port_info *port,
{
int i;
- memset(info, 0, sizeof (struct fstioc_info));
+ memset(info, 0, sizeof(struct fstioc_info));
i = port->index;
info->kernelVersion = LINUX_VERSION_CODE;
@@ -1787,27 +1729,23 @@ gather_conf_info(struct fst_card_info *card, struct fst_port_info *port,
info->cardMode = FST_RDW(card, cardMode);
info->smcFirmwareVersion = FST_RDL(card, smcFirmwareVersion);
- /*
- * The T2U can report cable presence for both A or B
- * in bits 0 and 1 of cableStatus. See which port we are and
+ /* The T2U can report cable presence for both A or B
+ * in bits 0 and 1 of cableStatus. See which port we are and
* do the mapping.
*/
if (card->family == FST_FAMILY_TXU) {
if (port->index == 0) {
- /*
- * Port A
+ /* Port A
*/
info->cableStatus = info->cableStatus & 1;
} else {
- /*
- * Port B
+ /* Port B
*/
info->cableStatus = info->cableStatus >> 1;
info->cableStatus = info->cableStatus & 1;
}
}
- /*
- * Some additional bits if we are TE1
+ /* Some additional bits if we are TE1
*/
if (card->type == FST_TYPE_TE1) {
info->lineSpeed = FST_RDL(card, suConfig.dataRate);
@@ -1851,14 +1789,12 @@ fst_set_iface(struct fst_card_info *card, struct fst_port_info *port,
sync_serial_settings sync;
int i;
- if (ifr->ifr_settings.size != sizeof (sync)) {
+ if (ifr->ifr_settings.size != sizeof(sync))
return -ENOMEM;
- }
if (copy_from_user
- (&sync, ifr->ifr_settings.ifs_ifsu.sync, sizeof (sync))) {
+ (&sync, ifr->ifr_settings.ifs_ifsu.sync, sizeof(sync)))
return -EFAULT;
- }
if (sync.loopback)
return -EINVAL;
@@ -1951,12 +1887,11 @@ fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
ifr->ifr_settings.type = IF_IFACE_X21;
break;
}
- if (ifr->ifr_settings.size == 0) {
+ if (ifr->ifr_settings.size == 0)
return 0; /* only type requested */
- }
- if (ifr->ifr_settings.size < sizeof (sync)) {
+
+ if (ifr->ifr_settings.size < sizeof(sync))
return -ENOMEM;
- }
i = port->index;
memset(&sync, 0, sizeof(sync));
@@ -1966,11 +1901,10 @@ fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
INTCLK ? CLOCK_INT : CLOCK_EXT;
sync.loopback = 0;
- if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &sync, sizeof (sync))) {
+ if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &sync, sizeof(sync)))
return -EFAULT;
- }
- ifr->ifr_settings.size = sizeof (sync);
+ ifr->ifr_settings.size = sizeof(sync);
return 0;
}
@@ -2008,21 +1942,19 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* First copy in the header with the length and offset of data
* to write
*/
- if (ifr->ifr_data == NULL) {
+ if (!ifr->ifr_data)
return -EINVAL;
- }
+
if (copy_from_user(&wrthdr, ifr->ifr_data,
- sizeof (struct fstioc_write))) {
+ sizeof(struct fstioc_write)))
return -EFAULT;
- }
/* Sanity check the parameters. We don't support partial writes
* when going over the top
*/
if (wrthdr.size > FST_MEMSIZE || wrthdr.offset > FST_MEMSIZE ||
- wrthdr.size + wrthdr.offset > FST_MEMSIZE) {
+ wrthdr.size + wrthdr.offset > FST_MEMSIZE)
return -ENXIO;
- }
/* Now copy the data to the card. */
@@ -2037,9 +1969,9 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* Writes to the memory of a card in the reset state constitute
* a download
*/
- if (card->state == FST_RESET) {
+ if (card->state == FST_RESET)
card->state = FST_DOWNLOAD;
- }
+
return 0;
case FSTGETCONF:
@@ -2059,21 +1991,18 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
}
- if (ifr->ifr_data == NULL) {
+ if (!ifr->ifr_data)
return -EINVAL;
- }
gather_conf_info(card, port, &info);
- if (copy_to_user(ifr->ifr_data, &info, sizeof (info))) {
+ if (copy_to_user(ifr->ifr_data, &info, sizeof(info)))
return -EFAULT;
- }
+
return 0;
case FSTSETCONF:
-
- /*
- * Most of the settings have been moved to the generic ioctls
+ /* Most of the settings have been moved to the generic ioctls
* this just covers debug and board ident now
*/
@@ -2082,9 +2011,8 @@ fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
card->card_no, card->state);
return -EIO;
}
- if (copy_from_user(&info, ifr->ifr_data, sizeof (info))) {
+ if (copy_from_user(&info, ifr->ifr_data, sizeof(info)))
return -EFAULT;
- }
return set_conf_from_info(card, port, &info);
@@ -2150,7 +2078,7 @@ fst_openport(struct fst_port_info *port)
port->run = 1;
signals = FST_RDL(port->card, v24DebouncedSts[port->index]);
- if (signals & (((port->hwif == X21) || (port->hwif == X21D))
+ if (signals & ((port->hwif == X21 || port->hwif == X21D)
? IPSTS_INDICATE : IPSTS_DCD))
netif_carrier_on(port_to_dev(port));
else
@@ -2159,7 +2087,6 @@ fst_openport(struct fst_port_info *port)
port->txqe = 0;
port->txqs = 0;
}
-
}
static void
@@ -2185,7 +2112,7 @@ fst_open(struct net_device *dev)
port = dev_to_port(dev);
if (!try_module_get(THIS_MODULE))
- return -EBUSY;
+ return -EBUSY;
if (port->mode != FST_RAW) {
err = hdlc_open(dev);
@@ -2220,9 +2147,9 @@ fst_close(struct net_device *dev)
netif_stop_queue(dev);
fst_closeport(dev_to_port(dev));
- if (port->mode != FST_RAW) {
+ if (port->mode != FST_RAW)
hdlc_close(dev);
- }
+
module_put(THIS_MODULE);
return 0;
}
@@ -2230,8 +2157,7 @@ fst_close(struct net_device *dev)
static int
fst_attach(struct net_device *dev, unsigned short encoding, unsigned short parity)
{
- /*
- * Setting currently fixed in FarSync card so we check and forget
+ /* Setting currently fixed in FarSync card so we check and forget
*/
if (encoding != ENCODING_NRZ || parity != PARITY_CRC16_PR1_CCITT)
return -EINVAL;
@@ -2289,23 +2215,21 @@ fst_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
- /*
- * We are always going to queue the packet
+ /* We are always going to queue the packet
* so that the bottom half is the only place we tx from
* Check there is room in the port txq
*/
spin_lock_irqsave(&card->card_lock, flags);
- if ((txq_length = port->txqe - port->txqs) < 0) {
- /*
- * This is the case where the next free has wrapped but the
+ txq_length = port->txqe - port->txqs;
+ if (txq_length < 0) {
+ /* This is the case where the next free has wrapped but the
* last used hasn't
*/
txq_length = txq_length + FST_TXQ_DEPTH;
}
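+	/* txq_length is now the number of frames in the port tx queue;
+	 * e.g. with a queue depth of 8, txqe = 2 and txqs = 6 give
+	 * -4 + 8 = 4 frames still queued (values illustrative only)
+	 */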
spin_unlock_irqrestore(&card->card_lock, flags);
if (txq_length > fst_txq_high) {
- /*
- * We have got enough buffers in the pipeline. Ask the network
+ /* We have got enough buffers in the pipeline. Ask the network
* layer to stop sending frames down
*/
netif_stop_queue(dev);
@@ -2313,8 +2237,7 @@ fst_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
if (txq_length == FST_TXQ_DEPTH - 1) {
- /*
- * This shouldn't have happened but such is life
+ /* This shouldn't have happened but such is life
*/
dev_kfree_skb(skb);
dev->stats.tx_errors++;
@@ -2323,8 +2246,7 @@ fst_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
- /*
- * queue the buffer
+ /* queue the buffer
*/
spin_lock_irqsave(&card->card_lock, flags);
port->txq[port->txqe] = skb;
@@ -2340,8 +2262,7 @@ fst_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-/*
- * Card setup having checked hardware resources.
+/* Card setup having checked hardware resources.
* Should be pretty bizarre if we get an error here (kernel memory
* exhaustion is one possibility). If we do see a problem we report it
* via a printk and leave the corresponding interface and all that follow
@@ -2371,7 +2292,7 @@ fst_init_card(struct fst_card_info *card)
err = register_hdlc_device(card->ports[i].dev);
if (err < 0) {
pr_err("Cannot register HDLC device for port %d (errno %d)\n",
- i, -err);
+ i, -err);
while (i--)
unregister_hdlc_device(card->ports[i].dev);
return err;
@@ -2393,14 +2314,13 @@ static const struct net_device_ops fst_ops = {
.ndo_tx_timeout = fst_tx_timeout,
};
-/*
- * Initialise card when detected.
+/* Initialise card when detected.
* Returns 0 to indicate success, or errno otherwise.
*/
static int
fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int no_of_cards_added = 0;
+ static int no_of_cards_added;
struct fst_card_info *card;
int err = 0;
int i;
@@ -2411,17 +2331,15 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#if FST_DEBUG
dbg(DBG_ASS, "The value of debug mask is %x\n", fst_debug_mask);
#endif
- /*
- * We are going to be clever and allow certain cards not to be
+ /* We are going to be clever and allow certain cards not to be
* configured. An exclude list can be provided in /etc/modules.conf
*/
if (fst_excluded_cards != 0) {
- /*
- * There are cards to exclude
-	 *
-	 */
+	/* There are cards to exclude */
for (i = 0; i < fst_excluded_cards; i++) {
- if ((pdev->devfn) >> 3 == fst_excluded_list[i]) {
+ if (pdev->devfn >> 3 == fst_excluded_list[i]) {
pr_info("FarSync PCI device %d not assigned\n",
(pdev->devfn) >> 3);
return -EBUSY;
@@ -2431,16 +2349,18 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Allocate driver private data */
card = kzalloc(sizeof(struct fst_card_info), GFP_KERNEL);
- if (card == NULL)
+ if (!card)
return -ENOMEM;
/* Try to enable the device */
- if ((err = pci_enable_device(pdev)) != 0) {
+ err = pci_enable_device(pdev);
+ if (err) {
pr_err("Failed to enable card. Err %d\n", -err);
goto enable_fail;
}
- if ((err = pci_request_regions(pdev, "FarSync")) !=0) {
+ err = pci_request_regions(pdev, "FarSync");
+ if (err) {
pr_err("Failed to allocate regions. Err %d\n", -err);
goto regions_fail;
}
@@ -2449,12 +2369,14 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
card->pci_conf = pci_resource_start(pdev, 1);
card->phys_mem = pci_resource_start(pdev, 2);
card->phys_ctlmem = pci_resource_start(pdev, 3);
- if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) {
+ card->mem = ioremap(card->phys_mem, FST_MEMSIZE);
+ if (!card->mem) {
pr_err("Physical memory remap failed\n");
err = -ENODEV;
goto ioremap_physmem_fail;
}
- if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) {
+ card->ctlmem = ioremap(card->phys_ctlmem, 0x10);
+ if (!card->ctlmem) {
pr_err("Control memory remap failed\n");
err = -ENODEV;
goto ioremap_ctlmem_fail;
@@ -2474,19 +2396,20 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
card->family = ((ent->driver_data == FST_TYPE_T2P) ||
(ent->driver_data == FST_TYPE_T4P))
? FST_FAMILY_TXP : FST_FAMILY_TXU;
- if ((ent->driver_data == FST_TYPE_T1U) ||
- (ent->driver_data == FST_TYPE_TE1))
+ if (ent->driver_data == FST_TYPE_T1U ||
+ ent->driver_data == FST_TYPE_TE1)
card->nports = 1;
else
card->nports = ((ent->driver_data == FST_TYPE_T2P) ||
(ent->driver_data == FST_TYPE_T2U)) ? 2 : 4;
card->state = FST_UNINIT;
- spin_lock_init ( &card->card_lock );
+ spin_lock_init(&card->card_lock);
- for ( i = 0 ; i < card->nports ; i++ ) {
+ for (i = 0; i < card->nports; i++) {
struct net_device *dev = alloc_hdlcdev(&card->ports[i]);
hdlc_device *hdlc;
+
if (!dev) {
while (i--)
free_netdev(card->ports[i].dev);
@@ -2495,29 +2418,29 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto hdlcdev_fail;
}
card->ports[i].dev = dev;
- card->ports[i].card = card;
- card->ports[i].index = i;
- card->ports[i].run = 0;
+ card->ports[i].card = card;
+ card->ports[i].index = i;
+ card->ports[i].run = 0;
hdlc = dev_to_hdlc(dev);
- /* Fill in the net device info */
+ /* Fill in the net device info */
/* Since this is a PCI setup this is purely
* informational. Give them the buffer addresses
* and basic card I/O.
*/
- dev->mem_start = card->phys_mem
- + BUF_OFFSET ( txBuffer[i][0][0]);
- dev->mem_end = card->phys_mem
- + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER - 1][LEN_RX_BUFFER - 1]);
- dev->base_addr = card->pci_conf;
- dev->irq = card->irq;
+ dev->mem_start = card->phys_mem
+ + BUF_OFFSET(txBuffer[i][0][0]);
+ dev->mem_end = card->phys_mem
+ + BUF_OFFSET(txBuffer[i][NUM_TX_BUFFER - 1][LEN_RX_BUFFER - 1]);
+ dev->base_addr = card->pci_conf;
+ dev->irq = card->irq;
dev->netdev_ops = &fst_ops;
dev->tx_queue_len = FST_TX_QUEUE_LEN;
dev->watchdog_timeo = FST_TX_TIMEOUT;
- hdlc->attach = fst_attach;
- hdlc->xmit = fst_start_xmit;
+ hdlc->attach = fst_attach;
+ hdlc->xmit = fst_start_xmit;
}
card->device = pdev;
@@ -2549,13 +2472,12 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto init_card_fail;
if (card->family == FST_FAMILY_TXU) {
- /*
- * Allocate a dma buffer for transmit and receives
+ /* Allocate a dma buffer for transmit and receives
*/
card->rx_dma_handle_host =
dma_alloc_coherent(&card->device->dev, FST_MAX_MTU,
&card->rx_dma_handle_card, GFP_KERNEL);
- if (card->rx_dma_handle_host == NULL) {
+ if (!card->rx_dma_handle_host) {
pr_err("Could not allocate rx dma buffer\n");
err = -ENOMEM;
goto rx_dma_fail;
@@ -2563,7 +2485,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
card->tx_dma_handle_host =
dma_alloc_coherent(&card->device->dev, FST_MAX_MTU,
&card->tx_dma_handle_card, GFP_KERNEL);
- if (card->tx_dma_handle_host == NULL) {
+ if (!card->tx_dma_handle_host) {
pr_err("Could not allocate tx dma buffer\n");
err = -ENOMEM;
goto tx_dma_fail;
@@ -2598,8 +2520,7 @@ enable_fail:
return err;
}
-/*
- * Cleanup and close down a card
+/* Cleanup and close down a card
*/
static void
fst_remove_one(struct pci_dev *pdev)
@@ -2611,6 +2532,7 @@ fst_remove_one(struct pci_dev *pdev)
for (i = 0; i < card->nports; i++) {
struct net_device *dev = port_to_dev(&card->ports[i]);
+
unregister_hdlc_device(dev);
}
@@ -2621,8 +2543,7 @@ fst_remove_one(struct pci_dev *pdev)
iounmap(card->mem);
pci_release_regions(pdev);
if (card->family == FST_FAMILY_TXU) {
- /*
- * Free dma buffers
+ /* Free dma buffers
*/
dma_free_coherent(&card->device->dev, FST_MAX_MTU,
card->rx_dma_handle_host,
diff --git a/drivers/net/wan/hd64570.c b/drivers/net/wan/hd64570.c
index 058e48182838..0d19e39fec86 100644
--- a/drivers/net/wan/hd64570.c
+++ b/drivers/net/wan/hd64570.c
@@ -47,7 +47,6 @@
#define SCA_INTR_DMAC_RX(node) (node ? 0x20 : 0x02)
#define SCA_INTR_DMAC_TX(node) (node ? 0x40 : 0x04)
-
static inline struct net_device *port_to_dev(port_t *port)
{
return port->dev;
@@ -59,12 +58,18 @@ static inline int sca_intr_status(card_t *card)
u8 isr0 = sca_in(ISR0, card);
u8 isr1 = sca_in(ISR1, card);
- if (isr1 & 0x03) result |= SCA_INTR_DMAC_RX(0);
- if (isr1 & 0x0C) result |= SCA_INTR_DMAC_TX(0);
- if (isr1 & 0x30) result |= SCA_INTR_DMAC_RX(1);
- if (isr1 & 0xC0) result |= SCA_INTR_DMAC_TX(1);
- if (isr0 & 0x0F) result |= SCA_INTR_MSCI(0);
- if (isr0 & 0xF0) result |= SCA_INTR_MSCI(1);
+ if (isr1 & 0x03)
+ result |= SCA_INTR_DMAC_RX(0);
+ if (isr1 & 0x0C)
+ result |= SCA_INTR_DMAC_TX(0);
+ if (isr1 & 0x30)
+ result |= SCA_INTR_DMAC_RX(1);
+ if (isr1 & 0xC0)
+ result |= SCA_INTR_DMAC_TX(1);
+ if (isr0 & 0x0F)
+ result |= SCA_INTR_MSCI(0);
+ if (isr0 & 0xF0)
+ result |= SCA_INTR_MSCI(1);
if (!(result & SCA_INTR_DMAC_TX(0)))
if (sca_in(DSR_TX(0), card) & DSR_EOM)
@@ -76,7 +81,7 @@ static inline int sca_intr_status(card_t *card)
return result;
}
-static inline port_t* dev_to_port(struct net_device *dev)
+static inline port_t *dev_to_port(struct net_device *dev)
{
return dev_to_hdlc(dev)->priv;
}
@@ -87,7 +92,6 @@ static inline u16 next_desc(port_t *port, u16 desc, int transmit)
: port_to_card(port)->rx_ring_buffers);
}
-
static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
{
u16 rx_buffs = port_to_card(port)->rx_ring_buffers;
@@ -98,14 +102,12 @@ static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
transmit * rx_buffs + desc;
}
-
static inline u16 desc_offset(port_t *port, u16 desc, int transmit)
{
/* Descriptor offset always fits in 16 bits */
return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc);
}
-
static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc,
int transmit)
{
@@ -118,14 +120,12 @@ static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc,
#endif
}
-
static inline u32 buffer_offset(port_t *port, u16 desc, int transmit)
{
return port_to_card(port)->buff_offset +
desc_abs_number(port, desc, transmit) * (u32)HDLC_MAX_MRU;
}
-
static inline void sca_set_carrier(port_t *port)
{
if (!(sca_in(get_msci(port) + ST3, port_to_card(port)) & ST3_DCD)) {
@@ -143,7 +143,6 @@ static inline void sca_set_carrier(port_t *port)
}
}
-
static void sca_init_port(port_t *port)
{
card_t *card = port_to_card(port);
@@ -213,13 +212,12 @@ static void sca_init_port(port_t *port)
sca_set_carrier(port);
}
-
#ifdef NEED_SCA_MSCI_INTR
/* MSCI interrupt service */
static inline void sca_msci_intr(port_t *port)
{
u16 msci = get_msci(port);
- card_t* card = port_to_card(port);
+ card_t *card = port_to_card(port);
u8 stat = sca_in(msci + ST1, card); /* read MSCI ST1 status */
/* Reset MSCI TX underrun and CDCD status bit */
@@ -236,7 +234,6 @@ static inline void sca_msci_intr(port_t *port)
}
#endif
-
static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc,
u16 rxin)
{
@@ -265,8 +262,9 @@ static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc,
memcpy_fromio(skb->data, winbase(card) + buff, maxlen);
openwin(card, page + 1);
memcpy_fromio(skb->data + maxlen, winbase(card), len - maxlen);
- } else
+ } else {
memcpy_fromio(skb->data, winbase(card) + buff, len);
+ }
#ifndef PAGE0_ALWAYS_MAPPED
openwin(card, 0); /* select pkt_desc table page back */
@@ -282,7 +280,6 @@ static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc,
netif_rx(skb);
}
-
/* Receive DMA interrupt service */
static inline void sca_rx_intr(port_t *port)
{
@@ -304,7 +301,7 @@ static inline void sca_rx_intr(port_t *port)
pkt_desc __iomem *desc;
u32 cda = sca_inw(dmac + CDAL, card);
- if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
+		if (cda >= desc_off && cda < desc_off + sizeof(pkt_desc))
break; /* No frame received */
desc = desc_address(port, port->rxin, 0);
@@ -322,8 +319,9 @@ static inline void sca_rx_intr(port_t *port)
dev->stats.rx_crc_errors++;
if (stat & ST_RX_EOM)
port->rxpart = 0; /* received last fragment */
- } else
+ } else {
sca_rx(card, port, desc, port->rxin);
+ }
/* Set new error descriptor address */
sca_outw(desc_off, dmac + EDAL, card);
@@ -334,13 +332,12 @@ static inline void sca_rx_intr(port_t *port)
sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
}
-
/* Transmit DMA interrupt service */
static inline void sca_tx_intr(port_t *port)
{
struct net_device *dev = port_to_dev(port);
u16 dmac = get_dmac_tx(port);
- card_t* card = port_to_card(port);
+ card_t *card = port_to_card(port);
u8 stat;
spin_lock(&port->lock);
@@ -356,7 +353,8 @@ static inline void sca_tx_intr(port_t *port)
u32 desc_off = desc_offset(port, port->txlast, 1);
u32 cda = sca_inw(dmac + CDAL, card);
- if ((cda >= desc_off) && (cda < desc_off + sizeof(pkt_desc)))
+
+		if (cda >= desc_off && cda < desc_off + sizeof(pkt_desc))
break; /* Transmitter is/will_be sending this frame */
desc = desc_address(port, port->txlast, 1);
@@ -370,8 +368,7 @@ static inline void sca_tx_intr(port_t *port)
spin_unlock(&port->lock);
}
-
-static irqreturn_t sca_intr(int irq, void* dev_id)
+static irqreturn_t sca_intr(int irq, void *dev_id)
{
card_t *card = dev_id;
int i;
@@ -379,10 +376,11 @@ static irqreturn_t sca_intr(int irq, void* dev_id)
int handled = 0;
u8 page = sca_get_page(card);
- while((stat = sca_intr_status(card)) != 0) {
+ while ((stat = sca_intr_status(card)) != 0) {
handled = 1;
for (i = 0; i < 2; i++) {
port_t *port = get_port(card, i);
+
if (port) {
if (stat & SCA_INTR_MSCI(i))
sca_msci_intr(port);
@@ -400,15 +398,13 @@ static irqreturn_t sca_intr(int irq, void* dev_id)
return IRQ_RETVAL(handled);
}
-
static void sca_set_port(port_t *port)
{
- card_t* card = port_to_card(port);
+ card_t *card = port_to_card(port);
u16 msci = get_msci(port);
u8 md2 = sca_in(msci + MD2, card);
unsigned int tmc, br = 10, brv = 1024;
-
if (port->settings.clock_rate > 0) {
/* Try lower br for better accuracy*/
do {
@@ -417,14 +413,15 @@ static void sca_set_port(port_t *port)
/* Baud Rate = CLOCK_BASE / TMC / 2^BR */
tmc = CLOCK_BASE / brv / port->settings.clock_rate;
- }while (br > 1 && tmc <= 128);
+ } while (br > 1 && tmc <= 128);
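+		/* e.g. a (board-specific) CLOCK_BASE of 9.8304 MHz with
+		 * tmc = 48, br = 1 gives 9830400 / 48 / 2 = 102400 bps
+		 */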
if (tmc < 1) {
tmc = 1;
br = 0; /* For baud=CLOCK_BASE we use tmc=1 br=0 */
brv = 1;
- } else if (tmc > 255)
+ } else if (tmc > 255) {
tmc = 256; /* tmc=0 means 256 - low baud rates */
+ }
port->settings.clock_rate = CLOCK_BASE / brv / tmc;
} else {
@@ -450,34 +447,50 @@ static void sca_set_port(port_t *port)
md2 &= ~MD2_LOOPBACK;
sca_out(md2, msci + MD2, card);
-
}
-
static void sca_open(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
- card_t* card = port_to_card(port);
+ card_t *card = port_to_card(port);
u16 msci = get_msci(port);
u8 md0, md2;
- switch(port->encoding) {
- case ENCODING_NRZ: md2 = MD2_NRZ; break;
- case ENCODING_NRZI: md2 = MD2_NRZI; break;
- case ENCODING_FM_MARK: md2 = MD2_FM_MARK; break;
- case ENCODING_FM_SPACE: md2 = MD2_FM_SPACE; break;
- default: md2 = MD2_MANCHESTER;
+ switch (port->encoding) {
+ case ENCODING_NRZ:
+ md2 = MD2_NRZ;
+ break;
+ case ENCODING_NRZI:
+ md2 = MD2_NRZI;
+ break;
+ case ENCODING_FM_MARK:
+ md2 = MD2_FM_MARK;
+ break;
+ case ENCODING_FM_SPACE:
+ md2 = MD2_FM_SPACE;
+ break;
+ default:
+ md2 = MD2_MANCHESTER;
}
if (port->settings.loopback)
md2 |= MD2_LOOPBACK;
- switch(port->parity) {
- case PARITY_CRC16_PR0: md0 = MD0_HDLC | MD0_CRC_16_0; break;
- case PARITY_CRC16_PR1: md0 = MD0_HDLC | MD0_CRC_16; break;
- case PARITY_CRC16_PR0_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU_0; break;
- case PARITY_CRC16_PR1_CCITT: md0 = MD0_HDLC | MD0_CRC_ITU; break;
- default: md0 = MD0_HDLC | MD0_CRC_NONE;
+ switch (port->parity) {
+ case PARITY_CRC16_PR0:
+ md0 = MD0_HDLC | MD0_CRC_16_0;
+ break;
+ case PARITY_CRC16_PR1:
+ md0 = MD0_HDLC | MD0_CRC_16;
+ break;
+ case PARITY_CRC16_PR0_CCITT:
+ md0 = MD0_HDLC | MD0_CRC_ITU_0;
+ break;
+ case PARITY_CRC16_PR1_CCITT:
+ md0 = MD0_HDLC | MD0_CRC_ITU;
+ break;
+ default:
+ md0 = MD0_HDLC | MD0_CRC_NONE;
}
sca_out(CMD_RESET, msci + CMD, card);
@@ -494,9 +507,9 @@ static void sca_open(struct net_device *dev)
sca_out(0x14, msci + TRC1, card); /* +1=TXRDY/DMA deactiv condition */
/* We're using the following interrupts:
- - TXINT (DMAC completed all transmisions, underrun or DCD change)
- - all DMA interrupts
-*/
+ * - TXINT (DMAC completed all transmissions, underrun or DCD change)
+ * - all DMA interrupts
+ */
sca_set_carrier(port);
/* MSCI TX INT and RX INT A IRQ enable */
@@ -517,11 +530,10 @@ static void sca_open(struct net_device *dev)
netif_start_queue(dev);
}
-
static void sca_close(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
- card_t* card = port_to_card(port);
+ card_t *card = port_to_card(port);
/* reset channel */
sca_out(CMD_RESET, get_msci(port) + CMD, port_to_card(port));
@@ -535,7 +547,6 @@ static void sca_close(struct net_device *dev)
netif_stop_queue(dev);
}
-
static int sca_attach(struct net_device *dev, unsigned short encoding,
unsigned short parity)
{
@@ -558,7 +569,6 @@ static int sca_attach(struct net_device *dev, unsigned short encoding,
return 0;
}
-
#ifdef DEBUG_RINGS
static void sca_dump_rings(struct net_device *dev)
{
@@ -613,7 +623,6 @@ static void sca_dump_rings(struct net_device *dev)
}
#endif /* DEBUG_RINGS */
-
static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
{
port_t *port = dev_to_port(dev);
@@ -645,8 +654,9 @@ static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
memcpy_toio(winbase(card) + buff, skb->data, maxlen);
openwin(card, page + 1);
memcpy_toio(winbase(card), skb->data + maxlen, len - maxlen);
- } else
+ } else {
memcpy_toio(winbase(card) + buff, skb->data, len);
+ }
#ifndef PAGE0_ALWAYS_MAPPED
openwin(card, 0); /* select pkt_desc table page back */
@@ -670,7 +680,6 @@ static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-
#ifdef NEED_DETECT_RAM
static u32 sca_detect_ram(card_t *card, u8 __iomem *rambase, u32 ramsize)
{
@@ -699,7 +708,6 @@ static u32 sca_detect_ram(card_t *card, u8 __iomem *rambase, u32 ramsize)
}
#endif /* NEED_DETECT_RAM */
-
static void sca_init(card_t *card, int wait_states)
{
sca_out(wait_states, WCRL, card); /* Wait Control */
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index 1bdd3df0867a..dd6312b69861 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -36,8 +36,7 @@
#include <linux/slab.h>
#include <net/net_namespace.h>
-
-static const char* version = "HDLC support module revision 1.22";
+static const char *version = "HDLC support module revision 1.22";
#undef DEBUG_LINK
@@ -74,25 +73,24 @@ netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev)
return hdlc->xmit(skb, dev); /* call hardware driver directly */
}
+EXPORT_SYMBOL(hdlc_start_xmit);
static inline void hdlc_proto_start(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
+
if (hdlc->proto->start)
hdlc->proto->start(dev);
}
-
-
static inline void hdlc_proto_stop(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
+
if (hdlc->proto->stop)
hdlc->proto->stop(dev);
}
-
-
static int hdlc_device_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
@@ -141,8 +139,6 @@ carrier_exit:
return NOTIFY_DONE;
}
-
-
/* Must be called by hardware driver when HDLC device is being opened */
int hdlc_open(struct net_device *dev)
{
@@ -152,11 +148,12 @@ int hdlc_open(struct net_device *dev)
hdlc->carrier, hdlc->open);
#endif
- if (hdlc->proto == NULL)
+ if (!hdlc->proto)
return -ENOSYS; /* no protocol attached */
if (hdlc->proto->open) {
int result = hdlc->proto->open(dev);
+
if (result)
return result;
}
@@ -166,16 +163,16 @@ int hdlc_open(struct net_device *dev)
if (hdlc->carrier) {
netdev_info(dev, "Carrier detected\n");
hdlc_proto_start(dev);
- } else
+ } else {
netdev_info(dev, "No carrier\n");
+ }
hdlc->open = 1;
spin_unlock_irq(&hdlc->state_lock);
return 0;
}
-
-
+EXPORT_SYMBOL(hdlc_open);
/* Must be called by hardware driver when HDLC device is being closed */
void hdlc_close(struct net_device *dev)
@@ -197,8 +194,7 @@ void hdlc_close(struct net_device *dev)
if (hdlc->proto->close)
hdlc->proto->close(dev);
}
-
-
+EXPORT_SYMBOL(hdlc_close);
int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
@@ -217,12 +213,14 @@ int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* Not handled by currently attached protocol (if any) */
while (proto) {
- if ((result = proto->ioctl(dev, ifr)) != -EINVAL)
+ result = proto->ioctl(dev, ifr);
+ if (result != -EINVAL)
return result;
proto = proto->next;
}
return -EINVAL;
}
+EXPORT_SYMBOL(hdlc_ioctl);
static const struct header_ops hdlc_null_ops;
@@ -256,12 +254,14 @@ static void hdlc_setup(struct net_device *dev)
struct net_device *alloc_hdlcdev(void *priv)
{
struct net_device *dev;
+
dev = alloc_netdev(sizeof(struct hdlc_device), "hdlc%d",
NET_NAME_UNKNOWN, hdlc_setup);
if (dev)
dev_to_hdlc(dev)->priv = priv;
return dev;
}
+EXPORT_SYMBOL(alloc_hdlcdev);
void unregister_hdlc_device(struct net_device *dev)
{
@@ -270,8 +270,7 @@ void unregister_hdlc_device(struct net_device *dev)
unregister_netdevice(dev);
rtnl_unlock();
}
-
-
+EXPORT_SYMBOL(unregister_hdlc_device);
int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
size_t size)
@@ -287,7 +286,7 @@ int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
if (size) {
dev_to_hdlc(dev)->state = kmalloc(size, GFP_KERNEL);
- if (dev_to_hdlc(dev)->state == NULL) {
+ if (!dev_to_hdlc(dev)->state) {
module_put(proto->module);
return -ENOBUFS;
}
@@ -296,7 +295,7 @@ int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
return 0;
}
-
+EXPORT_SYMBOL(attach_hdlc_protocol);
int detach_hdlc_protocol(struct net_device *dev)
{
@@ -322,7 +321,7 @@ int detach_hdlc_protocol(struct net_device *dev)
return 0;
}
-
+EXPORT_SYMBOL(detach_hdlc_protocol);
void register_hdlc_protocol(struct hdlc_proto *proto)
{
@@ -331,7 +330,7 @@ void register_hdlc_protocol(struct hdlc_proto *proto)
first_proto = proto;
rtnl_unlock();
}
-
+EXPORT_SYMBOL(register_hdlc_protocol);
void unregister_hdlc_protocol(struct hdlc_proto *proto)
{
@@ -346,54 +345,38 @@ void unregister_hdlc_protocol(struct hdlc_proto *proto)
*p = proto->next;
rtnl_unlock();
}
-
-
+EXPORT_SYMBOL(unregister_hdlc_protocol);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("HDLC support module");
MODULE_LICENSE("GPL v2");
-EXPORT_SYMBOL(hdlc_start_xmit);
-EXPORT_SYMBOL(hdlc_open);
-EXPORT_SYMBOL(hdlc_close);
-EXPORT_SYMBOL(hdlc_ioctl);
-EXPORT_SYMBOL(alloc_hdlcdev);
-EXPORT_SYMBOL(unregister_hdlc_device);
-EXPORT_SYMBOL(register_hdlc_protocol);
-EXPORT_SYMBOL(unregister_hdlc_protocol);
-EXPORT_SYMBOL(attach_hdlc_protocol);
-EXPORT_SYMBOL(detach_hdlc_protocol);
-
static struct packet_type hdlc_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_HDLC),
.func = hdlc_rcv,
};
-
static struct notifier_block hdlc_notifier = {
.notifier_call = hdlc_device_event,
};
-
static int __init hdlc_module_init(void)
{
int result;
pr_info("%s\n", version);
- if ((result = register_netdevice_notifier(&hdlc_notifier)) != 0)
+ result = register_netdevice_notifier(&hdlc_notifier);
+ if (result)
return result;
dev_add_pack(&hdlc_packet_type);
return 0;
}
-
-
static void __exit hdlc_module_exit(void)
{
dev_remove_pack(&hdlc_packet_type);
unregister_netdevice_notifier(&hdlc_notifier);
}
-
module_init(hdlc_module_init);
module_exit(hdlc_module_exit);
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index cb5898f7d68c..349ca18088e8 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -28,13 +28,11 @@
#define CISCO_ADDR_REPLY 1 /* Cisco address reply */
#define CISCO_KEEPALIVE_REQ 2 /* Cisco keepalive request */
-
struct hdlc_header {
u8 address;
u8 control;
__be16 protocol;
-}__packed;
-
+} __packed;
struct cisco_packet {
__be32 type; /* code */
@@ -42,11 +40,10 @@ struct cisco_packet {
__be32 par2;
__be16 rel; /* reliability */
__be32 time;
-}__packed;
+} __packed;
#define CISCO_PACKET_LEN 18
#define CISCO_BIG_PACKET_LEN 20
-
struct cisco_state {
cisco_proto settings;
@@ -59,16 +56,13 @@ struct cisco_state {
u32 rxseq; /* RX sequence number */
};
-
static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr);
-
-static inline struct cisco_state* state(hdlc_device *hdlc)
+static inline struct cisco_state *state(hdlc_device *hdlc)
{
return (struct cisco_state *)hdlc->state;
}
-
static int cisco_hard_header(struct sk_buff *skb, struct net_device *dev,
u16 type, const void *daddr, const void *saddr,
unsigned int len)
@@ -79,7 +73,7 @@ static int cisco_hard_header(struct sk_buff *skb, struct net_device *dev,
#endif
skb_push(skb, sizeof(struct hdlc_header));
- data = (struct hdlc_header*)skb->data;
+ data = (struct hdlc_header *)skb->data;
if (type == CISCO_KEEPALIVE)
data->address = CISCO_MULTICAST;
else
@@ -90,8 +84,6 @@ static int cisco_hard_header(struct sk_buff *skb, struct net_device *dev,
return sizeof(struct hdlc_header);
}
-
-
static void cisco_keepalive_send(struct net_device *dev, u32 type,
__be32 par1, __be32 par2)
{
@@ -100,13 +92,12 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
skb = dev_alloc_skb(sizeof(struct hdlc_header) +
sizeof(struct cisco_packet));
- if (!skb) {
- netdev_warn(dev, "Memory squeeze on %s()\n", __func__);
+ if (!skb)
return;
- }
+
skb_reserve(skb, 4);
cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
- data = (struct cisco_packet*)(skb->data + 4);
+ data = (struct cisco_packet *)(skb->data + 4);
data->type = htonl(type);
data->par1 = par1;
@@ -124,11 +115,9 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
dev_queue_xmit(skb);
}
-
-
static __be16 cisco_type_trans(struct sk_buff *skb, struct net_device *dev)
{
- struct hdlc_header *data = (struct hdlc_header*)skb->data;
+ struct hdlc_header *data = (struct hdlc_header *)skb->data;
if (skb->len < sizeof(struct hdlc_header))
return cpu_to_be16(ETH_P_HDLC);
@@ -148,13 +137,12 @@ static __be16 cisco_type_trans(struct sk_buff *skb, struct net_device *dev)
}
}
-
static int cisco_rx(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
hdlc_device *hdlc = dev_to_hdlc(dev);
struct cisco_state *st = state(hdlc);
- struct hdlc_header *data = (struct hdlc_header*)skb->data;
+ struct hdlc_header *data = (struct hdlc_header *)skb->data;
struct cisco_packet *cisco_data;
struct in_device *in_dev;
__be32 addr, mask;
@@ -183,10 +171,10 @@ static int cisco_rx(struct sk_buff *skb)
goto rx_error;
}
- cisco_data = (struct cisco_packet*)(skb->data + sizeof
+ cisco_data = (struct cisco_packet *)(skb->data + sizeof
(struct hdlc_header));
- switch (ntohl (cisco_data->type)) {
+ switch (ntohl(cisco_data->type)) {
case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
@@ -226,6 +214,7 @@ static int cisco_rx(struct sk_buff *skb)
st->last_poll = jiffies;
if (!st->up) {
u32 sec, min, hrs, days;
+
sec = ntohl(cisco_data->time) / 1000;
min = sec / 60; sec -= min * 60;
hrs = min / 60; min -= hrs * 60;
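+			/* e.g. a reported time of 90061000 ms works out to
+			 * 1 day, 1 hour, 1 minute, 1 second of peer uptime
+			 */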
@@ -253,8 +242,6 @@ rx_error:
return NET_RX_DROP;
}
-
-
static void cisco_timer(struct timer_list *t)
{
struct cisco_state *st = from_timer(st, t, timer);
@@ -276,8 +263,6 @@ static void cisco_timer(struct timer_list *t)
add_timer(&st->timer);
}
-
-
static void cisco_start(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -294,8 +279,6 @@ static void cisco_start(struct net_device *dev)
add_timer(&st->timer);
}
-
-
static void cisco_stop(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -310,7 +293,6 @@ static void cisco_stop(struct net_device *dev)
spin_unlock_irqrestore(&st->lock, flags);
}
-
static struct hdlc_proto proto = {
.start = cisco_start,
.stop = cisco_stop,
@@ -359,7 +341,8 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
new_settings.timeout < 2)
return -EINVAL;
- result = hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
+ result = hdlc->attach(dev, ENCODING_NRZ,
+ PARITY_CRC16_PR1_CCITT);
if (result)
return result;
@@ -381,21 +364,17 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
return -EINVAL;
}
-
static int __init mod_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
-
-
static void __exit mod_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-
module_init(mod_init);
module_exit(mod_exit);
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 0720f5f92caa..72250fe0a1df 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -6,16 +6,16 @@
* Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
*
- Theory of PVC state
+ Theory of PVC state
DCE mode:
(exist,new) -> 0,0 when "PVC create" or if "link unreliable"
- 0,x -> 1,1 if "link reliable" when sending FULL STATUS
- 1,1 -> 1,0 if received FULL STATUS ACK
+ 0,x -> 1,1 if "link reliable" when sending FULL STATUS
+ 1,1 -> 1,0 if received FULL STATUS ACK
(active) -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create"
- -> 1 when "PVC up" and (exist,new) = 1,0
+ -> 1 when "PVC up" and (exist,new) = 1,0
DTE mode:
(exist,new,active) = FULL STATUS if "link reliable"
@@ -60,7 +60,6 @@
#define NLPID_CCITT_ANSI_LMI 0x08
#define NLPID_CISCO_LMI 0x09
-
#define LMI_CCITT_ANSI_DLCI 0 /* LMI DLCI */
#define LMI_CISCO_DLCI 1023
@@ -86,7 +85,6 @@
#define LMI_CCITT_CISCO_LENGTH 13 /* LMI frame lengths */
#define LMI_ANSI_LENGTH 14
-
struct fr_hdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
unsigned ea1: 1;
@@ -111,7 +109,6 @@ struct fr_hdr {
#endif
} __packed;
-
struct pvc_device {
struct net_device *frad;
struct net_device *main;
@@ -128,7 +125,7 @@ struct pvc_device {
unsigned int fecn: 1;
unsigned int becn: 1;
unsigned int bandwidth; /* Cisco LMI reporting only */
- }state;
+ } state;
};
struct frad_state {
@@ -149,29 +146,24 @@ struct frad_state {
u8 rxseq; /* RX sequence number */
};
-
static int fr_ioctl(struct net_device *dev, struct ifreq *ifr);
-
static inline u16 q922_to_dlci(u8 *hdr)
{
return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
}
-
static inline void dlci_to_q922(u8 *hdr, u16 dlci)
{
hdr[0] = (dlci >> 2) & 0xFC;
hdr[1] = ((dlci << 4) & 0xF0) | 0x01;
}
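+/* e.g. DLCI 1023 (0x3FF) encodes as hdr[0] = 0xFC, hdr[1] = 0xF1 (EA bit
+ * set), and q922_to_dlci() recovers 1023 from those same two bytes
+ */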
-
-static inline struct frad_state* state(hdlc_device *hdlc)
+static inline struct frad_state *state(hdlc_device *hdlc)
{
- return(struct frad_state *)(hdlc->state);
+ return (struct frad_state *)(hdlc->state);
}
-
static inline struct pvc_device *find_pvc(hdlc_device *hdlc, u16 dlci)
{
struct pvc_device *pvc = state(hdlc)->first_pvc;
@@ -187,7 +179,6 @@ static inline struct pvc_device *find_pvc(hdlc_device *hdlc, u16 dlci)
return NULL;
}
-
static struct pvc_device *add_pvc(struct net_device *dev, u16 dlci)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -215,13 +206,11 @@ static struct pvc_device *add_pvc(struct net_device *dev, u16 dlci)
return pvc;
}
-
static inline int pvc_is_used(struct pvc_device *pvc)
{
return pvc->main || pvc->ether;
}
-
static inline void pvc_carrier(int on, struct pvc_device *pvc)
{
if (on) {
@@ -241,7 +230,6 @@ static inline void pvc_carrier(int on, struct pvc_device *pvc)
}
}
-
static inline void delete_unused_pvcs(hdlc_device *hdlc)
{
struct pvc_device **pvc_p = &state(hdlc)->first_pvc;
@@ -260,7 +248,6 @@ static inline void delete_unused_pvcs(hdlc_device *hdlc)
}
}
-
static inline struct net_device **get_dev_p(struct pvc_device *pvc,
int type)
{
@@ -270,7 +257,6 @@ static inline struct net_device **get_dev_p(struct pvc_device *pvc,
return &pvc->main;
}
-
static int fr_hard_header(struct sk_buff *skb, u16 dlci)
{
if (!skb->dev) { /* Control packets */
@@ -334,8 +320,6 @@ static int fr_hard_header(struct sk_buff *skb, u16 dlci)
return 0;
}
-
-
static int pvc_open(struct net_device *dev)
{
struct pvc_device *pvc = dev->ml_priv;
@@ -345,6 +329,7 @@ static int pvc_open(struct net_device *dev)
if (pvc->open_count++ == 0) {
hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
+
if (state(hdlc)->settings.lmi == LMI_NONE)
pvc->state.active = netif_carrier_ok(pvc->frad);
@@ -354,14 +339,13 @@ static int pvc_open(struct net_device *dev)
return 0;
}
-
-
static int pvc_close(struct net_device *dev)
{
struct pvc_device *pvc = dev->ml_priv;
if (--pvc->open_count == 0) {
hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
+
if (state(hdlc)->settings.lmi == LMI_NONE)
pvc->state.active = 0;
@@ -373,8 +357,6 @@ static int pvc_close(struct net_device *dev)
return 0;
}
-
-
static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct pvc_device *pvc = dev->ml_priv;
@@ -465,15 +447,12 @@ static inline void fr_log_dlci_active(struct pvc_device *pvc)
pvc->state.active ? "active" : "inactive");
}
-
-
static inline u8 fr_lmi_nextseq(u8 x)
{
x++;
return x ? x : 1;
}
-
static void fr_lmi_send(struct net_device *dev, int fullrep)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -495,17 +474,16 @@ static void fr_lmi_send(struct net_device *dev, int fullrep)
}
skb = dev_alloc_skb(len);
- if (!skb) {
- netdev_warn(dev, "Memory squeeze on fr_lmi_send()\n");
+ if (!skb)
return;
- }
+
memset(skb->data, 0, len);
skb_reserve(skb, 4);
- if (lmi == LMI_CISCO) {
+ if (lmi == LMI_CISCO)
fr_hard_header(skb, LMI_CISCO_DLCI);
- } else {
+ else
fr_hard_header(skb, LMI_CCITT_ANSI_DLCI);
- }
+
data = skb_tail_pointer(skb);
data[i++] = LMI_CALLREF;
data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
@@ -569,8 +547,6 @@ static void fr_lmi_send(struct net_device *dev, int fullrep)
dev_queue_xmit(skb);
}
-
-
static void fr_set_link_state(int reliable, struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -603,7 +579,6 @@ static void fr_set_link_state(int reliable, struct net_device *dev)
}
}
-
static void fr_timer(struct timer_list *t)
{
struct frad_state *st = from_timer(st, t, timer);
@@ -637,10 +612,10 @@ static void fr_timer(struct timer_list *t)
fr_set_link_state(reliable, dev);
}
- if (state(hdlc)->settings.dce)
+ if (state(hdlc)->settings.dce) {
state(hdlc)->timer.expires = jiffies +
state(hdlc)->settings.t392 * HZ;
- else {
+ } else {
if (state(hdlc)->n391cnt)
state(hdlc)->n391cnt--;
@@ -655,7 +630,6 @@ static void fr_timer(struct timer_list *t)
add_timer(&state(hdlc)->timer);
}
-
static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -696,8 +670,9 @@ static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
return 1;
}
i = 7;
- } else
+ } else {
i = 6;
+ }
if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
LMI_ANSI_CISCO_REPTYPE)) {
@@ -814,8 +789,8 @@ static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
}
i++;
- new = !! (skb->data[i + 2] & 0x08);
- active = !! (skb->data[i + 2] & 0x02);
+ new = !!(skb->data[i + 2] & 0x08);
+ active = !!(skb->data[i + 2] & 0x02);
if (lmi == LMI_CISCO) {
dlci = (skb->data[i] << 8) | skb->data[i + 1];
bw = (skb->data[i + 3] << 16) |
@@ -962,8 +937,8 @@ static int fr_rx(struct sk_buff *skb)
pvc->state.becn ^= 1;
}
-
- if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb) {
frad->stats.rx_dropped++;
return NET_RX_DROP;
}
@@ -1018,8 +993,6 @@ rx_drop:
return NET_RX_DROP;
}
-
-
static void fr_start(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -1040,11 +1013,11 @@ static void fr_start(struct net_device *dev)
/* First poll after 1 s */
state(hdlc)->timer.expires = jiffies + HZ;
add_timer(&state(hdlc)->timer);
- } else
+ } else {
fr_set_link_state(1, dev);
+ }
}
-
static void fr_stop(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -1056,7 +1029,6 @@ static void fr_stop(struct net_device *dev)
fr_set_link_state(0, dev);
}
-
static void fr_close(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -1071,7 +1043,6 @@ static void fr_close(struct net_device *dev)
}
}
-
static void pvc_setup(struct net_device *dev)
{
dev->type = ARPHRD_DLCI;
@@ -1095,7 +1066,8 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
struct net_device *dev;
int used;
- if ((pvc = add_pvc(frad, dlci)) == NULL) {
+ pvc = add_pvc(frad, dlci);
+ if (!pvc) {
netdev_warn(frad, "Memory squeeze on fr_add_pvc()\n");
return -ENOBUFS;
}
@@ -1121,7 +1093,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
eth_hw_addr_random(dev);
} else {
- *(__be16*)dev->dev_addr = htons(dlci);
+ *(__be16 *)dev->dev_addr = htons(dlci);
dlci_to_q922(dev->broadcast, dlci);
}
dev->netdev_ops = &pvc_ops;
@@ -1147,17 +1119,17 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
return 0;
}
-
-
static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
{
struct pvc_device *pvc;
struct net_device *dev;
- if ((pvc = find_pvc(hdlc, dlci)) == NULL)
+ pvc = find_pvc(hdlc, dlci);
+ if (!pvc)
return -ENOENT;
- if ((dev = *get_dev_p(pvc, type)) == NULL)
+ dev = *get_dev_p(pvc, type);
+ if (!dev)
return -ENOENT;
if (dev->flags & IFF_UP)
@@ -1174,12 +1146,11 @@ static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
return 0;
}
-
-
static void fr_destroy(struct net_device *frad)
{
hdlc_device *hdlc = dev_to_hdlc(frad);
struct pvc_device *pvc = state(hdlc)->first_pvc;
+
state(hdlc)->first_pvc = NULL; /* All PVCs destroyed */
state(hdlc)->dce_pvc_count = 0;
state(hdlc)->dce_changed = 1;
@@ -1198,7 +1169,6 @@ static void fr_destroy(struct net_device *frad)
}
}
-
static struct hdlc_proto proto = {
.close = fr_close,
.start = fr_start,
@@ -1209,7 +1179,6 @@ static struct hdlc_proto proto = {
.module = THIS_MODULE,
};
-
static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
{
fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
@@ -1259,7 +1228,8 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
new_settings.dce != 1))
return -EINVAL;
- result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
+ result = hdlc->attach(dev, ENCODING_NRZ,
+ PARITY_CRC16_PR1_CCITT);
if (result)
return result;
@@ -1309,20 +1279,17 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
return -EINVAL;
}
-
static int __init mod_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
-
static void __exit mod_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-
module_init(mod_init);
module_exit(mod_exit);
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index ba8c36c7ea91..d2bf72bf3bd7 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -56,10 +56,8 @@ static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
unsigned char *ptr;
skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC);
- if (!skb) {
- netdev_err(dev, "out of memory\n");
+ if (!skb)
return;
- }
ptr = skb_put(skb, 1);
*ptr = code;
@@ -70,22 +68,16 @@ static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
tasklet_schedule(&x25st->rx_tasklet);
}
-
-
static void x25_connected(struct net_device *dev, int reason)
{
x25_connect_disconnect(dev, reason, X25_IFACE_CONNECT);
}
-
-
static void x25_disconnected(struct net_device *dev, int reason)
{
x25_connect_disconnect(dev, reason, X25_IFACE_DISCONNECT);
}
-
-
static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
{
struct x25_state *x25st = state(dev_to_hdlc(dev));
@@ -108,8 +100,6 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
return NET_RX_SUCCESS;
}
-
-
static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -123,8 +113,6 @@ static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb)
hdlc->xmit(skb, dev); /* Ignore return value :-( */
}
-
-
static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -149,13 +137,15 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
switch (skb->data[0]) {
case X25_IFACE_DATA: /* Data to be transmitted */
skb_pull(skb, 1);
- if ((result = lapb_data_request(dev, skb)) != LAPB_OK)
+ result = lapb_data_request(dev, skb);
+ if (result != LAPB_OK)
dev_kfree_skb(skb);
spin_unlock_bh(&x25st->up_lock);
return NETDEV_TX_OK;
case X25_IFACE_CONNECT:
- if ((result = lapb_connect_request(dev))!= LAPB_OK) {
+ result = lapb_connect_request(dev);
+ if (result != LAPB_OK) {
if (result == LAPB_CONNECTED)
/* Send connect confirm. msg to level 3 */
x25_connected(dev, 0);
@@ -166,7 +156,8 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
break;
case X25_IFACE_DISCONNECT:
- if ((result = lapb_disconnect_request(dev)) != LAPB_OK) {
+ result = lapb_disconnect_request(dev);
+ if (result != LAPB_OK) {
if (result == LAPB_NOTCONNECTED)
/* Send disconnect confirm. msg to level 3 */
x25_disconnected(dev, 0);
@@ -185,8 +176,6 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-
-
static int x25_open(struct net_device *dev)
{
static const struct lapb_register_struct cb = {
@@ -232,8 +221,6 @@ static int x25_open(struct net_device *dev)
return 0;
}
-
-
static void x25_close(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -247,15 +234,14 @@ static void x25_close(struct net_device *dev)
tasklet_kill(&x25st->rx_tasklet);
}
-
-
static int x25_rx(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
hdlc_device *hdlc = dev_to_hdlc(dev);
struct x25_state *x25st = state(hdlc);
- if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb) {
dev->stats.rx_dropped++;
return NET_RX_DROP;
}
@@ -279,7 +265,6 @@ static int x25_rx(struct sk_buff *skb)
return NET_RX_DROP;
}
-
static struct hdlc_proto proto = {
.open = x25_open,
.close = x25_close,
@@ -289,7 +274,6 @@ static struct hdlc_proto proto = {
.module = THIS_MODULE,
};
-
static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
{
x25_hdlc_proto __user *x25_s = ifr->ifr_settings.ifs_ifsu.x25;
@@ -326,35 +310,36 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
new_settings.t1 = 3;
new_settings.t2 = 1;
new_settings.n2 = 10;
- }
- else {
+ } else {
if (copy_from_user(&new_settings, x25_s, size))
return -EFAULT;
if ((new_settings.dce != 0 &&
- new_settings.dce != 1) ||
- (new_settings.modulo != 8 &&
- new_settings.modulo != 128) ||
- new_settings.window < 1 ||
- (new_settings.modulo == 8 &&
- new_settings.window > 7) ||
- (new_settings.modulo == 128 &&
- new_settings.window > 127) ||
- new_settings.t1 < 1 ||
- new_settings.t1 > 255 ||
- new_settings.t2 < 1 ||
- new_settings.t2 > 255 ||
- new_settings.n2 < 1 ||
- new_settings.n2 > 255)
+ new_settings.dce != 1) ||
+ (new_settings.modulo != 8 &&
+ new_settings.modulo != 128) ||
+ new_settings.window < 1 ||
+ (new_settings.modulo == 8 &&
+ new_settings.window > 7) ||
+ (new_settings.modulo == 128 &&
+ new_settings.window > 127) ||
+ new_settings.t1 < 1 ||
+ new_settings.t1 > 255 ||
+ new_settings.t2 < 1 ||
+ new_settings.t2 > 255 ||
+ new_settings.n2 < 1 ||
+ new_settings.n2 > 255)
return -EINVAL;
}
- result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
+ result = hdlc->attach(dev, ENCODING_NRZ,
+ PARITY_CRC16_PR1_CCITT);
if (result)
return result;
- if ((result = attach_hdlc_protocol(dev, &proto,
- sizeof(struct x25_state))))
+ result = attach_hdlc_protocol(dev, &proto,
+ sizeof(struct x25_state));
+ if (result)
return result;
memcpy(&state(hdlc)->settings, &new_settings, size);
@@ -380,21 +365,17 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
return -EINVAL;
}
-
static int __init mod_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
-
-
static void __exit mod_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-
module_init(mod_init);
module_exit(mod_exit);
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index ecea09fd21cb..e97521138f7e 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -83,7 +83,6 @@
#define PKT_HDLC_CRC_32 0x2 /* default = CRC-16 */
#define PKT_HDLC_MSB_ENDIAN 0x4 /* default = LE */
-
/* hss_config, PCRs */
/* Frame sync sampling, default = active low */
#define PCR_FRM_SYNC_ACTIVE_HIGH 0x40000000
@@ -150,26 +149,24 @@
/* HSS number, default = 0 (first) */
#define CCR_SECOND_HSS 0x01000000
-
/* hss_config, clkCR: main:10, num:10, denom:12 */
-#define CLK42X_SPEED_EXP ((0x3FF << 22) | ( 2 << 12) | 15) /*65 KHz*/
-
-#define CLK42X_SPEED_512KHZ (( 130 << 22) | ( 2 << 12) | 15)
-#define CLK42X_SPEED_1536KHZ (( 43 << 22) | ( 18 << 12) | 47)
-#define CLK42X_SPEED_1544KHZ (( 43 << 22) | ( 33 << 12) | 192)
-#define CLK42X_SPEED_2048KHZ (( 32 << 22) | ( 34 << 12) | 63)
-#define CLK42X_SPEED_4096KHZ (( 16 << 22) | ( 34 << 12) | 127)
-#define CLK42X_SPEED_8192KHZ (( 8 << 22) | ( 34 << 12) | 255)
-
-#define CLK46X_SPEED_512KHZ (( 130 << 22) | ( 24 << 12) | 127)
-#define CLK46X_SPEED_1536KHZ (( 43 << 22) | (152 << 12) | 383)
-#define CLK46X_SPEED_1544KHZ (( 43 << 22) | ( 66 << 12) | 385)
-#define CLK46X_SPEED_2048KHZ (( 32 << 22) | (280 << 12) | 511)
-#define CLK46X_SPEED_4096KHZ (( 16 << 22) | (280 << 12) | 1023)
-#define CLK46X_SPEED_8192KHZ (( 8 << 22) | (280 << 12) | 2047)
-
-/*
- * HSS_CONFIG_CLOCK_CR register consists of 3 parts:
+#define CLK42X_SPEED_EXP	((0x3FF << 22) | (2 << 12) | 15) /* 65 kHz */
+
+#define CLK42X_SPEED_512KHZ ((130 << 22) | (2 << 12) | 15)
+#define CLK42X_SPEED_1536KHZ ((43 << 22) | (18 << 12) | 47)
+#define CLK42X_SPEED_1544KHZ ((43 << 22) | (33 << 12) | 192)
+#define CLK42X_SPEED_2048KHZ ((32 << 22) | (34 << 12) | 63)
+#define CLK42X_SPEED_4096KHZ ((16 << 22) | (34 << 12) | 127)
+#define CLK42X_SPEED_8192KHZ ((8 << 22) | (34 << 12) | 255)
+
+#define CLK46X_SPEED_512KHZ ((130 << 22) | (24 << 12) | 127)
+#define CLK46X_SPEED_1536KHZ ((43 << 22) | (152 << 12) | 383)
+#define CLK46X_SPEED_1544KHZ ((43 << 22) | (66 << 12) | 385)
+#define CLK46X_SPEED_2048KHZ ((32 << 22) | (280 << 12) | 511)
+#define CLK46X_SPEED_4096KHZ ((16 << 22) | (280 << 12) | 1023)
+#define CLK46X_SPEED_8192KHZ ((8 << 22) | (280 << 12) | 2047)
+
+/* HSS_CONFIG_CLOCK_CR register consists of 3 parts:
* A (10 bits), B (10 bits) and C (12 bits).
* IXP42x HSS clock generator operation (verified with an oscilloscope):
* Each clock bit takes 7.5 ns (1 / 133.xx MHz).
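+ * As a rough sanity check (assuming the effective divisor averages to
+ * A + (B + 1) / (C + 1)): CLK42X_SPEED_2048KHZ has A = 32, B = 34, C = 63,
+ * and 133.33 MHz / 2 / (32 + 35/64) comes to roughly 2048 kHz.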
@@ -208,7 +205,6 @@
#define HSS_CONFIG_TX_LUT 0x18 /* channel look-up tables */
#define HSS_CONFIG_RX_LUT 0x38
-
/* NPE command codes */
/* writes the ConfigWord value to the location specified by offset */
#define PORT_CONFIG_WRITE 0x40
@@ -220,7 +216,8 @@
#define PORT_ERROR_READ 0x42
/* triggers the NPE to reset internal status and enable the HssPacketized
- operation for the flow specified by pPipe */
+ * operation for the flow specified by pPipe
+ */
#define PKT_PIPE_FLOW_ENABLE 0x50
#define PKT_PIPE_FLOW_DISABLE 0x51
#define PKT_NUM_PIPES_WRITE 0x52
@@ -235,12 +232,12 @@
#define ERR_HDLC_ALIGN 2 /* HDLC alignment error */
#define ERR_HDLC_FCS 3 /* HDLC Frame Check Sum error */
#define ERR_RXFREE_Q_EMPTY 4 /* RX-free queue became empty while receiving
- this packet (if buf_len < pkt_len) */
+ * this packet (if buf_len < pkt_len)
+ */
#define ERR_HDLC_TOO_LONG 5 /* HDLC frame size too long */
#define ERR_HDLC_ABORT 6 /* abort sequence received */
#define ERR_DISCONNECTING 7 /* disconnect is in progress */
-
#ifdef __ARMEB__
typedef struct sk_buff buffer_t;
#define free_buffer dev_kfree_skb
@@ -308,7 +305,6 @@ struct desc {
u32 __reserved1[4];
};
-
#define rx_desc_phys(port, n) ((port)->desc_tab_phys + \
(n) * sizeof(struct desc))
#define rx_desc_ptr(port, n) (&(port)->desc_tab[n])
@@ -327,7 +323,7 @@ static DEFINE_SPINLOCK(npe_lock);
static const struct {
int tx, txdone, rx, rxfree;
-}queue_ids[2] = {{HSS0_PKT_TX0_QUEUE, HSS0_PKT_TXDONE_QUEUE, HSS0_PKT_RX_QUEUE,
+} queue_ids[2] = {{HSS0_PKT_TX0_QUEUE, HSS0_PKT_TXDONE_QUEUE, HSS0_PKT_RX_QUEUE,
HSS0_PKT_RXFREE0_QUEUE},
{HSS1_PKT_TX0_QUEUE, HSS1_PKT_TXDONE_QUEUE, HSS1_PKT_RX_QUEUE,
HSS1_PKT_RXFREE0_QUEUE},
@@ -337,7 +333,7 @@ static const struct {
* utility functions
****************************************************************************/
-static inline struct port* dev_to_port(struct net_device *dev)
+static inline struct port *dev_to_port(struct net_device *dev)
{
return dev_to_hdlc(dev)->priv;
}
@@ -346,6 +342,7 @@ static inline struct port* dev_to_port(struct net_device *dev)
static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
{
int i;
+
for (i = 0; i < cnt; i++)
dest[i] = swab32(src[i]);
}
@@ -355,9 +352,10 @@ static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
* HSS access
****************************************************************************/
-static void hss_npe_send(struct port *port, struct msg *msg, const char* what)
+static void hss_npe_send(struct port *port, struct msg *msg, const char *what)
{
- u32 *val = (u32*)msg;
+ u32 *val = (u32 *)msg;
+
if (npe_send_message(port->npe, msg, what)) {
pr_crit("HSS-%i: unable to send command [%08X:%08X] to %s\n",
port->id, val[0], val[1], npe_name(port->npe));
@@ -513,10 +511,12 @@ static int hss_load_firmware(struct port *port)
if (port->initialized)
return 0;
- if (!npe_running(port->npe) &&
- (err = npe_load_firmware(port->npe, npe_name(port->npe),
- port->dev)))
- return err;
+ if (!npe_running(port->npe)) {
+ err = npe_load_firmware(port->npe, npe_name(port->npe),
+ port->dev);
+ if (err)
+ return err;
+ }
/* HDLC mode configuration */
memset(&msg, 0, sizeof(msg));
@@ -567,7 +567,6 @@ static inline void debug_pkt(struct net_device *dev, const char *func,
#endif
}
-
static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
@@ -583,7 +582,8 @@ static inline int queue_get_desc(unsigned int queue, struct port *port,
u32 phys, tab_phys, n_desc;
struct desc *tab;
- if (!(phys = qmgr_get_entry(queue)))
+ phys = qmgr_get_entry(queue);
+ if (!phys)
return -1;
BUG_ON(phys & 0x1F);
@@ -603,10 +603,10 @@ static inline void queue_put_desc(unsigned int queue, u32 phys,
BUG_ON(phys & 0x1F);
qmgr_put_entry(queue, phys);
/* Don't check for queue overflow here, we've allocated sufficient
- length and queues >= 32 don't support this check anyway. */
+ * length and queues >= 32 don't support this check anyway.
+ */
}
-
static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
@@ -619,7 +619,6 @@ static inline void dma_unmap_tx(struct port *port, struct desc *desc)
#endif
}
-
static void hss_hdlc_set_carrier(void *pdev, int carrier)
{
struct net_device *netdev = pdev;
@@ -670,7 +669,8 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
u32 phys;
#endif
- if ((n = queue_get_desc(rxq, port, 0)) < 0) {
+ n = queue_get_desc(rxq, port, 0);
+ if (n < 0) {
#if DEBUG_RX
printk(KERN_DEBUG "%s: hss_hdlc_poll"
" napi_complete\n", dev->name);
@@ -705,7 +705,8 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
switch (desc->status) {
case 0:
#ifdef __ARMEB__
- if ((skb = netdev_alloc_skb(dev, RX_SIZE)) != NULL) {
+ skb = netdev_alloc_skb(dev, RX_SIZE);
+ if (skb) {
phys = dma_map_single(&dev->dev, skb->data,
RX_SIZE,
DMA_FROM_DEVICE);
@@ -784,7 +785,6 @@ static int hss_hdlc_poll(struct napi_struct *napi, int budget)
return received; /* not all work done */
}
-
static void hss_hdlc_txdone_irq(void *pdev)
{
struct net_device *dev = pdev;
@@ -854,7 +854,8 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
#else
offset = (int)skb->data & 3; /* keep 32-bit alignment */
bytes = ALIGN(offset + len, 4);
- if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
+ mem = kmalloc(bytes, GFP_ATOMIC);
+ if (!mem) {
dev_kfree_skb(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
@@ -910,7 +911,6 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-
static int request_hdlc_queues(struct port *port)
{
int err;
@@ -974,8 +974,9 @@ static int init_hdlc_queues(struct port *port)
return -ENOMEM;
}
- if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
- &port->desc_tab_phys)))
+ port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
+ &port->desc_tab_phys);
+ if (!port->desc_tab)
return -ENOMEM;
memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
@@ -987,11 +988,13 @@ static int init_hdlc_queues(struct port *port)
buffer_t *buff;
void *data;
#ifdef __ARMEB__
- if (!(buff = netdev_alloc_skb(port->netdev, RX_SIZE)))
+ buff = netdev_alloc_skb(port->netdev, RX_SIZE);
+ if (!buff)
return -ENOMEM;
data = buff->data;
#else
- if (!(buff = kmalloc(RX_SIZE, GFP_KERNEL)))
+ buff = kmalloc(RX_SIZE, GFP_KERNEL);
+ if (!buff)
return -ENOMEM;
data = buff;
#endif
@@ -1016,6 +1019,7 @@ static void destroy_hdlc_queues(struct port *port)
for (i = 0; i < RX_DESCS; i++) {
struct desc *desc = rx_desc_ptr(port, i);
buffer_t *buff = port->rx_buff_tab[i];
+
if (buff) {
dma_unmap_single(&port->netdev->dev,
desc->data, RX_SIZE,
@@ -1026,6 +1030,7 @@ static void destroy_hdlc_queues(struct port *port)
for (i = 0; i < TX_DESCS; i++) {
struct desc *desc = tx_desc_ptr(port, i);
buffer_t *buff = port->tx_buff_tab[i];
+
if (buff) {
dma_unmap_tx(port, desc);
free_buffer(buff);
@@ -1047,23 +1052,29 @@ static int hss_hdlc_open(struct net_device *dev)
unsigned long flags;
int i, err = 0;
- if ((err = hdlc_open(dev)))
+ err = hdlc_open(dev);
+ if (err)
return err;
- if ((err = hss_load_firmware(port)))
+ err = hss_load_firmware(port);
+ if (err)
goto err_hdlc_close;
- if ((err = request_hdlc_queues(port)))
+ err = request_hdlc_queues(port);
+ if (err)
goto err_hdlc_close;
- if ((err = init_hdlc_queues(port)))
+ err = init_hdlc_queues(port);
+ if (err)
goto err_destroy_queues;
spin_lock_irqsave(&npe_lock, flags);
- if (port->plat->open)
- if ((err = port->plat->open(port->id, dev,
- hss_hdlc_set_carrier)))
+ if (port->plat->open) {
+ err = port->plat->open(port->id, dev, hss_hdlc_set_carrier);
+ if (err)
goto err_unlock;
+ }
+
spin_unlock_irqrestore(&npe_lock, flags);
/* Populate queues with buffers, no failure after this point */
@@ -1160,7 +1171,6 @@ static int hss_hdlc_close(struct net_device *dev)
return 0;
}
-
static int hss_hdlc_attach(struct net_device *dev, unsigned short encoding,
unsigned short parity)
{
@@ -1169,7 +1179,7 @@ static int hss_hdlc_attach(struct net_device *dev, unsigned short encoding,
if (encoding != ENCODING_NRZ)
return -EINVAL;
- switch(parity) {
+ switch (parity) {
case PARITY_CRC16_PR1_CCITT:
port->hdlc_cfg = 0;
return 0;
@@ -1224,6 +1234,7 @@ static void find_best_clock(u32 timer_freq, u32 rate, u32 *best, u32 *reg)
for (b = 0; b < 0x400; b++) {
u64 c = (b + 1) * (u64)rate;
+
do_div(c, timer_freq - rate * a);
c--;
if (c >= 0xFFF) { /* 12-bit - no need to check more 'b's */
@@ -1255,7 +1266,7 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (cmd != SIOCWANDEV)
return hdlc_ioctl(dev, ifr, cmd);
- switch(ifr->ifr_settings.type) {
+ switch (ifr->ifr_settings.type) {
case IF_GET_IFACE:
ifr->ifr_settings.type = IF_IFACE_V35;
if (ifr->ifr_settings.size < size) {
@@ -1272,7 +1283,7 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case IF_IFACE_SYNC_SERIAL:
case IF_IFACE_V35:
- if(!capable(CAP_NET_ADMIN))
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&new_line, line, size))
return -EFAULT;
@@ -1288,11 +1299,11 @@ static int hss_hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EINVAL;
port->clock_type = clk; /* Update settings */
- if (clk == CLOCK_INT)
+ if (clk == CLOCK_INT) {
find_best_clock(port->plat->timer_freq,
new_line.clock_rate,
&port->clock_rate, &port->clock_reg);
- else {
+ } else {
port->clock_rate = 0;
port->clock_reg = CLK42X_SPEED_2048KHZ;
}
@@ -1334,15 +1345,19 @@ static int hss_init_one(struct platform_device *pdev)
hdlc_device *hdlc;
int err;
- if ((port = kzalloc(sizeof(*port), GFP_KERNEL)) == NULL)
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
return -ENOMEM;
- if ((port->npe = npe_request(0)) == NULL) {
+ port->npe = npe_request(0);
+ if (!port->npe) {
err = -ENODEV;
goto err_free;
}
- if ((port->netdev = dev = alloc_hdlcdev(port)) == NULL) {
+	dev = alloc_hdlcdev(port);
+	port->netdev = dev;
+ if (!port->netdev) {
err = -ENOMEM;
goto err_plat;
}
@@ -1361,7 +1376,8 @@ static int hss_init_one(struct platform_device *pdev)
port->plat = pdev->dev.platform_data;
netif_napi_add(dev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);
- if ((err = register_hdlc_device(dev)))
+ err = register_hdlc_device(dev);
+ if (err)
goto err_free_netdev;
platform_set_drvdata(pdev, port);
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 59646865a3a4..89d31adc3809 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -6,7 +6,7 @@
*
* This is a "pseudo" network driver to allow LAPB over Ethernet.
*
- * This driver can use any ethernet destination address, and can be
+ * This driver can use any ethernet destination address, and can be
* limited to accept frames from one dedicated ethernet card only.
*
* History
@@ -44,7 +44,8 @@
static const u8 bcast_addr[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
/* If this number is made larger, check that the temporary string buffer
- * in lapbeth_new_device is large enough to store the probe device name.*/
+ * in lapbeth_new_device is large enough to store the probe device name.
+ */
#define MAXLAPBDEV 100
struct lapbethdev {
@@ -64,15 +65,14 @@ static void lapbeth_disconnected(struct net_device *dev, int reason);
/* ------------------------------------------------------------------------ */
-/*
- * Get the LAPB device for the ethernet device
+/* Get the LAPB device for the ethernet device
*/
static struct lapbethdev *lapbeth_get_x25_dev(struct net_device *dev)
{
struct lapbethdev *lapbeth;
list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node, lockdep_rtnl_is_held()) {
- if (lapbeth->ethdev == dev)
+ if (lapbeth->ethdev == dev)
return lapbeth;
}
return NULL;
@@ -105,10 +105,10 @@ static int lapbeth_napi_poll(struct napi_struct *napi, int budget)
return processed;
}
-/*
- * Receive a LAPB frame via an ethernet interface.
+/* Receive a LAPB frame via an ethernet interface.
*/
-static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev)
+static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *ptype, struct net_device *orig_dev)
{
int len, err;
struct lapbethdev *lapbeth;
@@ -116,7 +116,8 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
if (dev_net(dev) != &init_net)
goto drop;
- if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
return NET_RX_DROP;
if (!pskb_may_pull(skb, 2))
@@ -137,7 +138,8 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
skb_pull(skb, 2); /* Remove the length bytes */
skb_trim(skb, len); /* Set the length of the data */
- if ((err = lapb_data_received(lapbeth->axdev, skb)) != LAPB_OK) {
+ err = lapb_data_received(lapbeth->axdev, skb);
+ if (err != LAPB_OK) {
printk(KERN_DEBUG "lapbether: lapb_data_received err - %d\n", err);
goto drop_unlock;
}
@@ -177,11 +179,10 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
return NET_RX_SUCCESS;
}
-/*
- * Send a LAPB frame via an ethernet interface
+/* Send a LAPB frame via an ethernet interface
*/
static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
- struct net_device *dev)
+ struct net_device *dev)
{
struct lapbethdev *lapbeth = netdev_priv(dev);
int err;
@@ -219,7 +220,8 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
skb_pull(skb, 1);
- if ((err = lapb_data_request(dev, skb)) != LAPB_OK) {
+ err = lapb_data_request(dev, skb);
+ if (err != LAPB_OK) {
pr_err("lapb_data_request error - %d\n", err);
goto drop;
}
@@ -263,10 +265,8 @@ static void lapbeth_connected(struct net_device *dev, int reason)
unsigned char *ptr;
struct sk_buff *skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC);
- if (!skb) {
- pr_err("out of memory\n");
+ if (!skb)
return;
- }
ptr = skb_put(skb, 1);
*ptr = X25_IFACE_CONNECT;
@@ -283,10 +283,8 @@ static void lapbeth_disconnected(struct net_device *dev, int reason)
unsigned char *ptr;
struct sk_buff *skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC);
- if (!skb) {
- pr_err("out of memory\n");
+ if (!skb)
return;
- }
ptr = skb_put(skb, 1);
*ptr = X25_IFACE_DISCONNECT;
@@ -297,17 +295,16 @@ static void lapbeth_disconnected(struct net_device *dev, int reason)
napi_schedule(&lapbeth->napi);
}
-/*
- * Set AX.25 callsign
+/* Set AX.25 callsign
*/
static int lapbeth_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = addr;
+
memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
return 0;
}
-
static const struct lapb_register_struct lapbeth_callbacks = {
.connect_confirmation = lapbeth_connected,
.connect_indication = lapbeth_connected,
@@ -317,8 +314,7 @@ static const struct lapb_register_struct lapbeth_callbacks = {
.data_transmit = lapbeth_data_transmit,
};
-/*
- * open/close a device
+/* open/close a device
*/
static int lapbeth_open(struct net_device *dev)
{
@@ -327,7 +323,8 @@ static int lapbeth_open(struct net_device *dev)
napi_enable(&lapbeth->napi);
- if ((err = lapb_register(dev, &lapbeth_callbacks)) != LAPB_OK) {
+ err = lapb_register(dev, &lapbeth_callbacks);
+ if (err != LAPB_OK) {
pr_err("lapb_register error: %d\n", err);
return -ENODEV;
}
@@ -348,7 +345,8 @@ static int lapbeth_close(struct net_device *dev)
lapbeth->up = false;
spin_unlock_bh(&lapbeth->up_lock);
- if ((err = lapb_unregister(dev)) != LAPB_OK)
+ err = lapb_unregister(dev);
+ if (err != LAPB_OK)
pr_err("lapb_unregister error: %d\n", err);
napi_disable(&lapbeth->napi);
@@ -375,8 +373,7 @@ static void lapbeth_setup(struct net_device *dev)
dev->addr_len = 0;
}
-/*
- * Setup a new device.
+/* Setup a new device.
*/
static int lapbeth_new_device(struct net_device *dev)
{
@@ -427,8 +424,7 @@ fail:
goto out;
}
-/*
- * Free a lapb network device.
+/* Free a lapb network device.
*/
static void lapbeth_free_device(struct lapbethdev *lapbeth)
{
@@ -437,8 +433,7 @@ static void lapbeth_free_device(struct lapbethdev *lapbeth)
unregister_netdevice(lapbeth->axdev);
}
-/*
- * Handle device status changes.
+/* Handle device status changes.
*
* Called from notifier with RTNL held.
*/
@@ -457,13 +452,13 @@ static int lapbeth_device_event(struct notifier_block *this,
switch (event) {
case NETDEV_UP:
/* New ethernet device -> new LAPB interface */
- if (lapbeth_get_x25_dev(dev) == NULL)
+ if (!lapbeth_get_x25_dev(dev))
lapbeth_new_device(dev);
break;
case NETDEV_GOING_DOWN:
/* ethernet device closes -> close LAPB interface */
lapbeth = lapbeth_get_x25_dev(dev);
- if (lapbeth)
+ if (lapbeth)
dev_close(lapbeth->axdev);
break;
case NETDEV_UNREGISTER:
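
lapbether above applies two further idioms: NULL comparisons become !ptr, and functions that may hand back a replacement pointer, such as skb_share_check(), are reassigned on their own line before the test. A self-contained sketch under those assumptions; share_check() is invented for the demo:

#include <stdio.h>

/* share_check() is a made-up stand-in for skb_share_check(): it may
 * return a different pointer or NULL, so the result must be
 * reassigned before the NULL test.
 */
static int *share_check(int *p)
{
	return p;			/* pass-through for the demo */
}

static int rcv(int *p)
{
	p = share_check(p);		/* was: if ((p = share_check(p)) == NULL) */
	if (!p)				/* preferred over "p == NULL" */
		return -1;
	return *p;
}

int main(void)
{
	int v = 42;

	printf("rcv() = %d\n", rcv(&v));
	return 0;
}
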
diff --git a/drivers/net/wan/n2.c b/drivers/net/wan/n2.c
index 5bf4463873b1..bdb6dc2409bc 100644
--- a/drivers/net/wan/n2.c
+++ b/drivers/net/wan/n2.c
@@ -32,9 +32,8 @@
#include <asm/io.h>
#include "hd64570.h"
-
-static const char* version = "SDL RISCom/N2 driver version: 1.15";
-static const char* devname = "RISCom/N2";
+static const char *version = "SDL RISCom/N2 driver version: 1.15";
+static const char *devname = "RISCom/N2";
#undef DEBUG_PKT
#define DEBUG_RINGS
@@ -64,11 +63,9 @@ static char *hw; /* pointer to hw=xxx command line string */
#define PCR_ENWIN 4 /* Open window */
#define PCR_BUS16 8 /* 16-bit bus */
-
/* Memory Base Address Register */
#define N2_BAR 2
-
/* Page Scan Register */
#define N2_PSR 4
#define WIN16K 0x00
@@ -78,7 +75,6 @@ static char *hw; /* pointer to hw=xxx command line string */
#define PSR_DMAEN 0x80
#define PSR_PAGEBITS 0x0F
-
/* Modem Control Reg */
#define N2_MCR 6
#define CLOCK_OUT_PORT1 0x80
@@ -90,7 +86,6 @@ static char *hw; /* pointer to hw=xxx command line string */
#define DTR_PORT1 0x02
#define DTR_PORT0 0x01
-
typedef struct port_s {
struct net_device *dev;
struct card_s *card;
@@ -106,9 +101,7 @@ typedef struct port_s {
u8 rxs, txs, tmc; /* SCA registers */
u8 phy_node; /* physical port # - 0 or 1 */
u8 log_node; /* logical port # */
-}port_t;
-
-
+} port_t;
typedef struct card_s {
u8 __iomem *winbase; /* ISA window base address */
@@ -122,13 +115,11 @@ typedef struct card_s {
port_t ports[2];
struct card_s *next_card;
-}card_t;
-
+} card_t;
static card_t *first_card;
static card_t **new_card = &first_card;
-
#define sca_reg(reg, card) (0x8000 | (card)->io | \
((reg) & 0x0F) | (((reg) & 0xF0) << 6))
#define sca_in(reg, card) inb(sca_reg(reg, card))
@@ -144,23 +135,20 @@ static card_t **new_card = &first_card;
#define get_port(card, port) ((card)->ports[port].valid ? \
&(card)->ports[port] : NULL)
-
static __inline__ u8 sca_get_page(card_t *card)
{
return inb(card->io + N2_PSR) & PSR_PAGEBITS;
}
-
static __inline__ void openwin(card_t *card, u8 page)
{
u8 psr = inb(card->io + N2_PSR);
+
outb((psr & ~PSR_PAGEBITS) | page, card->io + N2_PSR);
}
-
#include "hd64570.c"
-
static void n2_set_iface(port_t *port)
{
card_t *card = port->card;
@@ -170,7 +158,7 @@ static void n2_set_iface(port_t *port)
u8 rxs = port->rxs & CLK_BRG_MASK;
u8 txs = port->txs & CLK_BRG_MASK;
- switch(port->settings.clock_type) {
+ switch (port->settings.clock_type) {
case CLOCK_INT:
mcr |= port->phy_node ? CLOCK_OUT_PORT1 : CLOCK_OUT_PORT0;
rxs |= CLK_BRG_RX; /* BRG output */
@@ -203,13 +191,12 @@ static void n2_set_iface(port_t *port)
sca_set_port(port);
}
-
-
static int n2_open(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
int io = port->card->io;
- u8 mcr = inb(io + N2_MCR) | (port->phy_node ? TX422_PORT1:TX422_PORT0);
+ u8 mcr = inb(io + N2_MCR) |
+ (port->phy_node ? TX422_PORT1 : TX422_PORT0);
int result;
result = hdlc_open(dev);
@@ -226,13 +213,12 @@ static int n2_open(struct net_device *dev)
return 0;
}
-
-
static int n2_close(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
int io = port->card->io;
- u8 mcr = inb(io+N2_MCR) | (port->phy_node ? TX422_PORT1 : TX422_PORT0);
+ u8 mcr = inb(io + N2_MCR) |
+ (port->phy_node ? TX422_PORT1 : TX422_PORT0);
sca_close(dev);
mcr |= port->phy_node ? DTR_PORT1 : DTR_PORT0; /* set DTR OFF */
@@ -241,8 +227,6 @@ static int n2_close(struct net_device *dev)
return 0;
}
-
-
static int n2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
const size_t size = sizeof(sync_serial_settings);
@@ -259,7 +243,7 @@ static int n2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (cmd != SIOCWANDEV)
return hdlc_ioctl(dev, ifr, cmd);
- switch(ifr->ifr_settings.type) {
+ switch (ifr->ifr_settings.type) {
case IF_GET_IFACE:
ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
if (ifr->ifr_settings.size < size) {
@@ -271,7 +255,7 @@ static int n2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
case IF_IFACE_SYNC_SERIAL:
- if(!capable(CAP_NET_ADMIN))
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&new_line, line, size))
@@ -295,8 +279,6 @@ static int n2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
}
-
-
static void n2_destroy_card(card_t *card)
{
int cnt;
@@ -304,6 +286,7 @@ static void n2_destroy_card(card_t *card)
for (cnt = 0; cnt < 2; cnt++)
if (card->ports[cnt].card) {
struct net_device *dev = port_to_dev(&card->ports[cnt]);
+
unregister_hdlc_device(dev);
}
@@ -354,7 +337,7 @@ static int __init n2_run(unsigned long io, unsigned long irq,
}
card = kzalloc(sizeof(card_t), GFP_KERNEL);
- if (card == NULL)
+ if (!card)
return -ENOBUFS;
card->ports[0].dev = alloc_hdlcdev(&card->ports[0]);
@@ -486,11 +469,9 @@ static int __init n2_run(unsigned long io, unsigned long irq,
return 0;
}
-
-
static int __init n2_init(void)
{
- if (hw==NULL) {
+ if (!hw) {
#ifdef MODULE
pr_info("no card initialized\n");
#endif
@@ -515,7 +496,7 @@ static int __init n2_init(void)
if (*hw++ != ',')
break;
- while(1) {
+ while (1) {
if (*hw == '0' && !valid[0])
valid[0] = 1; /* Port 0 enabled */
else if (*hw == '1' && !valid[1])
@@ -533,25 +514,24 @@ static int __init n2_init(void)
if (*hw == '\x0')
return first_card ? 0 : -EINVAL;
- }while(*hw++ == ':');
+ } while (*hw++ == ':');
pr_err("invalid hardware parameters\n");
return first_card ? 0 : -EINVAL;
}
-
static void __exit n2_cleanup(void)
{
card_t *card = first_card;
while (card) {
card_t *ptr = card;
+
card = card->next_card;
n2_destroy_card(ptr);
}
}
-
module_init(n2_init);
module_exit(n2_cleanup);
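
n2_init() keeps its do/while walk over the hw= parameter string; the patch only inserts the space in "} while". A standalone sketch of that loop shape over colon-separated records; the demo string is made up and real option parsing is elided:

#include <stdio.h>

int main(void)
{
	const char *hw = "io1:io2:io3";	/* stand-in for the hw= string */

	do {
		/* stand-in for parsing one io,irq,mem record */
		while (*hw && *hw != ':')
			putchar(*hw++);
		putchar('\n');
		if (*hw == '\0')
			break;
	} while (*hw++ == ':');	/* consume the separator, keep going */

	return 0;
}
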
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index 001fd378d417..7b123a771aa6 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -44,7 +44,7 @@
#define MAX_TX_BUFFERS 10
static int pci_clock_freq = 33000000;
-static int use_crystal_clock = 0;
+static int use_crystal_clock;
static unsigned int CLOCK_BASE;
/* Masks to access the init_ctrl PLX register */
@@ -52,11 +52,9 @@ static unsigned int CLOCK_BASE;
#define PC300_CHMEDIA_MASK(port) (0x00000020UL << ((port) * 3))
#define PC300_CTYPE_MASK (0x00000800UL)
-
enum { PC300_RSV = 1, PC300_X21, PC300_TE }; /* card types */
-/*
- * PLX PCI9050-1 local configuration and shared runtime registers.
+/* PLX PCI9050-1 local configuration and shared runtime registers.
* This structure can be used to access 9050 registers (memory mapped).
*/
typedef struct {
@@ -69,9 +67,7 @@ typedef struct {
u32 cs_base[4]; /* 3C-48h : Chip Select Base Addrs */
u32 intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */
u32 init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */
-}plx9050;
-
-
+} plx9050;
typedef struct port_s {
struct napi_struct napi;
@@ -88,9 +84,7 @@ typedef struct port_s {
u16 txlast;
u8 rxs, txs, tmc; /* SCA registers */
u8 chan; /* physical port # - 0 or 1 */
-}port_t;
-
-
+} port_t;
typedef struct card_s {
int type; /* RSV, X21, etc. */
@@ -105,26 +99,24 @@ typedef struct card_s {
u8 irq; /* interrupt request level */
port_t ports[2];
-}card_t;
-
+} card_t;
#define get_port(card, port) ((port) < (card)->n_ports ? \
(&(card)->ports[port]) : (NULL))
#include "hd64572.c"
-
static void pc300_set_iface(port_t *port)
{
card_t *card = port->card;
- u32 __iomem * init_ctrl = &card->plxbase->init_ctrl;
+ u32 __iomem *init_ctrl = &card->plxbase->init_ctrl;
u16 msci = get_msci(port);
u8 rxs = port->rxs & CLK_BRG_MASK;
u8 txs = port->txs & CLK_BRG_MASK;
sca_out(EXS_TES1, (port->chan ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
port->card);
- switch(port->settings.clock_type) {
+ switch (port->settings.clock_type) {
case CLOCK_INT:
rxs |= CLK_BRG; /* BRG output */
txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
@@ -162,13 +154,11 @@ static void pc300_set_iface(port_t *port)
}
}
-
-
static int pc300_open(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
-
int result = hdlc_open(dev);
+
if (result)
return result;
@@ -177,8 +167,6 @@ static int pc300_open(struct net_device *dev)
return 0;
}
-
-
static int pc300_close(struct net_device *dev)
{
sca_close(dev);
@@ -186,8 +174,6 @@ static int pc300_close(struct net_device *dev)
return 0;
}
-
-
static int pc300_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
const size_t size = sizeof(sync_serial_settings);
@@ -214,7 +200,6 @@ static int pc300_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (copy_to_user(line, &port->settings, size))
return -EFAULT;
return 0;
-
}
if (port->card->type == PC300_X21 &&
@@ -255,8 +240,6 @@ static int pc300_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return 0;
}
-
-
static void pc300_pci_remove_one(struct pci_dev *pdev)
{
int i;
@@ -314,7 +297,7 @@ static int pc300_pci_init_one(struct pci_dev *pdev,
}
card = kzalloc(sizeof(card_t), GFP_KERNEL);
- if (card == NULL) {
+ if (!card) {
pci_release_regions(pdev);
pci_disable_device(pdev);
return -ENOBUFS;
@@ -338,9 +321,7 @@ static int pc300_pci_init_one(struct pci_dev *pdev,
ramphys = pci_resource_start(pdev, 3) & PCI_BASE_ADDRESS_MEM_MASK;
card->rambase = pci_ioremap_bar(pdev, 3);
- if (card->plxbase == NULL ||
- card->scabase == NULL ||
- card->rambase == NULL) {
+ if (!card->plxbase || !card->scabase || !card->rambase) {
pr_err("ioremap() failed\n");
pc300_pci_remove_one(pdev);
return -ENOMEM;
@@ -365,12 +346,14 @@ static int pc300_pci_init_one(struct pci_dev *pdev,
else
card->n_ports = 2;
- for (i = 0; i < card->n_ports; i++)
- if (!(card->ports[i].netdev = alloc_hdlcdev(&card->ports[i]))) {
+ for (i = 0; i < card->n_ports; i++) {
+ card->ports[i].netdev = alloc_hdlcdev(&card->ports[i]);
+ if (!card->ports[i].netdev) {
pr_err("unable to allocate memory\n");
pc300_pci_remove_one(pdev);
return -ENOMEM;
}
+ }
/* Reset PLX */
p = &card->plxbase->init_ctrl;
@@ -442,6 +425,7 @@ static int pc300_pci_init_one(struct pci_dev *pdev,
port_t *port = &card->ports[i];
struct net_device *dev = port->netdev;
hdlc_device *hdlc = dev_to_hdlc(dev);
+
port->chan = i;
spin_lock_init(&port->lock);
@@ -472,8 +456,6 @@ static int pc300_pci_init_one(struct pci_dev *pdev,
return 0;
}
-
-
static const struct pci_device_id pc300_pci_tbl[] = {
{ PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_1, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 },
@@ -486,7 +468,6 @@ static const struct pci_device_id pc300_pci_tbl[] = {
{ 0, }
};
-
static struct pci_driver pc300_pci_driver = {
.name = "PC300",
.id_table = pc300_pci_tbl,
@@ -494,7 +475,6 @@ static struct pci_driver pc300_pci_driver = {
.remove = pc300_pci_remove_one,
};
-
static int __init pc300_init_module(void)
{
if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) {
@@ -511,8 +491,6 @@ static int __init pc300_init_module(void)
return pci_register_driver(&pc300_pci_driver);
}
-
-
static void __exit pc300_cleanup_module(void)
{
pci_unregister_driver(&pc300_pci_driver);
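
In pc300_pci_init_one() the alloc_hdlcdev() loop body grows to two statements, so it gains braces and an explicit failure path. A compilable sketch of the same shape; struct demo_port and malloc() are stand-ins, not the driver's API:

#include <stdio.h>
#include <stdlib.h>

#define N_PORTS 2

struct demo_port {
	void *netdev;
};

int main(void)
{
	struct demo_port ports[N_PORTS];
	int i;

	for (i = 0; i < N_PORTS; i++) {
		ports[i].netdev = malloc(16);	/* stand-in for alloc_hdlcdev() */
		if (!ports[i].netdev) {
			fprintf(stderr, "unable to allocate memory\n");
			return 1;
		}
	}

	for (i = 0; i < N_PORTS; i++)
		free(ports[i].netdev);
	return 0;
}
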
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index ba5cc0c53833..dee9c4e15eca 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -42,8 +42,7 @@
static int pci_clock_freq = 33000000;
#define CLOCK_BASE pci_clock_freq
-/*
- * PLX PCI9052 local configuration and shared runtime registers.
+/* PLX PCI9052 local configuration and shared runtime registers.
* This structure can be used to access 9052 registers (memory mapped).
*/
typedef struct {
@@ -56,9 +55,7 @@ typedef struct {
u32 cs_base[4]; /* 3C-48h : Chip Select Base Addrs */
u32 intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */
u32 init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */
-}plx9052;
-
-
+} plx9052;
typedef struct port_s {
struct napi_struct napi;
@@ -74,9 +71,7 @@ typedef struct port_s {
u16 txlast;
u8 rxs, txs, tmc; /* SCA registers */
u8 chan; /* physical port # - 0 or 1 */
-}port_t;
-
-
+} port_t;
typedef struct card_s {
u8 __iomem *rambase; /* buffer memory base (virtual) */
@@ -88,15 +83,15 @@ typedef struct card_s {
u8 irq; /* interrupt request level */
port_t ports[2];
-}card_t;
-
+} card_t;
-#define get_port(card, port) (&card->ports[port])
+#define get_port(card, port) (&(card)->ports[port])
#define sca_flush(card) (sca_in(IER0, card))
static inline void new_memcpy_toio(char __iomem *dest, char *src, int length)
{
int len;
+
do {
len = length > 256 ? 256 : length;
memcpy_toio(dest, src, len);
@@ -112,7 +107,6 @@ static inline void new_memcpy_toio(char __iomem *dest, char *src, int length)
#include "hd64572.c"
-
static void pci200_set_iface(port_t *port)
{
card_t *card = port->card;
@@ -122,7 +116,7 @@ static void pci200_set_iface(port_t *port)
sca_out(EXS_TES1, (port->chan ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
port->card);
- switch(port->settings.clock_type) {
+ switch (port->settings.clock_type) {
case CLOCK_INT:
rxs |= CLK_BRG; /* BRG output */
txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
@@ -151,13 +145,11 @@ static void pci200_set_iface(port_t *port)
sca_set_port(port);
}
-
-
static int pci200_open(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
-
int result = hdlc_open(dev);
+
if (result)
return result;
@@ -167,8 +159,6 @@ static int pci200_open(struct net_device *dev)
return 0;
}
-
-
static int pci200_close(struct net_device *dev)
{
sca_close(dev);
@@ -177,8 +167,6 @@ static int pci200_close(struct net_device *dev)
return 0;
}
-
-
static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
const size_t size = sizeof(sync_serial_settings);
@@ -195,7 +183,7 @@ static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (cmd != SIOCWANDEV)
return hdlc_ioctl(dev, ifr, cmd);
- switch(ifr->ifr_settings.type) {
+ switch (ifr->ifr_settings.type) {
case IF_GET_IFACE:
ifr->ifr_settings.type = IF_IFACE_V35;
if (ifr->ifr_settings.size < size) {
@@ -233,8 +221,6 @@ static int pci200_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
}
-
-
static void pci200_pci_remove_one(struct pci_dev *pdev)
{
int i;
@@ -292,7 +278,7 @@ static int pci200_pci_init_one(struct pci_dev *pdev,
}
card = kzalloc(sizeof(card_t), GFP_KERNEL);
- if (card == NULL) {
+ if (!card) {
pci_release_regions(pdev);
pci_disable_device(pdev);
return -ENOBUFS;
@@ -314,18 +300,16 @@ static int pci200_pci_init_one(struct pci_dev *pdev,
return -EFAULT;
}
- plxphys = pci_resource_start(pdev,0) & PCI_BASE_ADDRESS_MEM_MASK;
+ plxphys = pci_resource_start(pdev, 0) & PCI_BASE_ADDRESS_MEM_MASK;
card->plxbase = ioremap(plxphys, PCI200SYN_PLX_SIZE);
- scaphys = pci_resource_start(pdev,2) & PCI_BASE_ADDRESS_MEM_MASK;
+ scaphys = pci_resource_start(pdev, 2) & PCI_BASE_ADDRESS_MEM_MASK;
card->scabase = ioremap(scaphys, PCI200SYN_SCA_SIZE);
- ramphys = pci_resource_start(pdev,3) & PCI_BASE_ADDRESS_MEM_MASK;
+ ramphys = pci_resource_start(pdev, 3) & PCI_BASE_ADDRESS_MEM_MASK;
card->rambase = pci_ioremap_bar(pdev, 3);
- if (card->plxbase == NULL ||
- card->scabase == NULL ||
- card->rambase == NULL) {
+ if (!card->plxbase || !card->scabase || !card->rambase) {
pr_err("ioremap() failed\n");
pci200_pci_remove_one(pdev);
return -EFAULT;
@@ -380,6 +364,7 @@ static int pci200_pci_init_one(struct pci_dev *pdev,
port_t *port = &card->ports[i];
struct net_device *dev = port->netdev;
hdlc_device *hdlc = dev_to_hdlc(dev);
+
port->chan = i;
spin_lock_init(&port->lock);
@@ -407,15 +392,12 @@ static int pci200_pci_init_one(struct pci_dev *pdev,
return 0;
}
-
-
static const struct pci_device_id pci200_pci_tbl[] = {
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX,
PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 },
{ 0, }
};
-
static struct pci_driver pci200_pci_driver = {
.name = "PCI200SYN",
.id_table = pci200_pci_tbl,
@@ -423,7 +405,6 @@ static struct pci_driver pci200_pci_driver = {
.remove = pci200_pci_remove_one,
};
-
static int __init pci200_init_module(void)
{
if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) {
@@ -433,8 +414,6 @@ static int __init pci200_init_module(void)
return pci_register_driver(&pci200_pci_driver);
}
-
-
static void __exit pci200_cleanup_module(void)
{
pci_unregister_driver(&pci200_pci_driver);
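
The get_port() macro in pci200syn gains parentheses around its card argument. The sketch below, with hypothetical names, shows what the parentheses buy: without them an argument such as cards + i would expand to &cards + i->ports[port], which does not even parse:

#include <stdio.h>

struct demo_card {
	int ports[2];
};

/* Parenthesized form, as in the patched get_port(). */
#define get_port(card, port) (&(card)->ports[port])

int main(void)
{
	struct demo_card cards[2] = { { { 1, 2 } }, { { 3, 4 } } };
	int i;

	for (i = 0; i < 2; i++)
		printf("port 0 of card %d = %d\n", i, *get_port(cards + i, 0));
	return 0;
}
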
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 7dddc9dcbe23..4403e219ca03 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Sealevel Systems 4021 driver.
+/* Sealevel Systems 4021 driver.
*
* (c) Copyright 1999, 2001 Alan Cox
* (c) Copyright 2001 Red Hat Inc.
@@ -29,32 +28,25 @@
#include <asm/byteorder.h>
#include "z85230.h"
-
-struct slvl_device
-{
+struct slvl_device {
struct z8530_channel *chan;
int channel;
};
-
-struct slvl_board
-{
+struct slvl_board {
struct slvl_device dev[2];
struct z8530_dev board;
int iobase;
};
-/*
- * Network driver support routines
- */
+ /* Network driver support routines */
-static inline struct slvl_device* dev_to_chan(struct net_device *dev)
+static inline struct slvl_device *dev_to_chan(struct net_device *dev)
{
return (struct slvl_device *)dev_to_hdlc(dev)->priv;
}
-/*
- * Frame receive. Simple for our card as we do HDLC and there
+/* Frame receive. Simple for our card as we do HDLC and there
* is no funny garbage involved
*/
@@ -68,9 +60,7 @@ static void sealevel_input(struct z8530_channel *c, struct sk_buff *skb)
netif_rx(skb);
}
-/*
- * We've been placed in the UP state
- */
+ /* We've been placed in the UP state */
static int sealevel_open(struct net_device *d)
{
@@ -78,17 +68,15 @@ static int sealevel_open(struct net_device *d)
int err = -1;
int unit = slvl->channel;
- /*
- * Link layer up.
- */
+ /* Link layer up. */
switch (unit) {
- case 0:
- err = z8530_sync_dma_open(d, slvl->chan);
- break;
- case 1:
- err = z8530_sync_open(d, slvl->chan);
- break;
+ case 0:
+ err = z8530_sync_dma_open(d, slvl->chan);
+ break;
+ case 1:
+ err = z8530_sync_open(d, slvl->chan);
+ break;
}
if (err)
@@ -97,21 +85,18 @@ static int sealevel_open(struct net_device *d)
err = hdlc_open(d);
if (err) {
switch (unit) {
- case 0:
- z8530_sync_dma_close(d, slvl->chan);
- break;
- case 1:
- z8530_sync_close(d, slvl->chan);
- break;
+ case 0:
+ z8530_sync_dma_close(d, slvl->chan);
+ break;
+ case 1:
+ z8530_sync_close(d, slvl->chan);
+ break;
}
return err;
}
slvl->chan->rx_function = sealevel_input;
- /*
- * Go go go
- */
netif_start_queue(d);
return 0;
}
@@ -121,9 +106,7 @@ static int sealevel_close(struct net_device *d)
struct slvl_device *slvl = dev_to_chan(d);
int unit = slvl->channel;
- /*
- * Discard new frames
- */
+ /* Discard new frames */
slvl->chan->rx_function = z8530_null_rx;
@@ -131,12 +114,12 @@ static int sealevel_close(struct net_device *d)
netif_stop_queue(d);
switch (unit) {
- case 0:
- z8530_sync_dma_close(d, slvl->chan);
- break;
- case 1:
- z8530_sync_close(d, slvl->chan);
- break;
+ case 0:
+ z8530_sync_dma_close(d, slvl->chan);
+ break;
+ case 1:
+ z8530_sync_close(d, slvl->chan);
+ break;
}
return 0;
}
@@ -144,16 +127,15 @@ static int sealevel_close(struct net_device *d)
static int sealevel_ioctl(struct net_device *d, struct ifreq *ifr, int cmd)
{
/* struct slvl_device *slvl=dev_to_chan(d);
- z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd) */
+ * z8530_ioctl(d,&slvl->sync.chanA,ifr,cmd)
+ */
return hdlc_ioctl(d, ifr, cmd);
}
-/*
- * Passed network frames, fire them downwind.
- */
+/* Passed network frames, fire them downwind. */
static netdev_tx_t sealevel_queue_xmit(struct sk_buff *skb,
- struct net_device *d)
+ struct net_device *d)
{
return z8530_queue_xmit(dev_to_chan(d)->chan, skb);
}
@@ -176,6 +158,7 @@ static const struct net_device_ops sealevel_ops = {
static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
{
struct net_device *dev = alloc_hdlcdev(sv);
+
if (!dev)
return -1;
@@ -195,10 +178,7 @@ static int slvl_setup(struct slvl_device *sv, int iobase, int irq)
return 0;
}
-
-/*
- * Allocate and setup Sealevel board.
- */
+/* Allocate and setup Sealevel board. */
static __init struct slvl_board *slvl_init(int iobase, int irq,
int txdma, int rxdma, int slow)
@@ -206,9 +186,7 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
struct z8530_dev *dev;
struct slvl_board *b;
- /*
- * Get the needed I/O space
- */
+ /* Get the needed I/O space */
if (!request_region(iobase, 8, "Sealevel 4021")) {
pr_warn("I/O 0x%X already in use\n", iobase);
@@ -227,17 +205,13 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
dev = &b->board;
- /*
- * Stuff in the I/O addressing
- */
+ /* Stuff in the I/O addressing */
dev->active = 0;
b->iobase = iobase;
- /*
- * Select 8530 delays for the old board
- */
+ /* Select 8530 delays for the old board */
if (slow)
iobase |= Z8530_PORT_SLEEP;
@@ -250,15 +224,13 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
dev->chanA.irqs = &z8530_nop;
dev->chanB.irqs = &z8530_nop;
- /*
- * Assert DTR enable DMA
- */
+ /* Assert DTR enable DMA */
outb(3 | (1 << 7), b->iobase + 4);
-
/* We want a fast IRQ for this device. Actually we'd like an even faster
- IRQ ;) - This is one driver RtLinux is made for */
+ * IRQ ;) - This is one driver RtLinux is made for
+ */
if (request_irq(irq, z8530_interrupt, 0,
"SeaLevel", dev) < 0) {
@@ -282,9 +254,7 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
disable_irq(irq);
- /*
- * Begin normal initialise
- */
+ /* Begin normal initialise */
if (z8530_init(dev) != 0) {
pr_err("Z8530 series device not found\n");
@@ -299,9 +269,7 @@ static __init struct slvl_board *slvl_init(int iobase, int irq,
z8530_channel_load(&dev->chanB, z8530_hdlc_kilostream_85230);
}
- /*
- * Now we can take the IRQ
- */
+ /* Now we can take the IRQ */
enable_irq(irq);
@@ -338,6 +306,7 @@ static void __exit slvl_shutdown(struct slvl_board *b)
for (u = 0; u < 2; u++) {
struct net_device *d = b->dev[u].chan->netdevice;
+
unregister_hdlc_device(d);
free_netdev(d);
}
@@ -351,12 +320,11 @@ static void __exit slvl_shutdown(struct slvl_board *b)
kfree(b);
}
-
-static int io=0x238;
-static int txdma=1;
-static int rxdma=3;
-static int irq=5;
-static bool slow=false;
+static int io = 0x238;
+static int txdma = 1;
+static int rxdma = 3;
+static int irq = 5;
+static bool slow;
module_param_hw(io, int, ioport, 0);
MODULE_PARM_DESC(io, "The I/O base of the Sealevel card");
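
The sealevel module parameters drop their zero and false initializers: file-scope objects have static storage duration and are zero-initialized, so only non-zero defaults need explicit values. A tiny sketch of the convention:

#include <stdbool.h>
#include <stdio.h>

static int io = 0x238;		/* non-zero default stays explicit */
static bool slow;		/* was "= false"; zero-init is implicit */

int main(void)
{
	printf("io=0x%X slow=%d\n", io, slow);
	return 0;
}
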
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index f393684f203a..f22e48415e6f 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -32,7 +32,7 @@
#include "wanxl.h"
-static const char* version = "wanXL serial card driver version: 0.48";
+static const char *version = "wanXL serial card driver version: 0.48";
#define PLX_CTL_RESET 0x40000000 /* adapter reset */
@@ -50,24 +50,21 @@ static const char* version = "wanXL serial card driver version: 0.48";
/* MAILBOX #2 - DRAM SIZE */
#define MBX2_MEMSZ_MASK 0xFFFF0000 /* PUTS Memory Size Register mask */
-
struct port {
struct net_device *dev;
struct card *card;
spinlock_t lock; /* for wanxl_xmit */
- int node; /* physical port #0 - 3 */
+ int node; /* physical port #0 - 3 */
unsigned int clock_type;
int tx_in, tx_out;
struct sk_buff *tx_skbs[TX_BUFFERS];
};
-
struct card_status {
desc_t rx_descs[RX_QUEUE_LENGTH];
port_status_t port_status[4];
};
-
struct card {
int n_ports; /* 1, 2 or 4 ports */
u8 irq;
@@ -81,25 +78,22 @@ struct card {
struct port ports[]; /* 1 - 4 port structures follow */
};
-
-
static inline struct port *dev_to_port(struct net_device *dev)
{
return (struct port *)dev_to_hdlc(dev)->priv;
}
-
static inline port_status_t *get_status(struct port *port)
{
return &port->card->status->port_status[port->node];
}
-
#ifdef DEBUG_PCI
static inline dma_addr_t pci_map_single_debug(struct pci_dev *pdev, void *ptr,
size_t size, int direction)
{
dma_addr_t addr = dma_map_single(&pdev->dev, ptr, size, direction);
+
if (addr + size > 0x100000000LL)
pr_crit("%s: pci_map_single() returned memory at 0x%llx!\n",
pci_name(pdev), (unsigned long long)addr);
@@ -110,7 +104,6 @@ static inline dma_addr_t pci_map_single_debug(struct pci_dev *pdev, void *ptr,
#define pci_map_single pci_map_single_debug
#endif
-
/* Cable and/or personality module change interrupt service */
static inline void wanxl_cable_intr(struct port *port)
{
@@ -118,22 +111,46 @@ static inline void wanxl_cable_intr(struct port *port)
int valid = 1;
const char *cable, *pm, *dte = "", *dsr = "", *dcd = "";
- switch(value & 0x7) {
- case STATUS_CABLE_V35: cable = "V.35"; break;
- case STATUS_CABLE_X21: cable = "X.21"; break;
- case STATUS_CABLE_V24: cable = "V.24"; break;
- case STATUS_CABLE_EIA530: cable = "EIA530"; break;
- case STATUS_CABLE_NONE: cable = "no"; break;
- default: cable = "invalid";
+ switch (value & 0x7) {
+ case STATUS_CABLE_V35:
+ cable = "V.35";
+ break;
+ case STATUS_CABLE_X21:
+ cable = "X.21";
+ break;
+ case STATUS_CABLE_V24:
+ cable = "V.24";
+ break;
+ case STATUS_CABLE_EIA530:
+ cable = "EIA530";
+ break;
+ case STATUS_CABLE_NONE:
+ cable = "no";
+ break;
+ default:
+ cable = "invalid";
}
- switch((value >> STATUS_CABLE_PM_SHIFT) & 0x7) {
- case STATUS_CABLE_V35: pm = "V.35"; break;
- case STATUS_CABLE_X21: pm = "X.21"; break;
- case STATUS_CABLE_V24: pm = "V.24"; break;
- case STATUS_CABLE_EIA530: pm = "EIA530"; break;
- case STATUS_CABLE_NONE: pm = "no personality"; valid = 0; break;
- default: pm = "invalid personality"; valid = 0;
+ switch ((value >> STATUS_CABLE_PM_SHIFT) & 0x7) {
+ case STATUS_CABLE_V35:
+ pm = "V.35";
+ break;
+ case STATUS_CABLE_X21:
+ pm = "X.21";
+ break;
+ case STATUS_CABLE_V24:
+ pm = "V.24";
+ break;
+ case STATUS_CABLE_EIA530:
+ pm = "EIA530";
+ break;
+ case STATUS_CABLE_NONE:
+ pm = "no personality";
+ valid = 0;
+ break;
+ default:
+ pm = "invalid personality";
+ valid = 0;
}
if (valid) {
@@ -154,14 +171,13 @@ static inline void wanxl_cable_intr(struct port *port)
netif_carrier_off(port->dev);
}
-
-
/* Transmit complete interrupt service */
static inline void wanxl_tx_intr(struct port *port)
{
struct net_device *dev = port->dev;
+
while (1) {
- desc_t *desc = &get_status(port)->tx_descs[port->tx_in];
+ desc_t *desc = &get_status(port)->tx_descs[port->tx_in];
struct sk_buff *skb = port->tx_skbs[port->tx_in];
switch (desc->stat) {
@@ -179,34 +195,33 @@ static inline void wanxl_tx_intr(struct port *port)
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
}
- desc->stat = PACKET_EMPTY; /* Free descriptor */
+ desc->stat = PACKET_EMPTY; /* Free descriptor */
dma_unmap_single(&port->card->pdev->dev, desc->address,
skb->len, DMA_TO_DEVICE);
dev_consume_skb_irq(skb);
- port->tx_in = (port->tx_in + 1) % TX_BUFFERS;
- }
+ port->tx_in = (port->tx_in + 1) % TX_BUFFERS;
+ }
}
-
-
/* Receive complete interrupt service */
static inline void wanxl_rx_intr(struct card *card)
{
desc_t *desc;
+
while (desc = &card->status->rx_descs[card->rx_in],
desc->stat != PACKET_EMPTY) {
- if ((desc->stat & PACKET_PORT_MASK) > card->n_ports)
+ if ((desc->stat & PACKET_PORT_MASK) > card->n_ports) {
pr_crit("%s: received packet for nonexistent port\n",
pci_name(card->pdev));
- else {
+ } else {
struct sk_buff *skb = card->rx_skbs[card->rx_in];
struct port *port = &card->ports[desc->stat &
PACKET_PORT_MASK];
struct net_device *dev = port->dev;
- if (!skb)
+ if (!skb) {
dev->stats.rx_dropped++;
- else {
+ } else {
dma_unmap_single(&card->pdev->dev,
desc->address, BUFFER_LENGTH,
DMA_FROM_DEVICE);
@@ -239,21 +254,18 @@ static inline void wanxl_rx_intr(struct card *card)
}
}
-
-
-static irqreturn_t wanxl_intr(int irq, void* dev_id)
+static irqreturn_t wanxl_intr(int irq, void *dev_id)
{
struct card *card = dev_id;
- int i;
- u32 stat;
- int handled = 0;
-
+ int i;
+ u32 stat;
+ int handled = 0;
- while((stat = readl(card->plx + PLX_DOORBELL_FROM_CARD)) != 0) {
- handled = 1;
+ while ((stat = readl(card->plx + PLX_DOORBELL_FROM_CARD)) != 0) {
+ handled = 1;
writel(stat, card->plx + PLX_DOORBELL_FROM_CARD);
- for (i = 0; i < card->n_ports; i++) {
+ for (i = 0; i < card->n_ports; i++) {
if (stat & (1 << (DOORBELL_FROM_CARD_TX_0 + i)))
wanxl_tx_intr(&card->ports[i]);
if (stat & (1 << (DOORBELL_FROM_CARD_CABLE_0 + i)))
@@ -261,23 +273,21 @@ static irqreturn_t wanxl_intr(int irq, void* dev_id)
}
if (stat & (1 << DOORBELL_FROM_CARD_RX))
wanxl_rx_intr(card);
- }
+ }
- return IRQ_RETVAL(handled);
+ return IRQ_RETVAL(handled);
}
-
-
static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct port *port = dev_to_port(dev);
desc_t *desc;
- spin_lock(&port->lock);
+ spin_lock(&port->lock);
desc = &get_status(port)->tx_descs[port->tx_out];
- if (desc->stat != PACKET_EMPTY) {
- /* should never happen - previous xmit should stop queue */
+ if (desc->stat != PACKET_EMPTY) {
+ /* should never happen - previous xmit should stop queue */
#ifdef DEBUG_PKT
printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
#endif
@@ -312,8 +322,6 @@ static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-
-
static int wanxl_attach(struct net_device *dev, unsigned short encoding,
unsigned short parity)
{
@@ -335,8 +343,6 @@ static int wanxl_attach(struct net_device *dev, unsigned short encoding,
return 0;
}
-
-
static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
const size_t size = sizeof(sync_serial_settings);
@@ -384,11 +390,9 @@ static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
default:
return hdlc_ioctl(dev, ifr, cmd);
- }
+ }
}
-
-
static int wanxl_open(struct net_device *dev)
{
struct port *port = dev_to_port(dev);
@@ -400,7 +404,9 @@ static int wanxl_open(struct net_device *dev)
netdev_err(dev, "port already open\n");
return -EIO;
}
- if ((i = hdlc_open(dev)) != 0)
+
+ i = hdlc_open(dev);
+ if (i)
return i;
port->tx_in = port->tx_out = 0;
@@ -423,8 +429,6 @@ static int wanxl_open(struct net_device *dev)
return -EFAULT;
}
-
-
static int wanxl_close(struct net_device *dev)
{
struct port *port = dev_to_port(dev);
@@ -461,8 +465,6 @@ static int wanxl_close(struct net_device *dev)
return 0;
}
-
-
static struct net_device_stats *wanxl_get_stats(struct net_device *dev)
{
struct port *port = dev_to_port(dev);
@@ -474,8 +476,6 @@ static struct net_device_stats *wanxl_get_stats(struct net_device *dev)
return &dev->stats;
}
-
-
static int wanxl_puts_command(struct card *card, u32 cmd)
{
unsigned long timeout = jiffies + 5 * HZ;
@@ -486,13 +486,11 @@ static int wanxl_puts_command(struct card *card, u32 cmd)
return 0;
schedule();
- }while (time_after(timeout, jiffies));
+ } while (time_after(timeout, jiffies));
return -1;
}
-
-
static void wanxl_reset(struct card *card)
{
u32 old_value = readl(card->plx + PLX_CONTROL) & ~PLX_CTL_RESET;
@@ -505,8 +503,6 @@ static void wanxl_reset(struct card *card)
readl(card->plx + PLX_CONTROL); /* wait for posted write */
}
-
-
static void wanxl_pci_remove_one(struct pci_dev *pdev)
{
struct card *card = pci_get_drvdata(pdev);
@@ -543,7 +539,6 @@ static void wanxl_pci_remove_one(struct pci_dev *pdev)
kfree(card);
}
-
#include "wanxlfw.inc"
static const struct net_device_ops wanxl_ops = {
@@ -574,12 +569,14 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
return i;
/* QUICC can only access first 256 MB of host RAM directly,
- but PLX9060 DMA does 32-bits for actual packet data transfers */
+ * but PLX9060 DMA does 32-bits for actual packet data transfers
+ */
/* FIXME when PCI/DMA subsystems are fixed.
- We set both dma_mask and consistent_dma_mask to 28 bits
- and pray pci_alloc_consistent() will use this info. It should
- work on most platforms */
+ * We set both dma_mask and consistent_dma_mask to 28 bits
+ * and pray pci_alloc_consistent() will use this info. It should
+ * work on most platforms
+ */
if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(28)) ||
dma_set_mask(&pdev->dev, DMA_BIT_MASK(28))) {
pr_err("No usable DMA configuration\n");
@@ -594,13 +591,18 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
}
switch (pdev->device) {
- case PCI_DEVICE_ID_SBE_WANXL100: ports = 1; break;
- case PCI_DEVICE_ID_SBE_WANXL200: ports = 2; break;
- default: ports = 4;
+ case PCI_DEVICE_ID_SBE_WANXL100:
+ ports = 1;
+ break;
+ case PCI_DEVICE_ID_SBE_WANXL200:
+ ports = 2;
+ break;
+ default:
+ ports = 4;
}
card = kzalloc(struct_size(card, ports, ports), GFP_KERNEL);
- if (card == NULL) {
+ if (!card) {
pci_release_regions(pdev);
pci_disable_device(pdev);
return -ENOBUFS;
@@ -612,7 +614,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
card->status = dma_alloc_coherent(&pdev->dev,
sizeof(struct card_status),
&card->status_address, GFP_KERNEL);
- if (card->status == NULL) {
+ if (!card->status) {
wanxl_pci_remove_one(pdev);
return -ENOBUFS;
}
@@ -624,8 +626,9 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
#endif
/* FIXME when PCI/DMA subsystems are fixed.
- We set both dma_mask and consistent_dma_mask back to 32 bits
- to indicate the card can do 32-bit DMA addressing */
+ * We set both dma_mask and consistent_dma_mask back to 32 bits
+ * to indicate the card can do 32-bit DMA addressing
+ */
if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
pr_err("No usable DMA configuration\n");
@@ -656,7 +659,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
return -ENODEV;
}
- switch(stat & 0xC0) {
+ switch (stat & 0xC0) {
case 0x00: /* hmm - PUTS completed with non-zero code? */
case 0x80: /* PUTS still testing the hardware */
break;
@@ -677,7 +680,6 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
/* set up on-board RAM mapping */
mem_phy = pci_resource_start(pdev, 2);
-
/* sanity check the board's reported memory size */
if (ramsize < BUFFERS_ADDR +
(TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports) {
@@ -697,6 +699,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
for (i = 0; i < RX_QUEUE_LENGTH; i++) {
struct sk_buff *skb = dev_alloc_skb(BUFFER_LENGTH);
+
card->rx_skbs[i] = skb;
if (skb)
card->status->rx_descs[i].address =
@@ -712,7 +715,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
}
for (i = 0; i < sizeof(firmware); i += 4)
- writel(ntohl(*(__be32*)(firmware + i)), mem + PDM_OFFSET + i);
+ writel(ntohl(*(__be32 *)(firmware + i)), mem + PDM_OFFSET + i);
for (i = 0; i < ports; i++)
writel(card->status_address +
@@ -732,10 +735,11 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
timeout = jiffies + 5 * HZ;
do {
- if ((stat = readl(card->plx + PLX_MAILBOX_5)) != 0)
+ stat = readl(card->plx + PLX_MAILBOX_5);
+ if (stat)
break;
schedule();
- }while (time_after(timeout, jiffies));
+ } while (time_after(timeout, jiffies));
if (!stat) {
pr_warn("%s: timeout while initializing card firmware\n",
@@ -764,6 +768,7 @@ static int wanxl_pci_init_one(struct pci_dev *pdev,
hdlc_device *hdlc;
struct port *port = &card->ports[i];
struct net_device *dev = alloc_hdlcdev(port);
+
if (!dev) {
pr_err("%s: unable to allocate memory\n",
pci_name(pdev));
@@ -813,7 +818,6 @@ static const struct pci_device_id wanxl_pci_tbl[] = {
{ 0, }
};
-
static struct pci_driver wanxl_pci_driver = {
.name = "wanXL",
.id_table = wanxl_pci_tbl,
@@ -821,7 +825,6 @@ static struct pci_driver wanxl_pci_driver = {
.remove = wanxl_pci_remove_one,
};
-
static int __init wanxl_init_module(void)
{
#ifdef MODULE
@@ -835,7 +838,6 @@ static void __exit wanxl_cleanup_module(void)
pci_unregister_driver(&wanxl_pci_driver);
}
-
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("SBE Inc. wanXL serial port driver");
MODULE_LICENSE("GPL v2");
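
wanxl_cable_intr() above reflows its packed one-line case labels into the usual one-statement-per-line switch. A self-contained sketch of the same decode; the DEMO_CABLE_* values are made up:

#include <stdio.h>

enum { DEMO_CABLE_V35 = 1, DEMO_CABLE_X21 = 2, DEMO_CABLE_NONE = 5 };

static const char *cable_name(unsigned int value)
{
	const char *cable;

	switch (value & 0x7) {	/* was: case X: cable = "..."; break; on one line */
	case DEMO_CABLE_V35:
		cable = "V.35";
		break;
	case DEMO_CABLE_X21:
		cable = "X.21";
		break;
	case DEMO_CABLE_NONE:
		cable = "no";
		break;
	default:
		cable = "invalid";
	}
	return cable;
}

int main(void)
{
	printf("%s cable\n", cable_name(DEMO_CABLE_V35));
	return 0;
}
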
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index 002b8c99ab5b..982a03488a00 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- *
- * (c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
+/* (c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
* (c) Copyright 2000, 2001 Red Hat Inc
*
* Development of this driver was funded by Equiinet Ltd
@@ -12,7 +10,7 @@
* Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
* unification of all the Z85x30 asynchronous drivers for real.
*
- * DMA now uses get_free_page as kmalloc buffers may span a 64K
+ * DMA now uses get_free_page as kmalloc buffers may span a 64K
* boundary.
*
* Modified for SMP safety and SMP locking by Alan Cox
@@ -55,14 +53,13 @@
#include "z85230.h"
-
/**
* z8530_read_port - Architecture specific interface function
* @p: port to read
*
* Provided port access methods. The Comtrol SV11 requires no delays
* between accesses and uses PC I/O. Some drivers may need a 5uS delay
- *
+ *
* In the longer term this should become an architecture specific
* section so that this can become a generic driver interface for all
* platforms. For now we only handle PC I/O ports with or without the
@@ -74,8 +71,9 @@
static inline int z8530_read_port(unsigned long p)
{
- u8 r=inb(Z8530_PORT_OF(p));
- if(p&Z8530_PORT_SLEEP) /* gcc should figure this out efficiently ! */
+ u8 r = inb(Z8530_PORT_OF(p));
+
+ if (p & Z8530_PORT_SLEEP) /* gcc should figure this out efficiently ! */
udelay(5);
return r;
}
@@ -95,34 +93,30 @@ static inline int z8530_read_port(unsigned long p)
* dread 5uS sanity delay.
*/
-
static inline void z8530_write_port(unsigned long p, u8 d)
{
- outb(d,Z8530_PORT_OF(p));
- if(p&Z8530_PORT_SLEEP)
+ outb(d, Z8530_PORT_OF(p));
+ if (p & Z8530_PORT_SLEEP)
udelay(5);
}
-
-
static void z8530_rx_done(struct z8530_channel *c);
static void z8530_tx_done(struct z8530_channel *c);
-
/**
- * read_zsreg - Read a register from a Z85230
+ * read_zsreg - Read a register from a Z85230
* @c: Z8530 channel to read from (2 per chip)
* @reg: Register to read
* FIXME: Use a spinlock.
- *
+ *
* Most of the Z8530 registers are indexed off the control registers.
* A read is done by writing to the control register and reading the
* register back. The caller must hold the lock
*/
-
+
static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
{
- if(reg)
+ if (reg)
z8530_write_port(c->ctrlio, reg);
return z8530_read_port(c->ctrlio);
}
@@ -138,7 +132,8 @@ static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
static inline u8 read_zsdata(struct z8530_channel *c)
{
u8 r;
- r=z8530_read_port(c->dataio);
+
+ r = z8530_read_port(c->dataio);
return r;
}
@@ -156,10 +151,9 @@ static inline u8 read_zsdata(struct z8530_channel *c)
*/
static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
{
- if(reg)
+ if (reg)
z8530_write_port(c->ctrlio, reg);
z8530_write_port(c->ctrlio, val);
-
}
/**
@@ -182,108 +176,94 @@ static inline void write_zsctrl(struct z8530_channel *c, u8 val)
*
* Write directly to the data register on the Z8530
*/
-
-
static inline void write_zsdata(struct z8530_channel *c, u8 val)
{
z8530_write_port(c->dataio, val);
}
-/*
- * Register loading parameters for a dead port
+/* Register loading parameters for a dead port
*/
-
-u8 z8530_dead_port[]=
-{
+
+u8 z8530_dead_port[] = {
255
};
-
EXPORT_SYMBOL(z8530_dead_port);
-/*
- * Register loading parameters for currently supported circuit types
+/* Register loading parameters for currently supported circuit types
*/
-
-/*
- * Data clocked by telco end. This is the correct data for the UK
+/* Data clocked by telco end. This is the correct data for the UK
* "kilostream" service, and most other similar services.
*/
-
-u8 z8530_hdlc_kilostream[]=
-{
- 4, SYNC_ENAB|SDLC|X1CLK,
+
+u8 z8530_hdlc_kilostream[] = {
+ 4, SYNC_ENAB | SDLC | X1CLK,
2, 0, /* No vector */
1, 0,
- 3, ENT_HM|RxCRC_ENAB|Rx8,
- 5, TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
+ 3, ENT_HM | RxCRC_ENAB | Rx8,
+ 5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR,
9, 0, /* Disable interrupts */
6, 0xFF,
7, FLAG,
- 10, ABUNDER|NRZ|CRCPS,/*MARKIDLE ??*/
+ 10, ABUNDER | NRZ | CRCPS,/*MARKIDLE ??*/
11, TCTRxCP,
14, DISDPLL,
- 15, DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
- 1, EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
- 9, NV|MIE|NORESET,
+ 15, DCDIE | SYNCIE | CTSIE | TxUIE | BRKIE,
+ 1, EXT_INT_ENAB | TxINT_ENAB | INT_ALL_Rx,
+ 9, NV | MIE | NORESET,
255
};
-
EXPORT_SYMBOL(z8530_hdlc_kilostream);
-/*
- * As above but for enhanced chips.
+/* As above but for enhanced chips.
*/
-
-u8 z8530_hdlc_kilostream_85230[]=
-{
- 4, SYNC_ENAB|SDLC|X1CLK,
+
+u8 z8530_hdlc_kilostream_85230[] = {
+ 4, SYNC_ENAB | SDLC | X1CLK,
2, 0, /* No vector */
1, 0,
- 3, ENT_HM|RxCRC_ENAB|Rx8,
- 5, TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
+ 3, ENT_HM | RxCRC_ENAB | Rx8,
+ 5, TxCRC_ENAB | RTS | TxENAB | Tx8 | DTR,
9, 0, /* Disable interrupts */
6, 0xFF,
7, FLAG,
- 10, ABUNDER|NRZ|CRCPS, /* MARKIDLE?? */
+ 10, ABUNDER | NRZ | CRCPS, /* MARKIDLE?? */
11, TCTRxCP,
14, DISDPLL,
- 15, DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
- 1, EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
- 9, NV|MIE|NORESET,
+ 15, DCDIE | SYNCIE | CTSIE | TxUIE | BRKIE,
+ 1, EXT_INT_ENAB | TxINT_ENAB | INT_ALL_Rx,
+ 9, NV | MIE | NORESET,
23, 3, /* Extended mode AUTO TX and EOM*/
-
+
255
};
-
EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);
/**
* z8530_flush_fifo - Flush on chip RX FIFO
* @c: Channel to flush
*
- * Flush the receive FIFO. There is no specific option for this, we
+ * Flush the receive FIFO. There is no specific option for this, we
* blindly read bytes and discard them. Reading when there is no data
* is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes.
- *
+ *
* All locking is handled for the caller. On return data may still be
* present if it arrived during the flush.
*/
-
+
static void z8530_flush_fifo(struct z8530_channel *c)
{
read_zsreg(c, R1);
read_zsreg(c, R1);
read_zsreg(c, R1);
read_zsreg(c, R1);
- if(c->dev->type==Z85230)
- {
+ if (c->dev->type == Z85230) {
read_zsreg(c, R1);
read_zsreg(c, R1);
read_zsreg(c, R1);
read_zsreg(c, R1);
}
-}
+}
/**
* z8530_rtsdtr - Control the outgoing DTS/RTS line
@@ -309,7 +289,7 @@ static void z8530_rtsdtr(struct z8530_channel *c, int set)
* z8530_rx - Handle a PIO receive event
* @c: Z8530 channel to process
*
- * Receive handler for receiving in PIO mode. This is much like the
+ * Receive handler for receiving in PIO mode. This is much like the
* async one but not quite the same or as complex
*
* Note: Its intended that this handler can easily be separated from
@@ -322,77 +302,63 @@ static void z8530_rtsdtr(struct z8530_channel *c, int set)
* other code - this is true in the RT case too.
*
* We only cover the sync cases for this. If you want 2Mbit async
- * do it yourself but consider medical assistance first. This non DMA
- * synchronous mode is portable code. The DMA mode assumes PCI like
+ * do it yourself but consider medical assistance first. This non DMA
+ * synchronous mode is portable code. The DMA mode assumes PCI like
* ISA DMA
*
* Called with the device lock held
*/
-
+
static void z8530_rx(struct z8530_channel *c)
{
- u8 ch,stat;
+ u8 ch, stat;
- while(1)
- {
+ while (1) {
/* FIFO empty ? */
- if(!(read_zsreg(c, R0)&1))
+ if (!(read_zsreg(c, R0) & 1))
break;
- ch=read_zsdata(c);
- stat=read_zsreg(c, R1);
-
- /*
- * Overrun ?
+ ch = read_zsdata(c);
+ stat = read_zsreg(c, R1);
+
+ /* Overrun ?
*/
- if(c->count < c->max)
- {
- *c->dptr++=ch;
+ if (c->count < c->max) {
+ *c->dptr++ = ch;
c->count++;
}
- if(stat&END_FR)
- {
-
- /*
- * Error ?
+ if (stat & END_FR) {
+ /* Error ?
*/
- if(stat&(Rx_OVR|CRC_ERR))
- {
+ if (stat & (Rx_OVR | CRC_ERR)) {
/* Rewind the buffer and return */
- if(c->skb)
- c->dptr=c->skb->data;
- c->count=0;
- if(stat&Rx_OVR)
- {
+ if (c->skb)
+ c->dptr = c->skb->data;
+ c->count = 0;
+ if (stat & Rx_OVR) {
pr_warn("%s: overrun\n", c->dev->name);
c->rx_overrun++;
}
- if(stat&CRC_ERR)
- {
+ if (stat & CRC_ERR) {
c->rx_crc_err++;
/* printk("crc error\n"); */
}
/* Shove the frame upstream */
- }
- else
- {
- /*
- * Drop the lock for RX processing, or
- * there are deadlocks
- */
+ } else {
+ /* Drop the lock for RX processing, or
+ * there are deadlocks
+ */
z8530_rx_done(c);
write_zsctrl(c, RES_Rx_CRC);
}
}
}
- /*
- * Clear irq
+ /* Clear irq
*/
write_zsctrl(c, ERR_RES);
write_zsctrl(c, RES_H_IUS);
}
-
/**
* z8530_tx - Handle a PIO transmit event
* @c: Z8530 channel to process
@@ -402,35 +368,31 @@ static void z8530_rx(struct z8530_channel *c)
* in as possible, its quite possible that we won't keep up with the
* data rate otherwise.
*/
-
+
static void z8530_tx(struct z8530_channel *c)
{
- while(c->txcount) {
+ while (c->txcount) {
/* FIFO full ? */
- if(!(read_zsreg(c, R0)&4))
+ if (!(read_zsreg(c, R0) & 4))
return;
c->txcount--;
- /*
- * Shovel out the byte
+ /* Shovel out the byte
*/
write_zsreg(c, R8, *c->tx_ptr++);
write_zsctrl(c, RES_H_IUS);
/* We are about to underflow */
- if(c->txcount==0)
- {
+ if (c->txcount == 0) {
write_zsctrl(c, RES_EOM_L);
- write_zsreg(c, R10, c->regs[10]&~ABUNDER);
+ write_zsreg(c, R10, c->regs[10] & ~ABUNDER);
}
}
-
- /*
- * End of frame TX - fire another one
+ /* End of frame TX - fire another one
*/
-
+
write_zsctrl(c, RES_Tx_P);
- z8530_tx_done(c);
+ z8530_tx_done(c);
write_zsctrl(c, RES_H_IUS);
}
@@ -460,8 +422,7 @@ static void z8530_status(struct z8530_channel *chan)
z8530_tx_done(chan);
}
- if (altered & chan->dcdcheck)
- {
+ if (altered & chan->dcdcheck) {
if (status & chan->dcdcheck) {
pr_info("%s: DCD raised\n", chan->dev->name);
write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
@@ -474,7 +435,6 @@ static void z8530_status(struct z8530_channel *chan)
if (chan->netdevice)
netif_carrier_off(chan->netdevice);
}
-
}
write_zsctrl(chan, RES_EXT_INT);
write_zsctrl(chan, RES_H_IUS);
@@ -485,7 +445,6 @@ struct z8530_irqhandler z8530_sync = {
.tx = z8530_tx,
.status = z8530_status,
};
-
EXPORT_SYMBOL(z8530_sync);
/**
@@ -497,31 +456,27 @@ EXPORT_SYMBOL(z8530_sync);
* events are handled by the DMA hardware. We get a kick here only if
* a frame ended.
*/
-
+
static void z8530_dma_rx(struct z8530_channel *chan)
{
- if(chan->rxdma_on)
- {
+ if (chan->rxdma_on) {
/* Special condition check only */
u8 status;
-
+
read_zsreg(chan, R7);
read_zsreg(chan, R6);
-
- status=read_zsreg(chan, R1);
-
- if(status&END_FR)
- {
+
+ status = read_zsreg(chan, R1);
+
+ if (status & END_FR)
z8530_rx_done(chan); /* Fire up the next one */
- }
+
write_zsctrl(chan, ERR_RES);
write_zsctrl(chan, RES_H_IUS);
- }
- else
- {
+ } else {
/* DMA is off right now, drain the slow way */
z8530_rx(chan);
- }
+ }
}
/**
@@ -531,11 +486,9 @@ static void z8530_dma_rx(struct z8530_channel *chan)
* We have received an interrupt while doing DMA transmissions. It
* shouldn't happen. Scream loudly if it does.
*/
-
static void z8530_dma_tx(struct z8530_channel *chan)
{
- if(!chan->dma_tx)
- {
+ if (!chan->dma_tx) {
pr_warn("Hey who turned the DMA off?\n");
z8530_tx(chan);
return;
@@ -548,40 +501,35 @@ static void z8530_dma_tx(struct z8530_channel *chan)
/**
* z8530_dma_status - Handle a DMA status exception
* @chan: Z8530 channel to process
- *
+ *
* A status event occurred on the Z8530. We receive these for two reasons
* when in DMA mode. Firstly if we finished a packet transfer we get one
* and kick the next packet out. Secondly we may see a DCD change.
*
*/
-
static void z8530_dma_status(struct z8530_channel *chan)
{
u8 status, altered;
- status=read_zsreg(chan, R0);
- altered=chan->status^status;
-
- chan->status=status;
+ status = read_zsreg(chan, R0);
+ altered = chan->status ^ status;
+ chan->status = status;
- if(chan->dma_tx)
- {
- if(status&TxEOM)
- {
+ if (chan->dma_tx) {
+ if (status & TxEOM) {
unsigned long flags;
-
- flags=claim_dma_lock();
+
+ flags = claim_dma_lock();
disable_dma(chan->txdma);
- clear_dma_ff(chan->txdma);
- chan->txdma_on=0;
+ clear_dma_ff(chan->txdma);
+ chan->txdma_on = 0;
release_dma_lock(flags);
z8530_tx_done(chan);
}
}
- if (altered & chan->dcdcheck)
- {
+ if (altered & chan->dcdcheck) {
if (status & chan->dcdcheck) {
pr_info("%s: DCD raised\n", chan->dev->name);
write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
@@ -621,21 +569,18 @@ static struct z8530_irqhandler z8530_txdma_sync = {
* (eg the MacII) we must clear the interrupt cause or die.
*/
-
static void z8530_rx_clear(struct z8530_channel *c)
{
- /*
- * Data and status bytes
+ /* Data and status bytes
*/
u8 stat;
read_zsdata(c);
- stat=read_zsreg(c, R1);
-
- if(stat&END_FR)
+ stat = read_zsreg(c, R1);
+
+ if (stat & END_FR)
write_zsctrl(c, RES_Rx_CRC);
- /*
- * Clear irq
+ /* Clear irq
*/
write_zsctrl(c, ERR_RES);
write_zsctrl(c, RES_H_IUS);
@@ -667,8 +612,9 @@ static void z8530_tx_clear(struct z8530_channel *c)
static void z8530_status_clear(struct z8530_channel *chan)
{
- u8 status=read_zsreg(chan, R0);
- if(status&TxEOM)
+ u8 status = read_zsreg(chan, R0);
+
+ if (status & TxEOM)
write_zsctrl(chan, ERR_RES);
write_zsctrl(chan, RES_EXT_INT);
write_zsctrl(chan, RES_H_IUS);
@@ -679,13 +625,11 @@ struct z8530_irqhandler z8530_nop = {
.tx = z8530_tx_clear,
.status = z8530_status_clear,
};
-
-
EXPORT_SYMBOL(z8530_nop);
/**
* z8530_interrupt - Handle an interrupt from a Z8530
- * @irq: Interrupt number
+ * @irq: Interrupt number
* @dev_id: The Z8530 device that is interrupting.
*
* A Z85[2]30 device has stuck its hand in the air for attention.
@@ -701,78 +645,73 @@ EXPORT_SYMBOL(z8530_nop);
irqreturn_t z8530_interrupt(int irq, void *dev_id)
{
- struct z8530_dev *dev=dev_id;
+ struct z8530_dev *dev = dev_id;
u8 intr;
static volatile int locker=0;
- int work=0;
+ int work = 0;
struct z8530_irqhandler *irqs;
-
- if(locker)
- {
+
+ if (locker) {
pr_err("IRQ re-enter\n");
return IRQ_NONE;
}
- locker=1;
+ locker = 1;
spin_lock(&dev->lock);
- while(++work<5000)
- {
-
+ while (++work < 5000) {
intr = read_zsreg(&dev->chanA, R3);
- if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT)))
+ if (!(intr &
+ (CHARxIP | CHATxIP | CHAEXT | CHBRxIP | CHBTxIP | CHBEXT)))
break;
-
- /* This holds the IRQ status. On the 8530 you must read it from chan
- A even though it applies to the whole chip */
-
+
+ /* This holds the IRQ status. On the 8530 you must read it
+ * from chan A even though it applies to the whole chip
+ */
+
/* Now walk the chip and see what it is wanting - it may be
- an IRQ for someone else remember */
-
- irqs=dev->chanA.irqs;
+ * an IRQ for someone else remember
+ */
+
+ irqs = dev->chanA.irqs;
- if(intr & (CHARxIP|CHATxIP|CHAEXT))
- {
- if(intr&CHARxIP)
+ if (intr & (CHARxIP | CHATxIP | CHAEXT)) {
+ if (intr & CHARxIP)
irqs->rx(&dev->chanA);
- if(intr&CHATxIP)
+ if (intr & CHATxIP)
irqs->tx(&dev->chanA);
- if(intr&CHAEXT)
+ if (intr & CHAEXT)
irqs->status(&dev->chanA);
}
- irqs=dev->chanB.irqs;
+ irqs = dev->chanB.irqs;
- if(intr & (CHBRxIP|CHBTxIP|CHBEXT))
- {
- if(intr&CHBRxIP)
+ if (intr & (CHBRxIP | CHBTxIP | CHBEXT)) {
+ if (intr & CHBRxIP)
irqs->rx(&dev->chanB);
- if(intr&CHBTxIP)
+ if (intr & CHBTxIP)
irqs->tx(&dev->chanB);
- if(intr&CHBEXT)
+ if (intr & CHBEXT)
irqs->status(&dev->chanB);
}
}
spin_unlock(&dev->lock);
- if(work==5000)
+ if (work == 5000)
pr_err("%s: interrupt jammed - abort(0x%X)!\n",
dev->name, intr);
/* Ok all done */
- locker=0;
+ locker = 0;
return IRQ_HANDLED;
}
-
EXPORT_SYMBOL(z8530_interrupt);
-static const u8 reg_init[16]=
-{
- 0,0,0,0,
- 0,0,0,0,
- 0,0,0,0,
- 0x55,0,0,0
+static const u8 reg_init[16] = {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0x55, 0, 0, 0
};
-
/**
* z8530_sync_open - Open a Z8530 channel for PIO
* @dev: The network interface we are using
@@ -781,7 +720,6 @@ static const u8 reg_init[16]=
* Switch a Z8530 into synchronous mode without DMA assist. We
* raise the RTS/DTR and commence network operation.
*/
-
int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
{
unsigned long flags;
@@ -789,7 +727,7 @@ int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
spin_lock_irqsave(c->lock, flags);
c->sync = 1;
- c->mtu = dev->mtu+64;
+ c->mtu = dev->mtu + 64;
c->count = 0;
c->skb = NULL;
c->skb2 = NULL;
@@ -798,17 +736,15 @@ int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
/* This loads the double buffer up */
z8530_rx_done(c); /* Load the frame ring */
z8530_rx_done(c); /* Load the backup frame */
- z8530_rtsdtr(c,1);
+ z8530_rtsdtr(c, 1);
c->dma_tx = 0;
- c->regs[R1]|=TxINT_ENAB;
+ c->regs[R1] |= TxINT_ENAB;
write_zsreg(c, R1, c->regs[R1]);
- write_zsreg(c, R3, c->regs[R3]|RxENABLE);
+ write_zsreg(c, R3, c->regs[R3] | RxENABLE);
spin_unlock_irqrestore(c->lock, flags);
return 0;
}
-
-
EXPORT_SYMBOL(z8530_sync_open);
/**
@@ -819,25 +755,23 @@ EXPORT_SYMBOL(z8530_sync_open);
* Close down a Z8530 interface and switch its interrupt handlers
* to discard future events.
*/
-
int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
{
u8 chk;
unsigned long flags;
-
+
spin_lock_irqsave(c->lock, flags);
c->irqs = &z8530_nop;
c->max = 0;
c->sync = 0;
-
- chk=read_zsreg(c,R0);
+
+ chk = read_zsreg(c, R0);
write_zsreg(c, R3, c->regs[R3]);
- z8530_rtsdtr(c,0);
+ z8530_rtsdtr(c, 0);
spin_unlock_irqrestore(c->lock, flags);
return 0;
}
-
EXPORT_SYMBOL(z8530_sync_close);
/**
@@ -849,91 +783,83 @@ EXPORT_SYMBOL(z8530_sync_close);
* ISA DMA channels must be available for this to work. We assume ISA
* DMA driven I/O and PC limits on access.
*/
-
int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
{
unsigned long cflags, dflags;
-
+
c->sync = 1;
- c->mtu = dev->mtu+64;
+ c->mtu = dev->mtu + 64;
c->count = 0;
c->skb = NULL;
c->skb2 = NULL;
- /*
- * Load the DMA interfaces up
+
+ /* Load the DMA interfaces up
*/
c->rxdma_on = 0;
c->txdma_on = 0;
-
- /*
- * Allocate the DMA flip buffers. Limit by page size.
+
+ /* Allocate the DMA flip buffers. Limit by page size.
* Everyone runs 1500 mtu or less on wan links so this
* should be fine.
*/
-
- if(c->mtu > PAGE_SIZE/2)
+
+ if (c->mtu > PAGE_SIZE / 2)
return -EMSGSIZE;
-
- c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
- if(c->rx_buf[0]==NULL)
+
+ c->rx_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!c->rx_buf[0])
return -ENOBUFS;
- c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;
-
- c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
- if(c->tx_dma_buf[0]==NULL)
- {
+ c->rx_buf[1] = c->rx_buf[0] + PAGE_SIZE / 2;
+
+ c->tx_dma_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!c->tx_dma_buf[0]) {
free_page((unsigned long)c->rx_buf[0]);
- c->rx_buf[0]=NULL;
+ c->rx_buf[0] = NULL;
return -ENOBUFS;
}
- c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;
+ c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE / 2;
- c->tx_dma_used=0;
+ c->tx_dma_used = 0;
c->dma_tx = 1;
- c->dma_num=0;
- c->dma_ready=1;
-
- /*
- * Enable DMA control mode
+ c->dma_num = 0;
+ c->dma_ready = 1;
+
+ /* Enable DMA control mode
*/
spin_lock_irqsave(c->lock, cflags);
-
- /*
- * TX DMA via DIR/REQ
+
+ /* TX DMA via DIR/REQ
+ */
+
+ c->regs[R14] |= DTRREQ;
+ write_zsreg(c, R14, c->regs[R14]);
+
+ c->regs[R1] &= ~TxINT_ENAB;
+ write_zsreg(c, R1, c->regs[R1]);
+
+ /* RX DMA via W/Req
*/
-
- c->regs[R14]|= DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
- c->regs[R1]&= ~TxINT_ENAB;
+ c->regs[R1] |= WT_FN_RDYFN;
+ c->regs[R1] |= WT_RDY_RT;
+ c->regs[R1] |= INT_ERR_Rx;
+ c->regs[R1] &= ~TxINT_ENAB;
write_zsreg(c, R1, c->regs[R1]);
-
- /*
- * RX DMA via W/Req
- */
-
- c->regs[R1]|= WT_FN_RDYFN;
- c->regs[R1]|= WT_RDY_RT;
- c->regs[R1]|= INT_ERR_Rx;
- c->regs[R1]&= ~TxINT_ENAB;
+ c->regs[R1] |= WT_RDY_ENAB;
write_zsreg(c, R1, c->regs[R1]);
- c->regs[R1]|= WT_RDY_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
-
- /*
- * DMA interrupts
+
+ /* DMA interrupts
+ */
+
+ /* Set up the DMA configuration
*/
-
- /*
- * Set up the DMA configuration
- */
-
- dflags=claim_dma_lock();
-
+
+ dflags = claim_dma_lock();
+
disable_dma(c->rxdma);
clear_dma_ff(c->rxdma);
- set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
+ set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10);
set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
set_dma_count(c->rxdma, c->mtu);
enable_dma(c->rxdma);
@@ -942,26 +868,24 @@ int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
clear_dma_ff(c->txdma);
set_dma_mode(c->txdma, DMA_MODE_WRITE);
disable_dma(c->txdma);
-
+
release_dma_lock(dflags);
-
- /*
- * Select the DMA interrupt handlers
+
+ /* Select the DMA interrupt handlers
*/
c->rxdma_on = 1;
c->txdma_on = 1;
c->tx_dma_used = 1;
-
+
c->irqs = &z8530_dma_sync;
- z8530_rtsdtr(c,1);
- write_zsreg(c, R3, c->regs[R3]|RxENABLE);
+ z8530_rtsdtr(c, 1);
+ write_zsreg(c, R3, c->regs[R3] | RxENABLE);
spin_unlock_irqrestore(c->lock, cflags);
-
+
return 0;
}
-
EXPORT_SYMBOL(z8530_sync_dma_open);
/**
@@ -972,66 +896,60 @@ EXPORT_SYMBOL(z8530_sync_dma_open);
* Shut down a DMA mode synchronous interface. Halt the DMA, and
* free the buffers.
*/
-
int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
{
u8 chk;
unsigned long flags;
-
+
c->irqs = &z8530_nop;
c->max = 0;
c->sync = 0;
-
- /*
- * Disable the PC DMA channels
+
+ /* Disable the PC DMA channels
*/
-
- flags=claim_dma_lock();
+
+ flags = claim_dma_lock();
disable_dma(c->rxdma);
clear_dma_ff(c->rxdma);
-
+
c->rxdma_on = 0;
-
+
disable_dma(c->txdma);
clear_dma_ff(c->txdma);
release_dma_lock(flags);
-
+
c->txdma_on = 0;
c->tx_dma_used = 0;
spin_lock_irqsave(c->lock, flags);
- /*
- * Disable DMA control mode
+ /* Disable DMA control mode
*/
-
- c->regs[R1]&= ~WT_RDY_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
- c->regs[R1]|= INT_ALL_Rx;
+
+ c->regs[R1] &= ~WT_RDY_ENAB;
+ write_zsreg(c, R1, c->regs[R1]);
+ c->regs[R1] &= ~(WT_RDY_RT | WT_FN_RDYFN | INT_ERR_Rx);
+ c->regs[R1] |= INT_ALL_Rx;
write_zsreg(c, R1, c->regs[R1]);
- c->regs[R14]&= ~DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
-
- if(c->rx_buf[0])
- {
+ c->regs[R14] &= ~DTRREQ;
+ write_zsreg(c, R14, c->regs[R14]);
+
+ if (c->rx_buf[0]) {
free_page((unsigned long)c->rx_buf[0]);
- c->rx_buf[0]=NULL;
+ c->rx_buf[0] = NULL;
}
- if(c->tx_dma_buf[0])
- {
+ if (c->tx_dma_buf[0]) {
free_page((unsigned long)c->tx_dma_buf[0]);
- c->tx_dma_buf[0]=NULL;
+ c->tx_dma_buf[0] = NULL;
}
- chk=read_zsreg(c,R0);
+ chk = read_zsreg(c, R0);
write_zsreg(c, R3, c->regs[R3]);
- z8530_rtsdtr(c,0);
+ z8530_rtsdtr(c, 0);
spin_unlock_irqrestore(c->lock, flags);
return 0;
}
-
EXPORT_SYMBOL(z8530_sync_dma_close);
/**
@@ -1050,65 +968,58 @@ int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
printk("Opening sync interface for TX-DMA\n");
c->sync = 1;
- c->mtu = dev->mtu+64;
+ c->mtu = dev->mtu + 64;
c->count = 0;
c->skb = NULL;
c->skb2 = NULL;
-
- /*
- * Allocate the DMA flip buffers. Limit by page size.
+
+ /* Allocate the DMA flip buffers. Limit by page size.
* Everyone runs 1500 mtu or less on wan links so this
* should be fine.
*/
-
- if(c->mtu > PAGE_SIZE/2)
+
+ if (c->mtu > PAGE_SIZE / 2)
return -EMSGSIZE;
-
- c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
- if(c->tx_dma_buf[0]==NULL)
- return -ENOBUFS;
- c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2;
+ c->tx_dma_buf[0] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!c->tx_dma_buf[0])
+ return -ENOBUFS;
+ c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE / 2;
spin_lock_irqsave(c->lock, cflags);
- /*
- * Load the PIO receive ring
+ /* Load the PIO receive ring
*/
z8530_rx_done(c);
z8530_rx_done(c);
- /*
- * Load the DMA interfaces up
+ /* Load the DMA interfaces up
*/
c->rxdma_on = 0;
c->txdma_on = 0;
-
- c->tx_dma_used=0;
- c->dma_num=0;
- c->dma_ready=1;
+
+ c->tx_dma_used = 0;
+ c->dma_num = 0;
+ c->dma_ready = 1;
c->dma_tx = 1;
- /*
- * Enable DMA control mode
+ /* Enable DMA control mode
*/
- /*
- * TX DMA via DIR/REQ
+ /* TX DMA via DIR/REQ
*/
- c->regs[R14]|= DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
-
- c->regs[R1]&= ~TxINT_ENAB;
+ c->regs[R14] |= DTRREQ;
+ write_zsreg(c, R14, c->regs[R14]);
+
+ c->regs[R1] &= ~TxINT_ENAB;
write_zsreg(c, R1, c->regs[R1]);
-
- /*
- * Set up the DMA configuration
- */
-
+
+ /* Set up the DMA configuration
+ */
+
dflags = claim_dma_lock();
disable_dma(c->txdma);
@@ -1117,23 +1028,21 @@ int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
disable_dma(c->txdma);
release_dma_lock(dflags);
-
- /*
- * Select the DMA interrupt handlers
+
+ /* Select the DMA interrupt handlers
*/
c->rxdma_on = 0;
c->txdma_on = 1;
c->tx_dma_used = 1;
-
+
c->irqs = &z8530_txdma_sync;
- z8530_rtsdtr(c,1);
- write_zsreg(c, R3, c->regs[R3]|RxENABLE);
+ z8530_rtsdtr(c, 1);
+ write_zsreg(c, R3, c->regs[R3] | RxENABLE);
spin_unlock_irqrestore(c->lock, cflags);
-
+
return 0;
}
-
EXPORT_SYMBOL(z8530_sync_txdma_open);
/**
@@ -1141,7 +1050,7 @@ EXPORT_SYMBOL(z8530_sync_txdma_open);
* @dev: Network device to detach
* @c: Z8530 channel to move into discard mode
*
- * Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
+ * Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
* and free the buffers.
*/
@@ -1150,17 +1059,15 @@ int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
unsigned long dflags, cflags;
u8 chk;
-
spin_lock_irqsave(c->lock, cflags);
-
+
c->irqs = &z8530_nop;
c->max = 0;
c->sync = 0;
-
- /*
- * Disable the PC DMA channels
+
+ /* Disable the PC DMA channels
*/
-
+
dflags = claim_dma_lock();
disable_dma(c->txdma);
@@ -1170,41 +1077,34 @@ int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
release_dma_lock(dflags);
- /*
- * Disable DMA control mode
+ /* Disable DMA control mode
*/
-
- c->regs[R1]&= ~WT_RDY_ENAB;
- write_zsreg(c, R1, c->regs[R1]);
- c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
- c->regs[R1]|= INT_ALL_Rx;
+
+ c->regs[R1] &= ~WT_RDY_ENAB;
+ write_zsreg(c, R1, c->regs[R1]);
+ c->regs[R1] &= ~(WT_RDY_RT | WT_FN_RDYFN | INT_ERR_Rx);
+ c->regs[R1] |= INT_ALL_Rx;
write_zsreg(c, R1, c->regs[R1]);
- c->regs[R14]&= ~DTRREQ;
- write_zsreg(c, R14, c->regs[R14]);
-
- if(c->tx_dma_buf[0])
- {
+ c->regs[R14] &= ~DTRREQ;
+ write_zsreg(c, R14, c->regs[R14]);
+
+ if (c->tx_dma_buf[0]) {
free_page((unsigned long)c->tx_dma_buf[0]);
- c->tx_dma_buf[0]=NULL;
+ c->tx_dma_buf[0] = NULL;
}
- chk=read_zsreg(c,R0);
+ chk = read_zsreg(c, R0);
write_zsreg(c, R3, c->regs[R3]);
- z8530_rtsdtr(c,0);
+ z8530_rtsdtr(c, 0);
spin_unlock_irqrestore(c->lock, cflags);
return 0;
}
-
-
EXPORT_SYMBOL(z8530_sync_txdma_close);
-
-/*
- * Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
+/* Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
* it exists...
*/
-
-static const char *z8530_type_name[]={
+static const char * const z8530_type_name[] = {
"Z8530",
"Z85C30",
"Z85230"
@@ -1224,78 +1124,71 @@ static const char *z8530_type_name[]={
void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
{
pr_info("%s: %s found at %s 0x%lX, IRQ %d\n",
- dev->name,
+ dev->name,
z8530_type_name[dev->type],
mapping,
Z8530_PORT_OF(io),
dev->irq);
}
-
EXPORT_SYMBOL(z8530_describe);
-/*
- * Locked operation part of the z8530 init code
+/* Locked operation part of the z8530 init code
*/
-
static inline int do_z8530_init(struct z8530_dev *dev)
{
/* NOP the interrupt handlers first - we might get a
- floating IRQ transition when we reset the chip */
- dev->chanA.irqs=&z8530_nop;
- dev->chanB.irqs=&z8530_nop;
- dev->chanA.dcdcheck=DCD;
- dev->chanB.dcdcheck=DCD;
+ * floating IRQ transition when we reset the chip
+ */
+ dev->chanA.irqs = &z8530_nop;
+ dev->chanB.irqs = &z8530_nop;
+ dev->chanA.dcdcheck = DCD;
+ dev->chanB.dcdcheck = DCD;
/* Reset the chip */
write_zsreg(&dev->chanA, R9, 0xC0);
udelay(200);
/* Now check its valid */
write_zsreg(&dev->chanA, R12, 0xAA);
- if(read_zsreg(&dev->chanA, R12)!=0xAA)
+ if (read_zsreg(&dev->chanA, R12) != 0xAA)
return -ENODEV;
write_zsreg(&dev->chanA, R12, 0x55);
- if(read_zsreg(&dev->chanA, R12)!=0x55)
+ if (read_zsreg(&dev->chanA, R12) != 0x55)
return -ENODEV;
-
- dev->type=Z8530;
-
- /*
- * See the application note.
+
+ dev->type = Z8530;
+
+ /* See the application note.
*/
-
+
write_zsreg(&dev->chanA, R15, 0x01);
-
- /*
- * If we can set the low bit of R15 then
+
+ /* If we can set the low bit of R15 then
* the chip is enhanced.
*/
-
- if(read_zsreg(&dev->chanA, R15)==0x01)
- {
+
+ if (read_zsreg(&dev->chanA, R15) == 0x01) {
/* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
/* Put a char in the fifo */
write_zsreg(&dev->chanA, R8, 0);
- if(read_zsreg(&dev->chanA, R0)&Tx_BUF_EMP)
+ if (read_zsreg(&dev->chanA, R0) & Tx_BUF_EMP)
dev->type = Z85230; /* Has a FIFO */
else
dev->type = Z85C30; /* Z85C30, 1 byte FIFO */
}
-
- /*
- * The code assumes R7' and friends are
+
+ /* The code assumes R7' and friends are
* off. Use write_zsext() for these and keep
* this bit clear.
*/
-
+
write_zsreg(&dev->chanA, R15, 0);
-
- /*
- * At this point it looks like the chip is behaving
+
+ /* At this point it looks like the chip is behaving
*/
-
+
memcpy(dev->chanA.regs, reg_init, 16);
- memcpy(dev->chanB.regs, reg_init ,16);
-
+ memcpy(dev->chanB.regs, reg_init, 16);
+
return 0;
}
@@ -1332,36 +1225,32 @@ int z8530_init(struct z8530_dev *dev)
return ret;
}
-
-
EXPORT_SYMBOL(z8530_init);
/**
* z8530_shutdown - Shutdown a Z8530 device
* @dev: The Z8530 chip to shutdown
*
- * We set the interrupt handlers to silence any interrupts. We then
+ * We set the interrupt handlers to silence any interrupts. We then
* reset the chip and wait 100uS to be sure the reset completed. Just
* in case the caller then tries to do stuff.
*
* This is called without the lock held
*/
-
int z8530_shutdown(struct z8530_dev *dev)
{
unsigned long flags;
/* Reset the chip */
spin_lock_irqsave(&dev->lock, flags);
- dev->chanA.irqs=&z8530_nop;
- dev->chanB.irqs=&z8530_nop;
+ dev->chanA.irqs = &z8530_nop;
+ dev->chanB.irqs = &z8530_nop;
write_zsreg(&dev->chanA, R9, 0xC0);
/* We must lock the udelay, the chip is offlimits here */
udelay(100);
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
}
-
EXPORT_SYMBOL(z8530_shutdown);
/**
@@ -1370,7 +1259,7 @@ EXPORT_SYMBOL(z8530_shutdown);
* @rtable: table of register, value pairs
* FIXME: ioctl to allow user uploaded tables
*
- * Load a Z8530 channel up from the system data. We use +16 to
+ * Load a Z8530 channel up from the system data. We use +16 to
* indicate the "prime" registers. The value 255 terminates the
* table.
*/
@@ -1381,41 +1270,39 @@ int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
spin_lock_irqsave(c->lock, flags);
- while(*rtable!=255)
- {
- int reg=*rtable++;
- if(reg>0x0F)
- write_zsreg(c, R15, c->regs[15]|1);
- write_zsreg(c, reg&0x0F, *rtable);
- if(reg>0x0F)
- write_zsreg(c, R15, c->regs[15]&~1);
- c->regs[reg]=*rtable++;
+ while (*rtable != 255) {
+ int reg = *rtable++;
+
+ if (reg > 0x0F)
+ write_zsreg(c, R15, c->regs[15] | 1);
+ write_zsreg(c, reg & 0x0F, *rtable);
+ if (reg > 0x0F)
+ write_zsreg(c, R15, c->regs[15] & ~1);
+ c->regs[reg] = *rtable++;
}
- c->rx_function=z8530_null_rx;
- c->skb=NULL;
- c->tx_skb=NULL;
- c->tx_next_skb=NULL;
- c->mtu=1500;
- c->max=0;
- c->count=0;
- c->status=read_zsreg(c, R0);
- c->sync=1;
- write_zsreg(c, R3, c->regs[R3]|RxENABLE);
+ c->rx_function = z8530_null_rx;
+ c->skb = NULL;
+ c->tx_skb = NULL;
+ c->tx_next_skb = NULL;
+ c->mtu = 1500;
+ c->max = 0;
+ c->count = 0;
+ c->status = read_zsreg(c, R0);
+ c->sync = 1;
+ write_zsreg(c, R3, c->regs[R3] | RxENABLE);
spin_unlock_irqrestore(c->lock, flags);
return 0;
}
-
EXPORT_SYMBOL(z8530_channel_load);
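
The kernel-doc above fully specifies the table format consumed by z8530_channel_load(): register/value byte pairs, with registers above 0x0F reaching the "prime" bank through the low bit of R15, and a single 255 terminating the walk. A minimal sketch of such a table; the register values here are hypothetical, not a tested line configuration:

static u8 example_rtable[] = {
	R4,      0x20,	/* hypothetical WR4 mode bits */
	R10,     0x80,	/* hypothetical WR10 framing bits */
	R6 + 16, 0x07,	/* 6 + 16 addresses prime register R6' */
	255		/* terminator */
};

/* z8530_channel_load(&dev->chanA, example_rtable); */
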
-
/**
* z8530_tx_begin - Begin packet transmission
* @c: The Z8530 channel to kick
*
* This is the speed sensitive side of transmission. If we are called
* and no buffer is being transmitted we commence the next buffer. If
- * nothing is queued we idle the sync.
+ * nothing is queued we idle the sync.
*
* Note: We are handling this code path in the interrupt path, keep it
* fast or bad things will happen.
@@ -1426,85 +1313,68 @@ EXPORT_SYMBOL(z8530_channel_load);
static void z8530_tx_begin(struct z8530_channel *c)
{
unsigned long flags;
- if(c->tx_skb)
+
+ if (c->tx_skb)
return;
-
- c->tx_skb=c->tx_next_skb;
- c->tx_next_skb=NULL;
- c->tx_ptr=c->tx_next_ptr;
-
- if(c->tx_skb==NULL)
- {
+
+ c->tx_skb = c->tx_next_skb;
+ c->tx_next_skb = NULL;
+ c->tx_ptr = c->tx_next_ptr;
+
+ if (!c->tx_skb) {
/* Idle on */
- if(c->dma_tx)
- {
- flags=claim_dma_lock();
+ if (c->dma_tx) {
+ flags = claim_dma_lock();
disable_dma(c->txdma);
- /*
- * Check if we crapped out.
+ /* Check if we crapped out.
*/
- if (get_dma_residue(c->txdma))
- {
+ if (get_dma_residue(c->txdma)) {
c->netdevice->stats.tx_dropped++;
c->netdevice->stats.tx_fifo_errors++;
}
release_dma_lock(flags);
}
- c->txcount=0;
- }
- else
- {
- c->txcount=c->tx_skb->len;
-
-
- if(c->dma_tx)
- {
- /*
- * FIXME. DMA is broken for the original 8530,
+ c->txcount = 0;
+ } else {
+ c->txcount = c->tx_skb->len;
+
+ if (c->dma_tx) {
+ /* FIXME. DMA is broken for the original 8530,
* on the older parts we need to set a flag and
* wait for a further TX interrupt to fire this
- * stage off
+ * stage off
*/
-
- flags=claim_dma_lock();
+
+ flags = claim_dma_lock();
disable_dma(c->txdma);
- /*
- * These two are needed by the 8530/85C30
+ /* These two are needed by the 8530/85C30
* and must be issued when idling.
*/
-
- if(c->dev->type!=Z85230)
- {
+ if (c->dev->type != Z85230) {
write_zsctrl(c, RES_Tx_CRC);
write_zsctrl(c, RES_EOM_L);
- }
- write_zsreg(c, R10, c->regs[10]&~ABUNDER);
+ }
+ write_zsreg(c, R10, c->regs[10] & ~ABUNDER);
clear_dma_ff(c->txdma);
set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
set_dma_count(c->txdma, c->txcount);
enable_dma(c->txdma);
release_dma_lock(flags);
write_zsctrl(c, RES_EOM_L);
- write_zsreg(c, R5, c->regs[R5]|TxENAB);
- }
- else
- {
-
+ write_zsreg(c, R5, c->regs[R5] | TxENAB);
+ } else {
/* ABUNDER off */
write_zsreg(c, R10, c->regs[10]);
write_zsctrl(c, RES_Tx_CRC);
-
- while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
- {
+
+ while (c->txcount && (read_zsreg(c, R0) & Tx_BUF_EMP)) {
write_zsreg(c, R8, *c->tx_ptr++);
c->txcount--;
}
-
}
}
- /*
- * Since we emptied tx_skb we can ask for more
+ /* Since we emptied tx_skb we can ask for more
*/
netif_wake_queue(c->netdevice);
}
@@ -1525,7 +1395,7 @@ static void z8530_tx_done(struct z8530_channel *c)
struct sk_buff *skb;
/* Actually this can happen.*/
- if (c->tx_skb == NULL)
+ if (!c->tx_skb)
return;
skb = c->tx_skb;
@@ -1544,12 +1414,10 @@ static void z8530_tx_done(struct z8530_channel *c)
* We point the receive handler at this function when idle. Instead
* of processing the frames we get to throw them away.
*/
-
void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
{
dev_kfree_skb_any(skb);
}
-
EXPORT_SYMBOL(z8530_null_rx);
/**
@@ -1564,67 +1432,58 @@ EXPORT_SYMBOL(z8530_null_rx);
*
* Called with the lock held
*/
-
static void z8530_rx_done(struct z8530_channel *c)
{
struct sk_buff *skb;
int ct;
-
- /*
- * Is our receive engine in DMA mode
+
+ /* Is our receive engine in DMA mode
*/
-
- if(c->rxdma_on)
- {
- /*
- * Save the ready state and the buffer currently
+ if (c->rxdma_on) {
+ /* Save the ready state and the buffer currently
* being used as the DMA target
*/
-
- int ready=c->dma_ready;
- unsigned char *rxb=c->rx_buf[c->dma_num];
+ int ready = c->dma_ready;
+ unsigned char *rxb = c->rx_buf[c->dma_num];
unsigned long flags;
-
- /*
- * Complete this DMA. Necessary to find the length
- */
-
- flags=claim_dma_lock();
-
+
+ /* Complete this DMA. Necessary to find the length
+ */
+ flags = claim_dma_lock();
+
disable_dma(c->rxdma);
clear_dma_ff(c->rxdma);
- c->rxdma_on=0;
- ct=c->mtu-get_dma_residue(c->rxdma);
- if(ct<0)
- ct=2; /* Shit happens.. */
- c->dma_ready=0;
-
- /*
- * Normal case: the other slot is free, start the next DMA
+ c->rxdma_on = 0;
+ ct = c->mtu - get_dma_residue(c->rxdma);
+ if (ct < 0)
+ ct = 2; /* Shit happens.. */
+ c->dma_ready = 0;
+
+ /* Normal case: the other slot is free, start the next DMA
* into it immediately.
*/
-
- if(ready)
- {
- c->dma_num^=1;
- set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
+
+ if (ready) {
+ c->dma_num ^= 1;
+ set_dma_mode(c->rxdma, DMA_MODE_READ | 0x10);
set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
set_dma_count(c->rxdma, c->mtu);
c->rxdma_on = 1;
enable_dma(c->rxdma);
- /* Stop any frames that we missed the head of
- from passing */
+ /* Stop any frames that we missed the head of
+ * from passing
+ */
write_zsreg(c, R0, RES_Rx_CRC);
- }
- else
+ } else {
/* Can't occur as we dont reenable the DMA irq until
- after the flip is done */
+ * after the flip is done
+ */
netdev_warn(c->netdevice, "DMA flip overrun!\n");
+ }
release_dma_lock(flags);
- /*
- * Shove the old buffer into an sk_buff. We can't DMA
+ /* Shove the old buffer into an sk_buff. We can't DMA
* directly into one on a PC - it might be above the 16Mb
* boundary. Optimisation - we could check to see if we
* can avoid the copy. Optimisation 2 - make the memcpy
@@ -1632,7 +1491,7 @@ static void z8530_rx_done(struct z8530_channel *c)
*/
skb = dev_alloc_skb(ct);
- if (skb == NULL) {
+ if (!skb) {
c->netdevice->stats.rx_dropped++;
netdev_warn(c->netdevice, "Memory squeeze\n");
} else {
@@ -1646,8 +1505,7 @@ static void z8530_rx_done(struct z8530_channel *c)
RT_LOCK;
skb = c->skb;
- /*
- * The game we play for non DMA is similar. We want to
+ /* The game we play for non DMA is similar. We want to
* get the controller set up for the next packet as fast
* as possible. We potentially only have one byte + the
* fifo length for this. Thus we want to flip to the new
@@ -1658,7 +1516,7 @@ static void z8530_rx_done(struct z8530_channel *c)
* sync IRQ for the RT_LOCK area.
*
*/
- ct=c->count;
+ ct = c->count;
c->skb = c->skb2;
c->count = 0;
@@ -1673,15 +1531,13 @@ static void z8530_rx_done(struct z8530_channel *c)
RT_UNLOCK;
c->skb2 = dev_alloc_skb(c->mtu);
- if (c->skb2 == NULL)
- netdev_warn(c->netdevice, "memory squeeze\n");
- else
+ if (c->skb2)
skb_put(c->skb2, c->mtu);
+
c->netdevice->stats.rx_packets++;
c->netdevice->stats.rx_bytes += ct;
}
- /*
- * If we received a frame we must now process it.
+ /* If we received a frame we must now process it.
*/
if (skb) {
skb_trim(skb, ct);
@@ -1702,9 +1558,10 @@ static void z8530_rx_done(struct z8530_channel *c)
static inline int spans_boundary(struct sk_buff *skb)
{
- unsigned long a=(unsigned long)skb->data;
- a^=(a+skb->len);
- if(a&0x00010000) /* If the 64K bit is different.. */
+ unsigned long a = (unsigned long)skb->data;
+
+ a ^= (a + skb->len);
+ if (a & 0x00010000) /* If the 64K bit is different.. */
return 1;
return 0;
}
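
spans_boundary() relies on a small XOR identity: bit 16 of a ^ (a + len) is set exactly when the start and end of the buffer fall in different 64KiB pages (for buffers shorter than 64KiB), which is the boundary the PC ISA DMA controller's address counter cannot carry across. A worked instance with illustrative addresses:

unsigned long start = 0x0000FF00UL;
unsigned long end   = start + 0x200UL;	/* 0x00010100 */
unsigned long diff  = start ^ end;	/* 0x0001FE00 */
int spans = !!(diff & 0x00010000);	/* 1: crosses 64K, take the flip buffer */
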
@@ -1715,60 +1572,54 @@ static inline int spans_boundary(struct sk_buff *skb)
* @skb: The packet to kick down the channel
*
* Queue a packet for transmission. Because we have rather
- * hard to hit interrupt latencies for the Z85230 per packet
+ * hard to hit interrupt latencies for the Z85230 per packet
* even in DMA mode we do the flip to DMA buffer if needed here
* not in the IRQ.
*
- * Called from the network code. The lock is not held at this
+ * Called from the network code. The lock is not held at this
* point.
*/
-
netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
{
unsigned long flags;
-
+
netif_stop_queue(c->netdevice);
- if(c->tx_next_skb)
+ if (c->tx_next_skb)
return NETDEV_TX_BUSY;
-
/* PC SPECIFIC - DMA limits */
-
- /*
- * If we will DMA the transmit and its gone over the ISA bus
+ /* If we will DMA the transmit and its gone over the ISA bus
* limit, then copy to the flip buffer
*/
-
- if(c->dma_tx && ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb)))
- {
- /*
- * Send the flip buffer, and flip the flippy bit.
+
+ if (c->dma_tx &&
+ ((unsigned long)(virt_to_bus(skb->data + skb->len)) >=
+ 16 * 1024 * 1024 || spans_boundary(skb))) {
+ /* Send the flip buffer, and flip the flippy bit.
* We don't care which is used when just so long as
* we never use the same buffer twice in a row. Since
* only one buffer can be going out at a time the other
* has to be safe.
*/
- c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
- c->tx_dma_used^=1; /* Flip temp buffer */
+ c->tx_next_ptr = c->tx_dma_buf[c->tx_dma_used];
+ c->tx_dma_used ^= 1; /* Flip temp buffer */
skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
+ } else {
+ c->tx_next_ptr = skb->data;
}
- else
- c->tx_next_ptr=skb->data;
RT_LOCK;
- c->tx_next_skb=skb;
+ c->tx_next_skb = skb;
RT_UNLOCK;
-
+
spin_lock_irqsave(c->lock, flags);
z8530_tx_begin(c);
spin_unlock_irqrestore(c->lock, flags);
-
+
return NETDEV_TX_OK;
}
-
EXPORT_SYMBOL(z8530_queue_xmit);
-/*
- * Module support
+/* Module support
*/
static const char banner[] __initconst =
KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";
diff --git a/drivers/net/wireguard/Makefile b/drivers/net/wireguard/Makefile
index fc52b2cb500b..dbe1f8514efc 100644
--- a/drivers/net/wireguard/Makefile
+++ b/drivers/net/wireguard/Makefile
@@ -1,5 +1,4 @@
-ccflags-y := -O3
-ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
+ccflags-y := -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt'
ccflags-$(CONFIG_WIREGUARD_DEBUG) += -DDEBUG
wireguard-y := main.o
wireguard-y += noise.o
diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
index 3725e9cd85f4..b7197e80f226 100644
--- a/drivers/net/wireguard/allowedips.c
+++ b/drivers/net/wireguard/allowedips.c
@@ -6,6 +6,8 @@
#include "allowedips.h"
#include "peer.h"
+static struct kmem_cache *node_cache;
+
static void swap_endian(u8 *dst, const u8 *src, u8 bits)
{
if (bits == 32) {
@@ -28,8 +30,11 @@ static void copy_and_assign_cidr(struct allowedips_node *node, const u8 *src,
node->bitlen = bits;
memcpy(node->bits, src, bits / 8U);
}
-#define CHOOSE_NODE(parent, key) \
- parent->bit[(key[parent->bit_at_a] >> parent->bit_at_b) & 1]
+
+static inline u8 choose(struct allowedips_node *node, const u8 *key)
+{
+ return (key[node->bit_at_a] >> node->bit_at_b) & 1;
+}
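
choose() is the open-coded replacement for the removed CHOOSE_NODE() macro: bit_at_a indexes a byte of the key, bit_at_b selects a bit within that byte, and the result, 0 or 1, is the child slot to descend into. A standalone illustration with hypothetical field values (the real ones are derived from the node's cidr in copy_and_assign_cidr()):

u8 key[4] = { 192, 168, 1, 128 };	/* hypothetical trie key */

/* With bit_at_a == 3 and bit_at_b == 6:
 *   (key[3] >> 6) & 1  ==  (128 >> 6) & 1  ==  2 & 1  ==  0
 * so the walk follows node->bit[0].
 */
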
static void push_rcu(struct allowedips_node **stack,
struct allowedips_node __rcu *p, unsigned int *len)
@@ -40,6 +45,11 @@ static void push_rcu(struct allowedips_node **stack,
}
}
+static void node_free_rcu(struct rcu_head *rcu)
+{
+ kmem_cache_free(node_cache, container_of(rcu, struct allowedips_node, rcu));
+}
+
static void root_free_rcu(struct rcu_head *rcu)
{
struct allowedips_node *node, *stack[128] = {
@@ -49,7 +59,7 @@ static void root_free_rcu(struct rcu_head *rcu)
while (len > 0 && (node = stack[--len])) {
push_rcu(stack, node->bit[0], &len);
push_rcu(stack, node->bit[1], &len);
- kfree(node);
+ kmem_cache_free(node_cache, node);
}
}
@@ -66,60 +76,6 @@ static void root_remove_peer_lists(struct allowedips_node *root)
}
}
-static void walk_remove_by_peer(struct allowedips_node __rcu **top,
- struct wg_peer *peer, struct mutex *lock)
-{
-#define REF(p) rcu_access_pointer(p)
-#define DEREF(p) rcu_dereference_protected(*(p), lockdep_is_held(lock))
-#define PUSH(p) ({ \
- WARN_ON(IS_ENABLED(DEBUG) && len >= 128); \
- stack[len++] = p; \
- })
-
- struct allowedips_node __rcu **stack[128], **nptr;
- struct allowedips_node *node, *prev;
- unsigned int len;
-
- if (unlikely(!peer || !REF(*top)))
- return;
-
- for (prev = NULL, len = 0, PUSH(top); len > 0; prev = node) {
- nptr = stack[len - 1];
- node = DEREF(nptr);
- if (!node) {
- --len;
- continue;
- }
- if (!prev || REF(prev->bit[0]) == node ||
- REF(prev->bit[1]) == node) {
- if (REF(node->bit[0]))
- PUSH(&node->bit[0]);
- else if (REF(node->bit[1]))
- PUSH(&node->bit[1]);
- } else if (REF(node->bit[0]) == prev) {
- if (REF(node->bit[1]))
- PUSH(&node->bit[1]);
- } else {
- if (rcu_dereference_protected(node->peer,
- lockdep_is_held(lock)) == peer) {
- RCU_INIT_POINTER(node->peer, NULL);
- list_del_init(&node->peer_list);
- if (!node->bit[0] || !node->bit[1]) {
- rcu_assign_pointer(*nptr, DEREF(
- &node->bit[!REF(node->bit[0])]));
- kfree_rcu(node, rcu);
- node = DEREF(nptr);
- }
- }
- --len;
- }
- }
-
-#undef REF
-#undef DEREF
-#undef PUSH
-}
-
static unsigned int fls128(u64 a, u64 b)
{
return a ? fls64(a) + 64U : fls64(b);
@@ -159,7 +115,7 @@ static struct allowedips_node *find_node(struct allowedips_node *trie, u8 bits,
found = node;
if (node->cidr == bits)
break;
- node = rcu_dereference_bh(CHOOSE_NODE(node, key));
+ node = rcu_dereference_bh(node->bit[choose(node, key)]);
}
return found;
}
@@ -191,8 +147,7 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
u8 cidr, u8 bits, struct allowedips_node **rnode,
struct mutex *lock)
{
- struct allowedips_node *node = rcu_dereference_protected(trie,
- lockdep_is_held(lock));
+ struct allowedips_node *node = rcu_dereference_protected(trie, lockdep_is_held(lock));
struct allowedips_node *parent = NULL;
bool exact = false;
@@ -202,13 +157,24 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key,
exact = true;
break;
}
- node = rcu_dereference_protected(CHOOSE_NODE(parent, key),
- lockdep_is_held(lock));
+ node = rcu_dereference_protected(parent->bit[choose(parent, key)], lockdep_is_held(lock));
}
*rnode = parent;
return exact;
}
+static inline void connect_node(struct allowedips_node **parent, u8 bit, struct allowedips_node *node)
+{
+ node->parent_bit_packed = (unsigned long)parent | bit;
+ rcu_assign_pointer(*parent, node);
+}
+
+static inline void choose_and_connect_node(struct allowedips_node *parent, struct allowedips_node *node)
+{
+ u8 bit = choose(parent, node->bits);
+ connect_node(&parent->bit[bit], bit, node);
+}
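
connect_node() is one half of the new O(1) removal scheme: besides installing the child pointer, it records in node->parent_bit_packed the address of the parent slot the node hangs from plus a two-bit tag (0 or 1 for bit[0]/bit[1], 2 for a trie root). A minimal sketch of the unpacking that wg_allowedips_remove_by_peer() performs further down, and the reason struct allowedips grows __aligned(4):

/* Recover the parent slot and the tag from the packed word. */
struct allowedips_node __rcu **slot =
	(struct allowedips_node __rcu **)(node->parent_bit_packed & ~3UL);
unsigned long tag = node->parent_bit_packed & 3UL;	/* 0, 1, or 2 (root) */
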
+
static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
u8 cidr, struct wg_peer *peer, struct mutex *lock)
{
@@ -218,13 +184,13 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
return -EINVAL;
if (!rcu_access_pointer(*trie)) {
- node = kzalloc(sizeof(*node), GFP_KERNEL);
+ node = kmem_cache_zalloc(node_cache, GFP_KERNEL);
if (unlikely(!node))
return -ENOMEM;
RCU_INIT_POINTER(node->peer, peer);
list_add_tail(&node->peer_list, &peer->allowedips_list);
copy_and_assign_cidr(node, key, cidr, bits);
- rcu_assign_pointer(*trie, node);
+ connect_node(trie, 2, node);
return 0;
}
if (node_placement(*trie, key, cidr, bits, &node, lock)) {
@@ -233,7 +199,7 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
return 0;
}
- newnode = kzalloc(sizeof(*newnode), GFP_KERNEL);
+ newnode = kmem_cache_zalloc(node_cache, GFP_KERNEL);
if (unlikely(!newnode))
return -ENOMEM;
RCU_INIT_POINTER(newnode->peer, peer);
@@ -243,10 +209,10 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
if (!node) {
down = rcu_dereference_protected(*trie, lockdep_is_held(lock));
} else {
- down = rcu_dereference_protected(CHOOSE_NODE(node, key),
- lockdep_is_held(lock));
+ const u8 bit = choose(node, key);
+ down = rcu_dereference_protected(node->bit[bit], lockdep_is_held(lock));
if (!down) {
- rcu_assign_pointer(CHOOSE_NODE(node, key), newnode);
+ connect_node(&node->bit[bit], bit, newnode);
return 0;
}
}
@@ -254,30 +220,29 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key,
parent = node;
if (newnode->cidr == cidr) {
- rcu_assign_pointer(CHOOSE_NODE(newnode, down->bits), down);
+ choose_and_connect_node(newnode, down);
if (!parent)
- rcu_assign_pointer(*trie, newnode);
+ connect_node(trie, 2, newnode);
else
- rcu_assign_pointer(CHOOSE_NODE(parent, newnode->bits),
- newnode);
- } else {
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (unlikely(!node)) {
- list_del(&newnode->peer_list);
- kfree(newnode);
- return -ENOMEM;
- }
- INIT_LIST_HEAD(&node->peer_list);
- copy_and_assign_cidr(node, newnode->bits, cidr, bits);
+ choose_and_connect_node(parent, newnode);
+ return 0;
+ }
- rcu_assign_pointer(CHOOSE_NODE(node, down->bits), down);
- rcu_assign_pointer(CHOOSE_NODE(node, newnode->bits), newnode);
- if (!parent)
- rcu_assign_pointer(*trie, node);
- else
- rcu_assign_pointer(CHOOSE_NODE(parent, node->bits),
- node);
+ node = kmem_cache_zalloc(node_cache, GFP_KERNEL);
+ if (unlikely(!node)) {
+ list_del(&newnode->peer_list);
+ kmem_cache_free(node_cache, newnode);
+ return -ENOMEM;
}
+ INIT_LIST_HEAD(&node->peer_list);
+ copy_and_assign_cidr(node, newnode->bits, cidr, bits);
+
+ choose_and_connect_node(node, down);
+ choose_and_connect_node(node, newnode);
+ if (!parent)
+ connect_node(trie, 2, node);
+ else
+ choose_and_connect_node(parent, node);
return 0;
}
@@ -335,9 +300,41 @@ int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip,
void wg_allowedips_remove_by_peer(struct allowedips *table,
struct wg_peer *peer, struct mutex *lock)
{
+ struct allowedips_node *node, *child, **parent_bit, *parent, *tmp;
+ bool free_parent;
+
+ if (list_empty(&peer->allowedips_list))
+ return;
++table->seq;
- walk_remove_by_peer(&table->root4, peer, lock);
- walk_remove_by_peer(&table->root6, peer, lock);
+ list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list) {
+ list_del_init(&node->peer_list);
+ RCU_INIT_POINTER(node->peer, NULL);
+ if (node->bit[0] && node->bit[1])
+ continue;
+ child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])],
+ lockdep_is_held(lock));
+ if (child)
+ child->parent_bit_packed = node->parent_bit_packed;
+ parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL);
+ *parent_bit = child;
+ parent = (void *)parent_bit -
+ offsetof(struct allowedips_node, bit[node->parent_bit_packed & 1]);
+ free_parent = !rcu_access_pointer(node->bit[0]) &&
+ !rcu_access_pointer(node->bit[1]) &&
+ (node->parent_bit_packed & 3) <= 1 &&
+ !rcu_access_pointer(parent->peer);
+ if (free_parent)
+ child = rcu_dereference_protected(
+ parent->bit[!(node->parent_bit_packed & 1)],
+ lockdep_is_held(lock));
+ call_rcu(&node->rcu, node_free_rcu);
+ if (!free_parent)
+ continue;
+ if (child)
+ child->parent_bit_packed = parent->parent_bit_packed;
+ *(struct allowedips_node **)(parent->parent_bit_packed & ~3UL) = child;
+ call_rcu(&parent->rcu, node_free_rcu);
+ }
}
int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr)
@@ -374,4 +371,16 @@ struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
return NULL;
}
+int __init wg_allowedips_slab_init(void)
+{
+ node_cache = KMEM_CACHE(allowedips_node, 0);
+ return node_cache ? 0 : -ENOMEM;
+}
+
+void wg_allowedips_slab_uninit(void)
+{
+ rcu_barrier();
+ kmem_cache_destroy(node_cache);
+}
+
#include "selftest/allowedips.c"
diff --git a/drivers/net/wireguard/allowedips.h b/drivers/net/wireguard/allowedips.h
index e5c83cafcef4..2346c797eb4d 100644
--- a/drivers/net/wireguard/allowedips.h
+++ b/drivers/net/wireguard/allowedips.h
@@ -15,14 +15,11 @@ struct wg_peer;
struct allowedips_node {
struct wg_peer __rcu *peer;
struct allowedips_node __rcu *bit[2];
- /* While it may seem scandalous that we waste space for v4,
- * we're alloc'ing to the nearest power of 2 anyway, so this
- * doesn't actually make a difference.
- */
- u8 bits[16] __aligned(__alignof(u64));
u8 cidr, bit_at_a, bit_at_b, bitlen;
+ u8 bits[16] __aligned(__alignof(u64));
- /* Keep rarely used list at bottom to be beyond cache line. */
+ /* Keep rarely used members at bottom to be beyond cache line. */
+ unsigned long parent_bit_packed;
union {
struct list_head peer_list;
struct rcu_head rcu;
@@ -33,7 +30,7 @@ struct allowedips {
struct allowedips_node __rcu *root4;
struct allowedips_node __rcu *root6;
u64 seq;
-};
+} __aligned(4); /* We pack the lower 2 bits of &root, but m68k only gives 16-bit alignment. */
void wg_allowedips_init(struct allowedips *table);
void wg_allowedips_free(struct allowedips *table, struct mutex *mutex);
@@ -56,4 +53,7 @@ struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table,
bool wg_allowedips_selftest(void);
#endif
+int wg_allowedips_slab_init(void);
+void wg_allowedips_slab_uninit(void);
+
#endif /* _WG_ALLOWEDIPS_H */
diff --git a/drivers/net/wireguard/main.c b/drivers/net/wireguard/main.c
index 7a7d5f1a80fc..75dbe77b0b4b 100644
--- a/drivers/net/wireguard/main.c
+++ b/drivers/net/wireguard/main.c
@@ -21,13 +21,22 @@ static int __init mod_init(void)
{
int ret;
+ ret = wg_allowedips_slab_init();
+ if (ret < 0)
+ goto err_allowedips;
+
#ifdef DEBUG
+ ret = -ENOTRECOVERABLE;
if (!wg_allowedips_selftest() || !wg_packet_counter_selftest() ||
!wg_ratelimiter_selftest())
- return -ENOTRECOVERABLE;
+ goto err_peer;
#endif
wg_noise_init();
+ ret = wg_peer_init();
+ if (ret < 0)
+ goto err_peer;
+
ret = wg_device_init();
if (ret < 0)
goto err_device;
@@ -44,6 +53,10 @@ static int __init mod_init(void)
err_netlink:
wg_device_uninit();
err_device:
+ wg_peer_uninit();
+err_peer:
+ wg_allowedips_slab_uninit();
+err_allowedips:
return ret;
}
@@ -51,6 +64,8 @@ static void __exit mod_exit(void)
{
wg_genetlink_uninit();
wg_device_uninit();
+ wg_peer_uninit();
+ wg_allowedips_slab_uninit();
}
module_init(mod_init);
diff --git a/drivers/net/wireguard/peer.c b/drivers/net/wireguard/peer.c
index cd5cb0292cb6..1acd00ab2fbc 100644
--- a/drivers/net/wireguard/peer.c
+++ b/drivers/net/wireguard/peer.c
@@ -15,6 +15,7 @@
#include <linux/rcupdate.h>
#include <linux/list.h>
+static struct kmem_cache *peer_cache;
static atomic64_t peer_counter = ATOMIC64_INIT(0);
struct wg_peer *wg_peer_create(struct wg_device *wg,
@@ -29,10 +30,10 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
if (wg->num_peers >= MAX_PEERS_PER_DEVICE)
return ERR_PTR(ret);
- peer = kzalloc(sizeof(*peer), GFP_KERNEL);
+ peer = kmem_cache_zalloc(peer_cache, GFP_KERNEL);
if (unlikely(!peer))
return ERR_PTR(ret);
- if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))
+ if (unlikely(dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)))
goto err;
peer->device = wg;
@@ -64,7 +65,7 @@ struct wg_peer *wg_peer_create(struct wg_device *wg,
return peer;
err:
- kfree(peer);
+ kmem_cache_free(peer_cache, peer);
return ERR_PTR(ret);
}
@@ -88,7 +89,7 @@ static void peer_make_dead(struct wg_peer *peer)
/* Mark as dead, so that we don't allow jumping contexts after. */
WRITE_ONCE(peer->is_dead, true);
- /* The caller must now synchronize_rcu() for this to take effect. */
+ /* The caller must now synchronize_net() for this to take effect. */
}
static void peer_remove_after_dead(struct wg_peer *peer)
@@ -160,7 +161,7 @@ void wg_peer_remove(struct wg_peer *peer)
lockdep_assert_held(&peer->device->device_update_lock);
peer_make_dead(peer);
- synchronize_rcu();
+ synchronize_net();
peer_remove_after_dead(peer);
}
@@ -178,7 +179,7 @@ void wg_peer_remove_all(struct wg_device *wg)
peer_make_dead(peer);
list_add_tail(&peer->peer_list, &dead_peers);
}
- synchronize_rcu();
+ synchronize_net();
list_for_each_entry_safe(peer, temp, &dead_peers, peer_list)
peer_remove_after_dead(peer);
}
@@ -193,7 +194,8 @@ static void rcu_release(struct rcu_head *rcu)
/* The final zeroing takes care of clearing any remaining handshake key
* material and other potentially sensitive information.
*/
- kfree_sensitive(peer);
+ memzero_explicit(peer, sizeof(*peer));
+ kmem_cache_free(peer_cache, peer);
}
static void kref_release(struct kref *refcount)
@@ -225,3 +227,14 @@ void wg_peer_put(struct wg_peer *peer)
return;
kref_put(&peer->refcount, kref_release);
}
+
+int __init wg_peer_init(void)
+{
+ peer_cache = KMEM_CACHE(wg_peer, 0);
+ return peer_cache ? 0 : -ENOMEM;
+}
+
+void wg_peer_uninit(void)
+{
+ kmem_cache_destroy(peer_cache);
+}
diff --git a/drivers/net/wireguard/peer.h b/drivers/net/wireguard/peer.h
index 8d53b687a1d1..76e4d3128ad4 100644
--- a/drivers/net/wireguard/peer.h
+++ b/drivers/net/wireguard/peer.h
@@ -80,4 +80,7 @@ void wg_peer_put(struct wg_peer *peer);
void wg_peer_remove(struct wg_peer *peer);
void wg_peer_remove_all(struct wg_device *wg);
+int wg_peer_init(void);
+void wg_peer_uninit(void);
+
#endif /* _WG_PEER_H */
diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c
index 846db14cb046..e173204ae7d7 100644
--- a/drivers/net/wireguard/selftest/allowedips.c
+++ b/drivers/net/wireguard/selftest/allowedips.c
@@ -19,32 +19,22 @@
#include <linux/siphash.h>
-static __init void swap_endian_and_apply_cidr(u8 *dst, const u8 *src, u8 bits,
- u8 cidr)
-{
- swap_endian(dst, src, bits);
- memset(dst + (cidr + 7) / 8, 0, bits / 8 - (cidr + 7) / 8);
- if (cidr)
- dst[(cidr + 7) / 8 - 1] &= ~0U << ((8 - (cidr % 8)) % 8);
-}
-
static __init void print_node(struct allowedips_node *node, u8 bits)
{
char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n";
- char *fmt_declaration = KERN_DEBUG
- "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
+ char *fmt_declaration = KERN_DEBUG "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n";
+ u8 ip1[16], ip2[16], cidr1, cidr2;
char *style = "dotted";
- u8 ip1[16], ip2[16];
u32 color = 0;
+ if (node == NULL)
+ return;
if (bits == 32) {
fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n";
- fmt_declaration = KERN_DEBUG
- "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
+ fmt_declaration = KERN_DEBUG "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n";
} else if (bits == 128) {
fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n";
- fmt_declaration = KERN_DEBUG
- "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
+ fmt_declaration = KERN_DEBUG "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n";
}
if (node->peer) {
hsiphash_key_t key = { { 0 } };
@@ -55,24 +45,20 @@ static __init void print_node(struct allowedips_node *node, u8 bits)
hsiphash_1u32(0xabad1dea, &key) % 200;
style = "bold";
}
- swap_endian_and_apply_cidr(ip1, node->bits, bits, node->cidr);
- printk(fmt_declaration, ip1, node->cidr, style, color);
+ wg_allowedips_read_node(node, ip1, &cidr1);
+ printk(fmt_declaration, ip1, cidr1, style, color);
if (node->bit[0]) {
- swap_endian_and_apply_cidr(ip2,
- rcu_dereference_raw(node->bit[0])->bits, bits,
- node->cidr);
- printk(fmt_connection, ip1, node->cidr, ip2,
- rcu_dereference_raw(node->bit[0])->cidr);
- print_node(rcu_dereference_raw(node->bit[0]), bits);
+ wg_allowedips_read_node(rcu_dereference_raw(node->bit[0]), ip2, &cidr2);
+ printk(fmt_connection, ip1, cidr1, ip2, cidr2);
}
if (node->bit[1]) {
- swap_endian_and_apply_cidr(ip2,
- rcu_dereference_raw(node->bit[1])->bits,
- bits, node->cidr);
- printk(fmt_connection, ip1, node->cidr, ip2,
- rcu_dereference_raw(node->bit[1])->cidr);
- print_node(rcu_dereference_raw(node->bit[1]), bits);
+ wg_allowedips_read_node(rcu_dereference_raw(node->bit[1]), ip2, &cidr2);
+ printk(fmt_connection, ip1, cidr1, ip2, cidr2);
}
+ if (node->bit[0])
+ print_node(rcu_dereference_raw(node->bit[0]), bits);
+ if (node->bit[1])
+ print_node(rcu_dereference_raw(node->bit[1]), bits);
}
static __init void print_tree(struct allowedips_node __rcu *top, u8 bits)
@@ -121,8 +107,8 @@ static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr)
{
union nf_inet_addr mask;
- memset(&mask, 0x00, 128 / 8);
- memset(&mask, 0xff, cidr / 8);
+ memset(&mask, 0, sizeof(mask));
+ memset(&mask.all, 0xff, cidr / 8);
if (cidr % 32)
mask.all[cidr / 32] = (__force u32)htonl(
(0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL);
@@ -149,42 +135,36 @@ horrible_mask_self(struct horrible_allowedips_node *node)
}
static __init inline bool
-horrible_match_v4(const struct horrible_allowedips_node *node,
- struct in_addr *ip)
+horrible_match_v4(const struct horrible_allowedips_node *node, struct in_addr *ip)
{
return (ip->s_addr & node->mask.ip) == node->ip.ip;
}
static __init inline bool
-horrible_match_v6(const struct horrible_allowedips_node *node,
- struct in6_addr *ip)
+horrible_match_v6(const struct horrible_allowedips_node *node, struct in6_addr *ip)
{
- return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) ==
- node->ip.ip6[0] &&
- (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) ==
- node->ip.ip6[1] &&
- (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) ==
- node->ip.ip6[2] &&
+ return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == node->ip.ip6[0] &&
+ (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == node->ip.ip6[1] &&
+ (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == node->ip.ip6[2] &&
(ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3];
}
static __init void
-horrible_insert_ordered(struct horrible_allowedips *table,
- struct horrible_allowedips_node *node)
+horrible_insert_ordered(struct horrible_allowedips *table, struct horrible_allowedips_node *node)
{
struct horrible_allowedips_node *other = NULL, *where = NULL;
u8 my_cidr = horrible_mask_to_cidr(node->mask);
hlist_for_each_entry(other, &table->head, table) {
- if (!memcmp(&other->mask, &node->mask,
- sizeof(union nf_inet_addr)) &&
- !memcmp(&other->ip, &node->ip,
- sizeof(union nf_inet_addr)) &&
- other->ip_version == node->ip_version) {
+ if (other->ip_version == node->ip_version &&
+ !memcmp(&other->mask, &node->mask, sizeof(union nf_inet_addr)) &&
+ !memcmp(&other->ip, &node->ip, sizeof(union nf_inet_addr))) {
other->value = node->value;
kfree(node);
return;
}
+ }
+ hlist_for_each_entry(other, &table->head, table) {
where = other;
if (horrible_mask_to_cidr(other->mask) <= my_cidr)
break;
@@ -201,8 +181,7 @@ static __init int
horrible_allowedips_insert_v4(struct horrible_allowedips *table,
struct in_addr *ip, u8 cidr, void *value)
{
- struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
- GFP_KERNEL);
+ struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
if (unlikely(!node))
return -ENOMEM;
@@ -219,8 +198,7 @@ static __init int
horrible_allowedips_insert_v6(struct horrible_allowedips *table,
struct in6_addr *ip, u8 cidr, void *value)
{
- struct horrible_allowedips_node *node = kzalloc(sizeof(*node),
- GFP_KERNEL);
+ struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
if (unlikely(!node))
return -ENOMEM;
@@ -234,39 +212,43 @@ horrible_allowedips_insert_v6(struct horrible_allowedips *table,
}
static __init void *
-horrible_allowedips_lookup_v4(struct horrible_allowedips *table,
- struct in_addr *ip)
+horrible_allowedips_lookup_v4(struct horrible_allowedips *table, struct in_addr *ip)
{
struct horrible_allowedips_node *node;
- void *ret = NULL;
hlist_for_each_entry(node, &table->head, table) {
- if (node->ip_version != 4)
- continue;
- if (horrible_match_v4(node, ip)) {
- ret = node->value;
- break;
- }
+ if (node->ip_version == 4 && horrible_match_v4(node, ip))
+ return node->value;
}
- return ret;
+ return NULL;
}
static __init void *
-horrible_allowedips_lookup_v6(struct horrible_allowedips *table,
- struct in6_addr *ip)
+horrible_allowedips_lookup_v6(struct horrible_allowedips *table, struct in6_addr *ip)
{
struct horrible_allowedips_node *node;
- void *ret = NULL;
hlist_for_each_entry(node, &table->head, table) {
- if (node->ip_version != 6)
+ if (node->ip_version == 6 && horrible_match_v6(node, ip))
+ return node->value;
+ }
+ return NULL;
+}
+
+static __init void
+horrible_allowedips_remove_by_value(struct horrible_allowedips *table, void *value)
+{
+ struct horrible_allowedips_node *node;
+ struct hlist_node *h;
+
+ hlist_for_each_entry_safe(node, h, &table->head, table) {
+ if (node->value != value)
continue;
- if (horrible_match_v6(node, ip)) {
- ret = node->value;
- break;
- }
+ hlist_del(&node->table);
+ kfree(node);
}
- return ret;
}
static __init bool randomized_test(void)
@@ -296,6 +278,7 @@ static __init bool randomized_test(void)
goto free;
}
kref_init(&peers[i]->refcount);
+ INIT_LIST_HEAD(&peers[i]->allowedips_list);
}
mutex_lock(&mutex);
@@ -333,7 +316,7 @@ static __init bool randomized_test(void)
if (wg_allowedips_insert_v4(&t,
(struct in_addr *)mutated,
cidr, peer, &mutex) < 0) {
- pr_err("allowedips random malloc: FAIL\n");
+ pr_err("allowedips random self-test malloc: FAIL\n");
goto free_locked;
}
if (horrible_allowedips_insert_v4(&h,
@@ -396,23 +379,33 @@ static __init bool randomized_test(void)
print_tree(t.root6, 128);
}
- for (i = 0; i < NUM_QUERIES; ++i) {
- prandom_bytes(ip, 4);
- if (lookup(t.root4, 32, ip) !=
- horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
- pr_err("allowedips random self-test: FAIL\n");
- goto free;
+ for (j = 0;; ++j) {
+ for (i = 0; i < NUM_QUERIES; ++i) {
+ prandom_bytes(ip, 4);
+ if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) {
+ pr_err("allowedips random v4 self-test: FAIL\n");
+ goto free;
+ }
+ prandom_bytes(ip, 16);
+ if (lookup(t.root6, 128, ip) != horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
+ pr_err("allowedips random v6 self-test: FAIL\n");
+ goto free;
+ }
}
+ if (j >= NUM_PEERS)
+ break;
+ mutex_lock(&mutex);
+ wg_allowedips_remove_by_peer(&t, peers[j], &mutex);
+ mutex_unlock(&mutex);
+ horrible_allowedips_remove_by_value(&h, peers[j]);
}
- for (i = 0; i < NUM_QUERIES; ++i) {
- prandom_bytes(ip, 16);
- if (lookup(t.root6, 128, ip) !=
- horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) {
- pr_err("allowedips random self-test: FAIL\n");
- goto free;
- }
+ if (t.root4 || t.root6) {
+ pr_err("allowedips random self-test removal: FAIL\n");
+ goto free;
}
+
ret = true;
free:
diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
index d9ad850daa79..8c496b747108 100644
--- a/drivers/net/wireguard/socket.c
+++ b/drivers/net/wireguard/socket.c
@@ -430,7 +430,7 @@ void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
if (new4)
wg->incoming_port = ntohs(inet_sk(new4)->inet_sport);
mutex_unlock(&wg->socket_update_lock);
- synchronize_rcu();
+ synchronize_net();
sock_free(old4);
sock_free(old6);
}
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index 869524852fba..ab8f77ae5e66 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -442,14 +442,7 @@ static int ath10k_ahb_resource_init(struct ath10k *ar)
pdev = ar_ahb->pdev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ath10k_err(ar, "failed to get memory resource\n");
- ret = -ENXIO;
- goto out;
- }
-
- ar_ahb->mem = devm_ioremap_resource(&pdev->dev, res);
+ ar_ahb->mem = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(ar_ahb->mem)) {
ath10k_err(ar, "mem ioremap error\n");
ret = PTR_ERR(ar_ahb->mem);
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 648ed36f845f..5aeff2d9f6cf 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -301,7 +301,7 @@ struct ath10k_fw_stats_pdev {
s32 underrun;
u32 hw_paused;
s32 tx_abort;
- s32 mpdus_requed;
+ s32 mpdus_requeued;
u32 tx_ko;
u32 data_rc;
u32 self_triggers;
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index fd052f6ed019..39378e3f9b2b 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1105,7 +1105,7 @@ static const char ath10k_gstrings_stats[][ETH_GSTRING_LEN] = {
"d_tx_ppdu_reaped",
"d_tx_fifo_underrun",
"d_tx_ppdu_abort",
- "d_tx_mpdu_requed",
+ "d_tx_mpdu_requeued",
"d_tx_excessive_retries",
"d_tx_hw_rate",
"d_tx_dropped_sw_retries",
@@ -1205,7 +1205,7 @@ void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
data[i++] = pdev_stats->hw_reaped;
data[i++] = pdev_stats->underrun;
data[i++] = pdev_stats->tx_abort;
- data[i++] = pdev_stats->mpdus_requed;
+ data[i++] = pdev_stats->mpdus_requeued;
data[i++] = pdev_stats->tx_ko;
data[i++] = pdev_stats->data_rc;
data[i++] = pdev_stats->sw_retry_failure;
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 956157946106..ec689e3ce48a 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -845,6 +845,7 @@ enum htt_security_types {
#define ATH10K_HTT_TXRX_PEER_SECURITY_MAX 2
#define ATH10K_TXRX_NUM_EXT_TIDS 19
+#define ATH10K_TXRX_NON_QOS_TID 16
enum htt_security_flags {
#define HTT_SECURITY_TYPE_MASK 0x7F
@@ -1282,8 +1283,8 @@ struct htt_dbg_stats_wal_tx_stats {
/* Num PPDUs cleaned up in TX abort */
__le32 tx_abort;
- /* Num MPDUs requed by SW */
- __le32 mpdus_requed;
+ /* Num MPDUs requeued by SW */
+ __le32 mpdus_requeued;
/* excessive retries */
__le32 tx_ko;
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 1a08156d5011..adbaeb67eedf 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -1746,16 +1746,95 @@ static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
}
+static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
+ u16 offset,
+ enum htt_rx_mpdu_encrypt_type enctype)
+{
+ struct ieee80211_hdr *hdr;
+ u64 pn = 0;
+ u8 *ehdr;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + offset);
+ ehdr = skb->data + offset + ieee80211_hdrlen(hdr->frame_control);
+
+ if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) {
+ pn = ehdr[0];
+ pn |= (u64)ehdr[1] << 8;
+ pn |= (u64)ehdr[4] << 16;
+ pn |= (u64)ehdr[5] << 24;
+ pn |= (u64)ehdr[6] << 32;
+ pn |= (u64)ehdr[7] << 40;
+ }
+ return pn;
+}
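
The byte picks in ath10k_htt_rx_h_get_pn() follow the CCMP header layout, in which the 48-bit packet number is split around the reserved and ExtIV/key-ID octets (PN0 PN1 rsvd keyid PN2 PN3 PN4 PN5). A self-contained decode using the same shifts; the example bytes are hypothetical:

static u64 ccmp_hdr_to_pn(const u8 ehdr[8])
{
	return (u64)ehdr[0]       | (u64)ehdr[1] << 8  |
	       (u64)ehdr[4] << 16 | (u64)ehdr[5] << 24 |
	       (u64)ehdr[6] << 32 | (u64)ehdr[7] << 40;
}

/* e.g. { 0x01, 0x02, 0x00, 0x20, 0x03, 0x00, 0x00, 0x00 } decodes to PN 0x030201 */
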
+
+static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar,
+ struct sk_buff *skb,
+ u16 offset)
+{
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + offset);
+ return !is_multicast_ether_addr(hdr->addr1);
+}
+
+static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar,
+ struct sk_buff *skb,
+ u16 peer_id,
+ u16 offset,
+ enum htt_rx_mpdu_encrypt_type enctype)
+{
+ struct ath10k_peer *peer;
+ union htt_rx_pn_t *last_pn, new_pn = {0};
+ struct ieee80211_hdr *hdr;
+ u8 tid, frag_number;
+ u32 seq;
+
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n");
+ return false;
+ }
+
+ hdr = (struct ieee80211_hdr *)(skb->data + offset);
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tid = ieee80211_get_tid(hdr);
+ else
+ tid = ATH10K_TXRX_NON_QOS_TID;
+
+ last_pn = &peer->frag_tids_last_pn[tid];
+ new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, offset, enctype);
+ frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
+ seq = (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+
+ if (frag_number == 0) {
+ last_pn->pn48 = new_pn.pn48;
+ peer->frag_tids_seq[tid] = seq;
+ } else {
+ if (seq != peer->frag_tids_seq[tid])
+ return false;
+
+ if (new_pn.pn48 != last_pn->pn48 + 1)
+ return false;
+
+ last_pn->pn48 = new_pn.pn48;
+ }
+
+ return true;
+}
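
ath10k_htt_rx_h_frag_pn_check() encodes the 802.11 rule that defeats fragment-reassembly attacks: fragment 0 pins the PN and sequence number for the MPDU, and every later fragment must carry the same sequence number with a PN exactly one past its predecessor. The same rule as a standalone model, kernel integer types assumed and frag_pn_ok() being a hypothetical name:

static bool frag_pn_ok(u64 *last_pn, u32 *last_seq, u8 frag_no, u32 seq, u64 pn)
{
	if (frag_no == 0) {		/* first fragment pins the state */
		*last_pn = pn;
		*last_seq = seq;
		return true;
	}
	if (seq != *last_seq)		/* fragment of some other MPDU */
		return false;
	if (pn != *last_pn + 1)		/* PN must advance by exactly one */
		return false;
	*last_pn = pn;
	return true;
}
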
+
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
struct sk_buff_head *amsdu,
struct ieee80211_rx_status *status,
bool fill_crypt_header,
u8 *rx_hdr,
- enum ath10k_pkt_rx_err *err)
+ enum ath10k_pkt_rx_err *err,
+ u16 peer_id,
+ bool frag)
{
struct sk_buff *first;
struct sk_buff *last;
- struct sk_buff *msdu;
+ struct sk_buff *msdu, *temp;
struct htt_rx_desc *rxd;
struct ieee80211_hdr *hdr;
enum htt_rx_mpdu_encrypt_type enctype;
@@ -1768,6 +1847,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
bool is_decrypted;
bool is_mgmt;
u32 attention;
+ bool frag_pn_check = true, multicast_check = true;
if (skb_queue_empty(amsdu))
return;
@@ -1866,7 +1946,37 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
}
skb_queue_walk(amsdu, msdu) {
+ if (frag && !fill_crypt_header && is_decrypted &&
+ enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
+ frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar,
+ msdu,
+ peer_id,
+ 0,
+ enctype);
+
+ if (frag)
+ multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar,
+ msdu,
+ 0);
+
+ if (!frag_pn_check || !multicast_check) {
+ /* Discard the fragment with invalid PN or multicast DA
+ */
+ temp = msdu->prev;
+ __skb_unlink(msdu, amsdu);
+ dev_kfree_skb_any(msdu);
+ msdu = temp;
+ frag_pn_check = true;
+ multicast_check = true;
+ continue;
+ }
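
One subtlety in the discard path just above: skb_queue_walk() advances through msdu->next, which __skb_unlink() poisons, so the loop saves ->prev before unlinking and resumes from there. The pattern in miniature, with should_drop() a hypothetical predicate:

skb_queue_walk(amsdu, msdu) {
	if (should_drop(msdu)) {
		struct sk_buff *prev = msdu->prev;

		__skb_unlink(msdu, amsdu);	/* invalidates msdu's links */
		dev_kfree_skb_any(msdu);
		msdu = prev;	/* the walk's next step lands on prev->next */
		continue;
	}
	/* normal per-MSDU processing */
}
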
+
ath10k_htt_rx_h_csum_offload(msdu);
+
+ if (frag && !fill_crypt_header &&
+ enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+ status->flag &= ~RX_FLAG_MMIC_STRIPPED;
+
ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
is_decrypted);
@@ -1884,6 +1994,11 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
hdr = (void *)msdu->data;
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+
+ if (frag && !fill_crypt_header &&
+ enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+ status->flag &= ~RX_FLAG_IV_STRIPPED &
+ ~RX_FLAG_MMIC_STRIPPED;
}
}
@@ -1991,14 +2106,62 @@ static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
ath10k_unchain_msdu(amsdu, unchain_cnt);
}
+static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
+ struct sk_buff_head *amsdu)
+{
+ u8 *subframe_hdr;
+ struct sk_buff *first;
+ bool is_first, is_last;
+ struct htt_rx_desc *rxd;
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len, crypto_len;
+ enum htt_rx_mpdu_encrypt_type enctype;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
+
+ first = skb_peek(amsdu);
+
+ rxd = (void *)first->data - sizeof(*rxd);
+ hdr = (void *)rxd->rx_hdr_status;
+
+ is_first = !!(rxd->msdu_end.common.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
+ is_last = !!(rxd->msdu_end.common.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
+
+ /* Return in case of non-aggregated msdu */
+ if (is_first && is_last)
+ return true;
+
+ /* First msdu flag is not set for the first msdu of the list */
+ if (!is_first)
+ return false;
+
+ enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+
+ subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) +
+ crypto_len;
+
+ /* Validate that the amsdu has a proper first subframe.
+ * There is a chance that a single msdu is received as an amsdu when
+ * the unauthenticated amsdu-present flag of a QoS header
+ * gets flipped in non-SPP A-MSDUs; in such cases the first
+ * subframe carries an llc/snap header in place of a valid DA.
+ * Return false if the DA matches the rfc1042 pattern.
+ */
+ if (ether_addr_equal(subframe_hdr, rfc1042_header))
+ return false;
+
+ return true;
+}
+
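The validation above walks past the 802.11 header (rounded up to the decap alignment) and the crypto IV to reach the first A-MSDU subframe, then rejects the MPDU if the destination-address slot holds an RFC 1042 LLC/SNAP header instead of a MAC address, the signature of a plain MSDU whose A-MSDU-present bit was flipped. A hedged sketch of just that comparison; the 6-byte constant mirrors the kernel's rfc1042_header, while the helper name and offsets are illustrative:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* RFC 1042 LLC/SNAP header prefix: AA AA 03 00 00 00. */
static const uint8_t rfc1042[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

/* Round x up to a multiple of align (align is a power of two). */
static size_t round_up_pow2(size_t x, size_t align)
{
	return (x + align - 1) & ~(align - 1);
}

/* Reject the A-MSDU if what should be the first subframe's DA is
 * actually an LLC/SNAP header, i.e. the frame is really a single
 * MSDU whose "A-MSDU present" QoS bit was tampered with.
 */
static bool first_subframe_ok(const uint8_t *hdr80211, size_t hdr_len,
			      size_t crypto_len, size_t align)
{
	const uint8_t *subframe = hdr80211 +
				  round_up_pow2(hdr_len, align) + crypto_len;

	return memcmp(subframe, rfc1042, sizeof(rfc1042)) != 0;
}

int main(void)
{
	uint8_t frame[64] = { 0 };

	/* Place an LLC/SNAP header where the first subframe's DA would
	 * sit: the spoofed single-MSDU case, which must be rejected.
	 */
	memcpy(frame + 24, rfc1042, sizeof(rfc1042));
	printf("valid=%d\n", first_subframe_ok(frame, 24, 0, 4));
	return 0;
}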
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
struct sk_buff_head *amsdu,
struct ieee80211_rx_status *rx_status)
{
- /* FIXME: It might be a good idea to do some fuzzy-testing to drop
- * invalid/dangerous frames.
- */
-
if (!rx_status->freq) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
return false;
@@ -2009,6 +2172,11 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
return false;
}
+ if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n");
+ return false;
+ }
+
return true;
}
@@ -2071,7 +2239,8 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
- ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err, 0,
+ false);
msdus_to_queue = skb_queue_len(&amsdu);
ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
@@ -2204,6 +2373,11 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
fw_desc = &rx->fw_desc;
rx_desc_len = fw_desc->len;
+ if (fw_desc->u.bits.discard) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt discard mpdu\n");
+ goto err;
+ }
+
/* I have not yet seen any case where num_mpdu_ranges > 1.
* qcacld does not seem to handle that case either, so we introduce the
* same limitation here as well.
@@ -2509,6 +2683,13 @@ static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len);
rx_desc_info = __le32_to_cpu(rx_desc->info);
+ hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);
+
+ if (is_multicast_ether_addr(hdr->addr1)) {
+ /* Discard the fragment with multicast DA */
+ goto err;
+ }
+
if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) {
spin_unlock_bh(&ar->data_lock);
return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
@@ -2516,8 +2697,6 @@ static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
HTT_RX_NON_TKIP_MIC);
}
- hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);
-
if (ieee80211_has_retry(hdr->frame_control))
goto err;
@@ -3027,7 +3206,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
- NULL);
+ NULL, peer_id, frag);
ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
break;
case -EAGAIN:
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 5ce4f8d038b9..c272b290fa73 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -5592,6 +5592,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
if (arvif->nohwcrypt &&
!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ ret = -EINVAL;
ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
goto err;
}
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index e7fde635e0ee..71878ab35b93 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -3685,8 +3685,10 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
if (bus_params.chip_id != 0xffffffff) {
if (!ath10k_pci_chip_is_supported(pdev->device,
- bus_params.chip_id))
+ bus_params.chip_id)) {
+ ret = -ENODEV;
goto err_unsupported;
+ }
}
}
@@ -3697,11 +3699,15 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
}
bus_params.chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
- if (bus_params.chip_id == 0xffffffff)
+ if (bus_params.chip_id == 0xffffffff) {
+ ret = -ENODEV;
goto err_unsupported;
+ }
- if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id))
- goto err_free_irq;
+ if (!ath10k_pci_chip_is_supported(pdev->device, bus_params.chip_id)) {
+ ret = -ENODEV;
+ goto err_unsupported;
+ }
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 862d0901c5b8..cf64898b9447 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -235,7 +235,6 @@ u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe);
void ath10k_pci_hif_power_down(struct ath10k *ar);
int ath10k_pci_alloc_pipes(struct ath10k *ar);
void ath10k_pci_free_pipes(struct ath10k *ar);
-void ath10k_pci_free_pipes(struct ath10k *ar);
void ath10k_pci_rx_replenish_retry(struct timer_list *t);
void ath10k_pci_ce_deinit(struct ath10k *ar);
void ath10k_pci_init_napi(struct ath10k *ar);
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index f2b6bf8f0d60..705b6295e466 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -1282,7 +1282,19 @@ struct fw_rx_desc_base {
#define FW_RX_DESC_UDP (1 << 6)
struct fw_rx_desc_hl {
- u8 info0;
+ union {
+ struct {
+ u8 discard:1,
+ forward:1,
+ any_err:1,
+ dup_err:1,
+ reserved:1,
+ inspect:1,
+ extension:2;
+ } bits;
+ u8 info0;
+ } u;
+
u8 version;
u8 len;
u8 flags;
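The new union lets the HL RX path test fw_desc->u.bits.discard directly while keeping info0 available for raw access. C bit-field layout within a byte is implementation-defined, which is why mask macros are the more common kernel idiom; a small userspace sketch contrasting the two views (the union below is a stand-in for fw_rx_desc_hl, and the bit positions assume the usual little-endian GCC layout):

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the fw_rx_desc_hl flags byte. Bit-field
 * ordering within a byte is implementation-defined, so portable code
 * often prefers explicit masks.
 */
union hl_flags {
	struct {
		uint8_t discard:1,
			forward:1,
			any_err:1,
			dup_err:1,
			reserved:1,
			inspect:1,
			extension:2;
	} bits;
	uint8_t info0;
};

#define HL_DISCARD_MASK 0x01	/* discard is bit 0 on this layout */

int main(void)
{
	union hl_flags f = { .info0 = 0x05 };	/* discard + any_err set */

	printf("bitfield view: discard=%u any_err=%u\n",
	       f.bits.discard, f.bits.any_err);
	printf("mask view:     discard=%u\n",
	       !!(f.info0 & HL_DISCARD_MASK));
	return 0;
}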
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index d48b922215eb..f42bf2c8f9e7 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -2867,7 +2867,7 @@ void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
dst->underrun = __le32_to_cpu(src->underrun);
dst->tx_abort = __le32_to_cpu(src->tx_abort);
- dst->mpdus_requed = __le32_to_cpu(src->mpdus_requed);
+ dst->mpdus_requeued = __le32_to_cpu(src->mpdus_requeued);
dst->tx_ko = __le32_to_cpu(src->tx_ko);
dst->data_rc = __le32_to_cpu(src->data_rc);
dst->self_triggers = __le32_to_cpu(src->self_triggers);
@@ -2895,7 +2895,7 @@ ath10k_wmi_10_4_pull_pdev_stats_tx(const struct wmi_10_4_pdev_stats_tx *src,
dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
dst->underrun = __le32_to_cpu(src->underrun);
dst->tx_abort = __le32_to_cpu(src->tx_abort);
- dst->mpdus_requed = __le32_to_cpu(src->mpdus_requed);
+ dst->mpdus_requeued = __le32_to_cpu(src->mpdus_requeued);
dst->tx_ko = __le32_to_cpu(src->tx_ko);
dst->data_rc = __le32_to_cpu(src->data_rc);
dst->self_triggers = __le32_to_cpu(src->self_triggers);
@@ -8270,7 +8270,7 @@ ath10k_wmi_fw_pdev_tx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PPDUs cleaned", pdev->tx_abort);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MPDUs requed", pdev->mpdus_requed);
+ "MPDUs requeued", pdev->mpdus_requeued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Excessive retries", pdev->tx_ko);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index d870f7067cb7..41c1a3d339c2 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -4371,8 +4371,8 @@ struct wmi_pdev_stats_tx {
/* Num PPDUs cleaned up in TX abort */
__le32 tx_abort;
- /* Num MPDUs requed by SW */
- __le32 mpdus_requed;
+ /* Num MPDUs requeued by SW */
+ __le32 mpdus_requeued;
/* excessive retries */
__le32 tx_ko;
@@ -4444,8 +4444,8 @@ struct wmi_10_4_pdev_stats_tx {
/* Num PPDUs cleaned up in TX abort */
__le32 tx_abort;
- /* Num MPDUs requed by SW */
- __le32 mpdus_requed;
+ /* Num MPDUs requeued by SW */
+ __le32 mpdus_requeued;
/* excessive retries */
__le32 tx_ko;
@@ -7418,7 +7418,6 @@ int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len);
int ath10k_wmi_connect(struct ath10k *ar);
-struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len);
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
u32 cmd_id);
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 77ce3347ab86..969bf1a590d9 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -70,6 +70,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.cold_boot_calib = true,
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
+ .fix_l1ss = true,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -110,6 +111,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.cold_boot_calib = true,
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
+ .fix_l1ss = true,
},
{
.name = "qca6390 hw2.0",
@@ -149,6 +151,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.cold_boot_calib = false,
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
+ .fix_l1ss = true,
},
{
.name = "qcn9074 hw1.0",
@@ -186,6 +189,47 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.cold_boot_calib = false,
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
+ .fix_l1ss = true,
+ },
+ {
+ .name = "wcn6855 hw2.0",
+ .hw_rev = ATH11K_HW_WCN6855_HW20,
+ .fw = {
+ .dir = "WCN6855/hw2.0",
+ .board_size = 256 * 1024,
+ .cal_size = 256 * 1024,
+ },
+ .max_radios = 3,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &wcn6855_ops,
+ .ring_mask = &ath11k_hw_ring_mask_qca6390,
+ .internal_sleep_clock = true,
+ .regs = &wcn6855_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
+ .host_ce_config = ath11k_host_ce_config_qca6390,
+ .ce_count = 9,
+ .target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+ .svc_to_ce_map_len = 14,
+ .single_pdev_only = true,
+ .rxdma1_enable = false,
+ .num_rxmda_per_pdev = 2,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+ .htt_peer_map_v2 = false,
+ .tcl_0_only = true,
+ .spectral_fft_sz = 0,
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP),
+ .supports_monitor = false,
+ .supports_shadow_regs = true,
+ .idle_ps = true,
+ .cold_boot_calib = false,
+ .supports_suspend = true,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
+ .fix_l1ss = false,
},
};
@@ -488,7 +532,8 @@ static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
if (len < ALIGN(ie_len, 4)) {
ath11k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
ie_id, ie_len, len);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
switch (ie_id) {
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 55af982deca7..018fb2385f2a 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -107,6 +107,7 @@ enum ath11k_hw_rev {
ATH11K_HW_QCA6390_HW20,
ATH11K_HW_IPQ6018_HW10,
ATH11K_HW_QCN9074_HW10,
+ ATH11K_HW_WCN6855_HW20,
};
enum ath11k_firmware_mode {
@@ -795,8 +796,8 @@ struct ath11k_fw_stats_pdev {
s32 underrun;
/* Num PPDUs cleaned up in TX abort */
s32 tx_abort;
- /* Num MPDUs requed by SW */
- s32 mpdus_requed;
+ /* Num MPDUs requeued by SW */
+ s32 mpdus_requeued;
/* excessive retries */
u32 tx_ko;
/* data hw rate code */
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
index ec93f14e6d2a..9e0c90da99d3 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
@@ -89,7 +89,7 @@ static inline void htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf,
len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_abort = %u",
htt_stats_buf->tx_abort);
len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_requeued = %u",
- htt_stats_buf->mpdu_requed);
+ htt_stats_buf->mpdu_requeued);
len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_xretry = %u",
htt_stats_buf->tx_xretry);
len += HTT_DBG_OUT(buf + len, buf_len - len, "data_rc = %u",
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
index 567a26d485a9..d428f52003a4 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
@@ -147,7 +147,7 @@ struct htt_tx_pdev_stats_cmn_tlv {
u32 hw_flush;
u32 hw_filt;
u32 tx_abort;
- u32 mpdu_requed;
+ u32 mpdu_requeued;
u32 tx_xretry;
u32 data_rc;
u32 mpdu_dropped_xretry;
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index 04f6c4e0658b..b0c8f6290099 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -342,7 +342,6 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
struct ath11k_dp *dp = &ab->dp;
struct hal_srng *srng;
int i, ret;
- u32 ring_hash_map;
ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
HAL_SW2WBM_RELEASE, 0, 0,
@@ -439,20 +438,9 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
}
/* When hash based routing of rx packet is enabled, 32 entries to map
- * the hash values to the ring will be configured. Each hash entry uses
- * three bits to map to a particular ring. The ring mapping will be
- * 0:TCL, 1:SW1, 2:SW2, 3:SW3, 4:SW4, 5:Release, 6:FW and 7:Not used.
+ * the hash values to the ring will be configured.
*/
- ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
- HAL_HASH_ROUTING_RING_SW2 << 3 |
- HAL_HASH_ROUTING_RING_SW3 << 6 |
- HAL_HASH_ROUTING_RING_SW4 << 9 |
- HAL_HASH_ROUTING_RING_SW1 << 12 |
- HAL_HASH_ROUTING_RING_SW2 << 15 |
- HAL_HASH_ROUTING_RING_SW3 << 18 |
- HAL_HASH_ROUTING_RING_SW4 << 21;
-
- ath11k_hal_reo_hw_setup(ab, ring_hash_map);
+ ab->hw_params.hw_ops->reo_setup(ab);
return 0;
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 1d9aa1bb6b6e..603d2f93ac18 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -260,6 +260,16 @@ static void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
}
+static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc);
+
+ return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) &&
+ (!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
+ __le32_to_cpu(attn->info1)));
+}
+
static void ath11k_dp_service_mon_ring(struct timer_list *t)
{
struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
@@ -852,6 +862,24 @@ static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_d
__skb_queue_purge(&rx_tid->rx_frags);
}
+void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
+{
+ struct dp_rx_tid *rx_tid;
+ int i;
+
+ lockdep_assert_held(&ar->ab->base_lock);
+
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
+ rx_tid = &peer->rx_tid[i];
+
+ spin_unlock_bh(&ar->ab->base_lock);
+ del_timer_sync(&rx_tid->frag_timer);
+ spin_lock_bh(&ar->ab->base_lock);
+
+ ath11k_dp_rx_frags_cleanup(rx_tid, true);
+ }
+}
+
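ath11k_peer_frags_flush() drops base_lock around del_timer_sync() because the fragment timer callback takes the same lock; waiting for a running callback to finish while still holding that lock would deadlock. A userspace pthread analogy of the unlock/wait/relock pattern (all names illustrative; build with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the fragment timer callback: it needs the same lock. */
static void *timer_cb(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	puts("callback ran under lock");
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t cb;

	pthread_mutex_lock(&lock);
	pthread_create(&cb, NULL, timer_cb, NULL);

	/* Joining here with the lock held could deadlock, exactly like
	 * calling del_timer_sync() under base_lock: drop, wait, retake.
	 */
	pthread_mutex_unlock(&lock);
	pthread_join(cb, NULL);		/* analogue of del_timer_sync() */
	pthread_mutex_lock(&lock);

	pthread_mutex_unlock(&lock);
	return 0;
}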
void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
{
struct dp_rx_tid *rx_tid;
@@ -3450,6 +3478,7 @@ static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
u8 tid;
int ret = 0;
bool more_frags;
+ bool is_mcbc;
rx_desc = (struct hal_rx_desc *)msdu->data;
peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
@@ -3457,6 +3486,11 @@ static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu);
more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu);
+ is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
+
+ /* Multicast/Broadcast fragments are not expected */
+ if (is_mcbc)
+ return -EINVAL;
if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) ||
!ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) ||
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.h b/drivers/net/wireless/ath/ath11k/dp_rx.h
index bf399312b5ff..623da3bf9dc8 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.h
@@ -49,6 +49,7 @@ int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
const u8 *peer_addr,
enum set_key_cmd key_cmd,
struct ieee80211_key_conf *key);
+void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer);
void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer);
void ath11k_peer_rx_tid_delete(struct ath11k *ar,
struct ath11k_peer *peer, u8 tid);
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index 08e3c72d9237..eaa0edca5576 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -382,6 +382,16 @@ static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(ab), val);
+ if (srng->ring_id == HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
+ ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr);
+ val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
+ ((u64)srng->ring_base_paddr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
+ (srng->entry_size * srng->num_entries));
+ ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);
+ }
+
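The WBM idle-link branch splits the ring's DMA base address across two registers: the low 32 bits go into the base register, and the upper address bits are packed into the MSB register together with the ring size. A sketch of that split, assuming an 8-bit upper-address field above bit 32 and a size field starting at bit 8 (the real widths come from the HAL_TCL1_RING_BASE_MSB_* masks):

#include <stdint.h>
#include <stdio.h>

#define ADDR_MSB_SHIFT	32
#define MSB_ADDR_MASK	0xffu	/* 8-bit upper-address field (assumed) */
#define MSB_SIZE_SHIFT	8	/* ring size field above it (assumed) */

/* Split a 40-bit DMA ring base across the LSB and MSB registers, the
 * way ath11k_hal_srng_src_hw_init() programs the WBM idle-link ring.
 * ring_size uses whatever units the hardware field expects.
 */
static void split_ring_base(uint64_t paddr, uint32_t ring_size,
			    uint32_t *lsb, uint32_t *msb)
{
	*lsb = (uint32_t)paddr;
	*msb = (uint32_t)((paddr >> ADDR_MSB_SHIFT) & MSB_ADDR_MASK) |
	       (ring_size << MSB_SIZE_SHIFT);
}

int main(void)
{
	uint32_t lsb, msb;

	split_ring_base(0x12345678abcULL, 2048, &lsb, &msb);
	printf("lsb=0x%08x msb=0x%08x\n", lsb, msb);
	return 0;
}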
/* interrupt setup */
/* NOTE: IPQ8074 v2 requires the interrupt timer threshold in the
* unit of 8 usecs instead of 1 usec (as required by v1).
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index 91d1428b8b94..35ed3a14e200 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -120,6 +120,7 @@ struct ath11k_base;
#define HAL_REO1_DEST_RING_CTRL_IX_1 0x00000008
#define HAL_REO1_DEST_RING_CTRL_IX_2 0x0000000c
#define HAL_REO1_DEST_RING_CTRL_IX_3 0x00000010
+#define HAL_REO1_MISC_CTL 0x00000630
#define HAL_REO1_RING_BASE_LSB(ab) ab->hw_params.regs->hal_reo1_ring_base_lsb
#define HAL_REO1_RING_BASE_MSB(ab) ab->hw_params.regs->hal_reo1_ring_base_msb
#define HAL_REO1_RING_ID(ab) ab->hw_params.regs->hal_reo1_ring_id
@@ -280,6 +281,7 @@ struct ath11k_base;
#define HAL_REO1_GEN_ENABLE_FRAG_DST_RING GENMASK(25, 23)
#define HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE BIT(2)
#define HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE BIT(3)
+#define HAL_REO1_MISC_CTL_FRAGMENT_DST_RING GENMASK(20, 17)
/* CE ring bit field mask and shift */
#define HAL_CE_DST_R0_DEST_CTRL_MAX_LEN GENMASK(15, 0)
@@ -906,7 +908,6 @@ void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
u32 start_seq, enum hal_pn_type type);
void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
struct hal_srng *srng);
-void ath11k_hal_reo_hw_setup(struct ath11k_base *ab, u32 ring_hash_map);
void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
struct hal_wbm_idle_scatter_list *sbuf,
u32 nsbufs, u32 tot_link_desc,
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
index fac2396edf32..325055ca41ab 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -801,43 +801,6 @@ void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
}
}
-void ath11k_hal_reo_hw_setup(struct ath11k_base *ab, u32 ring_hash_map)
-{
- u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
- u32 val;
-
- val = ath11k_hif_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
-
- val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING;
- val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING,
- HAL_SRNG_RING_ID_REO2SW1) |
- FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
- FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
- ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
-
- ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
- HAL_DEFAULT_REO_TIMEOUT_USEC);
- ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab),
- HAL_DEFAULT_REO_TIMEOUT_USEC);
- ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab),
- HAL_DEFAULT_REO_TIMEOUT_USEC);
- ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab),
- HAL_DEFAULT_REO_TIMEOUT_USEC);
-
- ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
- FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
- ring_hash_map));
- ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1,
- FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
- ring_hash_map));
- ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
- FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
- ring_hash_map));
- ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
- FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
- ring_hash_map));
-}
-
static enum hal_rx_mon_status
ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
struct hal_rx_mon_ppdu_info *ppdu_info,
@@ -1128,12 +1091,9 @@ ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
break;
}
case HAL_RX_MPDU_START: {
- struct hal_rx_mpdu_info *mpdu_info =
- (struct hal_rx_mpdu_info *)tlv_data;
u16 peer_id;
- peer_id = FIELD_GET(HAL_RX_MPDU_INFO_INFO0_PEERID,
- __le32_to_cpu(mpdu_info->info0));
+ peer_id = ab->hw_params.hw_ops->mpdu_info_get_peerid(tlv_data);
if (peer_id)
ppdu_info->peer_id = peer_id;
break;
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.h b/drivers/net/wireless/ath/ath11k/hal_rx.h
index d464a270c049..0f1f04b812b9 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.h
@@ -254,12 +254,20 @@ struct hal_rx_phyrx_rssi_legacy_info {
} __packed;
#define HAL_RX_MPDU_INFO_INFO0_PEERID GENMASK(31, 16)
+#define HAL_RX_MPDU_INFO_INFO0_PEERID_WCN6855 GENMASK(15, 0)
+
struct hal_rx_mpdu_info {
__le32 rsvd0;
__le32 info0;
__le32 rsvd1[21];
} __packed;
+struct hal_rx_mpdu_info_wcn6855 {
+ __le32 rsvd0[8];
+ __le32 info0;
+ __le32 rsvd1[14];
+} __packed;
+
#define HAL_RX_PPDU_END_DURATION GENMASK(23, 0)
struct hal_rx_ppdu_end_duration {
__le32 rsvd0[9];
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
index 377ae8d5b58f..d9596903b0a5 100644
--- a/drivers/net/wireless/ath/ath11k/hw.c
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -10,6 +10,7 @@
#include "hw.h"
#include "core.h"
#include "ce.h"
+#include "hif.h"
/* Map from pdev index to hw mac index */
static u8 ath11k_hw_ipq8074_mac_from_pdev_id(int pdev_idx)
@@ -45,6 +46,13 @@ static void ath11k_hw_qcn9074_tx_mesh_enable(struct ath11k_base *ab,
true);
}
+static void ath11k_hw_wcn6855_tx_mesh_enable(struct ath11k_base *ab,
+ struct hal_tcl_data_cmd *tcl_cmd)
+{
+ tcl_cmd->info3 |= FIELD_PREP(HAL_QCN9074_TCL_DATA_CMD_INFO3_MESH_ENABLE,
+ true);
+}
+
static void ath11k_init_wmi_config_qca6390(struct ath11k_base *ab,
struct target_resource_config *config)
{
@@ -91,6 +99,52 @@ static void ath11k_init_wmi_config_qca6390(struct ath11k_base *ab,
config->num_keep_alive_pattern = 0;
}
+static void ath11k_hw_ipq8074_reo_setup(struct ath11k_base *ab)
+{
+ u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+ u32 val;
+ /* Each hash entry uses three bits to map to a particular ring. */
+ u32 ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
+ HAL_HASH_ROUTING_RING_SW2 << 3 |
+ HAL_HASH_ROUTING_RING_SW3 << 6 |
+ HAL_HASH_ROUTING_RING_SW4 << 9 |
+ HAL_HASH_ROUTING_RING_SW1 << 12 |
+ HAL_HASH_ROUTING_RING_SW2 << 15 |
+ HAL_HASH_ROUTING_RING_SW3 << 18 |
+ HAL_HASH_ROUTING_RING_SW4 << 21;
+
+ val = ath11k_hif_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
+
+ val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING;
+ val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING,
+ HAL_SRNG_RING_ID_REO2SW1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
+
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
+ FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
+ ring_hash_map));
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1,
+ FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
+ ring_hash_map));
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
+ FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
+ ring_hash_map));
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
+ FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
+ ring_hash_map));
+}
+
static void ath11k_init_wmi_config_ipq8074(struct ath11k_base *ab,
struct target_resource_config *config)
{
@@ -489,6 +543,228 @@ static u8 *ath11k_hw_qcn9074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
return &desc->u.qcn9074.msdu_payload[0];
}
+static bool ath11k_hw_wcn6855_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU_WCN6855,
+ __le32_to_cpu(desc->u.wcn6855.msdu_end.info2));
+}
+
+static bool ath11k_hw_wcn6855_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU_WCN6855,
+ __le32_to_cpu(desc->u.wcn6855.msdu_end.info2));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
+ __le32_to_cpu(desc->u.wcn6855.msdu_end.info2));
+}
+
+static u8 *ath11k_hw_wcn6855_rx_desc_get_hdr_status(struct hal_rx_desc *desc)
+{
+ return desc->u.wcn6855.hdr_status;
+}
+
+static bool ath11k_hw_wcn6855_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.wcn6855.mpdu_start.info1) &
+ RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID;
+}
+
+static u32 ath11k_hw_wcn6855_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
+ __le32_to_cpu(desc->u.wcn6855.mpdu_start.info2));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_decap_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info2));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info2));
+}
+
+static bool ath11k_hw_wcn6855_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
+ __le32_to_cpu(desc->u.wcn6855.mpdu_start.info1));
+}
+
+static bool ath11k_hw_wcn6855_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
+ __le32_to_cpu(desc->u.wcn6855.mpdu_start.info1));
+}
+
+static u16 ath11k_hw_wcn6855_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
+ __le32_to_cpu(desc->u.wcn6855.mpdu_start.info1));
+}
+
+static u16 ath11k_hw_wcn6855_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info1));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_SGI,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info3));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info3));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info3));
+}
+
+static u32 ath11k_hw_wcn6855_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.wcn6855.msdu_start.phy_meta_data);
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info3));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info3));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO2_TID_WCN6855,
+ __le32_to_cpu(desc->u.wcn6855.mpdu_start.info2));
+}
+
+static u16 ath11k_hw_wcn6855_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.wcn6855.mpdu_start.sw_peer_id);
+}
+
+static void ath11k_hw_wcn6855_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ memcpy((u8 *)&fdesc->u.wcn6855.msdu_end, (u8 *)&ldesc->u.wcn6855.msdu_end,
+ sizeof(struct rx_msdu_end_wcn6855));
+ memcpy((u8 *)&fdesc->u.wcn6855.attention, (u8 *)&ldesc->u.wcn6855.attention,
+ sizeof(struct rx_attention));
+ memcpy((u8 *)&fdesc->u.wcn6855.mpdu_end, (u8 *)&ldesc->u.wcn6855.mpdu_end,
+ sizeof(struct rx_mpdu_end));
+}
+
+static u32 ath11k_hw_wcn6855_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(HAL_TLV_HDR_TAG,
+ __le32_to_cpu(desc->u.wcn6855.mpdu_start_tag));
+}
+
+static u32 ath11k_hw_wcn6855_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.wcn6855.mpdu_start.phy_ppdu_id);
+}
+
+static void ath11k_hw_wcn6855_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
+{
+ u32 info = __le32_to_cpu(desc->u.wcn6855.msdu_start.info1);
+
+ info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;
+ info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
+
+ desc->u.wcn6855.msdu_start.info1 = __cpu_to_le32(info);
+}
+
+static
+struct rx_attention *ath11k_hw_wcn6855_rx_desc_get_attention(struct hal_rx_desc *desc)
+{
+ return &desc->u.wcn6855.attention;
+}
+
+static u8 *ath11k_hw_wcn6855_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
+{
+ return &desc->u.wcn6855.msdu_payload[0];
+}
+
+static void ath11k_hw_wcn6855_reo_setup(struct ath11k_base *ab)
+{
+ u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+ u32 val;
+ /* Each hash entry uses four bits to map to a particular ring. */
+ u32 ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
+ HAL_HASH_ROUTING_RING_SW2 << 4 |
+ HAL_HASH_ROUTING_RING_SW3 << 8 |
+ HAL_HASH_ROUTING_RING_SW4 << 12 |
+ HAL_HASH_ROUTING_RING_SW1 << 16 |
+ HAL_HASH_ROUTING_RING_SW2 << 20 |
+ HAL_HASH_ROUTING_RING_SW3 << 24 |
+ HAL_HASH_ROUTING_RING_SW4 << 28;
+
+ val = ath11k_hif_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
+ val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
+
+ val = ath11k_hif_read32(ab, reo_base + HAL_REO1_MISC_CTL);
+ val &= ~HAL_REO1_MISC_CTL_FRAGMENT_DST_RING;
+ val |= FIELD_PREP(HAL_REO1_MISC_CTL_FRAGMENT_DST_RING, HAL_SRNG_RING_ID_REO2SW1);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_MISC_CTL, val);
+
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
+ ring_hash_map);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
+ ring_hash_map);
+}
+
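Functionally this mirrors the IPQ8074 setup above; the differences are that WCN6855 routes fragments through HAL_REO1_MISC_CTL and packs each REO destination into a 4-bit hash-map slot where older chips use 3 bits. A sketch of the two packings, assuming the 0:TCL, 1:SW1 ... 6:FW ring numbering from the comment removed in dp.c:

#include <stdint.h>
#include <stdio.h>

/* REO destination ring IDs per the hash routing convention
 * (0:TCL, 1:SW1 ... 6:FW; assumption for illustration).
 */
enum { RING_SW1 = 1, RING_SW2, RING_SW3, RING_SW4 };

/* Pack eight ring IDs into one register, 'width' bits per slot:
 * 3 bits on IPQ8074-class hardware, 4 bits on WCN6855.
 */
static uint32_t pack_hash_map(unsigned int width)
{
	static const unsigned int rings[8] = {
		RING_SW1, RING_SW2, RING_SW3, RING_SW4,
		RING_SW1, RING_SW2, RING_SW3, RING_SW4,
	};
	uint32_t val = 0;
	int i;

	for (i = 0; i < 8; i++)
		val |= (uint32_t)rings[i] << (i * width);
	return val;
}

int main(void)
{
	printf("3-bit map: 0x%08x\n", pack_hash_map(3)); /* IPQ8074 style */
	printf("4-bit map: 0x%08x\n", pack_hash_map(4)); /* WCN6855 style */
	return 0;
}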
+static u16 ath11k_hw_ipq8074_mpdu_info_get_peerid(u8 *tlv_data)
+{
+ u16 peer_id = 0;
+ struct hal_rx_mpdu_info *mpdu_info =
+ (struct hal_rx_mpdu_info *)tlv_data;
+
+ peer_id = FIELD_GET(HAL_RX_MPDU_INFO_INFO0_PEERID,
+ __le32_to_cpu(mpdu_info->info0));
+
+ return peer_id;
+}
+
+static u16 ath11k_hw_wcn6855_mpdu_info_get_peerid(u8 *tlv_data)
+{
+ u16 peer_id = 0;
+ struct hal_rx_mpdu_info_wcn6855 *mpdu_info =
+ (struct hal_rx_mpdu_info_wcn6855 *)tlv_data;
+
+ peer_id = FIELD_GET(HAL_RX_MPDU_INFO_INFO0_PEERID_WCN6855,
+ __le32_to_cpu(mpdu_info->info0));
+ return peer_id;
+}
+
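Both peer-id helpers perform the same FIELD_GET() extraction; only the mask (and the info word's offset within the TLV) differs, since WCN6855 moved the peer id into the low 16 bits. A userspace sketch with stand-ins for GENMASK()/FIELD_GET(), using the mask positions from the defines above and a made-up info word:

#include <stdint.h>
#include <stdio.h>

#define GENMASK32(h, l) \
	((uint32_t)(((~0u) << (l)) & (~0u >> (31 - (h)))))

/* Userspace FIELD_GET(): isolate a contiguous field and shift it down
 * (mask & -mask yields the field's lowest set bit).
 */
static uint32_t field_get(uint32_t mask, uint32_t val)
{
	return (val & mask) / (mask & -mask);
}

#define PEERID_IPQ8074	GENMASK32(31, 16)
#define PEERID_WCN6855	GENMASK32(15, 0)

int main(void)
{
	uint32_t info0 = 0x00420017;	/* sample MPDU_START info word */

	printf("ipq8074 peer_id=%u\n", field_get(PEERID_IPQ8074, info0));
	printf("wcn6855 peer_id=%u\n", field_get(PEERID_WCN6855, info0));
	return 0;
}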
const struct ath11k_hw_ops ipq8074_ops = {
.get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
.wmi_init_config = ath11k_init_wmi_config_ipq8074,
@@ -521,6 +797,8 @@ const struct ath11k_hw_ops ipq8074_ops = {
.rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
.rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention,
.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
+ .reo_setup = ath11k_hw_ipq8074_reo_setup,
+ .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
};
const struct ath11k_hw_ops ipq6018_ops = {
@@ -555,6 +833,8 @@ const struct ath11k_hw_ops ipq6018_ops = {
.rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
.rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention,
.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
+ .reo_setup = ath11k_hw_ipq8074_reo_setup,
+ .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
};
const struct ath11k_hw_ops qca6390_ops = {
@@ -589,6 +869,8 @@ const struct ath11k_hw_ops qca6390_ops = {
.rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
.rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention,
.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
+ .reo_setup = ath11k_hw_ipq8074_reo_setup,
+ .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
};
const struct ath11k_hw_ops qcn9074_ops = {
@@ -623,6 +905,44 @@ const struct ath11k_hw_ops qcn9074_ops = {
.rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
.rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention,
.rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
+ .reo_setup = ath11k_hw_ipq8074_reo_setup,
+ .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+};
+
+const struct ath11k_hw_ops wcn6855_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_qca6390,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_qca6390,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_qca6390,
+ .tx_mesh_enable = ath11k_hw_wcn6855_tx_mesh_enable,
+ .rx_desc_get_first_msdu = ath11k_hw_wcn6855_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath11k_hw_wcn6855_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath11k_hw_wcn6855_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_hdr_status = ath11k_hw_wcn6855_rx_desc_get_hdr_status,
+ .rx_desc_encrypt_valid = ath11k_hw_wcn6855_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath11k_hw_wcn6855_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath11k_hw_wcn6855_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath11k_hw_wcn6855_rx_desc_get_mesh_ctl,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_wcn6855_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath11k_hw_wcn6855_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath11k_hw_wcn6855_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath11k_hw_wcn6855_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath11k_hw_wcn6855_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath11k_hw_wcn6855_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath11k_hw_wcn6855_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath11k_hw_wcn6855_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath11k_hw_wcn6855_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath11k_hw_wcn6855_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath11k_hw_wcn6855_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath11k_hw_wcn6855_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_attn_end_tlv = ath11k_hw_wcn6855_rx_desc_copy_attn_end,
+ .rx_desc_get_mpdu_start_tag = ath11k_hw_wcn6855_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath11k_hw_wcn6855_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath11k_hw_wcn6855_rx_desc_set_msdu_len,
+ .rx_desc_get_attention = ath11k_hw_wcn6855_rx_desc_get_attention,
+ .rx_desc_get_msdu_payload = ath11k_hw_wcn6855_rx_desc_get_msdu_payload,
+ .reo_setup = ath11k_hw_wcn6855_reo_setup,
+ .mpdu_info_get_peerid = ath11k_hw_wcn6855_mpdu_info_get_peerid,
};
#define ATH11K_TX_RING_MASK_0 0x1
@@ -1688,3 +2008,74 @@ const struct ath11k_hw_regs qcn9074_regs = {
.pcie_qserdes_sysclk_en_sel = 0x01e0e0a8,
.pcie_pcs_osc_dtct_config_base = 0x01e0f45c,
};
+
+const struct ath11k_hw_regs wcn6855_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_base_lsb = 0x00000690,
+ .hal_tcl1_ring_base_msb = 0x00000694,
+ .hal_tcl1_ring_id = 0x00000698,
+ .hal_tcl1_ring_misc = 0x000006a0,
+ .hal_tcl1_ring_tp_addr_lsb = 0x000006ac,
+ .hal_tcl1_ring_tp_addr_msb = 0x000006b0,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c0,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c4,
+ .hal_tcl1_ring_msi1_base_lsb = 0x000006d8,
+ .hal_tcl1_ring_msi1_base_msb = 0x000006dc,
+ .hal_tcl1_ring_msi1_data = 0x000006e0,
+ .hal_tcl2_ring_base_lsb = 0x000006e8,
+ .hal_tcl_ring_base_lsb = 0x00000798,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x000008a0,
+
+ /* REO2SW(x) R0 ring configuration address */
+ .hal_reo1_ring_base_lsb = 0x00000244,
+ .hal_reo1_ring_base_msb = 0x00000248,
+ .hal_reo1_ring_id = 0x0000024c,
+ .hal_reo1_ring_misc = 0x00000254,
+ .hal_reo1_ring_hp_addr_lsb = 0x00000258,
+ .hal_reo1_ring_hp_addr_msb = 0x0000025c,
+ .hal_reo1_ring_producer_int_setup = 0x00000268,
+ .hal_reo1_ring_msi1_base_lsb = 0x0000028c,
+ .hal_reo1_ring_msi1_base_msb = 0x00000290,
+ .hal_reo1_ring_msi1_data = 0x00000294,
+ .hal_reo2_ring_base_lsb = 0x0000029c,
+ .hal_reo1_aging_thresh_ix_0 = 0x000005bc,
+ .hal_reo1_aging_thresh_ix_1 = 0x000005c0,
+ .hal_reo1_aging_thresh_ix_2 = 0x000005c4,
+ .hal_reo1_aging_thresh_ix_3 = 0x000005c8,
+
+ /* REO2SW(x) R2 ring pointers (head/tail) address */
+ .hal_reo1_ring_hp = 0x00003030,
+ .hal_reo1_ring_tp = 0x00003034,
+ .hal_reo2_ring_hp = 0x00003038,
+
+ /* REO2TCL R0 ring configuration address */
+ .hal_reo_tcl_ring_base_lsb = 0x00000454,
+ .hal_reo_tcl_ring_hp = 0x00003060,
+
+ /* REO status address */
+ .hal_reo_status_ring_base_lsb = 0x0000055c,
+ .hal_reo_status_hp = 0x00003078,
+
+ /* WCSS relative address */
+ .hal_seq_wcss_umac_ce0_src_reg = 0x1b80000,
+ .hal_seq_wcss_umac_ce0_dst_reg = 0x1b81000,
+ .hal_seq_wcss_umac_ce1_src_reg = 0x1b82000,
+ .hal_seq_wcss_umac_ce1_dst_reg = 0x1b83000,
+
+ /* WBM Idle address */
+ .hal_wbm_idle_link_ring_base_lsb = 0x00000870,
+ .hal_wbm_idle_link_ring_misc = 0x00000880,
+
+ /* SW2WBM release address */
+ .hal_wbm_release_ring_base_lsb = 0x000001e8,
+
+ /* WBM2SW release address */
+ .hal_wbm0_release_ring_base_lsb = 0x00000920,
+ .hal_wbm1_release_ring_base_lsb = 0x00000978,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
+ .pcie_pcs_osc_dtct_config_base = 0x01e0c628,
+};
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index c81a6328361d..62f5978b3005 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -162,6 +162,7 @@ struct ath11k_hw_params {
bool cold_boot_calib;
bool supports_suspend;
u32 hal_desc_sz;
+ bool fix_l1ss;
};
struct ath11k_hw_ops {
@@ -199,12 +200,15 @@ struct ath11k_hw_ops {
void (*rx_desc_set_msdu_len)(struct hal_rx_desc *desc, u16 len);
struct rx_attention *(*rx_desc_get_attention)(struct hal_rx_desc *desc);
u8 *(*rx_desc_get_msdu_payload)(struct hal_rx_desc *desc);
+ void (*reo_setup)(struct ath11k_base *ab);
+ u16 (*mpdu_info_get_peerid)(u8 *tlv_data);
};
extern const struct ath11k_hw_ops ipq8074_ops;
extern const struct ath11k_hw_ops ipq6018_ops;
extern const struct ath11k_hw_ops qca6390_ops;
extern const struct ath11k_hw_ops qcn9074_ops;
+extern const struct ath11k_hw_ops wcn6855_ops;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390;
@@ -318,5 +322,6 @@ struct ath11k_hw_regs {
extern const struct ath11k_hw_regs ipq8074_regs;
extern const struct ath11k_hw_regs qca6390_regs;
extern const struct ath11k_hw_regs qcn9074_regs;
+extern const struct ath11k_hw_regs wcn6855_regs;
#endif
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 4df425dd31a2..eb52332dbe3f 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -2779,6 +2779,12 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
*/
spin_lock_bh(&ab->base_lock);
peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
+
+ /* flush the fragments cache during key (re)install to
+ * ensure all frags in the new frag list belong to the same key.
+ */
+ if (peer && cmd == SET_KEY)
+ ath11k_peer_frags_flush(ar, peer);
spin_unlock_bh(&ab->base_lock);
if (!peer) {
@@ -5373,11 +5379,6 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
if (WARN_ON(!arvif->is_up))
continue;
- ret = ath11k_mac_setup_bcn_tmpl(arvif);
- if (ret)
- ath11k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
- ret);
-
ret = ath11k_mac_vdev_restart(arvif, &vifs[i].new_ctx->def);
if (ret) {
ath11k_warn(ab, "failed to restart vdev %d: %d\n",
@@ -5385,6 +5386,11 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
continue;
}
+ ret = ath11k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath11k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
+ ret);
+
ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
arvif->bssid);
if (ret) {
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index 27b394d115e2..75cc2d80fde8 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -354,6 +354,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
ath11k_mhi_config = &ath11k_mhi_config_qcn9074;
break;
case ATH11K_HW_QCA6390_HW20:
+ case ATH11K_HW_WCN6855_HW20:
ath11k_mhi_config = &ath11k_mhi_config_qca6390;
break;
default:
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 0f31eb566fb6..f8f6b2090dad 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -36,9 +36,11 @@
#define QCA6390_DEVICE_ID 0x1101
#define QCN9074_DEVICE_ID 0x1104
+#define WCN6855_DEVICE_ID 0x1103
static const struct pci_device_id ath11k_pci_id_table[] = {
{ PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
+ { PCI_VDEVICE(QCOM, WCN6855_DEVICE_ID) },
/* TODO: add QCN9074_DEVICE_ID once firmware issues are resolved */
{0}
};
@@ -432,7 +434,8 @@ static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on)
ath11k_pci_enable_ltssm(ab);
ath11k_pci_clear_all_intrs(ab);
ath11k_pci_set_wlaon_pwr_ctrl(ab);
- ath11k_pci_fix_l1ss(ab);
+ if (ab->hw_params.fix_l1ss)
+ ath11k_pci_fix_l1ss(ab);
}
ath11k_mhi_clear_vector(ab);
@@ -1176,12 +1179,26 @@ static const struct ath11k_hif_ops ath11k_pci_hif_ops = {
.get_ce_msi_idx = ath11k_pci_get_ce_msi_idx,
};
+static void ath11k_pci_read_hw_version(struct ath11k_base *ab, u32 *major, u32 *minor)
+{
+ u32 soc_hw_version;
+
+ soc_hw_version = ath11k_pci_read32(ab, TCSR_SOC_HW_VERSION);
+ *major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
+ soc_hw_version);
+ *minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
+ soc_hw_version);
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "pci tcsr_soc_hw_version major %d minor %d\n",
+ *major, *minor);
+}
+
static int ath11k_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_dev)
{
struct ath11k_base *ab;
struct ath11k_pci *ab_pci;
- u32 soc_hw_version, soc_hw_version_major, soc_hw_version_minor;
+ u32 soc_hw_version_major, soc_hw_version_minor;
int ret;
ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI,
@@ -1209,15 +1226,8 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
switch (pci_dev->device) {
case QCA6390_DEVICE_ID:
- soc_hw_version = ath11k_pci_read32(ab, TCSR_SOC_HW_VERSION);
- soc_hw_version_major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
- soc_hw_version);
- soc_hw_version_minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
- soc_hw_version);
-
- ath11k_dbg(ab, ATH11K_DBG_PCI, "pci tcsr_soc_hw_version major %d minor %d\n",
- soc_hw_version_major, soc_hw_version_minor);
-
+ ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
+ &soc_hw_version_minor);
switch (soc_hw_version_major) {
case 2:
ab->hw_rev = ATH11K_HW_QCA6390_HW20;
@@ -1235,6 +1245,21 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
ab->bus_params.static_window_map = true;
ab->hw_rev = ATH11K_HW_QCN9074_HW10;
break;
+ case WCN6855_DEVICE_ID:
+ ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
+ &soc_hw_version_minor);
+ switch (soc_hw_version_major) {
+ case 2:
+ ab->hw_rev = ATH11K_HW_WCN6855_HW20;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported WCN6855 SOC hardware version: %d %d\n",
+ soc_hw_version_major, soc_hw_version_minor);
+ ret = -EOPNOTSUPP;
+ goto err_pci_free_region;
+ }
+ ab_pci->msi_config = &ath11k_msi_config[0];
+ break;
default:
dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
pci_dev->device);
diff --git a/drivers/net/wireless/ath/ath11k/rx_desc.h b/drivers/net/wireless/ath/ath11k/rx_desc.h
index 0cdb4a1f816e..79c50804d7dc 100644
--- a/drivers/net/wireless/ath/ath11k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath11k/rx_desc.h
@@ -368,6 +368,7 @@ struct rx_attention {
#define RX_MPDU_START_INFO2_BSSID_HIT BIT(9)
#define RX_MPDU_START_INFO2_BSSID_NUM GENMASK(13, 10)
#define RX_MPDU_START_INFO2_TID GENMASK(17, 14)
+#define RX_MPDU_START_INFO2_TID_WCN6855 GENMASK(18, 15)
#define RX_MPDU_START_INFO3_REO_DEST_IND GENMASK(4, 0)
#define RX_MPDU_START_INFO3_FLOW_ID_TOEPLITZ BIT(7)
@@ -546,6 +547,31 @@ struct rx_mpdu_start_qcn9074 {
__le32 ht_ctrl;
} __packed;
+struct rx_mpdu_start_wcn6855 {
+ __le32 info3;
+ __le32 reo_queue_desc_lo;
+ __le32 info4;
+ __le32 pn[4];
+ __le32 info2;
+ __le32 peer_meta_data;
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le16 ast_index;
+ __le16 sw_peer_id;
+ __le32 info1;
+ __le32 info5;
+ __le32 info6;
+ __le16 frame_ctrl;
+ __le16 duration;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ __le16 qos_ctrl;
+ __le32 ht_ctrl;
+} __packed;
+
/* rx_mpdu_start
*
* rxpcu_mpdu_filter_in_category
@@ -804,6 +830,20 @@ struct rx_msdu_start_qcn9074 {
__le16 vlan_stag_c1;
} __packed;
+struct rx_msdu_start_wcn6855 {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le32 info1;
+ __le32 info2;
+ __le32 toeplitz_hash;
+ __le32 flow_id_toeplitz;
+ __le32 info3;
+ __le32 ppdu_start_timestamp;
+ __le32 phy_meta_data;
+ __le16 vlan_ctag_ci;
+ __le16 vlan_stag_ci;
+} __packed;
+
/* rx_msdu_start
*
* rxpcu_mpdu_filter_in_category
@@ -988,7 +1028,9 @@ struct rx_msdu_start_qcn9074 {
#define RX_MSDU_END_INFO2_REPORTED_MPDU_LEN GENMASK(13, 0)
#define RX_MSDU_END_INFO2_FIRST_MSDU BIT(14)
+#define RX_MSDU_END_INFO2_FIRST_MSDU_WCN6855 BIT(28)
#define RX_MSDU_END_INFO2_LAST_MSDU BIT(15)
+#define RX_MSDU_END_INFO2_LAST_MSDU_WCN6855 BIT(29)
#define RX_MSDU_END_INFO2_SA_IDX_TIMEOUT BIT(16)
#define RX_MSDU_END_INFO2_DA_IDX_TIMEOUT BIT(17)
#define RX_MSDU_END_INFO2_MSDU_LIMIT_ERR BIT(18)
@@ -1037,6 +1079,31 @@ struct rx_msdu_end_ipq8074 {
__le16 sa_sw_peer_id;
} __packed;
+struct rx_msdu_end_wcn6855 {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le16 ip_hdr_cksum;
+ __le16 reported_mpdu_len;
+ __le32 info1;
+ __le32 ext_wapi_pn[2];
+ __le32 info4;
+ __le32 ipv6_options_crc;
+ __le32 tcp_seq_num;
+ __le32 tcp_ack_num;
+ __le16 info3;
+ __le16 window_size;
+ __le32 info2;
+ __le16 sa_idx;
+ __le16 da_idx;
+ __le32 info5;
+ __le32 fse_metadata;
+ __le16 cce_metadata;
+ __le16 sa_sw_peer_id;
+ __le32 rule_indication[2];
+ __le32 info6;
+ __le32 info7;
+} __packed;
+
#define RX_MSDU_END_MPDU_LENGTH_INFO GENMASK(13, 0)
#define RX_MSDU_END_INFO2_DA_OFFSET GENMASK(5, 0)
@@ -1400,10 +1467,30 @@ struct hal_rx_desc_qcn9074 {
u8 msdu_payload[0];
} __packed;
+struct hal_rx_desc_wcn6855 {
+ __le32 msdu_end_tag;
+ struct rx_msdu_end_wcn6855 msdu_end;
+ __le32 rx_attn_tag;
+ struct rx_attention attention;
+ __le32 msdu_start_tag;
+ struct rx_msdu_start_wcn6855 msdu_start;
+ u8 rx_padding0[HAL_RX_DESC_PADDING0_BYTES];
+ __le32 mpdu_start_tag;
+ struct rx_mpdu_start_wcn6855 mpdu_start;
+ __le32 mpdu_end_tag;
+ struct rx_mpdu_end mpdu_end;
+ u8 rx_padding1[HAL_RX_DESC_PADDING1_BYTES];
+ __le32 hdr_status_tag;
+ __le32 phy_ppdu_id;
+ u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
+ u8 msdu_payload[0];
+} __packed;
+
struct hal_rx_desc {
union {
struct hal_rx_desc_ipq8074 ipq8074;
struct hal_rx_desc_qcn9074 qcn9074;
+ struct hal_rx_desc_wcn6855 wcn6855;
} u;
} __packed;
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 5ca2d80679b6..6c253eae9d06 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -5235,7 +5235,7 @@ ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
dst->hw_reaped = src->hw_reaped;
dst->underrun = src->underrun;
dst->tx_abort = src->tx_abort;
- dst->mpdus_requed = src->mpdus_requed;
+ dst->mpdus_requeued = src->mpdus_requeued;
dst->tx_ko = src->tx_ko;
dst->data_rc = src->data_rc;
dst->self_triggers = src->self_triggers;
@@ -5505,7 +5505,7 @@ ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PPDUs cleaned", pdev->tx_abort);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
- "MPDUs requed", pdev->mpdus_requed);
+ "MPDUs requeued", pdev->mpdus_requeued);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"Excessive retries", pdev->tx_ko);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index 3ade1ddd35c9..d35c47e0b19d 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -4171,8 +4171,8 @@ struct wmi_pdev_stats_tx {
/* Num PPDUs cleaned up in TX abort */
s32 tx_abort;
- /* Num MPDUs requed by SW */
- s32 mpdus_requed;
+ /* Num MPDUs requeued by SW */
+ s32 mpdus_requeued;
/* excessive retries */
u32 tx_ko;
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index f2db7cf16566..3f4ce4e9c532 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -855,7 +855,7 @@ ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
}
/**
- * at5k_hw_stop_rx_pcu() - Stop RX engine
+ * ath5k_hw_stop_rx_pcu() - Stop RX engine
* @ah: The &struct ath5k_hw
*
* Stops RX engine on PCU
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 29527e8dcced..fefdc6753acd 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -3303,8 +3303,8 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
if (ret < 0)
return ret;
} else {
- ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
- MATCHED_SSID_FILTER, 0);
+ ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ MATCHED_SSID_FILTER, 0);
if (ret < 0)
return ret;
}
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index 7506cea46f58..433a047f3747 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -1027,14 +1027,17 @@ static ssize_t ath6kl_lrssi_roam_write(struct file *file,
{
struct ath6kl *ar = file->private_data;
unsigned long lrssi_roam_threshold;
+ int ret;
if (kstrtoul_from_user(user_buf, count, 0, &lrssi_roam_threshold))
return -EINVAL;
ar->lrssi_roam_threshold = lrssi_roam_threshold;
- ath6kl_wmi_set_roam_lrssi_cmd(ar->wmi, ar->lrssi_roam_threshold);
+ ret = ath6kl_wmi_set_roam_lrssi_cmd(ar->wmi, ar->lrssi_roam_threshold);
+ if (ret)
+ return ret;
return count;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 76b538942a79..5184a0aacfe2 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -522,6 +522,8 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0;
rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7);
rxs->enc_flags |= (rxsp->status4 & AR_GI) ? RX_ENC_FLAG_SHORT_GI : 0;
+ rxs->enc_flags |=
+ (rxsp->status4 & AR_STBC) ? (1 << RX_ENC_FLAG_STBC_SHIFT) : 0;
rxs->bw = (rxsp->status4 & AR_2040) ? RATE_INFO_BW_40 : RATE_INFO_BW_20;
rxs->evm0 = rxsp->status6;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 45f6402478b5..97c3a53f9cef 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -307,6 +307,11 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
hchan = ah->curchan;
}
+ if (!hchan) {
+ fastcc = false;
+ hchan = ath9k_cmn_get_channel(sc->hw, ah, &sc->cur_chan->chandef);
+ }
+
if (!ath_prepare_reset(sc))
fastcc = false;
diff --git a/drivers/net/wireless/ath/carl9170/Kconfig b/drivers/net/wireless/ath/carl9170/Kconfig
index b2d760873992..ba9bea79381c 100644
--- a/drivers/net/wireless/ath/carl9170/Kconfig
+++ b/drivers/net/wireless/ath/carl9170/Kconfig
@@ -16,13 +16,11 @@ config CARL9170
config CARL9170_LEDS
bool "SoftLED Support"
- depends on CARL9170
- select MAC80211_LEDS
- select LEDS_CLASS
- select NEW_LEDS
default y
+ depends on CARL9170
+ depends on MAC80211_LEDS
help
- This option is necessary, if you want your device' LEDs to blink
+ This option is necessary if you want your device's LEDs to blink.
Say Y, unless you need the LEDs for firmware debugging.
diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c
index eae9abf540a7..b53ebb3ac9a2 100644
--- a/drivers/net/wireless/ath/hw.c
+++ b/drivers/net/wireless/ath/hw.c
@@ -24,7 +24,7 @@
#define REG_WRITE(_ah, _reg, _val) (common->ops->write)(_ah, _val, _reg)
/**
- * ath_hw_set_bssid_mask - filter out bssids we listen
+ * ath_hw_setbssidmask - filter out bssids we listen
*
* @common: the ath_common struct for the device.
*
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index 63079231e48e..8e1dbfda6538 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -800,7 +800,7 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
(char *)ctl_skb->skb->data, ctl_skb->skb->len);
/* Move the head of the ring to the next empty descriptor */
- ch->head_blk_ctl = ctl_skb->next;
+ ch->head_blk_ctl = ctl_skb->next;
/* Commit all previous writes and set descriptors to VALID */
wmb();
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index 65ef893f2736..455143c4164e 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -3464,8 +3464,12 @@ struct wcn36xx_hal_rem_bcn_filter_req {
#define WCN36XX_HAL_OFFLOAD_DISABLE 0
#define WCN36XX_HAL_OFFLOAD_ENABLE 1
#define WCN36XX_HAL_OFFLOAD_BCAST_FILTER_ENABLE 0x2
+#define WCN36XX_HAL_OFFLOAD_MCAST_FILTER_ENABLE 0x4
+#define WCN36XX_HAL_OFFLOAD_NS_AND_MCAST_FILTER_ENABLE \
+ (WCN36XX_HAL_OFFLOAD_ENABLE | WCN36XX_HAL_OFFLOAD_MCAST_FILTER_ENABLE)
#define WCN36XX_HAL_OFFLOAD_ARP_AND_BCAST_FILTER_ENABLE \
- (HAL_OFFLOAD_ENABLE|HAL_OFFLOAD_BCAST_FILTER_ENABLE)
+ (WCN36XX_HAL_OFFLOAD_ENABLE | WCN36XX_HAL_OFFLOAD_BCAST_FILTER_ENABLE)
+#define WCN36XX_HAL_IPV6_OFFLOAD_ADDR_MAX 0x02
struct wcn36xx_hal_ns_offload_params {
u8 src_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
@@ -3487,10 +3491,10 @@ struct wcn36xx_hal_ns_offload_params {
/* slot index for this offload */
u32 slot_index;
u8 bss_index;
-};
+} __packed;
struct wcn36xx_hal_host_offload_req {
- u8 offload_Type;
+ u8 offload_type;
/* enable or disable */
u8 enable;
@@ -3499,13 +3503,13 @@ struct wcn36xx_hal_host_offload_req {
u8 host_ipv4_addr[4];
u8 host_ipv6_addr[WCN36XX_HAL_IPV6_ADDR_LEN];
} u;
-};
+} __packed;
struct wcn36xx_hal_host_offload_req_msg {
struct wcn36xx_hal_msg_header header;
struct wcn36xx_hal_host_offload_req host_offload_params;
struct wcn36xx_hal_ns_offload_params ns_offload_params;
-};
+} __packed;
/* Packet Types. */
#define WCN36XX_HAL_KEEP_ALIVE_NULL_PKT 1
@@ -4901,7 +4905,7 @@ struct wcn36xx_hal_gtk_offload_req_msg {
u64 key_replay_counter;
u8 bss_index;
-};
+} __packed;
struct wcn36xx_hal_gtk_offload_rsp_msg {
struct wcn36xx_hal_msg_header header;
@@ -4915,7 +4919,7 @@ struct wcn36xx_hal_gtk_offload_rsp_msg {
struct wcn36xx_hal_gtk_offload_get_info_req_msg {
struct wcn36xx_hal_msg_header header;
u8 bss_index;
-};
+} __packed;
struct wcn36xx_hal_gtk_offload_get_info_rsp_msg {
struct wcn36xx_hal_msg_header header;
@@ -4939,7 +4943,7 @@ struct wcn36xx_hal_gtk_offload_get_info_rsp_msg {
u32 igtk_rekey_count;
u8 bss_index;
-};
+} __packed;
struct dhcp_info {
/* Indicates the device mode which indicates about the DHCP activity */
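An illustrative compile-time check, not part of the patch: once the three request structs are __packed, the NS offload block must follow the host offload block with no compiler-inserted padding, which is what the firmware's wire format requires. A guard like the sketch below (placed in driver code that already includes hal.h) would catch ABI drift; the struct names are real, the layout assertion itself is an assumption about the intended wire layout.

#include <linux/build_bug.h>
#include <linux/stddef.h>

static inline void wcn36xx_hal_layout_check(void)
{
	/* holds only if all three structs are __packed */
	BUILD_BUG_ON(offsetof(struct wcn36xx_hal_host_offload_req_msg,
			      ns_offload_params) !=
		     sizeof(struct wcn36xx_hal_msg_header) +
		     sizeof(struct wcn36xx_hal_host_offload_req));
}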
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index afb4877eaad8..d202f2128df2 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -25,6 +25,7 @@
#include <linux/rpmsg.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/soc/qcom/wcnss_ctrl.h>
+#include <net/ipv6.h>
#include "wcn36xx.h"
#include "testmode.h"
@@ -172,7 +173,9 @@ static struct ieee80211_supported_band wcn_band_5ghz = {
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support wowlan_support = {
- .flags = WIPHY_WOWLAN_ANY
+ .flags = WIPHY_WOWLAN_ANY |
+ WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY
};
#endif
@@ -293,23 +296,16 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
goto out_free_dxe_pool;
}
- wcn->hal_buf = kmalloc(WCN36XX_HAL_BUF_SIZE, GFP_KERNEL);
- if (!wcn->hal_buf) {
- wcn36xx_err("Failed to allocate smd buf\n");
- ret = -ENOMEM;
- goto out_free_dxe_ctl;
- }
-
ret = wcn36xx_smd_load_nv(wcn);
if (ret) {
wcn36xx_err("Failed to push NV to chip\n");
- goto out_free_smd_buf;
+ goto out_free_dxe_ctl;
}
ret = wcn36xx_smd_start(wcn);
if (ret) {
wcn36xx_err("Failed to start chip\n");
- goto out_free_smd_buf;
+ goto out_free_dxe_ctl;
}
if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
@@ -336,8 +332,6 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
out_smd_stop:
wcn36xx_smd_stop(wcn);
-out_free_smd_buf:
- kfree(wcn->hal_buf);
out_free_dxe_ctl:
wcn36xx_dxe_free_ctl_blks(wcn);
out_free_dxe_pool:
@@ -372,8 +366,6 @@ static void wcn36xx_stop(struct ieee80211_hw *hw)
wcn36xx_dxe_free_mem_pools(wcn);
wcn36xx_dxe_free_ctl_blks(wcn);
-
- kfree(wcn->hal_buf);
}
static void wcn36xx_change_ps(struct wcn36xx *wcn, bool enable)
@@ -1088,28 +1080,91 @@ static int wcn36xx_sta_remove(struct ieee80211_hw *hw,
#ifdef CONFIG_PM
+static struct ieee80211_vif *wcn36xx_get_first_assoc_vif(struct wcn36xx *wcn)
+{
+ struct wcn36xx_vif *vif_priv = NULL;
+ struct ieee80211_vif *vif = NULL;
+
+ list_for_each_entry(vif_priv, &wcn->vif_list, list) {
+ if (vif_priv->sta_assoc) {
+ vif = wcn36xx_priv_to_vif(vif_priv);
+ break;
+ }
+ }
+ return vif;
+}
+
static int wcn36xx_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wow)
{
struct wcn36xx *wcn = hw->priv;
+ struct ieee80211_vif *vif = NULL;
+ int ret = 0;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac suspend\n");
- flush_workqueue(wcn->hal_ind_wq);
- wcn36xx_smd_set_power_params(wcn, true);
- return 0;
+ mutex_lock(&wcn->conf_mutex);
+
+ vif = wcn36xx_get_first_assoc_vif(wcn);
+ if (vif) {
+ ret = wcn36xx_smd_arp_offload(wcn, vif, true);
+ if (ret)
+ goto out;
+ ret = wcn36xx_smd_ipv6_ns_offload(wcn, vif, true);
+ if (ret)
+ goto out;
+ ret = wcn36xx_smd_gtk_offload(wcn, vif, true);
+ if (ret)
+ goto out;
+ ret = wcn36xx_smd_set_power_params(wcn, true);
+ if (ret)
+ goto out;
+ ret = wcn36xx_smd_wlan_host_suspend_ind(wcn);
+ }
+out:
+ mutex_unlock(&wcn->conf_mutex);
+ return ret;
}
static int wcn36xx_resume(struct ieee80211_hw *hw)
{
struct wcn36xx *wcn = hw->priv;
+ struct ieee80211_vif *vif = NULL;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac resume\n");
- flush_workqueue(wcn->hal_ind_wq);
- wcn36xx_smd_set_power_params(wcn, false);
+ mutex_lock(&wcn->conf_mutex);
+ vif = wcn36xx_get_first_assoc_vif(wcn);
+ if (vif) {
+ wcn36xx_smd_host_resume(wcn);
+ wcn36xx_smd_set_power_params(wcn, false);
+ wcn36xx_smd_gtk_offload_get_info(wcn, vif);
+ wcn36xx_smd_gtk_offload(wcn, vif, false);
+ wcn36xx_smd_ipv6_ns_offload(wcn, vif, false);
+ wcn36xx_smd_arp_offload(wcn, vif, false);
+ }
+ mutex_unlock(&wcn->conf_mutex);
+
return 0;
}
+static void wcn36xx_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+
+ mutex_lock(&wcn->conf_mutex);
+
+ memcpy(vif_priv->rekey_data.kek, data->kek, NL80211_KEK_LEN);
+ memcpy(vif_priv->rekey_data.kck, data->kck, NL80211_KCK_LEN);
+ vif_priv->rekey_data.replay_ctr =
+ cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr));
+ vif_priv->rekey_data.valid = true;
+
+ mutex_unlock(&wcn->conf_mutex);
+}
+
#endif
static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
@@ -1176,6 +1231,34 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
return ret;
}
+#if IS_ENABLED(CONFIG_IPV6)
+static void wcn36xx_ipv6_addr_change(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct inet6_dev *idev)
+{
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+ struct inet6_ifaddr *ifa;
+ int idx = 0;
+
+ memset(vif_priv->tentative_addrs, 0, sizeof(vif_priv->tentative_addrs));
+
+ read_lock_bh(&idev->lock);
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
+ vif_priv->target_ipv6_addrs[idx] = ifa->addr;
+ if (ifa->flags & IFA_F_TENTATIVE)
+ __set_bit(idx, vif_priv->tentative_addrs);
+ idx++;
+ if (idx >= WCN36XX_HAL_IPV6_OFFLOAD_ADDR_MAX)
+ break;
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "%pI6 %s\n", &ifa->addr,
+ (ifa->flags & IFA_F_TENTATIVE) ? "tentative" : "");
+ }
+ read_unlock_bh(&idev->lock);
+
+ vif_priv->num_target_ipv6_addrs = idx;
+}
+#endif
+
static const struct ieee80211_ops wcn36xx_ops = {
.start = wcn36xx_start,
.stop = wcn36xx_stop,
@@ -1184,6 +1267,7 @@ static const struct ieee80211_ops wcn36xx_ops = {
#ifdef CONFIG_PM
.suspend = wcn36xx_suspend,
.resume = wcn36xx_resume,
+ .set_rekey_data = wcn36xx_set_rekey_data,
#endif
.config = wcn36xx_config,
.prepare_multicast = wcn36xx_prepare_multicast,
@@ -1199,6 +1283,9 @@ static const struct ieee80211_ops wcn36xx_ops = {
.sta_add = wcn36xx_sta_add,
.sta_remove = wcn36xx_sta_remove,
.ampdu_action = wcn36xx_ampdu_action,
+#if IS_ENABLED(CONFIG_IPV6)
+ .ipv6_addr_change = wcn36xx_ipv6_addr_change,
+#endif
CFG80211_TESTMODE_CMD(wcn36xx_tm_cmd)
};
@@ -1401,6 +1488,12 @@ static int wcn36xx_probe(struct platform_device *pdev)
mutex_init(&wcn->hal_mutex);
mutex_init(&wcn->scan_lock);
+ wcn->hal_buf = devm_kmalloc(wcn->dev, WCN36XX_HAL_BUF_SIZE, GFP_KERNEL);
+ if (!wcn->hal_buf) {
+ ret = -ENOMEM;
+ goto out_wq;
+ }
+
ret = dma_set_mask_and_coherent(wcn->dev, DMA_BIT_MASK(32));
if (ret < 0) {
wcn36xx_err("failed to set DMA mask: %d\n", ret);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index d0c3a1557e8d..cf8e52cbdd9b 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -2756,6 +2756,269 @@ out:
return ret;
}
+int wcn36xx_smd_arp_offload(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+ struct wcn36xx_hal_host_offload_req_msg msg_body;
+ int ret;
+
+ mutex_lock(&wcn->hal_mutex);
+
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_HOST_OFFLOAD_REQ);
+ msg_body.host_offload_params.offload_type =
+ WCN36XX_HAL_IPV4_ARP_REPLY_OFFLOAD;
+ if (enable) {
+ msg_body.host_offload_params.enable =
+ WCN36XX_HAL_OFFLOAD_ARP_AND_BCAST_FILTER_ENABLE;
+ memcpy(&msg_body.host_offload_params.u,
+ &vif->bss_conf.arp_addr_list[0], sizeof(__be32));
+ }
+ msg_body.ns_offload_params.bss_index = vif_priv->bss_index;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending host_offload_arp failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("host_offload_arp failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+int wcn36xx_smd_ipv6_ns_offload(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+ struct wcn36xx_hal_host_offload_req_msg msg_body;
+ struct wcn36xx_hal_ns_offload_params *ns_params;
+ struct wcn36xx_hal_host_offload_req *ho_params;
+ int ret;
+
+ mutex_lock(&wcn->hal_mutex);
+
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_HOST_OFFLOAD_REQ);
+ ho_params = &msg_body.host_offload_params;
+ ns_params = &msg_body.ns_offload_params;
+
+ ho_params->offload_type = WCN36XX_HAL_IPV6_NS_OFFLOAD;
+ if (enable) {
+ ho_params->enable =
+ WCN36XX_HAL_OFFLOAD_NS_AND_MCAST_FILTER_ENABLE;
+ if (vif_priv->num_target_ipv6_addrs) {
+ memcpy(&ho_params->u,
+ &vif_priv->target_ipv6_addrs[0].in6_u,
+ sizeof(struct in6_addr));
+ memcpy(&ns_params->target_ipv6_addr1,
+ &vif_priv->target_ipv6_addrs[0].in6_u,
+ sizeof(struct in6_addr));
+ ns_params->target_ipv6_addr1_valid = 1;
+ }
+ if (vif_priv->num_target_ipv6_addrs > 1) {
+ memcpy(&ns_params->target_ipv6_addr2,
+ &vif_priv->target_ipv6_addrs[1].in6_u,
+ sizeof(struct in6_addr));
+ ns_params->target_ipv6_addr2_valid = 1;
+ }
+ }
+ memcpy(&ns_params->self_addr, vif->addr, ETH_ALEN);
+ ns_params->bss_index = vif_priv->bss_index;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending host_offload_arp failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("host_offload_arp failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+#else
+int wcn36xx_smd_ipv6_ns_offload(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ bool enable)
+{
+ return 0;
+}
+#endif
+
+int wcn36xx_smd_gtk_offload(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ bool enable)
+{
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+ struct wcn36xx_hal_gtk_offload_req_msg msg_body;
+ int ret;
+
+ mutex_lock(&wcn->hal_mutex);
+
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_GTK_OFFLOAD_REQ);
+
+ if (enable) {
+ memcpy(&msg_body.kek, vif_priv->rekey_data.kek, NL80211_KEK_LEN);
+ memcpy(&msg_body.kck, vif_priv->rekey_data.kck, NL80211_KCK_LEN);
+ msg_body.key_replay_counter =
+ le64_to_cpu(vif_priv->rekey_data.replay_ctr);
+ msg_body.bss_index = vif_priv->bss_index;
+ } else {
+ msg_body.flags = WCN36XX_HAL_GTK_OFFLOAD_FLAGS_DISABLE;
+ }
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending host_offload_arp failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("host_offload_arp failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+static int wcn36xx_smd_gtk_offload_get_info_rsp(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif)
+{
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+ struct wcn36xx_hal_gtk_offload_get_info_rsp_msg *rsp;
+ __be64 replay_ctr;
+
+ if (wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len))
+ return -EIO;
+
+ rsp = (struct wcn36xx_hal_gtk_offload_get_info_rsp_msg *)wcn->hal_buf;
+
+ if (rsp->bss_index != vif_priv->bss_index) {
+ wcn36xx_err("gtk_offload_info invalid response bss index %d\n",
+ rsp->bss_index);
+ return -ENOENT;
+ }
+
+ if (vif_priv->rekey_data.replay_ctr != cpu_to_le64(rsp->key_replay_counter)) {
+ replay_ctr = cpu_to_be64(rsp->key_replay_counter);
+ vif_priv->rekey_data.replay_ctr =
+ cpu_to_le64(rsp->key_replay_counter);
+ ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
+ (void *)&replay_ctr, GFP_KERNEL);
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "GTK replay counter increment %llu\n",
+ rsp->key_replay_counter);
+ }
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "gtk offload info status %d last_rekey_status %d "
+ "replay_counter %llu total_rekey_count %d gtk_rekey_count %d "
+ "igtk_rekey_count %d bss_index %d\n",
+ rsp->status, rsp->last_rekey_status,
+ rsp->key_replay_counter, rsp->total_rekey_count,
+ rsp->gtk_rekey_count, rsp->igtk_rekey_count,
+ rsp->bss_index);
+
+ return 0;
+}
+
+int wcn36xx_smd_gtk_offload_get_info(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif)
+{
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+ struct wcn36xx_hal_gtk_offload_get_info_req_msg msg_body;
+ int ret;
+
+ mutex_lock(&wcn->hal_mutex);
+
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_GTK_OFFLOAD_GETINFO_REQ);
+
+ msg_body.bss_index = vif_priv->bss_index;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending gtk_offload_get_info failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("gtk_offload_get_info failed err=%d\n", ret);
+ goto out;
+ }
+ ret = wcn36xx_smd_gtk_offload_get_info_rsp(wcn, vif);
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_wlan_host_suspend_ind(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_wlan_host_suspend_ind_msg msg_body;
+ int ret;
+
+ mutex_lock(&wcn->hal_mutex);
+
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_HOST_SUSPEND_IND);
+ msg_body.configured_mcst_bcst_filter_setting = 0;
+ msg_body.active_session_count = 1;
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = rpmsg_send(wcn->smd_channel, wcn->hal_buf, msg_body.header.len);
+
+ mutex_unlock(&wcn->hal_mutex);
+
+ return ret;
+}
+
+int wcn36xx_smd_host_resume(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_wlan_host_resume_req_msg msg_body;
+ struct wcn36xx_hal_host_resume_rsp_msg *rsp;
+ int ret;
+
+ mutex_lock(&wcn->hal_mutex);
+
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_HOST_RESUME_REQ);
+ msg_body.configured_mcst_bcst_filter_setting = 0;
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending wlan_host_resume failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("wlan_host_resume err=%d\n", ret);
+ goto out;
+ }
+
+ rsp = (struct wcn36xx_hal_host_resume_rsp_msg *)wcn->hal_buf;
+ if (rsp->status)
+ wcn36xx_warn("wlan_host_resume status=%d\n", rsp->status);
+
+out:
+ mutex_unlock(&wcn->hal_mutex);
+
+ return ret;
+}
+
int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
void *buf, int len, void *priv, u32 addr)
{
@@ -2804,6 +3067,10 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
case WCN36XX_HAL_8023_MULTICAST_LIST_RSP:
case WCN36XX_HAL_START_SCAN_OFFLOAD_RSP:
case WCN36XX_HAL_STOP_SCAN_OFFLOAD_RSP:
+ case WCN36XX_HAL_HOST_OFFLOAD_RSP:
+ case WCN36XX_HAL_GTK_OFFLOAD_RSP:
+ case WCN36XX_HAL_GTK_OFFLOAD_GETINFO_RSP:
+ case WCN36XX_HAL_HOST_RESUME_RSP:
memcpy(wcn->hal_buf, buf, len);
wcn->hal_rsp_len = len;
complete(&wcn->hal_rsp_compl);
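All of the helpers added above share one request/response skeleton; the outline below is a simplified sketch (written as if inside smd.c, where INIT_HAL_MSG, PREPARE_HAL_BUF and wcn36xx_smd_send_and_wait are visible), not a verbatim copy of any one function:

static int wcn36xx_smd_simple_req_sketch(struct wcn36xx *wcn)
{
	struct wcn36xx_hal_gtk_offload_get_info_req_msg msg_body;
	int ret;

	mutex_lock(&wcn->hal_mutex);		/* hal_buf is shared */
	INIT_HAL_MSG(msg_body, WCN36XX_HAL_GTK_OFFLOAD_GETINFO_REQ);
	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);

	/* blocks until wcn36xx_smd_rsp_process() completes hal_rsp_compl */
	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
	if (!ret)
		ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf,
						   wcn->hal_rsp_len);
	mutex_unlock(&wcn->hal_mutex);
	return ret;
}

This is also why the response dispatcher gains the four new case labels at the end of the hunk: each new response type must be copied into hal_buf and complete the waiter, or the sender would time out.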
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index 462860572e1f..d8bded03945d 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -146,4 +146,21 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn,
struct ieee80211_vif *vif,
struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp);
+
+int wcn36xx_smd_arp_offload(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ bool enable);
+
+int wcn36xx_smd_ipv6_ns_offload(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ bool enable);
+
+int wcn36xx_smd_gtk_offload(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ bool enable);
+
+int wcn36xx_smd_gtk_offload_get_info(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif);
+
+int wcn36xx_smd_wlan_host_suspend_ind(struct wcn36xx *wcn);
+
+int wcn36xx_smd_host_resume(struct wcn36xx *wcn);
+
#endif /* _SMD_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 71fa9992b118..6121d8a5641a 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -18,6 +18,7 @@
#define _WCN36XX_H_
#include <linux/completion.h>
+#include <linux/in6.h>
#include <linux/printk.h>
#include <linux/spinlock.h>
#include <net/mac80211.h>
@@ -136,6 +137,19 @@ struct wcn36xx_vif {
u8 self_dpu_desc_index;
u8 self_ucast_dpu_sign;
+#if IS_ENABLED(CONFIG_IPV6)
+ /* IPv6 addresses for WoWLAN */
+ struct in6_addr target_ipv6_addrs[WCN36XX_HAL_IPV6_OFFLOAD_ADDR_MAX];
+ unsigned long tentative_addrs[BITS_TO_LONGS(WCN36XX_HAL_IPV6_OFFLOAD_ADDR_MAX)];
+ int num_target_ipv6_addrs;
+#endif
+ /* WoWLAN GTK rekey data */
+ struct {
+ u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
+ __le64 replay_ctr;
+ bool valid;
+ } rekey_data;
+
struct list_head sta_list;
};
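A hypothetical consumer of the new per-vif state (illustration only, not from the patch): the tentative_addrs bitmap exists so the suspend path can skip addresses still undergoing duplicate address detection when it programs NS offload.

#if IS_ENABLED(CONFIG_IPV6)
/* true when slot 'idx' holds a usable (non-tentative) target address */
static bool wcn36xx_ipv6_slot_usable(struct wcn36xx_vif *vif_priv, int idx)
{
	return idx < vif_priv->num_target_ipv6_addrs &&
	       !test_bit(idx, vif_priv->tentative_addrs);
}
#endif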
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 6746fd206d2a..1ff2679963f0 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -2842,9 +2842,7 @@ void wil_p2p_wdev_free(struct wil6210_priv *wil)
wil->radio_wdev = wil->main_ndev->ieee80211_ptr;
mutex_unlock(&wil->vif_mutex);
if (p2p_wdev) {
- wiphy_lock(wil->wiphy);
cfg80211_unregister_wdev(p2p_wdev);
- wiphy_unlock(wil->wiphy);
kfree(p2p_wdev);
}
}
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index d13d081fdcc6..67172385a5d6 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -9,7 +9,7 @@
#include "wil6210.h"
#include "trace.h"
-/**
+/*
* Theory of operation:
*
* There is ISR pseudo-cause register,
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 02ad44997e87..2dc8406736f4 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -224,7 +224,7 @@ struct auth_no_hdr {
u8 led_polarity = LED_POLARITY_LOW_ACTIVE;
/**
- * return AHB address for given firmware internal (linker) address
+ * wmi_addr_remap - return AHB address for a given firmware internal (linker) address
* @x: internal address
* If the address has no valid AHB mapping, return 0
*/
@@ -242,7 +242,7 @@ static u32 wmi_addr_remap(u32 x)
}
/**
- * find fw_mapping entry by section name
+ * wil_find_fw_mapping - find fw_mapping entry by section name
* @section: section name
*
* Return pointer to section or NULL if not found
@@ -260,7 +260,7 @@ struct fw_map *wil_find_fw_mapping(const char *section)
}
/**
- * Check address validity for WMI buffer; remap if needed
+ * wmi_buffer_block - Check address validity for WMI buffer; remap if needed
* @wil: driver data
* @ptr_: internal (linker) fw/ucode address
* @size: if non zero, validate the block does not
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index 665b737fbb0d..cf3ccf4ddfe7 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -4592,58 +4592,11 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev)
{
struct b43_phy_n *nphy = dev->phy.n;
- u8 channel = dev->phy.channel;
- int tone[2] = { 57, 58 };
- u32 noise[2] = { 0x3FF, 0x3FF };
-
B43_WARN_ON(dev->phy.rev < 3);
if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, 1);
- if (nphy->gband_spurwar_en) {
- /* TODO: N PHY Adjust Analog Pfbw (7) */
- if (channel == 11 && b43_is_40mhz(dev)) {
- ; /* TODO: N PHY Adjust Min Noise Var(2, tone, noise)*/
- } else {
- ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
- }
- /* TODO: N PHY Adjust CRS Min Power (0x1E) */
- }
-
- if (nphy->aband_spurwar_en) {
- if (channel == 54) {
- tone[0] = 0x20;
- noise[0] = 0x25F;
- } else if (channel == 38 || channel == 102 || channel == 118) {
- if (0 /* FIXME */) {
- tone[0] = 0x20;
- noise[0] = 0x21F;
- } else {
- tone[0] = 0;
- noise[0] = 0;
- }
- } else if (channel == 134) {
- tone[0] = 0x20;
- noise[0] = 0x21F;
- } else if (channel == 151) {
- tone[0] = 0x10;
- noise[0] = 0x23F;
- } else if (channel == 153 || channel == 161) {
- tone[0] = 0x30;
- noise[0] = 0x23F;
- } else {
- tone[0] = 0;
- noise[0] = 0;
- }
-
- if (!tone[0] && !noise[0]) {
- ; /* TODO: N PHY Adjust Min Noise Var(1, tone, noise)*/
- } else {
- ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
- }
- }
-
if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, 0);
}
diff --git a/drivers/net/wireless/broadcom/b43legacy/dma.c b/drivers/net/wireless/broadcom/b43legacy/dma.c
index 7e2f70c4207c..6869f2bf1bae 100644
--- a/drivers/net/wireless/broadcom/b43legacy/dma.c
+++ b/drivers/net/wireless/broadcom/b43legacy/dma.c
@@ -213,19 +213,6 @@ return dev->dma.tx_ring1;
return ring;
}
-/* Bcm4301-ring to mac80211-queue mapping */
-static inline int txring_to_priority(struct b43legacy_dmaring *ring)
-{
- static const u8 idx_to_prio[] =
- { 3, 2, 1, 0, 4, 5, };
-
-/*FIXME: have only one queue, for now */
-return 0;
-
- return idx_to_prio[ring->index];
-}
-
-
static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
int controller_idx)
{
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index f64ebff68308..eec3af9c3745 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -391,7 +391,7 @@ void b43legacy_tsf_read(struct b43legacy_wldev *dev, u64 *tsf)
* registers, we should take care of register overflows.
* In theory, the whole tsf read process should be atomic.
* We try to be atomic here, by restarting the read process,
- * if any of the high registers changed (overflew).
+ * if any of the high registers changed (overflowed).
*/
if (dev->dev->id.revision >= 3) {
u32 low;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index ce8c102df7b3..633d0ab19031 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -1217,13 +1217,9 @@ static struct sdio_driver brcmf_sdmmc_driver = {
},
};
-void brcmf_sdio_register(void)
+int brcmf_sdio_register(void)
{
- int ret;
-
- ret = sdio_register_driver(&brcmf_sdmmc_driver);
- if (ret)
- brcmf_err("sdio_register_driver failed: %d\n", ret);
+ return sdio_register_driver(&brcmf_sdmmc_driver);
}
void brcmf_sdio_exit(void)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index 08f9d47f2e5c..3f5da3bb6aa5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -275,11 +275,26 @@ void brcmf_bus_add_txhdrlen(struct device *dev, uint len);
#ifdef CONFIG_BRCMFMAC_SDIO
void brcmf_sdio_exit(void);
-void brcmf_sdio_register(void);
+int brcmf_sdio_register(void);
+#else
+static inline void brcmf_sdio_exit(void) { }
+static inline int brcmf_sdio_register(void) { return 0; }
#endif
+
#ifdef CONFIG_BRCMFMAC_USB
void brcmf_usb_exit(void);
-void brcmf_usb_register(void);
+int brcmf_usb_register(void);
+#else
+static inline void brcmf_usb_exit(void) { }
+static inline int brcmf_usb_register(void) { return 0; }
+#endif
+
+#ifdef CONFIG_BRCMFMAC_PCIE
+void brcmf_pcie_exit(void);
+int brcmf_pcie_register(void);
+#else
+static inline void brcmf_pcie_exit(void) { }
+static inline int brcmf_pcie_register(void) { return 0; }
#endif
#endif /* BRCMFMAC_BUS_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index f4405d7861b6..65fb038d88e7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -2767,8 +2767,9 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
struct brcmf_sta_info_le sta_info_le;
u32 sta_flags;
u32 is_tdls_peer;
- s32 total_rssi;
- s32 count_rssi;
+ s32 total_rssi_avg = 0;
+ s32 total_rssi = 0;
+ s32 count_rssi = 0;
int rssi;
u32 i;
@@ -2834,25 +2835,27 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES);
sinfo->rx_bytes = le64_to_cpu(sta_info_le.rx_tot_bytes);
}
- total_rssi = 0;
- count_rssi = 0;
for (i = 0; i < BRCMF_ANT_MAX; i++) {
- if (sta_info_le.rssi[i]) {
- sinfo->chain_signal_avg[count_rssi] =
- sta_info_le.rssi[i];
- sinfo->chain_signal[count_rssi] =
- sta_info_le.rssi[i];
- total_rssi += sta_info_le.rssi[i];
- count_rssi++;
- }
+ if (sta_info_le.rssi[i] == 0 ||
+ sta_info_le.rx_lastpkt_rssi[i] == 0)
+ continue;
+ sinfo->chains |= BIT(count_rssi);
+ sinfo->chain_signal[count_rssi] =
+ sta_info_le.rx_lastpkt_rssi[i];
+ sinfo->chain_signal_avg[count_rssi] =
+ sta_info_le.rssi[i];
+ total_rssi += sta_info_le.rx_lastpkt_rssi[i];
+ total_rssi_avg += sta_info_le.rssi[i];
+ count_rssi++;
}
if (count_rssi) {
- sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
- sinfo->chains = count_rssi;
-
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
- total_rssi /= count_rssi;
- sinfo->signal = total_rssi;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
+ sinfo->filled |=
+ BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL_AVG);
+ sinfo->signal = total_rssi / count_rssi;
+ sinfo->signal_avg = total_rssi_avg / count_rssi;
} else if (test_bit(BRCMF_VIF_STATUS_CONNECTED,
&ifp->vif->sme_state)) {
memset(&scb_val, 0, sizeof(scb_val));
@@ -7442,18 +7445,23 @@ static s32 brcmf_translate_country_code(struct brcmf_pub *drvr, char alpha2[2],
s32 found_index;
int i;
- country_codes = drvr->settings->country_codes;
- if (!country_codes) {
- brcmf_dbg(TRACE, "No country codes configured for device\n");
- return -EINVAL;
- }
-
if ((alpha2[0] == ccreq->country_abbrev[0]) &&
(alpha2[1] == ccreq->country_abbrev[1])) {
brcmf_dbg(TRACE, "Country code already set\n");
return -EAGAIN;
}
+ country_codes = drvr->settings->country_codes;
+ if (!country_codes) {
+ brcmf_dbg(TRACE, "No country codes configured for device, using ISO3166 code and 0 rev\n");
+ memset(ccreq, 0, sizeof(*ccreq));
+ ccreq->country_abbrev[0] = alpha2[0];
+ ccreq->country_abbrev[1] = alpha2[1];
+ ccreq->ccode[0] = alpha2[0];
+ ccreq->ccode[1] = alpha2[1];
+ return 0;
+ }
+
found_index = -1;
for (i = 0; i < country_codes->table_size; i++) {
cc = &country_codes->table[i];
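Worked example for the per-chain reporting above (values invented): with rx_lastpkt_rssi = {-48, -52, 0, 0} and rssi = {-50, -54, 0, 0}, chains 2 and 3 are skipped, sinfo->chains becomes 0x3, chain_signal = {-48, -52}, chain_signal_avg = {-50, -54}, and the summary values work out to signal = (-48 + -52) / 2 = -50 dBm and signal_avg = (-50 + -54) / 2 = -52 dBm.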
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 838b09b23abf..cee1682d2333 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -1518,40 +1518,34 @@ void brcmf_bus_change_state(struct brcmf_bus *bus, enum brcmf_bus_state state)
}
}
-static void brcmf_driver_register(struct work_struct *work)
-{
-#ifdef CONFIG_BRCMFMAC_SDIO
- brcmf_sdio_register();
-#endif
-#ifdef CONFIG_BRCMFMAC_USB
- brcmf_usb_register();
-#endif
-#ifdef CONFIG_BRCMFMAC_PCIE
- brcmf_pcie_register();
-#endif
-}
-static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register);
-
int __init brcmf_core_init(void)
{
- if (!schedule_work(&brcmf_driver_work))
- return -EBUSY;
+ int err;
+ err = brcmf_sdio_register();
+ if (err)
+ return err;
+
+ err = brcmf_usb_register();
+ if (err)
+ goto error_usb_register;
+
+ err = brcmf_pcie_register();
+ if (err)
+ goto error_pcie_register;
return 0;
+
+error_pcie_register:
+ brcmf_usb_exit();
+error_usb_register:
+ brcmf_sdio_exit();
+ return err;
}
void __exit brcmf_core_exit(void)
{
- cancel_work_sync(&brcmf_driver_work);
-
-#ifdef CONFIG_BRCMFMAC_SDIO
brcmf_sdio_exit();
-#endif
-#ifdef CONFIG_BRCMFMAC_USB
brcmf_usb_exit();
-#endif
-#ifdef CONFIG_BRCMFMAC_PCIE
brcmf_pcie_exit();
-#endif
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
index 46c66415b4a6..e290dec9c53d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h
@@ -32,6 +32,13 @@ static const char BRCM_ ## fw_name ## _FIRMWARE_BASENAME[] = \
BRCMF_FW_DEFAULT_PATH fw_base; \
MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH fw_base ".bin")
+/* Firmware and Country Locale Matrix (CLM) files */
+#define BRCMF_FW_CLM_DEF(fw_name, fw_base) \
+static const char BRCM_ ## fw_name ## _FIRMWARE_BASENAME[] = \
+ BRCMF_FW_DEFAULT_PATH fw_base; \
+MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH fw_base ".bin"); \
+MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH fw_base ".clm_blob")
+
#define BRCMF_FW_ENTRY(chipid, mask, name) \
{ chipid, mask, BRCM_ ## name ## _FIRMWARE_BASENAME }
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 34cd8a7401fe..9ac0d8c73d5a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -2037,7 +2037,7 @@ static void brcmf_p2p_get_current_chanspec(struct brcmf_p2p_info *p2p,
}
/**
- * Change a P2P Role.
+ * brcmf_p2p_ifchange - Change a P2P Role.
* @cfg: driver private data for cfg80211 interface.
* @if_type: interface type.
* Returns 0 if success.
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index ad79e3b7e74a..c49dd0c36ae4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -48,8 +48,8 @@ enum brcmf_pcie_state {
BRCMF_FW_DEF(43602, "brcmfmac43602-pcie");
BRCMF_FW_DEF(4350, "brcmfmac4350-pcie");
BRCMF_FW_DEF(4350C, "brcmfmac4350c2-pcie");
-BRCMF_FW_DEF(4356, "brcmfmac4356-pcie");
-BRCMF_FW_DEF(43570, "brcmfmac43570-pcie");
+BRCMF_FW_CLM_DEF(4356, "brcmfmac4356-pcie");
+BRCMF_FW_CLM_DEF(43570, "brcmfmac43570-pcie");
BRCMF_FW_DEF(4358, "brcmfmac4358-pcie");
BRCMF_FW_DEF(4359, "brcmfmac4359-pcie");
BRCMF_FW_DEF(4364, "brcmfmac4364-pcie");
@@ -2140,15 +2140,10 @@ static struct pci_driver brcmf_pciedrvr = {
};
-void brcmf_pcie_register(void)
+int brcmf_pcie_register(void)
{
- int err;
-
brcmf_dbg(PCIE, "Enter\n");
- err = pci_register_driver(&brcmf_pciedrvr);
- if (err)
- brcmf_err(NULL, "PCIE driver registration failed, err=%d\n",
- err);
+ return pci_register_driver(&brcmf_pciedrvr);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h
index d026401d2001..8e6c227e8315 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.h
@@ -11,9 +11,4 @@ struct brcmf_pciedev {
struct brcmf_pciedev_info *devinfo;
};
-
-void brcmf_pcie_exit(void);
-void brcmf_pcie_register(void);
-
-
#endif /* BRCMFMAC_PCIE_H */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 16ed325795a8..97ee9e2e2e35 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -616,18 +616,18 @@ BRCMF_FW_DEF(43362, "brcmfmac43362-sdio");
BRCMF_FW_DEF(4339, "brcmfmac4339-sdio");
BRCMF_FW_DEF(43430A0, "brcmfmac43430a0-sdio");
/* Note the names are not postfixed with a1 for backward compatibility */
-BRCMF_FW_DEF(43430A1, "brcmfmac43430-sdio");
-BRCMF_FW_DEF(43455, "brcmfmac43455-sdio");
+BRCMF_FW_CLM_DEF(43430A1, "brcmfmac43430-sdio");
+BRCMF_FW_CLM_DEF(43455, "brcmfmac43455-sdio");
BRCMF_FW_DEF(43456, "brcmfmac43456-sdio");
-BRCMF_FW_DEF(4354, "brcmfmac4354-sdio");
-BRCMF_FW_DEF(4356, "brcmfmac4356-sdio");
+BRCMF_FW_CLM_DEF(4354, "brcmfmac4354-sdio");
+BRCMF_FW_CLM_DEF(4356, "brcmfmac4356-sdio");
BRCMF_FW_DEF(4359, "brcmfmac4359-sdio");
-BRCMF_FW_DEF(4373, "brcmfmac4373-sdio");
-BRCMF_FW_DEF(43012, "brcmfmac43012-sdio");
+BRCMF_FW_CLM_DEF(4373, "brcmfmac4373-sdio");
+BRCMF_FW_CLM_DEF(43012, "brcmfmac43012-sdio");
/* firmware config files */
-MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcm/brcmfmac*-sdio.*.txt");
-MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcm/brcmfmac*-pcie.*.txt");
+MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-sdio.*.txt");
+MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.*.txt");
static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143),
@@ -1291,7 +1291,7 @@ static void brcmf_sdio_free_glom(struct brcmf_sdio *bus)
}
}
-/**
+/*
* brcmfmac sdio bus specific header
* This is the lowest layer header wrapped on the packets transmitted between
* host and WiFi dongle which contains information needed for SDIO core and
@@ -4162,7 +4162,6 @@ static int brcmf_sdio_bus_reset(struct device *dev)
if (ret) {
brcmf_err("Failed to probe after sdio device reset: ret %d\n",
ret);
- brcmf_sdiod_remove(sdiodev);
}
return ret;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 586f4dfc638b..9fb68c2dc7e3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -1584,12 +1584,8 @@ void brcmf_usb_exit(void)
usb_deregister(&brcmf_usbdrvr);
}
-void brcmf_usb_register(void)
+int brcmf_usb_register(void)
{
- int ret;
-
brcmf_dbg(USB, "Enter\n");
- ret = usb_register(&brcmf_usbdrvr);
- if (ret)
- brcmf_err("usb_register failed %d\n", ret);
+ return usb_register(&brcmf_usbdrvr);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c
index 53365977bfd6..2084b506a450 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c
@@ -531,9 +531,6 @@ void ai_detach(struct si_pub *sih)
sii = container_of(sih, struct si_info, pub);
- if (sii == NULL)
- return;
-
kfree(sii);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 39f3af2d0439..eadac0f5590f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -1220,6 +1220,7 @@ static int brcms_bcma_probe(struct bcma_device *pdev)
{
struct brcms_info *wl;
struct ieee80211_hw *hw;
+ int ret;
dev_info(&pdev->dev, "mfg %x core %x rev %d class %d irq %d\n",
pdev->id.manuf, pdev->id.id, pdev->id.rev, pdev->id.class,
@@ -1244,11 +1245,16 @@ static int brcms_bcma_probe(struct bcma_device *pdev)
wl = brcms_attach(pdev);
if (!wl) {
pr_err("%s: brcms_attach failed!\n", __func__);
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_free_ieee80211;
}
brcms_led_register(wl);
return 0;
+
+err_free_ieee80211:
+ ieee80211_free_hw(hw);
+ return ret;
}
static int brcms_suspend(struct bcma_device *pdev)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index 763e0ec583d7..26de1bd7fee9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -6607,7 +6607,8 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
rts->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
IEEE80211_STYPE_RTS);
- memcpy(&rts->ra, &h->addr1, 2 * ETH_ALEN);
+ memcpy(&rts->ra, &h->addr1, ETH_ALEN);
+ memcpy(&rts->ta, &h->addr2, ETH_ALEN);
}
/* mainrate
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.h
index aa4ab53bf634..af86c7fc5112 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/stf.h
@@ -29,7 +29,6 @@ void brcms_c_stf_ss_update(struct brcms_c_info *wlc, struct brcms_band *band);
void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
int brcms_c_stf_txchain_set(struct brcms_c_info *wlc, s32 int_val, bool force);
bool brcms_c_stf_stbc_rx_set(struct brcms_c_info *wlc, s32 int_val);
-void brcms_c_stf_phy_txant_upd(struct brcms_c_info *wlc);
void brcms_c_stf_phy_chain_calc(struct brcms_c_info *wlc);
u16 brcms_c_stf_phytxchain_sel(struct brcms_c_info *wlc, u32 rspec);
u16 brcms_c_stf_d11hdrs_phyctl_txant(struct brcms_c_info *wlc, u32 rspec);
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 23fbddd0c1f8..47eb89b773cf 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -5356,7 +5356,7 @@ struct ipw2100_wep_key {
#define WEP_STR_128(x) x[0],x[1],x[2],x[3],x[4],x[5],x[6],x[7],x[8],x[9],x[10]
/**
- * Set a the wep key
+ * ipw2100_set_key() - Set the WEP key
*
* @priv: struct to work on
* @idx: index of the key we want to set
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index ee4cf3437e28..64fc5e410864 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -941,7 +941,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
wdev->netdev = dev;
priv->dev = dev;
- dev->netdev_ops = &lbs_netdev_ops;
+ dev->netdev_ops = &lbs_netdev_ops;
dev->watchdog_timeo = 5 * HZ;
dev->ethtool_ops = &lbs_ethtool_ops;
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c
index f5b78257d551..6cbba84989b8 100644
--- a/drivers/net/wireless/marvell/libertas/mesh.c
+++ b/drivers/net/wireless/marvell/libertas/mesh.c
@@ -151,13 +151,13 @@ static uint16_t lbs_mesh_get_channel(struct lbs_private *priv)
*/
/**
- * lbs_anycast_get - Get function for sysfs attribute anycast_mask
+ * anycast_mask_show - Get function for sysfs attribute anycast_mask
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer where data will be returned
*/
-static ssize_t lbs_anycast_get(struct device *dev,
- struct device_attribute *attr, char * buf)
+static ssize_t anycast_mask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct lbs_private *priv = to_net_dev(dev)->ml_priv;
struct cmd_ds_mesh_access mesh_access;
@@ -173,14 +173,15 @@ static ssize_t lbs_anycast_get(struct device *dev,
}
/**
- * lbs_anycast_set - Set function for sysfs attribute anycast_mask
+ * anycast_mask_store - Set function for sysfs attribute anycast_mask
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer that contains new attribute value
* @count: size of buffer
*/
-static ssize_t lbs_anycast_set(struct device *dev,
- struct device_attribute *attr, const char * buf, size_t count)
+static ssize_t anycast_mask_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct lbs_private *priv = to_net_dev(dev)->ml_priv;
struct cmd_ds_mesh_access mesh_access;
@@ -199,13 +200,13 @@ static ssize_t lbs_anycast_set(struct device *dev,
}
/**
- * lbs_prb_rsp_limit_get - Get function for sysfs attribute prb_rsp_limit
+ * prb_rsp_limit_show - Get function for sysfs attribute prb_rsp_limit
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer where data will be returned
*/
-static ssize_t lbs_prb_rsp_limit_get(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t prb_rsp_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct lbs_private *priv = to_net_dev(dev)->ml_priv;
struct cmd_ds_mesh_access mesh_access;
@@ -225,14 +226,15 @@ static ssize_t lbs_prb_rsp_limit_get(struct device *dev,
}
/**
- * lbs_prb_rsp_limit_set - Set function for sysfs attribute prb_rsp_limit
+ * prb_rsp_limit_store - Set function for sysfs attribute prb_rsp_limit
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer that contains new attribute value
* @count: size of buffer
*/
-static ssize_t lbs_prb_rsp_limit_set(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t prb_rsp_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct lbs_private *priv = to_net_dev(dev)->ml_priv;
struct cmd_ds_mesh_access mesh_access;
@@ -259,27 +261,28 @@ static ssize_t lbs_prb_rsp_limit_set(struct device *dev,
}
/**
- * lbs_mesh_get - Get function for sysfs attribute mesh
+ * lbs_mesh_show - Get function for sysfs attribute mesh
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer where data will be returned
*/
-static ssize_t lbs_mesh_get(struct device *dev,
- struct device_attribute *attr, char * buf)
+static ssize_t lbs_mesh_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct lbs_private *priv = to_net_dev(dev)->ml_priv;
return snprintf(buf, 5, "0x%X\n", !!priv->mesh_dev);
}
/**
- * lbs_mesh_set - Set function for sysfs attribute mesh
+ * lbs_mesh_store - Set function for sysfs attribute mesh
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer that contains new attribute value
* @count: size of buffer
*/
-static ssize_t lbs_mesh_set(struct device *dev,
- struct device_attribute *attr, const char * buf, size_t count)
+static ssize_t lbs_mesh_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct lbs_private *priv = to_net_dev(dev)->ml_priv;
int enable;
@@ -301,20 +304,19 @@ static ssize_t lbs_mesh_set(struct device *dev,
* lbs_mesh attribute to be exported per ethX interface
* through sysfs (/sys/class/net/ethX/lbs_mesh)
*/
-static DEVICE_ATTR(lbs_mesh, 0644, lbs_mesh_get, lbs_mesh_set);
+static DEVICE_ATTR_RW(lbs_mesh);
/*
* anycast_mask attribute to be exported per mshX interface
* through sysfs (/sys/class/net/mshX/anycast_mask)
*/
-static DEVICE_ATTR(anycast_mask, 0644, lbs_anycast_get, lbs_anycast_set);
+static DEVICE_ATTR_RW(anycast_mask);
/*
* prb_rsp_limit attribute to be exported per mshX interface
* through sysfs (/sys/class/net/mshX/prb_rsp_limit)
*/
-static DEVICE_ATTR(prb_rsp_limit, 0644, lbs_prb_rsp_limit_get,
- lbs_prb_rsp_limit_set);
+static DEVICE_ATTR_RW(prb_rsp_limit);
static struct attribute *lbs_mesh_sysfs_entries[] = {
&dev_attr_anycast_mask.attr,
@@ -351,13 +353,13 @@ static int mesh_get_default_parameters(struct device *dev,
}
/**
- * bootflag_get - Get function for sysfs attribute bootflag
+ * bootflag_show - Get function for sysfs attribute bootflag
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer where data will be returned
*/
-static ssize_t bootflag_get(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t bootflag_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct mrvl_mesh_defaults defs;
int ret;
@@ -371,14 +373,14 @@ static ssize_t bootflag_get(struct device *dev,
}
/**
- * bootflag_set - Set function for sysfs attribute bootflag
+ * bootflag_store - Set function for sysfs attribute bootflag
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer that contains new attribute value
* @count: size of buffer
*/
-static ssize_t bootflag_set(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t bootflag_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct lbs_private *priv = to_net_dev(dev)->ml_priv;
struct cmd_ds_mesh_config cmd;
@@ -401,13 +403,13 @@ static ssize_t bootflag_set(struct device *dev, struct device_attribute *attr,
}
/**
- * boottime_get - Get function for sysfs attribute boottime
+ * boottime_show - Get function for sysfs attribute boottime
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer where data will be returned
*/
-static ssize_t boottime_get(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t boottime_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct mrvl_mesh_defaults defs;
int ret;
@@ -421,14 +423,15 @@ static ssize_t boottime_get(struct device *dev,
}
/**
- * boottime_set - Set function for sysfs attribute boottime
+ * boottime_store - Set function for sysfs attribute boottime
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer that contains new attribute value
* @count: size of buffer
*/
-static ssize_t boottime_set(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t boottime_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct lbs_private *priv = to_net_dev(dev)->ml_priv;
struct cmd_ds_mesh_config cmd;
@@ -460,13 +463,13 @@ static ssize_t boottime_set(struct device *dev,
}
/**
- * channel_get - Get function for sysfs attribute channel
+ * channel_show - Get function for sysfs attribute channel
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer where data will be returned
*/
-static ssize_t channel_get(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t channel_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct mrvl_mesh_defaults defs;
int ret;
@@ -480,14 +483,14 @@ static ssize_t channel_get(struct device *dev,
}
/**
- * channel_set - Set function for sysfs attribute channel
+ * channel_store - Set function for sysfs attribute channel
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer that contains new attribute value
* @count: size of buffer
*/
-static ssize_t channel_set(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t channel_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct lbs_private *priv = to_net_dev(dev)->ml_priv;
struct cmd_ds_mesh_config cmd;
@@ -510,13 +513,13 @@ static ssize_t channel_set(struct device *dev, struct device_attribute *attr,
}
/**
- * mesh_id_get - Get function for sysfs attribute mesh_id
+ * mesh_id_show - Get function for sysfs attribute mesh_id
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer where data will be returned
*/
-static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t mesh_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct mrvl_mesh_defaults defs;
int ret;
@@ -539,14 +542,14 @@ static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr,
}
/**
- * mesh_id_set - Set function for sysfs attribute mesh_id
+ * mesh_id_store - Set function for sysfs attribute mesh_id
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer that contains new attribute value
* @count: size of buffer
*/
-static ssize_t mesh_id_set(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t mesh_id_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct cmd_ds_mesh_config cmd;
struct mrvl_mesh_defaults defs;
@@ -585,13 +588,14 @@ static ssize_t mesh_id_set(struct device *dev, struct device_attribute *attr,
}
/**
- * protocol_id_get - Get function for sysfs attribute protocol_id
+ * protocol_id_show - Get function for sysfs attribute protocol_id
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer where data will be returned
*/
-static ssize_t protocol_id_get(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t protocol_id_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct mrvl_mesh_defaults defs;
int ret;
@@ -605,14 +609,15 @@ static ssize_t protocol_id_get(struct device *dev,
}
/**
- * protocol_id_set - Set function for sysfs attribute protocol_id
+ * protocol_id_store - Set function for sysfs attribute protocol_id
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer that contains new attribute value
* @count: size of buffer
*/
-static ssize_t protocol_id_set(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t protocol_id_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct cmd_ds_mesh_config cmd;
struct mrvl_mesh_defaults defs;
@@ -646,13 +651,13 @@ static ssize_t protocol_id_set(struct device *dev,
}
/**
- * metric_id_get - Get function for sysfs attribute metric_id
+ * metric_id_show - Get function for sysfs attribute metric_id
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer where data will be returned
*/
-static ssize_t metric_id_get(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t metric_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct mrvl_mesh_defaults defs;
int ret;
@@ -666,14 +671,15 @@ static ssize_t metric_id_get(struct device *dev,
}
/**
- * metric_id_set - Set function for sysfs attribute metric_id
+ * metric_id_store - Set function for sysfs attribute metric_id
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer that contains new attribute value
* @count: size of buffer
*/
-static ssize_t metric_id_set(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t metric_id_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct cmd_ds_mesh_config cmd;
struct mrvl_mesh_defaults defs;
@@ -707,13 +713,13 @@ static ssize_t metric_id_set(struct device *dev, struct device_attribute *attr,
}
/**
- * capability_get - Get function for sysfs attribute capability
+ * capability_show - Get function for sysfs attribute capability
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer where data will be returned
*/
-static ssize_t capability_get(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t capability_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct mrvl_mesh_defaults defs;
int ret;
@@ -727,14 +733,15 @@ static ssize_t capability_get(struct device *dev,
}
/**
- * capability_set - Set function for sysfs attribute capability
+ * capability_store - Set function for sysfs attribute capability
* @dev: the &struct device
* @attr: device attributes
* @buf: buffer that contains new attribute value
* @count: size of buffer
*/
-static ssize_t capability_set(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t capability_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct cmd_ds_mesh_config cmd;
struct mrvl_mesh_defaults defs;
@@ -768,13 +775,13 @@ static ssize_t capability_set(struct device *dev, struct device_attribute *attr,
}
-static DEVICE_ATTR(bootflag, 0644, bootflag_get, bootflag_set);
-static DEVICE_ATTR(boottime, 0644, boottime_get, boottime_set);
-static DEVICE_ATTR(channel, 0644, channel_get, channel_set);
-static DEVICE_ATTR(mesh_id, 0644, mesh_id_get, mesh_id_set);
-static DEVICE_ATTR(protocol_id, 0644, protocol_id_get, protocol_id_set);
-static DEVICE_ATTR(metric_id, 0644, metric_id_get, metric_id_set);
-static DEVICE_ATTR(capability, 0644, capability_get, capability_set);
+static DEVICE_ATTR_RW(bootflag);
+static DEVICE_ATTR_RW(boottime);
+static DEVICE_ATTR_RW(channel);
+static DEVICE_ATTR_RW(mesh_id);
+static DEVICE_ATTR_RW(protocol_id);
+static DEVICE_ATTR_RW(metric_id);
+static DEVICE_ATTR_RW(capability);
static struct attribute *boot_opts_attrs[] = {
&dev_attr_bootflag.attr,
@@ -801,24 +808,6 @@ static const struct attribute_group mesh_ie_group = {
.attrs = mesh_ie_attrs,
};
-static void lbs_persist_config_init(struct net_device *dev)
-{
- int ret;
- ret = sysfs_create_group(&(dev->dev.kobj), &boot_opts_group);
- if (ret)
- pr_err("failed to create boot_opts_group.\n");
-
- ret = sysfs_create_group(&(dev->dev.kobj), &mesh_ie_group);
- if (ret)
- pr_err("failed to create mesh_ie_group.\n");
-}
-
-static void lbs_persist_config_remove(struct net_device *dev)
-{
- sysfs_remove_group(&(dev->dev.kobj), &boot_opts_group);
- sysfs_remove_group(&(dev->dev.kobj), &mesh_ie_group);
-}
-
/***************************************************************************
* Initializing and starting, stopping mesh
@@ -1014,6 +1003,10 @@ static int lbs_add_mesh(struct lbs_private *priv)
SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);
mesh_dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
+ mesh_dev->sysfs_groups[0] = &lbs_mesh_attr_group;
+ mesh_dev->sysfs_groups[1] = &boot_opts_group;
+ mesh_dev->sysfs_groups[2] = &mesh_ie_group;
+
/* Register virtual mesh interface */
ret = register_netdev(mesh_dev);
if (ret) {
@@ -1021,19 +1014,10 @@ static int lbs_add_mesh(struct lbs_private *priv)
goto err_free_netdev;
}
- ret = sysfs_create_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
- if (ret)
- goto err_unregister;
-
- lbs_persist_config_init(mesh_dev);
-
/* Everything successful */
ret = 0;
goto done;
-err_unregister:
- unregister_netdev(mesh_dev);
-
err_free_netdev:
free_netdev(mesh_dev);
@@ -1054,8 +1038,6 @@ void lbs_remove_mesh(struct lbs_private *priv)
netif_stop_queue(mesh_dev);
netif_carrier_off(mesh_dev);
- sysfs_remove_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group);
- lbs_persist_config_remove(mesh_dev);
unregister_netdev(mesh_dev);
priv->mesh_dev = NULL;
kfree(mesh_dev->ieee80211_ptr);
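Context for the _show/_store renames throughout this file: DEVICE_ATTR_RW(foo) only works when the accessors are named foo_show()/foo_store(), because the macro pastes those suffixes onto the attribute name. Roughly (simplified from include/linux/device.h):

#define DEVICE_ATTR_RW(_name)						\
	struct device_attribute dev_attr_##_name =			\
		__ATTR(_name, 0644, _name##_show, _name##_store)

The sysfs_groups[] assignment before register_netdev() then lets the netdev core create and remove all three attribute groups together with the device, which is what makes the open-coded sysfs_create_group()/sysfs_remove_group() helpers and their error unwinding removable.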
diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
index a92916dc81a9..fe0a69e804d8 100644
--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
@@ -48,7 +48,7 @@ static int if_usb_submit_rx_urb(struct if_usb_card *cardp);
static int if_usb_reset_device(struct lbtf_private *priv);
/**
- * if_usb_wrike_bulk_callback - call back to handle URB status
+ * if_usb_write_bulk_callback - call back to handle URB status
*
* @urb: pointer to urb structure
*/
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 977acab0360a..03fe62837557 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -514,10 +514,36 @@ EXPORT_SYMBOL_GPL(mt76_free_device);
static void mt76_rx_release_amsdu(struct mt76_phy *phy, enum mt76_rxq_id q)
{
struct sk_buff *skb = phy->rx_amsdu[q].head;
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct mt76_dev *dev = phy->dev;
phy->rx_amsdu[q].head = NULL;
phy->rx_amsdu[q].tail = NULL;
+
+ /*
+ * Validate that the A-MSDU has a proper first subframe.
+ * A single MSDU can be parsed as A-MSDU when the unauthenticated A-MSDU
+ * flag of the QoS header gets flipped. In such cases, the first
+ * subframe has a LLC/SNAP header in the location of the destination
+ * address.
+ */
+ if (skb_shinfo(skb)->frag_list) {
+ int offset = 0;
+
+ if (!(status->flag & RX_FLAG_8023)) {
+ offset = ieee80211_get_hdrlen_from_skb(skb);
+
+ if ((status->flag &
+ (RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED)) ==
+ RX_FLAG_DECRYPTED)
+ offset += 8;
+ }
+
+ if (ether_addr_equal(skb->data + offset, rfc1042_header)) {
+ dev_kfree_skb(skb);
+ return;
+ }
+ }
__skb_queue_tail(&dev->rx_skb[q], skb);
}
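The offsets above, reconstructed from mac80211 conventions rather than stated in the patch: for frames not already decapsulated to 802.3 (no RX_FLAG_8023), the first subframe's destination address begins right after the 802.11 header, and if the hardware decrypted the frame without stripping the IV (RX_FLAG_DECRYPTED set, RX_FLAG_IV_STRIPPED clear) the 8-byte CCMP header still sits in between, hence the extra 8. If the bytes at that position equal the RFC 1042 LLC/SNAP header (aa:aa:03:00:00:00), the frame is really a single MSDU whose A-MSDU-present bit was flipped, so it is dropped rather than queued.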
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index 86341d1f82f3..d20f05a7717d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -510,7 +510,6 @@ void mt7615_init_device(struct mt7615_dev *dev)
mutex_init(&dev->pm.mutex);
init_waitqueue_head(&dev->pm.wait);
spin_lock_init(&dev->pm.txq_lock);
- set_bit(MT76_STATE_PM, &dev->mphy.state);
INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7615_mac_work);
INIT_DELAYED_WORK(&dev->phy.scan_work, mt7615_scan_work);
INIT_DELAYED_WORK(&dev->coredump.work, mt7615_coredump_work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index f81a17d56008..e2dcfee6be81 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -1912,8 +1912,9 @@ void mt7615_pm_wake_work(struct work_struct *work)
napi_schedule(&dev->mt76.napi[i]);
mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
- ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
- MT7615_WATCHDOG_TIME);
+ if (test_bit(MT76_STATE_RUNNING, &mphy->state))
+ ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
+ MT7615_WATCHDOG_TIME);
}
ieee80211_wake_queues(mphy->hw);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
index 17fe4187d1de..d1be78b0711c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
@@ -51,16 +51,13 @@ mt7663s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
return ret;
}
-static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
+static int __mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
{
struct sdio_func *func = dev->mt76.sdio.func;
struct mt76_phy *mphy = &dev->mt76.phy;
u32 status;
int ret;
- if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
- goto out;
-
sdio_claim_host(func);
sdio_writel(func, WHLPCR_FW_OWN_REQ_CLR, MCR_WHLPCR, NULL);
@@ -76,13 +73,21 @@ static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
}
sdio_release_host(func);
-
-out:
dev->pm.last_activity = jiffies;
return 0;
}
+static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+
+ if (test_and_clear_bit(MT76_STATE_PM, &mphy->state))
+ return __mt7663s_mcu_drv_pmctrl(dev);
+
+ return 0;
+}
+
static int mt7663s_mcu_fw_pmctrl(struct mt7615_dev *dev)
{
struct sdio_func *func = dev->mt76.sdio.func;
@@ -123,7 +128,7 @@ int mt7663s_mcu_init(struct mt7615_dev *dev)
struct mt7615_mcu_ops *mcu_ops;
int ret;
- ret = mt7663s_mcu_drv_pmctrl(dev);
+ ret = __mt7663s_mcu_drv_pmctrl(dev);
if (ret)
return ret;
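The refactor above is the common checked/unchecked helper split: the __-prefixed variant performs the wake sequence unconditionally (for the init path), while the wrapper consults the PM state bit first. A plain-C sketch of the shape, with illustrative names:

#include <stdbool.h>

struct pm_ctx {
	bool in_pm;	/* stands in for the MT76_STATE_PM bit */
};

/* Unchecked variant: always performs the wake sequence. */
static int __drv_pmctrl(struct pm_ctx *ctx)
{
	/* ...bus writes that wake the device would go here... */
	return 0;
}

/* Checked variant: wakes the device only when it was actually asleep. */
static int drv_pmctrl(struct pm_ctx *ctx)
{
	if (ctx->in_pm) {
		ctx->in_pm = false;
		return __drv_pmctrl(ctx);
	}
	return 0;
}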
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/soc.c b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c
index be9a69fe1b38..f13d1b418742 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/soc.c
@@ -31,7 +31,6 @@ int mt7622_wmac_init(struct mt7615_dev *dev)
static int mt7622_wmac_probe(struct platform_device *pdev)
{
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
void __iomem *mem_base;
int irq;
@@ -39,7 +38,7 @@ static int mt7622_wmac_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- mem_base = devm_ioremap_resource(&pdev->dev, res);
+ mem_base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(mem_base))
return PTR_ERR(mem_base);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
index c55698f9c49a..028ff432d811 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
@@ -55,10 +55,7 @@ int mt7663u_mcu_init(struct mt7615_dev *dev)
dev->mt76.mcu_ops = &mt7663u_mcu_ops,
- /* usb does not support runtime-pm */
- clear_bit(MT76_STATE_PM, &dev->mphy.state);
mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN);
-
if (test_and_clear_bit(MT76_STATE_POWER_OFF, &dev->mphy.state)) {
mt7615_mcu_restart(&dev->mt76);
if (!mt76_poll_msec(dev, MT_CONN_ON_MISC,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index fe0ab5e5ff81..619561606f96 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -721,6 +721,10 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
phy->phy_type = mt76_connac_get_phy_mode_v2(mphy, vif, band, sta);
phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates);
phy->rcpi = rcpi;
+ phy->ampdu = FIELD_PREP(IEEE80211_HT_AMPDU_PARM_FACTOR,
+ sta->ht_cap.ampdu_factor) |
+ FIELD_PREP(IEEE80211_HT_AMPDU_PARM_DENSITY,
+ sta->ht_cap.ampdu_density);
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra_info));
ra_info = (struct sta_rec_ra_info *)tlv;
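For readers unfamiliar with FIELD_PREP(): it shifts a value into the bit positions selected by a mask. A rough functional equivalent is sketched below (the kernel macro additionally performs compile-time checks):

/* mask & -mask isolates the lowest set bit of the mask, i.e. 1 << shift */
#define MY_FIELD_PREP(mask, val) \
	((((unsigned long)(val)) * ((mask) & -(mask))) & (mask))

/* Example: MY_FIELD_PREP(0x1c, 3) == 0x0c - value 3 placed in bits 4:2 */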
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 5847f943e8da..b795e7245c07 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -87,7 +87,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
.reconfig_complete = mt76x02_reconfig_complete,
};
-static int mt76x0e_register_device(struct mt76x02_dev *dev)
+static int mt76x0e_init_hardware(struct mt76x02_dev *dev, bool resume)
{
int err;
@@ -100,9 +100,11 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
if (err < 0)
return err;
- err = mt76x02_dma_init(dev);
- if (err < 0)
- return err;
+ if (!resume) {
+ err = mt76x02_dma_init(dev);
+ if (err < 0)
+ return err;
+ }
err = mt76x0_init_hardware(dev);
if (err < 0)
@@ -123,6 +125,17 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev)
mt76_clear(dev, 0x110, BIT(9));
mt76_set(dev, MT_MAX_LEN_CFG, BIT(13));
+ return 0;
+}
+
+static int mt76x0e_register_device(struct mt76x02_dev *dev)
+{
+ int err;
+
+ err = mt76x0e_init_hardware(dev, false);
+ if (err < 0)
+ return err;
+
err = mt76x0_register_device(dev);
if (err < 0)
return err;
@@ -167,6 +180,8 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
+ mt76_pci_disable_aspm(pdev);
+
mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt76x0e_ops,
&drv_ops);
if (!mdev)
@@ -220,6 +235,60 @@ mt76x0e_remove(struct pci_dev *pdev)
mt76_free_device(mdev);
}
+#ifdef CONFIG_PM
+static int mt76x0e_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ int i;
+
+ mt76_worker_disable(&mdev->tx_worker);
+ for (i = 0; i < ARRAY_SIZE(mdev->phy.q_tx); i++)
+ mt76_queue_tx_cleanup(dev, mdev->phy.q_tx[i], true);
+ for (i = 0; i < ARRAY_SIZE(mdev->q_mcu); i++)
+ mt76_queue_tx_cleanup(dev, mdev->q_mcu[i], true);
+ napi_disable(&mdev->tx_napi);
+
+ mt76_for_each_q_rx(mdev, i)
+ napi_disable(&mdev->napi[i]);
+
+ mt76x02_dma_disable(dev);
+ mt76x02_mcu_cleanup(dev);
+ mt76x0_chip_onoff(dev, false, false);
+
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), true);
+ pci_save_state(pdev);
+
+ return pci_set_power_state(pdev, pci_choose_state(pdev, state));
+}
+
+static int mt76x0e_resume(struct pci_dev *pdev)
+{
+ struct mt76_dev *mdev = pci_get_drvdata(pdev);
+ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
+ int err, i;
+
+ err = pci_set_power_state(pdev, PCI_D0);
+ if (err)
+ return err;
+
+ pci_restore_state(pdev);
+
+ mt76_worker_enable(&mdev->tx_worker);
+
+ mt76_for_each_q_rx(mdev, i) {
+ mt76_queue_rx_reset(dev, i);
+ napi_enable(&mdev->napi[i]);
+ napi_schedule(&mdev->napi[i]);
+ }
+
+ napi_enable(&mdev->tx_napi);
+ napi_schedule(&mdev->tx_napi);
+
+ return mt76x0e_init_hardware(dev, true);
+}
+#endif /* CONFIG_PM */
+
static const struct pci_device_id mt76x0e_device_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7610) },
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7630) },
@@ -237,6 +306,10 @@ static struct pci_driver mt76x0e_driver = {
.id_table = mt76x0e_device_table,
.probe = mt76x0e_probe,
.remove = mt76x0e_remove,
+#ifdef CONFIG_PM
+ .suspend = mt76x0e_suspend,
+ .resume = mt76x0e_resume,
+#endif /* CONFIG_PM */
};
module_pci_driver(mt76x0e_driver);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index fe28bf4050c4..1763ea0614ce 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -76,8 +76,8 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
struct wiphy *wiphy = hw->wiphy;
hw->queues = 4;
- hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
- hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
+ hw->max_rx_aggregation_subframes = 64;
+ hw->max_tx_aggregation_subframes = 128;
hw->radiotap_timestamp.units_pos =
IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index 214bd1859792..decf2d5f0ce3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -1404,8 +1404,9 @@ void mt7921_pm_wake_work(struct work_struct *work)
napi_schedule(&dev->mt76.napi[i]);
mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
mt7921_tx_cleanup(dev);
- ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
- MT7921_WATCHDOG_TIME);
+ if (test_bit(MT76_STATE_RUNNING, &mphy->state))
+ ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
+ MT7921_WATCHDOG_TIME);
}
ieee80211_wake_queues(mphy->hw);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index f4c27aa41048..97a0ef331ac3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -74,8 +74,7 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
else if (band == NL80211_BAND_5GHZ)
he_cap_elem->phy_cap_info[0] =
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
he_cap_elem->phy_cap_info[1] =
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index 5f3d56d570a5..67dc4b4cc094 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -402,20 +402,22 @@ static void
mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb,
u16 wlan_idx)
{
- struct mt7921_mcu_wlan_info_event *wtbl_info =
- (struct mt7921_mcu_wlan_info_event *)(skb->data);
- struct rate_info rate = {};
- u8 curr_idx = wtbl_info->rate_info.rate_idx;
- u16 curr = le16_to_cpu(wtbl_info->rate_info.rate[curr_idx]);
- struct mt7921_mcu_peer_cap peer = wtbl_info->peer_cap;
+ struct mt7921_mcu_wlan_info_event *wtbl_info;
struct mt76_phy *mphy = &dev->mphy;
struct mt7921_sta_stats *stats;
+ struct rate_info rate = {};
struct mt7921_sta *msta;
struct mt76_wcid *wcid;
+ u8 idx;
if (wlan_idx >= MT76_N_WCIDS)
return;
+ wtbl_info = (struct mt7921_mcu_wlan_info_event *)skb->data;
+ idx = wtbl_info->rate_info.rate_idx;
+ if (idx >= ARRAY_SIZE(wtbl_info->rate_info.rate))
+ return;
+
rcu_read_lock();
wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]);
@@ -426,7 +428,8 @@ mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb,
stats = &msta->stats;
/* current rate */
- mt7921_mcu_tx_rate_parse(mphy, &peer, &rate, curr);
+ mt7921_mcu_tx_rate_parse(mphy, &wtbl_info->peer_cap, &rate,
+ le16_to_cpu(wtbl_info->rate_info.rate[idx]));
stats->tx_rate = rate;
out:
rcu_read_unlock();
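The reordering above is a validate-before-use fix: the event payload is only cast and indexed after both wlan_idx and the firmware-supplied rate index pass their bounds checks. A generic sketch of the shape, with illustrative names:

struct report {
	unsigned char rate_idx;
	unsigned short rate[8];
};

/* Return the selected rate, or 0 if the firmware-supplied index is bogus. */
static unsigned short report_current_rate(const struct report *r)
{
	if (r->rate_idx >= sizeof(r->rate) / sizeof(r->rate[0]))
		return 0;	/* reject out-of-range index from firmware */
	return r->rate[r->rate_idx];
}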
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index 1472e9843896..8e9aaf03a6fa 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -164,7 +164,7 @@ static int wilc_bus_probe(struct spi_device *spi)
wilc->bus_data = spi_priv;
wilc->dev_irq_num = spi->irq;
- wilc->rtc_clk = devm_clk_get(&spi->dev, "rtc_clk");
+ wilc->rtc_clk = devm_clk_get(&spi->dev, "rtc");
if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER) {
kfree(spi_priv);
return -EPROBE_DEFER;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 5264b0a1f098..deddb0afd312 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -1037,7 +1037,7 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi,
* FIXME: if we do not find matching entry, we tell that frame was
* posted without any retries. We need to find a way to fix that
* and provide retry count.
- */
+ */
if (unlikely((aggr == 1 && ampdu == 0 && real_mcs != mcs)) || !match) {
rt2800_rate_from_status(skbdesc, status, rt2x00dev->curr_band);
mcs = real_mcs;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index d4d389e8f1b4..fb1d31b2d52a 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -446,8 +446,9 @@ static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev,
* Beacons and probe responses require the tsf timestamp
* to be inserted into the frame.
*/
- if (ieee80211_is_beacon(hdr->frame_control) ||
- ieee80211_is_probe_resp(hdr->frame_control))
+ if ((ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control)) &&
+ !(tx_info->flags & IEEE80211_TX_CTL_INJECTED))
__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) &&
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index 2a7ee90a3f54..ffd150ec181f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -440,9 +440,14 @@ static void rtl_watchdog_wq_callback(struct work_struct *work);
static void rtl_fwevt_wq_callback(struct work_struct *work);
static void rtl_c2hcmd_wq_callback(struct work_struct *work);
-static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
+static int _rtl_init_deferred_work(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct workqueue_struct *wq;
+
+ wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
+ if (!wq)
+ return -ENOMEM;
/* <1> timer */
timer_setup(&rtlpriv->works.watchdog_timer,
@@ -451,11 +456,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
rtl_easy_concurrent_retrytimer_callback, 0);
/* <2> work queue */
rtlpriv->works.hw = hw;
- rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
- if (unlikely(!rtlpriv->works.rtl_wq)) {
- pr_err("Failed to allocate work queue\n");
- return;
- }
+ rtlpriv->works.rtl_wq = wq;
INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
rtl_watchdog_wq_callback);
@@ -466,6 +467,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
rtl_swlps_rfon_wq_callback);
INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq, rtl_fwevt_wq_callback);
INIT_DELAYED_WORK(&rtlpriv->works.c2hcmd_wq, rtl_c2hcmd_wq_callback);
+ return 0;
}
void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq)
@@ -564,9 +566,7 @@ int rtl_init_core(struct ieee80211_hw *hw)
rtlmac->link_state = MAC80211_NOLINK;
/* <6> init deferred work */
- _rtl_init_deferred_work(hw);
-
- return 0;
+ return _rtl_init_deferred_work(hw);
}
EXPORT_SYMBOL_GPL(rtl_init_core);
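The change above moves the only fallible step (the workqueue allocation) to the top and propagates -ENOMEM, so no timers or work items are initialized when the function fails. A minimal fail-fast sketch of the same idea, with illustrative names:

#include <errno.h>
#include <stdlib.h>

struct ctx {
	void *wq;
	int timers_ready;
};

static void setup_timers(struct ctx *c)	/* infallible side effects */
{
	c->timers_ready = 1;
}

static int init_deferred_work(struct ctx *c)
{
	void *wq = malloc(64);		/* the one fallible step, done first */

	if (!wq)
		return -ENOMEM;		/* fail fast: nothing to undo yet */

	setup_timers(c);		/* side effects only after success */
	c->wq = wq;
	return 0;
}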
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
index 447caa4aad32..c5b8df58d4a2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
@@ -1721,10 +1721,6 @@ static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
btc8821a2ant_ps_tdma(btcoexist,
NORMAL_EXEC, true, 14);
coex_dm->ps_tdma_du_adj_type = 14;
- } else if (max_interval == 3) {
- btc8821a2ant_ps_tdma(btcoexist,
- NORMAL_EXEC, true, 15);
- coex_dm->ps_tdma_du_adj_type = 15;
} else {
btc8821a2ant_ps_tdma(btcoexist,
NORMAL_EXEC, true, 15);
@@ -1739,10 +1735,6 @@ static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
btc8821a2ant_ps_tdma(btcoexist,
NORMAL_EXEC, true, 10);
coex_dm->ps_tdma_du_adj_type = 10;
- } else if (max_interval == 3) {
- btc8821a2ant_ps_tdma(btcoexist,
- NORMAL_EXEC, true, 11);
- coex_dm->ps_tdma_du_adj_type = 11;
} else {
btc8821a2ant_ps_tdma(btcoexist,
NORMAL_EXEC, true, 11);
@@ -1759,10 +1751,6 @@ static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
btc8821a2ant_ps_tdma(btcoexist,
NORMAL_EXEC, true, 6);
coex_dm->ps_tdma_du_adj_type = 6;
- } else if (max_interval == 3) {
- btc8821a2ant_ps_tdma(btcoexist,
- NORMAL_EXEC, true, 7);
- coex_dm->ps_tdma_du_adj_type = 7;
} else {
btc8821a2ant_ps_tdma(btcoexist,
NORMAL_EXEC, true, 7);
@@ -1777,10 +1765,6 @@ static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
btc8821a2ant_ps_tdma(btcoexist,
NORMAL_EXEC, true, 2);
coex_dm->ps_tdma_du_adj_type = 2;
- } else if (max_interval == 3) {
- btc8821a2ant_ps_tdma(btcoexist,
- NORMAL_EXEC, true, 3);
- coex_dm->ps_tdma_du_adj_type = 3;
} else {
btc8821a2ant_ps_tdma(btcoexist,
NORMAL_EXEC, true, 3);
@@ -2810,6 +2794,7 @@ static void btc8821a2ant_action_a2dp(struct btc_coexist *btcoexist)
0x4);
}
+ /* preserve identical branches for further fine-tuning */
if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
(bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 23);
@@ -2944,6 +2929,7 @@ static void btc8821a2ant_action_pan_edr(struct btc_coexist *btcoexist)
0x4);
}
+ /* preserve identical branches for further fine-tuning */
if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
(bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH))
btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 26);
@@ -3132,6 +3118,7 @@ static void btc8821a2ant_action_pan_edr_hid(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_BW, &wifi_bw);
+ /* preserve identical branches for further fine-tuning */
if (wifi_bw == BTC_WIFI_BW_LEGACY) {
/* for HID at 11b/g mode */
btc8821a2ant_coex_table(btcoexist, NORMAL_EXEC, 0x55ff55ff,
@@ -3321,6 +3308,7 @@ static void btc8821a2ant_action_hid_a2dp(struct btc_coexist *btcoexist)
0x4);
}
+ /* preserve identical branches for further fine-tuning */
if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
(bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 23);
diff --git a/drivers/net/wireless/realtek/rtlwifi/cam.c b/drivers/net/wireless/realtek/rtlwifi/cam.c
index 7aa28da39409..7a0355dc6bab 100644
--- a/drivers/net/wireless/realtek/rtlwifi/cam.c
+++ b/drivers/net/wireless/realtek/rtlwifi/cam.c
@@ -167,7 +167,7 @@ void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index)
u32 ul_command;
u32 ul_content;
- u32 ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES];
+ u32 ul_enc_algo;
switch (rtlpriv->sec.pairwise_enc_algorithm) {
case WEP40_ENCRYPTION:
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
index 8d2c6d8d32d9..4ff0d4118193 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
@@ -112,7 +112,7 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
}
/**
- * writeLLT - LLT table write access
+ * rtl92c_llt_write - LLT table write access
* @hw: Pointer to the ieee80211_hw structure.
* @address: LLT logical address.
* @data: LLT data content
@@ -144,7 +144,7 @@ bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
}
/**
- * rtl92c_init_LLT_table - Init LLT table
+ * rtl92c_init_llt_table - Init LLT table
* @hw: Pointer to the ieee80211_hw structure.
* @boundary: Page boundary.
*
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
index 38034102aacb..e474b4ec17f3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
@@ -513,7 +513,7 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8,
/* This bit indicate this packet is used for FW download. */
if (tcb_desc->cmd_or_init == DESC_PACKET_TYPE_INIT) {
- /* For firmware downlaod we only need to set LINIP */
+ /* For firmware download we only need to set LINIP */
set_tx_desc_linip(pdesc, tcb_desc->last_inipkt);
/* 92SE must set as 1 for firmware download HW DMA error */
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index f8a1de6e9849..c98f2216734f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -915,7 +915,7 @@ int rtl8723e_hw_init(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- bool rtstatus = true;
+ bool rtstatus;
int err;
u8 tmp_u1b;
unsigned long flags;
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 9fe77556858e..63ce2443f136 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1036,14 +1036,11 @@ static bool is_associated(struct usbnet *usbdev)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
u8 bssid[ETH_ALEN];
- int ret;
if (!priv->radio_on)
return false;
- ret = get_bssid(usbdev, bssid);
-
- return (ret == 0 && !is_zero_ether_addr(bssid));
+ return (get_bssid(usbdev, bssid) == 0 && !is_zero_ether_addr(bssid));
}
static int disassociate(struct usbnet *usbdev, bool reset_ssid)
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index ce9892152f4d..99b21a2c8386 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -203,7 +203,7 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
wh->frame_control |= cpu_to_le16(RSI_SET_PS_ENABLE);
if ((!(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) &&
- (common->secinfo.security_enable)) {
+ info->control.hw_key) {
if (rsi_is_cipher_wep(common))
ieee80211_size += 4;
else
@@ -470,9 +470,9 @@ int rsi_prepare_beacon(struct rsi_common *common, struct sk_buff *skb)
}
if (common->band == NL80211_BAND_2GHZ)
- bcn_frm->bbp_info |= cpu_to_le16(RSI_RATE_1);
+ bcn_frm->rate_info |= cpu_to_le16(RSI_RATE_1);
else
- bcn_frm->bbp_info |= cpu_to_le16(RSI_RATE_6);
+ bcn_frm->rate_info |= cpu_to_le16(RSI_RATE_6);
if (mac_bcn->data[tim_offset + 2] == 0)
bcn_frm->frame_info |= cpu_to_le16(RSI_DATA_DESC_DTIM_BEACON);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 16025300cddb..b66975f54567 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -837,6 +837,23 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw,
common->cqm_info.rssi_hyst);
}
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ rsi_dbg(INFO_ZONE, "%s: Changed Beacon interval: %d\n",
+ __func__, bss_conf->beacon_int);
+ if (common->beacon_interval != bss->beacon_int) {
+ common->beacon_interval = bss->beacon_int;
+ if (vif->type == NL80211_IFTYPE_AP) {
+ struct vif_priv *vif_info = (struct vif_priv *)vif->drv_priv;
+
+ rsi_set_vap_capabilities(common, RSI_OPMODE_AP,
+ vif->addr, vif_info->vap_id,
+ VAP_UPDATE);
+ }
+ }
+ adapter->ps_info.listen_interval =
+ bss->beacon_int * adapter->ps_info.num_bcns_per_lis_int;
+ }
+
if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
((vif->type == NL80211_IFTYPE_AP) ||
(vif->type == NL80211_IFTYPE_P2P_GO))) {
@@ -1028,7 +1045,6 @@ static int rsi_mac80211_set_key(struct ieee80211_hw *hw,
mutex_lock(&common->mutex);
switch (cmd) {
case SET_KEY:
- secinfo->security_enable = true;
status = rsi_hal_key_config(hw, vif, key, sta);
if (status) {
mutex_unlock(&common->mutex);
@@ -1047,8 +1063,6 @@ static int rsi_mac80211_set_key(struct ieee80211_hw *hw,
break;
case DISABLE_KEY:
- if (vif->type == NL80211_IFTYPE_STATION)
- secinfo->security_enable = false;
rsi_dbg(ERR_ZONE, "%s: RSI del key\n", __func__);
memset(key, 0, sizeof(struct ieee80211_key_conf));
status = rsi_hal_key_config(hw, vif, key, sta);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 33c76d39a8e9..891fd5f0fa76 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -1547,8 +1547,8 @@ static int rsi_eeprom_read(struct rsi_common *common)
}
/**
- * This function sends a frame to block/unblock
- * data queues in the firmware
+ * rsi_send_block_unblock_frame() - This function sends a frame to block/unblock
+ * data queues in the firmware
*
* @common: Pointer to the driver private structure.
* @block_event: Event block if true, unblock if false
@@ -1803,8 +1803,7 @@ int rsi_send_wowlan_request(struct rsi_common *common, u16 flags,
RSI_WIFI_MGMT_Q);
cmd_frame->desc.desc_dword0.frame_type = WOWLAN_CONFIG_PARAMS;
cmd_frame->host_sleep_status = sleep_status;
- if (common->secinfo.security_enable &&
- common->secinfo.gtk_cipher)
+ if (common->secinfo.gtk_cipher)
flags |= RSI_WOW_GTK_REKEY;
if (sleep_status)
cmd_frame->wow_flags = flags;
diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
index a1065e5a92b4..0f535850a383 100644
--- a/drivers/net/wireless/rsi/rsi_main.h
+++ b/drivers/net/wireless/rsi/rsi_main.h
@@ -151,7 +151,6 @@ enum edca_queue {
};
struct security_info {
- bool security_enable;
u32 ptk_cipher;
u32 gtk_cipher;
};
diff --git a/drivers/net/wireless/st/cw1200/cw1200_sdio.c b/drivers/net/wireless/st/cw1200/cw1200_sdio.c
index b65ec14136c7..4c30b5772ce0 100644
--- a/drivers/net/wireless/st/cw1200/cw1200_sdio.c
+++ b/drivers/net/wireless/st/cw1200/cw1200_sdio.c
@@ -53,6 +53,7 @@ static const struct sdio_device_id cw1200_sdio_ids[] = {
{ SDIO_DEVICE(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200) },
{ /* end: all zeroes */ },
};
+MODULE_DEVICE_TABLE(sdio, cw1200_sdio_ids);
/* hwbus_ops implementation */
diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
index 988581cc134b..1f856fbbc0ea 100644
--- a/drivers/net/wireless/st/cw1200/scan.c
+++ b/drivers/net/wireless/st/cw1200/scan.c
@@ -75,30 +75,27 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS)
return -EINVAL;
- /* will be unlocked in cw1200_scan_work() */
- down(&priv->scan.lock);
- mutex_lock(&priv->conf_mutex);
-
frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0,
req->ie_len);
- if (!frame.skb) {
- mutex_unlock(&priv->conf_mutex);
- up(&priv->scan.lock);
+ if (!frame.skb)
return -ENOMEM;
- }
if (req->ie_len)
skb_put_data(frame.skb, req->ie, req->ie_len);
+ /* will be unlocked in cw1200_scan_work() */
+ down(&priv->scan.lock);
+ mutex_lock(&priv->conf_mutex);
+
ret = wsm_set_template_frame(priv, &frame);
if (!ret) {
/* Host want to be the probe responder. */
ret = wsm_set_probe_responder(priv, true);
}
if (ret) {
- dev_kfree_skb(frame.skb);
mutex_unlock(&priv->conf_mutex);
up(&priv->scan.lock);
+ dev_kfree_skb(frame.skb);
return ret;
}
@@ -120,8 +117,8 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
++priv->scan.n_ssids;
}
- dev_kfree_skb(frame.skb);
mutex_unlock(&priv->conf_mutex);
+ dev_kfree_skb(frame.skb);
queue_work(priv->workqueue, &priv->scan.work);
return 0;
}
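The reordering above allocates the probe-request frame before taking scan.lock and conf_mutex, so the -ENOMEM path needs no unlock, and the skb is freed only after the mutex is dropped. A plain-C sketch of the ordering, with illustrative names:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t conf_mutex = PTHREAD_MUTEX_INITIALIZER;

static int start_scan(size_t frame_len)
{
	void *frame = malloc(frame_len);	/* allocate before locking */

	if (!frame)
		return -1;	/* no lock held, so no unlock path needed */

	pthread_mutex_lock(&conf_mutex);
	/* ...program the scan templates using frame here... */
	pthread_mutex_unlock(&conf_mutex);

	free(frame);	/* release outside the critical section */
	return 0;
}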
diff --git a/drivers/net/wireless/ti/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c
index 498c8db2eb48..c3be81dc7970 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.c
+++ b/drivers/net/wireless/ti/wl1251/cmd.c
@@ -12,7 +12,7 @@
#include "acx.h"
/**
- * send command to firmware
+ * wl1251_cmd_send - Send command to firmware
*
* @wl: wl struct
* @id: command id
@@ -59,7 +59,7 @@ out:
}
/**
- * send test command to firmware
+ * wl1251_cmd_test - Send test command to firmware
*
* @wl: wl struct
* @buf: buffer containing the command, with all headers, must work with dma
@@ -100,7 +100,7 @@ int wl1251_cmd_test(struct wl1251 *wl, void *buf, size_t buf_len, u8 answer)
}
/**
- * read acx from firmware
+ * wl1251_cmd_interrogate - Read acx from firmware
*
* @wl: wl struct
* @id: acx id
@@ -138,7 +138,7 @@ out:
}
/**
- * write acx value to firmware
+ * wl1251_cmd_configure - Write acx value to firmware
*
* @wl: wl struct
* @id: acx id
@@ -454,9 +454,12 @@ int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
cmd->channels[i].channel = channels[i]->hw_value;
}
- cmd->params.ssid_len = ssid_len;
- if (ssid)
- memcpy(cmd->params.ssid, ssid, ssid_len);
+ if (ssid) {
+ int len = clamp_val(ssid_len, 0, IEEE80211_MAX_SSID_LEN);
+
+ cmd->params.ssid_len = len;
+ memcpy(cmd->params.ssid, ssid, len);
+ }
ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
if (ret < 0) {
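The clamp above bounds a caller-supplied length before it reaches memcpy(), so the fixed-size SSID field cannot be overrun. A standalone sketch of the same pattern (the 32-byte limit mirrors IEEE80211_MAX_SSID_LEN):

#include <stddef.h>
#include <string.h>

#define MAX_SSID_LEN 32

struct scan_params {
	unsigned char ssid_len;
	unsigned char ssid[MAX_SSID_LEN];
};

static void set_ssid(struct scan_params *p, const void *ssid, size_t len)
{
	if (len > MAX_SSID_LEN)
		len = MAX_SSID_LEN;	/* clamp before the copy */
	p->ssid_len = (unsigned char)len;
	memcpy(p->ssid, ssid, len);	/* can no longer overrun the field */
}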
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index 9d7dbfe7fe0c..c6da0cfb4afb 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -1503,6 +1503,13 @@ static int wl12xx_get_fuse_mac(struct wl1271 *wl)
u32 mac1, mac2;
int ret;
+ /* Device may be in ELP from the bootloader or kexec */
+ ret = wlcore_write32(wl, WL12XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
+ if (ret < 0)
+ goto out;
+
+ usleep_range(500000, 700000);
+
ret = wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);
if (ret < 0)
goto out;
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 32a2e27cc561..8b798b5fcaf5 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -821,7 +821,7 @@ out:
/**
- * send test command to firmware
+ * wl1271_cmd_test - send test command to firmware
*
* @wl: wl struct
* @buf: buffer containing the command, with all headers, must work with dma
@@ -850,7 +850,7 @@ int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer)
EXPORT_SYMBOL_GPL(wl1271_cmd_test);
/**
- * read acx from firmware
+ * wl1271_cmd_interrogate - read acx from firmware
*
* @wl: wl struct
* @id: acx id
@@ -879,7 +879,7 @@ int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf,
}
/**
- * write acx value to firmware
+ * wlcore_cmd_configure_failsafe - write acx value to firmware
*
* @wl: wl struct
* @id: acx id
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index a68bbadae043..46ab69eab26a 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -29,18 +29,20 @@ int wlcore_event_fw_logger(struct wl1271 *wl)
u8 *buffer;
u32 internal_fw_addrbase = WL18XX_DATA_RAM_BASE_ADDRESS;
u32 addr = WL18XX_LOGGER_SDIO_BUFF_ADDR;
- u32 end_buff_addr = WL18XX_LOGGER_SDIO_BUFF_ADDR +
- WL18XX_LOGGER_BUFF_OFFSET;
+ u32 addr_ptr;
+ u32 buff_start_ptr;
+ u32 buff_read_ptr;
+ u32 buff_end_ptr;
u32 available_len;
u32 actual_len;
- u32 clear_addr;
+ u32 clear_ptr;
size_t len;
u32 start_loc;
buffer = kzalloc(WL18XX_LOGGER_SDIO_BUFF_MAX, GFP_KERNEL);
if (!buffer) {
wl1271_error("Fail to allocate fw logger memory");
- fw_log.actual_buff_size = cpu_to_le32(0);
+ actual_len = 0;
goto out;
}
@@ -49,51 +51,58 @@ int wlcore_event_fw_logger(struct wl1271 *wl)
if (ret < 0) {
wl1271_error("Fail to read logger buffer, error_id = %d",
ret);
- fw_log.actual_buff_size = cpu_to_le32(0);
+ actual_len = 0;
goto free_out;
}
memcpy(&fw_log, buffer, sizeof(fw_log));
- if (le32_to_cpu(fw_log.actual_buff_size) == 0)
+ actual_len = le32_to_cpu(fw_log.actual_buff_size);
+ if (actual_len == 0)
goto free_out;
- actual_len = le32_to_cpu(fw_log.actual_buff_size);
- start_loc = (le32_to_cpu(fw_log.buff_read_ptr) -
- internal_fw_addrbase) - addr;
- end_buff_addr += le32_to_cpu(fw_log.max_buff_size);
- available_len = end_buff_addr -
- (le32_to_cpu(fw_log.buff_read_ptr) -
- internal_fw_addrbase);
- actual_len = min(actual_len, available_len);
- len = actual_len;
+ /* Calculate the internal pointer to the fwlog structure */
+ addr_ptr = internal_fw_addrbase + addr;
+
+ /* Calculate the internal pointers to the start and end of log buffer */
+ buff_start_ptr = addr_ptr + WL18XX_LOGGER_BUFF_OFFSET;
+ buff_end_ptr = buff_start_ptr + le32_to_cpu(fw_log.max_buff_size);
+ /* Read the read pointer and validate it */
+ buff_read_ptr = le32_to_cpu(fw_log.buff_read_ptr);
+ if (buff_read_ptr < buff_start_ptr ||
+ buff_read_ptr >= buff_end_ptr) {
+ wl1271_error("buffer read pointer out of bounds: %x not in (%x-%x)\n",
+ buff_read_ptr, buff_start_ptr, buff_end_ptr);
+ goto free_out;
+ }
+
+ start_loc = buff_read_ptr - addr_ptr;
+ available_len = buff_end_ptr - buff_read_ptr;
+
+ /* Copy initial part up to the end of ring buffer */
+ len = min(actual_len, available_len);
wl12xx_copy_fwlog(wl, &buffer[start_loc], len);
- clear_addr = addr + start_loc + le32_to_cpu(fw_log.actual_buff_size) +
- internal_fw_addrbase;
+ clear_ptr = addr_ptr + start_loc + actual_len;
+ if (clear_ptr == buff_end_ptr)
+ clear_ptr = buff_start_ptr;
- len = le32_to_cpu(fw_log.actual_buff_size) - len;
+ /* Copy any remaining part from beginning of ring buffer */
+ len = actual_len - len;
if (len) {
wl12xx_copy_fwlog(wl,
&buffer[WL18XX_LOGGER_BUFF_OFFSET],
len);
- clear_addr = addr + WL18XX_LOGGER_BUFF_OFFSET + len +
- internal_fw_addrbase;
- }
-
- /* double check that clear address and write pointer are the same */
- if (clear_addr != le32_to_cpu(fw_log.buff_write_ptr)) {
- wl1271_error("Calculate of clear addr Clear = %x, write = %x",
- clear_addr, le32_to_cpu(fw_log.buff_write_ptr));
+ clear_ptr = addr_ptr + WL18XX_LOGGER_BUFF_OFFSET + len;
}
- /* indicate FW about Clear buffer */
+ /* Update the read pointer */
ret = wlcore_write32(wl, addr + WL18XX_LOGGER_READ_POINT_OFFSET,
- fw_log.buff_write_ptr);
+ clear_ptr);
free_out:
kfree(buffer);
out:
- return le32_to_cpu(fw_log.actual_buff_size);
+ return actual_len;
}
EXPORT_SYMBOL_GPL(wlcore_event_fw_logger);
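The rewritten pointer math above reads the firmware log as a ring buffer in at most two chunks: from the validated read pointer to the end of the buffer, then from the start for any wrapped remainder, finally advancing the read pointer. A standalone sketch of the same scheme, with illustrative names:

#include <stddef.h>
#include <string.h>

/*
 * Copy 'avail' bytes out of a ring of 'size' bytes starting at
 * 'read_pos', handling wrap-around; returns the new read position.
 * The caller guarantees avail <= size and read_pos < size.
 */
static size_t ring_read(unsigned char *dst, const unsigned char *ring,
			size_t size, size_t read_pos, size_t avail)
{
	size_t first = size - read_pos;	/* bytes until the end of the ring */

	if (first > avail)
		first = avail;
	memcpy(dst, ring + read_pos, first);
	memcpy(dst + first, ring, avail - first);	/* wrapped tail */

	return (read_pos + avail) % size;	/* updated read pointer */
}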
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 8509b989940c..e500b8405f8f 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -3242,8 +3242,8 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
* the firmware filters so that all multicast packets are passed
* This is mandatory for MDNS based discovery protocols
*/
- if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
- if (*total & FIF_ALLMULTI) {
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
+ if (*total & FIF_ALLMULTI) {
ret = wl1271_acx_group_address_tbl(wl, wlvif,
false,
NULL, 0);
diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c
index 5cf0379b88b6..35b535c125b6 100644
--- a/drivers/net/wireless/ti/wlcore/sysfs.c
+++ b/drivers/net/wireless/ti/wlcore/sysfs.c
@@ -12,9 +12,9 @@
#include "debug.h"
#include "sysfs.h"
-static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t bt_coex_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct wl1271 *wl = dev_get_drvdata(dev);
ssize_t len;
@@ -30,9 +30,9 @@ static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
}
-static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t bt_coex_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct wl1271 *wl = dev_get_drvdata(dev);
unsigned long res;
@@ -71,13 +71,11 @@ static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
return count;
}
-static DEVICE_ATTR(bt_coex_state, 0644,
- wl1271_sysfs_show_bt_coex_state,
- wl1271_sysfs_store_bt_coex_state);
+static DEVICE_ATTR_RW(bt_coex_state);
-static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t hw_pg_ver_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct wl1271 *wl = dev_get_drvdata(dev);
ssize_t len;
@@ -94,7 +92,7 @@ static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
return len;
}
-static DEVICE_ATTR(hw_pg_ver, 0444, wl1271_sysfs_show_hw_pg_ver, NULL);
+static DEVICE_ATTR_RO(hw_pg_ver);
static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index 5c4cd0e1adeb..a7ceef10bf6a 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -1544,14 +1544,14 @@ static int __init usb_init(void)
zd_workqueue = create_singlethread_workqueue(driver.name);
if (zd_workqueue == NULL) {
- printk(KERN_ERR "%s couldn't create workqueue\n", driver.name);
+ pr_err("%s couldn't create workqueue\n", driver.name);
return -ENOMEM;
}
r = usb_register(&driver);
if (r) {
destroy_workqueue(zd_workqueue);
- printk(KERN_ERR "%s usb_register() failed. Error number %d\n",
+ pr_err("%s usb_register() failed. Error number %d\n",
driver.name, r);
return r;
}
diff --git a/drivers/net/wwan/Kconfig b/drivers/net/wwan/Kconfig
index 7ad1920120bc..249b3f1ed62b 100644
--- a/drivers/net/wwan/Kconfig
+++ b/drivers/net/wwan/Kconfig
@@ -3,15 +3,9 @@
# Wireless WAN device configuration
#
-menuconfig WWAN
- bool "Wireless WAN"
- help
- This section contains Wireless WAN configuration for WWAN framework
- and drivers.
-
-if WWAN
+menu "Wireless WAN"
-config WWAN_CORE
+config WWAN
tristate "WWAN Driver Core"
help
Say Y here if you want to use the WWAN driver core. This driver
@@ -20,9 +14,19 @@ config WWAN_CORE
To compile this driver as a module, choose M here: the module will be
called wwan.
+if WWAN
+
+config WWAN_HWSIM
+ tristate "Simulated WWAN device"
+ help
+ This driver is a developer testing tool that can be used to test the
+ WWAN framework.
+
+ To compile this driver as a module, choose M here: the module will be
+ called wwan_hwsim. If unsure, say N.
+
config MHI_WWAN_CTRL
tristate "MHI WWAN control driver for QCOM-based PCIe modems"
- select WWAN_CORE
depends on MHI_BUS
help
MHI WWAN CTRL allows QCOM-based PCIe modems to expose different modem
@@ -34,4 +38,17 @@ config MHI_WWAN_CTRL
To compile this driver as a module, choose M here: the module will be
called mhi_wwan_ctrl.
+config IOSM
+ tristate "IOSM Driver for Intel M.2 WWAN Device"
+ depends on INTEL_IOMMU
+ help
+ This driver enables Intel M.2 WWAN Device communication.
+
+ If you have one of those Intel M.2 WWAN Modules and wish to use it in
+ Linux say Y/M here.
+
+ If unsure, say N.
+
endif # WWAN
+
+endmenu
diff --git a/drivers/net/wwan/Makefile b/drivers/net/wwan/Makefile
index 556cd90958ca..83dd3482ffc3 100644
--- a/drivers/net/wwan/Makefile
+++ b/drivers/net/wwan/Makefile
@@ -3,7 +3,10 @@
# Makefile for the Linux WWAN device drivers.
#
-obj-$(CONFIG_WWAN_CORE) += wwan.o
+obj-$(CONFIG_WWAN) += wwan.o
wwan-objs += wwan_core.o
+obj-$(CONFIG_WWAN_HWSIM) += wwan_hwsim.o
+
obj-$(CONFIG_MHI_WWAN_CTRL) += mhi_wwan_ctrl.o
+obj-$(CONFIG_IOSM) += iosm/
diff --git a/drivers/net/wwan/iosm/Makefile b/drivers/net/wwan/iosm/Makefile
new file mode 100644
index 000000000000..4f9f0ae398e1
--- /dev/null
+++ b/drivers/net/wwan/iosm/Makefile
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: (GPL-2.0-only)
+#
+# Copyright (C) 2020-21 Intel Corporation.
+#
+
+iosm-y = \
+ iosm_ipc_task_queue.o \
+ iosm_ipc_imem.o \
+ iosm_ipc_imem_ops.o \
+ iosm_ipc_mmio.o \
+ iosm_ipc_port.o \
+ iosm_ipc_wwan.o \
+ iosm_ipc_uevent.o \
+ iosm_ipc_pm.o \
+ iosm_ipc_pcie.o \
+ iosm_ipc_irq.o \
+ iosm_ipc_chnl_cfg.o \
+ iosm_ipc_protocol.o \
+ iosm_ipc_protocol_ops.o \
+ iosm_ipc_mux.o \
+ iosm_ipc_mux_codec.o
+
+obj-$(CONFIG_IOSM) := iosm.o
diff --git a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
new file mode 100644
index 000000000000..804e6c4f2c78
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include <linux/wwan.h>
+
+#include "iosm_ipc_chnl_cfg.h"
+
+/* Max. sizes of downlink buffers */
+#define IPC_MEM_MAX_DL_FLASH_BUF_SIZE (16 * 1024)
+#define IPC_MEM_MAX_DL_LOOPBACK_SIZE (1 * 1024 * 1024)
+#define IPC_MEM_MAX_DL_AT_BUF_SIZE 2048
+#define IPC_MEM_MAX_DL_RPC_BUF_SIZE (32 * 1024)
+#define IPC_MEM_MAX_DL_MBIM_BUF_SIZE IPC_MEM_MAX_DL_RPC_BUF_SIZE
+
+/* Max. transfer descriptors for a pipe. */
+#define IPC_MEM_MAX_TDS_FLASH_DL 3
+#define IPC_MEM_MAX_TDS_FLASH_UL 6
+#define IPC_MEM_MAX_TDS_AT 4
+#define IPC_MEM_MAX_TDS_RPC 4
+#define IPC_MEM_MAX_TDS_MBIM IPC_MEM_MAX_TDS_RPC
+#define IPC_MEM_MAX_TDS_LOOPBACK 11
+
+/* Accumulation backoff usec */
+#define IRQ_ACC_BACKOFF_OFF 0
+
+/* MUX acc backoff 1ms */
+#define IRQ_ACC_BACKOFF_MUX 1000
+
+/* Modem channel configuration table
+ * Always reserve element zero for flash channel.
+ */
+static struct ipc_chnl_cfg modem_cfg[] = {
+ /* IP Mux */
+ { IPC_MEM_IP_CHL_ID_0, IPC_MEM_PIPE_0, IPC_MEM_PIPE_1,
+ IPC_MEM_MAX_TDS_MUX_LITE_UL, IPC_MEM_MAX_TDS_MUX_LITE_DL,
+ IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE, WWAN_PORT_UNKNOWN },
+ /* RPC - 0 */
+ { IPC_MEM_CTRL_CHL_ID_1, IPC_MEM_PIPE_2, IPC_MEM_PIPE_3,
+ IPC_MEM_MAX_TDS_RPC, IPC_MEM_MAX_TDS_RPC,
+ IPC_MEM_MAX_DL_RPC_BUF_SIZE, WWAN_PORT_UNKNOWN },
+ /* IAT0 */
+ { IPC_MEM_CTRL_CHL_ID_2, IPC_MEM_PIPE_4, IPC_MEM_PIPE_5,
+ IPC_MEM_MAX_TDS_AT, IPC_MEM_MAX_TDS_AT, IPC_MEM_MAX_DL_AT_BUF_SIZE,
+ WWAN_PORT_AT },
+ /* Trace */
+ { IPC_MEM_CTRL_CHL_ID_3, IPC_MEM_PIPE_6, IPC_MEM_PIPE_7,
+ IPC_MEM_TDS_TRC, IPC_MEM_TDS_TRC, IPC_MEM_MAX_DL_TRC_BUF_SIZE,
+ WWAN_PORT_UNKNOWN },
+ /* IAT1 */
+ { IPC_MEM_CTRL_CHL_ID_4, IPC_MEM_PIPE_8, IPC_MEM_PIPE_9,
+ IPC_MEM_MAX_TDS_AT, IPC_MEM_MAX_TDS_AT, IPC_MEM_MAX_DL_AT_BUF_SIZE,
+ WWAN_PORT_AT },
+ /* Loopback */
+ { IPC_MEM_CTRL_CHL_ID_5, IPC_MEM_PIPE_10, IPC_MEM_PIPE_11,
+ IPC_MEM_MAX_TDS_LOOPBACK, IPC_MEM_MAX_TDS_LOOPBACK,
+ IPC_MEM_MAX_DL_LOOPBACK_SIZE, WWAN_PORT_UNKNOWN },
+ /* MBIM Channel */
+ { IPC_MEM_CTRL_CHL_ID_6, IPC_MEM_PIPE_12, IPC_MEM_PIPE_13,
+ IPC_MEM_MAX_TDS_MBIM, IPC_MEM_MAX_TDS_MBIM,
+ IPC_MEM_MAX_DL_MBIM_BUF_SIZE, WWAN_PORT_MBIM },
+};
+
+int ipc_chnl_cfg_get(struct ipc_chnl_cfg *chnl_cfg, int index)
+{
+ int array_size = ARRAY_SIZE(modem_cfg);
+
+ if (index >= array_size) {
+ pr_err("index: %d and array_size %d", index, array_size);
+ return -ECHRNG;
+ }
+
+ if (index == IPC_MEM_MUX_IP_CH_IF_ID)
+ chnl_cfg->accumulation_backoff = IRQ_ACC_BACKOFF_MUX;
+ else
+ chnl_cfg->accumulation_backoff = IRQ_ACC_BACKOFF_OFF;
+
+ chnl_cfg->ul_nr_of_entries = modem_cfg[index].ul_nr_of_entries;
+ chnl_cfg->dl_nr_of_entries = modem_cfg[index].dl_nr_of_entries;
+ chnl_cfg->dl_buf_size = modem_cfg[index].dl_buf_size;
+ chnl_cfg->id = modem_cfg[index].id;
+ chnl_cfg->ul_pipe = modem_cfg[index].ul_pipe;
+ chnl_cfg->dl_pipe = modem_cfg[index].dl_pipe;
+ chnl_cfg->wwan_port_type = modem_cfg[index].wwan_port_type;
+
+ return 0;
+}
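A possible caller of ipc_chnl_cfg_get() - hypothetical, not part of the patch - iterating the table until the bounds check returns -ECHRNG:

#include <linux/printk.h>
#include "iosm_ipc_chnl_cfg.h"

static void dump_channel_table(void)
{
	struct ipc_chnl_cfg cfg;
	int i;

	for (i = 0; ipc_chnl_cfg_get(&cfg, i) == 0; i++)
		pr_debug("chnl %u: ul_pipe %u dl_pipe %u dl_buf %u\n",
			 cfg.id, cfg.ul_pipe, cfg.dl_pipe, cfg.dl_buf_size);
}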
diff --git a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h
new file mode 100644
index 000000000000..422471367f78
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation
+ */
+
+#ifndef IOSM_IPC_CHNL_CFG_H
+#define IOSM_IPC_CHNL_CFG_H
+
+#include "iosm_ipc_mux.h"
+
+/* Number of TDs on the trace channel */
+#define IPC_MEM_TDS_TRC 32
+
+/* Trace channel TD buffer size. */
+#define IPC_MEM_MAX_DL_TRC_BUF_SIZE 8192
+
+/* Channel ID */
+enum ipc_channel_id {
+ IPC_MEM_IP_CHL_ID_0 = 0,
+ IPC_MEM_CTRL_CHL_ID_1,
+ IPC_MEM_CTRL_CHL_ID_2,
+ IPC_MEM_CTRL_CHL_ID_3,
+ IPC_MEM_CTRL_CHL_ID_4,
+ IPC_MEM_CTRL_CHL_ID_5,
+ IPC_MEM_CTRL_CHL_ID_6,
+};
+
+/**
+ * struct ipc_chnl_cfg - IPC channel configuration structure
+ * @id: Interface ID
+ * @ul_pipe: Uplink datastream
+ * @dl_pipe: Downlink datastream
+ * @ul_nr_of_entries: Number of Transfer descriptor uplink pipe
+ * @dl_nr_of_entries: Number of Transfer descriptor downlink pipe
+ * @dl_buf_size: Downlink buffer size
+ * @wwan_port_type: WWAN subsystem port type
+ * @accumulation_backoff: Time in usec for data accumulation
+ */
+struct ipc_chnl_cfg {
+ u32 id;
+ u32 ul_pipe;
+ u32 dl_pipe;
+ u32 ul_nr_of_entries;
+ u32 dl_nr_of_entries;
+ u32 dl_buf_size;
+ u32 wwan_port_type;
+ u32 accumulation_backoff;
+};
+
+/**
+ * ipc_chnl_cfg_get - Get pipe configuration.
+ * @chnl_cfg: Array of ipc_chnl_cfg struct
+ * @index: Channel index (up to MAX_CHANNELS)
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+int ipc_chnl_cfg_get(struct ipc_chnl_cfg *chnl_cfg, int index);
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c
new file mode 100644
index 000000000000..9f00e36b7f79
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c
@@ -0,0 +1,1363 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include <linux/delay.h>
+
+#include "iosm_ipc_chnl_cfg.h"
+#include "iosm_ipc_imem.h"
+#include "iosm_ipc_port.h"
+
+/* Check whether the given channel is the WWAN IP (MUX) channel. */
+static int ipc_imem_check_wwan_ips(struct ipc_mem_channel *chnl)
+{
+ if (chnl)
+ return chnl->ctype == IPC_CTYPE_WWAN &&
+ chnl->if_id == IPC_MEM_MUX_IP_CH_IF_ID;
+ return false;
+}
+
+static int ipc_imem_msg_send_device_sleep(struct iosm_imem *ipc_imem, u32 state)
+{
+ union ipc_msg_prep_args prep_args = {
+ .sleep.target = 1,
+ .sleep.state = state,
+ };
+
+ ipc_imem->device_sleep = state;
+
+ return ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
+ IPC_MSG_PREP_SLEEP, &prep_args, NULL);
+}
+
+static bool ipc_imem_dl_skb_alloc(struct iosm_imem *ipc_imem,
+ struct ipc_pipe *pipe)
+{
+ /* limit max. nr of entries */
+ if (pipe->nr_of_queued_entries >= pipe->max_nr_of_queued_entries)
+ return false;
+
+ return ipc_protocol_dl_td_prepare(ipc_imem->ipc_protocol, pipe);
+}
+
+/* This timer handler retries DL buffer allocation if a pipe has no free
+ * buffer and rings the doorbell if a TD is available
+ */
+static int ipc_imem_tq_td_alloc_timer(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ bool new_buffers_available = false;
+ bool retry_allocation = false;
+ int i;
+
+ for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
+ struct ipc_pipe *pipe = &ipc_imem->channels[i].dl_pipe;
+
+ if (!pipe->is_open || pipe->nr_of_queued_entries > 0)
+ continue;
+
+ while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
+ new_buffers_available = true;
+
+ if (pipe->nr_of_queued_entries == 0)
+ retry_allocation = true;
+ }
+
+ if (new_buffers_available)
+ ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
+ IPC_HP_DL_PROCESS);
+
+ if (retry_allocation) {
+ ipc_imem->hrtimer_period =
+ ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
+ if (!hrtimer_active(&ipc_imem->td_alloc_timer))
+ hrtimer_start(&ipc_imem->td_alloc_timer,
+ ipc_imem->hrtimer_period,
+ HRTIMER_MODE_REL);
+ }
+ return 0;
+}
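+/* Note: the handler re-arms itself, via retry_allocation, only while
+ * some open pipe still has zero queued RX buffers, so the retry loop
+ * ends once every open pipe owns at least one buffer.
+ */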
+
+static enum hrtimer_restart ipc_imem_td_alloc_timer_cb(struct hrtimer *hr_timer)
+{
+ struct iosm_imem *ipc_imem =
+ container_of(hr_timer, struct iosm_imem, td_alloc_timer);
+ /* Post an async tasklet event to trigger HP update Doorbell */
+ ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_alloc_timer, 0, NULL,
+ 0, false);
+ return HRTIMER_NORESTART;
+}
+
+/* Fast update timer tasklet handler to trigger HP update */
+static int ipc_imem_tq_fast_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
+ IPC_HP_FAST_TD_UPD_TMR);
+
+ return 0;
+}
+
+static enum hrtimer_restart
+ipc_imem_fast_update_timer_cb(struct hrtimer *hr_timer)
+{
+ struct iosm_imem *ipc_imem =
+ container_of(hr_timer, struct iosm_imem, fast_update_timer);
+ /* Post an async tasklet event to trigger HP update Doorbell */
+ ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_fast_update_timer_cb, 0,
+ NULL, 0, false);
+ return HRTIMER_NORESTART;
+}
+
+static int ipc_imem_setup_cp_mux_cap_init(struct iosm_imem *ipc_imem,
+ struct ipc_mux_config *cfg)
+{
+ ipc_mmio_update_cp_capability(ipc_imem->mmio);
+
+ if (!ipc_imem->mmio->has_mux_lite) {
+ dev_err(ipc_imem->dev, "Failed to get Mux capability.");
+ return -EINVAL;
+ }
+
+ cfg->protocol = MUX_LITE;
+
+ cfg->ul_flow = (ipc_imem->mmio->has_ul_flow_credit == 1) ?
+ MUX_UL_ON_CREDITS :
+ MUX_UL;
+
+ /* The instance ID is the same as the channel ID because it is reused
+ * by the channel allocation function.
+ */
+ cfg->instance_id = IPC_MEM_MUX_IP_CH_IF_ID;
+ cfg->nr_sessions = IPC_MEM_MUX_IP_SESSION_ENTRIES;
+
+ return 0;
+}
+
+void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
+ unsigned int reset_enable, bool atomic_ctx)
+{
+ union ipc_msg_prep_args prep_args = { .feature_set.reset_enable =
+ reset_enable };
+
+ if (atomic_ctx)
+ ipc_protocol_tq_msg_send(ipc_imem->ipc_protocol,
+ IPC_MSG_PREP_FEATURE_SET, &prep_args,
+ NULL);
+ else
+ ipc_protocol_msg_send(ipc_imem->ipc_protocol,
+ IPC_MSG_PREP_FEATURE_SET, &prep_args);
+}
+
+void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem)
+{
+ /* Use the TD update timer only in the runtime phase */
+ if (!ipc_imem->enter_runtime || ipc_imem->td_update_timer_suspended) {
+ /* trigger the doorbell irq on CP directly. */
+ ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
+ IPC_HP_TD_UPD_TMR_START);
+ return;
+ }
+
+ if (!hrtimer_active(&ipc_imem->tdupdate_timer)) {
+ ipc_imem->hrtimer_period =
+ ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
+ if (!hrtimer_active(&ipc_imem->tdupdate_timer))
+ hrtimer_start(&ipc_imem->tdupdate_timer,
+ ipc_imem->hrtimer_period,
+ HRTIMER_MODE_REL);
+ }
+}
+
+void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer)
+{
+ if (hrtimer_active(hr_timer))
+ hrtimer_cancel(hr_timer);
+}
+
+bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem)
+{
+ struct ipc_mem_channel *channel;
+ struct sk_buff_head *ul_list;
+ bool hpda_pending = false;
+ bool forced_hpdu = false;
+ struct ipc_pipe *pipe;
+ int i;
+
+ /* Analyze the uplink pipe of all active channels. */
+ for (i = 0; i < ipc_imem->nr_of_channels; i++) {
+ channel = &ipc_imem->channels[i];
+
+ if (channel->state != IMEM_CHANNEL_ACTIVE)
+ continue;
+
+ pipe = &channel->ul_pipe;
+
+ /* Get the reference to the skbuf accumulator list. */
+ ul_list = &channel->ul_list;
+
+ /* Fill the transfer descriptor with the uplink buffer info. */
+ hpda_pending |= ipc_protocol_ul_td_send(ipc_imem->ipc_protocol,
+ pipe, ul_list);
+
+ /* forced HP update needed for non data channels */
+ if (hpda_pending && !ipc_imem_check_wwan_ips(channel))
+ forced_hpdu = true;
+ }
+
+ if (forced_hpdu) {
+ hpda_pending = false;
+ ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
+ IPC_HP_UL_WRITE_TD);
+ }
+
+ return hpda_pending;
+}
+
+void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem)
+{
+ int timeout = IPC_MODEM_BOOT_TIMEOUT;
+
+ ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;
+
+ /* Trigger the CP interrupt to enter the init state. */
+ ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
+ IPC_MEM_DEVICE_IPC_INIT);
+ /* Wait for the CP update. */
+ do {
+ if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
+ ipc_imem->ipc_requested_state) {
+ /* Prepare the MMIO space */
+ ipc_mmio_config(ipc_imem->mmio);
+
+ /* Trigger the CP irq to enter the running state. */
+ ipc_imem->ipc_requested_state =
+ IPC_MEM_DEVICE_IPC_RUNNING;
+ ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
+ IPC_MEM_DEVICE_IPC_RUNNING);
+
+ return;
+ }
+ msleep(20);
+ } while (--timeout);
+
+ /* timeout */
+ dev_err(ipc_imem->dev, "%s: ipc_status(%d) ne. IPC_MEM_DEVICE_IPC_INIT",
+ ipc_imem_phase_get_string(ipc_imem->phase),
+ ipc_mmio_get_ipc_state(ipc_imem->mmio));
+
+ ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_TIMEOUT);
+}
+
+/* Analyze the packet type and distribute it. */
+static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem,
+ struct ipc_pipe *pipe, struct sk_buff *skb)
+{
+ u16 port_id;
+
+ if (!skb)
+ return;
+
+ /* An AT/control or IP packet is expected. */
+ switch (pipe->channel->ctype) {
+ case IPC_CTYPE_CTRL:
+ port_id = pipe->channel->channel_id;
+
+ /* Pass the packet to the wwan layer. */
+ wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port, skb);
+ break;
+
+ case IPC_CTYPE_WWAN:
+ if (pipe->channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
+ ipc_mux_dl_decode(ipc_imem->mux, skb);
+ break;
+ default:
+ dev_err(ipc_imem->dev, "Invalid channel type");
+ break;
+ }
+}
+
+/* Process the downlink data and pass them to the char or net layer. */
+static void ipc_imem_dl_pipe_process(struct iosm_imem *ipc_imem,
+ struct ipc_pipe *pipe)
+{
+ s32 cnt = 0, processed_td_cnt = 0;
+ struct ipc_mem_channel *channel;
+ u32 head = 0, tail = 0;
+ bool processed = false;
+ struct sk_buff *skb;
+
+ channel = pipe->channel;
+
+ ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
+ &tail);
+ if (pipe->old_tail != tail) {
+ if (pipe->old_tail < tail)
+ cnt = tail - pipe->old_tail;
+ else
+ cnt = pipe->nr_of_entries - pipe->old_tail + tail;
+ }
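+	/*
+	 * Worked example: with nr_of_entries = 8, old_tail = 6 and
+	 * tail = 2, the index wrapped, so cnt = 8 - 6 + 2 = 4
+	 * descriptors completed since the last pass.
+	 */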
+
+ processed_td_cnt = cnt;
+
+ /* Seek for pipes with pending DL data. */
+ while (cnt--) {
+ skb = ipc_protocol_dl_td_process(ipc_imem->ipc_protocol, pipe);
+
+ /* Analyze the packet type and distribute it. */
+ ipc_imem_dl_skb_process(ipc_imem, pipe, skb);
+ }
+
+ /* Try to allocate new empty DL SKBs from head..tail - 1 */
+ while (ipc_imem_dl_skb_alloc(ipc_imem, pipe))
+ processed = true;
+
+ if (processed && !ipc_imem_check_wwan_ips(channel)) {
+ /* Force HP update for non IP channels */
+ ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
+ IPC_HP_DL_PROCESS);
+ processed = false;
+
+ /* If Fast Update timer is already running then stop */
+ ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
+ }
+
+ /* Any control channel process will get an immediate HP update.
+ * Start the fast update timer only for an IP channel, and only if all
+ * the TDs were used in the last pass.
+ */
+ if (processed && (processed_td_cnt == pipe->nr_of_entries - 1)) {
+ ipc_imem->hrtimer_period =
+ ktime_set(0, FORCE_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
+ hrtimer_start(&ipc_imem->fast_update_timer,
+ ipc_imem->hrtimer_period, HRTIMER_MODE_REL);
+ }
+
+ if (ipc_imem->app_notify_dl_pend)
+ complete(&ipc_imem->dl_pend_sem);
+}
+
+/* process open uplink pipe */
+static void ipc_imem_ul_pipe_process(struct iosm_imem *ipc_imem,
+ struct ipc_pipe *pipe)
+{
+ struct ipc_mem_channel *channel;
+ u32 tail = 0, head = 0;
+ struct sk_buff *skb;
+ s32 cnt = 0;
+
+ channel = pipe->channel;
+
+ /* Get the internal phase. */
+ ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol, pipe, &head,
+ &tail);
+
+ if (pipe->old_tail != tail) {
+ if (pipe->old_tail < tail)
+ cnt = tail - pipe->old_tail;
+ else
+ cnt = pipe->nr_of_entries - pipe->old_tail + tail;
+ }
+
+ /* Free UL buffers. */
+ while (cnt--) {
+ skb = ipc_protocol_ul_td_process(ipc_imem->ipc_protocol, pipe);
+
+ if (!skb)
+ continue;
+
+ /* If the user app was suspended in the uplink direction (blocking
+ * write), resume it.
+ */
+ if (IPC_CB(skb)->op_type == UL_USR_OP_BLOCKED)
+ complete(&channel->ul_sem);
+
+ /* Free the skbuf element. */
+ if (IPC_CB(skb)->op_type == UL_MUX_OP_ADB) {
+ if (channel->if_id == IPC_MEM_MUX_IP_CH_IF_ID)
+ ipc_mux_ul_encoded_process(ipc_imem->mux, skb);
+ else
+ dev_err(ipc_imem->dev,
+ "OP Type is UL_MUX, unknown if_id %d",
+ channel->if_id);
+ } else {
+ ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
+ }
+ }
+
+ /* Trace channel stats for IP UL pipe. */
+ if (ipc_imem_check_wwan_ips(pipe->channel))
+ ipc_mux_check_n_restart_tx(ipc_imem->mux);
+
+ if (ipc_imem->app_notify_ul_pend)
+ complete(&ipc_imem->ul_pend_sem);
+}
+
+/* Executes the irq. */
+static void ipc_imem_rom_irq_exec(struct iosm_imem *ipc_imem)
+{
+ struct ipc_mem_channel *channel;
+
+ if (ipc_imem->flash_channel_id < 0) {
+ ipc_imem->rom_exit_code = IMEM_ROM_EXIT_FAIL;
+ dev_err(ipc_imem->dev, "Missing flash app:%d",
+ ipc_imem->flash_channel_id);
+ return;
+ }
+
+ ipc_imem->rom_exit_code = ipc_mmio_get_rom_exit_code(ipc_imem->mmio);
+
+ /* Wake up the flash app to continue or to terminate depending
+ * on the CP ROM exit code.
+ */
+ channel = &ipc_imem->channels[ipc_imem->flash_channel_id];
+ complete(&channel->ul_sem);
+}
+
+/* Execute the UL bundle timer actions, generating the doorbell irq. */
+static int ipc_imem_tq_td_update_timer_cb(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol,
+ IPC_HP_TD_UPD_TMR);
+ return 0;
+}
+
+/* Consider link power management in the runtime phase. */
+static void ipc_imem_slp_control_exec(struct iosm_imem *ipc_imem)
+{
+ /* Link will go down; test for pending UL packets. */
+ if (ipc_protocol_pm_dev_sleep_handle(ipc_imem->ipc_protocol) &&
+ hrtimer_active(&ipc_imem->tdupdate_timer)) {
+ /* Generate the doorbell irq. */
+ ipc_imem_tq_td_update_timer_cb(ipc_imem, 0, NULL, 0);
+ /* Stop the TD update timer. */
+ ipc_imem_hrtimer_stop(&ipc_imem->tdupdate_timer);
+ /* Stop the fast update timer. */
+ ipc_imem_hrtimer_stop(&ipc_imem->fast_update_timer);
+ }
+}
+
+/* Execute startup timer and wait for delayed start (e.g. NAND) */
+static int ipc_imem_tq_startup_timer_cb(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ /* Update & check the current operation phase. */
+ if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN)
+ return -EIO;
+
+ if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
+ IPC_MEM_DEVICE_IPC_UNINIT) {
+ ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_INIT;
+
+ ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
+ IPC_MEM_DEVICE_IPC_INIT);
+
+ ipc_imem->hrtimer_period = ktime_set(0, 100 * 1000UL * 1000ULL);
+ /* reduce period to 100 ms to check for mmio init state */
+ if (!hrtimer_active(&ipc_imem->startup_timer))
+ hrtimer_start(&ipc_imem->startup_timer,
+ ipc_imem->hrtimer_period,
+ HRTIMER_MODE_REL);
+ } else if (ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
+ IPC_MEM_DEVICE_IPC_INIT) {
+ /* Startup complete - disable timer */
+ ipc_imem_hrtimer_stop(&ipc_imem->startup_timer);
+
+ /* Prepare the MMIO space */
+ ipc_mmio_config(ipc_imem->mmio);
+ ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_RUNNING;
+ ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
+ IPC_MEM_DEVICE_IPC_RUNNING);
+ }
+
+ return 0;
+}
+
+static enum hrtimer_restart ipc_imem_startup_timer_cb(struct hrtimer *hr_timer)
+{
+ enum hrtimer_restart result = HRTIMER_NORESTART;
+ struct iosm_imem *ipc_imem =
+ container_of(hr_timer, struct iosm_imem, startup_timer);
+
+ if (ktime_to_ns(ipc_imem->hrtimer_period)) {
+ hrtimer_forward(&ipc_imem->startup_timer, ktime_get(),
+ ipc_imem->hrtimer_period);
+ result = HRTIMER_RESTART;
+ }
+
+ ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_startup_timer_cb, 0,
+ NULL, 0, false);
+ return result;
+}
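
The callback above re-arms itself with hrtimer_forward(), which advances
the expiry by whole periods relative to the previous expiry rather than
to "now", so a callback that runs late does not accumulate drift. A rough
user-space model of that semantic (a sketch, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Advance expiry by whole periods until it lies in the future -
 * the drift-free rearm that hrtimer_forward() performs.
 */
static uint64_t forward(uint64_t expiry_ns, uint64_t now_ns,
                        uint64_t period_ns)
{
        while (expiry_ns <= now_ns)
                expiry_ns += period_ns;
        return expiry_ns;
}

int main(void)
{
        uint64_t period = 100000000ULL; /* 100 ms, as set above */

        /* Callback ran late at t = 250 ms: next expiry is 300 ms,
         * not 350 ms, keeping the 100 ms grid.
         */
        printf("next expiry: %llu ns\n",
               (unsigned long long)forward(0, 250000000ULL, period));
        return 0;
}
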
+
+/* Get the CP execution stage */
+static enum ipc_mem_exec_stage
+ipc_imem_get_exec_stage_buffered(struct iosm_imem *ipc_imem)
+{
+ return (ipc_imem->phase == IPC_P_RUN &&
+ ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) ?
+ ipc_protocol_get_ap_exec_stage(ipc_imem->ipc_protocol) :
+ ipc_mmio_get_exec_stage(ipc_imem->mmio);
+}
+
+/* Callback to send the modem ready uevent */
+static int ipc_imem_send_mdm_rdy_cb(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ enum ipc_mem_exec_stage exec_stage =
+ ipc_imem_get_exec_stage_buffered(ipc_imem);
+
+ if (exec_stage == IPC_MEM_EXEC_STAGE_RUN)
+ ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
+
+ return 0;
+}
+
+/* This function is executed in a task context via an ipc_worker object,
+ * as the creation or removal of a device can't be done from tasklet
+ * context.
+ */
+static void ipc_imem_run_state_worker(struct work_struct *instance)
+{
+ struct ipc_chnl_cfg chnl_cfg_port = { 0 };
+ struct ipc_mux_config mux_cfg;
+ struct iosm_imem *ipc_imem;
+ u8 ctrl_chl_idx = 0;
+
+ ipc_imem = container_of(instance, struct iosm_imem, run_state_worker);
+
+ if (ipc_imem->phase != IPC_P_RUN) {
+ dev_err(ipc_imem->dev,
+ "Modem link down. Exit run state worker.");
+ return;
+ }
+
+ if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg))
+ ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);
+
+ ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
+ if (ipc_imem->mux)
+ ipc_imem->mux->wwan = ipc_imem->wwan;
+
+ while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
+ if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
+ ipc_imem->ipc_port[ctrl_chl_idx] = NULL;
+ if (chnl_cfg_port.wwan_port_type != WWAN_PORT_UNKNOWN) {
+ ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
+ chnl_cfg_port,
+ IRQ_MOD_OFF);
+ ipc_imem->ipc_port[ctrl_chl_idx] =
+ ipc_port_init(ipc_imem, chnl_cfg_port);
+ }
+ }
+ ctrl_chl_idx++;
+ }
+
+ ipc_task_queue_send_task(ipc_imem, ipc_imem_send_mdm_rdy_cb, 0, NULL, 0,
+ false);
+
+ /* Complete all memory stores before setting bit */
+ smp_mb__before_atomic();
+
+ set_bit(FULLY_FUNCTIONAL, &ipc_imem->flag);
+
+ /* Complete all memory stores after setting bit */
+ smp_mb__after_atomic();
+}
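
The smp_mb__before_atomic()/set_bit()/smp_mb__after_atomic() sequence
publishes the completed setup before FULLY_FUNCTIONAL becomes observable
to other CPUs. A user-space sketch of the same publication ordering with
C11 release/acquire atomics (struct and field names are hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

struct imem_model {
        int mux_ready;                  /* plain setup data, written first */
        atomic_bool fully_functional;   /* the publication flag */
};

static void publish(struct imem_model *m)
{
        m->mux_ready = 1;
        /* Release: all stores above are visible before the flag is. */
        atomic_store_explicit(&m->fully_functional, true,
                              memory_order_release);
}

static bool observe(struct imem_model *m)
{
        /* Acquire: if the flag is seen, mux_ready is seen as well. */
        return atomic_load_explicit(&m->fully_functional,
                                    memory_order_acquire);
}

int main(void)
{
        struct imem_model m;

        m.mux_ready = 0;
        atomic_init(&m.fully_functional, false);
        publish(&m);
        return observe(&m) && m.mux_ready ? 0 : 1;
}
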
+
+static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
+{
+ enum ipc_mem_device_ipc_state curr_ipc_status;
+ enum ipc_phase old_phase, phase;
+ bool retry_allocation = false;
+ bool ul_pending = false;
+ int ch_id, i;
+
+ if (irq != IMEM_IRQ_DONT_CARE)
+ ipc_imem->ev_irq_pending[irq] = false;
+
+ /* Get the internal phase. */
+ old_phase = ipc_imem->phase;
+
+ if (old_phase == IPC_P_OFF_REQ) {
+ dev_dbg(ipc_imem->dev,
+ "[%s]: Ignoring MSI. Deinit sequence in progress!",
+ ipc_imem_phase_get_string(old_phase));
+ return;
+ }
+
+ /* Update the phase controlled by CP. */
+ phase = ipc_imem_phase_update(ipc_imem);
+
+ switch (phase) {
+ case IPC_P_RUN:
+ if (!ipc_imem->enter_runtime) {
+ /* Execute the transition from flash/boot to runtime. */
+ ipc_imem->enter_runtime = 1;
+
+ /* allow device to sleep, default value is
+ * IPC_HOST_SLEEP_ENTER_SLEEP
+ */
+ ipc_imem_msg_send_device_sleep(ipc_imem,
+ ipc_imem->device_sleep);
+
+ ipc_imem_msg_send_feature_set(ipc_imem,
+ IPC_MEM_INBAND_CRASH_SIG,
+ true);
+ }
+
+ curr_ipc_status =
+ ipc_protocol_get_ipc_status(ipc_imem->ipc_protocol);
+
+ /* check ipc_status change */
+ if (ipc_imem->ipc_status != curr_ipc_status) {
+ ipc_imem->ipc_status = curr_ipc_status;
+
+ if (ipc_imem->ipc_status ==
+ IPC_MEM_DEVICE_IPC_RUNNING) {
+ schedule_work(&ipc_imem->run_state_worker);
+ }
+ }
+
+ /* Consider power management in the runtime phase. */
+ ipc_imem_slp_control_exec(ipc_imem);
+ break; /* Continue with skbuf processing. */
+
+ /* Unexpected phases. */
+ case IPC_P_OFF:
+ case IPC_P_OFF_REQ:
+ dev_err(ipc_imem->dev, "confused phase %s",
+ ipc_imem_phase_get_string(phase));
+ return;
+
+ case IPC_P_PSI:
+ if (old_phase != IPC_P_ROM)
+ break;
+
+ fallthrough;
+ /* On CP the PSI phase is already active. */
+
+ case IPC_P_ROM:
+ /* Before CP ROM driver starts the PSI image, it sets
+ * the exit_code field on the doorbell scratchpad and
+ * triggers the irq.
+ */
+ ipc_imem_rom_irq_exec(ipc_imem);
+ return;
+
+ default:
+ break;
+ }
+
+ /* process message ring */
+ ipc_protocol_msg_process(ipc_imem, irq);
+
+ /* process all open pipes */
+ for (i = 0; i < IPC_MEM_MAX_CHANNELS; i++) {
+ struct ipc_pipe *ul_pipe = &ipc_imem->channels[i].ul_pipe;
+ struct ipc_pipe *dl_pipe = &ipc_imem->channels[i].dl_pipe;
+
+ if (dl_pipe->is_open &&
+ (irq == IMEM_IRQ_DONT_CARE || irq == dl_pipe->irq)) {
+ ipc_imem_dl_pipe_process(ipc_imem, dl_pipe);
+
+ if (dl_pipe->nr_of_queued_entries == 0)
+ retry_allocation = true;
+ }
+
+ if (ul_pipe->is_open)
+ ipc_imem_ul_pipe_process(ipc_imem, ul_pipe);
+ }
+
+ /* Try to generate new ADB or ADGH. */
+ if (ipc_mux_ul_data_encode(ipc_imem->mux))
+ ipc_imem_td_update_timer_start(ipc_imem);
+
+ /* Continue the send procedure with accumulated SIO or NETIF packets.
+ * Reset the debounce flags.
+ */
+ ul_pending |= ipc_imem_ul_write_td(ipc_imem);
+
+ /* if UL data is pending restart TD update timer */
+ if (ul_pending) {
+ ipc_imem->hrtimer_period =
+ ktime_set(0, TD_UPDATE_DEFAULT_TIMEOUT_USEC * 1000ULL);
+ if (!hrtimer_active(&ipc_imem->tdupdate_timer))
+ hrtimer_start(&ipc_imem->tdupdate_timer,
+ ipc_imem->hrtimer_period,
+ HRTIMER_MODE_REL);
+ }
+
+ /* If CP has executed the transition
+ * from IPC_INIT to IPC_RUNNING in the PSI
+ * phase, wake up the flash app to open the pipes.
+ */
+ if ((phase == IPC_P_PSI || phase == IPC_P_EBL) &&
+ ipc_imem->ipc_requested_state == IPC_MEM_DEVICE_IPC_RUNNING &&
+ ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
+ IPC_MEM_DEVICE_IPC_RUNNING &&
+ ipc_imem->flash_channel_id >= 0) {
+ /* Wake up the flash app to open the pipes. */
+ ch_id = ipc_imem->flash_channel_id;
+ complete(&ipc_imem->channels[ch_id].ul_sem);
+ }
+
+ /* Reset the expected CP state. */
+ ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_DONT_CARE;
+
+ if (retry_allocation) {
+ ipc_imem->hrtimer_period =
+ ktime_set(0, IPC_TD_ALLOC_TIMER_PERIOD_MS * 1000 * 1000ULL);
+ if (!hrtimer_active(&ipc_imem->td_alloc_timer))
+ hrtimer_start(&ipc_imem->td_alloc_timer,
+ ipc_imem->hrtimer_period,
+ HRTIMER_MODE_REL);
+ }
+}
+
+/* Callback by tasklet for handling interrupt events. */
+static int ipc_imem_tq_irq_cb(struct iosm_imem *ipc_imem, int arg, void *msg,
+ size_t size)
+{
+ ipc_imem_handle_irq(ipc_imem, arg);
+
+ return 0;
+}
+
+void ipc_imem_ul_send(struct iosm_imem *ipc_imem)
+{
+ /* start doorbell irq delay timer if UL is pending */
+ if (ipc_imem_ul_write_td(ipc_imem))
+ ipc_imem_td_update_timer_start(ipc_imem);
+}
+
+/* Check the execution stage and update the AP phase */
+static enum ipc_phase ipc_imem_phase_update_check(struct iosm_imem *ipc_imem,
+ enum ipc_mem_exec_stage stage)
+{
+ switch (stage) {
+ case IPC_MEM_EXEC_STAGE_BOOT:
+ if (ipc_imem->phase != IPC_P_ROM) {
+ /* Send this event only once */
+ ipc_uevent_send(ipc_imem->dev, UEVENT_ROM_READY);
+ }
+
+ ipc_imem->phase = IPC_P_ROM;
+ break;
+
+ case IPC_MEM_EXEC_STAGE_PSI:
+ ipc_imem->phase = IPC_P_PSI;
+ break;
+
+ case IPC_MEM_EXEC_STAGE_EBL:
+ ipc_imem->phase = IPC_P_EBL;
+ break;
+
+ case IPC_MEM_EXEC_STAGE_RUN:
+ if (ipc_imem->phase != IPC_P_RUN &&
+ ipc_imem->ipc_status == IPC_MEM_DEVICE_IPC_RUNNING) {
+ ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_READY);
+ }
+ ipc_imem->phase = IPC_P_RUN;
+ break;
+
+ case IPC_MEM_EXEC_STAGE_CRASH:
+ if (ipc_imem->phase != IPC_P_CRASH)
+ ipc_uevent_send(ipc_imem->dev, UEVENT_CRASH);
+
+ ipc_imem->phase = IPC_P_CRASH;
+ break;
+
+ case IPC_MEM_EXEC_STAGE_CD_READY:
+ if (ipc_imem->phase != IPC_P_CD_READY)
+ ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY);
+ ipc_imem->phase = IPC_P_CD_READY;
+ break;
+
+ default:
+ /* unknown exec stage:
+ * assume that link is down and send info to listeners
+ */
+ ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
+ break;
+ }
+
+ return ipc_imem->phase;
+}
+
+/* Send msg to device to open pipe */
+static bool ipc_imem_pipe_open(struct iosm_imem *ipc_imem,
+ struct ipc_pipe *pipe)
+{
+ union ipc_msg_prep_args prep_args = {
+ .pipe_open.pipe = pipe,
+ };
+
+ if (ipc_protocol_msg_send(ipc_imem->ipc_protocol,
+ IPC_MSG_PREP_PIPE_OPEN, &prep_args) == 0)
+ pipe->is_open = true;
+
+ return pipe->is_open;
+}
+
+/* Allocates the TDs for the given pipe along with firing HP update DB. */
+static int ipc_imem_tq_pipe_td_alloc(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ struct ipc_pipe *dl_pipe = msg;
+ bool processed = false;
+ int i;
+
+ for (i = 0; i < dl_pipe->nr_of_entries - 1; i++)
+ processed |= ipc_imem_dl_skb_alloc(ipc_imem, dl_pipe);
+
+ /* Trigger the doorbell irq to inform CP that new downlink buffers are
+ * available.
+ */
+ if (processed)
+ ipc_protocol_doorbell_trigger(ipc_imem->ipc_protocol, arg);
+
+ return 0;
+}
+
+static enum hrtimer_restart
+ipc_imem_td_update_timer_cb(struct hrtimer *hr_timer)
+{
+ struct iosm_imem *ipc_imem =
+ container_of(hr_timer, struct iosm_imem, tdupdate_timer);
+
+ ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_td_update_timer_cb, 0,
+ NULL, 0, false);
+ return HRTIMER_NORESTART;
+}
+
+/* Get the CP execution state and map it to the AP phase. */
+enum ipc_phase ipc_imem_phase_update(struct iosm_imem *ipc_imem)
+{
+ enum ipc_mem_exec_stage exec_stage =
+ ipc_imem_get_exec_stage_buffered(ipc_imem);
+ /* While the deinit sequence is running (IPC_P_OFF_REQ), keep the
+ * internal phase; otherwise map the CP execution stage to the AP phase.
+ */
+ return ipc_imem->phase == IPC_P_OFF_REQ ?
+ ipc_imem->phase :
+ ipc_imem_phase_update_check(ipc_imem, exec_stage);
+}
+
+const char *ipc_imem_phase_get_string(enum ipc_phase phase)
+{
+ switch (phase) {
+ case IPC_P_RUN:
+ return "A-RUN";
+
+ case IPC_P_OFF:
+ return "A-OFF";
+
+ case IPC_P_ROM:
+ return "A-ROM";
+
+ case IPC_P_PSI:
+ return "A-PSI";
+
+ case IPC_P_EBL:
+ return "A-EBL";
+
+ case IPC_P_CRASH:
+ return "A-CRASH";
+
+ case IPC_P_CD_READY:
+ return "A-CD_READY";
+
+ case IPC_P_OFF_REQ:
+ return "A-OFF_REQ";
+
+ default:
+ return "A-???";
+ }
+}
+
+void ipc_imem_pipe_close(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
+{
+ union ipc_msg_prep_args prep_args = { .pipe_close.pipe = pipe };
+
+ pipe->is_open = false;
+ ipc_protocol_msg_send(ipc_imem->ipc_protocol, IPC_MSG_PREP_PIPE_CLOSE,
+ &prep_args);
+
+ ipc_imem_pipe_cleanup(ipc_imem, pipe);
+}
+
+void ipc_imem_channel_close(struct iosm_imem *ipc_imem, int channel_id)
+{
+ struct ipc_mem_channel *channel;
+
+ if (channel_id < 0 || channel_id >= ipc_imem->nr_of_channels) {
+ dev_err(ipc_imem->dev, "invalid channel id %d", channel_id);
+ return;
+ }
+
+ channel = &ipc_imem->channels[channel_id];
+
+ if (channel->state == IMEM_CHANNEL_FREE) {
+ dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
+ channel_id, channel->state);
+ return;
+ }
+
+ /* In the CP power off mode, release only the channel id. */
+ if (channel->state == IMEM_CHANNEL_RESERVED)
+ goto channel_free;
+
+ if (ipc_imem->phase == IPC_P_RUN) {
+ ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
+ ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
+ }
+
+ ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
+ ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
+
+channel_free:
+ ipc_imem_channel_free(channel);
+}
+
+struct ipc_mem_channel *ipc_imem_channel_open(struct iosm_imem *ipc_imem,
+ int channel_id, u32 db_id)
+{
+ struct ipc_mem_channel *channel;
+
+ if (channel_id < 0 || channel_id >= IPC_MEM_MAX_CHANNELS) {
+ dev_err(ipc_imem->dev, "invalid channel ID: %d", channel_id);
+ return NULL;
+ }
+
+ channel = &ipc_imem->channels[channel_id];
+
+ channel->state = IMEM_CHANNEL_ACTIVE;
+
+ if (!ipc_imem_pipe_open(ipc_imem, &channel->ul_pipe))
+ goto ul_pipe_err;
+
+ if (!ipc_imem_pipe_open(ipc_imem, &channel->dl_pipe))
+ goto dl_pipe_err;
+
+ /* Allocate the downlink buffers in tasklet context. */
+ if (ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_pipe_td_alloc, db_id,
+ &channel->dl_pipe, 0, false)) {
+ dev_err(ipc_imem->dev, "td allocation failed : %d", channel_id);
+ goto task_failed;
+ }
+
+ /* Active channel. */
+ return channel;
+task_failed:
+ ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
+dl_pipe_err:
+ ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
+ul_pipe_err:
+ ipc_imem_channel_free(channel);
+ return NULL;
+}
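
ipc_imem_channel_open() uses the kernel's reverse-order goto unwind: each
error label undoes exactly the acquisitions that succeeded before the
failure point. Reduced to two resources, the shape looks like this (a
sketch with plain malloc/free standing in for the driver's resources):

#include <stdio.h>
#include <stdlib.h>

static int open_both(char **a, char **b)
{
        *a = malloc(16);
        if (!*a)
                goto a_err;

        *b = malloc(16);
        if (!*b)
                goto b_err;

        return 0;               /* both acquired */

b_err:
        free(*a);               /* undo only the first acquisition */
a_err:
        return -1;
}

int main(void)
{
        char *a, *b;

        if (!open_both(&a, &b)) {
                puts("acquired");
                free(b);
                free(a);
        }
        return 0;
}
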
+
+void ipc_imem_pm_suspend(struct iosm_imem *ipc_imem)
+{
+ ipc_protocol_suspend(ipc_imem->ipc_protocol);
+}
+
+void ipc_imem_pm_s2idle_sleep(struct iosm_imem *ipc_imem, bool sleep)
+{
+ ipc_protocol_s2idle_sleep(ipc_imem->ipc_protocol, sleep);
+}
+
+void ipc_imem_pm_resume(struct iosm_imem *ipc_imem)
+{
+ enum ipc_mem_exec_stage stage;
+
+ if (ipc_protocol_resume(ipc_imem->ipc_protocol)) {
+ stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
+ ipc_imem_phase_update_check(ipc_imem, stage);
+ }
+}
+
+void ipc_imem_channel_free(struct ipc_mem_channel *channel)
+{
+ /* Reset dynamic channel elements. */
+ channel->state = IMEM_CHANNEL_FREE;
+}
+
+int ipc_imem_channel_alloc(struct iosm_imem *ipc_imem, int index,
+ enum ipc_ctype ctype)
+{
+ struct ipc_mem_channel *channel;
+ int i;
+
+ /* Find channel of given type/index */
+ for (i = 0; i < ipc_imem->nr_of_channels; i++) {
+ channel = &ipc_imem->channels[i];
+ if (channel->ctype == ctype && channel->index == index)
+ break;
+ }
+
+ if (i >= ipc_imem->nr_of_channels) {
+ dev_dbg(ipc_imem->dev,
+ "no channel definition for index=%d ctype=%d", index,
+ ctype);
+ return -ECHRNG;
+ }
+
+ if (ipc_imem->channels[i].state != IMEM_CHANNEL_FREE) {
+ dev_dbg(ipc_imem->dev, "channel is in use");
+ return -EBUSY;
+ }
+
+ if (channel->ctype == IPC_CTYPE_WWAN &&
+ index == IPC_MEM_MUX_IP_CH_IF_ID)
+ channel->if_id = index;
+
+ channel->channel_id = index;
+ channel->state = IMEM_CHANNEL_RESERVED;
+
+ return i;
+}
+
+void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
+ struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
+{
+ struct ipc_mem_channel *channel;
+
+ if (chnl_cfg.ul_pipe >= IPC_MEM_MAX_PIPES ||
+ chnl_cfg.dl_pipe >= IPC_MEM_MAX_PIPES) {
+ dev_err(ipc_imem->dev, "invalid pipe: ul_pipe=%d, dl_pipe=%d",
+ chnl_cfg.ul_pipe, chnl_cfg.dl_pipe);
+ return;
+ }
+
+ if (ipc_imem->nr_of_channels >= IPC_MEM_MAX_CHANNELS) {
+ dev_err(ipc_imem->dev, "too many channels");
+ return;
+ }
+
+ channel = &ipc_imem->channels[ipc_imem->nr_of_channels];
+ channel->channel_id = ipc_imem->nr_of_channels;
+ channel->ctype = ctype;
+ channel->index = chnl_cfg.id;
+ channel->net_err_count = 0;
+ channel->state = IMEM_CHANNEL_FREE;
+ ipc_imem->nr_of_channels++;
+
+ ipc_imem_channel_update(ipc_imem, channel->channel_id, chnl_cfg,
+ IRQ_MOD_OFF);
+
+ skb_queue_head_init(&channel->ul_list);
+
+ init_completion(&channel->ul_sem);
+}
+
+void ipc_imem_channel_update(struct iosm_imem *ipc_imem, int id,
+ struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation)
+{
+ struct ipc_mem_channel *channel;
+
+ if (id < 0 || id >= ipc_imem->nr_of_channels) {
+ dev_err(ipc_imem->dev, "invalid channel id %d", id);
+ return;
+ }
+
+ channel = &ipc_imem->channels[id];
+
+ if (channel->state != IMEM_CHANNEL_FREE &&
+ channel->state != IMEM_CHANNEL_RESERVED) {
+ dev_err(ipc_imem->dev, "invalid channel state %d",
+ channel->state);
+ return;
+ }
+
+ channel->ul_pipe.nr_of_entries = chnl_cfg.ul_nr_of_entries;
+ channel->ul_pipe.pipe_nr = chnl_cfg.ul_pipe;
+ channel->ul_pipe.is_open = false;
+ channel->ul_pipe.irq = IPC_UL_PIPE_IRQ_VECTOR;
+ channel->ul_pipe.channel = channel;
+ channel->ul_pipe.dir = IPC_MEM_DIR_UL;
+ channel->ul_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
+ channel->ul_pipe.irq_moderation = irq_moderation;
+ channel->ul_pipe.buf_size = 0;
+
+ channel->dl_pipe.nr_of_entries = chnl_cfg.dl_nr_of_entries;
+ channel->dl_pipe.pipe_nr = chnl_cfg.dl_pipe;
+ channel->dl_pipe.is_open = false;
+ channel->dl_pipe.irq = IPC_DL_PIPE_IRQ_VECTOR;
+ channel->dl_pipe.channel = channel;
+ channel->dl_pipe.dir = IPC_MEM_DIR_DL;
+ channel->dl_pipe.accumulation_backoff = chnl_cfg.accumulation_backoff;
+ channel->dl_pipe.irq_moderation = irq_moderation;
+ channel->dl_pipe.buf_size = chnl_cfg.dl_buf_size;
+}
+
+static void ipc_imem_channel_reset(struct iosm_imem *ipc_imem)
+{
+ int i;
+
+ for (i = 0; i < ipc_imem->nr_of_channels; i++) {
+ struct ipc_mem_channel *channel;
+
+ channel = &ipc_imem->channels[i];
+
+ ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
+ ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
+
+ ipc_imem_channel_free(channel);
+ }
+}
+
+void ipc_imem_pipe_cleanup(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe)
+{
+ struct sk_buff *skb;
+
+ /* Force pipe to closed state also when not explicitly closed through
+ * ipc_imem_pipe_close()
+ */
+ pipe->is_open = false;
+
+ /* Empty the uplink skb accumulator. */
+ while ((skb = skb_dequeue(&pipe->channel->ul_list)))
+ ipc_pcie_kfree_skb(ipc_imem->pcie, skb);
+
+ ipc_protocol_pipe_cleanup(ipc_imem->ipc_protocol, pipe);
+}
+
+/* Send IPC protocol uninit to the modem when Link is active. */
+static void ipc_imem_device_ipc_uninit(struct iosm_imem *ipc_imem)
+{
+ int timeout = IPC_MODEM_UNINIT_TIMEOUT_MS;
+ enum ipc_mem_device_ipc_state ipc_state;
+
+ /* If the PCIe link is up, set the modem to IPC_UNINIT;
+ * otherwise, when a PCIe link down happens, ignore it.
+ */
+ if (ipc_pcie_check_data_link_active(ipc_imem->pcie)) {
+ /* set modem to UNINIT
+ * (in case we want to reload the AP driver without resetting
+ * the modem)
+ */
+ ipc_doorbell_fire(ipc_imem->pcie, IPC_DOORBELL_IRQ_IPC,
+ IPC_MEM_DEVICE_IPC_UNINIT);
+ ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
+
+ /* Wait for maximum 30ms to allow the Modem to uninitialize the
+ * protocol.
+ */
+ while ((ipc_state <= IPC_MEM_DEVICE_IPC_DONT_CARE) &&
+ (ipc_state != IPC_MEM_DEVICE_IPC_UNINIT) &&
+ (timeout > 0)) {
+ usleep_range(1000, 1250);
+ timeout--;
+ ipc_state = ipc_mmio_get_ipc_state(ipc_imem->mmio);
+ }
+ }
+}
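
The wait above is a bounded poll: re-read the device state roughly once
per millisecond until it matches or the budget runs out. The same shape
as a reusable helper might look like this (hypothetical names, user-space
sketch):

#include <stdbool.h>
#include <unistd.h>

/* Poll a predicate roughly once per millisecond for at most
 * timeout_ms iterations; one final check after the budget.
 */
static bool poll_until(bool (*done)(void *), void *ctx, int timeout_ms)
{
        while (timeout_ms-- > 0) {
                if (done(ctx))
                        return true;
                usleep(1000);   /* ~1 ms, like usleep_range(1000, 1250) */
        }
        return done(ctx);
}

static int hits;
static bool ready(void *ctx) { (void)ctx; return ++hits > 3; }

int main(void)
{
        return poll_until(ready, NULL, 30) ? 0 : 1;
}
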
+
+void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
+{
+ ipc_imem->phase = IPC_P_OFF_REQ;
+
+ /* forward MDM_NOT_READY to listeners */
+ ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_NOT_READY);
+
+ hrtimer_cancel(&ipc_imem->td_alloc_timer);
+ hrtimer_cancel(&ipc_imem->tdupdate_timer);
+ hrtimer_cancel(&ipc_imem->fast_update_timer);
+ hrtimer_cancel(&ipc_imem->startup_timer);
+
+ /* cancel the pending run state work */
+ cancel_work_sync(&ipc_imem->run_state_worker);
+
+ if (test_and_clear_bit(FULLY_FUNCTIONAL, &ipc_imem->flag)) {
+ ipc_mux_deinit(ipc_imem->mux);
+ ipc_wwan_deinit(ipc_imem->wwan);
+ ipc_port_deinit(ipc_imem->ipc_port);
+ }
+
+ ipc_imem_device_ipc_uninit(ipc_imem);
+ ipc_imem_channel_reset(ipc_imem);
+
+ ipc_protocol_deinit(ipc_imem->ipc_protocol);
+ ipc_task_deinit(ipc_imem->ipc_task);
+
+ kfree(ipc_imem->ipc_task);
+ kfree(ipc_imem->mmio);
+
+ ipc_imem->phase = IPC_P_OFF;
+}
+
+/* After CP has unblocked the PCIe link, save the start address of the doorbell
+ * scratchpad and prepare the shared memory region. If the flashing to RAM
+ * procedure shall be executed, copy the chip information from the doorbell
+ * scratchpad to the application buffer and wake up the flash app.
+ */
+static int ipc_imem_config(struct iosm_imem *ipc_imem)
+{
+ enum ipc_phase phase;
+
+ /* Initialize the semaphore for the blocking read UL/DL transfer. */
+ init_completion(&ipc_imem->ul_pend_sem);
+
+ init_completion(&ipc_imem->dl_pend_sem);
+
+ /* clear internal flags */
+ ipc_imem->ipc_status = IPC_MEM_DEVICE_IPC_UNINIT;
+ ipc_imem->enter_runtime = 0;
+
+ phase = ipc_imem_phase_update(ipc_imem);
+
+ /* Either CP shall be in the power off or power on phase. */
+ switch (phase) {
+ case IPC_P_ROM:
+ ipc_imem->hrtimer_period = ktime_set(0, 1000 * 1000 * 1000ULL);
+ /* poll execution stage (for delayed start, e.g. NAND) */
+ if (!hrtimer_active(&ipc_imem->startup_timer))
+ hrtimer_start(&ipc_imem->startup_timer,
+ ipc_imem->hrtimer_period,
+ HRTIMER_MODE_REL);
+ return 0;
+
+ case IPC_P_PSI:
+ case IPC_P_EBL:
+ case IPC_P_RUN:
+ /* The initial IPC state is IPC_MEM_DEVICE_IPC_UNINIT. */
+ ipc_imem->ipc_requested_state = IPC_MEM_DEVICE_IPC_UNINIT;
+
+ /* Verify the expected initial state. */
+ if (ipc_imem->ipc_requested_state ==
+ ipc_mmio_get_ipc_state(ipc_imem->mmio)) {
+ ipc_imem_ipc_init_check(ipc_imem);
+
+ return 0;
+ }
+ dev_err(ipc_imem->dev,
+ "ipc_status(%d) != IPC_MEM_DEVICE_IPC_UNINIT",
+ ipc_mmio_get_ipc_state(ipc_imem->mmio));
+ break;
+ case IPC_P_CRASH:
+ case IPC_P_CD_READY:
+ dev_dbg(ipc_imem->dev,
+ "Modem is in phase %d, reset Modem to collect CD",
+ phase);
+ return 0;
+ default:
+ dev_err(ipc_imem->dev, "unexpected operation phase %d", phase);
+ break;
+ }
+
+ complete(&ipc_imem->dl_pend_sem);
+ complete(&ipc_imem->ul_pend_sem);
+ ipc_imem->phase = IPC_P_OFF;
+ return -EIO;
+}
+
+/* Pass the dev ptr to the shared memory driver and request the entry points */
+struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
+ void __iomem *mmio, struct device *dev)
+{
+ struct iosm_imem *ipc_imem = kzalloc(sizeof(*pcie->imem), GFP_KERNEL);
+
+ if (!ipc_imem)
+ return NULL;
+
+ /* Save the device address. */
+ ipc_imem->pcie = pcie;
+ ipc_imem->dev = dev;
+
+ ipc_imem->pci_device_id = device_id;
+
+ ipc_imem->ev_cdev_write_pending = false;
+ ipc_imem->cp_version = 0;
+ ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;
+
+ /* Reset the flash channel id. */
+ ipc_imem->flash_channel_id = -1;
+
+ /* Reset the max number of configured channels */
+ ipc_imem->nr_of_channels = 0;
+
+ /* allocate IPC MMIO */
+ ipc_imem->mmio = ipc_mmio_init(mmio, ipc_imem->dev);
+ if (!ipc_imem->mmio) {
+ dev_err(ipc_imem->dev, "failed to initialize mmio region");
+ goto mmio_init_fail;
+ }
+
+ ipc_imem->ipc_task = kzalloc(sizeof(*ipc_imem->ipc_task),
+ GFP_KERNEL);
+
+ /* Create the tasklet for event handling. */
+ if (!ipc_imem->ipc_task)
+ goto ipc_task_fail;
+
+ if (ipc_task_init(ipc_imem->ipc_task))
+ goto ipc_task_init_fail;
+
+ ipc_imem->ipc_task->dev = ipc_imem->dev;
+
+ INIT_WORK(&ipc_imem->run_state_worker, ipc_imem_run_state_worker);
+
+ ipc_imem->ipc_protocol = ipc_protocol_init(ipc_imem);
+
+ if (!ipc_imem->ipc_protocol)
+ goto protocol_init_fail;
+
+ /* The phase is set to power off. */
+ ipc_imem->phase = IPC_P_OFF;
+
+ hrtimer_init(&ipc_imem->startup_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ ipc_imem->startup_timer.function = ipc_imem_startup_timer_cb;
+
+ hrtimer_init(&ipc_imem->tdupdate_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ ipc_imem->tdupdate_timer.function = ipc_imem_td_update_timer_cb;
+
+ hrtimer_init(&ipc_imem->fast_update_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ ipc_imem->fast_update_timer.function = ipc_imem_fast_update_timer_cb;
+
+ hrtimer_init(&ipc_imem->td_alloc_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ ipc_imem->td_alloc_timer.function = ipc_imem_td_alloc_timer_cb;
+
+ if (ipc_imem_config(ipc_imem)) {
+ dev_err(ipc_imem->dev, "failed to initialize the imem");
+ goto imem_config_fail;
+ }
+
+ return ipc_imem;
+
+imem_config_fail:
+ hrtimer_cancel(&ipc_imem->td_alloc_timer);
+ hrtimer_cancel(&ipc_imem->fast_update_timer);
+ hrtimer_cancel(&ipc_imem->tdupdate_timer);
+ hrtimer_cancel(&ipc_imem->startup_timer);
+protocol_init_fail:
+ cancel_work_sync(&ipc_imem->run_state_worker);
+ ipc_task_deinit(ipc_imem->ipc_task);
+ipc_task_init_fail:
+ kfree(ipc_imem->ipc_task);
+ipc_task_fail:
+ kfree(ipc_imem->mmio);
+mmio_init_fail:
+ kfree(ipc_imem);
+ return NULL;
+}
+
+void ipc_imem_irq_process(struct iosm_imem *ipc_imem, int irq)
+{
+ /* Debounce IPC_EV_IRQ. */
+ if (ipc_imem && !ipc_imem->ev_irq_pending[irq]) {
+ ipc_imem->ev_irq_pending[irq] = true;
+ ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_irq_cb, irq,
+ NULL, 0, false);
+ }
+}
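
The debounce above guarantees at most one queued task per IRQ vector:
ev_irq_pending[irq] blocks re-queuing until ipc_imem_handle_irq() clears
it again at the top of processing. A compressed user-space model of that
coalescing (the queue and all names are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define NVEC 4

static bool pending[NVEC];
static int queue[16], q_head, q_tail;

static void irq_raise(int irq)
{
        if (pending[irq])
                return;                 /* already queued: coalesce */
        pending[irq] = true;
        queue[q_tail++ % 16] = irq;     /* hand off to the "tasklet" */
}

static void tasklet_drain(void)
{
        while (q_head != q_tail) {
                int irq = queue[q_head++ % 16];

                pending[irq] = false;   /* re-arm before processing */
                printf("processing irq %d\n", irq);
        }
}

int main(void)
{
        irq_raise(2);
        irq_raise(2);           /* coalesced: only one queue entry */
        tasklet_drain();
        return 0;
}
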
+
+void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend)
+{
+ ipc_imem->td_update_timer_suspended = suspend;
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.h b/drivers/net/wwan/iosm/iosm_ipc_imem.h
new file mode 100644
index 000000000000..0d2f10e4cbc8
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.h
@@ -0,0 +1,579 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_IMEM_H
+#define IOSM_IPC_IMEM_H
+
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+#include "iosm_ipc_mmio.h"
+#include "iosm_ipc_pcie.h"
+#include "iosm_ipc_uevent.h"
+#include "iosm_ipc_wwan.h"
+#include "iosm_ipc_task_queue.h"
+
+struct ipc_chnl_cfg;
+
+/* IRQ moderation in usec */
+#define IRQ_MOD_OFF 0
+#define IRQ_MOD_NET 1000
+#define IRQ_MOD_TRC 4000
+
+/* Either the PSI image is accepted by CP or the suspended flash tool is
+ * woken and informed that the CP ROM driver is not ready to process the
+ * PSI image.
+ * unit : milliseconds
+ */
+#define IPC_PSI_TRANSFER_TIMEOUT 3000
+
+/* Timeout, in 20 ms steps, to wait for the modem to boot up to the
+ * IPC_MEM_DEVICE_IPC_INIT state.
+ * unit : milliseconds (500 * ipc_util_msleep(20))
+ */
+#define IPC_MODEM_BOOT_TIMEOUT 500
+
+/* Wait timeout for ipc status reflects IPC_MEM_DEVICE_IPC_UNINIT
+ * unit : milliseconds
+ */
+#define IPC_MODEM_UNINIT_TIMEOUT_MS 30
+
+/* Pending time for processing data.
+ * unit : milliseconds
+ */
+#define IPC_PEND_DATA_TIMEOUT 500
+
+/* The timeout in milliseconds for application to wait for remote time. */
+#define IPC_REMOTE_TS_TIMEOUT_MS 10
+
+/* Timeout for TD allocation retry.
+ * unit : milliseconds
+ */
+#define IPC_TD_ALLOC_TIMER_PERIOD_MS 100
+
+/* Host sleep target is host */
+#define IPC_HOST_SLEEP_HOST 0
+
+/* Host sleep target is device */
+#define IPC_HOST_SLEEP_DEVICE 1
+
+/* Sleep message, target host: AP enters sleep / target device: CP is
+ * allowed to enter sleep and shall use the host sleep protocol
+ */
+#define IPC_HOST_SLEEP_ENTER_SLEEP 0
+
+/* Sleep_message, target host: AP exits sleep / target device: CP is
+ * NOT allowed to enter sleep
+ */
+#define IPC_HOST_SLEEP_EXIT_SLEEP 1
+
+#define IMEM_IRQ_DONT_CARE (-1)
+
+#define IPC_MEM_MAX_CHANNELS 7
+
+#define IPC_MEM_MUX_IP_SESSION_ENTRIES 8
+
+#define IPC_MEM_MUX_IP_CH_IF_ID 0
+
+#define TD_UPDATE_DEFAULT_TIMEOUT_USEC 1900
+
+#define FORCE_UPDATE_DEFAULT_TIMEOUT_USEC 500
+
+/* Sleep_message, target host: not applicable / target device: CP is
+ * allowed to enter sleep and shall NOT use the device sleep protocol
+ */
+#define IPC_HOST_SLEEP_ENTER_SLEEP_NO_PROTOCOL 2
+
+/* in_band_crash_signal IPC_MEM_INBAND_CRASH_SIG
+ * Modem crash notification configuration. If this value is non-zero then
+ * the FEATURE_SET message will be sent to the Modem; as a result the
+ * Modem will signal a crash via the Execution Stage register. If this
+ * value is zero then the Modem will use an out-of-band method to notify
+ * about its crash.
+ */
+#define IPC_MEM_INBAND_CRASH_SIG 1
+
+/* Extra headroom to be allocated for DL SKBs to allow addition of Ethernet
+ * header
+ */
+#define IPC_MEM_DL_ETH_OFFSET 16
+
+#define IPC_CB(skb) ((struct ipc_skb_cb *)((skb)->cb))
+
+#define FULLY_FUNCTIONAL 0
+
+/* List of the supported UL/DL pipes. */
+enum ipc_mem_pipes {
+ IPC_MEM_PIPE_0 = 0,
+ IPC_MEM_PIPE_1,
+ IPC_MEM_PIPE_2,
+ IPC_MEM_PIPE_3,
+ IPC_MEM_PIPE_4,
+ IPC_MEM_PIPE_5,
+ IPC_MEM_PIPE_6,
+ IPC_MEM_PIPE_7,
+ IPC_MEM_PIPE_8,
+ IPC_MEM_PIPE_9,
+ IPC_MEM_PIPE_10,
+ IPC_MEM_PIPE_11,
+ IPC_MEM_PIPE_12,
+ IPC_MEM_PIPE_13,
+ IPC_MEM_PIPE_14,
+ IPC_MEM_PIPE_15,
+ IPC_MEM_PIPE_16,
+ IPC_MEM_PIPE_17,
+ IPC_MEM_PIPE_18,
+ IPC_MEM_PIPE_19,
+ IPC_MEM_PIPE_20,
+ IPC_MEM_PIPE_21,
+ IPC_MEM_PIPE_22,
+ IPC_MEM_PIPE_23,
+ IPC_MEM_MAX_PIPES
+};
+
+/* Enum defining channel states. */
+enum ipc_channel_state {
+ IMEM_CHANNEL_FREE,
+ IMEM_CHANNEL_RESERVED,
+ IMEM_CHANNEL_ACTIVE,
+ IMEM_CHANNEL_CLOSING,
+};
+
+/* Time Unit */
+enum ipc_time_unit {
+ IPC_SEC = 0,
+ IPC_MILLI_SEC = 1,
+ IPC_MICRO_SEC = 2,
+ IPC_NANO_SEC = 3,
+ IPC_PICO_SEC = 4,
+ IPC_FEMTO_SEC = 5,
+ IPC_ATTO_SEC = 6,
+};
+
+/**
+ * enum ipc_ctype - Enum defining supported channel types needed for
+ * control/IP traffic.
+ * @IPC_CTYPE_WWAN: Used for IP traffic
+ * @IPC_CTYPE_CTRL: Used for Control Communication
+ */
+enum ipc_ctype {
+ IPC_CTYPE_WWAN,
+ IPC_CTYPE_CTRL,
+};
+
+/* Pipe direction. */
+enum ipc_mem_pipe_dir {
+ IPC_MEM_DIR_UL,
+ IPC_MEM_DIR_DL,
+};
+
+/* HP update identifier. To be used as data for ipc_cp_irq_hpda_update() */
+enum ipc_hp_identifier {
+ IPC_HP_MR = 0,
+ IPC_HP_PM_TRIGGER,
+ IPC_HP_WAKEUP_SPEC_TMR,
+ IPC_HP_TD_UPD_TMR_START,
+ IPC_HP_TD_UPD_TMR,
+ IPC_HP_FAST_TD_UPD_TMR,
+ IPC_HP_UL_WRITE_TD,
+ IPC_HP_DL_PROCESS,
+ IPC_HP_NET_CHANNEL_INIT,
+ IPC_HP_CDEV_OPEN,
+};
+
+/**
+ * struct ipc_pipe - Structure for Pipe.
+ * @tdr_start: Ipc private protocol Transfer Descriptor Ring
+ * @channel: Id of the sio device, set by imem_sio_open,
+ * needed to pass DL char to the user terminal
+ * @skbr_start: Circular buffer for skbuf and the buffer
+ * reference in a tdr_start entry.
+ * @phy_tdr_start: Transfer descriptor start address
+ * @old_head: last head pointer reported to CP.
+ * @old_tail: AP read position before CP moves the read
+ * position to write/head. If CP has consumed the
+ * buffers, AP has to free the skbufs starting at
+ * tdr_start[old_tail].
+ * @nr_of_entries: Number of elements of skb_start and tdr_start.
+ * @max_nr_of_queued_entries: Maximum number of queued entries in TDR
+ * @accumulation_backoff: Accumulation in usec for accumulation
+ * backoff (0 = no acc backoff)
+ * @irq_moderation: timer in usec for irq_moderation
+ * (0=no irq moderation)
+ * @pipe_nr: Pipe identification number
+ * @irq: Interrupt vector
+ * @dir: Direction of data stream in pipe
+ * @td_tag: Unique tag of the buffer queued
+ * @buf_size: Buffer size (in bytes) for preallocated
+ * buffers (for DL pipes)
+ * @nr_of_queued_entries: Queued number of entries
+ * @is_open: Check for open pipe status
+ */
+struct ipc_pipe {
+ struct ipc_protocol_td *tdr_start;
+ struct ipc_mem_channel *channel;
+ struct sk_buff **skbr_start;
+ dma_addr_t phy_tdr_start;
+ u32 old_head;
+ u32 old_tail;
+ u32 nr_of_entries;
+ u32 max_nr_of_queued_entries;
+ u32 accumulation_backoff;
+ u32 irq_moderation;
+ u32 pipe_nr;
+ u32 irq;
+ enum ipc_mem_pipe_dir dir;
+ u32 td_tag;
+ u32 buf_size;
+ u16 nr_of_queued_entries;
+ u8 is_open:1;
+};
+
+/**
+ * struct ipc_mem_channel - Structure for Channel.
+ * @channel_id: Instance of the channel list; it is returned to the
+ * user at the end of the open operation.
+ * @ctype: Control or netif channel.
+ * @index: unique index per ctype
+ * @ul_pipe: pipe objects
+ * @dl_pipe: pipe objects
+ * @if_id: Interface ID
+ * @net_err_count: Number of downlink errors returned by ipc_wwan_receive
+ * interface at the entry point of the IP stack.
+ * @state: Free, reserved or busy (in use).
+ * @ul_sem: Needed for the blocking write or uplink transfer.
+ * @ul_list: Uplink accumulator which is filled by the uplink
+ * char app or IP stack. The socket buffer pointers are
+ * added to the descriptor list in the kthread context.
+ */
+struct ipc_mem_channel {
+ int channel_id;
+ enum ipc_ctype ctype;
+ int index;
+ struct ipc_pipe ul_pipe;
+ struct ipc_pipe dl_pipe;
+ int if_id;
+ u32 net_err_count;
+ enum ipc_channel_state state;
+ struct completion ul_sem;
+ struct sk_buff_head ul_list;
+};
+
+/**
+ * enum ipc_phase - Different AP and CP phases.
+ * The enums defined after "IPC_P_ROM" and before
+ * "IPC_P_RUN" indicate the operating states where CP can
+ * respond to any request. Take this into consideration
+ * when introducing a new phase.
+ * @IPC_P_OFF: The host PC is running and the driver is loaded, but CP
+ * is in power off mode. The PCIe bus driver sets the
+ * device power mode to D3hot. In this phase the driver
+ * polls the device until it is powered on and signals
+ * power mode D0.
+ * @IPC_P_OFF_REQ: The intermediate phase between cleanup activity starts
+ * and ends.
+ * @IPC_P_CRASH: The phase indicating CP crash
+ * @IPC_P_CD_READY: The phase indicating CP core dump is ready
+ * @IPC_P_ROM: After power on, CP starts in ROM mode and the IPC ROM
+ * driver is waiting 150 ms for the AP active notification
+ * saved in the PCI link status register.
+ * @IPC_P_PSI: Primary signed image download phase
+ * @IPC_P_EBL: Extended bootloader phase
+ * @IPC_P_RUN: The phase after flashing to RAM is the RUNTIME phase.
+ */
+enum ipc_phase {
+ IPC_P_OFF,
+ IPC_P_OFF_REQ,
+ IPC_P_CRASH,
+ IPC_P_CD_READY,
+ IPC_P_ROM,
+ IPC_P_PSI,
+ IPC_P_EBL,
+ IPC_P_RUN,
+};
+
+/**
+ * struct iosm_imem - Current state of the IPC shared memory.
+ * @mmio: mmio instance to access CP MMIO area /
+ * doorbell scratchpad.
+ * @ipc_protocol: IPC Protocol instance
+ * @ipc_task: Task for entry into ipc task queue
+ * @wwan: WWAN device pointer
+ * @mux: IP Data multiplexing state.
+ * @sio: IPC SIO data structure pointer
+ * @ipc_port: IPC PORT data structure pointer
+ * @pcie: IPC PCIe
+ * @dev: Pointer to device structure
+ * @flash_channel_id: Reserved channel id for flashing to RAM.
+ * @ipc_requested_state: Expected IPC state on CP.
+ * @channels: Channel list with UL/DL pipe pairs.
+ * @ipc_status: local ipc_status
+ * @nr_of_channels: number of configured channels
+ * @startup_timer: startup timer for NAND support.
+ * @hrtimer_period: Hr timer period
+ * @tdupdate_timer: Delay the TD update doorbell.
+ * @fast_update_timer: forced head pointer update delay timer.
+ * @td_alloc_timer: Timer for DL pipe TD allocation retry
+ * @rom_exit_code: Mapped boot rom exit code.
+ * @enter_runtime: 1 means the transition to runtime phase was
+ * executed.
+ * @ul_pend_sem: Semaphore to wait/complete of UL TDs
+ * before closing pipe.
+ * @app_notify_ul_pend: Signal app if UL TD is pending
+ * @dl_pend_sem: Semaphore to wait/complete of DL TDs
+ * before closing pipe.
+ * @app_notify_dl_pend: Signal app if DL TD is pending
+ * @phase: Operating phase like runtime.
+ * @pci_device_id: Device ID
+ * @cp_version: CP version
+ * @device_sleep: Device sleep state
+ * @run_state_worker: Pointer to worker component for device
+ * setup operations to be called when modem
+ * reaches RUN state
+ * @ev_irq_pending: 0 means inform the IPC tasklet to
+ * process the irq actions.
+ * @flag: Flag to monitor the state of driver
+ * @td_update_timer_suspended: if true then td update timer suspend
+ * @ev_cdev_write_pending: 0 means inform the IPC tasklet to pass
+ * the accumulated uplink buffers to CP.
+ * @ev_mux_net_transmit_pending: 0 means inform the IPC tasklet to pass
+ * the accumulated network packets to CP.
+ * @reset_det_n: Reset detect flag
+ * @pcie_wake_n: Pcie wake flag
+ */
+struct iosm_imem {
+ struct iosm_mmio *mmio;
+ struct iosm_protocol *ipc_protocol;
+ struct ipc_task *ipc_task;
+ struct iosm_wwan *wwan;
+ struct iosm_mux *mux;
+ struct iosm_cdev *ipc_port[IPC_MEM_MAX_CHANNELS];
+ struct iosm_pcie *pcie;
+ struct device *dev;
+ int flash_channel_id;
+ enum ipc_mem_device_ipc_state ipc_requested_state;
+ struct ipc_mem_channel channels[IPC_MEM_MAX_CHANNELS];
+ u32 ipc_status;
+ u32 nr_of_channels;
+ struct hrtimer startup_timer;
+ ktime_t hrtimer_period;
+ struct hrtimer tdupdate_timer;
+ struct hrtimer fast_update_timer;
+ struct hrtimer td_alloc_timer;
+ enum rom_exit_code rom_exit_code;
+ u32 enter_runtime;
+ struct completion ul_pend_sem;
+ u32 app_notify_ul_pend;
+ struct completion dl_pend_sem;
+ u32 app_notify_dl_pend;
+ enum ipc_phase phase;
+ u16 pci_device_id;
+ int cp_version;
+ int device_sleep;
+ struct work_struct run_state_worker;
+ u8 ev_irq_pending[IPC_IRQ_VECTORS];
+ unsigned long flag;
+ u8 td_update_timer_suspended:1,
+ ev_cdev_write_pending:1,
+ ev_mux_net_transmit_pending:1,
+ reset_det_n:1,
+ pcie_wake_n:1;
+};
+
+/**
+ * ipc_imem_init - Initialize the shared memory region
+ * @pcie: Pointer to core driver data-struct
+ * @device_id: PCI device ID
+ * @mmio: Pointer to the mmio area
+ * @dev: Pointer to device structure
+ *
+ * Returns: Initialized imem pointer on success else NULL
+ */
+struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
+ void __iomem *mmio, struct device *dev);
+
+/**
+ * ipc_imem_pm_s2idle_sleep - Set PM variables to sleep/active for
+ * s2idle sleep/active
+ * @ipc_imem: Pointer to imem data-struct
+ * @sleep: Set PM Variable to sleep/active
+ */
+void ipc_imem_pm_s2idle_sleep(struct iosm_imem *ipc_imem, bool sleep);
+
+/**
+ * ipc_imem_pm_suspend - The HAL shall ask the shared memory layer
+ * whether D3 is allowed.
+ * @ipc_imem: Pointer to imem data-struct
+ */
+void ipc_imem_pm_suspend(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_pm_resume - The HAL shall inform the shared memory layer
+ * that the device is active.
+ * @ipc_imem: Pointer to imem data-struct
+ */
+void ipc_imem_pm_resume(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_cleanup - Inform CP and free the shared memory resources.
+ * @ipc_imem: Pointer to imem data-struct
+ */
+void ipc_imem_cleanup(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_irq_process - Shift the IRQ actions to the IPC thread.
+ * @ipc_imem: Pointer to imem data-struct
+ * @irq: Irq number
+ */
+void ipc_imem_irq_process(struct iosm_imem *ipc_imem, int irq);
+
+/**
+ * imem_get_device_sleep_state - Get the device sleep state value.
+ * @ipc_imem: Pointer to imem instance
+ *
+ * Returns: device sleep state
+ */
+int imem_get_device_sleep_state(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_td_update_timer_suspend - Updates the TD Update Timer suspend flag.
+ * @ipc_imem: Pointer to imem data-struct
+ * @suspend: Flag to update. If TRUE then HP update doorbell is triggered to
+ * device without any wait. If FALSE then HP update doorbell is
+ * delayed until timeout.
+ */
+void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend);
+
+/**
+ * ipc_imem_channel_close - Release the channel resources.
+ * @ipc_imem: Pointer to imem data-struct
+ * @channel_id: Channel ID to be cleaned up.
+ */
+void ipc_imem_channel_close(struct iosm_imem *ipc_imem, int channel_id);
+
+/**
+ * ipc_imem_channel_alloc - Reserves a channel
+ * @ipc_imem: Pointer to imem data-struct
+ * @index: ID to lookup from the preallocated list.
+ * @ctype: Channel type.
+ *
+ * Returns: Index on success and failure value on error
+ */
+int ipc_imem_channel_alloc(struct iosm_imem *ipc_imem, int index,
+ enum ipc_ctype ctype);
+
+/**
+ * ipc_imem_channel_open - Establish the pipes.
+ * @ipc_imem: Pointer to imem data-struct
+ * @channel_id: Channel ID returned during alloc.
+ * @db_id: Doorbell ID for trigger identifier.
+ *
+ * Returns: Pointer of ipc_mem_channel on success and NULL on failure.
+ */
+struct ipc_mem_channel *ipc_imem_channel_open(struct iosm_imem *ipc_imem,
+ int channel_id, u32 db_id);
+
+/**
+ * ipc_imem_td_update_timer_start - Starts the TD Update Timer if not running.
+ * @ipc_imem: Pointer to imem data-struct
+ */
+void ipc_imem_td_update_timer_start(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_ul_write_td - Pass the channel UL list to protocol layer for TD
+ * preparation and sending them to the device.
+ * @ipc_imem: Pointer to imem data-struct
+ *
+ * Returns: TRUE if the HP Doorbell trigger is pending, FALSE otherwise.
+ */
+bool ipc_imem_ul_write_td(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_ul_send - Dequeue SKB from channel list and start with
+ * the uplink transfer. If the HP Doorbell trigger is
+ * pending, start the TD Update Timer.
+ * @ipc_imem: Pointer to imem data-struct
+ */
+void ipc_imem_ul_send(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_channel_update - Set or modify pipe config of an existing channel
+ * @ipc_imem: Pointer to imem data-struct
+ * @id: Channel config index
+ * @chnl_cfg: Channel config struct
+ * @irq_moderation: Timer in usec for irq_moderation
+ */
+void ipc_imem_channel_update(struct iosm_imem *ipc_imem, int id,
+ struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation);
+
+/**
+ * ipc_imem_channel_free - Free an IPC channel.
+ * @channel: Channel to be freed
+ */
+void ipc_imem_channel_free(struct ipc_mem_channel *channel);
+
+/**
+ * ipc_imem_hrtimer_stop - Stop the hrtimer
+ * @hr_timer: Pointer to hrtimer instance
+ */
+void ipc_imem_hrtimer_stop(struct hrtimer *hr_timer);
+
+/**
+ * ipc_imem_pipe_cleanup - Reset volatile pipe content for all channels
+ * @ipc_imem: Pointer to imem data-struct
+ * @pipe: Pipe to be cleaned up
+ */
+void ipc_imem_pipe_cleanup(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe);
+
+/**
+ * ipc_imem_pipe_close - Send msg to device to close pipe
+ * @ipc_imem: Pointer to imem data-struct
+ * @pipe: Pipe to be closed
+ */
+void ipc_imem_pipe_close(struct iosm_imem *ipc_imem, struct ipc_pipe *pipe);
+
+/**
+ * ipc_imem_phase_update - Get the CP execution state
+ * and map it to the AP phase.
+ * @ipc_imem: Pointer to imem data-struct
+ *
+ * Returns: Current ap updated phase
+ */
+enum ipc_phase ipc_imem_phase_update(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_phase_get_string - Return the current operation
+ * phase as string.
+ * @phase: AP phase
+ *
+ * Returns: AP phase string
+ */
+const char *ipc_imem_phase_get_string(enum ipc_phase phase);
+
+/**
+ * ipc_imem_msg_send_feature_set - Send feature set message to modem
+ * @ipc_imem: Pointer to imem data-struct
+ * @reset_enable: 0 = out-of-band, 1 = in-band-crash notification
+ * @atomic_ctx: If false, the call is made in tasklet context
+ *
+ */
+void ipc_imem_msg_send_feature_set(struct iosm_imem *ipc_imem,
+ unsigned int reset_enable, bool atomic_ctx);
+
+/**
+ * ipc_imem_ipc_init_check - Send the init event to CP, wait a certain time and
+ * set CP to runtime with the context information
+ * @ipc_imem: Pointer to imem data-struct
+ */
+void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_channel_init - Initialize the channel list with UL/DL pipe pairs.
+ * @ipc_imem: Pointer to imem data-struct
+ * @ctype: Channel type
+ * @chnl_cfg: Channel configuration struct
+ * @irq_moderation: Timer in usec for irq_moderation
+ */
+void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
+ struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation);
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
new file mode 100644
index 000000000000..46f76e8aae92
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include <linux/delay.h>
+
+#include "iosm_ipc_chnl_cfg.h"
+#include "iosm_ipc_imem.h"
+#include "iosm_ipc_imem_ops.h"
+#include "iosm_ipc_port.h"
+#include "iosm_ipc_task_queue.h"
+
+/* Open a packet data online channel between the network layer and CP. */
+int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id)
+{
+ dev_dbg(ipc_imem->dev, "%s if id: %d",
+ ipc_imem_phase_get_string(ipc_imem->phase), if_id);
+
+ /* The network interface is only supported in the runtime phase. */
+ if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
+ dev_err(ipc_imem->dev, "net:%d : refused phase %s", if_id,
+ ipc_imem_phase_get_string(ipc_imem->phase));
+ return -EIO;
+ }
+
+ /* Check the interface id:
+ * if_id 1 to 8 creates an IP MUX channel session.
+ * MUX sessions start at 0 while network interface ids start at 1,
+ * so map them with if_id = if_id - 1.
+ */
+ if (if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END)
+ return ipc_mux_open_session(ipc_imem->mux, if_id - 1);
+
+ return -EINVAL;
+}
+
+/* Release a net link to CP. */
+void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
+ int channel_id)
+{
+ if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START &&
+ if_id <= IP_MUX_SESSION_END)
+ ipc_mux_close_session(ipc_imem->mux, if_id - 1);
+}
+
+/* Tasklet call to do uplink transfer. */
+static int ipc_imem_tq_cdev_write(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ ipc_imem->ev_cdev_write_pending = false;
+ ipc_imem_ul_send(ipc_imem);
+
+ return 0;
+}
+
+/* Schedule the sio write through the tasklet. */
+static int ipc_imem_call_cdev_write(struct iosm_imem *ipc_imem)
+{
+ if (ipc_imem->ev_cdev_write_pending)
+ return -1;
+
+ ipc_imem->ev_cdev_write_pending = true;
+
+ return ipc_task_queue_send_task(ipc_imem, ipc_imem_tq_cdev_write, 0,
+ NULL, 0, false);
+}
+
+/* Function to transfer UL data */
+int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem,
+ int if_id, int channel_id, struct sk_buff *skb)
+{
+ int ret = -EINVAL;
+
+ if (!ipc_imem || channel_id < 0)
+ goto out;
+
+ /* Is CP Running? */
+ if (ipc_imem->phase != IPC_P_RUN) {
+ dev_dbg(ipc_imem->dev, "phase %s transmit",
+ ipc_imem_phase_get_string(ipc_imem->phase));
+ ret = -EIO;
+ goto out;
+ }
+
+ if (if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END)
+ /* Route the UL packet through IP MUX Layer */
+ ret = ipc_mux_ul_trigger_encode(ipc_imem->mux,
+ if_id - 1, skb);
+ else
+ dev_err(ipc_imem->dev,
+ "invalid if_id %d: ", if_id);
+out:
+ return ret;
+}
+
+/* Initialize wwan channel */
+void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
+ enum ipc_mux_protocol mux_type)
+{
+ struct ipc_chnl_cfg chnl_cfg = { 0 };
+
+ ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);
+
+ /* If modem version is invalid (0xffffffff), do not initialize WWAN. */
+ if (ipc_imem->cp_version == -1) {
+ dev_err(ipc_imem->dev, "invalid CP version");
+ return;
+ }
+
+ ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels);
+ ipc_imem_channel_init(ipc_imem, IPC_CTYPE_WWAN, chnl_cfg,
+ IRQ_MOD_OFF);
+
+ /* WWAN registration. */
+ ipc_imem->wwan = ipc_wwan_init(ipc_imem, ipc_imem->dev);
+ if (!ipc_imem->wwan)
+ dev_err(ipc_imem->dev,
+ "failed to register the ipc_wwan interfaces");
+}
+
+/* Map SKB to DMA for transfer */
+static int ipc_imem_map_skb_to_dma(struct iosm_imem *ipc_imem,
+ struct sk_buff *skb)
+{
+ struct iosm_pcie *ipc_pcie = ipc_imem->pcie;
+ char *buf = skb->data;
+ int len = skb->len;
+ dma_addr_t mapping;
+ int ret;
+
+ ret = ipc_pcie_addr_map(ipc_pcie, buf, len, &mapping, DMA_TO_DEVICE);
+
+ if (ret)
+ goto err;
+
+ BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));
+
+ IPC_CB(skb)->mapping = mapping;
+ IPC_CB(skb)->direction = DMA_TO_DEVICE;
+ IPC_CB(skb)->len = len;
+ IPC_CB(skb)->op_type = (u8)UL_DEFAULT;
+
+err:
+ return ret;
+}
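
ipc_imem_map_skb_to_dma() stashes per-buffer metadata in skb->cb via the
IPC_CB() cast, and the BUILD_BUG_ON() proves at compile time that the
private struct fits the scratch area. The pattern in isolation might look
like this (struct names are hypothetical):

#include <assert.h>
#include <stdint.h>

struct buf {
        _Alignas(8) char cb[48];        /* scratch area, like skb->cb */
        /* ... payload bookkeeping ... */
};

struct my_cb {                          /* hypothetical private metadata */
        uint64_t mapping;
        uint32_t len;
        uint8_t op_type;
};

#define MY_CB(b) ((struct my_cb *)(b)->cb)

/* Compile-time fit check, mirroring the BUILD_BUG_ON() above. */
static_assert(sizeof(struct my_cb) <= sizeof(((struct buf *)0)->cb),
              "private cb must fit the scratch area");

int main(void)
{
        struct buf b;

        MY_CB(&b)->len = 128;           /* metadata travels with the buffer */
        return MY_CB(&b)->len == 128 ? 0 : 1;
}
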
+
+/* return true if channel is ready for use */
+static bool ipc_imem_is_channel_active(struct iosm_imem *ipc_imem,
+ struct ipc_mem_channel *channel)
+{
+ enum ipc_phase phase;
+
+ /* Update the current operation phase. */
+ phase = ipc_imem->phase;
+
+ /* Select the operation depending on the execution stage. */
+ switch (phase) {
+ case IPC_P_RUN:
+ case IPC_P_PSI:
+ case IPC_P_EBL:
+ break;
+
+ case IPC_P_ROM:
+ /* Prepare the PSI image for the CP ROM driver and
+ * suspend the flash app.
+ */
+ if (channel->state != IMEM_CHANNEL_RESERVED) {
+ dev_err(ipc_imem->dev,
+ "ch[%d]:invalid channel state %d,expected %d",
+ channel->channel_id, channel->state,
+ IMEM_CHANNEL_RESERVED);
+ goto channel_unavailable;
+ }
+ goto channel_available;
+
+ default:
+ /* Ignore uplink actions in all other phases. */
+ dev_err(ipc_imem->dev, "ch[%d]: confused phase %d",
+ channel->channel_id, phase);
+ goto channel_unavailable;
+ }
+ /* Check the full availability of the channel. */
+ if (channel->state != IMEM_CHANNEL_ACTIVE) {
+ dev_err(ipc_imem->dev, "ch[%d]: confused channel state %d",
+ channel->channel_id, channel->state);
+ goto channel_unavailable;
+ }
+
+channel_available:
+ return true;
+
+channel_unavailable:
+ return false;
+}
+
+/* Release a sio link to CP. */
+void ipc_imem_sys_cdev_close(struct iosm_cdev *ipc_cdev)
+{
+ struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
+ struct ipc_mem_channel *channel = ipc_cdev->channel;
+ enum ipc_phase curr_phase;
+ int status = 0;
+ u32 tail = 0;
+
+ curr_phase = ipc_imem->phase;
+
+ /* If the current phase is IPC_P_OFF or the SIO ID is negative then
+ * the channel is already freed. Nothing to do.
+ */
+ if (curr_phase == IPC_P_OFF) {
+ dev_err(ipc_imem->dev,
+ "nothing to do. Current Phase: %s",
+ ipc_imem_phase_get_string(curr_phase));
+ return;
+ }
+
+ if (channel->state == IMEM_CHANNEL_FREE) {
+ dev_err(ipc_imem->dev, "ch[%d]: invalid channel state %d",
+ channel->channel_id, channel->state);
+ return;
+ }
+
+ /* If there are any pending TDs then wait for Timeout/Completion before
+ * closing pipe.
+ */
+ if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
+ ipc_imem->app_notify_ul_pend = 1;
+
+ /* Suspend the user app and wait a certain time for processing
+ * UL Data.
+ */
+ status = wait_for_completion_interruptible_timeout
+ (&ipc_imem->ul_pend_sem,
+ msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
+ if (status == 0) {
+ dev_dbg(ipc_imem->dev,
+ "Pend data Timeout UL-Pipe:%d Head:%d Tail:%d",
+ channel->ul_pipe.pipe_nr,
+ channel->ul_pipe.old_head,
+ channel->ul_pipe.old_tail);
+ }
+
+ ipc_imem->app_notify_ul_pend = 0;
+ }
+
+ /* Likewise for the DL pipe: if there are any pending TDs then wait
+ * for Timeout/Completion before closing the pipe.
+ */
+ ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
+ &channel->dl_pipe, NULL, &tail);
+
+ if (tail != channel->dl_pipe.old_tail) {
+ ipc_imem->app_notify_dl_pend = 1;
+
+ /* Suspend the user app and wait a certain time for processing
+ * DL Data.
+ */
+ status = wait_for_completion_interruptible_timeout
+ (&ipc_imem->dl_pend_sem,
+ msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
+ if (status == 0) {
+ dev_dbg(ipc_imem->dev,
+ "Pend data Timeout DL-Pipe:%d Head:%d Tail:%d",
+ channel->dl_pipe.pipe_nr,
+ channel->dl_pipe.old_head,
+ channel->dl_pipe.old_tail);
+ }
+
+ ipc_imem->app_notify_dl_pend = 0;
+ }
+
+ /* Because messages wait for completion, there is a small window
+ * between closing the pipe and marking the channel closed. In this
+ * window an HP update could come from the host driver. Hence set
+ * the channel state to CLOSING to avoid an unnecessary interrupt
+ * towards CP.
+ */
+ channel->state = IMEM_CHANNEL_CLOSING;
+
+ ipc_imem_pipe_close(ipc_imem, &channel->ul_pipe);
+ ipc_imem_pipe_close(ipc_imem, &channel->dl_pipe);
+
+ ipc_imem_channel_free(channel);
+}
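
Both waits above follow the "completion with timeout" pattern: block
until the other side signals or IPC_PEND_DATA_TIMEOUT expires, then
proceed either way. A user-space analogue of
wait_for_completion_interruptible_timeout() with a POSIX condition
variable (a sketch; compile with -lpthread):

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        bool done;
};

/* Returns true if completed, false on timeout. */
static bool wait_timeout(struct completion *c, int timeout_ms)
{
        struct timespec ts;
        bool ok = true;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += timeout_ms / 1000;
        ts.tv_nsec += (timeout_ms % 1000) * 1000000L;
        if (ts.tv_nsec >= 1000000000L) {
                ts.tv_sec++;
                ts.tv_nsec -= 1000000000L;
        }

        pthread_mutex_lock(&c->lock);
        while (!c->done && ok)
                ok = pthread_cond_timedwait(&c->cond, &c->lock, &ts) == 0;
        pthread_mutex_unlock(&c->lock);
        return c->done;
}

int main(void)
{
        struct completion c = { PTHREAD_MUTEX_INITIALIZER,
                                PTHREAD_COND_INITIALIZER, false };

        /* Nothing signals the completion here: expect a timeout. */
        return wait_timeout(&c, 50) ? 1 : 0;
}
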
+
+/* Open a PORT link to CP and return the channel */
+struct ipc_mem_channel *ipc_imem_sys_port_open(struct iosm_imem *ipc_imem,
+ int chl_id, int hp_id)
+{
+ struct ipc_mem_channel *channel;
+ int ch_id;
+
+ /* The PORT interface is only supported in the runtime phase. */
+ if (ipc_imem_phase_update(ipc_imem) != IPC_P_RUN) {
+ dev_err(ipc_imem->dev, "PORT open refused, phase %s",
+ ipc_imem_phase_get_string(ipc_imem->phase));
+ return NULL;
+ }
+
+ ch_id = ipc_imem_channel_alloc(ipc_imem, chl_id, IPC_CTYPE_CTRL);
+
+ if (ch_id < 0) {
+ dev_err(ipc_imem->dev, "reservation of an PORT chnl id failed");
+ return NULL;
+ }
+
+ channel = ipc_imem_channel_open(ipc_imem, ch_id, hp_id);
+
+ if (!channel) {
+ dev_err(ipc_imem->dev, "PORT channel id open failed");
+ return NULL;
+ }
+
+ return channel;
+}
+
+/* transfer skb to modem */
+int ipc_imem_sys_cdev_write(struct iosm_cdev *ipc_cdev, struct sk_buff *skb)
+{
+ struct ipc_mem_channel *channel = ipc_cdev->channel;
+ struct iosm_imem *ipc_imem = ipc_cdev->ipc_imem;
+ int ret = -EIO;
+
+ if (!ipc_imem_is_channel_active(ipc_imem, channel) ||
+ ipc_imem->phase == IPC_P_OFF_REQ)
+ goto out;
+
+ ret = ipc_imem_map_skb_to_dma(ipc_imem, skb);
+
+ if (ret)
+ goto out;
+
+ /* Add skb to the uplink skbuf accumulator. */
+ skb_queue_tail(&channel->ul_list, skb);
+
+ ret = ipc_imem_call_cdev_write(ipc_imem);
+
+ if (ret) {
+ skb_dequeue_tail(&channel->ul_list);
+ dev_err(ipc_cdev->dev, "channel id[%d] write failed\n",
+ ipc_cdev->channel->channel_id);
+ }
+out:
+ return ret;
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
new file mode 100644
index 000000000000..84087cf33329
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_IMEM_OPS_H
+#define IOSM_IPC_IMEM_OPS_H
+
+#include "iosm_ipc_mux_codec.h"
+
+/* Maximum wait time for blocking read */
+#define IPC_READ_TIMEOUT 500
+
+/* The delay in ms for deferring the unregister */
+#define SIO_UNREGISTER_DEFER_DELAY_MS 1
+
+/* Default delay till CP PSI image is running and modem updates the
+ * execution stage.
+ * unit : milliseconds
+ */
+#define PSI_START_DEFAULT_TIMEOUT 3000
+
+/* Default time out when closing SIO, till the modem is in
+ * running state.
+ * unit : milliseconds
+ */
+#define BOOT_CHECK_DEFAULT_TIMEOUT 400
+
+/* IP MUX channel range */
+#define IP_MUX_SESSION_START 1
+#define IP_MUX_SESSION_END 8
+
+/**
+ * ipc_imem_sys_port_open - Open a port link to CP.
+ * @ipc_imem: Imem instance.
+ * @chl_id: Channel Identifier.
+ * @hp_id: HP Identifier.
+ *
+ * Return: channel instance on success, NULL for failure
+ */
+struct ipc_mem_channel *ipc_imem_sys_port_open(struct iosm_imem *ipc_imem,
+ int chl_id, int hp_id);
+
+/**
+ * ipc_imem_sys_cdev_close - Release a sio link to CP.
+ * @ipc_cdev: iosm sio instance.
+ */
+void ipc_imem_sys_cdev_close(struct iosm_cdev *ipc_cdev);
+
+/**
+ * ipc_imem_sys_cdev_write - Route the uplink buffer to CP.
+ * @ipc_cdev: iosm_cdev instance.
+ * @skb: Pointer to skb.
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ipc_imem_sys_cdev_write(struct iosm_cdev *ipc_cdev, struct sk_buff *skb);
+
+/**
+ * ipc_imem_sys_wwan_open - Open packet data online channel between network
+ * layer and CP.
+ * @ipc_imem: Imem instance.
+ * @if_id: ip link tag of the net device.
+ *
+ * Return: Channel ID on success and failure value on error
+ */
+int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id);
+
+/**
+ * ipc_imem_sys_wwan_close - Close packet data online channel between network
+ * layer and CP.
+ * @ipc_imem: Imem instance.
+ * @if_id: IP link id net device.
+ * @channel_id: Channel ID to be closed.
+ */
+void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
+ int channel_id);
+
+/**
+ * ipc_imem_sys_wwan_transmit - Function to transfer UL data
+ * @ipc_imem: Imem instance.
+ * @if_id: link ID of the device.
+ * @channel_id: Channel ID used
+ * @skb: Pointer to sk buffer
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem, int if_id,
+ int channel_id, struct sk_buff *skb);
+/**
+ * ipc_imem_wwan_channel_init - Initializes WWAN channels and the channel for
+ * MUX.
+ * @ipc_imem: Pointer to iosm_imem struct.
+ * @mux_type: Type of mux protocol.
+ */
+void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
+ enum ipc_mux_protocol mux_type);
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_irq.c b/drivers/net/wwan/iosm/iosm_ipc_irq.c
new file mode 100644
index 000000000000..702f50a48151
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_irq.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include "iosm_ipc_pcie.h"
+#include "iosm_ipc_protocol.h"
+
+static void ipc_write_dbell_reg(struct iosm_pcie *ipc_pcie, int irq_n, u32 data)
+{
+ void __iomem *write_reg;
+
+ /* Select the doorbell register, which is currently the only one
+ * needed by CP.
+ */
+ write_reg = (void __iomem *)((u8 __iomem *)ipc_pcie->ipc_regs +
+ ipc_pcie->doorbell_write +
+ (irq_n * ipc_pcie->doorbell_reg_offset));
+
+ /* Fire the doorbell irq by writing data on the doorbell write pointer
+ * register.
+ */
+ iowrite32(data, write_reg);
+}
+
+void ipc_doorbell_fire(struct iosm_pcie *ipc_pcie, int irq_n, u32 data)
+{
+ ipc_write_dbell_reg(ipc_pcie, irq_n, data);
+}
+
+/* Threaded Interrupt handler for MSI interrupts */
+static irqreturn_t ipc_msi_interrupt(int irq, void *dev_id)
+{
+ struct iosm_pcie *ipc_pcie = dev_id;
+ int instance = irq - ipc_pcie->pci->irq;
+
+ /* Shift the MSI irq actions to the IPC tasklet. IRQ_NONE means the
+ * irq was not from the IPC device or could not be served.
+ */
+ if (instance >= ipc_pcie->nvec)
+ return IRQ_NONE;
+
+ if (!test_bit(0, &ipc_pcie->suspend))
+ ipc_imem_irq_process(ipc_pcie->imem, instance);
+
+ return IRQ_HANDLED;
+}
+
+void ipc_release_irq(struct iosm_pcie *ipc_pcie)
+{
+ struct pci_dev *pdev = ipc_pcie->pci;
+
+ if (pdev->msi_enabled) {
+ while (--ipc_pcie->nvec >= 0)
+ free_irq(pdev->irq + ipc_pcie->nvec, ipc_pcie);
+ }
+ pci_free_irq_vectors(pdev);
+}
+
+int ipc_acquire_irq(struct iosm_pcie *ipc_pcie)
+{
+ struct pci_dev *pdev = ipc_pcie->pci;
+ int i, rc = -EINVAL;
+
+ ipc_pcie->nvec = pci_alloc_irq_vectors(pdev, IPC_MSI_VECTORS,
+ IPC_MSI_VECTORS, PCI_IRQ_MSI);
+
+ if (ipc_pcie->nvec < 0) {
+ rc = ipc_pcie->nvec;
+ goto error;
+ }
+
+ if (!pdev->msi_enabled)
+ goto error;
+
+ for (i = 0; i < ipc_pcie->nvec; ++i) {
+ rc = request_threaded_irq(pdev->irq + i, NULL,
+ ipc_msi_interrupt, IRQF_ONESHOT,
+ KBUILD_MODNAME, ipc_pcie);
+ if (rc) {
+ dev_err(ipc_pcie->dev, "unable to grab IRQ, rc=%d", rc);
+ ipc_pcie->nvec = i;
+ ipc_release_irq(ipc_pcie);
+ goto error;
+ }
+ }
+
+error:
+ return rc;
+}
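
ipc_write_dbell_reg() computes the doorbell address as base + doorbell_write + irq_n * doorbell_reg_offset. A standalone illustration of that address arithmetic; the offsets here are made-up examples, the real values come from the PCIe setup elsewhere in this series:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uintptr_t base = 0xf0000000;	/* hypothetical mapped BAR */
	uintptr_t doorbell_write = 0x10;
	uintptr_t reg_offset = 0x08;	/* stride between doorbells */
	int irq_n;

	for (irq_n = 0; irq_n < 2; irq_n++)
		printf("doorbell %d -> %#lx\n", irq_n,
		       (unsigned long)(base + doorbell_write +
				       irq_n * reg_offset));
	return 0;
}
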
diff --git a/drivers/net/wwan/iosm/iosm_ipc_irq.h b/drivers/net/wwan/iosm/iosm_ipc_irq.h
new file mode 100644
index 000000000000..a8ed596cb6a5
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_irq.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_IRQ_H
+#define IOSM_IPC_IRQ_H
+
+struct iosm_pcie;
+
+/**
+ * ipc_doorbell_fire - fire doorbell to CP
+ * @ipc_pcie: Pointer to iosm_pcie
+ * @irq_n: Doorbell type
+ * @data: ipc state
+ */
+void ipc_doorbell_fire(struct iosm_pcie *ipc_pcie, int irq_n, u32 data);
+
+/**
+ * ipc_release_irq - Release the IRQ handler.
+ * @ipc_pcie: Pointer to iosm_pcie struct
+ */
+void ipc_release_irq(struct iosm_pcie *ipc_pcie);
+
+/**
+ * ipc_acquire_irq - acquire IRQ & register IRQ handler.
+ * @ipc_pcie: Pointer to iosm_pcie struct
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ipc_acquire_irq(struct iosm_pcie *ipc_pcie);
+
+#endif
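
On a mid-loop request failure, ipc_acquire_irq() records how many vectors were actually obtained (nvec = i) before handing off to ipc_release_irq(), whose while (--nvec >= 0) loop then frees exactly those vectors in reverse order. A self-contained model of that partial-failure unwind; request_one and free_one are stand-ins, not kernel calls:

#include <stdio.h>

#define NVEC 4

static int request_one(int i)
{
	return i == 2 ? -1 : 0;		/* pretend vector 2 fails */
}

static void free_one(int i)
{
	printf("freed vector %d\n", i);
}

int main(void)
{
	int i, nvec = NVEC;

	for (i = 0; i < nvec; i++) {
		if (request_one(i)) {
			nvec = i;	/* only i vectors were acquired */
			while (--nvec >= 0)
				free_one(nvec);
			return 1;
		}
	}
	return 0;
}
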
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.c b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
new file mode 100644
index 000000000000..06c94b1720b6
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/slab.h>
+
+#include "iosm_ipc_mmio.h"
+
+/* Definition of MMIO offsets
+ * note that MMIO_CI offsets are relative to end of chip info structure
+ */
+
+/* MMIO chip info size in bytes */
+#define MMIO_CHIP_INFO_SIZE 60
+
+/* CP execution stage */
+#define MMIO_OFFSET_EXECUTION_STAGE 0x00
+
+/* Boot ROM Chip Info struct */
+#define MMIO_OFFSET_CHIP_INFO 0x04
+
+#define MMIO_OFFSET_ROM_EXIT_CODE 0x40
+
+#define MMIO_OFFSET_PSI_ADDRESS 0x54
+
+#define MMIO_OFFSET_PSI_SIZE 0x5C
+
+#define MMIO_OFFSET_IPC_STATUS 0x60
+
+#define MMIO_OFFSET_CONTEXT_INFO 0x64
+
+#define MMIO_OFFSET_BASE_ADDR 0x6C
+
+#define MMIO_OFFSET_END_ADDR 0x74
+
+#define MMIO_OFFSET_CP_VERSION 0xF0
+
+#define MMIO_OFFSET_CP_CAPABILITIES 0xF4
+
+/* Number of polls (with a 20 ms delay each) to wait for the modem boot
+ * code to write a valid execution stage into the mmio area
+ */
+#define IPC_MMIO_EXEC_STAGE_TIMEOUT 50
+
+/* check if exec stage has one of the valid values */
+static bool ipc_mmio_is_valid_exec_stage(enum ipc_mem_exec_stage stage)
+{
+ switch (stage) {
+ case IPC_MEM_EXEC_STAGE_BOOT:
+ case IPC_MEM_EXEC_STAGE_PSI:
+ case IPC_MEM_EXEC_STAGE_EBL:
+ case IPC_MEM_EXEC_STAGE_RUN:
+ case IPC_MEM_EXEC_STAGE_CRASH:
+ case IPC_MEM_EXEC_STAGE_CD_READY:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void ipc_mmio_update_cp_capability(struct iosm_mmio *ipc_mmio)
+{
+ u32 cp_cap;
+ unsigned int ver;
+
+ ver = ipc_mmio_get_cp_version(ipc_mmio);
+ cp_cap = readl(ipc_mmio->base + ipc_mmio->offset.cp_capability);
+
+ ipc_mmio->has_mux_lite = (ver >= IOSM_CP_VERSION) &&
+ !(cp_cap & DL_AGGR) && !(cp_cap & UL_AGGR);
+
+ ipc_mmio->has_ul_flow_credit =
+ (ver >= IOSM_CP_VERSION) && (cp_cap & UL_FLOW_CREDIT);
+}
+
+struct iosm_mmio *ipc_mmio_init(void __iomem *mmio, struct device *dev)
+{
+ struct iosm_mmio *ipc_mmio = kzalloc(sizeof(*ipc_mmio), GFP_KERNEL);
+ int retries = IPC_MMIO_EXEC_STAGE_TIMEOUT;
+ enum ipc_mem_exec_stage stage;
+
+ if (!ipc_mmio)
+ return NULL;
+
+ ipc_mmio->dev = dev;
+
+ ipc_mmio->base = mmio;
+
+ ipc_mmio->offset.exec_stage = MMIO_OFFSET_EXECUTION_STAGE;
+
+ /* Check for a valid execution stage to make sure that the boot code
+ * has correctly initialized the MMIO area.
+ */
+ do {
+ stage = ipc_mmio_get_exec_stage(ipc_mmio);
+ if (ipc_mmio_is_valid_exec_stage(stage))
+ break;
+
+ msleep(20);
+ } while (retries-- > 0);
+
+ /* Re-check the stage itself: retries can under-run to -1 when the
+ * loop exhausts, so testing the counter alone is unreliable.
+ */
+ if (!ipc_mmio_is_valid_exec_stage(stage)) {
+ dev_err(ipc_mmio->dev, "invalid exec stage %X", stage);
+ goto init_fail;
+ }
+
+ ipc_mmio->offset.chip_info = MMIO_OFFSET_CHIP_INFO;
+
+ /* read chip info size and version from chip info structure */
+ ipc_mmio->chip_info_version =
+ ioread8(ipc_mmio->base + ipc_mmio->offset.chip_info);
+
+ /* Increment of 2 is needed as the size value in the chip info
+ * excludes the version and size fields, which are always present
+ */
+ ipc_mmio->chip_info_size =
+ ioread8(ipc_mmio->base + ipc_mmio->offset.chip_info + 1) + 2;
+
+ if (ipc_mmio->chip_info_size != MMIO_CHIP_INFO_SIZE) {
+ dev_err(ipc_mmio->dev, "Unexpected Chip Info");
+ goto init_fail;
+ }
+
+ ipc_mmio->offset.rom_exit_code = MMIO_OFFSET_ROM_EXIT_CODE;
+
+ ipc_mmio->offset.psi_address = MMIO_OFFSET_PSI_ADDRESS;
+ ipc_mmio->offset.psi_size = MMIO_OFFSET_PSI_SIZE;
+ ipc_mmio->offset.ipc_status = MMIO_OFFSET_IPC_STATUS;
+ ipc_mmio->offset.context_info = MMIO_OFFSET_CONTEXT_INFO;
+ ipc_mmio->offset.ap_win_base = MMIO_OFFSET_BASE_ADDR;
+ ipc_mmio->offset.ap_win_end = MMIO_OFFSET_END_ADDR;
+
+ ipc_mmio->offset.cp_version = MMIO_OFFSET_CP_VERSION;
+ ipc_mmio->offset.cp_capability = MMIO_OFFSET_CP_CAPABILITIES;
+
+ return ipc_mmio;
+
+init_fail:
+ kfree(ipc_mmio);
+ return NULL;
+}
+
+enum ipc_mem_exec_stage ipc_mmio_get_exec_stage(struct iosm_mmio *ipc_mmio)
+{
+ if (!ipc_mmio)
+ return IPC_MEM_EXEC_STAGE_INVALID;
+
+ return (enum ipc_mem_exec_stage)readl(ipc_mmio->base +
+ ipc_mmio->offset.exec_stage);
+}
+
+void ipc_mmio_copy_chip_info(struct iosm_mmio *ipc_mmio, void *dest,
+ size_t size)
+{
+ if (ipc_mmio && dest)
+ memcpy_fromio(dest, ipc_mmio->base + ipc_mmio->offset.chip_info,
+ size);
+}
+
+enum ipc_mem_device_ipc_state ipc_mmio_get_ipc_state(struct iosm_mmio *ipc_mmio)
+{
+ if (!ipc_mmio)
+ return IPC_MEM_DEVICE_IPC_INVALID;
+
+ return (enum ipc_mem_device_ipc_state)
+ readl(ipc_mmio->base + ipc_mmio->offset.ipc_status);
+}
+
+enum rom_exit_code ipc_mmio_get_rom_exit_code(struct iosm_mmio *ipc_mmio)
+{
+ if (!ipc_mmio)
+ return IMEM_ROM_EXIT_FAIL;
+
+ return (enum rom_exit_code)readl(ipc_mmio->base +
+ ipc_mmio->offset.rom_exit_code);
+}
+
+void ipc_mmio_config(struct iosm_mmio *ipc_mmio)
+{
+ if (!ipc_mmio)
+ return;
+
+ /* AP memory window: writing 0 to base and end opens the full window,
+ * i.e. the modem does not range-check AP addresses.
+ */
+ iowrite64_lo_hi(0, ipc_mmio->base + ipc_mmio->offset.ap_win_base);
+ iowrite64_lo_hi(0, ipc_mmio->base + ipc_mmio->offset.ap_win_end);
+
+ iowrite64_lo_hi(ipc_mmio->context_info_addr,
+ ipc_mmio->base + ipc_mmio->offset.context_info);
+}
+
+void ipc_mmio_set_psi_addr_and_size(struct iosm_mmio *ipc_mmio, dma_addr_t addr,
+ u32 size)
+{
+ if (!ipc_mmio)
+ return;
+
+ iowrite64_lo_hi(addr, ipc_mmio->base + ipc_mmio->offset.psi_address);
+ writel(size, ipc_mmio->base + ipc_mmio->offset.psi_size);
+}
+
+void ipc_mmio_set_contex_info_addr(struct iosm_mmio *ipc_mmio, phys_addr_t addr)
+{
+ if (!ipc_mmio)
+ return;
+
+ /* store context_info address. This will be stored in the mmio area
+ * during IPC_MEM_DEVICE_IPC_INIT state via ipc_mmio_config()
+ */
+ ipc_mmio->context_info_addr = addr;
+}
+
+int ipc_mmio_get_cp_version(struct iosm_mmio *ipc_mmio)
+{
+ return ipc_mmio ? readl(ipc_mmio->base + ipc_mmio->offset.cp_version) :
+ -EFAULT;
+}
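
ipc_mmio_init() exits its poll loop on the stage value itself rather than on the retry counter: with a post-decrementing do/while the counter under-runs to -1 on exhaustion, so the stage is the reliable success test. A standalone model of that polling loop; read_stage and stage_valid are stand-ins, and STAGE_RUN mirrors IPC_MEM_EXEC_STAGE_RUN:

#include <stdio.h>

#define RETRIES 50
#define STAGE_RUN 0x600DF00D	/* IPC_MEM_EXEC_STAGE_RUN */

static unsigned int read_stage(void)	/* stand-in for the MMIO read */
{
	static int calls;

	return ++calls >= 4 ? STAGE_RUN : 0;	/* valid on the 4th poll */
}

static int stage_valid(unsigned int s)
{
	return s == STAGE_RUN;
}

int main(void)
{
	int retries = RETRIES, polls = 0;
	unsigned int stage = 0;

	do {
		stage = read_stage();
		polls++;
		if (stage_valid(stage))
			break;
		/* the driver sleeps 20 ms between polls via msleep() */
	} while (retries-- > 0);

	if (!stage_valid(stage)) {
		printf("boot code never published a valid stage\n");
		return 1;
	}
	printf("stage %#x after %d polls\n", stage, polls);
	return 0;
}
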
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.h b/drivers/net/wwan/iosm/iosm_ipc_mmio.h
new file mode 100644
index 000000000000..45e6923da78f
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_MMIO_H
+#define IOSM_IPC_MMIO_H
+
+/* Minimal IOSM CP VERSION which has valid CP_CAPABILITIES field */
+#define IOSM_CP_VERSION 0x0100UL
+
+/* DL dir Aggregation support mask */
+#define DL_AGGR BIT(23)
+
+/* UL dir Aggregation support mask */
+#define UL_AGGR BIT(22)
+
+/* UL flow credit support mask */
+#define UL_FLOW_CREDIT BIT(21)
+
+/* Possible states of the IPC finite state machine. */
+enum ipc_mem_device_ipc_state {
+ IPC_MEM_DEVICE_IPC_UNINIT,
+ IPC_MEM_DEVICE_IPC_INIT,
+ IPC_MEM_DEVICE_IPC_RUNNING,
+ IPC_MEM_DEVICE_IPC_RECOVERY,
+ IPC_MEM_DEVICE_IPC_ERROR,
+ IPC_MEM_DEVICE_IPC_DONT_CARE,
+ IPC_MEM_DEVICE_IPC_INVALID = -1
+};
+
+/* Boot ROM exit status. */
+enum rom_exit_code {
+ IMEM_ROM_EXIT_OPEN_EXT = 0x01,
+ IMEM_ROM_EXIT_OPEN_MEM = 0x02,
+ IMEM_ROM_EXIT_CERT_EXT = 0x10,
+ IMEM_ROM_EXIT_CERT_MEM = 0x20,
+ IMEM_ROM_EXIT_FAIL = 0xFF
+};
+
+/* Boot stages */
+enum ipc_mem_exec_stage {
+ IPC_MEM_EXEC_STAGE_RUN = 0x600DF00D,
+ IPC_MEM_EXEC_STAGE_CRASH = 0x8BADF00D,
+ IPC_MEM_EXEC_STAGE_CD_READY = 0xBADC0DED,
+ IPC_MEM_EXEC_STAGE_BOOT = 0xFEEDB007,
+ IPC_MEM_EXEC_STAGE_PSI = 0xFEEDBEEF,
+ IPC_MEM_EXEC_STAGE_EBL = 0xFEEDCAFE,
+ IPC_MEM_EXEC_STAGE_INVALID = 0xFFFFFFFF
+};
+
+/* mmio scratchpad info */
+struct mmio_offset {
+ int exec_stage;
+ int chip_info;
+ int rom_exit_code;
+ int psi_address;
+ int psi_size;
+ int ipc_status;
+ int context_info;
+ int ap_win_base;
+ int ap_win_end;
+ int cp_version;
+ int cp_capability;
+};
+
+/**
+ * struct iosm_mmio - MMIO region mapped to the doorbell scratchpad.
+ * @base: Base address of MMIO region
+ * @dev: Pointer to device structure
+ * @offset: Start offset
+ * @context_info_addr: Physical base address of context info structure
+ * @chip_info_version: Version of chip info structure
+ * @chip_info_size: Size of chip info structure
+ * @has_mux_lite: CP supports MUX Lite (no aggregation)
+ * @has_ul_flow_credit: UL flow credit support
+ * @has_slp_no_prot: Device sleep no protocol support
+ * @has_mcr_support: Usage of mcr support
+ */
+struct iosm_mmio {
+ unsigned char __iomem *base;
+ struct device *dev;
+ struct mmio_offset offset;
+ phys_addr_t context_info_addr;
+ unsigned int chip_info_version;
+ unsigned int chip_info_size;
+ u8 has_mux_lite:1,
+ has_ul_flow_credit:1,
+ has_slp_no_prot:1,
+ has_mcr_support:1;
+};
+
+/**
+ * ipc_mmio_init - Allocate mmio instance data
+ * @mmio_addr: Mapped AP base address of the MMIO area.
+ * @dev: Pointer to device structure
+ *
+ * Returns: address of mmio instance data or NULL on failure.
+ */
+struct iosm_mmio *ipc_mmio_init(void __iomem *mmio_addr, struct device *dev);
+
+/**
+ * ipc_mmio_set_psi_addr_and_size - Set start address and size of the
+ * primary system image (PSI) for the
+ * FW download.
+ * @ipc_mmio: Pointer to mmio instance
+ * @addr: PSI address
+ * @size: PSI image size
+ */
+void ipc_mmio_set_psi_addr_and_size(struct iosm_mmio *ipc_mmio, dma_addr_t addr,
+ u32 size);
+
+/**
+ * ipc_mmio_set_contex_info_addr - Stores the Context Info Address in
+ * MMIO instance to share it with CP during
+ * mmio_init.
+ * @ipc_mmio: Pointer to mmio instance
+ * @addr: 64-bit address of AP context information.
+ */
+void ipc_mmio_set_contex_info_addr(struct iosm_mmio *ipc_mmio,
+ phys_addr_t addr);
+
+/**
+ * ipc_mmio_get_cp_version - Get the CP IPC version
+ * @ipc_mmio: Pointer to mmio instance
+ *
+ * Returns: version number on success and failure value on error.
+ */
+int ipc_mmio_get_cp_version(struct iosm_mmio *ipc_mmio);
+
+/**
+ * ipc_mmio_get_rom_exit_code - Get exit code from CP boot rom download app
+ * @ipc_mmio: Pointer to mmio instance
+ *
+ * Returns: exit code from CP boot rom download APP
+ */
+enum rom_exit_code ipc_mmio_get_rom_exit_code(struct iosm_mmio *ipc_mmio);
+
+/**
+ * ipc_mmio_get_exec_stage - Query CP execution stage
+ * @ipc_mmio: Pointer to mmio instance
+ *
+ * Returns: CP execution stage
+ */
+enum ipc_mem_exec_stage ipc_mmio_get_exec_stage(struct iosm_mmio *ipc_mmio);
+
+/**
+ * ipc_mmio_get_ipc_state - Query CP IPC state
+ * @ipc_mmio: Pointer to mmio instance
+ *
+ * Returns: CP IPC state
+ */
+enum ipc_mem_device_ipc_state
+ipc_mmio_get_ipc_state(struct iosm_mmio *ipc_mmio);
+
+/**
+ * ipc_mmio_copy_chip_info - Copy size bytes of CP chip info structure
+ * into caller provided buffer
+ * @ipc_mmio: Pointer to mmio instance
+ * @dest: Pointer to caller provided buffer
+ * @size: Number of bytes to copy
+ */
+void ipc_mmio_copy_chip_info(struct iosm_mmio *ipc_mmio, void *dest,
+ size_t size);
+
+/**
+ * ipc_mmio_config - Write context info and AP memory range addresses.
+ * This needs to be called when CP is in
+ * IPC_MEM_DEVICE_IPC_INIT state
+ *
+ * @ipc_mmio: Pointer to mmio instance
+ */
+void ipc_mmio_config(struct iosm_mmio *ipc_mmio);
+
+/**
+ * ipc_mmio_update_cp_capability - Read and update modem capability, from mmio
+ * capability offset
+ *
+ * @ipc_mmio: Pointer to mmio instance
+ */
+void ipc_mmio_update_cp_capability(struct iosm_mmio *ipc_mmio);
+
+#endif
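
The capability masks above drive the MUX mode selection: MUX Lite is usable only when CP offers neither aggregation direction. A standalone re-derivation of the ipc_mmio_update_cp_capability() logic using only the constants declared in this header; the ver and cp_cap inputs are example values, in the driver they are read from the scratchpad:

#include <stdio.h>

#define BIT(n)		(1u << (n))
#define DL_AGGR		BIT(23)
#define UL_AGGR		BIT(22)
#define UL_FLOW_CREDIT	BIT(21)
#define IOSM_CP_VERSION	0x0100u

int main(void)
{
	unsigned int ver = 0x0100;		/* example CP version */
	unsigned int cp_cap = UL_FLOW_CREDIT;	/* example capability word */
	int has_mux_lite, has_ul_flow_credit;

	/* MUX Lite requires that neither aggregation direction is set. */
	has_mux_lite = ver >= IOSM_CP_VERSION &&
		       !(cp_cap & DL_AGGR) && !(cp_cap & UL_AGGR);
	has_ul_flow_credit = ver >= IOSM_CP_VERSION &&
			     (cp_cap & UL_FLOW_CREDIT) != 0;

	printf("mux_lite=%d ul_flow_credit=%d\n",
	       has_mux_lite, has_ul_flow_credit);
	return 0;
}
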
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.c b/drivers/net/wwan/iosm/iosm_ipc_mux.c
new file mode 100644
index 000000000000..c1c77ce699da
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux.c
@@ -0,0 +1,455 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include "iosm_ipc_mux_codec.h"
+
+/* At the beginning of the runtime phase, the IP MUX channel shall be created. */
+static int ipc_mux_channel_create(struct iosm_mux *ipc_mux)
+{
+ int channel_id;
+
+ channel_id = ipc_imem_channel_alloc(ipc_mux->imem, ipc_mux->instance_id,
+ IPC_CTYPE_WWAN);
+
+ if (channel_id < 0) {
+ dev_err(ipc_mux->dev,
+ "allocation of the MUX channel id failed");
+ ipc_mux->state = MUX_S_ERROR;
+ ipc_mux->event = MUX_E_NOT_APPLICABLE;
+ goto no_channel;
+ }
+
+ /* Establish the MUX channel in blocking mode. */
+ ipc_mux->channel = ipc_imem_channel_open(ipc_mux->imem, channel_id,
+ IPC_HP_NET_CHANNEL_INIT);
+
+ if (!ipc_mux->channel) {
+ dev_err(ipc_mux->dev, "ipc_imem_channel_open failed");
+ ipc_mux->state = MUX_S_ERROR;
+ ipc_mux->event = MUX_E_NOT_APPLICABLE;
+ return -ENODEV; /* MUX channel is not available. */
+ }
+
+ /* Define the MUX active state properties. */
+ ipc_mux->state = MUX_S_ACTIVE;
+ ipc_mux->event = MUX_E_NO_ORDERS;
+
+no_channel:
+ return channel_id;
+}
+
+/* Reset the session/if id state. */
+static void ipc_mux_session_free(struct iosm_mux *ipc_mux, int if_id)
+{
+ struct mux_session *if_entry;
+
+ if_entry = &ipc_mux->session[if_id];
+ /* Reset the session state. */
+ if_entry->wwan = NULL;
+}
+
+/* Create and send the session open command. */
+static struct mux_cmd_open_session_resp *
+ipc_mux_session_open_send(struct iosm_mux *ipc_mux, int if_id)
+{
+ struct mux_cmd_open_session_resp *open_session_resp;
+ struct mux_acb *acb = &ipc_mux->acb;
+ union mux_cmd_param param;
+
+ /* Add the open_session command to one ACB and start transmission. */
+ param.open_session.flow_ctrl = 0;
+ param.open_session.ipv4v6_hints = 0;
+ param.open_session.reserved2 = 0;
+ param.open_session.dl_head_pad_len = cpu_to_le32(IPC_MEM_DL_ETH_OFFSET);
+
+ /* Finish and transfer ACB. The user thread is suspended.
+ * It is a blocking function call, until CP responds or timeout.
+ */
+ acb->wanted_response = MUX_CMD_OPEN_SESSION_RESP;
+ if (ipc_mux_dl_acb_send_cmds(ipc_mux, MUX_CMD_OPEN_SESSION, if_id, 0,
+ &param, sizeof(param.open_session), true,
+ false) ||
+ acb->got_response != MUX_CMD_OPEN_SESSION_RESP) {
+ dev_err(ipc_mux->dev, "if_id %d: OPEN_SESSION send failed",
+ if_id);
+ return NULL;
+ }
+
+ open_session_resp = &ipc_mux->acb.got_param.open_session_resp;
+ if (open_session_resp->response != cpu_to_le32(MUX_CMD_RESP_SUCCESS)) {
+ dev_err(ipc_mux->dev,
+ "if_id %d,session open failed,response=%d", if_id,
+ open_session_resp->response);
+ return NULL;
+ }
+
+ return open_session_resp;
+}
+
+/* Open the first IP session. */
+static bool ipc_mux_session_open(struct iosm_mux *ipc_mux,
+ struct mux_session_open *session_open)
+{
+ struct mux_cmd_open_session_resp *open_session_resp;
+ int if_id;
+
+ /* Search for a free session interface id. */
+ if_id = le32_to_cpu(session_open->if_id);
+ if (if_id < 0 || if_id >= ipc_mux->nr_sessions) {
+ dev_err(ipc_mux->dev, "invalid interface id=%d", if_id);
+ return false;
+ }
+
+ /* Create and send the session open command.
+ * It is a blocking function call, until CP responds or timeout.
+ */
+ open_session_resp = ipc_mux_session_open_send(ipc_mux, if_id);
+ if (!open_session_resp) {
+ ipc_mux_session_free(ipc_mux, if_id);
+ session_open->if_id = cpu_to_le32(-1);
+ return false;
+ }
+
+ /* Initialize the uplink skb accumulator. */
+ skb_queue_head_init(&ipc_mux->session[if_id].ul_list);
+
+ ipc_mux->session[if_id].dl_head_pad_len = IPC_MEM_DL_ETH_OFFSET;
+ ipc_mux->session[if_id].ul_head_pad_len =
+ le32_to_cpu(open_session_resp->ul_head_pad_len);
+ ipc_mux->session[if_id].wwan = ipc_mux->wwan;
+
+ /* Reset the flow ctrl stats of the session */
+ ipc_mux->session[if_id].flow_ctl_en_cnt = 0;
+ ipc_mux->session[if_id].flow_ctl_dis_cnt = 0;
+ ipc_mux->session[if_id].ul_flow_credits = 0;
+ ipc_mux->session[if_id].net_tx_stop = false;
+ ipc_mux->session[if_id].flow_ctl_mask = 0;
+
+ /* Save and return the assigned if id. */
+ session_open->if_id = cpu_to_le32(if_id);
+
+ return true;
+}
+
+/* Free pending session UL packet. */
+static void ipc_mux_session_reset(struct iosm_mux *ipc_mux, int if_id)
+{
+ /* Reset the session/if id state. */
+ ipc_mux_session_free(ipc_mux, if_id);
+
+ /* Empty the uplink skb accumulator. */
+ skb_queue_purge(&ipc_mux->session[if_id].ul_list);
+}
+
+static void ipc_mux_session_close(struct iosm_mux *ipc_mux,
+ struct mux_session_close *msg)
+{
+ int if_id;
+
+ /* Copy the session interface id. */
+ if_id = le32_to_cpu(msg->if_id);
+
+ if (if_id < 0 || if_id >= ipc_mux->nr_sessions) {
+ dev_err(ipc_mux->dev, "invalid session id %d", if_id);
+ return;
+ }
+
+ /* Create and send the session close command.
+ * It is a blocking function call, until CP responds or timeout.
+ */
+ if (ipc_mux_dl_acb_send_cmds(ipc_mux, MUX_CMD_CLOSE_SESSION, if_id, 0,
+ NULL, 0, true, false))
+ dev_err(ipc_mux->dev, "if_id %d: CLOSE_SESSION send failed",
+ if_id);
+
+ /* Reset the flow ctrl stats of the session */
+ ipc_mux->session[if_id].flow_ctl_en_cnt = 0;
+ ipc_mux->session[if_id].flow_ctl_dis_cnt = 0;
+ ipc_mux->session[if_id].flow_ctl_mask = 0;
+
+ ipc_mux_session_reset(ipc_mux, if_id);
+}
+
+static void ipc_mux_channel_close(struct iosm_mux *ipc_mux,
+ struct mux_channel_close *channel_close_p)
+{
+ int i;
+
+ /* Free pending session UL packet. */
+ for (i = 0; i < ipc_mux->nr_sessions; i++)
+ if (ipc_mux->session[i].wwan)
+ ipc_mux_session_reset(ipc_mux, i);
+
+ ipc_imem_channel_close(ipc_mux->imem, ipc_mux->channel_id);
+
+ /* Reset the MUX object. */
+ ipc_mux->state = MUX_S_INACTIVE;
+ ipc_mux->event = MUX_E_INACTIVE;
+}
+
+/* CP has interrupted AP. If AP is in IP MUX mode, execute the pending ops. */
+static int ipc_mux_schedule(struct iosm_mux *ipc_mux, union mux_msg *msg)
+{
+ enum mux_event order;
+ bool success;
+ int ret = -EIO;
+
+ if (!ipc_mux->initialized) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ order = msg->common.event;
+
+ switch (ipc_mux->state) {
+ case MUX_S_INACTIVE:
+ if (order != MUX_E_MUX_SESSION_OPEN)
+ goto out; /* Wait for the request to open a session */
+
+ if (ipc_mux->event == MUX_E_INACTIVE)
+ /* Establish the MUX channel and the new state. */
+ ipc_mux->channel_id = ipc_mux_channel_create(ipc_mux);
+
+ if (ipc_mux->state != MUX_S_ACTIVE) {
+ ret = ipc_mux->channel_id; /* Missing the MUX channel */
+ goto out;
+ }
+
+ /* Disable the TD update timer and open the first IP session. */
+ ipc_imem_td_update_timer_suspend(ipc_mux->imem, true);
+ ipc_mux->event = MUX_E_MUX_SESSION_OPEN;
+ success = ipc_mux_session_open(ipc_mux, &msg->session_open);
+
+ ipc_imem_td_update_timer_suspend(ipc_mux->imem, false);
+ if (success)
+ ret = ipc_mux->channel_id;
+ goto out;
+
+ case MUX_S_ACTIVE:
+ switch (order) {
+ case MUX_E_MUX_SESSION_OPEN:
+ /* Disable the TD update timer and open a session */
+ ipc_imem_td_update_timer_suspend(ipc_mux->imem, true);
+ ipc_mux->event = MUX_E_MUX_SESSION_OPEN;
+ success = ipc_mux_session_open(ipc_mux,
+ &msg->session_open);
+ ipc_imem_td_update_timer_suspend(ipc_mux->imem, false);
+ if (success)
+ ret = ipc_mux->channel_id;
+ goto out;
+
+ case MUX_E_MUX_SESSION_CLOSE:
+ /* Release an IP session. */
+ ipc_mux->event = MUX_E_MUX_SESSION_CLOSE;
+ ipc_mux_session_close(ipc_mux, &msg->session_close);
+ ret = ipc_mux->channel_id;
+ goto out;
+
+ case MUX_E_MUX_CHANNEL_CLOSE:
+ /* Close the MUX channel pipes. */
+ ipc_mux->event = MUX_E_MUX_CHANNEL_CLOSE;
+ ipc_mux_channel_close(ipc_mux, &msg->channel_close);
+ ret = ipc_mux->channel_id;
+ goto out;
+
+ default:
+ /* Invalid order. */
+ goto out;
+ }
+
+ default:
+ dev_err(ipc_mux->dev,
+ "unexpected MUX transition: state=%d, event=%d",
+ ipc_mux->state, ipc_mux->event);
+ }
+out:
+ return ret;
+}
+
+struct iosm_mux *ipc_mux_init(struct ipc_mux_config *mux_cfg,
+ struct iosm_imem *imem)
+{
+ struct iosm_mux *ipc_mux = kzalloc(sizeof(*ipc_mux), GFP_KERNEL);
+ int i, ul_tds, ul_td_size;
+ struct sk_buff_head *free_list;
+ struct sk_buff *skb;
+
+ if (!ipc_mux)
+ return NULL;
+
+ ipc_mux->protocol = mux_cfg->protocol;
+ ipc_mux->ul_flow = mux_cfg->ul_flow;
+ ipc_mux->nr_sessions = mux_cfg->nr_sessions;
+ ipc_mux->instance_id = mux_cfg->instance_id;
+ ipc_mux->wwan_q_offset = 0;
+
+ ipc_mux->pcie = imem->pcie;
+ ipc_mux->imem = imem;
+ ipc_mux->ipc_protocol = imem->ipc_protocol;
+ ipc_mux->dev = imem->dev;
+ ipc_mux->wwan = imem->wwan;
+
+ /* Get the reference to the UL ADB list. */
+ free_list = &ipc_mux->ul_adb.free_list;
+
+ /* Initialize the list with free ADB. */
+ skb_queue_head_init(free_list);
+
+ ul_td_size = IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE;
+
+ ul_tds = IPC_MEM_MAX_TDS_MUX_LITE_UL;
+
+ ipc_mux->ul_adb.dest_skb = NULL;
+
+ ipc_mux->initialized = true;
+ ipc_mux->adb_prep_ongoing = false;
+ ipc_mux->size_needed = 0;
+ ipc_mux->ul_data_pend_bytes = 0;
+ ipc_mux->state = MUX_S_INACTIVE;
+ ipc_mux->ev_mux_net_transmit_pending = false;
+ ipc_mux->tx_transaction_id = 0;
+ ipc_mux->rr_next_session = 0;
+ ipc_mux->event = MUX_E_INACTIVE;
+ ipc_mux->channel_id = -1;
+ ipc_mux->channel = NULL;
+
+ /* Allocate the list of UL ADB. */
+ for (i = 0; i < ul_tds; i++) {
+ dma_addr_t mapping;
+
+ skb = ipc_pcie_alloc_skb(ipc_mux->pcie, ul_td_size, GFP_ATOMIC,
+ &mapping, DMA_TO_DEVICE, 0);
+ if (!skb) {
+ ipc_mux_deinit(ipc_mux);
+ return NULL;
+ }
+ /* Extend the UL ADB list. */
+ skb_queue_tail(free_list, skb);
+ }
+
+ return ipc_mux;
+}
+
+/* Informs the network stack to restart transmission for all opened sessions
+ * whose Flow Control is not ON.
+ */
+static void ipc_mux_restart_tx_for_all_sessions(struct iosm_mux *ipc_mux)
+{
+ struct mux_session *session;
+ int idx;
+
+ for (idx = 0; idx < ipc_mux->nr_sessions; idx++) {
+ session = &ipc_mux->session[idx];
+
+ if (!session->wwan)
+ continue;
+
+ /* If flow control of the session is OFF and if there was tx
+ * stop then restart. Inform the network interface to restart
+ * sending data.
+ */
+ if (session->flow_ctl_mask == 0) {
+ session->net_tx_stop = false;
+ ipc_mux_netif_tx_flowctrl(session, idx, false);
+ }
+ }
+}
+
+/* Informs the network stack to stop sending further packets for all opened
+ * sessions.
+ */
+static void ipc_mux_stop_netif_for_all_sessions(struct iosm_mux *ipc_mux)
+{
+ struct mux_session *session;
+ int idx;
+
+ for (idx = 0; idx < ipc_mux->nr_sessions; idx++) {
+ session = &ipc_mux->session[idx];
+
+ if (!session->wwan)
+ continue;
+
+ ipc_mux_netif_tx_flowctrl(session, session->if_id, true);
+ }
+}
+
+void ipc_mux_check_n_restart_tx(struct iosm_mux *ipc_mux)
+{
+ if (ipc_mux->ul_flow == MUX_UL) {
+ int low_thresh = IPC_MEM_MUX_UL_FLOWCTRL_LOW_B;
+
+ if (ipc_mux->ul_data_pend_bytes < low_thresh)
+ ipc_mux_restart_tx_for_all_sessions(ipc_mux);
+ }
+}
+
+int ipc_mux_get_max_sessions(struct iosm_mux *ipc_mux)
+{
+ return ipc_mux ? ipc_mux->nr_sessions : -EFAULT;
+}
+
+enum ipc_mux_protocol ipc_mux_get_active_protocol(struct iosm_mux *ipc_mux)
+{
+ return ipc_mux ? ipc_mux->protocol : MUX_UNKNOWN;
+}
+
+int ipc_mux_open_session(struct iosm_mux *ipc_mux, int session_nr)
+{
+ struct mux_session_open *session_open;
+ union mux_msg mux_msg;
+
+ session_open = &mux_msg.session_open;
+ session_open->event = MUX_E_MUX_SESSION_OPEN;
+
+ session_open->if_id = cpu_to_le32(session_nr);
+ ipc_mux->session[session_nr].flags |= IPC_MEM_WWAN_MUX;
+ return ipc_mux_schedule(ipc_mux, &mux_msg);
+}
+
+int ipc_mux_close_session(struct iosm_mux *ipc_mux, int session_nr)
+{
+ struct mux_session_close *session_close;
+ union mux_msg mux_msg;
+ int ret_val;
+
+ session_close = &mux_msg.session_close;
+ session_close->event = MUX_E_MUX_SESSION_CLOSE;
+
+ session_close->if_id = cpu_to_le32(session_nr);
+ ret_val = ipc_mux_schedule(ipc_mux, &mux_msg);
+ ipc_mux->session[session_nr].flags &= ~IPC_MEM_WWAN_MUX;
+
+ return ret_val;
+}
+
+void ipc_mux_deinit(struct iosm_mux *ipc_mux)
+{
+ struct mux_channel_close *channel_close;
+ struct sk_buff_head *free_list;
+ union mux_msg mux_msg;
+ struct sk_buff *skb;
+
+ if (!ipc_mux->initialized)
+ return;
+ ipc_mux_stop_netif_for_all_sessions(ipc_mux);
+
+ channel_close = &mux_msg.channel_close;
+ channel_close->event = MUX_E_MUX_CHANNEL_CLOSE;
+ ipc_mux_schedule(ipc_mux, &mux_msg);
+
+ /* Empty the ADB free list. */
+ free_list = &ipc_mux->ul_adb.free_list;
+
+ /* Remove from the head of the free list. */
+ while ((skb = skb_dequeue(free_list)))
+ ipc_pcie_kfree_skb(ipc_mux->pcie, skb);
+
+ if (ipc_mux->channel) {
+ ipc_mux->channel->ul_pipe.is_open = false;
+ ipc_mux->channel->dl_pipe.is_open = false;
+ }
+
+ kfree(ipc_mux);
+}
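
ipc_mux_schedule() above is, in essence, a two-state machine: only a session-open event leaves MUX_S_INACTIVE, and a channel close drops back to it. A condensed standalone model of those transitions; the enum names are abbreviated stand-ins for the driver's mux_state and mux_event:

#include <stdio.h>

enum state { S_INACTIVE, S_ACTIVE, S_ERROR };
enum event { E_SESSION_OPEN, E_SESSION_CLOSE, E_CHANNEL_CLOSE };

/* Session open is the only event accepted while inactive; everything
 * else is only meaningful once the channel is up.
 */
static enum state step(enum state s, enum event e)
{
	switch (s) {
	case S_INACTIVE:
		return e == E_SESSION_OPEN ? S_ACTIVE : S_INACTIVE;
	case S_ACTIVE:
		return e == E_CHANNEL_CLOSE ? S_INACTIVE : S_ACTIVE;
	default:
		return S_ERROR;
	}
}

int main(void)
{
	enum state s = S_INACTIVE;

	s = step(s, E_SESSION_CLOSE);	/* ignored while inactive */
	s = step(s, E_SESSION_OPEN);	/* -> S_ACTIVE */
	s = step(s, E_CHANNEL_CLOSE);	/* -> back to S_INACTIVE */
	printf("final state: %d\n", s);
	return 0;
}
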
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.h b/drivers/net/wwan/iosm/iosm_ipc_mux.h
new file mode 100644
index 000000000000..ddd2cd0bd911
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux.h
@@ -0,0 +1,343 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_MUX_H
+#define IOSM_IPC_MUX_H
+
+#include "iosm_ipc_protocol.h"
+
+/* Size of the buffer for the IP MUX data buffer. */
+#define IPC_MEM_MAX_DL_MUX_BUF_SIZE (16 * 1024)
+#define IPC_MEM_MAX_UL_ADB_BUF_SIZE IPC_MEM_MAX_DL_MUX_BUF_SIZE
+
+/* Size of the buffer for the IP MUX Lite data buffer. */
+#define IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE (2 * 1024)
+
+/* TD counts for IP MUX Lite */
+#define IPC_MEM_MAX_TDS_MUX_LITE_UL 800
+#define IPC_MEM_MAX_TDS_MUX_LITE_DL 1200
+
+/* open session request (AP->CP) */
+#define MUX_CMD_OPEN_SESSION 1
+
+/* response to open session request (CP->AP) */
+#define MUX_CMD_OPEN_SESSION_RESP 2
+
+/* close session request (AP->CP) */
+#define MUX_CMD_CLOSE_SESSION 3
+
+/* response to close session request (CP->AP) */
+#define MUX_CMD_CLOSE_SESSION_RESP 4
+
+/* Flow control command with mask of the flow per queue/flow. */
+#define MUX_LITE_CMD_FLOW_CTL 5
+
+/* ACK the flow control command. Shall have the same Transaction ID as the
+ * matching FLOW_CTL command.
+ */
+#define MUX_LITE_CMD_FLOW_CTL_ACK 6
+
+/* Command for report packet indicating link quality metrics. */
+#define MUX_LITE_CMD_LINK_STATUS_REPORT 7
+
+/* Response to a report packet */
+#define MUX_LITE_CMD_LINK_STATUS_REPORT_RESP 8
+
+/* Used to reset a command/response state. */
+#define MUX_CMD_INVALID 255
+
+/* command response : command processed successfully */
+#define MUX_CMD_RESP_SUCCESS 0
+
+/* MUX for route link devices */
+#define IPC_MEM_WWAN_MUX BIT(0)
+
+/* Initiated actions to change the state of the MUX object. */
+enum mux_event {
+ MUX_E_INACTIVE, /* No initiated actions. */
+ MUX_E_MUX_SESSION_OPEN, /* Create the MUX channel and a session. */
+ MUX_E_MUX_SESSION_CLOSE, /* Release a session. */
+ MUX_E_MUX_CHANNEL_CLOSE, /* Release the MUX channel. */
+ MUX_E_NO_ORDERS, /* No MUX order. */
+ MUX_E_NOT_APPLICABLE, /* Defect IP MUX. */
+};
+
+/* MUX session open command. */
+struct mux_session_open {
+ enum mux_event event;
+ __le32 if_id;
+};
+
+/* MUX session close command. */
+struct mux_session_close {
+ enum mux_event event;
+ __le32 if_id;
+};
+
+/* MUX channel close command. */
+struct mux_channel_close {
+ enum mux_event event;
+};
+
+/* Default message type to find out the right message type. */
+struct mux_common {
+ enum mux_event event;
+};
+
+/* List of ops in MUX mode. */
+union mux_msg {
+ struct mux_session_open session_open;
+ struct mux_session_close session_close;
+ struct mux_channel_close channel_close;
+ struct mux_common common;
+};
+
+/* Parameter definition of the open session command. */
+struct mux_cmd_open_session {
+ u8 flow_ctrl; /* 0: Flow control disabled (flow allowed). */
+ /* 1: Flow control enabled (flow not allowed)*/
+ u8 ipv4v6_hints; /* 0: IPv4/IPv6 hints not supported.*/
+ /* 1: IPv4/IPv6 hints supported*/
+ __le16 reserved2; /* Reserved. Set to zero. */
+ __le32 dl_head_pad_len; /* Maximum length supported */
+ /* for DL head padding on a datagram. */
+};
+
+/* Parameter definition of the open session response. */
+struct mux_cmd_open_session_resp {
+ __le32 response; /* Response code */
+ u8 flow_ctrl; /* 0: Flow control disabled (flow allowed). */
+ /* 1: Flow control enabled (flow not allowed) */
+ u8 ipv4v6_hints; /* 0: IPv4/IPv6 hints not supported */
+ /* 1: IPv4/IPv6 hints supported */
+ __le16 reserved2; /* Reserved. Set to zero. */
+ __le32 ul_head_pad_len; /* Actual length supported for */
+ /* UL head padding on a datagram. */
+};
+
+/* Parameter definition of the close session response code */
+struct mux_cmd_close_session_resp {
+ __le32 response;
+};
+
+/* Parameter definition of the flow control command. */
+struct mux_cmd_flow_ctl {
+ __le32 mask; /* indicating the desired flow control */
+ /* state for various flows/queues */
+};
+
+/* Parameter definition of the link status report code*/
+struct mux_cmd_link_status_report {
+ u8 payload;
+};
+
+/* Parameter definition of the link status report response code. */
+struct mux_cmd_link_status_report_resp {
+ __le32 response;
+};
+
+/**
+ * union mux_cmd_param - Union-definition of the command parameters.
+ * @open_session: Inband command for open session
+ * @open_session_resp: Inband command for open session response
+ * @close_session_resp: Inband command for close session response
+ * @flow_ctl: In-band flow control on the opened interfaces
+ * @link_status: In-band Link Status Report
+ * @link_status_resp: In-band command for link status report response
+ */
+union mux_cmd_param {
+ struct mux_cmd_open_session open_session;
+ struct mux_cmd_open_session_resp open_session_resp;
+ struct mux_cmd_close_session_resp close_session_resp;
+ struct mux_cmd_flow_ctl flow_ctl;
+ struct mux_cmd_link_status_report link_status;
+ struct mux_cmd_link_status_report_resp link_status_resp;
+};
+
+/* States of the MUX object. */
+enum mux_state {
+ MUX_S_INACTIVE, /* IP MUX is unused. */
+ MUX_S_ACTIVE, /* IP MUX channel is available. */
+ MUX_S_ERROR, /* Defect IP MUX. */
+};
+
+/* Supported MUX protocols. */
+enum ipc_mux_protocol {
+ MUX_UNKNOWN,
+ MUX_LITE,
+};
+
+/* Supported UL data transfer methods. */
+enum ipc_mux_ul_flow {
+ MUX_UL_UNKNOWN,
+ MUX_UL, /* Normal UL data transfer */
+ MUX_UL_ON_CREDITS, /* UL data transfer will be based on credits */
+};
+
+/* List of the MUX session. */
+struct mux_session {
+ struct iosm_wwan *wwan; /* Network i/f used for communication */
+ int if_id; /* i/f id for session open message.*/
+ u32 flags;
+ u32 ul_head_pad_len; /* Nr of bytes for UL head padding. */
+ u32 dl_head_pad_len; /* Nr of bytes for DL head padding. */
+ struct sk_buff_head ul_list; /* skb entries for an ADT. */
+ u32 flow_ctl_mask; /* UL flow control */
+ u32 flow_ctl_en_cnt; /* Flow control Enable cmd count */
+ u32 flow_ctl_dis_cnt; /* Flow Control Disable cmd count */
+ int ul_flow_credits; /* UL flow credits */
+ u8 net_tx_stop:1,
+ flush:1; /* flush net interface ? */
+};
+
+/* State of a single UL data block. */
+struct mux_adb {
+ struct sk_buff *dest_skb; /* Current UL skb for the data block. */
+ u8 *buf; /* ADB memory. */
+ struct mux_adgh *adgh; /* ADGH pointer */
+ struct sk_buff *qlth_skb; /* QLTH pointer */
+ u32 *next_table_index; /* Pointer to next table index. */
+ struct sk_buff_head free_list; /* List of alloc. ADB for the UL sess.*/
+ int size; /* Size of the ADB memory. */
+ u32 if_cnt; /* Statistic counter */
+ u32 dg_cnt_total;
+ u32 payload_size;
+};
+
+/* Temporary ACB state. */
+struct mux_acb {
+ struct sk_buff *skb; /* Used UL skb. */
+ int if_id; /* Session id. */
+ u32 wanted_response;
+ u32 got_response;
+ u32 cmd;
+ union mux_cmd_param got_param; /* Received command/response parameter */
+};
+
+/**
+ * struct iosm_mux - Structure of the data multiplexing over an IP channel.
+ * @dev: Pointer to device structure
+ * @session: Array of the MUX sessions.
+ * @channel: Reference to the IP MUX channel
+ * @pcie: Pointer to iosm_pcie struct
+ * @imem: Pointer to iosm_imem
+ * @wwan: Pointer to iosm_wwan
+ * @ipc_protocol: Pointer to iosm_protocol
+ * @channel_id: Channel ID for MUX
+ * @protocol: Type of the MUX protocol
+ * @ul_flow: UL Flow type
+ * @nr_sessions: Number of sessions
+ * @instance_id: Instance ID
+ * @state: States of the MUX object
+ * @event: Initiated actions to change the state of the MUX object
+ * @tx_transaction_id: Transaction id for the ACB command.
+ * @rr_next_session: Next session number for round robin.
+ * @ul_adb: State of the UL ADB/ADGH.
+ * @size_needed: Variable to store the size needed during ADB preparation
+ * @ul_data_pend_bytes: Pending UL data to be processed in bytes
+ * @acb: Temporary ACB state
+ * @wwan_q_offset: This will hold the offset of the given instance
+ * Useful while passing or receiving packets from
+ * wwan/imem layer.
+ * @initialized: MUX object is initialized
+ * @ev_mux_net_transmit_pending:
+ * 0 means inform the IPC tasklet to pass the
+ * accumulated uplink ADB to CP.
+ * @adb_prep_ongoing: Flag for ADB preparation status
+ */
+struct iosm_mux {
+ struct device *dev;
+ struct mux_session session[IPC_MEM_MUX_IP_SESSION_ENTRIES];
+ struct ipc_mem_channel *channel;
+ struct iosm_pcie *pcie;
+ struct iosm_imem *imem;
+ struct iosm_wwan *wwan;
+ struct iosm_protocol *ipc_protocol;
+ int channel_id;
+ enum ipc_mux_protocol protocol;
+ enum ipc_mux_ul_flow ul_flow;
+ int nr_sessions;
+ int instance_id;
+ enum mux_state state;
+ enum mux_event event;
+ u32 tx_transaction_id;
+ int rr_next_session;
+ struct mux_adb ul_adb;
+ int size_needed;
+ long long ul_data_pend_bytes;
+ struct mux_acb acb;
+ int wwan_q_offset;
+ u8 initialized:1,
+ ev_mux_net_transmit_pending:1,
+ adb_prep_ongoing:1;
+};
+
+/* MUX configuration structure */
+struct ipc_mux_config {
+ enum ipc_mux_protocol protocol;
+ enum ipc_mux_ul_flow ul_flow;
+ int nr_sessions;
+ int instance_id;
+};
+
+/**
+ * ipc_mux_init - Allocates and Init MUX instance
+ * @mux_cfg: Pointer to MUX configuration structure
+ * @ipc_imem: Pointer to imem data-struct
+ *
+ * Returns: Initialized mux pointer on success else NULL
+ */
+struct iosm_mux *ipc_mux_init(struct ipc_mux_config *mux_cfg,
+ struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_mux_deinit - Deallocates MUX instance
+ * @ipc_mux: Pointer to the MUX instance.
+ */
+void ipc_mux_deinit(struct iosm_mux *ipc_mux);
+
+/**
+ * ipc_mux_check_n_restart_tx - Checks for pending UL data bytes and then
+ * restarts the net interface tx queue if the
+ * device has flow control set to off.
+ * @ipc_mux: Pointer to MUX data-struct
+ */
+void ipc_mux_check_n_restart_tx(struct iosm_mux *ipc_mux);
+
+/**
+ * ipc_mux_get_active_protocol - Returns the active MUX protocol type.
+ * @ipc_mux: Pointer to MUX data-struct
+ *
+ * Returns: enum of type ipc_mux_protocol
+ */
+enum ipc_mux_protocol ipc_mux_get_active_protocol(struct iosm_mux *ipc_mux);
+
+/**
+ * ipc_mux_open_session - Opens a MUX session for IP traffic.
+ * @ipc_mux: Pointer to MUX data-struct
+ * @session_nr: Interface ID or session number
+ *
+ * Returns: channel id on success, failure value on error
+ */
+int ipc_mux_open_session(struct iosm_mux *ipc_mux, int session_nr);
+
+/**
+ * ipc_mux_close_session - Closes a MUX session.
+ * @ipc_mux: Pointer to MUX data-struct
+ * @session_nr: Interface ID or session number
+ *
+ * Returns: channel id on success, failure value on error
+ */
+int ipc_mux_close_session(struct iosm_mux *ipc_mux, int session_nr);
+
+/**
+ * ipc_mux_get_max_sessions - Returns the maximum sessions supported on the
+ * provided MUX instance.
+ * @ipc_mux: Pointer to MUX data-struct
+ *
+ * Returns: Number of sessions supported on Success and failure value on error
+ */
+int ipc_mux_get_max_sessions(struct iosm_mux *ipc_mux);
+#endif
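
For reference, a hedged caller-side sketch of filling struct ipc_mux_config before ipc_mux_init(). The types are re-declared locally so the sketch compiles standalone; the literal values are illustrative only, in the driver they derive from the CP capability bits in the MMIO scratchpad:

#include <stdio.h>

enum ipc_mux_protocol { MUX_UNKNOWN, MUX_LITE };
enum ipc_mux_ul_flow { MUX_UL_UNKNOWN, MUX_UL, MUX_UL_ON_CREDITS };

struct ipc_mux_config {
	enum ipc_mux_protocol protocol;
	enum ipc_mux_ul_flow ul_flow;
	int nr_sessions;
	int instance_id;
};

int main(void)
{
	struct ipc_mux_config cfg = {
		.protocol = MUX_LITE,		/* no aggregation offered */
		.ul_flow = MUX_UL,		/* byte-count based UL flow */
		.nr_sessions = 8,		/* example session count */
		.instance_id = 0,
	};

	printf("proto=%d flow=%d sessions=%d\n",
	       cfg.protocol, cfg.ul_flow, cfg.nr_sessions);
	return 0;
}
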
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
new file mode 100644
index 000000000000..fbf3cab3394c
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
@@ -0,0 +1,910 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include <linux/nospec.h>
+
+#include "iosm_ipc_imem_ops.h"
+#include "iosm_ipc_mux_codec.h"
+#include "iosm_ipc_task_queue.h"
+
+/* Test the link power state and send a MUX command in blocking mode. */
+static int ipc_mux_tq_cmd_send(struct iosm_imem *ipc_imem, int arg, void *msg,
+ size_t size)
+{
+ struct iosm_mux *ipc_mux = ipc_imem->mux;
+ const struct mux_acb *acb = msg;
+
+ skb_queue_tail(&ipc_mux->channel->ul_list, acb->skb);
+ ipc_imem_ul_send(ipc_mux->imem);
+
+ return 0;
+}
+
+static int ipc_mux_acb_send(struct iosm_mux *ipc_mux, bool blocking)
+{
+ struct completion *completion = &ipc_mux->channel->ul_sem;
+ int ret = ipc_task_queue_send_task(ipc_mux->imem, ipc_mux_tq_cmd_send,
+ 0, &ipc_mux->acb,
+ sizeof(ipc_mux->acb), false);
+ if (ret) {
+ dev_err(ipc_mux->dev, "unable to send mux command");
+ return ret;
+ }
+
+ /* If blocking, suspend the caller and wait for the irq in the flash
+ * or crash phase; return an error on timeout to indicate failure.
+ */
+ if (blocking) {
+ u32 wait_time_milliseconds = IPC_MUX_CMD_RUN_DEFAULT_TIMEOUT;
+
+ reinit_completion(completion);
+
+ if (wait_for_completion_interruptible_timeout
+ (completion, msecs_to_jiffies(wait_time_milliseconds)) ==
+ 0) {
+ dev_err(ipc_mux->dev, "ch[%d] timeout",
+ ipc_mux->channel_id);
+ ipc_uevent_send(ipc_mux->imem->dev, UEVENT_MDM_TIMEOUT);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+/* Prepare mux Command */
+static struct mux_lite_cmdh *ipc_mux_lite_add_cmd(struct iosm_mux *ipc_mux,
+ u32 cmd, struct mux_acb *acb,
+ void *param, u32 param_size)
+{
+ struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)acb->skb->data;
+
+ cmdh->signature = cpu_to_le32(MUX_SIG_CMDH);
+ cmdh->command_type = cpu_to_le32(cmd);
+ cmdh->if_id = acb->if_id;
+
+ acb->cmd = cmd;
+
+ cmdh->cmd_len = cpu_to_le16(offsetof(struct mux_lite_cmdh, param) +
+ param_size);
+ cmdh->transaction_id = cpu_to_le32(ipc_mux->tx_transaction_id++);
+
+ if (param)
+ memcpy(&cmdh->param, param, param_size);
+
+ skb_put(acb->skb, le16_to_cpu(cmdh->cmd_len));
+
+ return cmdh;
+}
+
+static int ipc_mux_acb_alloc(struct iosm_mux *ipc_mux)
+{
+ struct mux_acb *acb = &ipc_mux->acb;
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+
+ /* Allocate skb memory for the uplink buffer. */
+ skb = ipc_pcie_alloc_skb(ipc_mux->pcie, MUX_MAX_UL_ACB_BUF_SIZE,
+ GFP_ATOMIC, &mapping, DMA_TO_DEVICE, 0);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Save the skb address. */
+ acb->skb = skb;
+
+ memset(skb->data, 0, MUX_MAX_UL_ACB_BUF_SIZE);
+
+ return 0;
+}
+
+int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
+ u32 transaction_id, union mux_cmd_param *param,
+ size_t res_size, bool blocking, bool respond)
+{
+ struct mux_acb *acb = &ipc_mux->acb;
+ struct mux_lite_cmdh *ack_lite;
+ int ret = 0;
+
+ acb->if_id = if_id;
+ ret = ipc_mux_acb_alloc(ipc_mux);
+ if (ret)
+ return ret;
+
+ ack_lite = ipc_mux_lite_add_cmd(ipc_mux, cmd_type, acb, param,
+ res_size);
+ if (respond)
+ ack_lite->transaction_id = cpu_to_le32(transaction_id);
+
+ ret = ipc_mux_acb_send(ipc_mux, blocking);
+
+ return ret;
+}
+
+void ipc_mux_netif_tx_flowctrl(struct mux_session *session, int idx, bool on)
+{
+ /* Inform the network interface to start/stop flow ctrl */
+ ipc_wwan_tx_flowctrl(session->wwan, idx, on);
+}
+
+static int ipc_mux_dl_cmdresps_decode_process(struct iosm_mux *ipc_mux,
+ struct mux_lite_cmdh *cmdh)
+{
+ struct mux_acb *acb = &ipc_mux->acb;
+
+ switch (le32_to_cpu(cmdh->command_type)) {
+ case MUX_CMD_OPEN_SESSION_RESP:
+ case MUX_CMD_CLOSE_SESSION_RESP:
+ /* Resume the control application. */
+ acb->got_param = cmdh->param;
+ break;
+
+ case MUX_LITE_CMD_FLOW_CTL_ACK:
+ /* This command type is not expected as a response for the
+ * aggregation version of the protocol, so return non-zero.
+ */
+ if (ipc_mux->protocol != MUX_LITE)
+ return -EINVAL;
+
+ dev_dbg(ipc_mux->dev, "if %u FLOW_CTL_ACK %u received",
+ cmdh->if_id, le32_to_cpu(cmdh->transaction_id));
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ acb->wanted_response = MUX_CMD_INVALID;
+ acb->got_response = le32_to_cpu(cmdh->command_type);
+ complete(&ipc_mux->channel->ul_sem);
+
+ return 0;
+}
+
+static int ipc_mux_dl_dlcmds_decode_process(struct iosm_mux *ipc_mux,
+ struct mux_lite_cmdh *cmdh)
+{
+ union mux_cmd_param *param = &cmdh->param;
+ struct mux_session *session;
+ int new_size;
+
+ dev_dbg(ipc_mux->dev, "if_id[%d]: dlcmds decode process %d",
+ cmdh->if_id, le32_to_cpu(cmdh->command_type));
+
+ switch (le32_to_cpu(cmdh->command_type)) {
+ case MUX_LITE_CMD_FLOW_CTL:
+
+ if (cmdh->if_id >= ipc_mux->nr_sessions) {
+ dev_err(ipc_mux->dev, "if_id [%d] not valid",
+ cmdh->if_id);
+ return -EINVAL; /* No session interface id. */
+ }
+
+ session = &ipc_mux->session[cmdh->if_id];
+
+ new_size = offsetof(struct mux_lite_cmdh, param) +
+ sizeof(param->flow_ctl);
+ if (param->flow_ctl.mask == cpu_to_le32(0xFFFFFFFF)) {
+ /* Backward Compatibility */
+ if (cmdh->cmd_len == cpu_to_le16(new_size))
+ session->flow_ctl_mask =
+ le32_to_cpu(param->flow_ctl.mask);
+ else
+ session->flow_ctl_mask = ~0;
+ /* if CP asks for FLOW CTRL Enable
+ * then set our internal flow control Tx flag
+ * to limit uplink session queueing
+ */
+ session->net_tx_stop = true;
+ /* Update the stats */
+ session->flow_ctl_en_cnt++;
+ } else if (param->flow_ctl.mask == 0) {
+ /* Just reset the Flow control mask and let
+ * mux_flow_ctrl_low_thre_b take control on
+ * our internal Tx flag and enabling kernel
+ * flow control
+ */
+ /* Backward Compatibility */
+ if (cmdh->cmd_len == cpu_to_le16(new_size))
+ session->flow_ctl_mask =
+ le32_to_cpu(param->flow_ctl.mask);
+ else
+ session->flow_ctl_mask = 0;
+ /* Update the stats */
+ session->flow_ctl_dis_cnt++;
+ } else {
+ break;
+ }
+
+ dev_dbg(ipc_mux->dev, "if[%u] FLOW CTRL 0x%08X", cmdh->if_id,
+ le32_to_cpu(param->flow_ctl.mask));
+ break;
+
+ case MUX_LITE_CMD_LINK_STATUS_REPORT:
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* Decode and Send appropriate response to a command block. */
+static void ipc_mux_dl_cmd_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
+{
+ struct mux_lite_cmdh *cmdh = (struct mux_lite_cmdh *)skb->data;
+ __le32 trans_id = cmdh->transaction_id;
+
+ if (ipc_mux_dl_cmdresps_decode_process(ipc_mux, cmdh)) {
+ /* Failure to decode a command response indicates the cmd_type
+ * may be a command instead of a response, so try decoding it.
+ */
+ if (!ipc_mux_dl_dlcmds_decode_process(ipc_mux, cmdh)) {
+ /* Decoded command may need a response. Give the
+ * response according to the command type.
+ */
+ union mux_cmd_param *mux_cmd = NULL;
+ size_t size = 0;
+ u32 cmd = MUX_LITE_CMD_LINK_STATUS_REPORT_RESP;
+
+ if (cmdh->command_type ==
+ cpu_to_le32(MUX_LITE_CMD_LINK_STATUS_REPORT)) {
+ mux_cmd = &cmdh->param;
+ mux_cmd->link_status_resp.response =
+ cpu_to_le32(MUX_CMD_RESP_SUCCESS);
+ /* response field is u32 */
+ size = sizeof(u32);
+ } else if (cmdh->command_type ==
+ cpu_to_le32(MUX_LITE_CMD_FLOW_CTL)) {
+ cmd = MUX_LITE_CMD_FLOW_CTL_ACK;
+ } else {
+ return;
+ }
+
+ if (ipc_mux_dl_acb_send_cmds(ipc_mux, cmd, cmdh->if_id,
+ le32_to_cpu(trans_id),
+ mux_cmd, size, false,
+ true))
+ dev_err(ipc_mux->dev,
+ "if_id %d: cmd send failed",
+ cmdh->if_id);
+ }
+ }
+}
+
+/* Pass the DL packet to the netif layer. */
+static int ipc_mux_net_receive(struct iosm_mux *ipc_mux, int if_id,
+ struct iosm_wwan *wwan, u32 offset,
+ u8 service_class, struct sk_buff *skb)
+{
+ struct sk_buff *dest_skb = skb_clone(skb, GFP_ATOMIC);
+
+ if (!dest_skb)
+ return -ENOMEM;
+
+ skb_pull(dest_skb, offset);
+ skb_set_tail_pointer(dest_skb, dest_skb->len);
+ /* Pass the packet to the netif layer. */
+ dest_skb->priority = service_class;
+
+ return ipc_wwan_receive(wwan, dest_skb, false, if_id + 1);
+}
+
+/* Decode Flow Credit Table in the block */
+static void ipc_mux_dl_fcth_decode(struct iosm_mux *ipc_mux,
+ unsigned char *block)
+{
+ struct ipc_mem_lite_gen_tbl *fct = (struct ipc_mem_lite_gen_tbl *)block;
+ struct iosm_wwan *wwan;
+ int ul_credits;
+ int if_id;
+
+ if (fct->vfl_length != sizeof(fct->vfl.nr_of_bytes)) {
+ dev_err(ipc_mux->dev, "unexpected FCT length: %d",
+ fct->vfl_length);
+ return;
+ }
+
+ if_id = fct->if_id;
+ if (if_id >= ipc_mux->nr_sessions) {
+ dev_err(ipc_mux->dev, "not supported if_id: %d", if_id);
+ return;
+ }
+
+ /* Is the session active ? */
+ if_id = array_index_nospec(if_id, ipc_mux->nr_sessions);
+ wwan = ipc_mux->session[if_id].wwan;
+ if (!wwan) {
+ dev_err(ipc_mux->dev, "session Net ID is NULL");
+ return;
+ }
+
+ ul_credits = fct->vfl.nr_of_bytes;
+
+ dev_dbg(ipc_mux->dev, "Flow_Credit:: if_id[%d] Old: %d Grants: %d",
+ if_id, ipc_mux->session[if_id].ul_flow_credits, ul_credits);
+
+ /* Update the Flow Credit information from ADB */
+ ipc_mux->session[if_id].ul_flow_credits += ul_credits;
+
+ /* Check whether the TX can be started */
+ if (ipc_mux->session[if_id].ul_flow_credits > 0) {
+ ipc_mux->session[if_id].net_tx_stop = false;
+ ipc_mux_netif_tx_flowctrl(&ipc_mux->session[if_id],
+ ipc_mux->session[if_id].if_id, false);
+ }
+}
+
+/* Decode non-aggregated datagram */
+static void ipc_mux_dl_adgh_decode(struct iosm_mux *ipc_mux,
+ struct sk_buff *skb)
+{
+ u32 pad_len, packet_offset;
+ struct iosm_wwan *wwan;
+ struct mux_adgh *adgh;
+ u8 *block = skb->data;
+ int rc = 0;
+ u8 if_id;
+
+ adgh = (struct mux_adgh *)block;
+
+ if (adgh->signature != cpu_to_le32(MUX_SIG_ADGH)) {
+ dev_err(ipc_mux->dev, "invalid ADGH signature received");
+ return;
+ }
+
+ if_id = adgh->if_id;
+ if (if_id >= ipc_mux->nr_sessions) {
+ dev_err(ipc_mux->dev, "invalid if_id while decoding %d", if_id);
+ return;
+ }
+
+ /* Is the session active ? */
+ if_id = array_index_nospec(if_id, ipc_mux->nr_sessions);
+ wwan = ipc_mux->session[if_id].wwan;
+ if (!wwan) {
+ dev_err(ipc_mux->dev, "session Net ID is NULL");
+ return;
+ }
+
+ /* Store the pad len for the corresponding session.
+ * Pad bytes are as negotiated in the open session, less the header
+ * size (see session management chapter for details).
+ * If the resulting padding is zero or less, the additional head
+ * padding is omitted. E.g., if HEAD_PAD_LEN = 16 or less, this field
+ * is omitted; if HEAD_PAD_LEN = 20, this field will have 4 bytes set
+ * to zero.
+ */
+ pad_len =
+ ipc_mux->session[if_id].dl_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
+ packet_offset = sizeof(*adgh) + pad_len;
+
+ if_id += ipc_mux->wwan_q_offset;
+
+ /* Pass the packet to the netif layer */
+ rc = ipc_mux_net_receive(ipc_mux, if_id, wwan, packet_offset,
+ adgh->service_class, skb);
+ if (rc) {
+ dev_err(ipc_mux->dev, "mux adgh decoding error");
+ return;
+ }
+ ipc_mux->session[if_id].flush = 1;
+}
+
+void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb)
+{
+ u32 signature;
+
+ if (!skb->data)
+ return;
+
+ /* Decode the MUX header type. */
+ signature = le32_to_cpup((__le32 *)skb->data);
+
+ switch (signature) {
+ case MUX_SIG_ADGH:
+ ipc_mux_dl_adgh_decode(ipc_mux, skb);
+ break;
+
+ case MUX_SIG_FCTH:
+ ipc_mux_dl_fcth_decode(ipc_mux, skb->data);
+ break;
+
+ case MUX_SIG_CMDH:
+ ipc_mux_dl_cmd_decode(ipc_mux, skb);
+ break;
+
+ default:
+ dev_err(ipc_mux->dev, "invalid ABH signature");
+ }
+
+ ipc_pcie_kfree_skb(ipc_mux->pcie, skb);
+}
+
+static int ipc_mux_ul_skb_alloc(struct iosm_mux *ipc_mux,
+ struct mux_adb *ul_adb, u32 type)
+{
+ /* Take the first element of the free list. */
+ struct sk_buff *skb = skb_dequeue(&ul_adb->free_list);
+ int qlt_size;
+
+ if (!skb)
+ return -EBUSY; /* Wait for a free ADB skb. */
+
+ /* Mark it as UL ADB to select the right free operation. */
+ IPC_CB(skb)->op_type = (u8)UL_MUX_OP_ADB;
+
+ switch (type) {
+ case MUX_SIG_ADGH:
+ /* Save the ADB memory settings. */
+ ul_adb->dest_skb = skb;
+ ul_adb->buf = skb->data;
+ ul_adb->size = IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE;
+ /* reset statistic counter */
+ ul_adb->if_cnt = 0;
+ ul_adb->payload_size = 0;
+ ul_adb->dg_cnt_total = 0;
+
+ ul_adb->adgh = (struct mux_adgh *)skb->data;
+ memset(ul_adb->adgh, 0, sizeof(struct mux_adgh));
+ break;
+
+ case MUX_SIG_QLTH:
+ qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
+ (MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl));
+
+ if (qlt_size > IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE) {
+ dev_err(ipc_mux->dev,
+ "can't support. QLT size:%d SKB size: %d",
+ qlt_size, IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE);
+ return -ERANGE;
+ }
+
+ ul_adb->qlth_skb = skb;
+ memset((ul_adb->qlth_skb)->data, 0, qlt_size);
+ skb_put(skb, qlt_size);
+ break;
+ }
+
+ return 0;
+}
+
+static void ipc_mux_ul_adgh_finish(struct iosm_mux *ipc_mux)
+{
+ struct mux_adb *ul_adb = &ipc_mux->ul_adb;
+ u16 adgh_len;
+ long long bytes;
+ char *str;
+
+ if (!ul_adb || !ul_adb->dest_skb) {
+ dev_err(ipc_mux->dev, "no dest skb");
+ return;
+ }
+
+ adgh_len = le16_to_cpu(ul_adb->adgh->length);
+ skb_put(ul_adb->dest_skb, adgh_len);
+ skb_queue_tail(&ipc_mux->channel->ul_list, ul_adb->dest_skb);
+ ul_adb->dest_skb = NULL;
+
+ if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
+ struct mux_session *session;
+
+ session = &ipc_mux->session[ul_adb->adgh->if_id];
+ str = "available_credits";
+ bytes = (long long)session->ul_flow_credits;
+
+ } else {
+ str = "pend_bytes";
+ bytes = ipc_mux->ul_data_pend_bytes;
+ ipc_mux->ul_data_pend_bytes = ipc_mux->ul_data_pend_bytes +
+ adgh_len;
+ }
+
+ dev_dbg(ipc_mux->dev, "UL ADGH: size=%u, if_id=%d, payload=%d, %s=%lld",
+ adgh_len, ul_adb->adgh->if_id, ul_adb->payload_size,
+ str, bytes);
+}
+
+/* Allocates an ADB from the free list and initializes it with ADBH */
+static bool ipc_mux_ul_adb_allocate(struct iosm_mux *ipc_mux,
+ struct mux_adb *adb, int *size_needed,
+ u32 type)
+{
+ bool ret_val = false;
+ int status;
+
+ if (!adb->dest_skb) {
+ /* Allocate memory for the ADB including of the
+ * datagram table header.
+ */
+ status = ipc_mux_ul_skb_alloc(ipc_mux, adb, type);
+ if (status)
+ /* Is a pending ADB available ? */
+ ret_val = true; /* None. */
+
+ /* Reset the needed size to zero for newly allocated ADB memory */
+ *size_needed = 0;
+ }
+
+ return ret_val;
+}
+
+/* Informs the network stack to stop sending further packets for all opened
+ * sessions
+ */
+static void ipc_mux_stop_tx_for_all_sessions(struct iosm_mux *ipc_mux)
+{
+ struct mux_session *session;
+ int idx;
+
+ for (idx = 0; idx < ipc_mux->nr_sessions; idx++) {
+ session = &ipc_mux->session[idx];
+
+ if (!session->wwan)
+ continue;
+
+ session->net_tx_stop = true;
+ }
+}
+
+/* Sends Queue Level Table of all opened sessions */
+static bool ipc_mux_lite_send_qlt(struct iosm_mux *ipc_mux)
+{
+ struct ipc_mem_lite_gen_tbl *qlt;
+ struct mux_session *session;
+ bool qlt_updated = false;
+ int i;
+ int qlt_size;
+
+ if (!ipc_mux->initialized || ipc_mux->state != MUX_S_ACTIVE)
+ return qlt_updated;
+
+ qlt_size = offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
+ MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);
+
+ for (i = 0; i < ipc_mux->nr_sessions; i++) {
+ session = &ipc_mux->session[i];
+
+ if (!session->wwan || session->flow_ctl_mask)
+ continue;
+
+ if (ipc_mux_ul_skb_alloc(ipc_mux, &ipc_mux->ul_adb,
+ MUX_SIG_QLTH)) {
+ dev_err(ipc_mux->dev,
+ "no reserved mem to send QLT of if_id: %d", i);
+ break;
+ }
+
+ /* Prepare QLT */
+ qlt = (struct ipc_mem_lite_gen_tbl *)(ipc_mux->ul_adb.qlth_skb)
+ ->data;
+ qlt->signature = cpu_to_le32(MUX_SIG_QLTH);
+ qlt->length = cpu_to_le16(qlt_size);
+ qlt->if_id = i;
+ qlt->vfl_length = MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl);
+ qlt->reserved[0] = 0;
+ qlt->reserved[1] = 0;
+
+ qlt->vfl.nr_of_bytes = session->ul_list.qlen;
+
+ /* Add QLT to the transfer list. */
+ skb_queue_tail(&ipc_mux->channel->ul_list,
+ ipc_mux->ul_adb.qlth_skb);
+
+ qlt_updated = true;
+ ipc_mux->ul_adb.qlth_skb = NULL;
+ }
+
+ if (qlt_updated)
+ /* Updates the TDs with ul_list */
+ (void)ipc_imem_ul_write_td(ipc_mux->imem);
+
+ return qlt_updated;
+}
+
+/* Checks the available credits for the specified session and returns
+ * number of packets for which credits are available.
+ */
+static int ipc_mux_ul_bytes_credits_check(struct iosm_mux *ipc_mux,
+ struct mux_session *session,
+ struct sk_buff_head *ul_list,
+ int max_nr_of_pkts)
+{
+ int pkts_to_send = 0;
+ struct sk_buff *skb;
+ int credits = 0;
+
+ if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS) {
+ credits = session->ul_flow_credits;
+ if (credits <= 0) {
+ dev_dbg(ipc_mux->dev,
+ "FC::if_id[%d] Insuff.Credits/Qlen:%d/%u",
+ session->if_id, session->ul_flow_credits,
+ session->ul_list.qlen); /* nr_of_bytes */
+ return 0;
+ }
+ } else {
+ credits = IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B -
+ ipc_mux->ul_data_pend_bytes;
+ if (credits <= 0) {
+ ipc_mux_stop_tx_for_all_sessions(ipc_mux);
+
+ dev_dbg(ipc_mux->dev,
+ "if_id[%d] encod. fail Bytes: %llu, thresh: %d",
+ session->if_id, ipc_mux->ul_data_pend_bytes,
+ IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B);
+ return 0;
+ }
+ }
+
+ /* Check if there are enough credits/bytes available to send the
+ * requested max_nr_of_pkts. Otherwise restrict the nr_of_pkts
+ * depending on available credits.
+ */
+ skb_queue_walk(ul_list, skb)
+ {
+ if (!(credits >= skb->len && pkts_to_send < max_nr_of_pkts))
+ break;
+ credits -= skb->len;
+ pkts_to_send++;
+ }
+
+ return pkts_to_send;
+}
+
+/* Encode the UL IP packet according to Lite spec. */
+static int ipc_mux_ul_adgh_encode(struct iosm_mux *ipc_mux, int session_id,
+ struct mux_session *session,
+ struct sk_buff_head *ul_list,
+ struct mux_adb *adb, int nr_of_pkts)
+{
+ int offset = sizeof(struct mux_adgh);
+ int adb_updated = -EINVAL;
+ struct sk_buff *src_skb;
+ int aligned_size = 0;
+ int nr_of_skb = 0;
+ u32 pad_len = 0;
+
+ /* Re-calculate the number of packets depending on number of bytes to be
+ * processed/available credits.
+ */
+ nr_of_pkts = ipc_mux_ul_bytes_credits_check(ipc_mux, session, ul_list,
+ nr_of_pkts);
+
+ /* If calculated nr_of_pkts from available credits is <= 0
+ * then nothing to do.
+ */
+ if (nr_of_pkts <= 0)
+ return 0;
+
+ /* Read configured UL head_pad_length for session.*/
+ if (session->ul_head_pad_len > IPC_MEM_DL_ETH_OFFSET)
+ pad_len = session->ul_head_pad_len - IPC_MEM_DL_ETH_OFFSET;
+
+ /* Process all pending UL packets for this session
+ * depending on the allocated datagram table size.
+ */
+ while (nr_of_pkts > 0) {
+ /* get destination skb allocated */
+ if (ipc_mux_ul_adb_allocate(ipc_mux, adb, &ipc_mux->size_needed,
+ MUX_SIG_ADGH)) {
+ dev_err(ipc_mux->dev, "no reserved memory for ADGH");
+ return -ENOMEM;
+ }
+
+ /* Peek at the head of the list. */
+ src_skb = skb_peek(ul_list);
+ if (!src_skb) {
+ dev_err(ipc_mux->dev,
+ "skb peek return NULL with count : %d",
+ nr_of_pkts);
+ break;
+ }
+
+ /* Calculate the memory value. */
+ aligned_size = ALIGN((pad_len + src_skb->len), 4);
+
+ ipc_mux->size_needed = sizeof(struct mux_adgh) + aligned_size;
+
+ if (ipc_mux->size_needed > adb->size) {
+ dev_dbg(ipc_mux->dev, "size needed %d, adgh size %d",
+ ipc_mux->size_needed, adb->size);
+ /* Return 1 if any IP packet is added to the transfer
+ * list.
+ */
+ return nr_of_skb ? 1 : 0;
+ }
+
+		/* Copy the datagram after the header and head padding. */
+ memcpy(adb->buf + offset + pad_len, src_skb->data,
+ src_skb->len);
+
+ adb->adgh->signature = cpu_to_le32(MUX_SIG_ADGH);
+ adb->adgh->if_id = session_id;
+ adb->adgh->length =
+ cpu_to_le16(sizeof(struct mux_adgh) + pad_len +
+ src_skb->len);
+ adb->adgh->service_class = src_skb->priority;
+ adb->adgh->next_count = --nr_of_pkts;
+ adb->dg_cnt_total++;
+ adb->payload_size += src_skb->len;
+
+ if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS)
+ /* Decrement the credit value as we are processing the
+ * datagram from the UL list.
+ */
+ session->ul_flow_credits -= src_skb->len;
+
+ /* Remove the processed elements and free it. */
+ src_skb = skb_dequeue(ul_list);
+ dev_kfree_skb(src_skb);
+ nr_of_skb++;
+
+ ipc_mux_ul_adgh_finish(ipc_mux);
+ }
+
+ if (nr_of_skb) {
+ /* Send QLT info to modem if pending bytes > high watermark
+ * in case of mux lite
+ */
+ if (ipc_mux->ul_flow == MUX_UL_ON_CREDITS ||
+ ipc_mux->ul_data_pend_bytes >=
+ IPC_MEM_MUX_UL_FLOWCTRL_LOW_B)
+ adb_updated = ipc_mux_lite_send_qlt(ipc_mux);
+ else
+ adb_updated = 1;
+
+ /* Updates the TDs with ul_list */
+ (void)ipc_imem_ul_write_td(ipc_mux->imem);
+ }
+
+ return adb_updated;
+}
+
+bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux)
+{
+ struct sk_buff_head *ul_list;
+ struct mux_session *session;
+ int updated = 0;
+ int session_id;
+ int dg_n;
+ int i;
+
+ if (!ipc_mux || ipc_mux->state != MUX_S_ACTIVE ||
+ ipc_mux->adb_prep_ongoing)
+ return false;
+
+ ipc_mux->adb_prep_ongoing = true;
+
+ for (i = 0; i < ipc_mux->nr_sessions; i++) {
+ session_id = ipc_mux->rr_next_session;
+ session = &ipc_mux->session[session_id];
+
+		/* Advance to the next session and handle the
+		 * rr_next_session wrap-around.
+		 */
+ ipc_mux->rr_next_session++;
+ if (ipc_mux->rr_next_session >= ipc_mux->nr_sessions)
+ ipc_mux->rr_next_session = 0;
+
+ if (!session->wwan || session->flow_ctl_mask ||
+ session->net_tx_stop)
+ continue;
+
+ ul_list = &session->ul_list;
+
+ /* Is something pending in UL and flow ctrl off */
+ dg_n = skb_queue_len(ul_list);
+ if (dg_n > MUX_MAX_UL_DG_ENTRIES)
+ dg_n = MUX_MAX_UL_DG_ENTRIES;
+
+ if (dg_n == 0)
+ /* Nothing to do for ipc_mux session
+ * -> try next session id.
+ */
+ continue;
+
+ updated = ipc_mux_ul_adgh_encode(ipc_mux, session_id, session,
+ ul_list, &ipc_mux->ul_adb,
+ dg_n);
+ }
+
+ ipc_mux->adb_prep_ongoing = false;
+ return updated == 1;
+}
+
+void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb)
+{
+ struct mux_adgh *adgh;
+ u16 adgh_len;
+
+ adgh = (struct mux_adgh *)skb->data;
+ adgh_len = le16_to_cpu(adgh->length);
+
+ if (adgh->signature == cpu_to_le32(MUX_SIG_ADGH) &&
+ ipc_mux->ul_flow == MUX_UL)
+		ipc_mux->ul_data_pend_bytes -= adgh_len;
+
+ if (ipc_mux->ul_flow == MUX_UL)
+ dev_dbg(ipc_mux->dev, "ul_data_pend_bytes: %lld",
+ ipc_mux->ul_data_pend_bytes);
+
+ /* Reset the skb settings. */
+ skb->tail = 0;
+ skb->len = 0;
+
+ /* Add the consumed ADB to the free list. */
+ skb_queue_tail((&ipc_mux->ul_adb.free_list), skb);
+}
+
+/* Start the NETIF uplink send transfer in MUX mode. */
+static int ipc_mux_tq_ul_trigger_encode(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ struct iosm_mux *ipc_mux = ipc_imem->mux;
+ bool ul_data_pend = false;
+
+	/* Add session UL data to an ADB and ADGH */
+ ul_data_pend = ipc_mux_ul_data_encode(ipc_mux);
+ if (ul_data_pend)
+ /* Delay the doorbell irq */
+ ipc_imem_td_update_timer_start(ipc_mux->imem);
+
+ /* reset the debounce flag */
+ ipc_mux->ev_mux_net_transmit_pending = false;
+
+ return 0;
+}
+
+int ipc_mux_ul_trigger_encode(struct iosm_mux *ipc_mux, int if_id,
+ struct sk_buff *skb)
+{
+ struct mux_session *session = &ipc_mux->session[if_id];
+ int ret = -EINVAL;
+
+ if (ipc_mux->channel &&
+ ipc_mux->channel->state != IMEM_CHANNEL_ACTIVE) {
+ dev_err(ipc_mux->dev,
+ "channel state is not IMEM_CHANNEL_ACTIVE");
+ goto out;
+ }
+
+ if (!session->wwan) {
+ dev_err(ipc_mux->dev, "session net ID is NULL");
+ ret = -EFAULT;
+ goto out;
+ }
+
+	/* The session is subject to flow control. Check whether the packet
+	 * can still be queued in the session list; if not, suspend net TX.
+	 */
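+	/* With the default values below this works out to 64 * 4 = 256
+	 * queued packets before TX is first suspended, and 64 once
+	 * net_tx_stop is already set.
+	 */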
+ if (skb_queue_len(&session->ul_list) >=
+ (session->net_tx_stop ?
+ IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD :
+ (IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD *
+ IPC_MEM_MUX_UL_SESS_FCOFF_THRESHOLD_FACTOR))) {
+ ipc_mux_netif_tx_flowctrl(session, session->if_id, true);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Add skb to the uplink skb accumulator. */
+ skb_queue_tail(&session->ul_list, skb);
+
+ /* Inform the IPC kthread to pass uplink IP packets to CP. */
+ if (!ipc_mux->ev_mux_net_transmit_pending) {
+ ipc_mux->ev_mux_net_transmit_pending = true;
+ ret = ipc_task_queue_send_task(ipc_mux->imem,
+ ipc_mux_tq_ul_trigger_encode, 0,
+ NULL, 0, false);
+ if (ret)
+ goto out;
+ }
+ dev_dbg(ipc_mux->dev, "mux ul if[%d] qlen=%d/%u, len=%d/%d, prio=%d",
+ if_id, skb_queue_len(&session->ul_list), session->ul_list.qlen,
+ skb->len, skb->truesize, skb->priority);
+ ret = 0;
+out:
+ return ret;
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h
new file mode 100644
index 000000000000..4a74e3c9457f
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_MUX_CODEC_H
+#define IOSM_IPC_MUX_CODEC_H
+
+#include "iosm_ipc_mux.h"
+
+/* Number of queue level entries in the QLT.
+ * A value >= 1 enables queue level reporting, 0 disables it.
+ */
+#define MUX_QUEUE_LEVEL 1
+
+/* Size of the buffer for the IP MUX commands. */
+#define MUX_MAX_UL_ACB_BUF_SIZE 256
+
+/* Maximum number of packets in a go per session */
+#define MUX_MAX_UL_DG_ENTRIES 100
+
+/* ADGH: Signature of the Datagram Header. */
+#define MUX_SIG_ADGH 0x48474441
+
+/* CMDH: Signature of the Command Header. */
+#define MUX_SIG_CMDH 0x48444D43
+
+/* QLTH: Signature of the Queue Level Table */
+#define MUX_SIG_QLTH 0x48544C51
+
+/* FCTH: Signature of the Flow Credit Table */
+#define MUX_SIG_FCTH 0x48544346
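+
+/* Note: each signature is the ASCII tag in little-endian byte order,
+ * e.g. 0x48474441 reads as "ADGH" in memory.
+ */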
+
+/* MUX UL session threshold factor */
+#define IPC_MEM_MUX_UL_SESS_FCOFF_THRESHOLD_FACTOR (4)
+
+/* Size of the buffer for the IP MUX Lite data buffer. */
+#define IPC_MEM_MAX_DL_MUX_LITE_BUF_SIZE (2 * 1024)
+
+/* MUX UL session threshold in number of packets */
+#define IPC_MEM_MUX_UL_SESS_FCON_THRESHOLD (64)
+
+/* Default timeout for sending IPC session commands like
+ * open session, close session etc.
+ * Unit: milliseconds
+ */
+#define IPC_MUX_CMD_RUN_DEFAULT_TIMEOUT 1000 /* 1 second */
+
+/* MUX UL flow control lower threshold in bytes */
+#define IPC_MEM_MUX_UL_FLOWCTRL_LOW_B 10240 /* 10KB */
+
+/* MUX UL flow control higher threshold in bytes (5 ms worth of data) */
+#define IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B (110 * 1024)
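+
+/* Illustration of the byte-based (MUX_UL) budget, derived from
+ * ipc_mux_ul_bytes_credits_check() in iosm_ipc_mux_codec.c: one encode
+ * pass may queue up to IPC_MEM_MUX_UL_FLOWCTRL_HIGH_B - ul_data_pend_bytes
+ * further bytes, e.g. with 100 KiB already pending, 110 KiB - 100 KiB =
+ * 10 KiB remain before TX is stopped for all sessions.
+ */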
+
+/**
+ * struct mux_adgh - Aggregated Datagram Header.
+ * @signature: Signature of the Aggregated Datagram Header (0x48474441)
+ * @length: Length (in bytes) of the datagram header. This length
+ * shall include the header size. Min value: 0x10
+ * @if_id: ID of the interface the datagrams belong to
+ * @opt_ipv4v6: Indicates IPv4 (=0) or IPv6 (=1). It is optional; if
+ * not used, set it to zero.
+ * @reserved: Reserved bits. Set to zero.
+ * @service_class: Service class identifier for the datagram.
+ * @next_count: Count of the datagrams that shall follow this datagram
+ * for this interface. A count of zero means the next
+ * datagram may not belong to this interface.
+ * @reserved1: Reserved bytes. Set to zero.
+ */
+struct mux_adgh {
+ __le32 signature;
+ __le16 length;
+ u8 if_id;
+ u8 opt_ipv4v6;
+ u8 service_class;
+ u8 next_count;
+ u8 reserved1[6];
+};
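+
+/* Illustration of the frame layout (derived from ipc_mux_ul_adgh_encode()
+ * in iosm_ipc_mux_codec.c, not from a spec document): the 16-byte ADGH is
+ * followed by the optional head padding and then the IP payload;
+ * adgh->length covers header + padding + payload, while the buffer space
+ * is accounted with that total rounded up to a 4-byte boundary.
+ */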
+
+/**
+ * struct mux_lite_cmdh - MUX Lite Command Header
+ * @signature: Signature of the Command Header (0x48444D43)
+ * @cmd_len: Length (in bytes) of the command. This length shall
+ * include the header size. Minimum value: 0x10
+ * @if_id: ID of the interface the commands in the table belong to.
+ * @reserved: Reserved. Set to zero.
+ * @command_type: Command Enum.
+ * @transaction_id: 4 byte value generated and sent along with a
+ * command. Responses and ACKs shall have the same
+ * transaction ID as their commands. It shall be unique to
+ * the command transaction on the given interface.
+ * @param: Optional parameters used with the command.
+ */
+struct mux_lite_cmdh {
+ __le32 signature;
+ __le16 cmd_len;
+ u8 if_id;
+ u8 reserved;
+ __le32 command_type;
+ __le32 transaction_id;
+ union mux_cmd_param param;
+};
+
+/**
+ * struct mux_lite_vfl - value field in generic table
+ * @nr_of_bytes: Number of bytes available to transmit in the queue.
+ */
+struct mux_lite_vfl {
+ u32 nr_of_bytes;
+};
+
+/**
+ * struct ipc_mem_lite_gen_tbl - Generic table format for Queue Level
+ * and Flow Credit
+ * @signature: Signature of the table
+ * @length: Length of the table
+ * @if_id: ID of the interface the table belongs to
+ * @vfl_length: Value field length
+ * @reserved: Reserved
+ * @vfl: Value field of variable length
+ */
+struct ipc_mem_lite_gen_tbl {
+ __le32 signature;
+ __le16 length;
+ u8 if_id;
+ u8 vfl_length;
+ u32 reserved[2];
+ struct mux_lite_vfl vfl;
+};
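+
+/* Worked example (assuming the layout above has no implicit padding):
+ * the QLT built by ipc_mux_lite_send_qlt() spans
+ * offsetof(struct ipc_mem_lite_gen_tbl, vfl) +
+ * MUX_QUEUE_LEVEL * sizeof(struct mux_lite_vfl) = 16 + 1 * 4 = 20 bytes.
+ */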
+
+/**
+ * ipc_mux_dl_decode -Route the DL packet through the IP MUX layer
+ * depending on Header.
+ * @ipc_mux: Pointer to MUX data-struct
+ * @skb: Pointer to ipc_skb.
+ */
+void ipc_mux_dl_decode(struct iosm_mux *ipc_mux, struct sk_buff *skb);
+
+/**
+ * ipc_mux_dl_acb_send_cmds - Respond to the Command blocks.
+ * @ipc_mux: Pointer to MUX data-struct
+ * @cmd_type: Command
+ * @if_id: Session interface id.
+ * @transaction_id: Command transaction id.
+ * @param: Pointer to command params.
+ * @res_size: Response size
+ * @blocking: True for blocking send
+ * @respond: If true return transaction ID
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_mux_dl_acb_send_cmds(struct iosm_mux *ipc_mux, u32 cmd_type, u8 if_id,
+ u32 transaction_id, union mux_cmd_param *param,
+ size_t res_size, bool blocking, bool respond);
+
+/**
+ * ipc_mux_netif_tx_flowctrl - Enable/Disable TX flow control on MUX sessions.
+ * @session: Pointer to mux_session struct
+ * @idx: Session ID
+ * @on: true to enable and false to disable flow control
+ */
+void ipc_mux_netif_tx_flowctrl(struct mux_session *session, int idx, bool on);
+
+/**
+ * ipc_mux_ul_trigger_encode - Route the UL packet through the IP MUX layer
+ * for encoding.
+ * @ipc_mux: Pointer to MUX data-struct
+ * @if_id: Session ID.
+ * @skb: Pointer to ipc_skb.
+ *
+ * Returns: 0 if successfully encoded,
+ *	    -EBUSY if the packet has to be retransmitted,
+ *	    or a failure value on any other error.
+ */
+int ipc_mux_ul_trigger_encode(struct iosm_mux *ipc_mux, int if_id,
+ struct sk_buff *skb);
+/**
+ * ipc_mux_ul_data_encode - UL encode function for calling from Tasklet context.
+ * @ipc_mux: Pointer to MUX data-struct
+ *
+ * Returns: true if any packet of any session is encoded, false otherwise.
+ */
+bool ipc_mux_ul_data_encode(struct iosm_mux *ipc_mux);
+
+/**
+ * ipc_mux_ul_encoded_process - Handles the Modem processed UL data by adding
+ * the SKB to the UL free list.
+ * @ipc_mux: Pointer to MUX data-struct
+ * @skb: Pointer to ipc_skb.
+ */
+void ipc_mux_ul_encoded_process(struct iosm_mux *ipc_mux, struct sk_buff *skb);
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
new file mode 100644
index 000000000000..7f7d364d3a51
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c
@@ -0,0 +1,580 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/module.h>
+#include <net/rtnetlink.h>
+
+#include "iosm_ipc_imem.h"
+#include "iosm_ipc_pcie.h"
+#include "iosm_ipc_protocol.h"
+
+MODULE_DESCRIPTION("IOSM Driver");
+MODULE_LICENSE("GPL v2");
+
+/* WWAN GUID */
+static guid_t wwan_acpi_guid = GUID_INIT(0xbad01b75, 0x22a8, 0x4f48, 0x87, 0x92,
+ 0xbd, 0xde, 0x94, 0x67, 0x74, 0x7d);
+
+static void ipc_pcie_resources_release(struct iosm_pcie *ipc_pcie)
+{
+ /* Free the MSI resources. */
+ ipc_release_irq(ipc_pcie);
+
+ /* Free mapped doorbell scratchpad bus memory into CPU space. */
+ iounmap(ipc_pcie->scratchpad);
+
+ /* Free mapped IPC_REGS bus memory into CPU space. */
+ iounmap(ipc_pcie->ipc_regs);
+
+ /* Releases all PCI I/O and memory resources previously reserved by a
+ * successful call to pci_request_regions. Call this function only
+ * after all use of the PCI regions has ceased.
+ */
+ pci_release_regions(ipc_pcie->pci);
+}
+
+static void ipc_pcie_cleanup(struct iosm_pcie *ipc_pcie)
+{
+ /* Free the shared memory resources. */
+ ipc_imem_cleanup(ipc_pcie->imem);
+
+ ipc_pcie_resources_release(ipc_pcie);
+
+ /* Signal to the system that the PCI device is not in use. */
+ pci_disable_device(ipc_pcie->pci);
+}
+
+static void ipc_pcie_deinit(struct iosm_pcie *ipc_pcie)
+{
+ kfree(ipc_pcie->imem);
+ kfree(ipc_pcie);
+}
+
+static void ipc_pcie_remove(struct pci_dev *pci)
+{
+ struct iosm_pcie *ipc_pcie = pci_get_drvdata(pci);
+
+ ipc_pcie_cleanup(ipc_pcie);
+
+ ipc_pcie_deinit(ipc_pcie);
+}
+
+static int ipc_pcie_resources_request(struct iosm_pcie *ipc_pcie)
+{
+ struct pci_dev *pci = ipc_pcie->pci;
+ u32 cap = 0;
+	int ret;
+
+	/* Reserve PCI I/O and memory resources.
+	 * Mark all PCI regions associated with PCI device pci as
+	 * being reserved by owner IOSM_IPC.
+	 */
+ ret = pci_request_regions(pci, "IOSM_IPC");
+ if (ret) {
+ dev_err(ipc_pcie->dev, "failed pci request regions");
+ goto pci_request_region_fail;
+ }
+
+ /* Reserve the doorbell IPC REGS memory resources.
+ * Remap the memory into CPU space. Arrange for the physical address
+ * (BAR) to be visible from this driver.
+ * pci_ioremap_bar() ensures that the memory is marked uncachable.
+ */
+ ipc_pcie->ipc_regs = pci_ioremap_bar(pci, ipc_pcie->ipc_regs_bar_nr);
+
+ if (!ipc_pcie->ipc_regs) {
+ dev_err(ipc_pcie->dev, "IPC REGS ioremap error");
+ ret = -EBUSY;
+ goto ipc_regs_remap_fail;
+ }
+
+ /* Reserve the MMIO scratchpad memory resources.
+ * Remap the memory into CPU space. Arrange for the physical address
+ * (BAR) to be visible from this driver.
+ * pci_ioremap_bar() ensures that the memory is marked uncachable.
+ */
+ ipc_pcie->scratchpad =
+ pci_ioremap_bar(pci, ipc_pcie->scratchpad_bar_nr);
+
+ if (!ipc_pcie->scratchpad) {
+ dev_err(ipc_pcie->dev, "doorbell scratchpad ioremap error");
+ ret = -EBUSY;
+ goto scratch_remap_fail;
+ }
+
+ /* Install the irq handler triggered by CP. */
+ ret = ipc_acquire_irq(ipc_pcie);
+ if (ret) {
+ dev_err(ipc_pcie->dev, "acquiring MSI irq failed!");
+ goto irq_acquire_fail;
+ }
+
+ /* Enable bus-mastering for the IOSM IPC device. */
+ pci_set_master(pci);
+
+	/* Enable LTR if possible.
+	 * This is needed for the L1.2 substate.
+	 */
+ pcie_capability_read_dword(ipc_pcie->pci, PCI_EXP_DEVCAP2, &cap);
+ if (cap & PCI_EXP_DEVCAP2_LTR)
+ pcie_capability_set_word(ipc_pcie->pci, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_LTR_EN);
+
+ dev_dbg(ipc_pcie->dev, "link between AP and CP is fully on");
+
+ return ret;
+
+irq_acquire_fail:
+ iounmap(ipc_pcie->scratchpad);
+scratch_remap_fail:
+ iounmap(ipc_pcie->ipc_regs);
+ipc_regs_remap_fail:
+ pci_release_regions(pci);
+pci_request_region_fail:
+ return ret;
+}
+
+bool ipc_pcie_check_aspm_enabled(struct iosm_pcie *ipc_pcie,
+ bool parent)
+{
+ struct pci_dev *pdev;
+ u16 value = 0;
+ u32 enabled;
+
+ if (parent)
+ pdev = ipc_pcie->pci->bus->self;
+ else
+ pdev = ipc_pcie->pci;
+
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &value);
+ enabled = value & PCI_EXP_LNKCTL_ASPMC;
+ dev_dbg(ipc_pcie->dev, "ASPM L1: 0x%04X 0x%03X", pdev->device, value);
+
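+	/* PCI_EXP_LNKCTL_ASPMC is the two-bit ASPM Control field:
+	 * PCI_EXP_LNKCTL_ASPM_L1 (0x2) means L1 only, PCI_EXP_LNKCTL_ASPMC
+	 * (0x3) means L0s and L1; either value implies L1 is enabled.
+	 */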
+ return (enabled == PCI_EXP_LNKCTL_ASPM_L1 ||
+ enabled == PCI_EXP_LNKCTL_ASPMC);
+}
+
+bool ipc_pcie_check_data_link_active(struct iosm_pcie *ipc_pcie)
+{
+ struct pci_dev *parent;
+ u16 link_status = 0;
+
+ if (!ipc_pcie->pci->bus || !ipc_pcie->pci->bus->self) {
+ dev_err(ipc_pcie->dev, "root port not found");
+ return false;
+ }
+
+ parent = ipc_pcie->pci->bus->self;
+
+ pcie_capability_read_word(parent, PCI_EXP_LNKSTA, &link_status);
+ dev_dbg(ipc_pcie->dev, "Link status: 0x%04X", link_status);
+
+ return link_status & PCI_EXP_LNKSTA_DLLLA;
+}
+
+static bool ipc_pcie_check_aspm_supported(struct iosm_pcie *ipc_pcie,
+ bool parent)
+{
+ struct pci_dev *pdev;
+ u32 support;
+ u32 cap = 0;
+
+ if (parent)
+ pdev = ipc_pcie->pci->bus->self;
+ else
+ pdev = ipc_pcie->pci;
+ pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &cap);
+ support = u32_get_bits(cap, PCI_EXP_LNKCAP_ASPMS);
+ if (support < PCI_EXP_LNKCTL_ASPM_L1) {
+ dev_dbg(ipc_pcie->dev, "ASPM L1 not supported: 0x%04X",
+ pdev->device);
+ return false;
+ }
+ return true;
+}
+
+void ipc_pcie_config_aspm(struct iosm_pcie *ipc_pcie)
+{
+ bool parent_aspm_enabled, dev_aspm_enabled;
+
+ /* check if both root port and child supports ASPM L1 */
+ if (!ipc_pcie_check_aspm_supported(ipc_pcie, true) ||
+ !ipc_pcie_check_aspm_supported(ipc_pcie, false))
+ return;
+
+ parent_aspm_enabled = ipc_pcie_check_aspm_enabled(ipc_pcie, true);
+ dev_aspm_enabled = ipc_pcie_check_aspm_enabled(ipc_pcie, false);
+
+ dev_dbg(ipc_pcie->dev, "ASPM parent: %s device: %s",
+ parent_aspm_enabled ? "Enabled" : "Disabled",
+ dev_aspm_enabled ? "Enabled" : "Disabled");
+}
+
+/* Initializes PCIe endpoint configuration */
+static void ipc_pcie_config_init(struct iosm_pcie *ipc_pcie)
+{
+ /* BAR0 is used for doorbell */
+ ipc_pcie->ipc_regs_bar_nr = IPC_DOORBELL_BAR0;
+
+ /* update HW configuration */
+ ipc_pcie->scratchpad_bar_nr = IPC_SCRATCHPAD_BAR2;
+ ipc_pcie->doorbell_reg_offset = IPC_DOORBELL_CH_OFFSET;
+ ipc_pcie->doorbell_write = IPC_WRITE_PTR_REG_0;
+ ipc_pcie->doorbell_capture = IPC_CAPTURE_PTR_REG_0;
+}
+
+/* This will read the BIOS WWAN RTD3 settings:
+ * D0L1.2/D3L2/Disabled
+ */
+static enum ipc_pcie_sleep_state ipc_pcie_read_bios_cfg(struct device *dev)
+{
+ union acpi_object *object;
+ acpi_handle handle_acpi;
+
+ handle_acpi = ACPI_HANDLE(dev);
+ if (!handle_acpi) {
+ pr_debug("pci device is NOT ACPI supporting device\n");
+ goto default_ret;
+ }
+
+	object = acpi_evaluate_dsm(handle_acpi, &wwan_acpi_guid, 0, 3, NULL);
+	if (object) {
+		bool d3l2 = object->integer.value == 3;
+
+		ACPI_FREE(object); /* free the _DSM result to avoid a leak */
+		if (d3l2)
+			return IPC_PCIE_D3L2;
+	}
+
+default_ret:
+ return IPC_PCIE_D0L12;
+}
+
+static int ipc_pcie_probe(struct pci_dev *pci,
+ const struct pci_device_id *pci_id)
+{
+ struct iosm_pcie *ipc_pcie = kzalloc(sizeof(*ipc_pcie), GFP_KERNEL);
+
+ pr_debug("Probing device 0x%X from the vendor 0x%X", pci_id->device,
+ pci_id->vendor);
+
+ if (!ipc_pcie)
+ goto ret_fail;
+
+ /* Initialize ipc dbg component for the PCIe device */
+ ipc_pcie->dev = &pci->dev;
+
+ /* Set the driver specific data. */
+ pci_set_drvdata(pci, ipc_pcie);
+
+ /* Save the address of the PCI device configuration. */
+ ipc_pcie->pci = pci;
+
+ /* Update platform configuration */
+ ipc_pcie_config_init(ipc_pcie);
+
+ /* Initialize the device before it is used. Ask low-level code
+ * to enable I/O and memory. Wake up the device if it was suspended.
+ */
+ if (pci_enable_device(pci)) {
+ dev_err(ipc_pcie->dev, "failed to enable the AP PCIe device");
+		/* If enabling the PCIe device has failed then calling
+		 * ipc_pcie_cleanup will panic the system. Moreover,
+		 * ipc_pcie_cleanup() is required to be called after
+		 * ipc_imem_mount()
+		 */
+ goto pci_enable_fail;
+ }
+
+ ipc_pcie_config_aspm(ipc_pcie);
+ dev_dbg(ipc_pcie->dev, "PCIe device enabled.");
+
+	/* Read the WWAN RTD3 BIOS setting. */
+ ipc_pcie->d3l2_support = ipc_pcie_read_bios_cfg(&pci->dev);
+
+ ipc_pcie->suspend = 0;
+
+ if (ipc_pcie_resources_request(ipc_pcie))
+ goto resources_req_fail;
+
+ /* Establish the link to the imem layer. */
+ ipc_pcie->imem = ipc_imem_init(ipc_pcie, pci->device,
+ ipc_pcie->scratchpad, ipc_pcie->dev);
+ if (!ipc_pcie->imem) {
+ dev_err(ipc_pcie->dev, "failed to init imem");
+ goto imem_init_fail;
+ }
+
+ return 0;
+
+imem_init_fail:
+ ipc_pcie_resources_release(ipc_pcie);
+resources_req_fail:
+ pci_disable_device(pci);
+pci_enable_fail:
+ kfree(ipc_pcie);
+ret_fail:
+ return -EIO;
+}
+
+static const struct pci_device_id iosm_ipc_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CP_DEVICE_7560_ID) },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, iosm_ipc_ids);
+
+/* Enter sleep in s2idle case. */
+static int __maybe_unused ipc_pcie_suspend_s2idle(struct iosm_pcie *ipc_pcie)
+{
+ ipc_cp_irq_sleep_control(ipc_pcie, IPC_MEM_DEV_PM_FORCE_SLEEP);
+
+ /* Complete all memory stores before setting bit */
+ smp_mb__before_atomic();
+
+ set_bit(0, &ipc_pcie->suspend);
+
+ /* Complete all memory stores after setting bit */
+ smp_mb__after_atomic();
+
+ ipc_imem_pm_s2idle_sleep(ipc_pcie->imem, true);
+
+ return 0;
+}
+
+/* Resume from sleep in s2idle case. */
+static int __maybe_unused ipc_pcie_resume_s2idle(struct iosm_pcie *ipc_pcie)
+{
+ ipc_cp_irq_sleep_control(ipc_pcie, IPC_MEM_DEV_PM_FORCE_ACTIVE);
+
+ ipc_imem_pm_s2idle_sleep(ipc_pcie->imem, false);
+
+ /* Complete all memory stores before clearing bit. */
+ smp_mb__before_atomic();
+
+ clear_bit(0, &ipc_pcie->suspend);
+
+ /* Complete all memory stores after clearing bit. */
+ smp_mb__after_atomic();
+ return 0;
+}
+
+int __maybe_unused ipc_pcie_suspend(struct iosm_pcie *ipc_pcie)
+{
+ struct pci_dev *pdev;
+ int ret;
+
+ pdev = ipc_pcie->pci;
+
+ /* Execute D3 one time. */
+ if (pdev->current_state != PCI_D0) {
+ dev_dbg(ipc_pcie->dev, "done for PM=%d", pdev->current_state);
+ return 0;
+ }
+
+ /* The HAL shall ask the shared memory layer whether D3 is allowed. */
+ ipc_imem_pm_suspend(ipc_pcie->imem);
+
+ /* Save the PCI configuration space of a device before suspending. */
+ ret = pci_save_state(pdev);
+
+ if (ret) {
+ dev_err(ipc_pcie->dev, "pci_save_state error=%d", ret);
+ return ret;
+ }
+
+ /* Set the power state of a PCI device.
+ * Transition a device to a new power state, using the device's PCI PM
+ * registers.
+ */
+ ret = pci_set_power_state(pdev, PCI_D3cold);
+
+ if (ret) {
+ dev_err(ipc_pcie->dev, "pci_set_power_state error=%d", ret);
+ return ret;
+ }
+
+ dev_dbg(ipc_pcie->dev, "SUSPEND done");
+ return ret;
+}
+
+int __maybe_unused ipc_pcie_resume(struct iosm_pcie *ipc_pcie)
+{
+ int ret;
+
+ /* Set the power state of a PCI device.
+ * Transition a device to a new power state, using the device's PCI PM
+ * registers.
+ */
+ ret = pci_set_power_state(ipc_pcie->pci, PCI_D0);
+
+ if (ret) {
+ dev_err(ipc_pcie->dev, "pci_set_power_state error=%d", ret);
+ return ret;
+ }
+
+ pci_restore_state(ipc_pcie->pci);
+
+ /* The HAL shall inform the shared memory layer that the device is
+ * active.
+ */
+ ipc_imem_pm_resume(ipc_pcie->imem);
+
+ dev_dbg(ipc_pcie->dev, "RESUME done");
+ return ret;
+}
+
+static int __maybe_unused ipc_pcie_suspend_cb(struct device *dev)
+{
+ struct iosm_pcie *ipc_pcie;
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(dev);
+
+ ipc_pcie = pci_get_drvdata(pdev);
+
+ switch (ipc_pcie->d3l2_support) {
+ case IPC_PCIE_D0L12:
+ ipc_pcie_suspend_s2idle(ipc_pcie);
+ break;
+ case IPC_PCIE_D3L2:
+ ipc_pcie_suspend(ipc_pcie);
+ break;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused ipc_pcie_resume_cb(struct device *dev)
+{
+ struct iosm_pcie *ipc_pcie;
+ struct pci_dev *pdev;
+
+ pdev = to_pci_dev(dev);
+
+ ipc_pcie = pci_get_drvdata(pdev);
+
+ switch (ipc_pcie->d3l2_support) {
+ case IPC_PCIE_D0L12:
+ ipc_pcie_resume_s2idle(ipc_pcie);
+ break;
+ case IPC_PCIE_D3L2:
+ ipc_pcie_resume(ipc_pcie);
+ break;
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(iosm_ipc_pm, ipc_pcie_suspend_cb, ipc_pcie_resume_cb);
+
+static struct pci_driver iosm_ipc_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = ipc_pcie_probe,
+ .remove = ipc_pcie_remove,
+ .driver = {
+ .pm = &iosm_ipc_pm,
+ },
+ .id_table = iosm_ipc_ids,
+};
+
+int ipc_pcie_addr_map(struct iosm_pcie *ipc_pcie, unsigned char *data,
+ size_t size, dma_addr_t *mapping, int direction)
+{
+ if (ipc_pcie->pci) {
+ *mapping = dma_map_single(&ipc_pcie->pci->dev, data, size,
+ direction);
+ if (dma_mapping_error(&ipc_pcie->pci->dev, *mapping)) {
+ dev_err(ipc_pcie->dev, "dma mapping failed");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+void ipc_pcie_addr_unmap(struct iosm_pcie *ipc_pcie, size_t size,
+ dma_addr_t mapping, int direction)
+{
+ if (!mapping)
+ return;
+ if (ipc_pcie->pci)
+ dma_unmap_single(&ipc_pcie->pci->dev, mapping, size, direction);
+}
+
+struct sk_buff *ipc_pcie_alloc_local_skb(struct iosm_pcie *ipc_pcie,
+ gfp_t flags, size_t size)
+{
+ struct sk_buff *skb;
+
+ if (!ipc_pcie || !size) {
+ pr_err("invalid pcie object or size");
+ return NULL;
+ }
+
+ skb = __netdev_alloc_skb(NULL, size, flags);
+ if (!skb)
+ return NULL;
+
+ IPC_CB(skb)->op_type = (u8)UL_DEFAULT;
+ IPC_CB(skb)->mapping = 0;
+
+ return skb;
+}
+
+struct sk_buff *ipc_pcie_alloc_skb(struct iosm_pcie *ipc_pcie, size_t size,
+ gfp_t flags, dma_addr_t *mapping,
+ int direction, size_t headroom)
+{
+ struct sk_buff *skb = ipc_pcie_alloc_local_skb(ipc_pcie, flags,
+ size + headroom);
+ if (!skb)
+ return NULL;
+
+ if (headroom)
+ skb_reserve(skb, headroom);
+
+ if (ipc_pcie_addr_map(ipc_pcie, skb->data, size, mapping, direction)) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
+ BUILD_BUG_ON(sizeof(*IPC_CB(skb)) > sizeof(skb->cb));
+
+ /* Store the mapping address in skb scratch pad for later usage */
+ IPC_CB(skb)->mapping = *mapping;
+ IPC_CB(skb)->direction = direction;
+ IPC_CB(skb)->len = size;
+
+ return skb;
+}
+
+void ipc_pcie_kfree_skb(struct iosm_pcie *ipc_pcie, struct sk_buff *skb)
+{
+ if (!skb)
+ return;
+
+ ipc_pcie_addr_unmap(ipc_pcie, IPC_CB(skb)->len, IPC_CB(skb)->mapping,
+ IPC_CB(skb)->direction);
+ IPC_CB(skb)->mapping = 0;
+ dev_kfree_skb(skb);
+}
+
+static int __init iosm_ipc_driver_init(void)
+{
+	int ret;
+
+	ret = pci_register_driver(&iosm_ipc_driver);
+	if (ret)
+		pr_err("registering of IOSM PCIe driver failed");
+
+	return ret;
+}
+
+static void __exit iosm_ipc_driver_exit(void)
+{
+ pci_unregister_driver(&iosm_ipc_driver);
+}
+
+module_init(iosm_ipc_driver_init);
+module_exit(iosm_ipc_driver_exit);
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.h b/drivers/net/wwan/iosm/iosm_ipc_pcie.h
new file mode 100644
index 000000000000..7d1f0cd7364c
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_PCIE_H
+#define IOSM_IPC_PCIE_H
+
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+
+#include "iosm_ipc_irq.h"
+
+/* Device ID */
+#define INTEL_CP_DEVICE_7560_ID 0x7560
+
+/* Define for BAR area usage */
+#define IPC_DOORBELL_BAR0 0
+#define IPC_SCRATCHPAD_BAR2 2
+
+/* Defines for DOORBELL registers information */
+#define IPC_DOORBELL_CH_OFFSET BIT(5)
+#define IPC_WRITE_PTR_REG_0 BIT(4)
+#define IPC_CAPTURE_PTR_REG_0 BIT(3)
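+
+/* Note: with the BIT() encoding above, IPC_DOORBELL_CH_OFFSET = 0x20,
+ * IPC_WRITE_PTR_REG_0 = 0x10 and IPC_CAPTURE_PTR_REG_0 = 0x08.
+ */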
+
+/* Number of MSI used for IPC */
+#define IPC_MSI_VECTORS 1
+
+/* Total number of Maximum IPC IRQ vectors used for IPC */
+#define IPC_IRQ_VECTORS IPC_MSI_VECTORS
+
+/**
+ * enum ipc_pcie_sleep_state - Enum type to different sleep state transitions
+ * @IPC_PCIE_D0L12: Put the sleep state in D0L12
+ * @IPC_PCIE_D3L2: Put the sleep state in D3L2
+ */
+enum ipc_pcie_sleep_state {
+ IPC_PCIE_D0L12,
+ IPC_PCIE_D3L2,
+};
+
+/**
+ * struct iosm_pcie - IPC_PCIE struct.
+ * @pci: Address of the device description
+ * @dev: Pointer to generic device structure
+ * @ipc_regs: Remapped CP doorbell address of the irq register
+ * set, to fire the doorbell irq.
+ * @scratchpad: Remapped CP scratchpad address, used to send the
+ * configuration tuple and the IPC descriptors
+ * to CP in the ROM phase. The config tuple
+ * information is saved on the MSI scratchpad.
+ * @imem: Pointer to imem data struct
+ * @ipc_regs_bar_nr: BAR number to be used for IPC doorbell
+ * @scratchpad_bar_nr: BAR number to be used for Scratchpad
+ * @nvec: number of requested irq vectors
+ * @doorbell_reg_offset: Offset between doorbell channel registers
+ * @doorbell_write: Doorbell write register
+ * @doorbell_capture: Doorbell capture register
+ * @suspend: S2IDLE sleep/active
+ * @d3l2_support: Read WWAN RTD3 BIOS setting for D3L2 support
+ */
+struct iosm_pcie {
+ struct pci_dev *pci;
+ struct device *dev;
+ void __iomem *ipc_regs;
+ void __iomem *scratchpad;
+ struct iosm_imem *imem;
+ int ipc_regs_bar_nr;
+ int scratchpad_bar_nr;
+ int nvec;
+ u32 doorbell_reg_offset;
+ u32 doorbell_write;
+ u32 doorbell_capture;
+ unsigned long suspend;
+ enum ipc_pcie_sleep_state d3l2_support;
+};
+
+/**
+ * struct ipc_skb_cb - Struct definition of the socket buffer which is mapped to
+ * the cb field of skb
+ * @mapping: Stores the physical or IOVA mapped address of the skb data.
+ * @direction: DMA direction
+ * @len: Length of the DMA mapped region
+ * @op_type: Expected values are defined in enum ipc_ul_usr_op.
+ */
+struct ipc_skb_cb {
+ dma_addr_t mapping;
+ int direction;
+ int len;
+ u8 op_type;
+};
+
+/**
+ * enum ipc_ul_usr_op - Control operation to execute the right action on
+ * the user interface.
+ * @UL_USR_OP_BLOCKED: The uplink app is blocked until CP confirms, via the
+ * IRQ, that the uplink buffer was consumed.
+ * @UL_MUX_OP_ADB: In MUX mode the UL ADB shall be added to the free list.
+ * @UL_DEFAULT: SKB in non muxing mode
+ */
+enum ipc_ul_usr_op {
+ UL_USR_OP_BLOCKED,
+ UL_MUX_OP_ADB,
+ UL_DEFAULT,
+};
+
+/**
+ * ipc_pcie_addr_map - Maps the kernel's virtual address to either IOVA
+ * address space or Physical address space, the mapping is
+ * stored in the skb's cb.
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ * @data: Skb mem containing data
+ * @size: Data size
+ * @mapping: Dma mapping address
+ * @direction: Data direction
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_pcie_addr_map(struct iosm_pcie *ipc_pcie, unsigned char *data,
+ size_t size, dma_addr_t *mapping, int direction);
+
+/**
+ * ipc_pcie_addr_unmap - Unmaps the skb memory region from IOVA address space
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ * @size: Data size
+ * @mapping: Dma mapping address
+ * @direction: Data direction
+ */
+void ipc_pcie_addr_unmap(struct iosm_pcie *ipc_pcie, size_t size,
+ dma_addr_t mapping, int direction);
+
+/**
+ * ipc_pcie_alloc_skb - Allocate an uplink SKB for the given size.
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ * @size: Size of the SKB required.
+ * @flags: Allocation flags
+ * @mapping: Receives the mapped IOVA or converted physical address
+ * @direction: DMA data direction
+ * @headroom: Header data offset
+ *
+ * Returns: Pointer to ipc_skb on Success, NULL on failure.
+ */
+struct sk_buff *ipc_pcie_alloc_skb(struct iosm_pcie *ipc_pcie, size_t size,
+ gfp_t flags, dma_addr_t *mapping,
+ int direction, size_t headroom);
+
+/**
+ * ipc_pcie_alloc_local_skb - Allocate a local SKB for the given size.
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ * @flags: Allocation flags
+ * @size: Size of the SKB required.
+ *
+ * Returns: Pointer to ipc_skb on Success, NULL on failure.
+ */
+struct sk_buff *ipc_pcie_alloc_local_skb(struct iosm_pcie *ipc_pcie,
+ gfp_t flags, size_t size);
+
+/**
+ * ipc_pcie_kfree_skb - Free skb allocated by ipc_pcie_alloc_*_skb().
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ * @skb: Pointer to the skb
+ */
+void ipc_pcie_kfree_skb(struct iosm_pcie *ipc_pcie, struct sk_buff *skb);
+
+/**
+ * ipc_pcie_check_data_link_active - Check Data Link Layer Active
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ *
+ * Returns: true if active, otherwise false
+ */
+bool ipc_pcie_check_data_link_active(struct iosm_pcie *ipc_pcie);
+
+/**
+ * ipc_pcie_suspend - Callback invoked by pm_runtime_suspend. It decrements
+ * the device's usage count and then carries out a suspend,
+ * either synchronous or asynchronous.
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_pcie_suspend(struct iosm_pcie *ipc_pcie);
+
+/**
+ * ipc_pcie_resume - Callback invoked by pm_runtime_resume. It increments
+ * the device's usage count and then carries out a resume,
+ * either synchronous or asynchronous.
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_pcie_resume(struct iosm_pcie *ipc_pcie);
+
+/**
+ * ipc_pcie_check_aspm_enabled - Check if ASPM L1 is already enabled
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ * @parent: True if checking ASPM L1 for parent else false
+ *
+ * Returns: true if ASPM is already enabled else false
+ */
+bool ipc_pcie_check_aspm_enabled(struct iosm_pcie *ipc_pcie,
+ bool parent);
+/**
+ * ipc_pcie_config_aspm - Configure ASPM L1
+ * @ipc_pcie: Pointer to struct iosm_pcie
+ */
+void ipc_pcie_config_aspm(struct iosm_pcie *ipc_pcie);
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pm.c b/drivers/net/wwan/iosm/iosm_ipc_pm.c
new file mode 100644
index 000000000000..413601c72dcd
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_pm.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include "iosm_ipc_protocol.h"
+
+/* Timeout value in ms for the PM to wait for device to reach active state */
+#define IPC_PM_ACTIVE_TIMEOUT_MS (500)
+
+/* Note that here "active" has the value 1, as compared to the enums
+ * ipc_mem_host_pm_state or ipc_mem_dev_pm_state, where "active" is 0
+ */
+#define IPC_PM_SLEEP (0)
+#define CONSUME_STATE (0)
+#define IPC_PM_ACTIVE (1)
+
+void ipc_pm_signal_hpda_doorbell(struct iosm_pm *ipc_pm, u32 identifier,
+ bool host_slp_check)
+{
+ if (host_slp_check && ipc_pm->host_pm_state != IPC_MEM_HOST_PM_ACTIVE &&
+ ipc_pm->host_pm_state != IPC_MEM_HOST_PM_ACTIVE_WAIT) {
+ ipc_pm->pending_hpda_update = true;
+ dev_dbg(ipc_pm->dev,
+ "Pend HPDA update set. Host PM_State: %d identifier:%d",
+ ipc_pm->host_pm_state, identifier);
+ return;
+ }
+
+ if (!ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_IRQ, true)) {
+ ipc_pm->pending_hpda_update = true;
+ dev_dbg(ipc_pm->dev, "Pending HPDA update set. identifier:%d",
+ identifier);
+ return;
+ }
+ ipc_pm->pending_hpda_update = false;
+
+ /* Trigger the irq towards CP */
+ ipc_cp_irq_hpda_update(ipc_pm->pcie, identifier);
+
+ ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_IRQ, false);
+}
+
+/* Wake up the device if it is in low power mode. */
+static bool ipc_pm_link_activate(struct iosm_pm *ipc_pm)
+{
+ if (ipc_pm->cp_state == IPC_MEM_DEV_PM_ACTIVE)
+ return true;
+
+ if (ipc_pm->cp_state == IPC_MEM_DEV_PM_SLEEP) {
+ if (ipc_pm->ap_state == IPC_MEM_DEV_PM_SLEEP) {
+ /* Wake up the device. */
+ ipc_cp_irq_sleep_control(ipc_pm->pcie,
+ IPC_MEM_DEV_PM_WAKEUP);
+ ipc_pm->ap_state = IPC_MEM_DEV_PM_ACTIVE_WAIT;
+
+ goto not_active;
+ }
+
+ if (ipc_pm->ap_state == IPC_MEM_DEV_PM_ACTIVE_WAIT)
+ goto not_active;
+
+ return true;
+ }
+
+not_active:
+ /* link is not ready */
+ return false;
+}
+
+bool ipc_pm_wait_for_device_active(struct iosm_pm *ipc_pm)
+{
+ bool ret_val = false;
+
+ if (ipc_pm->ap_state != IPC_MEM_DEV_PM_ACTIVE) {
+ /* Complete all memory stores before setting bit */
+ smp_mb__before_atomic();
+
+ /* Wait for IPC_PM_ACTIVE_TIMEOUT_MS for Device sleep state
+ * machine to enter ACTIVE state.
+ */
+ set_bit(0, &ipc_pm->host_sleep_pend);
+
+ /* Complete all memory stores after setting bit */
+ smp_mb__after_atomic();
+
+ if (!wait_for_completion_interruptible_timeout
+ (&ipc_pm->host_sleep_complete,
+ msecs_to_jiffies(IPC_PM_ACTIVE_TIMEOUT_MS))) {
+ dev_err(ipc_pm->dev,
+ "PM timeout. Expected State:%d. Actual: %d",
+ IPC_MEM_DEV_PM_ACTIVE, ipc_pm->ap_state);
+ goto active_timeout;
+ }
+ }
+
+ ret_val = true;
+active_timeout:
+ /* Complete all memory stores before clearing bit */
+ smp_mb__before_atomic();
+
+ /* Reset the atomic variable in any case as device sleep
+ * state machine change is no longer of interest.
+ */
+ clear_bit(0, &ipc_pm->host_sleep_pend);
+
+ /* Complete all memory stores after clearing bit */
+ smp_mb__after_atomic();
+
+ return ret_val;
+}
+
+static void ipc_pm_on_link_sleep(struct iosm_pm *ipc_pm)
+{
+ /* pending sleep ack and all conditions are cleared
+ * -> signal SLEEP__ACK to CP
+ */
+ ipc_pm->cp_state = IPC_MEM_DEV_PM_SLEEP;
+ ipc_pm->ap_state = IPC_MEM_DEV_PM_SLEEP;
+
+ ipc_cp_irq_sleep_control(ipc_pm->pcie, IPC_MEM_DEV_PM_SLEEP);
+}
+
+static void ipc_pm_on_link_wake(struct iosm_pm *ipc_pm, bool ack)
+{
+ ipc_pm->ap_state = IPC_MEM_DEV_PM_ACTIVE;
+
+ if (ack) {
+ ipc_pm->cp_state = IPC_MEM_DEV_PM_ACTIVE;
+
+ ipc_cp_irq_sleep_control(ipc_pm->pcie, IPC_MEM_DEV_PM_ACTIVE);
+
+		/* Check the consume state. */
+ if (test_bit(CONSUME_STATE, &ipc_pm->host_sleep_pend))
+ complete(&ipc_pm->host_sleep_complete);
+ }
+
+ /* Check for pending HPDA update.
+ * Pending HP update could be because of sending message was
+ * put on hold due to Device sleep state or due to TD update
+ * which could be because of Device Sleep and Host Sleep
+ * states.
+ */
+ if (ipc_pm->pending_hpda_update &&
+ ipc_pm->host_pm_state == IPC_MEM_HOST_PM_ACTIVE)
+ ipc_pm_signal_hpda_doorbell(ipc_pm, IPC_HP_PM_TRIGGER, true);
+}
+
+bool ipc_pm_trigger(struct iosm_pm *ipc_pm, enum ipc_pm_unit unit, bool active)
+{
+ union ipc_pm_cond old_cond;
+ union ipc_pm_cond new_cond;
+ bool link_active;
+
+ /* Save the current D3 state. */
+ new_cond = ipc_pm->pm_cond;
+ old_cond = ipc_pm->pm_cond;
+
+ /* Calculate the power state only in the runtime phase. */
+ switch (unit) {
+ case IPC_PM_UNIT_IRQ: /* CP irq */
+ new_cond.irq = active;
+ break;
+
+ case IPC_PM_UNIT_LINK: /* Device link state. */
+ new_cond.link = active;
+ break;
+
+ case IPC_PM_UNIT_HS: /* Host sleep trigger requires Link. */
+ new_cond.hs = active;
+ break;
+
+ default:
+ break;
+ }
+
+	/* Has anything changed? */
+ if (old_cond.raw == new_cond.raw) {
+ /* Stay in the current PM state. */
+ link_active = old_cond.link == IPC_PM_ACTIVE;
+ goto ret;
+ }
+
+ ipc_pm->pm_cond = new_cond;
+
+ if (new_cond.link)
+ ipc_pm_on_link_wake(ipc_pm, unit == IPC_PM_UNIT_LINK);
+ else if (unit == IPC_PM_UNIT_LINK)
+ ipc_pm_on_link_sleep(ipc_pm);
+
+ if (old_cond.link == IPC_PM_SLEEP && new_cond.raw) {
+ link_active = ipc_pm_link_activate(ipc_pm);
+ goto ret;
+ }
+
+ link_active = old_cond.link == IPC_PM_ACTIVE;
+
+ret:
+ return link_active;
+}
+
+bool ipc_pm_prepare_host_sleep(struct iosm_pm *ipc_pm)
+{
+ /* suspend not allowed if host_pm_state is not IPC_MEM_HOST_PM_ACTIVE */
+ if (ipc_pm->host_pm_state != IPC_MEM_HOST_PM_ACTIVE) {
+ dev_err(ipc_pm->dev, "host_pm_state=%d\tExpected to be: %d",
+ ipc_pm->host_pm_state, IPC_MEM_HOST_PM_ACTIVE);
+ return false;
+ }
+
+ ipc_pm->host_pm_state = IPC_MEM_HOST_PM_SLEEP_WAIT_D3;
+
+ return true;
+}
+
+bool ipc_pm_prepare_host_active(struct iosm_pm *ipc_pm)
+{
+ if (ipc_pm->host_pm_state != IPC_MEM_HOST_PM_SLEEP) {
+ dev_err(ipc_pm->dev, "host_pm_state=%d\tExpected to be: %d",
+ ipc_pm->host_pm_state, IPC_MEM_HOST_PM_SLEEP);
+ return false;
+ }
+
+ /* Sending Sleep Exit message to CP. Update the state */
+ ipc_pm->host_pm_state = IPC_MEM_HOST_PM_ACTIVE_WAIT;
+
+ return true;
+}
+
+void ipc_pm_set_s2idle_sleep(struct iosm_pm *ipc_pm, bool sleep)
+{
+ if (sleep) {
+ ipc_pm->ap_state = IPC_MEM_DEV_PM_SLEEP;
+ ipc_pm->cp_state = IPC_MEM_DEV_PM_SLEEP;
+ ipc_pm->device_sleep_notification = IPC_MEM_DEV_PM_SLEEP;
+ } else {
+ ipc_pm->ap_state = IPC_MEM_DEV_PM_ACTIVE;
+ ipc_pm->cp_state = IPC_MEM_DEV_PM_ACTIVE;
+ ipc_pm->device_sleep_notification = IPC_MEM_DEV_PM_ACTIVE;
+ ipc_pm->pm_cond.link = IPC_PM_ACTIVE;
+ }
+}
+
+bool ipc_pm_dev_slp_notification(struct iosm_pm *ipc_pm, u32 cp_pm_req)
+{
+ if (cp_pm_req == ipc_pm->device_sleep_notification)
+ return false;
+
+ ipc_pm->device_sleep_notification = cp_pm_req;
+
+ /* Evaluate the PM request. */
+ switch (ipc_pm->cp_state) {
+ case IPC_MEM_DEV_PM_ACTIVE:
+ switch (cp_pm_req) {
+ case IPC_MEM_DEV_PM_ACTIVE:
+ break;
+
+ case IPC_MEM_DEV_PM_SLEEP:
+ /* Inform the PM that the device link can go down. */
+ ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_LINK, false);
+ return true;
+
+ default:
+ dev_err(ipc_pm->dev,
+ "loc-pm=%d active: confused req-pm=%d",
+ ipc_pm->cp_state, cp_pm_req);
+ break;
+ }
+ break;
+
+ case IPC_MEM_DEV_PM_SLEEP:
+ switch (cp_pm_req) {
+ case IPC_MEM_DEV_PM_ACTIVE:
+ /* Inform the PM that the device link is active. */
+ ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_LINK, true);
+ break;
+
+ case IPC_MEM_DEV_PM_SLEEP:
+ break;
+
+ default:
+ dev_err(ipc_pm->dev,
+ "loc-pm=%d sleep: confused req-pm=%d",
+ ipc_pm->cp_state, cp_pm_req);
+ break;
+ }
+ break;
+
+ default:
+ dev_err(ipc_pm->dev, "confused loc-pm=%d, req-pm=%d",
+ ipc_pm->cp_state, cp_pm_req);
+ break;
+ }
+
+ return false;
+}
+
+void ipc_pm_init(struct iosm_protocol *ipc_protocol)
+{
+ struct iosm_imem *ipc_imem = ipc_protocol->imem;
+ struct iosm_pm *ipc_pm = &ipc_protocol->pm;
+
+ ipc_pm->pcie = ipc_imem->pcie;
+ ipc_pm->dev = ipc_imem->dev;
+
+ ipc_pm->pm_cond.irq = IPC_PM_SLEEP;
+ ipc_pm->pm_cond.hs = IPC_PM_SLEEP;
+ ipc_pm->pm_cond.link = IPC_PM_ACTIVE;
+
+ ipc_pm->cp_state = IPC_MEM_DEV_PM_ACTIVE;
+ ipc_pm->ap_state = IPC_MEM_DEV_PM_ACTIVE;
+ ipc_pm->host_pm_state = IPC_MEM_HOST_PM_ACTIVE;
+
+ /* Create generic wait-for-completion handler for Host Sleep
+ * and device sleep coordination.
+ */
+ init_completion(&ipc_pm->host_sleep_complete);
+
+ /* Complete all memory stores before clearing bit */
+ smp_mb__before_atomic();
+
+ clear_bit(0, &ipc_pm->host_sleep_pend);
+
+ /* Complete all memory stores after clearing bit */
+ smp_mb__after_atomic();
+}
+
+void ipc_pm_deinit(struct iosm_protocol *proto)
+{
+ struct iosm_pm *ipc_pm = &proto->pm;
+
+ complete(&ipc_pm->host_sleep_complete);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_pm.h b/drivers/net/wwan/iosm/iosm_ipc_pm.h
new file mode 100644
index 000000000000..e7c00f388cb0
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_pm.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_PM_H
+#define IOSM_IPC_PM_H
+
+/* Trigger the doorbell interrupt on cp to change the PM sleep/active status */
+#define ipc_cp_irq_sleep_control(ipc_pcie, data) \
+ ipc_doorbell_fire(ipc_pcie, IPC_DOORBELL_IRQ_SLEEP, data)
+
+/* Trigger the doorbell interrupt on CP to do hpda update */
+#define ipc_cp_irq_hpda_update(ipc_pcie, data) \
+ ipc_doorbell_fire(ipc_pcie, IPC_DOORBELL_IRQ_HPDA, 0xFF & (data))
+
+/**
+ * union ipc_pm_cond - Conditions for D3 and the sleep message to CP.
+ * @raw: raw/combined value for faster check
+ * @irq: IRQ towards CP
+ * @hs: Host Sleep
+ * @link: Device link state.
+ */
+union ipc_pm_cond {
+ unsigned int raw;
+
+ struct {
+ unsigned int irq:1,
+ hs:1,
+ link:1;
+ };
+};
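+
+/* Because the bitfields above share storage with @raw, ipc_pm_trigger()
+ * detects any condition change with a single raw compare, and raw == 0
+ * means no unit currently needs the link.
+ */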
+
+/**
+ * enum ipc_mem_host_pm_state - Possible states of the HOST SLEEP finite state
+ * machine.
+ * @IPC_MEM_HOST_PM_ACTIVE: Host is active
+ * @IPC_MEM_HOST_PM_ACTIVE_WAIT: Intermediate state before going to
+ * active
+ * @IPC_MEM_HOST_PM_SLEEP_WAIT_IDLE: Intermediate state to wait for idle
+ * before going into sleep
+ * @IPC_MEM_HOST_PM_SLEEP_WAIT_D3: Intermediate state to wait for D3
+ * before going to sleep
+ * @IPC_MEM_HOST_PM_SLEEP: after this state the interface is not
+ * accessible; the host is in suspend-to-RAM
+ * @IPC_MEM_HOST_PM_SLEEP_WAIT_EXIT_SLEEP: Intermediate state before exiting
+ * sleep
+ */
+enum ipc_mem_host_pm_state {
+ IPC_MEM_HOST_PM_ACTIVE,
+ IPC_MEM_HOST_PM_ACTIVE_WAIT,
+ IPC_MEM_HOST_PM_SLEEP_WAIT_IDLE,
+ IPC_MEM_HOST_PM_SLEEP_WAIT_D3,
+ IPC_MEM_HOST_PM_SLEEP,
+ IPC_MEM_HOST_PM_SLEEP_WAIT_EXIT_SLEEP,
+};
+
+/**
+ * enum ipc_mem_dev_pm_state - Possible states of the DEVICE SLEEP finite state
+ * machine.
+ * @IPC_MEM_DEV_PM_ACTIVE: IPC_MEM_DEV_PM_ACTIVE is the initial
+ * power management state.
+ * IRQ(struct ipc_mem_device_info:
+ * device_sleep_notification)
+ * and DOORBELL-IRQ-HPDA(data) values.
+ * @IPC_MEM_DEV_PM_SLEEP: IPC_MEM_DEV_PM_SLEEP is PM state for
+ * sleep.
+ * @IPC_MEM_DEV_PM_WAKEUP: DOORBELL-IRQ-DEVICE_WAKE(data).
+ * @IPC_MEM_DEV_PM_HOST_SLEEP: DOORBELL-IRQ-HOST_SLEEP(data).
+ * @IPC_MEM_DEV_PM_ACTIVE_WAIT: Local intermediate states.
+ * @IPC_MEM_DEV_PM_FORCE_SLEEP: DOORBELL-IRQ-FORCE_SLEEP.
+ * @IPC_MEM_DEV_PM_FORCE_ACTIVE: DOORBELL-IRQ-FORCE_ACTIVE.
+ */
+enum ipc_mem_dev_pm_state {
+ IPC_MEM_DEV_PM_ACTIVE,
+ IPC_MEM_DEV_PM_SLEEP,
+ IPC_MEM_DEV_PM_WAKEUP,
+ IPC_MEM_DEV_PM_HOST_SLEEP,
+ IPC_MEM_DEV_PM_ACTIVE_WAIT,
+ IPC_MEM_DEV_PM_FORCE_SLEEP = 7,
+ IPC_MEM_DEV_PM_FORCE_ACTIVE,
+};
+
+/**
+ * struct iosm_pm - Power management instance
+ * @pcie: Pointer to iosm_pcie structure
+ * @dev: Pointer to device structure
+ * @host_pm_state: PM states for host
+ * @host_sleep_pend: Variable to indicate Host Sleep Pending
+ * @host_sleep_complete: Generic wait-for-completion used in
+ * case of Host Sleep
+ * @pm_cond: Conditions for power management
+ * @ap_state: Current power management state, the
+ * initial state is IPC_MEM_DEV_PM_ACTIVE eq. 0.
+ * @cp_state: PM State of CP
+ * @device_sleep_notification: last handled device_sleep_notification
+ * @pending_hpda_update: is a HPDA update pending?
+ */
+struct iosm_pm {
+ struct iosm_pcie *pcie;
+ struct device *dev;
+ enum ipc_mem_host_pm_state host_pm_state;
+ unsigned long host_sleep_pend;
+ struct completion host_sleep_complete;
+ union ipc_pm_cond pm_cond;
+ enum ipc_mem_dev_pm_state ap_state;
+ enum ipc_mem_dev_pm_state cp_state;
+ u32 device_sleep_notification;
+ u8 pending_hpda_update:1;
+};
+
+/**
+ * enum ipc_pm_unit - Power management units.
+ * @IPC_PM_UNIT_IRQ: IRQ towards CP
+ * @IPC_PM_UNIT_HS: Host Sleep for converged protocol
+ * @IPC_PM_UNIT_LINK: Link state controlled by CP.
+ */
+enum ipc_pm_unit {
+ IPC_PM_UNIT_IRQ,
+ IPC_PM_UNIT_HS,
+ IPC_PM_UNIT_LINK,
+};
+
+/**
+ * ipc_pm_init - Allocate power management component
+ * @ipc_protocol: Pointer to iosm_protocol structure
+ */
+void ipc_pm_init(struct iosm_protocol *ipc_protocol);
+
+/**
+ * ipc_pm_deinit - Free power management component, invalidating its pointer.
+ * @ipc_protocol: Pointer to iosm_protocol structure
+ */
+void ipc_pm_deinit(struct iosm_protocol *ipc_protocol);
+
+/**
+ * ipc_pm_dev_slp_notification - Handle a sleep notification message from the
+ * device. This can be called from interrupt state
+ * This function handles Host Sleep requests too
+ * if the Host Sleep protocol is register based.
+ * @ipc_pm: Pointer to power management component
+ * @sleep_notification: Actual notification from device
+ *
+ * Returns: true if dev sleep state has to be checked, false otherwise.
+ */
+bool ipc_pm_dev_slp_notification(struct iosm_pm *ipc_pm,
+ u32 sleep_notification);
+
+/**
+ * ipc_pm_set_s2idle_sleep - Set PM variables to sleep/active
+ * @ipc_pm: Pointer to power management component
+ * @sleep: true to enter sleep/false to exit sleep
+ */
+void ipc_pm_set_s2idle_sleep(struct iosm_pm *ipc_pm, bool sleep);
+
+/**
+ * ipc_pm_prepare_host_sleep - Prepare the PM for sleep by entering
+ * IPC_MEM_HOST_PM_SLEEP_WAIT_D3 state.
+ * @ipc_pm: Pointer to power management component
+ *
+ * Returns: true on success, false if the host was not active.
+ */
+bool ipc_pm_prepare_host_sleep(struct iosm_pm *ipc_pm);
+
+/**
+ * ipc_pm_prepare_host_active - Prepare the PM for wakeup by entering
+ * IPC_MEM_HOST_PM_ACTIVE_WAIT state.
+ * @ipc_pm: Pointer to power management component
+ *
+ * Returns: true on success, false if the host was not sleeping.
+ */
+bool ipc_pm_prepare_host_active(struct iosm_pm *ipc_pm);
+
+/**
+ * ipc_pm_wait_for_device_active - Wait up to IPC_PM_ACTIVE_TIMEOUT_MS ms
+ * for the device to reach active state
+ * @ipc_pm: Pointer to power management component
+ *
+ * Returns: true if device is active, false on timeout
+ */
+bool ipc_pm_wait_for_device_active(struct iosm_pm *ipc_pm);
+
+/**
+ * ipc_pm_signal_hpda_doorbell - Wake up the device if it is in low power mode
+ * and trigger a head pointer update interrupt.
+ * @ipc_pm: Pointer to power management component
+ * @identifier: specifies which component triggered the HPDA update irq
+ * @host_slp_check: if true, a Host Sleep state machine check is performed;
+ * the doorbell is only triggered if the state machine
+ * allows the HP update, otherwise the pending flag is set.
+ * If false, the Host Sleep check is skipped, which is
+ * helpful for Host Sleep negotiation through the
+ * message ring.
+ */
+void ipc_pm_signal_hpda_doorbell(struct iosm_pm *ipc_pm, u32 identifier,
+ bool host_slp_check);
+/**
+ * ipc_pm_trigger - Update power manager and wake up the link if needed
+ * @ipc_pm: Pointer to power management component
+ * @unit: Power management units
+ * @active: Device link state
+ *
+ * Returns: true if link is unchanged or active, false otherwise
+ */
+bool ipc_pm_trigger(struct iosm_pm *ipc_pm, enum ipc_pm_unit unit, bool active);
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_port.c b/drivers/net/wwan/iosm/iosm_ipc_port.c
new file mode 100644
index 000000000000..beb944847398
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_port.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include "iosm_ipc_chnl_cfg.h"
+#include "iosm_ipc_imem_ops.h"
+#include "iosm_ipc_port.h"
+
+/* open logical channel for control communication */
+static int ipc_port_ctrl_start(struct wwan_port *port)
+{
+ struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
+ int ret = 0;
+
+ ipc_port->channel = ipc_imem_sys_port_open(ipc_port->ipc_imem,
+ ipc_port->chl_id,
+ IPC_HP_CDEV_OPEN);
+ if (!ipc_port->channel)
+ ret = -EIO;
+
+ return ret;
+}
+
+/* close logical channel */
+static void ipc_port_ctrl_stop(struct wwan_port *port)
+{
+ struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
+
+ ipc_imem_sys_cdev_close(ipc_port);
+}
+
+/* transfer control data to modem */
+static int ipc_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
+{
+ struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
+
+ return ipc_imem_sys_cdev_write(ipc_port, skb);
+}
+
+static const struct wwan_port_ops ipc_wwan_ctrl_ops = {
+ .start = ipc_port_ctrl_start,
+ .stop = ipc_port_ctrl_stop,
+ .tx = ipc_port_ctrl_tx,
+};
+
+/* Port init func */
+struct iosm_cdev *ipc_port_init(struct iosm_imem *ipc_imem,
+ struct ipc_chnl_cfg ipc_port_cfg)
+{
+ struct iosm_cdev *ipc_port = kzalloc(sizeof(*ipc_port), GFP_KERNEL);
+ enum wwan_port_type port_type = ipc_port_cfg.wwan_port_type;
+ enum ipc_channel_id chl_id = ipc_port_cfg.id;
+
+ if (!ipc_port)
+ return NULL;
+
+ ipc_port->dev = ipc_imem->dev;
+ ipc_port->pcie = ipc_imem->pcie;
+
+ ipc_port->port_type = port_type;
+ ipc_port->chl_id = chl_id;
+ ipc_port->ipc_imem = ipc_imem;
+
+ ipc_port->iosm_port = wwan_create_port(ipc_port->dev, port_type,
+ &ipc_wwan_ctrl_ops, ipc_port);
+
+ return ipc_port;
+}
+
+/* Port deinit func */
+void ipc_port_deinit(struct iosm_cdev *port[])
+{
+ struct iosm_cdev *ipc_port;
+ u8 ctrl_chl_nr;
+
+ for (ctrl_chl_nr = 0; ctrl_chl_nr < IPC_MEM_MAX_CHANNELS;
+ ctrl_chl_nr++) {
+ if (port[ctrl_chl_nr]) {
+ ipc_port = port[ctrl_chl_nr];
+ wwan_remove_port(ipc_port->iosm_port);
+ kfree(ipc_port);
+ }
+ }
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_port.h b/drivers/net/wwan/iosm/iosm_ipc_port.h
new file mode 100644
index 000000000000..11bc8ed21616
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_port.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_PORT_H
+#define IOSM_IPC_PORT_H
+
+#include <linux/wwan.h>
+
+#include "iosm_ipc_imem_ops.h"
+
+/**
+ * struct iosm_cdev - State of the char driver layer.
+ * @iosm_port: Pointer of type wwan_port
+ * @ipc_imem: imem instance
+ * @dev: Pointer to device struct
+ * @pcie: PCIe component
+ * @port_type: WWAN port type
+ * @channel: Channel instance
+ * @chl_id: Channel Identifier
+ */
+struct iosm_cdev {
+ struct wwan_port *iosm_port;
+ struct iosm_imem *ipc_imem;
+ struct device *dev;
+ struct iosm_pcie *pcie;
+ enum wwan_port_type port_type;
+ struct ipc_mem_channel *channel;
+ enum ipc_channel_id chl_id;
+};
+
+/**
+ * ipc_port_init - Allocate IPC port & register to wwan subsystem for AT/MBIM
+ * communication.
+ * @ipc_imem: Pointer to iosm_imem structure
+ * @ipc_port_cfg: IPC Port Config
+ *
+ * Returns: Pointer to iosm_cdev on success & NULL on failure
+ */
+struct iosm_cdev *ipc_port_init(struct iosm_imem *ipc_imem,
+ struct ipc_chnl_cfg ipc_port_cfg);
+
+/**
+ * ipc_port_deinit - Free IPC port & unregister port with wwan subsystem.
+ * @ipc_port: Array of pointer to the ipc port data-struct
+ */
+void ipc_port_deinit(struct iosm_cdev *ipc_port[]);
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_protocol.c b/drivers/net/wwan/iosm/iosm_ipc_protocol.c
new file mode 100644
index 000000000000..834d8b146a94
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_protocol.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include "iosm_ipc_imem.h"
+#include "iosm_ipc_protocol.h"
+#include "iosm_ipc_protocol_ops.h"
+#include "iosm_ipc_pm.h"
+#include "iosm_ipc_task_queue.h"
+
+int ipc_protocol_tq_msg_send(struct iosm_protocol *ipc_protocol,
+ enum ipc_msg_prep_type msg_type,
+ union ipc_msg_prep_args *prep_args,
+ struct ipc_rsp *response)
+{
+ int index = ipc_protocol_msg_prep(ipc_protocol->imem, msg_type,
+ prep_args);
+
+ /* Store reference towards caller specified response in response ring
+ * and signal CP
+ */
+ if (index >= 0 && index < IPC_MEM_MSG_ENTRIES) {
+ ipc_protocol->rsp_ring[index] = response;
+ ipc_protocol_msg_hp_update(ipc_protocol->imem);
+ }
+
+ return index;
+}
+
+/* Callback for message send */
+static int ipc_protocol_tq_msg_send_cb(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ struct ipc_call_msg_send_args *send_args = msg;
+ struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;
+
+ return ipc_protocol_tq_msg_send(ipc_protocol, send_args->msg_type,
+ send_args->prep_args,
+ send_args->response);
+}
+
+/* Remove reference to a response. This is typically used when a requestor timed
+ * out and is no longer interested in the response.
+ */
+static int ipc_protocol_tq_msg_remove(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;
+
+ ipc_protocol->rsp_ring[arg] = NULL;
+ return 0;
+}
+
+int ipc_protocol_msg_send(struct iosm_protocol *ipc_protocol,
+ enum ipc_msg_prep_type prep,
+ union ipc_msg_prep_args *prep_args)
+{
+ struct ipc_call_msg_send_args send_args;
+ unsigned int exec_timeout;
+ struct ipc_rsp response;
+ int index;
+
+ exec_timeout = (ipc_protocol_get_ap_exec_stage(ipc_protocol) ==
+ IPC_MEM_EXEC_STAGE_RUN ?
+ IPC_MSG_COMPLETE_RUN_DEFAULT_TIMEOUT :
+ IPC_MSG_COMPLETE_BOOT_DEFAULT_TIMEOUT);
+
+ /* Trap if called from non-preemptible context */
+ might_sleep();
+
+ response.status = IPC_MEM_MSG_CS_INVALID;
+ init_completion(&response.completion);
+
+ send_args.msg_type = prep;
+ send_args.prep_args = prep_args;
+ send_args.response = &response;
+
+	/* Allocate and prepare the message to be sent in tasklet context.
+	 * A positive index returned from the tasklet call references the
+	 * message in case it needs to be cancelled after a timeout.
+	 */
+ index = ipc_task_queue_send_task(ipc_protocol->imem,
+ ipc_protocol_tq_msg_send_cb, 0,
+ &send_args, 0, true);
+
+ if (index < 0) {
+ dev_err(ipc_protocol->dev, "msg %d failed", prep);
+ return index;
+ }
+
+ /* Wait for the device to respond to the message */
+ switch (wait_for_completion_timeout(&response.completion,
+ msecs_to_jiffies(exec_timeout))) {
+ case 0:
+ /* Timeout, there was no response from the device.
+ * Remove the reference to the local response completion
+ * object as we are no longer interested in the response.
+ */
+ ipc_task_queue_send_task(ipc_protocol->imem,
+ ipc_protocol_tq_msg_remove, index,
+ NULL, 0, true);
+ dev_err(ipc_protocol->dev, "msg timeout");
+ ipc_uevent_send(ipc_protocol->pcie->dev, UEVENT_MDM_TIMEOUT);
+ break;
+ default:
+ /* We got a response in time; check completion status: */
+ if (response.status != IPC_MEM_MSG_CS_SUCCESS) {
+ dev_err(ipc_protocol->dev,
+ "msg completion status error %d",
+ response.status);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
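+/* Example caller of ipc_protocol_msg_send(): fill the sleep prep args and
+ * send the host sleep message synchronously to CP.
+ */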
+static int ipc_protocol_msg_send_host_sleep(struct iosm_protocol *ipc_protocol,
+ u32 state)
+{
+ union ipc_msg_prep_args prep_args = {
+ .sleep.target = 0,
+ .sleep.state = state,
+ };
+
+ return ipc_protocol_msg_send(ipc_protocol, IPC_MSG_PREP_SLEEP,
+ &prep_args);
+}
+
+void ipc_protocol_doorbell_trigger(struct iosm_protocol *ipc_protocol,
+ u32 identifier)
+{
+ ipc_pm_signal_hpda_doorbell(&ipc_protocol->pm, identifier, true);
+}
+
+bool ipc_protocol_pm_dev_sleep_handle(struct iosm_protocol *ipc_protocol)
+{
+ u32 ipc_status = ipc_protocol_get_ipc_status(ipc_protocol);
+ u32 requested;
+
+ if (ipc_status != IPC_MEM_DEVICE_IPC_RUNNING) {
+ dev_err(ipc_protocol->dev,
+ "irq ignored, CP IPC state is %d, should be RUNNING",
+ ipc_status);
+
+ /* Stop further processing. */
+ return false;
+ }
+
+ /* Get a copy of the PM state requested by the device and the local
+ * device PM state.
+ */
+ requested = ipc_protocol_pm_dev_get_sleep_notification(ipc_protocol);
+
+ return ipc_pm_dev_slp_notification(&ipc_protocol->pm, requested);
+}
+
+static int ipc_protocol_tq_wakeup_dev_slp(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size)
+{
+ struct iosm_pm *ipc_pm = &ipc_imem->ipc_protocol->pm;
+
+ /* Wakeup from device sleep if it is not ACTIVE */
+ ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_HS, true);
+
+ ipc_pm_trigger(ipc_pm, IPC_PM_UNIT_HS, false);
+
+ return 0;
+}
+
+void ipc_protocol_s2idle_sleep(struct iosm_protocol *ipc_protocol, bool sleep)
+{
+ ipc_pm_set_s2idle_sleep(&ipc_protocol->pm, sleep);
+}
+
+bool ipc_protocol_suspend(struct iosm_protocol *ipc_protocol)
+{
+ if (!ipc_pm_prepare_host_sleep(&ipc_protocol->pm))
+ goto err;
+
+ ipc_task_queue_send_task(ipc_protocol->imem,
+ ipc_protocol_tq_wakeup_dev_slp, 0, NULL, 0,
+ true);
+
+ if (!ipc_pm_wait_for_device_active(&ipc_protocol->pm)) {
+ ipc_uevent_send(ipc_protocol->pcie->dev, UEVENT_MDM_TIMEOUT);
+ goto err;
+ }
+
+ /* Send the sleep message for sync sys calls. */
+ dev_dbg(ipc_protocol->dev, "send TARGET_HOST, ENTER_SLEEP");
+ if (ipc_protocol_msg_send_host_sleep(ipc_protocol,
+ IPC_HOST_SLEEP_ENTER_SLEEP)) {
+ /* Sending ENTER_SLEEP message failed, we are still active */
+ ipc_protocol->pm.host_pm_state = IPC_MEM_HOST_PM_ACTIVE;
+ goto err;
+ }
+
+ ipc_protocol->pm.host_pm_state = IPC_MEM_HOST_PM_SLEEP;
+ return true;
+err:
+ return false;
+}
+
+bool ipc_protocol_resume(struct iosm_protocol *ipc_protocol)
+{
+ if (!ipc_pm_prepare_host_active(&ipc_protocol->pm))
+ return false;
+
+ dev_dbg(ipc_protocol->dev, "send TARGET_HOST, EXIT_SLEEP");
+ if (ipc_protocol_msg_send_host_sleep(ipc_protocol,
+ IPC_HOST_SLEEP_EXIT_SLEEP)) {
+ ipc_protocol->pm.host_pm_state = IPC_MEM_HOST_PM_SLEEP;
+ return false;
+ }
+
+ ipc_protocol->pm.host_pm_state = IPC_MEM_HOST_PM_ACTIVE;
+
+ return true;
+}
+
+struct iosm_protocol *ipc_protocol_init(struct iosm_imem *ipc_imem)
+{
+ struct iosm_protocol *ipc_protocol =
+ kzalloc(sizeof(*ipc_protocol), GFP_KERNEL);
+ struct ipc_protocol_context_info *p_ci;
+ u64 addr;
+
+ if (!ipc_protocol)
+ return NULL;
+
+ ipc_protocol->dev = ipc_imem->dev;
+ ipc_protocol->pcie = ipc_imem->pcie;
+ ipc_protocol->imem = ipc_imem;
+ ipc_protocol->p_ap_shm = NULL;
+ ipc_protocol->phy_ap_shm = 0;
+
+ ipc_protocol->old_msg_tail = 0;
+
+ ipc_protocol->p_ap_shm =
+ pci_alloc_consistent(ipc_protocol->pcie->pci,
+ sizeof(*ipc_protocol->p_ap_shm),
+ &ipc_protocol->phy_ap_shm);
+
+ if (!ipc_protocol->p_ap_shm) {
+ dev_err(ipc_protocol->dev, "pci shm alloc error");
+ kfree(ipc_protocol);
+ return NULL;
+ }
+
+ /* Prepare the context info for CP. */
+ addr = ipc_protocol->phy_ap_shm;
+ p_ci = &ipc_protocol->p_ap_shm->ci;
+ p_ci->device_info_addr =
+ addr + offsetof(struct ipc_protocol_ap_shm, device_info);
+ p_ci->head_array =
+ addr + offsetof(struct ipc_protocol_ap_shm, head_array);
+ p_ci->tail_array =
+ addr + offsetof(struct ipc_protocol_ap_shm, tail_array);
+ p_ci->msg_head = addr + offsetof(struct ipc_protocol_ap_shm, msg_head);
+ p_ci->msg_tail = addr + offsetof(struct ipc_protocol_ap_shm, msg_tail);
+ p_ci->msg_ring_addr =
+ addr + offsetof(struct ipc_protocol_ap_shm, msg_ring);
+ p_ci->msg_ring_entries = cpu_to_le16(IPC_MEM_MSG_ENTRIES);
+ p_ci->msg_irq_vector = IPC_MSG_IRQ_VECTOR;
+ p_ci->device_info_irq_vector = IPC_DEVICE_IRQ_VECTOR;
+
+ ipc_mmio_set_contex_info_addr(ipc_imem->mmio, addr);
+
+ ipc_pm_init(ipc_protocol);
+
+ return ipc_protocol;
+}
+
+void ipc_protocol_deinit(struct iosm_protocol *proto)
+{
+ pci_free_consistent(proto->pcie->pci, sizeof(*proto->p_ap_shm),
+ proto->p_ap_shm, proto->phy_ap_shm);
+
+ ipc_pm_deinit(proto);
+ kfree(proto);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_protocol.h b/drivers/net/wwan/iosm/iosm_ipc_protocol.h
new file mode 100644
index 000000000000..9b3a6d86ece7
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_protocol.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_PROTOCOL_H
+#define IOSM_IPC_PROTOCOL_H
+
+#include "iosm_ipc_imem.h"
+#include "iosm_ipc_pm.h"
+#include "iosm_ipc_protocol_ops.h"
+
+/* Trigger the doorbell interrupt on CP. */
+#define IPC_DOORBELL_IRQ_HPDA 0
+#define IPC_DOORBELL_IRQ_IPC 1
+#define IPC_DOORBELL_IRQ_SLEEP 2
+
+/* IRQ vector number. */
+#define IPC_DEVICE_IRQ_VECTOR 0
+#define IPC_MSG_IRQ_VECTOR 0
+#define IPC_UL_PIPE_IRQ_VECTOR 0
+#define IPC_DL_PIPE_IRQ_VECTOR 0
+
+#define IPC_MEM_MSG_ENTRIES 128
+
+/* Default timeout for sending IPC messages like open pipe, close pipe, etc.
+ * during run mode.
+ *
+ * If the message interface lock to CP times out, the link to CP is broken.
+ * mode : run mode (IPC_MEM_EXEC_STAGE_RUN)
+ * unit : milliseconds
+ */
+#define IPC_MSG_COMPLETE_RUN_DEFAULT_TIMEOUT 500 /* 0.5 seconds */
+
+/* Default timeout for sending IPC messages like open pipe, close pipe, etc.
+ * during boot mode.
+ *
+ * If the message interface lock to CP times out, the link to CP is broken.
+ * mode : boot mode
+ * (IPC_MEM_EXEC_STAGE_BOOT | IPC_MEM_EXEC_STAGE_PSI | IPC_MEM_EXEC_STAGE_EBL)
+ * unit : milliseconds
+ */
+#define IPC_MSG_COMPLETE_BOOT_DEFAULT_TIMEOUT 500 /* 0.5 seconds */
+
+/**
+ * struct ipc_protocol_context_info - Structure of the context info
+ * @device_info_addr: 64 bit address to device info
+ * @head_array: 64 bit address to head pointer array for the pipes
+ * @tail_array: 64 bit address to tail pointer array for the pipes
+ * @msg_head: 64 bit address to message head pointer
+ * @msg_tail: 64 bit address to message tail pointer
+ * @msg_ring_addr: 64 bit pointer to the message ring buffer
+ * @msg_ring_entries: This field provides the number of entries which
+ * the MR can hold
+ * @msg_irq_vector: This field provides the IRQ which shall be
+ * generated by the EP device when generating
+ * completions for messages.
+ * @device_info_irq_vector: This field provides the IRQ which shall be
+ * generated by the EP dev after updating Dev. Info
+ */
+struct ipc_protocol_context_info {
+ phys_addr_t device_info_addr;
+ phys_addr_t head_array;
+ phys_addr_t tail_array;
+ phys_addr_t msg_head;
+ phys_addr_t msg_tail;
+ phys_addr_t msg_ring_addr;
+ __le16 msg_ring_entries;
+ u8 msg_irq_vector;
+ u8 device_info_irq_vector;
+};
+
+/**
+ * struct ipc_protocol_device_info - Structure for the device information
+ * @execution_stage: CP execution stage
+ * @ipc_status: IPC states
+ * @device_sleep_notification: Requested device pm states
+ */
+struct ipc_protocol_device_info {
+ __le32 execution_stage;
+ __le32 ipc_status;
+ __le32 device_sleep_notification;
+};
+
+/**
+ * struct ipc_protocol_ap_shm - Protocol Shared Memory Structure
+ * @ci: Context information struct
+ * @device_info: Device information struct
+ * @msg_head: Points to the message head
+ * @head_array: Array of head pointers
+ * @msg_tail: Points to the message tail
+ * @tail_array: Array of tail pointers
+ * @msg_ring: Circular buffers for the read/tail and write/head
+ * indices.
+ */
+struct ipc_protocol_ap_shm {
+ struct ipc_protocol_context_info ci;
+ struct ipc_protocol_device_info device_info;
+ __le32 msg_head;
+ __le32 head_array[IPC_MEM_MAX_PIPES];
+ __le32 msg_tail;
+ __le32 tail_array[IPC_MEM_MAX_PIPES];
+ union ipc_mem_msg_entry msg_ring[IPC_MEM_MSG_ENTRIES];
+};
+
+/**
+ * struct iosm_protocol - Structure for IPC protocol.
+ * @p_ap_shm: Pointer to Protocol Shared Memory Structure
+ * @pm: Instance to struct iosm_pm
+ * @pcie: Pointer to struct iosm_pcie
+ * @imem: Pointer to struct iosm_imem
+ * @rsp_ring: Array of OS completion objects to be triggered once CP
+ * acknowledges a request in the message ring
+ * @dev: Pointer to device structure
+ * @phy_ap_shm: Physical/Mapped representation of the shared memory info
+ * @old_msg_tail: Old msg tail ptr, until AP has handled ACK's from CP
+ */
+struct iosm_protocol {
+ struct ipc_protocol_ap_shm *p_ap_shm;
+ struct iosm_pm pm;
+ struct iosm_pcie *pcie;
+ struct iosm_imem *imem;
+ struct ipc_rsp *rsp_ring[IPC_MEM_MSG_ENTRIES];
+ struct device *dev;
+ phys_addr_t phy_ap_shm;
+ u32 old_msg_tail;
+};
+
+/**
+ * struct ipc_call_msg_send_args - Structure for message argument for
+ * tasklet function.
+ * @prep_args: Arguments for message preparation function
+ * @response: Can be NULL if result can be ignored
+ * @msg_type: Message Type
+ */
+struct ipc_call_msg_send_args {
+ union ipc_msg_prep_args *prep_args;
+ struct ipc_rsp *response;
+ enum ipc_msg_prep_type msg_type;
+};
+
+/**
+ * ipc_protocol_tq_msg_send - prepare the msg and send to CP
+ * @ipc_protocol: Pointer to ipc_protocol instance
+ * @msg_type: Message type
+ * @prep_args: Message arguments
+ * @response: Pointer to a response object which has a
+ * completion object and return code.
+ *
+ * Returns: Message ring index on success and failure value on error
+ */
+int ipc_protocol_tq_msg_send(struct iosm_protocol *ipc_protocol,
+ enum ipc_msg_prep_type msg_type,
+ union ipc_msg_prep_args *prep_args,
+ struct ipc_rsp *response);
+
+/**
+ * ipc_protocol_msg_send - Send ipc control message to CP and wait for response
+ * @ipc_protocol: Pointer to ipc_protocol instance
+ * @prep: Message type
+ * @prep_args: Message arguments
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_protocol_msg_send(struct iosm_protocol *ipc_protocol,
+ enum ipc_msg_prep_type prep,
+ union ipc_msg_prep_args *prep_args);
+
+/**
+ * ipc_protocol_suspend - Signal to CP that host wants to go to sleep (suspend).
+ * @ipc_protocol: Pointer to ipc_protocol instance
+ *
+ * Returns: true if host can suspend, false if suspend must be aborted.
+ */
+bool ipc_protocol_suspend(struct iosm_protocol *ipc_protocol);
+
+/**
+ * ipc_protocol_s2idle_sleep - Call PM function to set PM variables in s2idle
+ * sleep/active case
+ * @ipc_protocol: Pointer to ipc_protocol instance
+ * @sleep: True for sleep/False for active
+ */
+void ipc_protocol_s2idle_sleep(struct iosm_protocol *ipc_protocol, bool sleep);
+
+/**
+ * ipc_protocol_resume - Signal to CP that host wants to resume operation.
+ * @ipc_protocol: Pointer to ipc_protocol instance
+ *
+ * Returns: true if host can resume, false if there is a problem.
+ */
+bool ipc_protocol_resume(struct iosm_protocol *ipc_protocol);
+
+/**
+ * ipc_protocol_pm_dev_sleep_handle - Handles the Device Sleep state change
+ * notification.
+ * @ipc_protocol: Pointer to ipc_protocol instance.
+ *
+ * Returns: true if sleep notification handled, false otherwise.
+ */
+bool ipc_protocol_pm_dev_sleep_handle(struct iosm_protocol *ipc_protocol);
+
+/**
+ * ipc_protocol_doorbell_trigger - Wrapper for the PM function which wakes up
+ * the device if it is in low power mode
+ * and triggers a head pointer update interrupt.
+ * @ipc_protocol: Pointer to ipc_protocol instance.
+ * @identifier: Specifies what component triggered hpda
+ * update irq
+ */
+void ipc_protocol_doorbell_trigger(struct iosm_protocol *ipc_protocol,
+ u32 identifier);
+
+/**
+ * ipc_protocol_sleep_notification_string - Returns last Sleep Notification as
+ * string.
+ * @ipc_protocol: Instance pointer of Protocol module.
+ *
+ * Returns: Pointer to string.
+ */
+const char *
+ipc_protocol_sleep_notification_string(struct iosm_protocol *ipc_protocol);
+
+/**
+ * ipc_protocol_init - Allocates IPC protocol instance
+ * @ipc_imem: Pointer to iosm_imem structure
+ *
+ * Returns: Address of IPC protocol instance on success & NULL on failure.
+ */
+struct iosm_protocol *ipc_protocol_init(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_protocol_deinit - Deallocates IPC protocol instance
+ * @ipc_protocol: pointer to the IPC protocol instance
+ */
+void ipc_protocol_deinit(struct iosm_protocol *ipc_protocol);
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
new file mode 100644
index 000000000000..91109e27efd3
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.c
@@ -0,0 +1,552 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include "iosm_ipc_protocol.h"
+#include "iosm_ipc_protocol_ops.h"
+
+/* Get the next free message element. */
+static union ipc_mem_msg_entry *
+ipc_protocol_free_msg_get(struct iosm_protocol *ipc_protocol, int *index)
+{
+ u32 head = le32_to_cpu(ipc_protocol->p_ap_shm->msg_head);
+ u32 new_head = (head + 1) % IPC_MEM_MSG_ENTRIES;
+ union ipc_mem_msg_entry *msg;
+
+ if (new_head == le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail)) {
+ dev_err(ipc_protocol->dev, "message ring is full");
+ return NULL;
+ }
+
+ /* Get the pointer to the next free message element,
+ * reset the fields and mark it as invalid.
+ */
+ msg = &ipc_protocol->p_ap_shm->msg_ring[head];
+ memset(msg, 0, sizeof(*msg));
+
+ /* return index in message ring */
+ *index = head;
+
+ return msg;
+}
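+
+/* Worked example (illustrative): with IPC_MEM_MSG_ENTRIES == 128, a head
+ * of 127 wraps to new_head == 0. The ring counts as full when
+ * new_head == tail, so at most 127 messages can be outstanding; one slot
+ * is always sacrificed to distinguish "full" from "empty".
+ */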
+
+/* Updates the message ring Head pointer */
+void ipc_protocol_msg_hp_update(struct iosm_imem *ipc_imem)
+{
+ struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;
+ u32 head = le32_to_cpu(ipc_protocol->p_ap_shm->msg_head);
+ u32 new_head = (head + 1) % IPC_MEM_MSG_ENTRIES;
+
+ /* Update head pointer and fire doorbell. */
+ ipc_protocol->p_ap_shm->msg_head = cpu_to_le32(new_head);
+ ipc_protocol->old_msg_tail =
+ le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail);
+
+ ipc_pm_signal_hpda_doorbell(&ipc_protocol->pm, IPC_HP_MR, false);
+}
+
+/* Allocate and prepare an OPEN_PIPE message.
+ * This also allocates the memory for the new TDR structure and
+ * updates the pipe structure referenced in the preparation arguments.
+ */
+static int ipc_protocol_msg_prepipe_open(struct iosm_protocol *ipc_protocol,
+ union ipc_msg_prep_args *args)
+{
+ int index;
+ union ipc_mem_msg_entry *msg =
+ ipc_protocol_free_msg_get(ipc_protocol, &index);
+ struct ipc_pipe *pipe = args->pipe_open.pipe;
+ struct ipc_protocol_td *tdr;
+ struct sk_buff **skbr;
+
+ if (!msg) {
+ dev_err(ipc_protocol->dev, "failed to get free message");
+ return -EIO;
+ }
+
+ /* Allocate the skb ring elements for the skbs that are in flight.
+ * The SKB ring is an internal memory allocation of the driver. No need
+ * to re-calculate the start and end addresses.
+ */
+ skbr = kcalloc(pipe->nr_of_entries, sizeof(*skbr), GFP_ATOMIC);
+ if (!skbr)
+ return -ENOMEM;
+
+ /* Allocate the transfer descriptors for the pipe. */
+ tdr = pci_alloc_consistent(ipc_protocol->pcie->pci,
+ pipe->nr_of_entries * sizeof(*tdr),
+ &pipe->phy_tdr_start);
+ if (!tdr) {
+ kfree(skbr);
+ dev_err(ipc_protocol->dev, "tdr alloc error");
+ return -ENOMEM;
+ }
+
+ pipe->max_nr_of_queued_entries = pipe->nr_of_entries - 1;
+ pipe->nr_of_queued_entries = 0;
+ pipe->tdr_start = tdr;
+ pipe->skbr_start = skbr;
+ pipe->old_tail = 0;
+
+ ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] = 0;
+
+ msg->open_pipe.type_of_message = IPC_MEM_MSG_OPEN_PIPE;
+ msg->open_pipe.pipe_nr = pipe->pipe_nr;
+ msg->open_pipe.tdr_addr = cpu_to_le64(pipe->phy_tdr_start);
+ msg->open_pipe.tdr_entries = cpu_to_le16(pipe->nr_of_entries);
+ msg->open_pipe.accumulation_backoff =
+ cpu_to_le32(pipe->accumulation_backoff);
+ msg->open_pipe.irq_vector = cpu_to_le32(pipe->irq);
+
+ return index;
+}
+
+static int ipc_protocol_msg_prepipe_close(struct iosm_protocol *ipc_protocol,
+ union ipc_msg_prep_args *args)
+{
+ int index = -1;
+ union ipc_mem_msg_entry *msg =
+ ipc_protocol_free_msg_get(ipc_protocol, &index);
+ struct ipc_pipe *pipe = args->pipe_close.pipe;
+
+ if (!msg)
+ return -EIO;
+
+ msg->close_pipe.type_of_message = IPC_MEM_MSG_CLOSE_PIPE;
+ msg->close_pipe.pipe_nr = pipe->pipe_nr;
+
+ dev_dbg(ipc_protocol->dev, "IPC_MEM_MSG_CLOSE_PIPE(pipe_nr=%d)",
+ msg->close_pipe.pipe_nr);
+
+ return index;
+}
+
+static int ipc_protocol_msg_prep_sleep(struct iosm_protocol *ipc_protocol,
+ union ipc_msg_prep_args *args)
+{
+ int index = -1;
+ union ipc_mem_msg_entry *msg =
+ ipc_protocol_free_msg_get(ipc_protocol, &index);
+
+ if (!msg) {
+ dev_err(ipc_protocol->dev, "failed to get free message");
+ return -EIO;
+ }
+
+ /* Prepare and send the host sleep message to CP to enter or exit D3. */
+ msg->host_sleep.type_of_message = IPC_MEM_MSG_SLEEP;
+ msg->host_sleep.target = args->sleep.target; /* 0=host, 1=device */
+
+ /* state: 0=enter, 1=exit, 2=enter w/o protocol */
+ msg->host_sleep.state = args->sleep.state;
+
+ dev_dbg(ipc_protocol->dev, "IPC_MEM_MSG_SLEEP(target=%d; state=%d)",
+ msg->host_sleep.target, msg->host_sleep.state);
+
+ return index;
+}
+
+static int ipc_protocol_msg_prep_feature_set(struct iosm_protocol *ipc_protocol,
+ union ipc_msg_prep_args *args)
+{
+ int index = -1;
+ union ipc_mem_msg_entry *msg =
+ ipc_protocol_free_msg_get(ipc_protocol, &index);
+
+ if (!msg) {
+ dev_err(ipc_protocol->dev, "failed to get free message");
+ return -EIO;
+ }
+
+ msg->feature_set.type_of_message = IPC_MEM_MSG_FEATURE_SET;
+ msg->feature_set.reset_enable = args->feature_set.reset_enable <<
+ RESET_BIT;
+
+ dev_dbg(ipc_protocol->dev, "IPC_MEM_MSG_FEATURE_SET(reset_enable=%d)",
+ msg->feature_set.reset_enable >> RESET_BIT);
+
+ return index;
+}
+
+/* Processes the message consumed by CP. */
+bool ipc_protocol_msg_process(struct iosm_imem *ipc_imem, int irq)
+{
+ struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;
+ struct ipc_rsp **rsp_ring = ipc_protocol->rsp_ring;
+ bool msg_processed = false;
+ u32 i;
+
+ if (le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail) >=
+ IPC_MEM_MSG_ENTRIES) {
+ dev_err(ipc_protocol->dev, "msg_tail out of range: %d",
+ le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail));
+ return msg_processed;
+ }
+
+ if (irq != IMEM_IRQ_DONT_CARE &&
+ irq != ipc_protocol->p_ap_shm->ci.msg_irq_vector)
+ return msg_processed;
+
+ for (i = ipc_protocol->old_msg_tail;
+ i != le32_to_cpu(ipc_protocol->p_ap_shm->msg_tail);
+ i = (i + 1) % IPC_MEM_MSG_ENTRIES) {
+ union ipc_mem_msg_entry *msg =
+ &ipc_protocol->p_ap_shm->msg_ring[i];
+
+ dev_dbg(ipc_protocol->dev, "msg[%d]: type=%u status=%d", i,
+ msg->common.type_of_message,
+ msg->common.completion_status);
+
+ /* Update response with status and wake up waiting requestor */
+ if (rsp_ring[i]) {
+ rsp_ring[i]->status =
+ le32_to_cpu(msg->common.completion_status);
+ complete(&rsp_ring[i]->completion);
+ rsp_ring[i] = NULL;
+ }
+ msg_processed = true;
+ }
+
+ ipc_protocol->old_msg_tail = i;
+ return msg_processed;
+}
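+
+/* Worked example (illustrative): the tail walk above is modular. With
+ * old_msg_tail == 126 and msg_tail == 1 (128 entries), the loop visits
+ * ring indices 126, 127 and 0, completing any waiting requestors, and
+ * leaves old_msg_tail at 1.
+ */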
+
+/* Sends data from the UL list to CP for the provided pipe by updating the
+ * Head pointer of the given pipe.
+ */
+bool ipc_protocol_ul_td_send(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe,
+ struct sk_buff_head *p_ul_list)
+{
+ struct ipc_protocol_td *td;
+ bool hpda_pending = false;
+ struct sk_buff *skb;
+ s32 free_elements;
+ u32 head;
+ u32 tail;
+
+ if (!ipc_protocol->p_ap_shm) {
+ dev_err(ipc_protocol->dev, "driver is not initialized");
+ return false;
+ }
+
+ /* Get head and tail of the td list and calculate
+ * the number of free elements.
+ */
+ head = le32_to_cpu(ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr]);
+ tail = pipe->old_tail;
+
+ while (!skb_queue_empty(p_ul_list)) {
+ if (head < tail)
+ free_elements = tail - head - 1;
+ else
+ free_elements =
+ pipe->nr_of_entries - head + ((s32)tail - 1);
+
+ if (free_elements <= 0) {
+ dev_dbg(ipc_protocol->dev,
+ "no free td elements for UL pipe %d",
+ pipe->pipe_nr);
+ break;
+ }
+
+ /* Get the td address. */
+ td = &pipe->tdr_start[head];
+
+ /* Take the first element of the uplink list and add it
+ * to the td list.
+ */
+ skb = skb_dequeue(p_ul_list);
+ if (WARN_ON(!skb))
+ break;
+
+ /* Save the reference to the uplink skbuf. */
+ pipe->skbr_start[head] = skb;
+
+ td->buffer.address = IPC_CB(skb)->mapping;
+ td->scs = cpu_to_le32(skb->len) & cpu_to_le32(SIZE_MASK);
+ td->next = 0;
+
+ pipe->nr_of_queued_entries++;
+
+ /* Calculate the new head and save it. */
+ head++;
+ if (head >= pipe->nr_of_entries)
+ head = 0;
+
+ ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] =
+ cpu_to_le32(head);
+ }
+
+ if (pipe->old_head != head) {
+ dev_dbg(ipc_protocol->dev, "New UL TDs Pipe:%d", pipe->pipe_nr);
+
+ pipe->old_head = head;
+ /* Trigger doorbell because of pending UL packets. */
+ hpda_pending = true;
+ }
+
+ return hpda_pending;
+}
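+
+/* Worked example (illustrative) for the free-element computation above:
+ * with nr_of_entries == 8, head == 5 and tail == 2, head >= tail, so
+ * free_elements = 8 - 5 + (2 - 1) = 4; indices 5, 6, 7 and 0 can be
+ * filled before head would catch up with tail - 1.
+ */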
+
+/* Checks for Tail pointer update from CP and returns the data as SKB. */
+struct sk_buff *ipc_protocol_ul_td_process(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe)
+{
+ struct ipc_protocol_td *p_td = &pipe->tdr_start[pipe->old_tail];
+ struct sk_buff *skb = pipe->skbr_start[pipe->old_tail];
+
+ pipe->nr_of_queued_entries--;
+ pipe->old_tail++;
+ if (pipe->old_tail >= pipe->nr_of_entries)
+ pipe->old_tail = 0;
+
+ if (!p_td->buffer.address) {
+ dev_err(ipc_protocol->dev, "Td buffer address is NULL");
+ return NULL;
+ }
+
+ if (p_td->buffer.address != IPC_CB(skb)->mapping) {
+ dev_err(ipc_protocol->dev,
+ "pipe %d: invalid buf_addr or skb_data",
+ pipe->pipe_nr);
+ return NULL;
+ }
+
+ return skb;
+}
+
+/* Allocates an SKB for CP to send data and updates the Head Pointer
+ * of the given Pipe#.
+ */
+bool ipc_protocol_dl_td_prepare(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe)
+{
+ struct ipc_protocol_td *td;
+ dma_addr_t mapping = 0;
+ u32 head, new_head;
+ struct sk_buff *skb;
+ u32 tail;
+
+ /* Get head and tail of the td list and calculate
+ * the number of free elements.
+ */
+ head = le32_to_cpu(ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr]);
+ tail = le32_to_cpu(ipc_protocol->p_ap_shm->tail_array[pipe->pipe_nr]);
+
+ new_head = head + 1;
+ if (new_head >= pipe->nr_of_entries)
+ new_head = 0;
+
+ if (new_head == tail)
+ return false;
+
+ /* Get the td address. */
+ td = &pipe->tdr_start[head];
+
+ /* Allocate the skbuf for the descriptor. */
+ skb = ipc_pcie_alloc_skb(ipc_protocol->pcie, pipe->buf_size, GFP_ATOMIC,
+ &mapping, DMA_FROM_DEVICE,
+ IPC_MEM_DL_ETH_OFFSET);
+ if (!skb)
+ return false;
+
+ td->buffer.address = mapping;
+ td->scs = cpu_to_le32(pipe->buf_size) & cpu_to_le32(SIZE_MASK);
+ td->next = 0;
+
+ /* store the new head value. */
+ ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] =
+ cpu_to_le32(new_head);
+
+ /* Save the reference to the skbuf. */
+ pipe->skbr_start[head] = skb;
+
+ pipe->nr_of_queued_entries++;
+
+ return true;
+}
+
+/* Processes DL TDs */
+struct sk_buff *ipc_protocol_dl_td_process(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe)
+{
+ u32 tail =
+ le32_to_cpu(ipc_protocol->p_ap_shm->tail_array[pipe->pipe_nr]);
+ struct ipc_protocol_td *p_td;
+ struct sk_buff *skb;
+
+ if (!pipe->tdr_start)
+ return NULL;
+
+ /* Copy the reference to the downlink buffer. */
+ p_td = &pipe->tdr_start[pipe->old_tail];
+ skb = pipe->skbr_start[pipe->old_tail];
+
+ /* Reset the ring elements. */
+ pipe->skbr_start[pipe->old_tail] = NULL;
+
+ pipe->nr_of_queued_entries--;
+
+ pipe->old_tail++;
+ if (pipe->old_tail >= pipe->nr_of_entries)
+ pipe->old_tail = 0;
+
+ if (!skb) {
+ dev_err(ipc_protocol->dev, "skb is null");
+ goto ret;
+ } else if (!p_td->buffer.address) {
+ dev_err(ipc_protocol->dev, "td/buffer address is null");
+ ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
+ skb = NULL;
+ goto ret;
+ }
+
+ if (!IPC_CB(skb)) {
+ dev_err(ipc_protocol->dev, "pipe# %d, tail: %d skb_cb is NULL",
+ pipe->pipe_nr, tail);
+ ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
+ skb = NULL;
+ goto ret;
+ }
+
+ if (p_td->buffer.address != IPC_CB(skb)->mapping) {
+ dev_err(ipc_protocol->dev, "invalid buf=%p or skb=%p",
+ (void *)p_td->buffer.address, skb->data);
+ ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
+ skb = NULL;
+ goto ret;
+ } else if ((le32_to_cpu(p_td->scs) & SIZE_MASK) > pipe->buf_size) {
+ dev_err(ipc_protocol->dev, "invalid buffer size %d > %d",
+ le32_to_cpu(p_td->scs) & SIZE_MASK,
+ pipe->buf_size);
+ ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
+ skb = NULL;
+ goto ret;
+ } else if (le32_to_cpu(p_td->scs) >> COMPLETION_STATUS ==
+ IPC_MEM_TD_CS_ABORT) {
+ /* Discard aborted buffers. */
+ dev_dbg(ipc_protocol->dev, "discard 'aborted' buffers");
+ ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
+ skb = NULL;
+ goto ret;
+ }
+
+ /* Set the length field in skbuf. */
+ skb_put(skb, le32_to_cpu(p_td->scs) & SIZE_MASK);
+
+ret:
+ return skb;
+}
+
+void ipc_protocol_get_head_tail_index(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe, u32 *head,
+ u32 *tail)
+{
+ struct ipc_protocol_ap_shm *ipc_ap_shm = ipc_protocol->p_ap_shm;
+
+ if (head)
+ *head = le32_to_cpu(ipc_ap_shm->head_array[pipe->pipe_nr]);
+
+ if (tail)
+ *tail = le32_to_cpu(ipc_ap_shm->tail_array[pipe->pipe_nr]);
+}
+
+/* Frees the TDs given to CP. */
+void ipc_protocol_pipe_cleanup(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe)
+{
+ struct sk_buff *skb;
+ u32 head;
+ u32 tail;
+
+ /* Get the start and the end of the buffer list. */
+ head = le32_to_cpu(ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr]);
+ tail = pipe->old_tail;
+
+ /* Reset tail and head to 0. */
+ ipc_protocol->p_ap_shm->tail_array[pipe->pipe_nr] = 0;
+ ipc_protocol->p_ap_shm->head_array[pipe->pipe_nr] = 0;
+
+ /* Free pending uplink and downlink buffers. */
+ if (pipe->skbr_start) {
+ while (head != tail) {
+ /* Get the reference to the skbuf,
+ * which is in flight, and free it.
+ */
+ skb = pipe->skbr_start[tail];
+ if (skb)
+ ipc_pcie_kfree_skb(ipc_protocol->pcie, skb);
+
+ tail++;
+ if (tail >= pipe->nr_of_entries)
+ tail = 0;
+ }
+
+ kfree(pipe->skbr_start);
+ pipe->skbr_start = NULL;
+ }
+
+ pipe->old_tail = 0;
+
+ /* Free and reset the td and skbuf circular buffers. kfree is safe! */
+ if (pipe->tdr_start) {
+ pci_free_consistent(ipc_protocol->pcie->pci,
+ sizeof(*pipe->tdr_start) *
+ pipe->nr_of_entries,
+ pipe->tdr_start, pipe->phy_tdr_start);
+
+ pipe->tdr_start = NULL;
+ }
+}
+
+enum ipc_mem_device_ipc_state ipc_protocol_get_ipc_status(struct iosm_protocol
+ *ipc_protocol)
+{
+ return (enum ipc_mem_device_ipc_state)
+ le32_to_cpu(ipc_protocol->p_ap_shm->device_info.ipc_status);
+}
+
+enum ipc_mem_exec_stage
+ipc_protocol_get_ap_exec_stage(struct iosm_protocol *ipc_protocol)
+{
+ return le32_to_cpu(ipc_protocol->p_ap_shm->device_info.execution_stage);
+}
+
+int ipc_protocol_msg_prep(struct iosm_imem *ipc_imem,
+ enum ipc_msg_prep_type msg_type,
+ union ipc_msg_prep_args *args)
+{
+ struct iosm_protocol *ipc_protocol = ipc_imem->ipc_protocol;
+
+ switch (msg_type) {
+ case IPC_MSG_PREP_SLEEP:
+ return ipc_protocol_msg_prep_sleep(ipc_protocol, args);
+
+ case IPC_MSG_PREP_PIPE_OPEN:
+ return ipc_protocol_msg_prepipe_open(ipc_protocol, args);
+
+ case IPC_MSG_PREP_PIPE_CLOSE:
+ return ipc_protocol_msg_prepipe_close(ipc_protocol, args);
+
+ case IPC_MSG_PREP_FEATURE_SET:
+ return ipc_protocol_msg_prep_feature_set(ipc_protocol, args);
+
+ /* Unsupported messages in protocol */
+ case IPC_MSG_PREP_MAP:
+ case IPC_MSG_PREP_UNMAP:
+ default:
+ dev_err(ipc_protocol->dev,
+ "unsupported message type: %d in protocol", msg_type);
+ return -EINVAL;
+ }
+}
+
+u32
+ipc_protocol_pm_dev_get_sleep_notification(struct iosm_protocol *ipc_protocol)
+{
+ struct ipc_protocol_ap_shm *ipc_ap_shm = ipc_protocol->p_ap_shm;
+
+ return le32_to_cpu(ipc_ap_shm->device_info.device_sleep_notification);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.h b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.h
new file mode 100644
index 000000000000..35aa1387306e
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_protocol_ops.h
@@ -0,0 +1,444 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_PROTOCOL_OPS_H
+#define IOSM_IPC_PROTOCOL_OPS_H
+
+#define SIZE_MASK 0x00FFFFFF
+#define COMPLETION_STATUS 24
+#define RESET_BIT 7
+
+/**
+ * enum ipc_mem_td_cs - Completion status of a TD
+ * @IPC_MEM_TD_CS_INVALID: Initial status - td not yet used.
+ * @IPC_MEM_TD_CS_PARTIAL_TRANSFER: More data pending -> next TD used for this
+ * @IPC_MEM_TD_CS_END_TRANSFER: IO transfer is complete.
+ * @IPC_MEM_TD_CS_OVERFLOW: IO transfer too small for the buffer to write
+ * @IPC_MEM_TD_CS_ABORT: TD marked as abort and shall be discarded
+ * by AP.
+ * @IPC_MEM_TD_CS_ERROR: General error.
+ */
+enum ipc_mem_td_cs {
+ IPC_MEM_TD_CS_INVALID,
+ IPC_MEM_TD_CS_PARTIAL_TRANSFER,
+ IPC_MEM_TD_CS_END_TRANSFER,
+ IPC_MEM_TD_CS_OVERFLOW,
+ IPC_MEM_TD_CS_ABORT,
+ IPC_MEM_TD_CS_ERROR,
+};
+
+/**
+ * enum ipc_mem_msg_cs - Completion status of IPC Message
+ * @IPC_MEM_MSG_CS_INVALID: Initial status.
+ * @IPC_MEM_MSG_CS_SUCCESS: IPC Message completion success.
+ * @IPC_MEM_MSG_CS_ERROR: Message send error.
+ */
+enum ipc_mem_msg_cs {
+ IPC_MEM_MSG_CS_INVALID,
+ IPC_MEM_MSG_CS_SUCCESS,
+ IPC_MEM_MSG_CS_ERROR,
+};
+
+/**
+ * struct ipc_msg_prep_args_pipe - struct for pipe args for message preparation
+ * @pipe: Pipe to open/close
+ */
+struct ipc_msg_prep_args_pipe {
+ struct ipc_pipe *pipe;
+};
+
+/**
+ * struct ipc_msg_prep_args_sleep - struct for sleep args for message
+ * preparation
+ * @target: 0=host, 1=device
+ * @state: 0=enter sleep, 1=exit sleep
+ */
+struct ipc_msg_prep_args_sleep {
+ unsigned int target;
+ unsigned int state;
+};
+
+/**
+ * struct ipc_msg_prep_feature_set - struct for feature set argument for
+ * message preparation
+ * @reset_enable: 0=out-of-band, 1=in-band-crash notification
+ */
+struct ipc_msg_prep_feature_set {
+ u8 reset_enable;
+};
+
+/**
+ * struct ipc_msg_prep_map - struct for map argument for message preparation
+ * @region_id: Region to map
+ * @addr: Pcie addr of region to map
+ * @size: Size of the region to map
+ */
+struct ipc_msg_prep_map {
+ unsigned int region_id;
+ unsigned long addr;
+ size_t size;
+};
+
+/**
+ * struct ipc_msg_prep_unmap - struct for unmap argument for message preparation
+ * @region_id: Region to unmap
+ */
+struct ipc_msg_prep_unmap {
+ unsigned int region_id;
+};
+
+/**
+ * struct ipc_msg_prep_args - Union to handle different message types
+ * @pipe_open: Pipe open message preparation struct
+ * @pipe_close: Pipe close message preparation struct
+ * @sleep: Sleep message preparation struct
+ * @feature_set: Feature set message preparation struct
+ * @map: Memory map message preparation struct
+ * @unmap: Memory unmap message preparation struct
+ */
+union ipc_msg_prep_args {
+ struct ipc_msg_prep_args_pipe pipe_open;
+ struct ipc_msg_prep_args_pipe pipe_close;
+ struct ipc_msg_prep_args_sleep sleep;
+ struct ipc_msg_prep_feature_set feature_set;
+ struct ipc_msg_prep_map map;
+ struct ipc_msg_prep_unmap unmap;
+};
+
+/**
+ * enum ipc_msg_prep_type - Enum for message prepare actions
+ * @IPC_MSG_PREP_SLEEP: Sleep message preparation type
+ * @IPC_MSG_PREP_PIPE_OPEN: Pipe open message preparation type
+ * @IPC_MSG_PREP_PIPE_CLOSE: Pipe close message preparation type
+ * @IPC_MSG_PREP_FEATURE_SET: Feature set message preparation type
+ * @IPC_MSG_PREP_MAP: Memory map message preparation type
+ * @IPC_MSG_PREP_UNMAP: Memory unmap message preparation type
+ */
+enum ipc_msg_prep_type {
+ IPC_MSG_PREP_SLEEP,
+ IPC_MSG_PREP_PIPE_OPEN,
+ IPC_MSG_PREP_PIPE_CLOSE,
+ IPC_MSG_PREP_FEATURE_SET,
+ IPC_MSG_PREP_MAP,
+ IPC_MSG_PREP_UNMAP,
+};
+
+/**
+ * struct ipc_rsp - Response to sent message
+ * @completion: For waking up requestor
+ * @status: Completion status
+ */
+struct ipc_rsp {
+ struct completion completion;
+ enum ipc_mem_msg_cs status;
+};
+
+/**
+ * enum ipc_mem_msg - Type-definition of the messages.
+ * @IPC_MEM_MSG_OPEN_PIPE: AP ->CP: Open a pipe
+ * @IPC_MEM_MSG_CLOSE_PIPE: AP ->CP: Close a pipe
+ * @IPC_MEM_MSG_ABORT_PIPE: AP ->CP: wait for completion of the
+ * running transfer and abort all pending
+ * IO-transfers for the pipe
+ * @IPC_MEM_MSG_SLEEP: AP ->CP: host enter or exit sleep
+ * @IPC_MEM_MSG_FEATURE_SET: AP ->CP: Intel feature configuration
+ */
+enum ipc_mem_msg {
+ IPC_MEM_MSG_OPEN_PIPE = 0x01,
+ IPC_MEM_MSG_CLOSE_PIPE = 0x02,
+ IPC_MEM_MSG_ABORT_PIPE = 0x03,
+ IPC_MEM_MSG_SLEEP = 0x04,
+ IPC_MEM_MSG_FEATURE_SET = 0xF0,
+};
+
+/**
+ * struct ipc_mem_msg_open_pipe - Message structure for open pipe
+ * @tdr_addr: Tdr address
+ * @tdr_entries: Tdr entries
+ * @pipe_nr: Pipe number
+ * @type_of_message: Message type
+ * @irq_vector: MSI vector number
+ * @accumulation_backoff: Time in usec for data accumulation
+ * @completion_status: Message Completion Status
+ */
+struct ipc_mem_msg_open_pipe {
+ __le64 tdr_addr;
+ __le16 tdr_entries;
+ u8 pipe_nr;
+ u8 type_of_message;
+ __le32 irq_vector;
+ __le32 accumulation_backoff;
+ __le32 completion_status;
+};
+
+/**
+ * struct ipc_mem_msg_close_pipe - Message structure for close pipe
+ * @reserved1: Reserved
+ * @reserved2: Reserved
+ * @pipe_nr: Pipe number
+ * @type_of_message: Message type
+ * @reserved3: Reserved
+ * @reserved4: Reserved
+ * @completion_status: Message Completion Status
+ */
+struct ipc_mem_msg_close_pipe {
+ __le32 reserved1[2];
+ __le16 reserved2;
+ u8 pipe_nr;
+ u8 type_of_message;
+ __le32 reserved3;
+ __le32 reserved4;
+ __le32 completion_status;
+};
+
+/**
+ * struct ipc_mem_msg_abort_pipe - Message structure for abort pipe
+ * @reserved1: Reserved
+ * @reserved2: Reserved
+ * @pipe_nr: Pipe number
+ * @type_of_message: Message type
+ * @reserved3: Reserved
+ * @reserved4: Reserved
+ * @completion_status: Message Completion Status
+ */
+struct ipc_mem_msg_abort_pipe {
+ __le32 reserved1[2];
+ __le16 reserved2;
+ u8 pipe_nr;
+ u8 type_of_message;
+ __le32 reserved3;
+ __le32 reserved4;
+ __le32 completion_status;
+};
+
+/**
+ * struct ipc_mem_msg_host_sleep - Message structure for sleep message.
+ * @reserved1: Reserved
+ * @target: 0=host, 1=device; host or EP device
+ * is the message target
+ * @state: 0=enter sleep, 1=exit sleep,
+ * 2=enter sleep no protocol
+ * @reserved2: Reserved
+ * @type_of_message: Message type
+ * @reserved3: Reserved
+ * @reserved4: Reserved
+ * @completion_status: Message Completion Status
+ */
+struct ipc_mem_msg_host_sleep {
+ __le32 reserved1[2];
+ u8 target;
+ u8 state;
+ u8 reserved2;
+ u8 type_of_message;
+ __le32 reserved3;
+ __le32 reserved4;
+ __le32 completion_status;
+};
+
+/**
+ * struct ipc_mem_msg_feature_set - Message structure for feature_set message
+ * @reserved1: Reserved
+ * @reserved2: Reserved
+ * @reset_enable: 0=out-of-band, 1=in-band-crash notification
+ * @type_of_message: Message type
+ * @reserved3: Reserved
+ * @reserved4: Reserved
+ * @completion_status: Message Completion Status
+ */
+struct ipc_mem_msg_feature_set {
+ __le32 reserved1[2];
+ __le16 reserved2;
+ u8 reset_enable;
+ u8 type_of_message;
+ __le32 reserved3;
+ __le32 reserved4;
+ __le32 completion_status;
+};
+
+/**
+ * struct ipc_mem_msg_common - Message structure for completion status update.
+ * @reserved1: Reserved
+ * @reserved2: Reserved
+ * @type_of_message: Message type
+ * @reserved3: Reserved
+ * @reserved4: Reserved
+ * @completion_status: Message Completion Status
+ */
+struct ipc_mem_msg_common {
+ __le32 reserved1[2];
+ u8 reserved2[3];
+ u8 type_of_message;
+ __le32 reserved3;
+ __le32 reserved4;
+ __le32 completion_status;
+};
+
+/**
+ * union ipc_mem_msg_entry - Union with all possible messages.
+ * @open_pipe: Open pipe message struct
+ * @close_pipe: Close pipe message struct
+ * @abort_pipe: Abort pipe message struct
+ * @host_sleep: Host sleep message struct
+ * @feature_set: Feature set message struct
+ * @common: Used to access msg_type and to set the completion status
+ */
+union ipc_mem_msg_entry {
+ struct ipc_mem_msg_open_pipe open_pipe;
+ struct ipc_mem_msg_close_pipe close_pipe;
+ struct ipc_mem_msg_abort_pipe abort_pipe;
+ struct ipc_mem_msg_host_sleep host_sleep;
+ struct ipc_mem_msg_feature_set feature_set;
+ struct ipc_mem_msg_common common;
+};
+
+/* Transfer descriptor definition. */
+struct ipc_protocol_td {
+ union {
+ /* 0 : 63 - 64-bit address of a buffer in host memory. */
+ dma_addr_t address;
+ struct {
+ /* 0 : 31 - 32 bit address */
+ __le32 address;
+ /* 32 : 63 - corresponding descriptor */
+ __le32 desc;
+ } __packed shm;
+ } buffer;
+
+ /* 0 - 2nd byte - Size of the buffer.
+ * The host provides the size of the buffer queued.
+ * The EP device reads this value and shall update
+ * it for downlink transfers to indicate the
+ * amount of data written in buffer.
+ * 3rd byte - This field provides the completion status
+ * of the TD. When queuing the TD, the host sets
+ * the status to 0. The EP device updates this
+ * field when completing the TD.
+ */
+ __le32 scs;
+
+ /* 0th - nr of following descriptors
+ * 1 - 3rd byte - reserved
+ */
+ __le32 next;
+} __packed;
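+
+/* Worked example (illustrative) of the scs encoding: for a completed
+ * 1500-byte downlink transfer the EP device would report
+ * scs = cpu_to_le32((IPC_MEM_TD_CS_END_TRANSFER << COMPLETION_STATUS) |
+ * 1500); the AP recovers the size with le32_to_cpu(scs) & SIZE_MASK and
+ * the status with le32_to_cpu(scs) >> COMPLETION_STATUS.
+ */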
+
+/**
+ * ipc_protocol_msg_prep - Prepare message based upon message type
+ * @ipc_imem: iosm_protocol instance
+ * @msg_type: message prepare type
+ * @args: message arguments
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ipc_protocol_msg_prep(struct iosm_imem *ipc_imem,
+ enum ipc_msg_prep_type msg_type,
+ union ipc_msg_prep_args *args);
+
+/**
+ * ipc_protocol_msg_hp_update - Function for head pointer update
+ * of message ring
+ * @ipc_imem: iosm_protocol instance
+ */
+void ipc_protocol_msg_hp_update(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_protocol_msg_process - Function for processing responses
+ * to IPC messages
+ * @ipc_imem: iosm_protocol instance
+ * @irq: IRQ vector
+ *
+ * Return: True on success, false if error
+ */
+bool ipc_protocol_msg_process(struct iosm_imem *ipc_imem, int irq);
+
+/**
+ * ipc_protocol_ul_td_send - Function for sending the data to CP
+ * @ipc_protocol: iosm_protocol instance
+ * @pipe: Pipe instance
+ * @p_ul_list: uplink sk_buff list
+ *
+ * Return: true in success, false in case of error
+ */
+bool ipc_protocol_ul_td_send(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe,
+ struct sk_buff_head *p_ul_list);
+
+/**
+ * ipc_protocol_ul_td_process - Function for processing the sent data
+ * @ipc_protocol: iosm_protocol instance
+ * @pipe: Pipe instance
+ *
+ * Return: sk_buff instance
+ */
+struct sk_buff *ipc_protocol_ul_td_process(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe);
+
+/**
+ * ipc_protocol_dl_td_prepare - Function for providing DL TDs to CP
+ * @ipc_protocol: iosm_protocol instance
+ * @pipe: Pipe instance
+ *
+ * Return: true in success, false in case of error
+ */
+bool ipc_protocol_dl_td_prepare(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe);
+
+/**
+ * ipc_protocol_dl_td_process - Function for processing the DL data
+ * @ipc_protocol: iosm_protocol instance
+ * @pipe: Pipe instance
+ *
+ * Return: sk_buff instance
+ */
+struct sk_buff *ipc_protocol_dl_td_process(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe);
+
+/**
+ * ipc_protocol_get_head_tail_index - Function for getting Head and Tail
+ * pointer index of given pipe
+ * @ipc_protocol: iosm_protocol instance
+ * @pipe: Pipe Instance
+ * @head: head pointer index of the given pipe
+ * @tail: tail pointer index of the given pipe
+ */
+void ipc_protocol_get_head_tail_index(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe, u32 *head,
+ u32 *tail);
+/**
+ * ipc_protocol_get_ipc_status - Function for getting the IPC Status
+ * @ipc_protocol: iosm_protocol instance
+ *
+ * Return: IPC state
+ */
+enum ipc_mem_device_ipc_state ipc_protocol_get_ipc_status(struct iosm_protocol
+ *ipc_protocol);
+
+/**
+ * ipc_protocol_pipe_cleanup - Function to cleanup pipe resources
+ * @ipc_protocol: iosm_protocol instance
+ * @pipe: Pipe instance
+ */
+void ipc_protocol_pipe_cleanup(struct iosm_protocol *ipc_protocol,
+ struct ipc_pipe *pipe);
+
+/**
+ * ipc_protocol_get_ap_exec_stage - Function for getting AP Exec Stage
+ * @ipc_protocol: Pointer to struct iosm_protocol
+ *
+ * Return: AP execution (boot) stage
+ */
+enum ipc_mem_exec_stage
+ipc_protocol_get_ap_exec_stage(struct iosm_protocol *ipc_protocol);
+
+/**
+ * ipc_protocol_pm_dev_get_sleep_notification - Function for getting Dev Sleep
+ * notification
+ * @ipc_protocol: iosm_protocol instance
+ *
+ * Return: Device PM state
+ */
+u32 ipc_protocol_pm_dev_get_sleep_notification(struct iosm_protocol
+ *ipc_protocol);
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_task_queue.c b/drivers/net/wwan/iosm/iosm_ipc_task_queue.c
new file mode 100644
index 000000000000..852a99166144
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_task_queue.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include "iosm_ipc_imem.h"
+#include "iosm_ipc_task_queue.h"
+
+/* Actual tasklet function, will be called whenever the tasklet is scheduled.
+ * Calls the event handler callback for each element in the message queue.
+ */
+static void ipc_task_queue_handler(unsigned long data)
+{
+ struct ipc_task_queue *ipc_task = (struct ipc_task_queue *)data;
+ unsigned int q_rpos = ipc_task->q_rpos;
+
+ /* Loop over the input queue contents. */
+ while (q_rpos != ipc_task->q_wpos) {
+ /* Get the current first queue element. */
+ struct ipc_task_queue_args *args = &ipc_task->args[q_rpos];
+
+ /* Process the input message. */
+ if (args->func)
+ args->response = args->func(args->ipc_imem, args->arg,
+ args->msg, args->size);
+
+ /* Signal completion for synchronous calls */
+ if (args->completion)
+ complete(args->completion);
+
+ /* Free message if copy was allocated. */
+ if (args->is_copy)
+ kfree(args->msg);
+
+ /* Set the queue element to invalid. Technically,
+ * spin_lock_irqsave is not required here as the
+ * array element has been processed already, so we
+ * can assume that immediately after processing an
+ * element the queue will not rotate back to the
+ * same element within such a short time.
+ */
+ args->completion = NULL;
+ args->func = NULL;
+ args->msg = NULL;
+ args->size = 0;
+ args->is_copy = false;
+
+ /* Calculate the new read pointer and update the volatile
+ * read pointer.
+ */
+ q_rpos = (q_rpos + 1) % IPC_THREAD_QUEUE_SIZE;
+ ipc_task->q_rpos = q_rpos;
+ }
+}
+
+/* Free allocated memory and trigger any completions left in the queue on deinit */
+static void ipc_task_queue_cleanup(struct ipc_task_queue *ipc_task)
+{
+ unsigned int q_rpos = ipc_task->q_rpos;
+
+ while (q_rpos != ipc_task->q_wpos) {
+ struct ipc_task_queue_args *args = &ipc_task->args[q_rpos];
+
+ if (args->completion)
+ complete(args->completion);
+
+ if (args->is_copy)
+ kfree(args->msg);
+
+ q_rpos = (q_rpos + 1) % IPC_THREAD_QUEUE_SIZE;
+ ipc_task->q_rpos = q_rpos;
+ }
+}
+
+/* Add a message to the queue and trigger the ipc_task. */
+static int
+ipc_task_queue_add_task(struct iosm_imem *ipc_imem,
+ int arg, void *msg,
+ int (*func)(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size),
+ size_t size, bool is_copy, bool wait)
+{
+ struct tasklet_struct *ipc_tasklet = ipc_imem->ipc_task->ipc_tasklet;
+ struct ipc_task_queue *ipc_task = &ipc_imem->ipc_task->ipc_queue;
+ struct completion completion;
+ unsigned int pos, nextpos;
+ unsigned long flags;
+ int result = -EIO;
+
+ init_completion(&completion);
+
+ /* The tasklet send may be called from either interrupt or thread
+ * context, therefore protect the queue operation by a spinlock.
+ */
+ spin_lock_irqsave(&ipc_task->q_lock, flags);
+
+ pos = ipc_task->q_wpos;
+ nextpos = (pos + 1) % IPC_THREAD_QUEUE_SIZE;
+
+ /* Get next queue position. */
+ if (nextpos != ipc_task->q_rpos) {
+ /* Get the reference to the queue element and save the passed
+ * values.
+ */
+ ipc_task->args[pos].arg = arg;
+ ipc_task->args[pos].msg = msg;
+ ipc_task->args[pos].func = func;
+ ipc_task->args[pos].ipc_imem = ipc_imem;
+ ipc_task->args[pos].size = size;
+ ipc_task->args[pos].is_copy = is_copy;
+ ipc_task->args[pos].completion = wait ? &completion : NULL;
+ ipc_task->args[pos].response = -1;
+
+ /* apply write barrier so that ipc_task->q_rpos elements
+ * are updated before ipc_task->q_wpos is being updated.
+ */
+ smp_wmb();
+
+ /* Update the status of the free queue space. */
+ ipc_task->q_wpos = nextpos;
+ result = 0;
+ }
+
+ spin_unlock_irqrestore(&ipc_task->q_lock, flags);
+
+ if (result == 0) {
+ tasklet_schedule(ipc_tasklet);
+
+ if (wait) {
+ wait_for_completion(&completion);
+ result = ipc_task->args[pos].response;
+ }
+ } else {
+ dev_err(ipc_imem->ipc_task->dev, "queue is full");
+ }
+
+ return result;
+}
+
+int ipc_task_queue_send_task(struct iosm_imem *imem,
+ int (*func)(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size),
+ int arg, void *msg, size_t size, bool wait)
+{
+ bool is_copy = false;
+ void *copy = msg;
+ int ret = -ENOMEM;
+
+ if (size > 0) {
+ copy = kmemdup(msg, size, GFP_ATOMIC);
+ if (!copy)
+ goto out;
+
+ is_copy = true;
+ }
+
+ ret = ipc_task_queue_add_task(imem, arg, copy, func,
+ size, is_copy, wait);
+ if (ret < 0) {
+ dev_err(imem->ipc_task->dev,
+ "add task failed for %ps %d, %p, %zu, %d", func, arg,
+ copy, size, is_copy);
+ if (is_copy)
+ kfree(copy);
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
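+
+/* Usage sketch (illustrative; my_func is a hypothetical callback): run a
+ * function in tasklet context and wait synchronously for its result:
+ *
+ *     static int my_func(struct iosm_imem *ipc_imem, int arg, void *msg,
+ *                        size_t size)
+ *     {
+ *             return 0;
+ *     }
+ *     ...
+ *     ret = ipc_task_queue_send_task(ipc_imem, my_func, 0, NULL, 0, true);
+ */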
+
+int ipc_task_init(struct ipc_task *ipc_task)
+{
+ struct ipc_task_queue *ipc_queue = &ipc_task->ipc_queue;
+
+ ipc_task->ipc_tasklet = kzalloc(sizeof(*ipc_task->ipc_tasklet),
+ GFP_KERNEL);
+
+ if (!ipc_task->ipc_tasklet)
+ return -ENOMEM;
+
+ /* Initialize the spinlock needed to protect the message queue of the
+ * ipc_task
+ */
+ spin_lock_init(&ipc_queue->q_lock);
+
+ tasklet_init(ipc_task->ipc_tasklet, ipc_task_queue_handler,
+ (unsigned long)ipc_queue);
+ return 0;
+}
+
+void ipc_task_deinit(struct ipc_task *ipc_task)
+{
+ tasklet_kill(ipc_task->ipc_tasklet);
+
+ kfree(ipc_task->ipc_tasklet);
+ /* This will free/complete any outstanding messages,
+ * without calling the actual handler
+ */
+ ipc_task_queue_cleanup(&ipc_task->ipc_queue);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_task_queue.h b/drivers/net/wwan/iosm/iosm_ipc_task_queue.h
new file mode 100644
index 000000000000..df6e9cd925a9
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_task_queue.h
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_TASK_QUEUE_H
+#define IOSM_IPC_TASK_QUEUE_H
+
+/* Number of available elements for the input message queue of the IPC
+ * ipc_task
+ */
+#define IPC_THREAD_QUEUE_SIZE 256
+
+/**
+ * struct ipc_task_queue_args - Struct for Task queue elements
+ * @ipc_imem: Pointer to struct iosm_imem
+ * @msg: Message argument for tasklet function. (optional, can be NULL)
+ * @completion: OS object used to wait for the tasklet function to finish for
+ * synchronous calls
+ * @func: Function to be called in tasklet (tl) context
+ * @arg: Generic integer argument for tasklet function (optional)
+ * @size: Message size argument for tasklet function (optional)
+ * @response: Return code of tasklet function for synchronous calls
+ * @is_copy: Is true if msg contains a pointer to a copy of the original msg
+ * for async. calls that need to be freed once the tasklet returns
+ */
+struct ipc_task_queue_args {
+ struct iosm_imem *ipc_imem;
+ void *msg;
+ struct completion *completion;
+ int (*func)(struct iosm_imem *ipc_imem, int arg, void *msg,
+ size_t size);
+ int arg;
+ size_t size;
+ int response;
+ u8 is_copy:1;
+};
+
+/**
+ * struct ipc_task_queue - Struct for Task queue
+ * @q_lock: Protects the message queue of the IPC task
+ * @args: Message queue of the IPC task
+ * @q_rpos: First queue element to process.
+ * @q_wpos: First free element of the input queue.
+ */
+struct ipc_task_queue {
+ spinlock_t q_lock; /* for atomic operation on queue */
+ struct ipc_task_queue_args args[IPC_THREAD_QUEUE_SIZE];
+ unsigned int q_rpos;
+ unsigned int q_wpos;
+};
+
+/**
+ * struct ipc_task - Struct for Task
+ * @dev: Pointer to device structure
+ * @ipc_tasklet: Tasklet for serialized work offload
+ * from interrupts and OS callbacks
+ * @ipc_queue: Message queue of the IPC task
+ */
+struct ipc_task {
+ struct device *dev;
+ struct tasklet_struct *ipc_tasklet;
+ struct ipc_task_queue ipc_queue;
+};
+
+/**
+ * ipc_task_init - Allocate a tasklet
+ * @ipc_task: Pointer to ipc_task structure
+ * Returns: 0 on success and failure value on error.
+ */
+int ipc_task_init(struct ipc_task *ipc_task);
+
+/**
+ * ipc_task_deinit - Free a tasklet, invalidating its pointer.
+ * @ipc_task: Pointer to ipc_task structure
+ */
+void ipc_task_deinit(struct ipc_task *ipc_task);
+
+/**
+ * ipc_task_queue_send_task - Synchronously/Asynchronously call a function in
+ * tasklet context.
+ * @imem: Pointer to iosm_imem struct
+ * @func: Function to be called in tasklet context
+ * @arg: Integer argument for func
+ * @msg: Message pointer argument for func
+ * @size: Size argument for func
+ * @wait: if true wait for result
+ *
+ * Returns: Result value returned by func or failure value if func could not
+ * be called.
+ */
+int ipc_task_queue_send_task(struct iosm_imem *imem,
+ int (*func)(struct iosm_imem *ipc_imem, int arg,
+ void *msg, size_t size),
+ int arg, void *msg, size_t size, bool wait);
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_uevent.c b/drivers/net/wwan/iosm/iosm_ipc_uevent.c
new file mode 100644
index 000000000000..2229d752926c
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_uevent.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include <linux/device.h>
+#include <linux/kobject.h>
+#include <linux/slab.h>
+
+#include "iosm_ipc_uevent.h"
+
+/* Update the uevent in work queue context */
+static void ipc_uevent_work(struct work_struct *data)
+{
+ struct ipc_uevent_info *info;
+ char *envp[2] = { NULL, NULL };
+
+ info = container_of(data, struct ipc_uevent_info, work);
+
+ envp[0] = info->uevent;
+
+ if (kobject_uevent_env(&info->dev->kobj, KOBJ_CHANGE, envp))
+ pr_err("uevent %s failed to sent", info->uevent);
+
+ kfree(info);
+}
+
+void ipc_uevent_send(struct device *dev, char *uevent)
+{
+ struct ipc_uevent_info *info = kzalloc(sizeof(*info), GFP_ATOMIC);
+
+ if (!info)
+ return;
+
+ /* Initialize the work struct */
+ INIT_WORK(&info->work, ipc_uevent_work);
+
+ /* Store the device and event information */
+ info->dev = dev;
+ snprintf(info->uevent, MAX_UEVENT_LEN, "%s: %s", dev_name(dev), uevent);
+
+ /* Schedule uevent in process context using work queue */
+ schedule_work(&info->work);
+}
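+
+/* Usage sketch (illustrative): callers report modem state changes with one
+ * of the UEVENT_* strings, e.g.
+ *
+ *     ipc_uevent_send(ipc_protocol->pcie->dev, UEVENT_MDM_READY);
+ *
+ * The event is emitted from work queue context, so this is safe to call
+ * from atomic context.
+ */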
diff --git a/drivers/net/wwan/iosm/iosm_ipc_uevent.h b/drivers/net/wwan/iosm/iosm_ipc_uevent.h
new file mode 100644
index 000000000000..2e45c051b5f4
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_uevent.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_UEVENT_H
+#define IOSM_IPC_UEVENT_H
+
+/* Baseband event strings */
+#define UEVENT_MDM_NOT_READY "MDM_NOT_READY"
+#define UEVENT_ROM_READY "ROM_READY"
+#define UEVENT_MDM_READY "MDM_READY"
+#define UEVENT_CRASH "CRASH"
+#define UEVENT_CD_READY "CD_READY"
+#define UEVENT_CD_READY_LINK_DOWN "CD_READY_LINK_DOWN"
+#define UEVENT_MDM_TIMEOUT "MDM_TIMEOUT"
+
+/* Maximum length of user events */
+#define MAX_UEVENT_LEN 64
+
+/**
+ * struct ipc_uevent_info - Uevent information structure.
+ * @dev: Pointer to device structure
+ * @uevent: Uevent information
+ * @work: Uevent work struct
+ */
+struct ipc_uevent_info {
+ struct device *dev;
+ char uevent[MAX_UEVENT_LEN];
+ struct work_struct work;
+};
+
+/**
+ * ipc_uevent_send - Send modem event to user space.
+ * @dev: Generic device pointer
+ * @uevent: Uevent information
+ *
+ */
+void ipc_uevent_send(struct device *dev, char *uevent);
+
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
new file mode 100644
index 000000000000..1711b79fc616
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_link.h>
+#include <linux/rtnetlink.h>
+#include <linux/wwan.h>
+
+#include "iosm_ipc_chnl_cfg.h"
+#include "iosm_ipc_imem_ops.h"
+#include "iosm_ipc_wwan.h"
+
+#define IOSM_IP_TYPE_MASK 0xF0
+#define IOSM_IP_TYPE_IPV4 0x40
+#define IOSM_IP_TYPE_IPV6 0x60
+
+#define IOSM_IF_ID_PAYLOAD 2
+
+/**
+ * struct iosm_netdev_priv - netdev private data
+ * @ipc_wwan: Pointer to iosm_wwan struct
+ * @netdev: Pointer to network interface device structure
+ * @if_id: Interface id for device.
+ * @ch_id: IPC channel number for which interface device is created.
+ */
+struct iosm_netdev_priv {
+ struct iosm_wwan *ipc_wwan;
+ struct net_device *netdev;
+ int if_id;
+ int ch_id;
+};
+
+/**
+ * struct iosm_wwan - Information about the WWAN root device and the
+ * interface to the IPC layer.
+ * @ipc_imem: Pointer to imem data-struct
+ * @sub_netlist: List of active netdevs
+ * @dev: Pointer to device structure
+ * @if_mutex: Mutex used for add and remove interface id
+ */
+struct iosm_wwan {
+ struct iosm_imem *ipc_imem;
+ struct iosm_netdev_priv __rcu *sub_netlist[IP_MUX_SESSION_END + 1];
+ struct device *dev;
+ struct mutex if_mutex; /* Mutex used for add and remove interface id */
+};
+
+/* Bring-up the wwan net link */
+static int ipc_wwan_link_open(struct net_device *netdev)
+{
+ struct iosm_netdev_priv *priv = netdev_priv(netdev);
+ struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
+ int if_id = priv->if_id;
+ int ret;
+
+ if (if_id < IP_MUX_SESSION_START ||
+ if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
+ return -EINVAL;
+
+ mutex_lock(&ipc_wwan->if_mutex);
+
+ /* get channel id */
+ priv->ch_id = ipc_imem_sys_wwan_open(ipc_wwan->ipc_imem, if_id);
+
+ if (priv->ch_id < 0) {
+ dev_err(ipc_wwan->dev,
+ "cannot connect wwan0 & id %d to the IPC mem layer",
+ if_id);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* enable tx path, DL data may follow */
+ netif_start_queue(netdev);
+
+ dev_dbg(ipc_wwan->dev, "Channel id %d allocated to if_id %d",
+ priv->ch_id, priv->if_id);
+
+ ret = 0;
+out:
+ mutex_unlock(&ipc_wwan->if_mutex);
+ return ret;
+}
+
+/* Bring-down the wwan net link */
+static int ipc_wwan_link_stop(struct net_device *netdev)
+{
+ struct iosm_netdev_priv *priv = netdev_priv(netdev);
+
+ netif_stop_queue(netdev);
+
+ mutex_lock(&priv->ipc_wwan->if_mutex);
+ ipc_imem_sys_wwan_close(priv->ipc_wwan->ipc_imem, priv->if_id,
+ priv->ch_id);
+ priv->ch_id = -1;
+ mutex_unlock(&priv->ipc_wwan->if_mutex);
+
+ return 0;
+}
+
+/* Transmit a packet */
+static int ipc_wwan_link_transmit(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ struct iosm_netdev_priv *priv = netdev_priv(netdev);
+ struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
+ int if_id = priv->if_id;
+ int ret;
+
+ /* Interface IDs from 1 to 8 are for IP data
+ * & from 257 to 261 are for non-IP data
+ */
+ if (if_id < IP_MUX_SESSION_START ||
+ if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
+ return -EINVAL;
+
+ /* Send the SKB to device for transmission */
+ ret = ipc_imem_sys_wwan_transmit(ipc_wwan->ipc_imem,
+ if_id, priv->ch_id, skb);
+
+ /* Return code of zero is success */
+ if (ret == 0) {
+ ret = NETDEV_TX_OK;
+ } else if (ret == -EBUSY) {
+ ret = NETDEV_TX_BUSY;
+ dev_err(ipc_wwan->dev, "unable to push packets");
+ } else {
+ goto exit;
+ }
+
+ return ret;
+
+exit:
+ /* Log any skb drop */
+ if (if_id)
+ dev_dbg(ipc_wwan->dev, "skb dropped. IF_ID: %d, ret: %d", if_id,
+ ret);
+
+ dev_kfree_skb_any(skb);
+ return ret;
+}
+
+/* Ops structure for wwan net link */
+static const struct net_device_ops ipc_inm_ops = {
+ .ndo_open = ipc_wwan_link_open,
+ .ndo_stop = ipc_wwan_link_stop,
+ .ndo_start_xmit = ipc_wwan_link_transmit,
+};
+
+/* Setup function for creating new net link */
+static void ipc_wwan_setup(struct net_device *iosm_dev)
+{
+ iosm_dev->header_ops = NULL;
+ iosm_dev->hard_header_len = 0;
+ iosm_dev->priv_flags |= IFF_NO_QUEUE;
+
+ iosm_dev->type = ARPHRD_NONE;
+ iosm_dev->min_mtu = ETH_MIN_MTU;
+ iosm_dev->max_mtu = ETH_MAX_MTU;
+
+ iosm_dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+
+ iosm_dev->netdev_ops = &ipc_inm_ops;
+}
+
+/* Create new wwan net link */
+static int ipc_wwan_newlink(void *ctxt, struct net_device *dev,
+ u32 if_id, struct netlink_ext_ack *extack)
+{
+ struct iosm_wwan *ipc_wwan = ctxt;
+ struct iosm_netdev_priv *priv;
+ int err;
+
+ if (if_id < IP_MUX_SESSION_START ||
+ if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
+ return -EINVAL;
+
+ priv = netdev_priv(dev);
+ priv->if_id = if_id;
+ priv->netdev = dev;
+ priv->ipc_wwan = ipc_wwan;
+
+ mutex_lock(&ipc_wwan->if_mutex);
+ if (rcu_access_pointer(ipc_wwan->sub_netlist[if_id])) {
+ err = -EBUSY;
+ goto out_unlock;
+ }
+
+ err = register_netdevice(dev);
+ if (err)
+ goto out_unlock;
+
+ rcu_assign_pointer(ipc_wwan->sub_netlist[if_id], priv);
+ mutex_unlock(&ipc_wwan->if_mutex);
+
+ netif_device_attach(dev);
+
+ return 0;
+
+out_unlock:
+ mutex_unlock(&ipc_wwan->if_mutex);
+ return err;
+}
+
+static void ipc_wwan_dellink(void *ctxt, struct net_device *dev,
+ struct list_head *head)
+{
+ struct iosm_wwan *ipc_wwan = ctxt;
+ struct iosm_netdev_priv *priv = netdev_priv(dev);
+ int if_id = priv->if_id;
+
+ if (WARN_ON(if_id < IP_MUX_SESSION_START ||
+ if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist)))
+ return;
+
+ mutex_lock(&ipc_wwan->if_mutex);
+
+ if (WARN_ON(rcu_access_pointer(ipc_wwan->sub_netlist[if_id]) != priv))
+ goto unlock;
+
+ RCU_INIT_POINTER(ipc_wwan->sub_netlist[if_id], NULL);
+ /* unregistering includes synchronize_net() */
+ unregister_netdevice(dev);
+
+unlock:
+ mutex_unlock(&ipc_wwan->if_mutex);
+}
+
+static const struct wwan_ops iosm_wwan_ops = {
+ .priv_size = sizeof(struct iosm_netdev_priv),
+ .setup = ipc_wwan_setup,
+ .newlink = ipc_wwan_newlink,
+ .dellink = ipc_wwan_dellink,
+};
+
+int ipc_wwan_receive(struct iosm_wwan *ipc_wwan, struct sk_buff *skb_arg,
+ bool dss, int if_id)
+{
+ struct sk_buff *skb = skb_arg;
+ struct net_device_stats *stats;
+ struct iosm_netdev_priv *priv;
+ int ret;
+
+ if ((skb->data[0] & IOSM_IP_TYPE_MASK) == IOSM_IP_TYPE_IPV4)
+ skb->protocol = htons(ETH_P_IP);
+ else if ((skb->data[0] & IOSM_IP_TYPE_MASK) ==
+ IOSM_IP_TYPE_IPV6)
+ skb->protocol = htons(ETH_P_IPV6);
+
+ skb->pkt_type = PACKET_HOST;
+
+ if (if_id < (IP_MUX_SESSION_START - 1) ||
+ if_id > (IP_MUX_SESSION_END - 1)) {
+ ret = -EINVAL;
+ goto free;
+ }
+
+ rcu_read_lock();
+ priv = rcu_dereference(ipc_wwan->sub_netlist[if_id]);
+ if (!priv) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ skb->dev = priv->netdev;
+ stats = &priv->netdev->stats;
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+
+ ret = netif_rx(skb);
+ skb = NULL;
+unlock:
+ rcu_read_unlock();
+free:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+void ipc_wwan_tx_flowctrl(struct iosm_wwan *ipc_wwan, int if_id, bool on)
+{
+ struct net_device *netdev;
+ struct iosm_netdev_priv *priv;
+ bool is_tx_blk;
+
+ rcu_read_lock();
+ priv = rcu_dereference(ipc_wwan->sub_netlist[if_id]);
+ if (!priv) {
+ rcu_read_unlock();
+ return;
+ }
+
+ netdev = priv->netdev;
+
+ is_tx_blk = netif_queue_stopped(netdev);
+
+ if (on)
+ dev_dbg(ipc_wwan->dev, "session id[%d]: flowctrl enable",
+ if_id);
+
+ if (on && !is_tx_blk)
+ netif_stop_queue(netdev);
+ else if (!on && is_tx_blk)
+ netif_wake_queue(netdev);
+ rcu_read_unlock();
+}
+
+struct iosm_wwan *ipc_wwan_init(struct iosm_imem *ipc_imem, struct device *dev)
+{
+ struct iosm_wwan *ipc_wwan;
+
+ ipc_wwan = kzalloc(sizeof(*ipc_wwan), GFP_KERNEL);
+ if (!ipc_wwan)
+ return NULL;
+
+ ipc_wwan->dev = dev;
+ ipc_wwan->ipc_imem = ipc_imem;
+
+ if (wwan_register_ops(ipc_wwan->dev, &iosm_wwan_ops, ipc_wwan)) {
+ kfree(ipc_wwan);
+ return NULL;
+ }
+
+ mutex_init(&ipc_wwan->if_mutex);
+
+ return ipc_wwan;
+}
+
+void ipc_wwan_deinit(struct iosm_wwan *ipc_wwan)
+{
+ int if_id;
+
+ wwan_unregister_ops(ipc_wwan->dev);
+
+ for (if_id = 0; if_id < ARRAY_SIZE(ipc_wwan->sub_netlist); if_id++) {
+ struct iosm_netdev_priv *priv;
+
+ priv = rcu_access_pointer(ipc_wwan->sub_netlist[if_id]);
+ if (!priv)
+ continue;
+
+ rtnl_lock();
+ ipc_wwan_dellink(ipc_wwan, priv->netdev, NULL);
+ rtnl_unlock();
+ }
+
+ mutex_destroy(&ipc_wwan->if_mutex);
+
+ kfree(ipc_wwan);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.h b/drivers/net/wwan/iosm/iosm_ipc_wwan.h
new file mode 100644
index 000000000000..4925f22dff0a
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-21 Intel Corporation.
+ */
+
+#ifndef IOSM_IPC_WWAN_H
+#define IOSM_IPC_WWAN_H
+
+/**
+ * ipc_wwan_init - Allocate, initialize and register WWAN device
+ * @ipc_imem: Pointer to imem data-struct
+ * @dev: Pointer to device structure
+ *
+ * Returns: Pointer to instance on success else NULL
+ */
+struct iosm_wwan *ipc_wwan_init(struct iosm_imem *ipc_imem, struct device *dev);
+
+/**
+ * ipc_wwan_deinit - Unregister and free WWAN device, clear pointer
+ * @ipc_wwan: Pointer to wwan instance data
+ */
+void ipc_wwan_deinit(struct iosm_wwan *ipc_wwan);
+
+/**
+ * ipc_wwan_receive - Receive a downlink packet from CP.
+ * @ipc_wwan: Pointer to wwan instance
+ * @skb_arg: Pointer to struct sk_buff
+ * @dss: Set to true if interface id is from 257 to 261,
+ * else false
+ * @if_id: Interface ID
+ *
+ * Return: 0 on success, an error value on failure
+ */
+int ipc_wwan_receive(struct iosm_wwan *ipc_wwan, struct sk_buff *skb_arg,
+ bool dss, int if_id);
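+
+/* Illustrative RX-path sketch (not part of this driver): a mux decoder
+ * could hand a decoded downlink buffer to the net layer roughly like
+ * this; ipc_mux_decode() and session_id are hypothetical names:
+ *
+ *	skb = ipc_mux_decode(ipc_mux, &session_id);
+ *	if (skb)
+ *		ipc_wwan_receive(ipc_wwan, skb, false, session_id);
+ */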
+
+/**
+ * ipc_wwan_tx_flowctrl - Enable/Disable TX flow control
+ * @ipc_wwan: Pointer to wwan instance
+ * @id: Ipc mux channel session id
+ * @on: true to enable TX flow control, false to disable it
+ *
+ */
+void ipc_wwan_tx_flowctrl(struct iosm_wwan *ipc_wwan, int id, bool on);
+
+/**
+ * ipc_wwan_is_tx_stopped - Checks if TX is stopped for an interface id.
+ * @ipc_wwan: Pointer to wwan instance
+ * @id: Ipc mux channel session id
+ *
+ * Return: true if stopped, false otherwise
+ */
+bool ipc_wwan_is_tx_stopped(struct iosm_wwan *ipc_wwan, int id);
+
+#endif
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
index 92a8a6ffc567..7e728042fc41 100644
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -12,9 +12,13 @@
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/termios.h>
#include <linux/wwan.h>
+#include <net/rtnetlink.h>
+#include <uapi/linux/wwan.h>
-#define WWAN_MAX_MINORS 256 /* 256 minors allowed with register_chrdev() */
+/* Maximum number of minors in use */
+#define WWAN_MAX_MINORS (1 << MINORBITS)
static DEFINE_MUTEX(wwan_register_lock); /* WWAN device create|remove lock */
static DEFINE_IDA(minors); /* minors for WWAN port chardevs */
@@ -34,11 +38,15 @@ static int wwan_major;
* @id: WWAN device unique ID.
* @dev: Underlying device.
* @port_id: Current available port ID to pick.
+ * @ops: wwan device ops
+ * @ops_ctxt: context to pass to ops
*/
struct wwan_device {
unsigned int id;
struct device dev;
atomic_t port_id;
+ const struct wwan_ops *ops;
+ void *ops_ctxt;
};
/**
@@ -51,6 +59,8 @@ struct wwan_device {
* @dev: Underlying device
* @rxq: Buffer inbound queue
* @waitqueue: The waitqueue for port fops (read/write/poll)
+ * @data_lock: Port specific data access serialization
+ * @at_data: AT port specific data
*/
struct wwan_port {
enum wwan_port_type type;
@@ -61,8 +71,29 @@ struct wwan_port {
struct device dev;
struct sk_buff_head rxq;
wait_queue_head_t waitqueue;
+ struct mutex data_lock; /* Port specific data access serialization */
+ union {
+ struct {
+ struct ktermios termios;
+ int mdmbits;
+ } at_data;
+ };
};
+static ssize_t index_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct wwan_device *wwan = to_wwan_dev(dev);
+
+ return sprintf(buf, "%d\n", wwan->id);
+}
+static DEVICE_ATTR_RO(index);
+
+static struct attribute *wwan_dev_attrs[] = {
+ &dev_attr_index.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(wwan_dev);
+
static void wwan_dev_destroy(struct device *dev)
{
struct wwan_device *wwandev = to_wwan_dev(dev);
@@ -74,11 +105,13 @@ static void wwan_dev_destroy(struct device *dev)
static const struct device_type wwan_dev_type = {
.name = "wwan_dev",
.release = wwan_dev_destroy,
+ .groups = wwan_dev_groups,
};
static int wwan_dev_parent_match(struct device *dev, const void *parent)
{
- return (dev->type == &wwan_dev_type && dev->parent == parent);
+ return (dev->type == &wwan_dev_type &&
+ (dev->parent == parent || dev == parent));
}
static struct wwan_device *wwan_dev_get_by_parent(struct device *parent)
@@ -92,6 +125,23 @@ static struct wwan_device *wwan_dev_get_by_parent(struct device *parent)
return to_wwan_dev(dev);
}
+static int wwan_dev_name_match(struct device *dev, const void *name)
+{
+ return dev->type == &wwan_dev_type &&
+ strcmp(dev_name(dev), name) == 0;
+}
+
+static struct wwan_device *wwan_dev_get_by_name(const char *name)
+{
+ struct device *dev;
+
+ dev = class_find_device(wwan_class, NULL, name, wwan_dev_name_match);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ return to_wwan_dev(dev);
+}
+
/* This function allocates and registers a new WWAN device OR if a WWAN device
* already exist for the given parent, it gets a reference and return it.
* This function is not exported (for now), it is called indirectly via
@@ -156,9 +206,14 @@ static void wwan_remove_dev(struct wwan_device *wwandev)
/* WWAN device is created and registered (get+add) along with its first
* child port, and subsequent port registrations only grab a reference
* (get). The WWAN device must then be unregistered (del+put) along with
- * its latest port, and reference simply dropped (put) otherwise.
+ * its last port, and reference simply dropped (put) otherwise. In the
+ * same fashion, we must not unregister it when the ops are still there.
*/
- ret = device_for_each_child(&wwandev->dev, NULL, is_wwan_child);
+ if (wwandev->ops)
+ ret = 1;
+ else
+ ret = device_for_each_child(&wwandev->dev, NULL, is_wwan_child);
+
if (!ret)
device_unregister(&wwandev->dev);
else
@@ -169,13 +224,30 @@ static void wwan_remove_dev(struct wwan_device *wwandev)
/* ------- WWAN port management ------- */
-/* Keep aligned with wwan_port_type enum */
-static const char * const wwan_port_type_str[] = {
- "AT",
- "MBIM",
- "QMI",
- "QCDM",
- "FIREHOSE"
+static const struct {
+ const char * const name; /* Port type name */
+ const char * const devsuf; /* Port device name suffix */
+} wwan_port_types[WWAN_PORT_MAX + 1] = {
+ [WWAN_PORT_AT] = {
+ .name = "AT",
+ .devsuf = "at",
+ },
+ [WWAN_PORT_MBIM] = {
+ .name = "MBIM",
+ .devsuf = "mbim",
+ },
+ [WWAN_PORT_QMI] = {
+ .name = "QMI",
+ .devsuf = "qmi",
+ },
+ [WWAN_PORT_QCDM] = {
+ .name = "QCDM",
+ .devsuf = "qcdm",
+ },
+ [WWAN_PORT_FIREHOSE] = {
+ .name = "FIREHOSE",
+ .devsuf = "firehose",
+ },
};
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
@@ -183,7 +255,7 @@ static ssize_t type_show(struct device *dev, struct device_attribute *attr,
{
struct wwan_port *port = to_wwan_port(dev);
- return sprintf(buf, "%s\n", wwan_port_type_str[port->type]);
+ return sprintf(buf, "%s\n", wwan_port_types[port->type].name);
}
static DEVICE_ATTR_RO(type);
@@ -198,7 +270,7 @@ static void wwan_port_destroy(struct device *dev)
struct wwan_port *port = to_wwan_port(dev);
ida_free(&minors, MINOR(port->dev.devt));
- skb_queue_purge(&port->rxq);
+ mutex_destroy(&port->data_lock);
mutex_destroy(&port->ops_lock);
kfree(port);
}
@@ -226,6 +298,56 @@ static struct wwan_port *wwan_port_get_by_minor(unsigned int minor)
return to_wwan_port(dev);
}
+/* Allocate and set a unique name based on the passed format
+ *
+ * The name allocation approach is highly inspired by the
+ * __dev_alloc_name() function.
+ *
+ * To avoid name collisions, the caller must prevent new port device
+ * registration as well as concurrent invocation of this function.
+ */
+static int __wwan_port_dev_assign_name(struct wwan_port *port, const char *fmt)
+{
+ struct wwan_device *wwandev = to_wwan_dev(port->dev.parent);
+ const unsigned int max_ports = PAGE_SIZE * 8;
+ struct class_dev_iter iter;
+ unsigned long *idmap;
+ struct device *dev;
+ char buf[0x20];
+ int id;
+
+ idmap = (unsigned long *)get_zeroed_page(GFP_KERNEL);
+ if (!idmap)
+ return -ENOMEM;
+
+ /* Collect ids of same name format ports */
+ class_dev_iter_init(&iter, wwan_class, NULL, &wwan_port_dev_type);
+ while ((dev = class_dev_iter_next(&iter))) {
+ if (dev->parent != &wwandev->dev)
+ continue;
+ if (sscanf(dev_name(dev), fmt, &id) != 1)
+ continue;
+ if (id < 0 || id >= max_ports)
+ continue;
+ set_bit(id, idmap);
+ }
+ class_dev_iter_exit(&iter);
+
+ /* Allocate unique id */
+ id = find_first_zero_bit(idmap, max_ports);
+ free_page((unsigned long)idmap);
+
+ snprintf(buf, sizeof(buf), fmt, id); /* Name generation */
+
+ dev = device_find_child_by_name(&wwandev->dev, buf);
+ if (dev) {
+ put_device(dev);
+ return -ENFILE;
+ }
+
+ return dev_set_name(&port->dev, buf);
+}
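+
+/* Worked example (illustrative): with fmt "wwan0at%d" and existing
+ * ports wwan0at0 and wwan0at2, bits 0 and 2 get set in idmap, so
+ * find_first_zero_bit() returns 1 and the new port becomes wwan0at1.
+ */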
+
struct wwan_port *wwan_create_port(struct device *parent,
enum wwan_port_type type,
const struct wwan_port_ops *ops,
@@ -234,8 +356,9 @@ struct wwan_port *wwan_create_port(struct device *parent,
struct wwan_device *wwandev;
struct wwan_port *port;
int minor, err = -ENOMEM;
+ char namefmt[0x20];
- if (type >= WWAN_PORT_MAX || !ops)
+ if (type > WWAN_PORT_MAX || !ops)
return ERR_PTR(-EINVAL);
/* A port is always a child of a WWAN device, retrieve (allocate or
@@ -261,6 +384,7 @@ struct wwan_port *wwan_create_port(struct device *parent,
mutex_init(&port->ops_lock);
skb_queue_head_init(&port->rxq);
init_waitqueue_head(&port->waitqueue);
+ mutex_init(&port->data_lock);
port->dev.parent = &wwandev->dev;
port->dev.class = wwan_class;
@@ -268,12 +392,18 @@ struct wwan_port *wwan_create_port(struct device *parent,
port->dev.devt = MKDEV(wwan_major, minor);
dev_set_drvdata(&port->dev, drvdata);
- /* create unique name based on wwan device id, port index and type */
- dev_set_name(&port->dev, "wwan%up%u%s", wwandev->id,
- atomic_inc_return(&wwandev->port_id),
- wwan_port_type_str[port->type]);
+ /* allocate unique name based on wwan device id, port type and number */
+ snprintf(namefmt, sizeof(namefmt), "wwan%u%s%%d", wwandev->id,
+ wwan_port_types[port->type].devsuf);
+
+ /* Serialize ports registration */
+ mutex_lock(&wwan_register_lock);
+ __wwan_port_dev_assign_name(port, namefmt);
err = device_register(&port->dev);
+
+ mutex_unlock(&wwan_register_lock);
+
if (err)
goto error_put_device;
@@ -362,8 +492,11 @@ static void wwan_port_op_stop(struct wwan_port *port)
{
mutex_lock(&port->ops_lock);
port->start_count--;
- if (port->ops && !port->start_count)
- port->ops->stop(port);
+ if (!port->start_count) {
+ if (port->ops)
+ port->ops->stop(port);
+ skb_queue_purge(&port->rxq);
+ }
mutex_unlock(&port->ops_lock);
}
@@ -530,6 +663,110 @@ static __poll_t wwan_port_fops_poll(struct file *filp, poll_table *wait)
return mask;
}
+/* Implements minimalistic stub terminal IOCTL support */
+static long wwan_port_fops_at_ioctl(struct wwan_port *port, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+
+ mutex_lock(&port->data_lock);
+
+ switch (cmd) {
+ case TCFLSH:
+ break;
+
+ case TCGETS:
+ if (copy_to_user((void __user *)arg, &port->at_data.termios,
+ sizeof(struct termios)))
+ ret = -EFAULT;
+ break;
+
+ case TCSETS:
+ case TCSETSW:
+ case TCSETSF:
+ if (copy_from_user(&port->at_data.termios, (void __user *)arg,
+ sizeof(struct termios)))
+ ret = -EFAULT;
+ break;
+
+#ifdef TCGETS2
+ case TCGETS2:
+ if (copy_to_user((void __user *)arg, &port->at_data.termios,
+ sizeof(struct termios2)))
+ ret = -EFAULT;
+ break;
+
+ case TCSETS2:
+ case TCSETSW2:
+ case TCSETSF2:
+ if (copy_from_user(&port->at_data.termios, (void __user *)arg,
+ sizeof(struct termios2)))
+ ret = -EFAULT;
+ break;
+#endif
+
+ case TIOCMGET:
+ ret = put_user(port->at_data.mdmbits, (int __user *)arg);
+ break;
+
+ case TIOCMSET:
+ case TIOCMBIC:
+ case TIOCMBIS: {
+ int mdmbits;
+
+ if (copy_from_user(&mdmbits, (int __user *)arg, sizeof(int))) {
+ ret = -EFAULT;
+ break;
+ }
+ if (cmd == TIOCMBIC)
+ port->at_data.mdmbits &= ~mdmbits;
+ else if (cmd == TIOCMBIS)
+ port->at_data.mdmbits |= mdmbits;
+ else
+ port->at_data.mdmbits = mdmbits;
+ break;
+ }
+
+ default:
+ ret = -ENOIOCTLCMD;
+ }
+
+ mutex_unlock(&port->data_lock);
+
+ return ret;
+}
+
+static long wwan_port_fops_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct wwan_port *port = filp->private_data;
+ int res;
+
+ if (port->type == WWAN_PORT_AT) { /* AT port specific IOCTLs */
+ res = wwan_port_fops_at_ioctl(port, cmd, arg);
+ if (res != -ENOIOCTLCMD)
+ return res;
+ }
+
+ switch (cmd) {
+ case TIOCINQ: { /* aka SIOCINQ aka FIONREAD */
+ unsigned long flags;
+ struct sk_buff *skb;
+ int amount = 0;
+
+ spin_lock_irqsave(&port->rxq.lock, flags);
+ skb_queue_walk(&port->rxq, skb)
+ amount += skb->len;
+ spin_unlock_irqrestore(&port->rxq.lock, flags);
+
+ return put_user(amount, (int __user *)arg);
+ }
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
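+
+/* Illustrative userspace usage (not part of this patch): querying the
+ * number of bytes pending in a port's RX queue; the /dev/wwan0at0 path
+ * is an assumed example:
+ *
+ *	int avail;
+ *
+ *	if (ioctl(fd, TIOCINQ, &avail) == 0)
+ *		printf("%d bytes pending\n", avail);
+ */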
+
static const struct file_operations wwan_port_fops = {
.owner = THIS_MODULE,
.open = wwan_port_fops_open,
@@ -537,28 +774,233 @@ static const struct file_operations wwan_port_fops = {
.read = wwan_port_fops_read,
.write = wwan_port_fops_write,
.poll = wwan_port_fops_poll,
+ .unlocked_ioctl = wwan_port_fops_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_ptr_ioctl,
+#endif
.llseek = noop_llseek,
};
+/**
+ * wwan_register_ops - register WWAN device ops
+ * @parent: Device to use as parent and shared by all WWAN ports and
+ * created netdevs
+ * @ops: operations to register
+ * @ctxt: context to pass to operations
+ *
+ * Returns: 0 on success, a negative error code on failure
+ */
+int wwan_register_ops(struct device *parent, const struct wwan_ops *ops,
+ void *ctxt)
+{
+ struct wwan_device *wwandev;
+
+ if (WARN_ON(!parent || !ops))
+ return -EINVAL;
+
+ wwandev = wwan_create_dev(parent);
+ if (!wwandev)
+ return -ENOMEM;
+
+ if (WARN_ON(wwandev->ops)) {
+ wwan_remove_dev(wwandev);
+ return -EBUSY;
+ }
+
+ if (!try_module_get(ops->owner)) {
+ wwan_remove_dev(wwandev);
+ return -ENODEV;
+ }
+
+ wwandev->ops = ops;
+ wwandev->ops_ctxt = ctxt;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(wwan_register_ops);
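+
+/* Minimal driver-side usage sketch (illustrative; the my_* names are
+ * hypothetical). The .owner field is the one try_module_get() above
+ * dereferences:
+ *
+ *	static const struct wwan_ops my_wwan_ops = {
+ *		.owner     = THIS_MODULE,
+ *		.priv_size = sizeof(struct my_netdev_priv),
+ *		.setup     = my_wwan_setup,
+ *		.newlink   = my_wwan_newlink,
+ *		.dellink   = my_wwan_dellink,
+ *	};
+ *
+ *	err = wwan_register_ops(parent_dev, &my_wwan_ops, ctxt);
+ */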
+
+/**
+ * wwan_unregister_ops - remove WWAN device ops
+ * @parent: Device to use as parent and shared by all WWAN ports and
+ * created netdevs
+ */
+void wwan_unregister_ops(struct device *parent)
+{
+ struct wwan_device *wwandev = wwan_dev_get_by_parent(parent);
+ bool has_ops;
+
+ if (WARN_ON(IS_ERR(wwandev)))
+ return;
+
+ has_ops = wwandev->ops;
+
+ /* Put the reference obtained by wwan_dev_get_by_parent(). We
+ * should still hold one more reference (the one the owner is
+ * giving back now) because the ops were assigned; check that
+ * below and return if not.
+ */
+ put_device(&wwandev->dev);
+
+ if (WARN_ON(!has_ops))
+ return;
+
+ module_put(wwandev->ops->owner);
+
+ wwandev->ops = NULL;
+ wwandev->ops_ctxt = NULL;
+ wwan_remove_dev(wwandev);
+}
+EXPORT_SYMBOL_GPL(wwan_unregister_ops);
+
+static int wwan_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ if (!data)
+ return -EINVAL;
+
+ if (!tb[IFLA_PARENT_DEV_NAME])
+ return -EINVAL;
+
+ if (!data[IFLA_WWAN_LINK_ID])
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct device_type wwan_type = { .name = "wwan" };
+
+static struct net_device *wwan_rtnl_alloc(struct nlattr *tb[],
+ const char *ifname,
+ unsigned char name_assign_type,
+ unsigned int num_tx_queues,
+ unsigned int num_rx_queues)
+{
+ const char *devname = nla_data(tb[IFLA_PARENT_DEV_NAME]);
+ struct wwan_device *wwandev = wwan_dev_get_by_name(devname);
+ struct net_device *dev;
+
+ if (IS_ERR(wwandev))
+ return ERR_CAST(wwandev);
+
+ /* only supported if ops were registered (not just ports) */
+ if (!wwandev->ops) {
+ dev = ERR_PTR(-EOPNOTSUPP);
+ goto out;
+ }
+
+ dev = alloc_netdev_mqs(wwandev->ops->priv_size, ifname, name_assign_type,
+ wwandev->ops->setup, num_tx_queues, num_rx_queues);
+
+ if (dev) {
+ SET_NETDEV_DEV(dev, &wwandev->dev);
+ SET_NETDEV_DEVTYPE(dev, &wwan_type);
+ }
+
+out:
+ /* release the reference */
+ put_device(&wwandev->dev);
+ return dev;
+}
+
+static int wwan_rtnl_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct wwan_device *wwandev = wwan_dev_get_by_parent(dev->dev.parent);
+ u32 link_id = nla_get_u32(data[IFLA_WWAN_LINK_ID]);
+ int ret;
+
+ if (IS_ERR(wwandev))
+ return PTR_ERR(wwandev);
+
+ /* A netdev shouldn't exist with us as parent once the ops are gone, so WARN */
+ if (WARN_ON(!wwandev->ops)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (wwandev->ops->newlink)
+ ret = wwandev->ops->newlink(wwandev->ops_ctxt, dev,
+ link_id, extack);
+ else
+ ret = register_netdevice(dev);
+
+out:
+ /* release the reference */
+ put_device(&wwandev->dev);
+ return ret;
+}
+
+static void wwan_rtnl_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct wwan_device *wwandev = wwan_dev_get_by_parent(dev->dev.parent);
+
+ if (IS_ERR(wwandev))
+ return;
+
+ /* A netdev shouldn't exist with us as parent once the ops are gone, so WARN */
+ if (WARN_ON(!wwandev->ops))
+ goto out;
+
+ if (wwandev->ops->dellink)
+ wwandev->ops->dellink(wwandev->ops_ctxt, dev, head);
+ else
+ unregister_netdevice(dev);
+
+out:
+ /* release the reference */
+ put_device(&wwandev->dev);
+}
+
+static const struct nla_policy wwan_rtnl_policy[IFLA_WWAN_MAX + 1] = {
+ [IFLA_WWAN_LINK_ID] = { .type = NLA_U32 },
+};
+
+static struct rtnl_link_ops wwan_rtnl_link_ops __read_mostly = {
+ .kind = "wwan",
+ .maxtype = __IFLA_WWAN_MAX,
+ .alloc = wwan_rtnl_alloc,
+ .validate = wwan_rtnl_validate,
+ .newlink = wwan_rtnl_newlink,
+ .dellink = wwan_rtnl_dellink,
+ .policy = wwan_rtnl_policy,
+};
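+
+/* Illustrative userspace usage (not part of this patch): with an
+ * iproute2 that understands IFLA_PARENT_DEV_NAME, a data link could be
+ * created roughly as below; the exact option spelling is an assumption
+ * and depends on the iproute2 version:
+ *
+ *	ip link add dev wwan0-0 parentdev wwan0 type wwan linkid 0
+ */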
+
static int __init wwan_init(void)
{
+ int err;
+
+ err = rtnl_link_register(&wwan_rtnl_link_ops);
+ if (err)
+ return err;
+
wwan_class = class_create(THIS_MODULE, "wwan");
- if (IS_ERR(wwan_class))
- return PTR_ERR(wwan_class);
+ if (IS_ERR(wwan_class)) {
+ err = PTR_ERR(wwan_class);
+ goto unregister;
+ }
/* chrdev used for wwan ports */
- wwan_major = register_chrdev(0, "wwan_port", &wwan_port_fops);
+ wwan_major = __register_chrdev(0, 0, WWAN_MAX_MINORS, "wwan_port",
+ &wwan_port_fops);
if (wwan_major < 0) {
- class_destroy(wwan_class);
- return wwan_major;
+ err = wwan_major;
+ goto destroy;
}
return 0;
+
+destroy:
+ class_destroy(wwan_class);
+unregister:
+ rtnl_link_unregister(&wwan_rtnl_link_ops);
+ return err;
}
static void __exit wwan_exit(void)
{
- unregister_chrdev(wwan_major, "wwan_port");
+ __unregister_chrdev(wwan_major, 0, WWAN_MAX_MINORS, "wwan_port");
+ rtnl_link_unregister(&wwan_rtnl_link_ops);
class_destroy(wwan_class);
}
diff --git a/drivers/net/wwan/wwan_hwsim.c b/drivers/net/wwan/wwan_hwsim.c
new file mode 100644
index 000000000000..472cae544a2b
--- /dev/null
+++ b/drivers/net/wwan/wwan_hwsim.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * WWAN device simulator for WWAN framework testing.
+ *
+ * Copyright (c) 2021, Sergey Ryazanov <ryazanov.s.a@gmail.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/wwan.h>
+#include <linux/debugfs.h>
+#include <linux/workqueue.h>
+
+static int wwan_hwsim_devsnum = 2;
+module_param_named(devices, wwan_hwsim_devsnum, int, 0444);
+MODULE_PARM_DESC(devices, "Number of simulated devices");
+
+static struct class *wwan_hwsim_class;
+
+static struct dentry *wwan_hwsim_debugfs_topdir;
+static struct dentry *wwan_hwsim_debugfs_devcreate;
+
+static DEFINE_SPINLOCK(wwan_hwsim_devs_lock);
+static LIST_HEAD(wwan_hwsim_devs);
+static unsigned int wwan_hwsim_dev_idx;
+
+struct wwan_hwsim_dev {
+ struct list_head list;
+ unsigned int id;
+ struct device dev;
+ struct work_struct del_work;
+ struct dentry *debugfs_topdir;
+ struct dentry *debugfs_portcreate;
+ spinlock_t ports_lock; /* Serialize ports creation/deletion */
+ unsigned int port_idx;
+ struct list_head ports;
+};
+
+struct wwan_hwsim_port {
+ struct list_head list;
+ unsigned int id;
+ struct wwan_hwsim_dev *dev;
+ struct wwan_port *wwan;
+ struct work_struct del_work;
+ struct dentry *debugfs_topdir;
+ enum { /* AT command parser state */
+ AT_PARSER_WAIT_A,
+ AT_PARSER_WAIT_T,
+ AT_PARSER_WAIT_TERM,
+ AT_PARSER_SKIP_LINE,
+ } pstate;
+};
+
+static const struct file_operations wwan_hwsim_debugfs_portdestroy_fops;
+static const struct file_operations wwan_hwsim_debugfs_portcreate_fops;
+static const struct file_operations wwan_hwsim_debugfs_devdestroy_fops;
+static void wwan_hwsim_port_del_work(struct work_struct *work);
+static void wwan_hwsim_dev_del_work(struct work_struct *work);
+
+static int wwan_hwsim_port_start(struct wwan_port *wport)
+{
+ struct wwan_hwsim_port *port = wwan_port_get_drvdata(wport);
+
+ port->pstate = AT_PARSER_WAIT_A;
+
+ return 0;
+}
+
+static void wwan_hwsim_port_stop(struct wwan_port *wport)
+{
+}
+
+/* Implements a minimalistic AT command parser that echoes input back
+ * and replies with 'OK' to each input command. See the ITU-T V.250
+ * recommendation for AT command protocol details.
+ *
+ * Be aware that this parser is not fully V.250 compliant.
+ */
+static int wwan_hwsim_port_tx(struct wwan_port *wport, struct sk_buff *in)
+{
+ struct wwan_hwsim_port *port = wwan_port_get_drvdata(wport);
+ struct sk_buff *out;
+ int i, n, s;
+
+ /* Estimate the maximum possible number of commands by counting the
+ * number of termination chars (S3 param, CR by default), then allocate
+ * an output buffer large enough to fit the echo and result codes of
+ * all commands.
+ */
+ for (i = 0, n = 0; i < in->len; ++i)
+ if (in->data[i] == '\r')
+ n++;
+ n = in->len + n * (2 + 2 + 2); /* Output buffer size */
+ out = alloc_skb(n, GFP_KERNEL);
+ if (!out)
+ return -ENOMEM;
+
+ for (i = 0, s = 0; i < in->len; ++i) {
+ char c = in->data[i];
+
+ if (port->pstate == AT_PARSER_WAIT_A) {
+ if (c == 'A' || c == 'a')
+ port->pstate = AT_PARSER_WAIT_T;
+ else if (c != '\n') /* Ignore formatting char */
+ port->pstate = AT_PARSER_SKIP_LINE;
+ } else if (port->pstate == AT_PARSER_WAIT_T) {
+ if (c == 'T' || c == 't')
+ port->pstate = AT_PARSER_WAIT_TERM;
+ else
+ port->pstate = AT_PARSER_SKIP_LINE;
+ } else if (port->pstate == AT_PARSER_WAIT_TERM) {
+ if (c != '\r')
+ continue;
+ /* Consume the trailing formatting char as well */
+ if ((i + 1) < in->len && in->data[i + 1] == '\n')
+ i++;
+ n = i - s + 1;
+ memcpy(skb_put(out, n), &in->data[s], n);/* Echo */
+ memcpy(skb_put(out, 6), "\r\nOK\r\n", 6);
+ s = i + 1;
+ port->pstate = AT_PARSER_WAIT_A;
+ } else if (port->pstate == AT_PARSER_SKIP_LINE) {
+ if (c != '\r')
+ continue;
+ port->pstate = AT_PARSER_WAIT_A;
+ }
+ }
+
+ if (i > s) {
+ /* Echo the processed portion of a not yet completed command */
+ n = i - s;
+ memcpy(skb_put(out, n), &in->data[s], n);
+ }
+
+ consume_skb(in);
+
+ wwan_port_rx(wport, out);
+
+ return 0;
+}
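+
+/* Worked example (illustrative): feeding "AT\r" echoes the command and
+ * appends the result code, producing "AT\r\r\nOK\r\n". Feeding a bare
+ * "AT" only echoes "AT" back and leaves the parser in
+ * AT_PARSER_WAIT_TERM until the termination char arrives later.
+ */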
+
+static const struct wwan_port_ops wwan_hwsim_port_ops = {
+ .start = wwan_hwsim_port_start,
+ .stop = wwan_hwsim_port_stop,
+ .tx = wwan_hwsim_port_tx,
+};
+
+static struct wwan_hwsim_port *wwan_hwsim_port_new(struct wwan_hwsim_dev *dev)
+{
+ struct wwan_hwsim_port *port;
+ char name[0x10];
+ int err;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return ERR_PTR(-ENOMEM);
+
+ port->dev = dev;
+
+ spin_lock(&dev->ports_lock);
+ port->id = dev->port_idx++;
+ spin_unlock(&dev->ports_lock);
+
+ port->wwan = wwan_create_port(&dev->dev, WWAN_PORT_AT,
+ &wwan_hwsim_port_ops,
+ port);
+ if (IS_ERR(port->wwan)) {
+ err = PTR_ERR(port->wwan);
+ goto err_free_port;
+ }
+
+ INIT_WORK(&port->del_work, wwan_hwsim_port_del_work);
+
+ snprintf(name, sizeof(name), "port%u", port->id);
+ port->debugfs_topdir = debugfs_create_dir(name, dev->debugfs_topdir);
+ debugfs_create_file("destroy", 0200, port->debugfs_topdir, port,
+ &wwan_hwsim_debugfs_portdestroy_fops);
+
+ return port;
+
+err_free_port:
+ kfree(port);
+
+ return ERR_PTR(err);
+}
+
+static void wwan_hwsim_port_del(struct wwan_hwsim_port *port)
+{
+ debugfs_remove(port->debugfs_topdir);
+
+ /* Make sure that there is no pending deletion work; calling
+ * cancel_work_sync() from within the work itself would deadlock.
+ */
+ if (current_work() != &port->del_work)
+ cancel_work_sync(&port->del_work);
+
+ wwan_remove_port(port->wwan);
+ kfree(port);
+}
+
+static void wwan_hwsim_port_del_work(struct work_struct *work)
+{
+ struct wwan_hwsim_port *port =
+ container_of(work, typeof(*port), del_work);
+ struct wwan_hwsim_dev *dev = port->dev;
+
+ spin_lock(&dev->ports_lock);
+ if (list_empty(&port->list)) {
+ /* Someone else deleting port at the moment */
+ spin_unlock(&dev->ports_lock);
+ return;
+ }
+ list_del_init(&port->list);
+ spin_unlock(&dev->ports_lock);
+
+ wwan_hwsim_port_del(port);
+}
+
+static void wwan_hwsim_dev_release(struct device *sysdev)
+{
+ struct wwan_hwsim_dev *dev = container_of(sysdev, typeof(*dev), dev);
+
+ kfree(dev);
+}
+
+static struct wwan_hwsim_dev *wwan_hwsim_dev_new(void)
+{
+ struct wwan_hwsim_dev *dev;
+ int err;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock(&wwan_hwsim_devs_lock);
+ dev->id = wwan_hwsim_dev_idx++;
+ spin_unlock(&wwan_hwsim_devs_lock);
+
+ dev->dev.release = wwan_hwsim_dev_release;
+ dev->dev.class = wwan_hwsim_class;
+ dev_set_name(&dev->dev, "hwsim%u", dev->id);
+
+ spin_lock_init(&dev->ports_lock);
+ INIT_LIST_HEAD(&dev->ports);
+
+ err = device_register(&dev->dev);
+ if (err)
+ goto err_free_dev;
+
+ INIT_WORK(&dev->del_work, wwan_hwsim_dev_del_work);
+
+ dev->debugfs_topdir = debugfs_create_dir(dev_name(&dev->dev),
+ wwan_hwsim_debugfs_topdir);
+ debugfs_create_file("destroy", 0200, dev->debugfs_topdir, dev,
+ &wwan_hwsim_debugfs_devdestroy_fops);
+ dev->debugfs_portcreate =
+ debugfs_create_file("portcreate", 0200,
+ dev->debugfs_topdir, dev,
+ &wwan_hwsim_debugfs_portcreate_fops);
+
+ return dev;
+
+err_free_dev:
+ kfree(dev);
+
+ return ERR_PTR(err);
+}
+
+static void wwan_hwsim_dev_del(struct wwan_hwsim_dev *dev)
+{
+ debugfs_remove(dev->debugfs_portcreate); /* Avoid new ports */
+
+ spin_lock(&dev->ports_lock);
+ while (!list_empty(&dev->ports)) {
+ struct wwan_hwsim_port *port;
+
+ port = list_first_entry(&dev->ports, struct wwan_hwsim_port,
+ list);
+ list_del_init(&port->list);
+ spin_unlock(&dev->ports_lock);
+ wwan_hwsim_port_del(port);
+ spin_lock(&dev->ports_lock);
+ }
+ spin_unlock(&dev->ports_lock);
+
+ debugfs_remove(dev->debugfs_topdir);
+
+ /* Make sure that there is no pending deletion work; calling
+ * cancel_work_sync() from within the work itself would deadlock.
+ */
+ if (current_work() != &dev->del_work)
+ cancel_work_sync(&dev->del_work);
+
+ device_unregister(&dev->dev);
+ /* Memory will be freed in the device release callback */
+}
+
+static void wwan_hwsim_dev_del_work(struct work_struct *work)
+{
+ struct wwan_hwsim_dev *dev = container_of(work, typeof(*dev), del_work);
+
+ spin_lock(&wwan_hwsim_devs_lock);
+ if (list_empty(&dev->list)) {
+ /* Someone else is deleting the device at the moment */
+ spin_unlock(&wwan_hwsim_devs_lock);
+ return;
+ }
+ list_del_init(&dev->list);
+ spin_unlock(&wwan_hwsim_devs_lock);
+
+ wwan_hwsim_dev_del(dev);
+}
+
+static ssize_t wwan_hwsim_debugfs_portdestroy_write(struct file *file,
+ const char __user *usrbuf,
+ size_t count, loff_t *ppos)
+{
+ struct wwan_hwsim_port *port = file->private_data;
+
+ /* We can not delete the port here since that would deadlock:
+ * debugfs_remove() waits for this callback to finish. So defer
+ * the deletion to a workqueue.
+ */
+ schedule_work(&port->del_work);
+
+ return count;
+}
+
+static const struct file_operations wwan_hwsim_debugfs_portdestroy_fops = {
+ .write = wwan_hwsim_debugfs_portdestroy_write,
+ .open = simple_open,
+ .llseek = noop_llseek,
+};
+
+static ssize_t wwan_hwsim_debugfs_portcreate_write(struct file *file,
+ const char __user *usrbuf,
+ size_t count, loff_t *ppos)
+{
+ struct wwan_hwsim_dev *dev = file->private_data;
+ struct wwan_hwsim_port *port;
+
+ port = wwan_hwsim_port_new(dev);
+ if (IS_ERR(port))
+ return PTR_ERR(port);
+
+ spin_lock(&dev->ports_lock);
+ list_add_tail(&port->list, &dev->ports);
+ spin_unlock(&dev->ports_lock);
+
+ return count;
+}
+
+static const struct file_operations wwan_hwsim_debugfs_portcreate_fops = {
+ .write = wwan_hwsim_debugfs_portcreate_write,
+ .open = simple_open,
+ .llseek = noop_llseek,
+};
+
+static ssize_t wwan_hwsim_debugfs_devdestroy_write(struct file *file,
+ const char __user *usrbuf,
+ size_t count, loff_t *ppos)
+{
+ struct wwan_hwsim_dev *dev = file->private_data;
+
+ /* We can not delete the device here since that would deadlock:
+ * debugfs_remove() waits for this callback to finish. So defer
+ * the deletion to a workqueue.
+ */
+ schedule_work(&dev->del_work);
+
+ return count;
+}
+
+static const struct file_operations wwan_hwsim_debugfs_devdestroy_fops = {
+ .write = wwan_hwsim_debugfs_devdestroy_write,
+ .open = simple_open,
+ .llseek = noop_llseek,
+};
+
+static ssize_t wwan_hwsim_debugfs_devcreate_write(struct file *file,
+ const char __user *usrbuf,
+ size_t count, loff_t *ppos)
+{
+ struct wwan_hwsim_dev *dev;
+
+ dev = wwan_hwsim_dev_new();
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ spin_lock(&wwan_hwsim_devs_lock);
+ list_add_tail(&dev->list, &wwan_hwsim_devs);
+ spin_unlock(&wwan_hwsim_devs_lock);
+
+ return count;
+}
+
+static const struct file_operations wwan_hwsim_debugfs_devcreate_fops = {
+ .write = wwan_hwsim_debugfs_devcreate_write,
+ .open = simple_open,
+ .llseek = noop_llseek,
+};
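+
+/* Illustrative usage (not part of this patch), assuming debugfs is
+ * mounted at /sys/kernel/debug and the default two devices exist:
+ *
+ *	echo 1 > /sys/kernel/debug/wwan_hwsim/devcreate
+ *	echo 1 > /sys/kernel/debug/wwan_hwsim/hwsim2/portcreate
+ *	echo 1 > /sys/kernel/debug/wwan_hwsim/hwsim2/port0/destroy
+ */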
+
+static int __init wwan_hwsim_init_devs(void)
+{
+ struct wwan_hwsim_dev *dev;
+ int i, j;
+
+ for (i = 0; i < wwan_hwsim_devsnum; ++i) {
+ dev = wwan_hwsim_dev_new();
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ spin_lock(&wwan_hwsim_devs_lock);
+ list_add_tail(&dev->list, &wwan_hwsim_devs);
+ spin_unlock(&wwan_hwsim_devs_lock);
+
+ /* Create a couple of ports for each device so the simulator
+ * is ready to use right after loading.
+ */
+ for (j = 0; j < 2; ++j) {
+ struct wwan_hwsim_port *port;
+
+ port = wwan_hwsim_port_new(dev);
+ if (IS_ERR(port))
+ return PTR_ERR(port);
+
+ spin_lock(&dev->ports_lock);
+ list_add_tail(&port->list, &dev->ports);
+ spin_unlock(&dev->ports_lock);
+ }
+ }
+
+ return 0;
+}
+
+static void wwan_hwsim_free_devs(void)
+{
+ struct wwan_hwsim_dev *dev;
+
+ spin_lock(&wwan_hwsim_devs_lock);
+ while (!list_empty(&wwan_hwsim_devs)) {
+ dev = list_first_entry(&wwan_hwsim_devs, struct wwan_hwsim_dev,
+ list);
+ list_del_init(&dev->list);
+ spin_unlock(&wwan_hwsim_devs_lock);
+ wwan_hwsim_dev_del(dev);
+ spin_lock(&wwan_hwsim_devs_lock);
+ }
+ spin_unlock(&wwan_hwsim_devs_lock);
+}
+
+static int __init wwan_hwsim_init(void)
+{
+ int err;
+
+ if (wwan_hwsim_devsnum < 0 || wwan_hwsim_devsnum > 128)
+ return -EINVAL;
+
+ wwan_hwsim_class = class_create(THIS_MODULE, "wwan_hwsim");
+ if (IS_ERR(wwan_hwsim_class))
+ return PTR_ERR(wwan_hwsim_class);
+
+ wwan_hwsim_debugfs_topdir = debugfs_create_dir("wwan_hwsim", NULL);
+ wwan_hwsim_debugfs_devcreate =
+ debugfs_create_file("devcreate", 0200,
+ wwan_hwsim_debugfs_topdir, NULL,
+ &wwan_hwsim_debugfs_devcreate_fops);
+
+ err = wwan_hwsim_init_devs();
+ if (err)
+ goto err_clean_devs;
+
+ return 0;
+
+err_clean_devs:
+ wwan_hwsim_free_devs();
+ debugfs_remove(wwan_hwsim_debugfs_topdir);
+ class_destroy(wwan_hwsim_class);
+
+ return err;
+}
+
+static void __exit wwan_hwsim_exit(void)
+{
+ debugfs_remove(wwan_hwsim_debugfs_devcreate); /* Avoid new devs */
+ wwan_hwsim_free_devs();
+ flush_scheduled_work(); /* Wait for deletion works to complete */
+ debugfs_remove(wwan_hwsim_debugfs_topdir);
+ class_destroy(wwan_hwsim_class);
+}
+
+module_init(wwan_hwsim_init);
+module_exit(wwan_hwsim_exit);
+
+MODULE_AUTHOR("Sergey Ryazanov");
+MODULE_DESCRIPTION("Device simulator for WWAN framework");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c
index fe0719ed81a0..528745862738 100644
--- a/drivers/nfc/fdp/fdp.c
+++ b/drivers/nfc/fdp/fdp.c
@@ -149,7 +149,7 @@ static void fdp_nci_send_patch_cb(struct nci_dev *ndev)
wake_up(&info->setup_wq);
}
-/**
+/*
* Register a packet sent counter and a callback
*
* We have no other way of knowing when all firmware packets were sent out
@@ -167,7 +167,7 @@ static void fdp_nci_set_data_pkt_counter(struct nci_dev *ndev,
info->data_pkt_counter_cb = cb;
}
-/**
+/*
* The device is expecting a stream of packets. All packets need to
* have the PBF flag set to 0x0 (last packet) even if the firmware
* file is segmented and there are multiple packets. If we give the
@@ -237,28 +237,18 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type)
static int fdp_nci_open(struct nci_dev *ndev)
{
struct fdp_nci_info *info = nci_get_drvdata(ndev);
- struct device *dev = &info->phy->i2c_dev->dev;
-
- dev_dbg(dev, "%s\n", __func__);
return info->phy_ops->enable(info->phy);
}
static int fdp_nci_close(struct nci_dev *ndev)
{
- struct fdp_nci_info *info = nci_get_drvdata(ndev);
- struct device *dev = &info->phy->i2c_dev->dev;
-
- dev_dbg(dev, "%s\n", __func__);
return 0;
}
static int fdp_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
{
struct fdp_nci_info *info = nci_get_drvdata(ndev);
- struct device *dev = &info->phy->i2c_dev->dev;
-
- dev_dbg(dev, "%s\n", __func__);
if (atomic_dec_and_test(&info->data_pkt_counter))
info->data_pkt_counter_cb(ndev);
@@ -266,16 +256,6 @@ static int fdp_nci_send(struct nci_dev *ndev, struct sk_buff *skb)
return info->phy_ops->write(info->phy, skb);
}
-int fdp_nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb)
-{
- struct fdp_nci_info *info = nci_get_drvdata(ndev);
- struct device *dev = &info->phy->i2c_dev->dev;
-
- dev_dbg(dev, "%s\n", __func__);
- return nci_recv_frame(ndev, skb);
-}
-EXPORT_SYMBOL(fdp_nci_recv_frame);
-
static int fdp_nci_request_firmware(struct nci_dev *ndev)
{
struct fdp_nci_info *info = nci_get_drvdata(ndev);
@@ -286,7 +266,7 @@ static int fdp_nci_request_firmware(struct nci_dev *ndev)
r = request_firmware(&info->ram_patch, FDP_RAM_PATCH_NAME, dev);
if (r < 0) {
nfc_err(dev, "RAM patch request error\n");
- goto error;
+ return r;
}
data = (u8 *) info->ram_patch->data;
@@ -303,7 +283,7 @@ static int fdp_nci_request_firmware(struct nci_dev *ndev)
r = request_firmware(&info->otp_patch, FDP_OTP_PATCH_NAME, dev);
if (r < 0) {
nfc_err(dev, "OTP patch request error\n");
- goto out;
+ return 0;
}
data = (u8 *) info->otp_patch->data;
@@ -315,10 +295,7 @@ static int fdp_nci_request_firmware(struct nci_dev *ndev)
dev_dbg(dev, "OTP patch version: %d, size: %d\n",
info->otp_patch_version, (int) info->otp_patch->size);
-out:
return 0;
-error:
- return r;
}
static void fdp_nci_release_firmware(struct nci_dev *ndev)
@@ -476,8 +453,6 @@ static int fdp_nci_setup(struct nci_dev *ndev)
int r;
u8 patched = 0;
- dev_dbg(dev, "%s\n", __func__);
-
r = nci_core_init(ndev);
if (r)
goto error;
@@ -585,9 +560,7 @@ static int fdp_nci_core_reset_ntf_packet(struct nci_dev *ndev,
struct sk_buff *skb)
{
struct fdp_nci_info *info = nci_get_drvdata(ndev);
- struct device *dev = &info->phy->i2c_dev->dev;
- dev_dbg(dev, "%s\n", __func__);
info->setup_reset_ntf = 1;
wake_up(&info->setup_wq);
@@ -598,9 +571,7 @@ static int fdp_nci_prop_patch_ntf_packet(struct nci_dev *ndev,
struct sk_buff *skb)
{
struct fdp_nci_info *info = nci_get_drvdata(ndev);
- struct device *dev = &info->phy->i2c_dev->dev;
- dev_dbg(dev, "%s\n", __func__);
info->setup_patch_ntf = 1;
info->setup_patch_status = skb->data[0];
wake_up(&info->setup_wq);
@@ -773,11 +744,6 @@ EXPORT_SYMBOL(fdp_nci_probe);
void fdp_nci_remove(struct nci_dev *ndev)
{
- struct fdp_nci_info *info = nci_get_drvdata(ndev);
- struct device *dev = &info->phy->i2c_dev->dev;
-
- dev_dbg(dev, "%s\n", __func__);
-
nci_unregister_device(ndev);
nci_free_device(ndev);
}
diff --git a/drivers/nfc/fdp/fdp.h b/drivers/nfc/fdp/fdp.h
index 9bd1f3f23e2d..ead3b21ccae6 100644
--- a/drivers/nfc/fdp/fdp.h
+++ b/drivers/nfc/fdp/fdp.h
@@ -25,6 +25,5 @@ int fdp_nci_probe(struct fdp_i2c_phy *phy, struct nfc_phy_ops *phy_ops,
struct nci_dev **ndev, int tx_headroom, int tx_tailroom,
u8 clock_type, u32 clock_freq, u8 *fw_vsc_cfg);
void fdp_nci_remove(struct nci_dev *ndev);
-int fdp_nci_recv_frame(struct nci_dev *ndev, struct sk_buff *skb);
#endif /* __LOCAL_FDP_H_ */
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
index adaa1a7147f9..c5596e514648 100644
--- a/drivers/nfc/fdp/i2c.c
+++ b/drivers/nfc/fdp/i2c.c
@@ -49,7 +49,6 @@ static int fdp_nci_i2c_enable(void *phy_id)
{
struct fdp_i2c_phy *phy = phy_id;
- dev_dbg(&phy->i2c_dev->dev, "%s\n", __func__);
fdp_nci_i2c_reset(phy);
return 0;
@@ -59,7 +58,6 @@ static void fdp_nci_i2c_disable(void *phy_id)
{
struct fdp_i2c_phy *phy = phy_id;
- dev_dbg(&phy->i2c_dev->dev, "%s\n", __func__);
fdp_nci_i2c_reset(phy);
}
@@ -197,7 +195,6 @@ flush:
static irqreturn_t fdp_nci_i2c_irq_thread_fn(int irq, void *phy_id)
{
struct fdp_i2c_phy *phy = phy_id;
- struct i2c_client *client;
struct sk_buff *skb;
int r;
@@ -206,9 +203,6 @@ static irqreturn_t fdp_nci_i2c_irq_thread_fn(int irq, void *phy_id)
return IRQ_NONE;
}
- client = phy->i2c_dev;
- dev_dbg(&client->dev, "%s\n", __func__);
-
r = fdp_nci_i2c_read(phy, &skb);
if (r == -EREMOTEIO)
@@ -217,7 +211,7 @@ static irqreturn_t fdp_nci_i2c_irq_thread_fn(int irq, void *phy_id)
return IRQ_HANDLED;
if (skb != NULL)
- fdp_nci_recv_frame(phy->ndev, skb);
+ nci_recv_frame(phy->ndev, skb);
return IRQ_HANDLED;
}
@@ -288,8 +282,6 @@ static int fdp_nci_i2c_probe(struct i2c_client *client)
u32 clock_freq;
int r = 0;
- dev_dbg(dev, "%s\n", __func__);
-
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
nfc_err(dev, "No I2C_FUNC_I2C support\n");
return -ENODEV;
@@ -351,8 +343,6 @@ static int fdp_nci_i2c_remove(struct i2c_client *client)
{
struct fdp_i2c_phy *phy = i2c_get_clientdata(client);
- dev_dbg(&client->dev, "%s\n", __func__);
-
fdp_nci_remove(phy->ndev);
fdp_nci_i2c_disable(phy);
@@ -368,7 +358,7 @@ MODULE_DEVICE_TABLE(acpi, fdp_nci_i2c_acpi_match);
static struct i2c_driver fdp_nci_i2c_driver = {
.driver = {
.name = FDP_I2C_DRIVER_NAME,
- .acpi_match_table = ACPI_PTR(fdp_nci_i2c_acpi_match),
+ .acpi_match_table = fdp_nci_i2c_acpi_match,
},
.probe_new = fdp_nci_i2c_probe,
.remove = fdp_nci_i2c_remove,
diff --git a/drivers/nfc/mei_phy.c b/drivers/nfc/mei_phy.c
index 0f43bb389566..e56cea716cd2 100644
--- a/drivers/nfc/mei_phy.c
+++ b/drivers/nfc/mei_phy.c
@@ -98,8 +98,6 @@ static int mei_nfc_if_version(struct nfc_mei_phy *phy)
size_t if_version_length;
int bytes_recv, r;
- pr_info("%s\n", __func__);
-
memset(&cmd, 0, sizeof(struct mei_nfc_cmd));
cmd.hdr.cmd = MEI_NFC_CMD_MAINTENANCE;
cmd.hdr.data_size = 1;
@@ -146,8 +144,6 @@ static int mei_nfc_connect(struct nfc_mei_phy *phy)
size_t connect_length, connect_resp_length;
int bytes_recv, r;
- pr_info("%s\n", __func__);
-
connect_length = sizeof(struct mei_nfc_cmd) +
sizeof(struct mei_nfc_connect);
@@ -320,8 +316,6 @@ static int nfc_mei_phy_enable(void *phy_id)
int r;
struct nfc_mei_phy *phy = phy_id;
- pr_info("%s\n", __func__);
-
if (phy->powered == 1)
return 0;
@@ -363,8 +357,6 @@ static void nfc_mei_phy_disable(void *phy_id)
{
struct nfc_mei_phy *phy = phy_id;
- pr_info("%s\n", __func__);
-
mei_cldev_disable(phy->cldev);
phy->powered = 0;
diff --git a/drivers/nfc/microread/microread.c b/drivers/nfc/microread/microread.c
index 8d3988457c58..b1d3975e8a81 100644
--- a/drivers/nfc/microread/microread.c
+++ b/drivers/nfc/microread/microread.c
@@ -364,7 +364,6 @@ static void microread_im_transceive_cb(void *context, struct sk_buff *skb,
case MICROREAD_CB_TYPE_READER_ALL:
if (err == 0) {
if (skb->len == 0) {
- err = -EPROTO;
kfree_skb(skb);
info->async_cb(info->async_cb_context, NULL,
-EPROTO);
diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c
index 52c8ae504e32..aaccb8b76b3e 100644
--- a/drivers/nfc/nfcmrvl/fw_dnld.c
+++ b/drivers/nfc/nfcmrvl/fw_dnld.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Marvell NFC driver: Firmware downloader
*
* Copyright (C) 2015, Marvell International Ltd.
- *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available on the worldwide web at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include <linux/module.h>
@@ -50,8 +39,8 @@ enum {
};
/*
-** Patterns for responses
-*/
+ * Patterns for responses
+ */
static const uint8_t nci_pattern_core_reset_ntf[] = {
0x60, 0x00, 0x02, 0xA0, 0x01
@@ -451,7 +440,7 @@ static void fw_dnld_rx_work(struct work_struct *work)
}
}
-int nfcmrvl_fw_dnld_init(struct nfcmrvl_private *priv)
+int nfcmrvl_fw_dnld_init(struct nfcmrvl_private *priv)
{
char name[32];
@@ -465,13 +454,13 @@ int nfcmrvl_fw_dnld_init(struct nfcmrvl_private *priv)
return 0;
}
-void nfcmrvl_fw_dnld_deinit(struct nfcmrvl_private *priv)
+void nfcmrvl_fw_dnld_deinit(struct nfcmrvl_private *priv)
{
destroy_workqueue(priv->fw_dnld.rx_wq);
}
-void nfcmrvl_fw_dnld_recv_frame(struct nfcmrvl_private *priv,
- struct sk_buff *skb)
+void nfcmrvl_fw_dnld_recv_frame(struct nfcmrvl_private *priv,
+ struct sk_buff *skb)
{
/* Discard command timer */
if (timer_pending(&priv->ndev->cmd_timer))
diff --git a/drivers/nfc/nfcmrvl/fw_dnld.h b/drivers/nfc/nfcmrvl/fw_dnld.h
index ee4a339c05fd..7c4d91b01910 100644
--- a/drivers/nfc/nfcmrvl/fw_dnld.h
+++ b/drivers/nfc/nfcmrvl/fw_dnld.h
@@ -1,20 +1,9 @@
-/**
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
* Marvell NFC driver: Firmware downloader
*
* Copyright (C) 2015, Marvell International Ltd.
- *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available on the worldwide web at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
- **/
+ */
#ifndef __NFCMRVL_FW_DNLD_H__
#define __NFCMRVL_FW_DNLD_H__
diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c
index 18cd96284b77..59a529e72d96 100644
--- a/drivers/nfc/nfcmrvl/i2c.c
+++ b/drivers/nfc/nfcmrvl/i2c.c
@@ -1,20 +1,9 @@
-/**
+// SPDX-License-Identifier: GPL-2.0-only
+/*
* Marvell NFC-over-I2C driver: I2C interface related functions
*
* Copyright (C) 2015, Marvell International Ltd.
- *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available on the worldwide web at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
- **/
+ */
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -49,11 +38,6 @@ static int nfcmrvl_i2c_read(struct nfcmrvl_i2c_drv_data *drv_data,
return -EBADMSG;
}
- if (nci_hdr.plen > NCI_MAX_PAYLOAD_SIZE) {
- nfc_err(&drv_data->i2c->dev, "invalid packet payload size\n");
- return -EBADMSG;
- }
-
*skb = nci_skb_alloc(drv_data->priv->ndev,
nci_hdr.plen + NCI_CTRL_HDR_SIZE, GFP_KERNEL);
if (!*skb)
@@ -260,7 +244,7 @@ static int nfcmrvl_i2c_remove(struct i2c_client *client)
}
-static const struct of_device_id of_nfcmrvl_i2c_match[] = {
+static const struct of_device_id of_nfcmrvl_i2c_match[] __maybe_unused = {
{ .compatible = "marvell,nfc-i2c", },
{},
};
diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
index 529be35ac178..a4620b480c4f 100644
--- a/drivers/nfc/nfcmrvl/main.c
+++ b/drivers/nfc/nfcmrvl/main.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Marvell NFC driver: major functions
*
* Copyright (C) 2014-2015 Marvell International Ltd.
- *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available on the worldwide web at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include <linux/module.h>
diff --git a/drivers/nfc/nfcmrvl/nfcmrvl.h b/drivers/nfc/nfcmrvl/nfcmrvl.h
index de68ff45e49a..a715543bc9bf 100644
--- a/drivers/nfc/nfcmrvl/nfcmrvl.h
+++ b/drivers/nfc/nfcmrvl/nfcmrvl.h
@@ -1,20 +1,9 @@
-/**
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
* Marvell NFC driver
*
* Copyright (C) 2014-2015, Marvell International Ltd.
- *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available on the worldwide web at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
- **/
+ */
#ifndef _NFCMRVL_H_
#define _NFCMRVL_H_
@@ -36,16 +25,16 @@
#define NFCMRVL_NCI_MAX_EVENT_SIZE 260
/*
-** NCI FW Parmaters
-*/
+ * NCI FW Parameters
+ */
#define NFCMRVL_PB_BAIL_OUT 0x11
#define NFCMRVL_PROP_REF_CLOCK 0xF0
#define NFCMRVL_PROP_SET_HI_CONFIG 0xF1
/*
-** HCI defines
-*/
+ * HCI defines
+ */
#define NFCMRVL_HCI_EVENT_HEADER_SIZE 0x04
#define NFCMRVL_HCI_EVENT_CODE 0x04
@@ -78,8 +67,8 @@ struct nfcmrvl_private {
bool support_fw_dnld;
/*
- ** PHY related information
- */
+ * PHY related information
+ */
/* PHY driver context */
void *drv_data;
diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c
index 8e0ddb434770..66696321c645 100644
--- a/drivers/nfc/nfcmrvl/spi.c
+++ b/drivers/nfc/nfcmrvl/spi.c
@@ -1,20 +1,9 @@
-/**
+// SPDX-License-Identifier: GPL-2.0-only
+/*
* Marvell NFC-over-SPI driver: SPI interface related functions
*
* Copyright (C) 2015, Marvell International Ltd.
- *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available on the worldwide web at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
- **/
+ */
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -196,7 +185,7 @@ static int nfcmrvl_spi_remove(struct spi_device *spi)
return 0;
}
-static const struct of_device_id of_nfcmrvl_spi_match[] = {
+static const struct of_device_id of_nfcmrvl_spi_match[] __maybe_unused = {
{ .compatible = "marvell,nfc-spi", },
{},
};
diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
index e5a622ce4b95..50d86c90b9dd 100644
--- a/drivers/nfc/nfcmrvl/uart.c
+++ b/drivers/nfc/nfcmrvl/uart.c
@@ -1,19 +1,8 @@
-/**
+// SPDX-License-Identifier: GPL-2.0-only
+/*
* Marvell NFC-over-UART driver
*
* Copyright (C) 2015, Marvell International Ltd.
- *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available on the worldwide web at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
*/
#include <linux/module.h>
@@ -29,8 +18,8 @@ static unsigned int break_control;
static int reset_n_io = -EINVAL;
/*
-** NFCMRVL NCI OPS
-*/
+ * NFCMRVL NCI OPS
+ */
static int nfcmrvl_uart_nci_open(struct nfcmrvl_private *priv)
{
@@ -103,8 +92,8 @@ static int nfcmrvl_uart_parse_dt(struct device_node *node,
}
/*
-** NCI UART OPS
-*/
+ * NCI UART OPS
+ */
static int nfcmrvl_nci_uart_open(struct nci_uart *nu)
{
@@ -178,10 +167,10 @@ static void nfcmrvl_nci_uart_tx_done(struct nci_uart *nu)
return;
/*
- ** To ensure that if the NFCC goes in DEEP SLEEP sate we can wake him
- ** up. we set BREAK. Once we will be ready to send again we will remove
- ** it.
- */
+ * To ensure that if the NFCC goes into DEEP SLEEP state we can
+ * wake it up, we set BREAK. Once we are ready to send again we
+ * will remove it.
+ */
if (priv->config.break_control && nu->tty->ops->break_ctl) {
nu->tty->ops->break_ctl(nu->tty, -1);
usleep_range(1000, 3000);
@@ -200,23 +189,7 @@ static struct nci_uart nfcmrvl_nci_uart = {
.tx_done = nfcmrvl_nci_uart_tx_done,
}
};
-
-/*
-** Module init
-*/
-
-static int nfcmrvl_uart_init_module(void)
-{
- return nci_uart_register(&nfcmrvl_nci_uart);
-}
-
-static void nfcmrvl_uart_exit_module(void)
-{
- nci_uart_unregister(&nfcmrvl_nci_uart);
-}
-
-module_init(nfcmrvl_uart_init_module);
-module_exit(nfcmrvl_uart_exit_module);
+module_driver(nfcmrvl_nci_uart, nci_uart_register, nci_uart_unregister);
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell NFC-over-UART");
diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c
index 888e298f610b..9d649b45300b 100644
--- a/drivers/nfc/nfcmrvl/usb.c
+++ b/drivers/nfc/nfcmrvl/usb.c
@@ -1,20 +1,9 @@
-/**
+// SPDX-License-Identifier: GPL-2.0-only
+/*
* Marvell NFC-over-USB driver: USB interface related functions
*
* Copyright (C) 2014, Marvell International Ltd.
- *
- * This software file (the "File") is distributed by Marvell International
- * Ltd. under the terms of the GNU General Public License Version 2, June 1991
- * (the "License"). You may use, redistribute and/or modify this File in
- * accordance with the terms and conditions of the License, a copy of which
- * is available on the worldwide web at
- * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
- *
- * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
- * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
- * this warranty disclaimer.
- **/
+ */
#include <linux/module.h>
#include <linux/usb.h>
@@ -68,7 +57,6 @@ static int nfcmrvl_inc_tx(struct nfcmrvl_usb_drv_data *drv_data)
static void nfcmrvl_bulk_complete(struct urb *urb)
{
struct nfcmrvl_usb_drv_data *drv_data = urb->context;
- struct sk_buff *skb;
int err;
dev_dbg(&drv_data->udev->dev, "urb %p status %d count %d\n",
@@ -78,6 +66,8 @@ static void nfcmrvl_bulk_complete(struct urb *urb)
return;
if (!urb->status) {
+ struct sk_buff *skb;
+
skb = nci_skb_alloc(drv_data->priv->ndev, urb->actual_length,
GFP_ATOMIC);
if (!skb) {
@@ -296,7 +286,6 @@ static void nfcmrvl_waker(struct work_struct *work)
static int nfcmrvl_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- struct usb_endpoint_descriptor *ep_desc;
struct nfcmrvl_usb_drv_data *drv_data;
struct nfcmrvl_private *priv;
int i;
@@ -314,18 +303,16 @@ static int nfcmrvl_probe(struct usb_interface *intf,
return -ENOMEM;
for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
+ struct usb_endpoint_descriptor *ep_desc;
+
ep_desc = &intf->cur_altsetting->endpoint[i].desc;
if (!drv_data->bulk_tx_ep &&
usb_endpoint_is_bulk_out(ep_desc)) {
drv_data->bulk_tx_ep = ep_desc;
- continue;
- }
-
- if (!drv_data->bulk_rx_ep &&
- usb_endpoint_is_bulk_in(ep_desc)) {
+ } else if (!drv_data->bulk_rx_ep &&
+ usb_endpoint_is_bulk_in(ep_desc)) {
drv_data->bulk_rx_ep = ep_desc;
- continue;
}
}
diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c
index 795da9b85d56..e6bf8cfe3aa7 100644
--- a/drivers/nfc/pn533/i2c.c
+++ b/drivers/nfc/pn533/i2c.c
@@ -174,9 +174,6 @@ static int pn533_i2c_probe(struct i2c_client *client,
struct pn533 *priv;
int r = 0;
- dev_dbg(&client->dev, "%s\n", __func__);
- dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
-
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
nfc_err(&client->dev, "Need I2C_FUNC_I2C\n");
return -ENODEV;
@@ -195,9 +192,8 @@ static int pn533_i2c_probe(struct i2c_client *client,
phy, &i2c_phy_ops, NULL,
&phy->i2c_dev->dev);
- if (IS_ERR(priv)) {
+ if (IS_ERR(priv))
return PTR_ERR(priv);
- }
phy->priv = priv;
r = pn532_i2c_nfc_alloc(priv, PN533_NO_TYPE_B_PROTOCOLS, &client->dev);
@@ -239,8 +235,6 @@ static int pn533_i2c_remove(struct i2c_client *client)
{
struct pn533_i2c_phy *phy = i2c_get_clientdata(client);
- dev_dbg(&client->dev, "%s\n", __func__);
-
free_irq(client->irq, phy);
pn53x_unregister_nfc(phy->priv);
@@ -249,7 +243,7 @@ static int pn533_i2c_remove(struct i2c_client *client)
return 0;
}
-static const struct of_device_id of_pn533_i2c_match[] = {
+static const struct of_device_id of_pn533_i2c_match[] __maybe_unused = {
{ .compatible = "nxp,pn532", },
/*
* NOTE: The use of the compatibles with the trailing "...-i2c" is
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index 2c7f9916f206..cd64bfe20402 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -1075,8 +1075,6 @@ static int pn533_tm_get_data_complete(struct pn533 *dev, void *arg,
u8 status, ret, mi;
int rc;
- dev_dbg(dev->dev, "%s\n", __func__);
-
if (IS_ERR(resp)) {
skb_queue_purge(&dev->resp_q);
return PTR_ERR(resp);
@@ -1124,8 +1122,6 @@ static void pn533_wq_tm_mi_recv(struct work_struct *work)
struct sk_buff *skb;
int rc;
- dev_dbg(dev->dev, "%s\n", __func__);
-
skb = pn533_alloc_skb(dev, 0);
if (!skb)
return;
@@ -1148,8 +1144,6 @@ static void pn533_wq_tm_mi_send(struct work_struct *work)
struct sk_buff *skb;
int rc;
- dev_dbg(dev->dev, "%s\n", __func__);
-
/* Grab the first skb in the queue */
skb = skb_dequeue(&dev->fragment_skb);
if (skb == NULL) { /* No more data */
@@ -1186,8 +1180,6 @@ static void pn533_wq_tg_get_data(struct work_struct *work)
struct sk_buff *skb;
int rc;
- dev_dbg(dev->dev, "%s\n", __func__);
-
skb = pn533_alloc_skb(dev, 0);
if (!skb)
return;
@@ -1206,8 +1198,6 @@ static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp)
size_t gb_len;
int rc;
- dev_dbg(dev->dev, "%s\n", __func__);
-
if (resp->len < ATR_REQ_GB_OFFSET + 1)
return -EINVAL;
@@ -1260,8 +1250,6 @@ static int pn533_rf_complete(struct pn533 *dev, void *arg,
{
int rc = 0;
- dev_dbg(dev->dev, "%s\n", __func__);
-
if (IS_ERR(resp)) {
rc = PTR_ERR(resp);
@@ -1283,8 +1271,6 @@ static void pn533_wq_rf(struct work_struct *work)
struct sk_buff *skb;
int rc;
- dev_dbg(dev->dev, "%s\n", __func__);
-
skb = pn533_alloc_skb(dev, 2);
if (!skb)
return;
@@ -1360,8 +1346,6 @@ static int pn533_poll_dep(struct nfc_dev *nfc_dev)
u8 *next, nfcid3[NFC_NFCID3_MAXSIZE];
u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
- dev_dbg(dev->dev, "%s", __func__);
-
if (!dev->gb) {
dev->gb = nfc_get_local_general_bytes(nfc_dev, &dev->gb_len);
@@ -1511,8 +1495,6 @@ static int pn533_poll_complete(struct pn533 *dev, void *arg,
struct pn533_poll_modulations *cur_mod;
int rc;
- dev_dbg(dev->dev, "%s\n", __func__);
-
if (IS_ERR(resp)) {
rc = PTR_ERR(resp);
@@ -1783,8 +1765,6 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev)
struct sk_buff *skb;
struct sk_buff *resp;
- dev_dbg(dev->dev, "%s\n", __func__);
-
skb = pn533_alloc_skb(dev, sizeof(u8) * 2); /*TG + Next*/
if (!skb)
return -ENOMEM;
@@ -1866,8 +1846,6 @@ static int pn533_deactivate_target_complete(struct pn533 *dev, void *arg,
{
int rc = 0;
- dev_dbg(dev->dev, "%s\n", __func__);
-
if (IS_ERR(resp)) {
rc = PTR_ERR(resp);
@@ -1892,8 +1870,6 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev,
struct sk_buff *skb;
int rc;
- dev_dbg(dev->dev, "%s\n", __func__);
-
if (!dev->tgt_active_prot) {
nfc_err(dev->dev, "There is no active target\n");
return;
@@ -1988,8 +1964,6 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
u8 *next, *arg, nfcid3[NFC_NFCID3_MAXSIZE];
u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
- dev_dbg(dev->dev, "%s\n", __func__);
-
if (dev->poll_mod_count) {
nfc_err(dev->dev,
"Cannot bring the DEP link up while polling\n");
@@ -2067,8 +2041,6 @@ static int pn533_dep_link_down(struct nfc_dev *nfc_dev)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
- dev_dbg(dev->dev, "%s\n", __func__);
-
pn533_poll_reset_mod_list(dev);
if (dev->tgt_mode || dev->tgt_active_prot)
@@ -2092,8 +2064,6 @@ static struct sk_buff *pn533_build_response(struct pn533 *dev)
struct sk_buff *skb, *tmp, *t;
unsigned int skb_len = 0, tmp_len = 0;
- dev_dbg(dev->dev, "%s\n", __func__);
-
if (skb_queue_empty(&dev->resp_q))
return NULL;
@@ -2133,8 +2103,6 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
int rc = 0;
u8 status, ret, mi;
- dev_dbg(dev->dev, "%s\n", __func__);
-
if (IS_ERR(resp)) {
rc = PTR_ERR(resp);
goto _error;
@@ -2288,8 +2256,6 @@ static int pn533_transceive(struct nfc_dev *nfc_dev,
struct pn533_data_exchange_arg *arg = NULL;
int rc;
- dev_dbg(dev->dev, "%s\n", __func__);
-
if (!dev->tgt_active_prot) {
nfc_err(dev->dev,
"Can't exchange data if there is no active target\n");
@@ -2356,8 +2322,6 @@ static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
{
u8 status;
- dev_dbg(dev->dev, "%s\n", __func__);
-
if (IS_ERR(resp))
return PTR_ERR(resp);
@@ -2388,8 +2352,6 @@ static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
int rc;
- dev_dbg(dev->dev, "%s\n", __func__);
-
/* let's split into multiple chunks if the size is too big */
if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
rc = pn533_fill_fragment_skbs(dev, skb);
@@ -2426,8 +2388,6 @@ static void pn533_wq_mi_recv(struct work_struct *work)
struct sk_buff *skb;
int rc;
- dev_dbg(dev->dev, "%s\n", __func__);
-
skb = pn533_alloc_skb(dev, PN533_CMD_DATAEXCH_HEAD_LEN);
if (!skb)
goto error;
@@ -2476,8 +2436,6 @@ static void pn533_wq_mi_send(struct work_struct *work)
struct sk_buff *skb;
int rc;
- dev_dbg(dev->dev, "%s\n", __func__);
-
/* Grab the first skb in the queue */
skb = skb_dequeue(&dev->fragment_skb);
@@ -2533,8 +2491,6 @@ static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
struct sk_buff *resp;
int skb_len;
- dev_dbg(dev->dev, "%s\n", __func__);
-
skb_len = sizeof(cfgitem) + cfgdata_len; /* cfgitem + cfgdata */
skb = pn533_alloc_skb(dev, skb_len);
@@ -2580,8 +2536,6 @@ static int pn533_pasori_fw_reset(struct pn533 *dev)
struct sk_buff *skb;
struct sk_buff *resp;
- dev_dbg(dev->dev, "%s\n", __func__);
-
skb = pn533_alloc_skb(dev, sizeof(u8));
if (!skb)
return -ENOMEM;
diff --git a/drivers/nfc/pn533/uart.c b/drivers/nfc/pn533/uart.c
index a0665d8ea85b..7bdaf8263070 100644
--- a/drivers/nfc/pn533/uart.c
+++ b/drivers/nfc/pn533/uart.c
@@ -319,7 +319,7 @@ static struct serdev_device_driver pn532_uart_driver = {
.remove = pn532_uart_remove,
.driver = {
.name = "pn532_uart",
- .of_match_table = of_match_ptr(pn532_uart_of_match),
+ .of_match_table = pn532_uart_of_match,
},
};
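Dropping of_match_ptr() means the table is now referenced unconditionally, which is safe because pn532_uart_of_match is always built here. The macro compiles the pointer away when CONFIG_OF is off, which is what used to leave such tables "defined but not used":

/* From <linux/of.h>, simplified: */
#ifdef CONFIG_OF
#define of_match_ptr(ptr)	(ptr)
#else
#define of_match_ptr(ptr)	NULL	/* table unreferenced -> compiler warning */
#endif

This is also why the other match tables in this series gain __maybe_unused instead: they keep of_match_ptr()/ACPI_PTR() at the use site, so those definitions can legitimately go unused in some configurations.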
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index 84d2bfabf42b..bd7f7478d189 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -354,8 +354,6 @@ static void pn533_acr122_poweron_rdr_resp(struct urb *urb)
{
struct pn533_acr122_poweron_rdr_arg *arg = urb->context;
- dev_dbg(&urb->dev->dev, "%s\n", __func__);
-
print_hex_dump_debug("ACR122 RX: ", DUMP_PREFIX_NONE, 16, 1,
urb->transfer_buffer, urb->transfer_buffer_length,
false);
@@ -375,8 +373,6 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy)
void *cntx;
struct pn533_acr122_poweron_rdr_arg arg;
- dev_dbg(&phy->udev->dev, "%s\n", __func__);
-
buffer = kmemdup(cmd, sizeof(cmd), GFP_KERNEL);
if (!buffer)
return -ENOMEM;
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 4ac8cb262559..de59e439c369 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -50,7 +50,7 @@ static const struct i2c_device_id pn544_hci_i2c_id_table[] = {
MODULE_DEVICE_TABLE(i2c, pn544_hci_i2c_id_table);
-static const struct acpi_device_id pn544_hci_i2c_acpi_match[] = {
+static const struct acpi_device_id pn544_hci_i2c_acpi_match[] __maybe_unused = {
{"NXP5440", 0},
{}
};
@@ -241,8 +241,6 @@ static int pn544_hci_i2c_enable(void *phy_id)
{
struct pn544_i2c_phy *phy = phy_id;
- pr_info("%s\n", __func__);
-
pn544_hci_i2c_enable_mode(phy, PN544_HCI_MODE);
phy->powered = 1;
@@ -875,9 +873,6 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
struct pn544_i2c_phy *phy;
int r = 0;
- dev_dbg(&client->dev, "%s\n", __func__);
- dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
-
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
nfc_err(&client->dev, "Need I2C_FUNC_I2C\n");
return -ENODEV;
@@ -937,8 +932,6 @@ static int pn544_hci_i2c_remove(struct i2c_client *client)
{
struct pn544_i2c_phy *phy = i2c_get_clientdata(client);
- dev_dbg(&client->dev, "%s\n", __func__);
-
cancel_work_sync(&phy->fw_work);
if (phy->fw_work_state != FW_WORK_STATE_IDLE)
pn544_hci_i2c_fw_work_complete(phy, -ENODEV);
@@ -951,7 +944,7 @@ static int pn544_hci_i2c_remove(struct i2c_client *client)
return 0;
}
-static const struct of_device_id of_pn544_i2c_match[] = {
+static const struct of_device_id of_pn544_i2c_match[] __maybe_unused = {
{ .compatible = "nxp,pn544-i2c", },
{},
};
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
index 8e4d355dc3ae..4df926cc37d0 100644
--- a/drivers/nfc/port100.c
+++ b/drivers/nfc/port100.c
@@ -94,7 +94,7 @@ struct port100;
typedef void (*port100_send_async_complete_t)(struct port100 *dev, void *arg,
struct sk_buff *resp);
-/**
+/*
* Setting sets structure for in_set_rf command
*
* @in_*_set_number: Represent the entry indexes in the port-100 RF Base Table.
@@ -145,7 +145,7 @@ static const struct port100_in_rf_setting in_rf_settings[] = {
};
/**
- * Setting sets structure for tg_set_rf command
+ * struct port100_tg_rf_setting - Setting sets structure for tg_set_rf command
*
* @tg_set_number: Represents the entry index in the port-100 RF Base Table.
* This table contains multiple RF setting sets required for RF
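Both port100.c hunks silence kernel-doc warnings: a comment opening with /** must follow kernel-doc syntax and name the object it documents, while plain /* comments are exempt. A minimal well-formed example (struct and member names illustrative):

/**
 * struct example_rf_setting - one RF setting set
 * @set_number: entry index in the Port-100 RF Base Table
 */
struct example_rf_setting {
	u8 set_number;
};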
diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
index 38b8d6cab593..4d1cf1bb55b0 100644
--- a/drivers/nfc/s3fwrn5/i2c.c
+++ b/drivers/nfc/s3fwrn5/i2c.c
@@ -262,7 +262,7 @@ static const struct i2c_device_id s3fwrn5_i2c_id_table[] = {
};
MODULE_DEVICE_TABLE(i2c, s3fwrn5_i2c_id_table);
-static const struct of_device_id of_s3fwrn5_i2c_match[] = {
+static const struct of_device_id of_s3fwrn5_i2c_match[] __maybe_unused = {
{ .compatible = "samsung,s3fwrn5-i2c", },
{}
};
diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c
index 55d600cd3861..46981405e8b1 100644
--- a/drivers/nfc/st-nci/i2c.c
+++ b/drivers/nfc/st-nci/i2c.c
@@ -206,9 +206,6 @@ static int st_nci_i2c_probe(struct i2c_client *client,
struct st_nci_i2c_phy *phy;
int r;
- dev_dbg(&client->dev, "%s\n", __func__);
- dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
-
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
nfc_err(&client->dev, "Need I2C_FUNC_I2C\n");
return -ENODEV;
@@ -261,8 +258,6 @@ static int st_nci_i2c_remove(struct i2c_client *client)
{
struct st_nci_i2c_phy *phy = i2c_get_clientdata(client);
- dev_dbg(&client->dev, "%s\n", __func__);
-
ndlc_remove(phy->ndlc);
return 0;
@@ -274,14 +269,14 @@ static const struct i2c_device_id st_nci_i2c_id_table[] = {
};
MODULE_DEVICE_TABLE(i2c, st_nci_i2c_id_table);
-static const struct acpi_device_id st_nci_i2c_acpi_match[] = {
+static const struct acpi_device_id st_nci_i2c_acpi_match[] __maybe_unused = {
{"SMO2101"},
{"SMO2102"},
{}
};
MODULE_DEVICE_TABLE(acpi, st_nci_i2c_acpi_match);
-static const struct of_device_id of_st_nci_i2c_match[] = {
+static const struct of_device_id of_st_nci_i2c_match[] __maybe_unused = {
{ .compatible = "st,st21nfcb-i2c", },
{ .compatible = "st,st21nfcb_i2c", },
{ .compatible = "st,st21nfcc-i2c", },
diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
index 1cba8f69d3ae..5fd89f72969d 100644
--- a/drivers/nfc/st-nci/se.c
+++ b/drivers/nfc/st-nci/se.c
@@ -470,8 +470,6 @@ int st_nci_disable_se(struct nci_dev *ndev, u32 se_idx)
{
int r;
- pr_debug("st_nci_disable_se\n");
-
/*
* According to upper layer, se_idx == NFC_SE_UICC when
* info->se_info.se_status->is_uicc_enable is true should never happen
@@ -496,8 +494,6 @@ int st_nci_enable_se(struct nci_dev *ndev, u32 se_idx)
{
int r;
- pr_debug("st_nci_enable_se\n");
-
/*
* According to upper layer, se_idx == NFC_SE_UICC when
* info->se_info.se_status->is_uicc_enable is true should never happen.
@@ -534,10 +530,8 @@ static int st_nci_hci_network_init(struct nci_dev *ndev)
dest_params =
kzalloc(sizeof(struct core_conn_create_dest_spec_params) +
sizeof(struct dest_spec_params), GFP_KERNEL);
- if (dest_params == NULL) {
- r = -ENOMEM;
- goto exit;
- }
+ if (dest_params == NULL)
+ return -ENOMEM;
dest_params->type = NCI_DESTINATION_SPECIFIC_PARAM_NFCEE_TYPE;
dest_params->length = sizeof(struct dest_spec_params);
@@ -594,8 +588,6 @@ static int st_nci_hci_network_init(struct nci_dev *ndev)
free_dest_params:
kfree(dest_params);
-
-exit:
return r;
}
@@ -606,8 +598,6 @@ int st_nci_discover_se(struct nci_dev *ndev)
int se_count = 0;
struct st_nci_info *info = nci_get_drvdata(ndev);
- pr_debug("st_nci_discover_se\n");
-
r = st_nci_hci_network_init(ndev);
if (r != 0)
return r;
diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c
index 09df6ea65840..250d56f204c3 100644
--- a/drivers/nfc/st-nci/spi.c
+++ b/drivers/nfc/st-nci/spi.c
@@ -216,9 +216,6 @@ static int st_nci_spi_probe(struct spi_device *dev)
struct st_nci_spi_phy *phy;
int r;
- dev_dbg(&dev->dev, "%s\n", __func__);
- dev_dbg(&dev->dev, "IRQ: %d\n", dev->irq);
-
/* Check SPI platform functionalities */
if (!dev) {
pr_debug("%s: dev is NULL. Device is not accessible.\n",
@@ -274,8 +271,6 @@ static int st_nci_spi_remove(struct spi_device *dev)
{
struct st_nci_spi_phy *phy = spi_get_drvdata(dev);
- dev_dbg(&dev->dev, "%s\n", __func__);
-
ndlc_remove(phy->ndlc);
return 0;
@@ -287,13 +282,13 @@ static struct spi_device_id st_nci_spi_id_table[] = {
};
MODULE_DEVICE_TABLE(spi, st_nci_spi_id_table);
-static const struct acpi_device_id st_nci_spi_acpi_match[] = {
+static const struct acpi_device_id st_nci_spi_acpi_match[] __maybe_unused = {
{"SMO2101", 0},
{}
};
MODULE_DEVICE_TABLE(acpi, st_nci_spi_acpi_match);
-static const struct of_device_id of_st_nci_spi_match[] = {
+static const struct of_device_id of_st_nci_spi_match[] __maybe_unused = {
{ .compatible = "st,st21nfcb-spi", },
{}
};
diff --git a/drivers/nfc/st-nci/vendor_cmds.c b/drivers/nfc/st-nci/vendor_cmds.c
index c6a9d30a4dba..94b600029a2a 100644
--- a/drivers/nfc/st-nci/vendor_cmds.c
+++ b/drivers/nfc/st-nci/vendor_cmds.c
@@ -98,7 +98,7 @@ static int st_nci_hci_dm_get_info(struct nfc_dev *dev, void *data,
r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE, ST_NCI_HCI_DM_GETINFO,
data, data_len, &skb);
if (r)
- goto exit;
+ return r;
msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI,
HCI_DM_GET_INFO, skb->len);
@@ -117,7 +117,6 @@ static int st_nci_hci_dm_get_info(struct nfc_dev *dev, void *data,
free_skb:
kfree_skb(skb);
-exit:
return r;
}
@@ -131,7 +130,7 @@ static int st_nci_hci_dm_get_data(struct nfc_dev *dev, void *data,
r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE, ST_NCI_HCI_DM_GETDATA,
data, data_len, &skb);
if (r)
- goto exit;
+ return r;
msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI,
HCI_DM_GET_DATA, skb->len);
@@ -150,7 +149,6 @@ static int st_nci_hci_dm_get_data(struct nfc_dev *dev, void *data,
free_skb:
kfree_skb(skb);
-exit:
return r;
}
@@ -216,7 +214,7 @@ static int st_nci_hci_get_param(struct nfc_dev *dev, void *data,
r = nci_hci_get_param(ndev, param->gate, param->data, &skb);
if (r)
- goto exit;
+ return r;
msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI,
HCI_GET_PARAM, skb->len);
@@ -235,7 +233,6 @@ static int st_nci_hci_get_param(struct nfc_dev *dev, void *data,
free_skb:
kfree_skb(skb);
-exit:
return r;
}
@@ -262,7 +259,7 @@ static int st_nci_hci_dm_vdc_measurement_value(struct nfc_dev *dev, void *data,
ST_NCI_HCI_DM_VDC_MEASUREMENT_VALUE,
data, data_len, &skb);
if (r)
- goto exit;
+ return r;
msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI,
HCI_DM_VDC_MEASUREMENT_VALUE, skb->len);
@@ -281,7 +278,6 @@ static int st_nci_hci_dm_vdc_measurement_value(struct nfc_dev *dev, void *data,
free_skb:
kfree_skb(skb);
-exit:
return r;
}
@@ -299,7 +295,7 @@ static int st_nci_hci_dm_vdc_value_comparison(struct nfc_dev *dev, void *data,
ST_NCI_HCI_DM_VDC_VALUE_COMPARISON,
data, data_len, &skb);
if (r)
- goto exit;
+ return r;
msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI,
HCI_DM_VDC_VALUE_COMPARISON, skb->len);
@@ -318,7 +314,6 @@ static int st_nci_hci_dm_vdc_value_comparison(struct nfc_dev *dev, void *data,
free_skb:
kfree_skb(skb);
-exit:
return r;
}
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index 23ed11f91213..7a9f4d71707e 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -502,9 +502,6 @@ static int st21nfca_hci_i2c_probe(struct i2c_client *client,
struct st21nfca_i2c_phy *phy;
int r;
- dev_dbg(&client->dev, "%s\n", __func__);
- dev_dbg(&client->dev, "IRQ: %d\n", client->irq);
-
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
nfc_err(&client->dev, "Need I2C_FUNC_I2C\n");
return -ENODEV;
@@ -568,8 +565,6 @@ static int st21nfca_hci_i2c_remove(struct i2c_client *client)
{
struct st21nfca_i2c_phy *phy = i2c_get_clientdata(client);
- dev_dbg(&client->dev, "%s\n", __func__);
-
st21nfca_hci_remove(phy->hdev);
if (phy->powered)
@@ -584,13 +579,13 @@ static const struct i2c_device_id st21nfca_hci_i2c_id_table[] = {
};
MODULE_DEVICE_TABLE(i2c, st21nfca_hci_i2c_id_table);
-static const struct acpi_device_id st21nfca_hci_i2c_acpi_match[] = {
+static const struct acpi_device_id st21nfca_hci_i2c_acpi_match[] __maybe_unused = {
{"SMO2100", 0},
{}
};
MODULE_DEVICE_TABLE(acpi, st21nfca_hci_i2c_acpi_match);
-static const struct of_device_id of_st21nfca_i2c_match[] = {
+static const struct of_device_id of_st21nfca_i2c_match[] __maybe_unused = {
{ .compatible = "st,st21nfca-i2c", },
{ .compatible = "st,st21nfca_i2c", },
{}
diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c
index 457854765983..2dc788c363fd 100644
--- a/drivers/nfc/st95hf/core.c
+++ b/drivers/nfc/st95hf/core.c
@@ -926,10 +926,8 @@ static int st95hf_in_send_cmd(struct nfc_digital_dev *ddev,
int len_data_to_tag = 0;
skb_resp = nfc_alloc_recv_skb(MAX_RESPONSE_BUFFER_SIZE, GFP_KERNEL);
- if (!skb_resp) {
- rc = -ENOMEM;
- goto error;
- }
+ if (!skb_resp)
+ return -ENOMEM;
switch (stcontext->current_rf_tech) {
case NFC_DIGITAL_RF_TECH_106A:
@@ -986,7 +984,6 @@ static int st95hf_in_send_cmd(struct nfc_digital_dev *ddev,
free_skb_resp:
kfree_skb(skb_resp);
-error:
return rc;
}
@@ -1059,9 +1056,9 @@ static const struct spi_device_id st95hf_id[] = {
};
MODULE_DEVICE_TABLE(spi, st95hf_id);
-static const struct of_device_id st95hf_spi_of_match[] = {
- { .compatible = "st,st95hf" },
- { },
+static const struct of_device_id st95hf_spi_of_match[] __maybe_unused = {
+ { .compatible = "st,st95hf" },
+ {},
};
MODULE_DEVICE_TABLE(of, st95hf_spi_of_match);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 522c9b229f80..762125f2905f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2901,7 +2901,7 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->hmmaxd = le16_to_cpu(id->hmmaxd);
}
- ret = nvme_mpath_init(ctrl, id);
+ ret = nvme_mpath_init_identify(ctrl, id);
if (ret < 0)
goto out_free;
@@ -4364,6 +4364,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
min(default_ps_max_latency_us, (unsigned long)S32_MAX));
nvme_fault_inject_init(&ctrl->fault_inject, dev_name(ctrl->device));
+ nvme_mpath_init_ctrl(ctrl);
return 0;
out_free_name:
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index d9ab9e7871d0..256e87721a01 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2461,6 +2461,18 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
static void
__nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
{
+ int q;
+
+ /*
+ * If we are aborting io, the queues are no longer good; mark them
+ * all as not live.
+ */
+ if (ctrl->ctrl.queue_count > 1) {
+ for (q = 1; q < ctrl->ctrl.queue_count; q++)
+ clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags);
+ }
+ clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
+
/*
* If io queues are present, stop them and terminate all outstanding
* ios on them. As FC allocates FC exchange for each io, the
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 0551796517e6..f81871c7128a 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -781,9 +781,18 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
put_disk(head->disk);
}
-int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
- int error;
+ mutex_init(&ctrl->ana_lock);
+ timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
+ INIT_WORK(&ctrl->ana_work, nvme_ana_work);
+}
+
+int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+{
+ size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT;
+ size_t ana_log_size;
+ int error = 0;
/* check if multipath is enabled and we have the capability */
if (!multipath || !ctrl->subsys ||
@@ -795,37 +804,31 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);
- mutex_init(&ctrl->ana_lock);
- timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
- ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
- ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
- ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
-
- if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
+ ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
+ ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) +
+ ctrl->max_namespaces * sizeof(__le32);
+ if (ana_log_size > max_transfer_size) {
dev_err(ctrl->device,
- "ANA log page size (%zd) larger than MDTS (%d).\n",
- ctrl->ana_log_size,
- ctrl->max_hw_sectors << SECTOR_SHIFT);
+ "ANA log page size (%zd) larger than MDTS (%zd).\n",
+ ana_log_size, max_transfer_size);
dev_err(ctrl->device, "disabling ANA support.\n");
- return 0;
+ goto out_uninit;
}
-
- INIT_WORK(&ctrl->ana_work, nvme_ana_work);
- kfree(ctrl->ana_log_buf);
- ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
- if (!ctrl->ana_log_buf) {
- error = -ENOMEM;
- goto out;
+ if (ana_log_size > ctrl->ana_log_size) {
+ nvme_mpath_stop(ctrl);
+ kfree(ctrl->ana_log_buf);
+ ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
+ if (!ctrl->ana_log_buf)
+ return -ENOMEM;
}
-
+ ctrl->ana_log_size = ana_log_size;
error = nvme_read_ana_log(ctrl);
if (error)
- goto out_free_ana_log_buf;
+ goto out_uninit;
return 0;
-out_free_ana_log_buf:
- kfree(ctrl->ana_log_buf);
- ctrl->ana_log_buf = NULL;
-out:
+
+out_uninit:
+ nvme_mpath_uninit(ctrl);
return error;
}
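The split separates one-shot controller state (mutex, timer, ANA work item), now initialized once in nvme_mpath_init_ctrl(), from the Identify-driven setup in nvme_mpath_init_identify(), which can run again on every reconnect. Re-running mutex_init() or timer_setup() on live objects is undefined, and the log buffer is now reallocated only when it must grow. A condensed sketch of the pattern, with hypothetical names:

struct ctrl {
	struct mutex lock;
	struct work_struct work;
	void *log_buf;
	size_t log_size;
};

/* One-time setup: called exactly once per controller lifetime. */
static void ctrl_init_once(struct ctrl *c, work_func_t fn)
{
	mutex_init(&c->lock);
	INIT_WORK(&c->work, fn);
}

/* Re-runnable setup: safe to call on every reconnect. */
static int ctrl_init_identify(struct ctrl *c, size_t need)
{
	if (need > c->log_size) {	/* reallocate only when growing */
		kfree(c->log_buf);
		c->log_buf = kmalloc(need, GFP_KERNEL);
		if (!c->log_buf)
			return -ENOMEM;
	}
	c->log_size = need;
	return 0;
}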
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 05f31a2c64bb..0015860ec12b 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -712,7 +712,8 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
-int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
+int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
+void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
@@ -780,7 +781,10 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
static inline void nvme_trace_bio_complete(struct request *req)
{
}
-static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
+static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
+{
+}
+static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
struct nvme_id_ctrl *id)
{
if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 0222e23f5936..34f4b3402f7c 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -943,7 +943,6 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
if (ret <= 0)
return ret;
- nvme_tcp_advance_req(req, ret);
if (queue->data_digest)
nvme_tcp_ddgst_update(queue->snd_hash, page,
offset, ret);
@@ -960,6 +959,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
}
return 1;
}
+ nvme_tcp_advance_req(req, ret);
}
return -EAGAIN;
}
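Relocating nvme_tcp_advance_req() to the end of the loop body guarantees that the digest update and the last-chunk test both see the offset of the bytes just sent; advancing first made them operate on an already-moved cursor. The shape of the fix, with hypothetical helpers:

	/* Chunked send: consume 'ret' bytes first, advance the cursor last. */
	while (req_has_data(req)) {
		ssize_t ret = send_next_chunk(req);

		if (ret <= 0)
			return ret;
		update_digest(req, ret);	/* uses pre-advance offset */
		if (is_last_chunk(req, ret))
			return finish_request(req);
		advance_cursor(req, ret);	/* only now move the window */
	}
	return -EAGAIN;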
@@ -1140,7 +1140,8 @@ static void nvme_tcp_io_work(struct work_struct *w)
pending = true;
else if (unlikely(result < 0))
break;
- }
+ } else
+ pending = !llist_empty(&queue->req_list);
result = nvme_tcp_try_recv(queue);
if (result > 0)
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index e7a367cf6d36..dcd49a72f2f3 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -975,10 +975,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
case nvme_admin_keep_alive:
req->execute = nvmet_execute_keep_alive;
return 0;
+ default:
+ return nvmet_report_invalid_opcode(req);
}
-
- pr_debug("unhandled cmd %d on qid %d\n", cmd->common.opcode,
- req->sq->qid);
- req->error_loc = offsetof(struct nvme_common_command, opcode);
- return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 25cc2ee8de3f..1853db38b682 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1372,7 +1372,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
goto out_free_changed_ns_list;
if (subsys->cntlid_min > subsys->cntlid_max)
- goto out_free_changed_ns_list;
+ goto out_free_sqs;
ret = ida_simple_get(&cntlid_ida,
subsys->cntlid_min, subsys->cntlid_max,
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 4845d12e374a..fc3645fc2c24 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -379,7 +379,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
req->execute = nvmet_execute_disc_identify;
return 0;
default:
- pr_err("unhandled cmd %d\n", cmd->common.opcode);
+ pr_debug("unhandled cmd %d\n", cmd->common.opcode);
req->error_loc = offsetof(struct nvme_common_command, opcode);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 1420a8e3e0b1..7d0f3523fdab 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -94,7 +94,7 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
req->execute = nvmet_execute_prop_get;
break;
default:
- pr_err("received unknown capsule type 0x%x\n",
+ pr_debug("received unknown capsule type 0x%x\n",
cmd->fabrics.fctype);
req->error_loc = offsetof(struct nvmf_common_command, fctype);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
@@ -284,13 +284,13 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
struct nvme_command *cmd = req->cmd;
if (!nvme_is_fabrics(cmd)) {
- pr_err("invalid command 0x%x on unconnected queue.\n",
+ pr_debug("invalid command 0x%x on unconnected queue.\n",
cmd->fabrics.opcode);
req->error_loc = offsetof(struct nvme_common_command, opcode);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}
if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
- pr_err("invalid capsule type 0x%x on unconnected queue.\n",
+ pr_debug("invalid capsule type 0x%x on unconnected queue.\n",
cmd->fabrics.fctype);
req->error_loc = offsetof(struct nvmf_common_command, fctype);
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 9a8b3726a37c..429263ca9b97 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -258,7 +258,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
sector = nvmet_lba_to_sect(req->ns, req->cmd->rw.slba);
- if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
+ if (nvmet_use_inline_bvec(req)) {
bio = &req->b.inline_bio;
bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
} else {
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 715d4376c997..7fdbdc496597 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -49,9 +49,11 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
ns->file = filp_open(ns->device_path, flags, 0);
if (IS_ERR(ns->file)) {
- pr_err("failed to open file %s: (%ld)\n",
- ns->device_path, PTR_ERR(ns->file));
- return PTR_ERR(ns->file);
+ ret = PTR_ERR(ns->file);
+ pr_err("failed to open file %s: (%d)\n",
+ ns->device_path, ret);
+ ns->file = NULL;
+ return ret;
}
ret = nvmet_file_ns_revalidate(ns);
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 74b3b150e1a5..cb30cb942e1d 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -590,8 +590,10 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
0 /* no quirks, we're perfect! */);
- if (ret)
+ if (ret) {
+ kfree(ctrl);
goto out;
+ }
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
WARN_ON_ONCE(1);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 5566ed403576..d69a409515d6 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -616,4 +616,10 @@ static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
}
+static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
+{
+ return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
+ req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
+}
+
#endif /* _NVMET_H */
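nvmet_use_inline_bvec() widens the old transfer_len-only test: the preallocated inline bio holds at most NVMET_MAX_INLINE_BIOVEC segments, so a short but highly fragmented transfer must still take the allocation path. The call sites (the io-cmd-bdev.c hunk above and the passthru.c hunk below) follow this shape:

	struct bio *bio;

	if (nvmet_use_inline_bvec(req)) {
		/* small, compact transfer: reuse the embedded bio */
		bio = &req->b.inline_bio;
		bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
	} else {
		bio = bio_alloc(GFP_KERNEL, bio_max_segs(req->sg_cnt));
	}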
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 2798944899b7..39b1473f7204 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -194,7 +194,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
if (req->sg_cnt > BIO_MAX_VECS)
return -EINVAL;
- if (req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN) {
+ if (nvmet_use_inline_bvec(req)) {
bio = &req->p.inline_bio;
bio_init(bio, req->inline_bvec, ARRAY_SIZE(req->inline_bvec));
} else {
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 6c1f3ab7649c..7d607f435e36 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -700,7 +700,7 @@ static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct nvmet_rdma_rsp *rsp =
container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);
- struct nvmet_rdma_queue *queue = cq->cq_context;
+ struct nvmet_rdma_queue *queue = wc->qp->qp_context;
nvmet_rdma_release_rsp(rsp);
@@ -786,7 +786,7 @@ static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct nvmet_rdma_rsp *rsp =
container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe);
- struct nvmet_rdma_queue *queue = cq->cq_context;
+ struct nvmet_rdma_queue *queue = wc->qp->qp_context;
struct rdma_cm_id *cm_id = rsp->queue->cm_id;
u16 status;
diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
index bbc4e71a16ff..38800e86ed8a 100644
--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
+++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
@@ -294,6 +294,9 @@ mlxbf_tmfifo_get_next_desc(struct mlxbf_tmfifo_vring *vring)
if (vring->next_avail == virtio16_to_cpu(vdev, vr->avail->idx))
return NULL;
+ /* Make sure 'avail->idx' is visible already. */
+ virtio_rmb(false);
+
idx = vring->next_avail % vr->num;
head = virtio16_to_cpu(vdev, vr->avail->ring[idx]);
if (WARN_ON(head >= vr->num))
@@ -322,7 +325,7 @@ static void mlxbf_tmfifo_release_desc(struct mlxbf_tmfifo_vring *vring,
* done or not. Add a memory barrier here to make sure the update above
* completes before updating the idx.
*/
- mb();
+ virtio_mb(false);
vr->used->idx = cpu_to_virtio16(vdev, vr_idx + 1);
}
@@ -733,6 +736,12 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
desc = NULL;
fifo->vring[is_rx] = NULL;
+ /*
+ * Make sure the load/store are in order before
+ * returning back to virtio.
+ */
+ virtio_mb(false);
+
/* Notify upper layer that packet is done. */
spin_lock_irqsave(&fifo->spin_lock[is_rx], flags);
vring_interrupt(0, vring->vq);
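All three hunks switch to virtio's own barrier helpers, whose bool argument selects between SMP-style and DMA-style barriers depending on whether ordering against a device is required. The consumer-side rule the first hunk enforces:

	/* Virtqueue reader: the idx load must be ordered before ring[] loads. */
	if (vring->next_avail == virtio16_to_cpu(vdev, vr->avail->idx))
		return NULL;		/* nothing new posted */

	virtio_rmb(false);		/* false: full dma_rmb(), not smp-only */
	idx = vring->next_avail % vr->num;
	head = virtio16_to_cpu(vdev, vr->avail->ring[idx]);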
diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
index 69e86cd599d3..8a70df60142c 100644
--- a/drivers/platform/surface/aggregator/controller.c
+++ b/drivers/platform/surface/aggregator/controller.c
@@ -2483,8 +2483,7 @@ int ssam_irq_setup(struct ssam_controller *ctrl)
* interrupt, and let the SAM resume callback during the controller
* resume process clear it.
*/
- const int irqf = IRQF_SHARED | IRQF_ONESHOT |
- IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN;
+ const int irqf = IRQF_ONESHOT | IRQF_TRIGGER_RISING | IRQF_NO_AUTOEN;
gpiod = gpiod_get(dev, "ssam_wakeup-int", GPIOD_ASIS);
if (IS_ERR(gpiod))
diff --git a/drivers/platform/surface/surface_dtx.c b/drivers/platform/surface/surface_dtx.c
index 63ce587e79e3..5d9b758a99bb 100644
--- a/drivers/platform/surface/surface_dtx.c
+++ b/drivers/platform/surface/surface_dtx.c
@@ -527,20 +527,14 @@ static __poll_t surface_dtx_poll(struct file *file, struct poll_table_struct *pt
struct sdtx_client *client = file->private_data;
__poll_t events = 0;
- if (down_read_killable(&client->ddev->lock))
- return -ERESTARTSYS;
-
- if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags)) {
- up_read(&client->ddev->lock);
+ if (test_bit(SDTX_DEVICE_SHUTDOWN_BIT, &client->ddev->flags))
return EPOLLHUP | EPOLLERR;
- }
poll_wait(file, &client->ddev->waitq, pt);
if (!kfifo_is_empty(&client->buffer))
events |= EPOLLIN | EPOLLRDNORM;
- up_read(&client->ddev->lock);
return events;
}
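Besides dropping the sleeping lock, this fixes a type error: poll handlers return __poll_t, an unsigned event mask, so -ERESTARTSYS was never a valid return value. The canonical shape (struct and shutdown flag hypothetical):

static __poll_t example_poll(struct file *file, struct poll_table_struct *pt)
{
	struct example_client *client = file->private_data;
	__poll_t events = 0;

	if (READ_ONCE(client->shutdown))	/* hypothetical flag */
		return EPOLLHUP | EPOLLERR;

	poll_wait(file, &client->waitq, pt);	/* registers; never blocks */

	if (!kfifo_is_empty(&client->buffer))
		events |= EPOLLIN | EPOLLRDNORM;
	return events;
}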
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 2714f7c3843e..60592fb88e7a 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -711,7 +711,7 @@ config INTEL_HID_EVENT
config INTEL_INT0002_VGPIO
tristate "Intel ACPI INT0002 Virtual GPIO driver"
- depends on GPIOLIB && ACPI
+ depends on GPIOLIB && ACPI && PM_SLEEP
select GPIOLIB_IRQCHIP
help
Some peripherals on Bay Trail and Cherry Trail platforms signal a
diff --git a/drivers/platform/x86/dell/dell-smbios-wmi.c b/drivers/platform/x86/dell/dell-smbios-wmi.c
index a1753485159c..33f823772733 100644
--- a/drivers/platform/x86/dell/dell-smbios-wmi.c
+++ b/drivers/platform/x86/dell/dell-smbios-wmi.c
@@ -270,7 +270,8 @@ int init_dell_smbios_wmi(void)
void exit_dell_smbios_wmi(void)
{
- wmi_driver_unregister(&dell_smbios_wmi_driver);
+ if (wmi_supported)
+ wmi_driver_unregister(&dell_smbios_wmi_driver);
}
MODULE_DEVICE_TABLE(wmi, dell_smbios_wmi_id_table);
diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
index 13d57434e60f..5529d7b0abea 100644
--- a/drivers/platform/x86/gigabyte-wmi.c
+++ b/drivers/platform/x86/gigabyte-wmi.c
@@ -133,31 +133,21 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev)
return r;
}
+#define DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME(name) \
+ { .matches = { \
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), \
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, name), \
+ }}
+
static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
- { .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "B550 GAMING X V2"),
- }},
- { .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "B550M AORUS PRO-P"),
- }},
- { .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "B550M DS3H"),
- }},
- { .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "Z390 I AORUS PRO WIFI-CF"),
- }},
- { .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "X570 AORUS ELITE"),
- }},
- { .matches = {
- DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "X570 I AORUS PRO WIFI"),
- }},
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 AORUS ELITE"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 I AORUS PRO WIFI"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 UD"),
{ }
};
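The new macro reduces each allow-list entry to one line, so the two freshly supported boards (B550 AORUS ELITE, X570 UD) are single-line additions. At probe time such a table is typically consulted like this (a sketch, assuming this table):

	/* Gate the driver on the DMI allow-list. */
	if (!dmi_check_system(gigabyte_wmi_known_working_platforms))
		return -ENODEV;	/* unknown board: don't expose sensors */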
diff --git a/drivers/platform/x86/hp-wireless.c b/drivers/platform/x86/hp-wireless.c
index 12c31fd5d5ae..0753ef18e721 100644
--- a/drivers/platform/x86/hp-wireless.c
+++ b/drivers/platform/x86/hp-wireless.c
@@ -17,12 +17,14 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alex Hung");
MODULE_ALIAS("acpi*:HPQ6001:*");
MODULE_ALIAS("acpi*:WSTADEF:*");
+MODULE_ALIAS("acpi*:AMDI0051:*");
static struct input_dev *hpwl_input_dev;
static const struct acpi_device_id hpwl_ids[] = {
{"HPQ6001", 0},
{"WSTADEF", 0},
+ {"AMDI0051", 0},
{"", 0},
};
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 799cbe2ffcf3..8c0867bda828 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -88,6 +88,9 @@ MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids);
static int lis3lv02d_acpi_init(struct lis3lv02d *lis3)
{
struct acpi_device *dev = lis3->bus_priv;
+ if (!lis3->init_required)
+ return 0;
+
if (acpi_evaluate_object(dev->handle, METHOD_NAME__INI,
NULL, NULL) != AE_OK)
return -EINVAL;
@@ -356,6 +359,7 @@ static int lis3lv02d_add(struct acpi_device *device)
}
/* call the core layer do its init */
+ lis3_dev.init_required = true;
ret = lis3lv02d_init_device(&lis3_dev);
if (ret)
return ret;
@@ -403,11 +407,27 @@ static int lis3lv02d_suspend(struct device *dev)
static int lis3lv02d_resume(struct device *dev)
{
+ lis3_dev.init_required = false;
+ lis3lv02d_poweron(&lis3_dev);
+ return 0;
+}
+
+static int lis3lv02d_restore(struct device *dev)
+{
+ lis3_dev.init_required = true;
lis3lv02d_poweron(&lis3_dev);
return 0;
}
-static SIMPLE_DEV_PM_OPS(hp_accel_pm, lis3lv02d_suspend, lis3lv02d_resume);
+static const struct dev_pm_ops hp_accel_pm = {
+ .suspend = lis3lv02d_suspend,
+ .resume = lis3lv02d_resume,
+ .freeze = lis3lv02d_suspend,
+ .thaw = lis3lv02d_resume,
+ .poweroff = lis3lv02d_suspend,
+ .restore = lis3lv02d_restore,
+};
+
#define HP_ACCEL_PM (&hp_accel_pm)
#else
#define HP_ACCEL_PM NULL
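SIMPLE_DEV_PM_OPS() wires one suspend/resume pair into all six system-sleep slots, which is precisely the bug here: after hibernation, .restore must redo the ACPI _INI initialization, while a normal resume must skip it. Simplified expansion of the replaced macro:

/* SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) expands, roughly, to: */
const struct dev_pm_ops name = {
	.suspend  = suspend_fn,
	.resume   = resume_fn,
	.freeze   = suspend_fn,
	.thaw     = resume_fn,
	.poweroff = suspend_fn,
	.restore  = resume_fn,	/* same as resume -- the behavior being fixed */
};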
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 6cb5ad4be231..387817290921 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -57,8 +57,8 @@ enum {
};
enum {
- SMBC_CONSERVATION_ON = 3,
- SMBC_CONSERVATION_OFF = 5,
+ SBMC_CONSERVATION_ON = 3,
+ SBMC_CONSERVATION_OFF = 5,
};
enum {
@@ -182,9 +182,9 @@ static int eval_gbmd(acpi_handle handle, unsigned long *res)
return eval_int(handle, "GBMD", res);
}
-static int exec_smbc(acpi_handle handle, unsigned long arg)
+static int exec_sbmc(acpi_handle handle, unsigned long arg)
{
- return exec_simple_method(handle, "SMBC", arg);
+ return exec_simple_method(handle, "SBMC", arg);
}
static int eval_hals(acpi_handle handle, unsigned long *res)
@@ -477,7 +477,7 @@ static ssize_t conservation_mode_store(struct device *dev,
if (err)
return err;
- err = exec_smbc(priv->adev->handle, state ? SMBC_CONSERVATION_ON : SMBC_CONSERVATION_OFF);
+ err = exec_sbmc(priv->adev->handle, state ? SBMC_CONSERVATION_ON : SBMC_CONSERVATION_OFF);
if (err)
return err;
@@ -809,6 +809,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
{
struct ideapad_dytc_priv *dytc = container_of(pprof, struct ideapad_dytc_priv, pprof);
struct ideapad_private *priv = dytc->priv;
+ unsigned long output;
int err;
err = mutex_lock_interruptible(&dytc->mutex);
@@ -829,7 +830,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
/* Determine if we are in CQL mode. This alters the commands we do */
err = dytc_cql_command(priv, DYTC_SET_COMMAND(DYTC_FUNCTION_MMC, perfmode, 1),
- NULL);
+ &output);
if (err)
goto unlock;
}
diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c
index 289c6655d425..569342aa8926 100644
--- a/drivers/platform/x86/intel_int0002_vgpio.c
+++ b/drivers/platform/x86/intel_int0002_vgpio.c
@@ -51,6 +51,12 @@
#define GPE0A_STS_PORT 0x420
#define GPE0A_EN_PORT 0x428
+struct int0002_data {
+ struct gpio_chip chip;
+ int parent_irq;
+ int wake_enable_count;
+};
+
/*
* As this is not a real GPIO at all, but just a hack to model an event in
* ACPI the get / set functions are dummy functions.
@@ -98,14 +104,16 @@ static void int0002_irq_mask(struct irq_data *data)
static int int0002_irq_set_wake(struct irq_data *data, unsigned int on)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
- struct platform_device *pdev = to_platform_device(chip->parent);
- int irq = platform_get_irq(pdev, 0);
+ struct int0002_data *int0002 = container_of(chip, struct int0002_data, chip);
- /* Propagate to parent irq */
+ /*
+ * Applying the wakeup flag to our parent IRQ is delayed until system
+ * suspend, because we only want to do this when using s2idle.
+ */
if (on)
- enable_irq_wake(irq);
+ int0002->wake_enable_count++;
else
- disable_irq_wake(irq);
+ int0002->wake_enable_count--;
return 0;
}
@@ -135,7 +143,7 @@ static bool int0002_check_wake(void *data)
return (gpe_sts_reg & GPE0A_PME_B0_STS_BIT);
}
-static struct irq_chip int0002_byt_irqchip = {
+static struct irq_chip int0002_irqchip = {
.name = DRV_NAME,
.irq_ack = int0002_irq_ack,
.irq_mask = int0002_irq_mask,
@@ -143,21 +151,9 @@ static struct irq_chip int0002_byt_irqchip = {
.irq_set_wake = int0002_irq_set_wake,
};
-static struct irq_chip int0002_cht_irqchip = {
- .name = DRV_NAME,
- .irq_ack = int0002_irq_ack,
- .irq_mask = int0002_irq_mask,
- .irq_unmask = int0002_irq_unmask,
- /*
- * No set_wake, on CHT the IRQ is typically shared with the ACPI SCI
- * and we don't want to mess with the ACPI SCI irq settings.
- */
- .flags = IRQCHIP_SKIP_SET_WAKE,
-};
-
static const struct x86_cpu_id int0002_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &int0002_byt_irqchip),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &int0002_cht_irqchip),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
{}
};
@@ -172,8 +168,9 @@ static int int0002_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct x86_cpu_id *cpu_id;
- struct gpio_chip *chip;
+ struct int0002_data *int0002;
struct gpio_irq_chip *girq;
+ struct gpio_chip *chip;
int irq, ret;
/* Menlow has a different INT0002 device? <sigh> */
@@ -185,10 +182,13 @@ static int int0002_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
- if (!chip)
+ int0002 = devm_kzalloc(dev, sizeof(*int0002), GFP_KERNEL);
+ if (!int0002)
return -ENOMEM;
+ int0002->parent_irq = irq;
+
+ chip = &int0002->chip;
chip->label = DRV_NAME;
chip->parent = dev;
chip->owner = THIS_MODULE;
@@ -214,7 +214,7 @@ static int int0002_probe(struct platform_device *pdev)
}
girq = &chip->irq;
- girq->chip = (struct irq_chip *)cpu_id->driver_data;
+ girq->chip = &int0002_irqchip;
/* This let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
@@ -230,6 +230,7 @@ static int int0002_probe(struct platform_device *pdev)
acpi_register_wakeup_handler(irq, int0002_check_wake, NULL);
device_init_wakeup(dev, true);
+ dev_set_drvdata(dev, int0002);
return 0;
}
@@ -240,6 +241,36 @@ static int int0002_remove(struct platform_device *pdev)
return 0;
}
+static int int0002_suspend(struct device *dev)
+{
+ struct int0002_data *int0002 = dev_get_drvdata(dev);
+
+ /*
+ * The INT0002 parent IRQ is often shared with the ACPI GPE IRQ, so
+ * don't muck with it when firmware-based suspend is used; otherwise
+ * we may cause spurious wakeups from firmware-managed suspend.
+ */
+ if (!pm_suspend_via_firmware() && int0002->wake_enable_count)
+ enable_irq_wake(int0002->parent_irq);
+
+ return 0;
+}
+
+static int int0002_resume(struct device *dev)
+{
+ struct int0002_data *int0002 = dev_get_drvdata(dev);
+
+ if (!pm_suspend_via_firmware() && int0002->wake_enable_count)
+ disable_irq_wake(int0002->parent_irq);
+
+ return 0;
+}
+
+static const struct dev_pm_ops int0002_pm_ops = {
+ .suspend = int0002_suspend,
+ .resume = int0002_resume,
+};
+
static const struct acpi_device_id int0002_acpi_ids[] = {
{ "INT0002", 0 },
{ },
@@ -250,6 +281,7 @@ static struct platform_driver int0002_driver = {
.driver = {
.name = DRV_NAME,
.acpi_match_table = int0002_acpi_ids,
+ .pm = &int0002_pm_ops,
},
.probe = int0002_probe,
.remove = int0002_remove,
diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
index 05cced59e251..f58b8543f6ac 100644
--- a/drivers/platform/x86/intel_punit_ipc.c
+++ b/drivers/platform/x86/intel_punit_ipc.c
@@ -312,6 +312,7 @@ static const struct acpi_device_id punit_ipc_acpi_ids[] = {
{ "INT34D4", 0 },
{ }
};
+MODULE_DEVICE_TABLE(acpi, punit_ipc_acpi_ids);
static struct platform_driver intel_punit_ipc_driver = {
.probe = intel_punit_ipc_probe,
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 90fe4f8f3c2c..bde740d6120e 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -115,6 +115,32 @@ static const struct ts_dmi_data chuwi_hi10_plus_data = {
.properties = chuwi_hi10_plus_props,
};
+static const struct property_entry chuwi_hi10_pro_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 8),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 8),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1912),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1272),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10-pro.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data chuwi_hi10_pro_data = {
+ .embedded_fw = {
+ .name = "silead/gsl1680-chuwi-hi10-pro.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 42504,
+ .sha256 = { 0xdb, 0x92, 0x68, 0xa8, 0xdb, 0x81, 0x31, 0x00,
+ 0x1f, 0x58, 0x89, 0xdb, 0x19, 0x1b, 0x15, 0x8c,
+ 0x05, 0x14, 0xf4, 0x95, 0xba, 0x15, 0x45, 0x98,
+ 0x42, 0xa3, 0xbb, 0x65, 0xe3, 0x30, 0xa5, 0x93 },
+ },
+ .acpi_name = "MSSL1680:00",
+ .properties = chuwi_hi10_pro_props,
+};
+
static const struct property_entry chuwi_vi8_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
PROPERTY_ENTRY_U32("touchscreen-min-y", 6),
@@ -916,6 +942,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Chuwi Hi10 Prus (CWI597) */
+ .driver_data = (void *)&chuwi_hi10_pro_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Hi10 pro tablet"),
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ },
+ },
+ {
/* Chuwi Vi8 (CWI506) */
.driver_data = (void *)&chuwi_vi8_data,
.matches = {
@@ -1097,6 +1132,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Mediacom WinPad 7.0 W700 (same hw as Wintron surftab 7") */
+ .driver_data = (void *)&trekstor_surftab_wintron70_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "WinPad 7 W10 - WPW700"),
+ },
+ },
+ {
/* Mediacom Flexbook Edge 11 (same hw as TS Primebook C11) */
.driver_data = (void *)&trekstor_primebook_c11_data,
.matches = {
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 530e5f90095e..0d1034e3ed0f 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -324,7 +324,7 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!bp->base) {
dev_err(&pdev->dev, "io_remap bar0\n");
err = -ENOMEM;
- goto out;
+ goto out_release_regions;
}
bp->reg = bp->base + OCP_REGISTER_OFFSET;
bp->tod = bp->base + TOD_REGISTER_OFFSET;
@@ -347,6 +347,8 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
out:
+ pci_iounmap(pdev, bp->base);
+out_release_regions:
pci_release_regions(pdev);
out_disable:
pci_disable_device(pdev);
diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
index 50ec53d67a4c..db4c265287ae 100644
--- a/drivers/rapidio/rio_cm.c
+++ b/drivers/rapidio/rio_cm.c
@@ -2127,6 +2127,14 @@ static int riocm_add_mport(struct device *dev,
return -ENODEV;
}
+ cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
+ if (!cm->rx_wq) {
+ rio_release_inb_mbox(mport, cmbox);
+ rio_release_outb_mbox(mport, cmbox);
+ kfree(cm);
+ return -ENOMEM;
+ }
+
/*
* Allocate and register inbound messaging buffers to be ready
* to receive channel and system management requests
@@ -2137,15 +2145,6 @@ static int riocm_add_mport(struct device *dev,
cm->rx_slots = RIOCM_RX_RING_SIZE;
mutex_init(&cm->rx_lock);
riocm_rx_fill(cm, RIOCM_RX_RING_SIZE);
- cm->rx_wq = create_workqueue(DRV_NAME "/rxq");
- if (!cm->rx_wq) {
- riocm_error("failed to allocate IBMBOX_%d on %s",
- cmbox, mport->name);
- rio_release_outb_mbox(mport, cmbox);
- kfree(cm);
- return -ENOMEM;
- }
-
INIT_WORK(&cm->rx_work, rio_ibmsg_handler);
cm->tx_slot = 0;
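Creating the workqueue before the inbound buffers are registered also repairs the unwind order: on failure, everything acquired so far (both mailboxes, the cm allocation) is released in reverse, and no rx buffers have been queued yet. The general kernel idiom, as a sketch with hypothetical resources:

	a = acquire_a();
	if (!a)
		return -ENOMEM;
	b = acquire_b();
	if (!b)
		goto err_a;
	c = acquire_c();
	if (!c)
		goto err_b;
	return 0;

err_b:
	release_b(b);
err_a:
	release_a(a);
	return -ENOMEM;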
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 260860cf3aa1..5a0c2f07a3a2 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -118,24 +118,6 @@ static struct device_driver netiucv_driver = {
.bus = &iucv_bus,
};
-static int netiucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
-static void netiucv_callback_connack(struct iucv_path *, u8 *);
-static void netiucv_callback_connrej(struct iucv_path *, u8 *);
-static void netiucv_callback_connsusp(struct iucv_path *, u8 *);
-static void netiucv_callback_connres(struct iucv_path *, u8 *);
-static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
-static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
-
-static struct iucv_handler netiucv_handler = {
- .path_pending = netiucv_callback_connreq,
- .path_complete = netiucv_callback_connack,
- .path_severed = netiucv_callback_connrej,
- .path_quiesced = netiucv_callback_connsusp,
- .path_resumed = netiucv_callback_connres,
- .message_pending = netiucv_callback_rx,
- .message_complete = netiucv_callback_txdone
-};
-
/**
* Per connection profiling data
*/
@@ -774,6 +756,16 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
}
}
+static struct iucv_handler netiucv_handler = {
+ .path_pending = netiucv_callback_connreq,
+ .path_complete = netiucv_callback_connack,
+ .path_severed = netiucv_callback_connrej,
+ .path_quiesced = netiucv_callback_connsusp,
+ .path_resumed = netiucv_callback_connres,
+ .message_pending = netiucv_callback_rx,
+ .message_complete = netiucv_callback_txdone,
+};
+
static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
{
struct iucv_event *ev = arg;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index fd9b869d278e..f4d554ea0c93 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -417,13 +417,17 @@ enum qeth_qdio_out_buffer_state {
QETH_QDIO_BUF_EMPTY,
/* Filled by driver; owned by hardware in order to be sent. */
QETH_QDIO_BUF_PRIMED,
- /* Discovered by the TX completion code: */
- QETH_QDIO_BUF_PENDING,
- /* Finished by the TX completion code: */
- QETH_QDIO_BUF_NEED_QAOB,
- /* Received QAOB notification on CQ: */
- QETH_QDIO_BUF_QAOB_OK,
- QETH_QDIO_BUF_QAOB_ERROR,
+};
+
+enum qeth_qaob_state {
+ QETH_QAOB_ISSUED,
+ QETH_QAOB_PENDING,
+ QETH_QAOB_DONE,
+};
+
+struct qeth_qaob_priv1 {
+ unsigned int state;
+ u8 queue_no;
};
struct qeth_qdio_out_buffer {
@@ -433,9 +437,8 @@ struct qeth_qdio_out_buffer {
unsigned int frames;
unsigned int bytes;
struct sk_buff_head skb_list;
- int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER];
+ DECLARE_BITMAP(from_kmem_cache, QDIO_MAX_ELEMENTS_PER_BUFFER);
- struct qeth_qdio_out_q *q;
struct list_head list_entry;
struct qaob *aob;
};
@@ -483,6 +486,7 @@ struct qeth_out_q_stats {
u64 stopped;
u64 doorbell;
u64 coal_frames;
+ u64 completion_irq;
u64 completion_yield;
u64 completion_timer;
@@ -526,6 +530,7 @@ struct qeth_qdio_out_q {
unsigned int coalesce_usecs;
unsigned int max_coalesced_frames;
+ unsigned int rescan_usecs;
};
#define qeth_for_each_output_queue(card, q, i) \
@@ -612,7 +617,6 @@ struct qeth_channel {
struct ccw_device *ccwdev;
struct qeth_cmd_buffer *active_cmd;
enum qeth_channel_states state;
- atomic_t irq_pending;
};
struct qeth_reply {
@@ -662,11 +666,6 @@ static inline struct ccw1 *__ccw_from_cmd(struct qeth_cmd_buffer *iob)
return (struct ccw1 *)(iob->data + ALIGN(iob->length, 8));
}
-static inline bool qeth_trylock_channel(struct qeth_channel *channel)
-{
- return atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0;
-}
-
/**
* OSA card related definitions
*/
@@ -886,13 +885,24 @@ static inline bool qeth_card_hw_is_reachable(struct qeth_card *card)
return card->state == CARD_STATE_SOFTSETUP;
}
+static inline bool qeth_use_tx_irqs(struct qeth_card *card)
+{
+ return !IS_IQD(card);
+}
+
static inline void qeth_unlock_channel(struct qeth_card *card,
struct qeth_channel *channel)
{
- atomic_set(&channel->irq_pending, 0);
+ xchg(&channel->active_cmd, NULL);
wake_up(&card->wait_q);
}
+static inline bool qeth_trylock_channel(struct qeth_channel *channel,
+ struct qeth_cmd_buffer *cmd)
+{
+ return cmpxchg(&channel->active_cmd, NULL, cmd) == NULL;
+}
+
struct qeth_trap_id {
__u16 lparnr;
char vmname[8];
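Replacing the irq_pending flag with a cmpxchg() on active_cmd turns the command pointer itself into the lock: a successful swap from NULL both claims the channel and records the owning command, so the separate atomic and the explicit "channel->active_cmd = NULL" bookkeeping in qeth_irq() can go. The idiom in isolation:

	/* Claim: succeeds only when no command currently owns the channel. */
	if (cmpxchg(&channel->active_cmd, NULL, iob) != NULL)
		return -EBUSY;		/* channel busy */

	/* ... issue the command ... */

	/* Release: publish NULL, then wake waiters. */
	xchg(&channel->active_cmd, NULL);
	wake_up(&card->wait_q);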
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index a1f08e9aa064..62f88ccbd03f 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -70,9 +70,6 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
unsigned int data_length);
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);
-static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
- struct qeth_qdio_out_buffer *buf,
- enum iucv_tx_notify notification);
static void qeth_close_dev_handler(struct work_struct *work)
{
@@ -434,65 +431,6 @@ static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
return n;
}
-static void qeth_qdio_handle_aob(struct qeth_card *card,
- unsigned long phys_aob_addr)
-{
- enum qeth_qdio_out_buffer_state new_state = QETH_QDIO_BUF_QAOB_OK;
- struct qaob *aob;
- struct qeth_qdio_out_buffer *buffer;
- enum iucv_tx_notify notification;
- struct qeth_qdio_out_q *queue;
- unsigned int i;
-
- aob = (struct qaob *) phys_to_virt(phys_aob_addr);
- QETH_CARD_TEXT(card, 5, "haob");
- QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
- buffer = (struct qeth_qdio_out_buffer *) aob->user1;
- QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
-
- if (aob->aorc) {
- QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
- new_state = QETH_QDIO_BUF_QAOB_ERROR;
- }
-
- switch (atomic_xchg(&buffer->state, new_state)) {
- case QETH_QDIO_BUF_PRIMED:
- /* Faster than TX completion code, let it handle the async
- * completion for us. It will also recycle the QAOB.
- */
- break;
- case QETH_QDIO_BUF_PENDING:
- /* TX completion code is active and will handle the async
- * completion for us. It will also recycle the QAOB.
- */
- break;
- case QETH_QDIO_BUF_NEED_QAOB:
- /* TX completion code is already finished. */
- notification = qeth_compute_cq_notification(aob->aorc, 1);
- qeth_notify_skbs(buffer->q, buffer, notification);
-
- /* Free dangling allocations. The attached skbs are handled by
- * qeth_tx_complete_pending_bufs(), and so is the QAOB.
- */
- for (i = 0;
- i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
- i++) {
- void *data = phys_to_virt(aob->sba[i]);
-
- if (data && buffer->is_header[i])
- kmem_cache_free(qeth_core_header_cache, data);
- buffer->is_header[i] = 0;
- }
-
- queue = buffer->q;
- atomic_set(&buffer->state, QETH_QDIO_BUF_EMPTY);
- napi_schedule(&queue->napi);
- break;
- default:
- WARN_ON_ONCE(1);
- }
-}
-
static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
void *data)
{
@@ -1268,7 +1206,6 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
}
- channel->active_cmd = NULL;
qeth_unlock_channel(card, channel);
rc = qeth_check_irb_error(card, cdev, irb);
@@ -1353,10 +1290,10 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
}
}
-static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
+static void qeth_tx_complete_buf(struct qeth_qdio_out_q *queue,
+ struct qeth_qdio_out_buffer *buf, bool error,
int budget)
{
- struct qeth_qdio_out_q *queue = buf->q;
struct sk_buff *skb;
/* Empty buffer? */
@@ -1400,17 +1337,18 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
int i;
/* is PCI flag set on buffer? */
- if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
+ if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) {
atomic_dec(&queue->set_pci_flags_count);
+ QETH_TXQ_STAT_INC(queue, completion_irq);
+ }
- qeth_tx_complete_buf(buf, error, budget);
+ qeth_tx_complete_buf(queue, buf, error, budget);
for (i = 0; i < queue->max_elements; ++i) {
void *data = phys_to_virt(buf->buffer->element[i].addr);
- if (data && buf->is_header[i])
+ if (__test_and_clear_bit(i, buf->from_kmem_cache) && data)
kmem_cache_free(qeth_core_header_cache, data);
- buf->is_header[i] = 0;
}
qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
@@ -1434,14 +1372,30 @@ static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
struct qeth_qdio_out_buffer *buf, *tmp;
list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
- if (drain || atomic_read(&buf->state) == QETH_QDIO_BUF_EMPTY) {
+ struct qeth_qaob_priv1 *priv;
+ struct qaob *aob = buf->aob;
+ enum iucv_tx_notify notify;
+ unsigned int i;
+
+ priv = (struct qeth_qaob_priv1 *)&aob->user1;
+ if (drain || READ_ONCE(priv->state) == QETH_QAOB_DONE) {
QETH_CARD_TEXT(card, 5, "fp");
QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);
- if (drain)
- qeth_notify_skbs(queue, buf,
- TX_NOTIFY_GENERALERROR);
- qeth_tx_complete_buf(buf, drain, budget);
+ notify = drain ? TX_NOTIFY_GENERALERROR :
+ qeth_compute_cq_notification(aob->aorc, 1);
+ qeth_notify_skbs(queue, buf, notify);
+ qeth_tx_complete_buf(queue, buf, drain, budget);
+
+ for (i = 0;
+ i < aob->sb_count && i < queue->max_elements;
+ i++) {
+ void *data = phys_to_virt(aob->sba[i]);
+
+ if (test_bit(i, buf->from_kmem_cache) && data)
+ kmem_cache_free(qeth_core_header_cache,
+ data);
+ }
list_del(&buf->list_entry);
qeth_free_out_buf(buf);
@@ -1713,11 +1667,10 @@ static int qeth_stop_channel(struct qeth_channel *channel)
rc = ccw_device_set_offline(cdev);
spin_lock_irq(get_ccwdev_lock(cdev));
- if (channel->active_cmd) {
+ if (channel->active_cmd)
dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
channel->active_cmd);
- channel->active_cmd = NULL;
- }
+
cdev->handler = NULL;
spin_unlock_irq(get_ccwdev_lock(cdev));
@@ -1730,7 +1683,7 @@ static int qeth_start_channel(struct qeth_channel *channel)
int rc;
channel->state = CH_STATE_DOWN;
- atomic_set(&channel->irq_pending, 0);
+ xchg(&channel->active_cmd, NULL);
spin_lock_irq(get_ccwdev_lock(cdev));
cdev->handler = qeth_irq;
@@ -2037,7 +1990,7 @@ static int qeth_send_control_data(struct qeth_card *card,
reply->param = reply_param;
timeout = wait_event_interruptible_timeout(card->wait_q,
- qeth_trylock_channel(channel),
+ qeth_trylock_channel(channel, iob),
timeout);
if (timeout <= 0) {
qeth_put_cmd(iob);
@@ -2057,8 +2010,6 @@ static int qeth_send_control_data(struct qeth_card *card,
spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
(addr_t) iob, 0, 0, timeout);
- if (!rc)
- channel->active_cmd = iob;
spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (rc) {
QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
@@ -2578,7 +2529,6 @@ static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx,
newbuf->buffer = q->qdio_bufs[bidx];
skb_queue_head_init(&newbuf->skb_list);
lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
- newbuf->q = q;
atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
q->bufs[bidx] = newbuf;
return 0;
@@ -2663,8 +2613,15 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
INIT_LIST_HEAD(&queue->pending_bufs);
spin_lock_init(&queue->lock);
timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
- queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
- queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
+ if (IS_IQD(card)) {
+ queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
+ queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
+ queue->rescan_usecs = QETH_TX_TIMER_USECS;
+ } else {
+ queue->coalesce_usecs = USEC_PER_SEC;
+ queue->max_coalesced_frames = 0;
+ queue->rescan_usecs = 10 * USEC_PER_SEC;
+ }
queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
}
@@ -3601,8 +3558,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
int count)
{
struct qeth_qdio_out_buffer *buf = queue->bufs[index];
- unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
struct qeth_card *card = queue->card;
+ unsigned int frames, usecs;
struct qaob *aob = NULL;
int rc;
int i;
@@ -3629,8 +3586,12 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
if (!buf->aob)
buf->aob = qdio_allocate_aob();
if (buf->aob) {
+ struct qeth_qaob_priv1 *priv;
+
aob = buf->aob;
- aob->user1 = (u64) buf;
+ priv = (struct qeth_qaob_priv1 *)&aob->user1;
+ priv->state = QETH_QAOB_ISSUED;
+ priv->queue_no = queue->queue_no;
}
}
} else {
@@ -3658,14 +3619,11 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
}
}
-
- if (atomic_read(&queue->set_pci_flags_count))
- qdio_flags |= QDIO_FLAG_PCI_OUT;
}
QETH_TXQ_STAT_INC(queue, doorbell);
- rc = do_QDIO(CARD_DDEV(card), qdio_flags, queue->queue_no, index, count,
- aob);
+ rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_OUTPUT, queue->queue_no,
+ index, count, aob);
switch (rc) {
case 0:
@@ -3673,17 +3631,20 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
/* ignore temporary SIGA errors without busy condition */
/* Fake the TX completion interrupt: */
- if (IS_IQD(card)) {
- unsigned int frames = READ_ONCE(queue->max_coalesced_frames);
- unsigned int usecs = READ_ONCE(queue->coalesce_usecs);
+ frames = READ_ONCE(queue->max_coalesced_frames);
+ usecs = READ_ONCE(queue->coalesce_usecs);
- if (frames && queue->coalesced_frames >= frames) {
- napi_schedule(&queue->napi);
- queue->coalesced_frames = 0;
- QETH_TXQ_STAT_INC(queue, coal_frames);
- } else if (usecs) {
- qeth_tx_arm_timer(queue, usecs);
- }
+ if (frames && queue->coalesced_frames >= frames) {
+ napi_schedule(&queue->napi);
+ queue->coalesced_frames = 0;
+ QETH_TXQ_STAT_INC(queue, coal_frames);
+ } else if (qeth_use_tx_irqs(card) &&
+ atomic_read(&queue->used_buffers) >= 32) {
+ /* Old behaviour carried over from the qdio layer: */
+ napi_schedule(&queue->napi);
+ QETH_TXQ_STAT_INC(queue, coal_frames);
+ } else if (usecs) {
+ qeth_tx_arm_timer(queue, usecs);
}
break;
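For reference, the completion-kick policy this hunk arrives at reads as a small decision ladder. The sketch below restates it with plain ints standing in for the driver's atomics and READ_ONCE(); it is a paraphrase of the code above, not the driver's function:

#include <stdio.h>

enum tx_kick { KICK_NOW, KICK_LEGACY, ARM_TIMER, DO_NOTHING };

static enum tx_kick tx_completion_policy(unsigned int coalesced_frames,
					 unsigned int max_frames,
					 unsigned int used_buffers,
					 unsigned int usecs,
					 int uses_tx_irqs)
{
	if (max_frames && coalesced_frames >= max_frames)
		return KICK_NOW;	/* frame budget reached: poll now */
	if (uses_tx_irqs && used_buffers >= 32)
		return KICK_LEGACY;	/* old qdio-layer scan threshold */
	if (usecs)
		return ARM_TIMER;	/* defer to the completion timer */
	return DO_NOTHING;
}

int main(void)
{
	/* IQD-style queue that just met its 64-frame budget: */
	printf("%d\n", tx_completion_policy(64, 64, 10, 25, 0));
	/* OSA-style queue, no frame budget, 32+ buffers in flight: */
	printf("%d\n", tx_completion_policy(5, 0, 40, 1000000, 1));
	/* quiet OSA-style queue: just arm the rescan timer */
	printf("%d\n", tx_completion_policy(1, 0, 3, 1000000, 1));
	return 0;
}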
@@ -3769,6 +3730,18 @@ out:
}
EXPORT_SYMBOL_GPL(qeth_configure_cq);
+static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob)
+{
+ struct qeth_qaob_priv1 *priv = (struct qeth_qaob_priv1 *)&aob->user1;
+ unsigned int queue_no = priv->queue_no;
+
+ BUILD_BUG_ON(sizeof(*priv) > ARRAY_SIZE(aob->user1));
+
+ if (xchg(&priv->state, QETH_QAOB_DONE) == QETH_QAOB_PENDING &&
+ queue_no < card->qdio.no_out_queues)
+ napi_schedule(&card->qdio.out_qs[queue_no]->napi);
+}
+
static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
unsigned int queue, int first_element,
int count)
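The rewritten qeth_qdio_handle_aob() above resolves the race between the CQ handler and TX completion with a single xchg() on the state stashed in aob->user1: whichever side performs the second transition observes the other side's state and takes over the final completion. A runnable single-threaded model of that handoff, assuming only the three states the patch introduces:

#include <stdatomic.h>
#include <stdio.h>

enum qaob_state { QAOB_ISSUED, QAOB_PENDING, QAOB_DONE };

static const char *finisher(int cq_first)
{
	_Atomic int state = QAOB_ISSUED;

	if (cq_first)	/* CQ handler: mark DONE; the old value says who won */
		atomic_exchange(&state, QAOB_DONE);

	/* TX poll: claim PENDING unless the QAOB has already completed */
	if (atomic_exchange(&state, QAOB_PENDING) == QAOB_DONE)
		return "TX poll (QAOB was already done)";

	/* CQ handler runs later, sees PENDING and schedules the TX NAPI */
	if (atomic_exchange(&state, QAOB_DONE) == QAOB_PENDING)
		return "TX NAPI, kicked by the CQ handler";
	return "unreachable";
}

int main(void)
{
	printf("CQ completes first: finished by %s\n", finisher(1));
	printf("TX polls first:     finished by %s\n", finisher(0));
	return 0;
}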
@@ -3795,7 +3768,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
buffer->element[e].addr) {
unsigned long phys_aob_addr = buffer->element[e].addr;
- qeth_qdio_handle_aob(card, phys_aob_addr);
+ qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr));
++e;
}
qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
@@ -3831,36 +3804,14 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
unsigned long card_ptr)
{
struct qeth_card *card = (struct qeth_card *) card_ptr;
- struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
struct net_device *dev = card->dev;
- struct netdev_queue *txq;
- int i;
QETH_CARD_TEXT(card, 6, "qdouhdl");
if (qdio_error & QDIO_ERROR_FATAL) {
QETH_CARD_TEXT(card, 2, "achkcond");
netif_tx_stop_all_queues(dev);
qeth_schedule_recovery(card);
- return;
}
-
- for (i = first_element; i < (first_element + count); ++i) {
- struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)];
-
- qeth_handle_send_error(card, buf, qdio_error);
- qeth_clear_output_buffer(queue, buf, qdio_error, 0);
- }
-
- atomic_sub(count, &queue->used_buffers);
- qeth_check_outbound_queue(queue);
-
- txq = netdev_get_tx_queue(dev, __queue);
- /* xmit may have observed the full-condition, but not yet stopped the
- * txq. In which case the code below won't trigger. So before returning,
- * xmit will re-check the txq's fill level and wake it up if needed.
- */
- if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
- netif_tx_wake_queue(txq);
}
/**
@@ -4101,7 +4052,7 @@ static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
/* HW header is allocated from cache: */
if ((void *)hdr != skb->data)
- buf->is_header[element] = 1;
+ __set_bit(element, buf->from_kmem_cache);
/* HW header was pushed and is contiguous with linear part: */
else if (length > 0 && !PAGE_ALIGNED(data) &&
(data == (char *)hdr + hd_len))
@@ -5256,7 +5207,6 @@ static int qeth_qdio_establish(struct qeth_card *card)
init_data.int_parm = (unsigned long) card;
init_data.input_sbal_addr_array = in_sbal_ptrs;
init_data.output_sbal_addr_array = out_sbal_ptrs;
- init_data.scan_threshold = IS_IQD(card) ? 0 : 32;
if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
@@ -5956,9 +5906,10 @@ static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
/* Fetch completed RX buffers: */
if (!card->rx.b_count) {
card->rx.qdio_err = 0;
- card->rx.b_count = qdio_get_next_buffers(
- card->data.ccwdev, 0, &card->rx.b_index,
- &card->rx.qdio_err);
+ card->rx.b_count = qdio_inspect_queue(CARD_DDEV(card),
+ 0, true,
+ &card->rx.b_index,
+ &card->rx.qdio_err);
if (card->rx.b_count <= 0) {
card->rx.b_count = 0;
break;
@@ -6022,6 +5973,16 @@ int qeth_poll(struct napi_struct *napi, int budget)
work_done = qeth_rx_poll(card, budget);
+ if (qeth_use_tx_irqs(card)) {
+ struct qeth_qdio_out_q *queue;
+ unsigned int i;
+
+ qeth_for_each_output_queue(card, queue, i) {
+ if (!qeth_out_queue_is_empty(queue))
+ napi_schedule(&queue->napi);
+ }
+ }
+
if (card->options.cq == QETH_CQ_ENABLED)
qeth_cq_poll(card);
@@ -6055,6 +6016,8 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
struct qaob *aob = buffer->aob;
+ struct qeth_qaob_priv1 *priv;
+ enum iucv_tx_notify notify;
if (!aob) {
netdev_WARN_ONCE(card->dev,
@@ -6066,60 +6029,27 @@ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
- switch (atomic_cmpxchg(&buffer->state,
- QETH_QDIO_BUF_PRIMED,
- QETH_QDIO_BUF_PENDING)) {
- case QETH_QDIO_BUF_PRIMED:
- /* We have initial ownership, no QAOB (yet): */
+ priv = (struct qeth_qaob_priv1 *)&aob->user1;
+ /* QAOB hasn't completed yet: */
+ if (xchg(&priv->state, QETH_QAOB_PENDING) != QETH_QAOB_DONE) {
qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
- /* Handle race with qeth_qdio_handle_aob(): */
- switch (atomic_xchg(&buffer->state,
- QETH_QDIO_BUF_NEED_QAOB)) {
- case QETH_QDIO_BUF_PENDING:
- /* No concurrent QAOB notification. */
-
- /* Prepare the queue slot for immediate re-use: */
- qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
- if (qeth_alloc_out_buf(queue, bidx,
- GFP_ATOMIC)) {
- QETH_CARD_TEXT(card, 2, "outofbuf");
- qeth_schedule_recovery(card);
- }
-
- list_add(&buffer->list_entry,
- &queue->pending_bufs);
- /* Skip clearing the buffer: */
- return;
- case QETH_QDIO_BUF_QAOB_OK:
- qeth_notify_skbs(queue, buffer,
- TX_NOTIFY_DELAYED_OK);
- error = false;
- break;
- case QETH_QDIO_BUF_QAOB_ERROR:
- qeth_notify_skbs(queue, buffer,
- TX_NOTIFY_DELAYED_GENERALERROR);
- error = true;
- break;
- default:
- WARN_ON_ONCE(1);
+ /* Prepare the queue slot for immediate re-use: */
+ qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
+ if (qeth_alloc_out_buf(queue, bidx, GFP_ATOMIC)) {
+ QETH_CARD_TEXT(card, 2, "outofbuf");
+ qeth_schedule_recovery(card);
}
- break;
- case QETH_QDIO_BUF_QAOB_OK:
- /* qeth_qdio_handle_aob() already received a QAOB: */
- qeth_notify_skbs(queue, buffer, TX_NOTIFY_OK);
- error = false;
- break;
- case QETH_QDIO_BUF_QAOB_ERROR:
- /* qeth_qdio_handle_aob() already received a QAOB: */
- qeth_notify_skbs(queue, buffer, TX_NOTIFY_GENERALERROR);
- error = true;
- break;
- default:
- WARN_ON_ONCE(1);
+ list_add(&buffer->list_entry, &queue->pending_bufs);
+ /* Skip clearing the buffer: */
+ return;
}
+ /* QAOB already completed: */
+ notify = qeth_compute_cq_notification(aob->aorc, 0);
+ qeth_notify_skbs(queue, buffer, notify);
+ error = !!aob->aorc;
memset(aob, 0, sizeof(*aob));
} else if (card->options.cq == QETH_CQ_ENABLED) {
qeth_notify_skbs(queue, buffer,
@@ -6138,7 +6068,10 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget)
unsigned int work_done = 0;
struct netdev_queue *txq;
- txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
+ if (IS_IQD(card))
+ txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
+ else
+ txq = netdev_get_tx_queue(dev, queue_no);
while (1) {
unsigned int start, error, i;
@@ -6165,8 +6098,9 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget)
&start, &error);
if (completed <= 0) {
/* Ensure we see TX completion for pending work: */
- if (napi_complete_done(napi, 0))
- qeth_tx_arm_timer(queue, QETH_TX_TIMER_USECS);
+ if (napi_complete_done(napi, 0) &&
+ !atomic_read(&queue->set_pci_flags_count))
+ qeth_tx_arm_timer(queue, queue->rescan_usecs);
return 0;
}
@@ -6179,12 +6113,19 @@ static int qeth_tx_poll(struct napi_struct *napi, int budget)
bytes += buffer->bytes;
qeth_handle_send_error(card, buffer, error);
- qeth_iqd_tx_complete(queue, bidx, error, budget);
+ if (IS_IQD(card))
+ qeth_iqd_tx_complete(queue, bidx, error, budget);
+ else
+ qeth_clear_output_buffer(queue, buffer, error,
+ budget);
}
- netdev_tx_completed_queue(txq, packets, bytes);
atomic_sub(completed, &queue->used_buffers);
work_done += completed;
+ if (IS_IQD(card))
+ netdev_tx_completed_queue(txq, packets, bytes);
+ else
+ qeth_check_outbound_queue(queue);
/* xmit may have observed the full-condition, but not yet
* stopped the txq. In which case the code below won't trigger.
@@ -7228,6 +7169,8 @@ EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
int qeth_open(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
+ struct qeth_qdio_out_q *queue;
+ unsigned int i;
QETH_CARD_TEXT(card, 4, "qethopen");
@@ -7235,16 +7178,11 @@ int qeth_open(struct net_device *dev)
netif_tx_start_all_queues(dev);
local_bh_disable();
- if (IS_IQD(card)) {
- struct qeth_qdio_out_q *queue;
- unsigned int i;
-
- qeth_for_each_output_queue(card, queue, i) {
- netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
- QETH_NAPI_WEIGHT);
- napi_enable(&queue->napi);
- napi_schedule(&queue->napi);
- }
+ qeth_for_each_output_queue(card, queue, i) {
+ netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
+ QETH_NAPI_WEIGHT);
+ napi_enable(&queue->napi);
+ napi_schedule(&queue->napi);
}
napi_enable(&card->napi);
@@ -7259,6 +7197,8 @@ EXPORT_SYMBOL_GPL(qeth_open);
int qeth_stop(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
+ struct qeth_qdio_out_q *queue;
+ unsigned int i;
QETH_CARD_TEXT(card, 4, "qethstop");
@@ -7266,24 +7206,17 @@ int qeth_stop(struct net_device *dev)
cancel_delayed_work_sync(&card->buffer_reclaim_work);
qdio_stop_irq(CARD_DDEV(card));
- if (IS_IQD(card)) {
- struct qeth_qdio_out_q *queue;
- unsigned int i;
-
- /* Quiesce the NAPI instances: */
- qeth_for_each_output_queue(card, queue, i)
- napi_disable(&queue->napi);
+ /* Quiesce the NAPI instances: */
+ qeth_for_each_output_queue(card, queue, i)
+ napi_disable(&queue->napi);
- /* Stop .ndo_start_xmit, might still access queue->napi. */
- netif_tx_disable(dev);
+ /* Stop .ndo_start_xmit, might still access queue->napi. */
+ netif_tx_disable(dev);
- qeth_for_each_output_queue(card, queue, i) {
- del_timer_sync(&queue->timer);
- /* Queues may get re-allocated, so remove the NAPIs. */
- netif_napi_del(&queue->napi);
- }
- } else {
- netif_tx_disable(dev);
+ qeth_for_each_output_queue(card, queue, i) {
+ del_timer_sync(&queue->timer);
+ /* Queues may get re-allocated, so remove the NAPIs. */
+ netif_napi_del(&queue->napi);
}
return 0;
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index 3a51bbff0ffe..2c4cb300a8fc 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -41,6 +41,7 @@ static const struct qeth_stats txq_stats[] = {
QETH_TXQ_STAT("Queue stopped", stopped),
QETH_TXQ_STAT("Doorbell", doorbell),
QETH_TXQ_STAT("IRQ for frames", coal_frames),
+ QETH_TXQ_STAT("Completion IRQ", completion_irq),
QETH_TXQ_STAT("Completion yield", completion_yield),
QETH_TXQ_STAT("Completion timer", completion_timer),
};
@@ -79,10 +80,8 @@ static void qeth_add_stat_strings(u8 **data, const char *prefix,
{
unsigned int i;
- for (i = 0; i < size; i++) {
- snprintf(*data, ETH_GSTRING_LEN, "%s%s", prefix, stats[i].name);
- *data += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < size; i++)
+ ethtool_sprintf(data, "%s%s", prefix, stats[i].name);
}
static int qeth_get_sset_count(struct net_device *dev, int stringset)
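ethtool_sprintf() replaces the open-coded snprintf()-plus-pointer-bump for ethtool string tables: it formats into one ETH_GSTRING_LEN-sized slot and advances the cursor. A userspace sketch of an equivalent helper (the kernel API takes u8 **data; stats_sprintf here is a stand-in, not the kernel function):

#include <stdarg.h>
#include <stdio.h>

#define ETH_GSTRING_LEN 32

static void stats_sprintf(char **data, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vsnprintf(*data, ETH_GSTRING_LEN, fmt, args);
	va_end(args);
	*data += ETH_GSTRING_LEN;	/* advance to the next string slot */
}

int main(void)
{
	char strings[2 * ETH_GSTRING_LEN] = { 0 };
	char *cursor = strings;

	stats_sprintf(&cursor, "%s%s", "txq0 ", "Doorbell");
	stats_sprintf(&cursor, "%s%s", "txq0 ", "Completion IRQ");
	printf("%s | %s\n", strings, strings + ETH_GSTRING_LEN);
	return 0;
}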
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index ca44421a6d6e..2abf86c104d5 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -805,8 +805,6 @@ static int qeth_l2_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (!netif_device_present(dev))
return -ENODEV;
- if (!(priv->brport_hw_features))
- return -EOPNOTSUPP;
nlmsg_for_each_attr(attr, nlh, sizeof(struct ifinfomsg), rem1) {
if (nla_type(attr) == IFLA_PROTINFO) {
@@ -832,6 +830,16 @@ static int qeth_l2_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
return 0;
if (!bp_tb[IFLA_BRPORT_LEARNING_SYNC])
return -EINVAL;
+ if (!(priv->brport_hw_features & BR_LEARNING_SYNC)) {
+ NL_SET_ERR_MSG_ATTR(extack, bp_tb[IFLA_BRPORT_LEARNING_SYNC],
+ "Operation not supported by HW");
+ return -EOPNOTSUPP;
+ }
+ if (!IS_ENABLED(CONFIG_NET_SWITCHDEV)) {
+ NL_SET_ERR_MSG_ATTR(extack, bp_tb[IFLA_BRPORT_LEARNING_SYNC],
+ "Requires NET_SWITCHDEV");
+ return -EOPNOTSUPP;
+ }
enable = !!nla_get_u8(bp_tb[IFLA_BRPORT_LEARNING_SYNC]);
if (enable == !!(priv->brport_features & BR_LEARNING_SYNC))
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 3ee46a843cb5..adddcd589941 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -2926,11 +2926,11 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
ccb->opcode = BLOGIC_INITIATOR_CCB_SG;
ccb->datalen = count * sizeof(struct blogic_sg_seg);
if (blogic_multimaster_type(adapter))
- ccb->data = (void *)((unsigned int) ccb->dma_handle +
+ ccb->data = (unsigned int) ccb->dma_handle +
((unsigned long) &ccb->sglist -
- (unsigned long) ccb));
+ (unsigned long) ccb);
else
- ccb->data = ccb->sglist;
+ ccb->data = virt_to_32bit_virt(ccb->sglist);
scsi_for_each_sg(command, sg, count, i) {
ccb->sglist[i].segbytes = sg_dma_len(sg);
diff --git a/drivers/scsi/BusLogic.h b/drivers/scsi/BusLogic.h
index a8e4a19788a7..7d1ec10f2430 100644
--- a/drivers/scsi/BusLogic.h
+++ b/drivers/scsi/BusLogic.h
@@ -806,7 +806,7 @@ struct blogic_ccb {
unsigned char cdblen; /* Byte 2 */
unsigned char sense_datalen; /* Byte 3 */
u32 datalen; /* Bytes 4-7 */
- void *data; /* Bytes 8-11 */
+ u32 data; /* Bytes 8-11 */
unsigned char:8; /* Byte 12 */
unsigned char:8; /* Byte 13 */
enum blogic_adapter_status adapter_status; /* Byte 14 */
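The BusLogic change works because bytes 8-11 of the on-wire CCB hold a 32-bit bus address: declaring the field as void * made it 8 bytes on 64-bit builds and shifted every later field of the hardware structure. A compact demonstration of the layout difference (illustrative struct names):

#include <stdint.h>
#include <stdio.h>

struct ccb_wrong {	/* field size (and layout) varies by architecture */
	uint32_t datalen;
	void *data;
};

struct ccb_right {	/* always 8 bytes, matching the hardware layout */
	uint32_t datalen;
	uint32_t data;
} __attribute__((packed));

int main(void)
{
	printf("void * field: %zu bytes total\n", sizeof(struct ccb_wrong));
	printf("u32 field:    %zu bytes total\n", sizeof(struct ccb_right));
	return 0;
}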
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index ecd06d2d7e81..71aa6af08340 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3765,11 +3765,13 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
case HW_EVENT_PHY_START_STATUS:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_START_STATUS status = %x\n",
status);
- if (status == 0) {
+ if (status == 0)
phy->phy_state = 1;
- if (pm8001_ha->flags == PM8001F_RUN_TIME &&
- phy->enable_completion != NULL)
- complete(phy->enable_completion);
+
+ if (pm8001_ha->flags == PM8001F_RUN_TIME &&
+ phy->enable_completion != NULL) {
+ complete(phy->enable_completion);
+ phy->enable_completion = NULL;
}
break;
case HW_EVENT_SAS_PHY_UP:
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 390c33df0357..af09bd282cb9 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -1151,8 +1151,8 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
goto err_out_shost;
}
list_add_tail(&pm8001_ha->list, &hba_list);
- scsi_scan_host(pm8001_ha->shost);
pm8001_ha->flags = PM8001F_RUN_TIME;
+ scsi_scan_host(pm8001_ha->shost);
return 0;
err_out_shost:
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index d28af413b93a..335cf37e6cb9 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -264,12 +264,17 @@ void pm8001_scan_start(struct Scsi_Host *shost)
int i;
struct pm8001_hba_info *pm8001_ha;
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+ DECLARE_COMPLETION_ONSTACK(completion);
pm8001_ha = sha->lldd_ha;
/* SAS_RE_INITIALIZATION not available in SPCv/ve */
if (pm8001_ha->chip_id == chip_8001)
PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha);
- for (i = 0; i < pm8001_ha->chip->n_phy; ++i)
+ for (i = 0; i < pm8001_ha->chip->n_phy; ++i) {
+ pm8001_ha->phy[i].enable_completion = &completion;
PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i);
+ wait_for_completion(&completion);
+ msleep(300);
+ }
}
int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time)
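Two pm80xx fixes cooperate here: the response handlers complete and then NULL phy->enable_completion, so a later firmware event cannot complete a stale on-stack completion, and pm8001_scan_start() now waits for each phy before starting the next, reusing a single completion serially. That reuse is safe because a kernel completion counts: complete() increments done, wait_for_completion() waits for non-zero and decrements. A pthread-based sketch of that counting behaviour (build with -pthread; names are illustrative):

#include <pthread.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t wait;
	unsigned int done;
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done++;
	pthread_cond_signal(&c->wait);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->wait, &c->lock);
	c->done--;	/* consume one completion, ready for re-use */
	pthread_mutex_unlock(&c->lock);
}

static void *firmware_event(void *arg)
{
	complete(arg);	/* stands in for the phy-start response handler */
	return NULL;
}

int main(void)
{
	struct completion c = { PTHREAD_MUTEX_INITIALIZER,
				PTHREAD_COND_INITIALIZER, 0 };
	pthread_t t;
	int phy;

	for (phy = 0; phy < 3; phy++) {	/* one wait per started phy */
		pthread_create(&t, NULL, firmware_event, &c);
		wait_for_completion(&c);
		pthread_join(&t, NULL);
		printf("phy %d is up\n", phy);
	}
	return 0;
}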
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 4e980830f9f5..700530e969ac 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -3487,13 +3487,13 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dbg(pm8001_ha, INIT,
"phy start resp status:0x%x, phyid:0x%x\n",
status, phy_id);
- if (status == 0) {
+ if (status == 0)
phy->phy_state = PHY_LINK_DOWN;
- if (pm8001_ha->flags == PM8001F_RUN_TIME &&
- phy->enable_completion != NULL) {
- complete(phy->enable_completion);
- phy->enable_completion = NULL;
- }
+
+ if (pm8001_ha->flags == PM8001F_RUN_TIME &&
+ phy->enable_completion != NULL) {
+ complete(phy->enable_completion);
+ phy->enable_completion = NULL;
}
return 0;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 69f7784233f9..756231151882 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -536,7 +536,9 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf,
if (linkmode_intersects(link->supported_caps, sup_caps))
lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
- fc_host_supported_speeds(lport->host) = lport->link_supported_speeds;
+ if (lport->host && lport->host->shost_data)
+ fc_host_supported_speeds(lport->host) =
+ lport->link_supported_speeds;
}
static void qedf_bw_update(void *dev)
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 0677295957bc..615e44af1ca6 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1063,7 +1063,8 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
return ret;
}
- if (qla82xx_flash_set_write_enable(ha))
+ ret = qla82xx_flash_set_write_enable(ha);
+ if (ret < 0)
goto done_write;
qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index 0aa58131e791..d0626773eb38 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -467,21 +467,24 @@ static int ufs_hisi_init_common(struct ufs_hba *hba)
host->hba = hba;
ufshcd_set_variant(hba, host);
- host->rst = devm_reset_control_get(dev, "rst");
+ host->rst = devm_reset_control_get(dev, "rst");
if (IS_ERR(host->rst)) {
dev_err(dev, "%s: failed to get reset control\n", __func__);
- return PTR_ERR(host->rst);
+ err = PTR_ERR(host->rst);
+ goto error;
}
ufs_hisi_set_pm_lvl(hba);
err = ufs_hisi_get_resource(host);
- if (err) {
- ufshcd_set_variant(hba, NULL);
- return err;
- }
+ if (err)
+ goto error;
return 0;
+
+error:
+ ufshcd_set_variant(hba, NULL);
+ return err;
}
static int ufs_hi3660_init(struct ufs_hba *hba)
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index a981f261b304..aee3cfc7142a 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -922,6 +922,7 @@ static void ufs_mtk_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
int err;
+ struct arm_smccc_res res;
if (ufshcd_is_link_hibern8(hba)) {
err = ufs_mtk_link_set_lpm(hba);
@@ -941,6 +942,9 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
goto fail;
}
+ if (ufshcd_is_link_off(hba))
+ ufs_mtk_device_reset_ctrl(0, res);
+
return 0;
fail:
/*
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 3eb54937f1d8..72fd41bfbd54 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -2842,7 +2842,7 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
* ufshcd_exec_dev_cmd - API for sending device management requests
* @hba: UFS hba
* @cmd_type: specifies the type (NOP, Query...)
- * @timeout: time in seconds
+ * @timeout: timeout in milliseconds
*
* NOTE: Since there is only one available tag for device management commands,
* it is expected you hold the hba->dev_cmd.lock mutex.
@@ -2872,6 +2872,9 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
}
tag = req->tag;
WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag));
+ /* Set the timeout such that the SCSI error handler is not activated. */
+ req->timeout = msecs_to_jiffies(2 * timeout);
+ blk_mq_start_request(req);
init_completion(&wait);
lrbp = &hba->lrb[tag];
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 8b161ec4943b..f4481fe48bf0 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -65,7 +65,7 @@ config SPI_ALTERA
This is the driver for the Altera SPI Controller.
config SPI_ALTERA_CORE
- tristate "Altera SPI Controller core code"
+ tristate "Altera SPI Controller core code" if COMPILE_TEST
select REGMAP
help
"The core code for the Altera SPI Controller"
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 028736687488..fb45e6af6638 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -1375,11 +1375,13 @@ poll_mode:
ret = spi_register_controller(ctlr);
if (ret != 0) {
dev_err(&pdev->dev, "Problem registering DSPI ctlr\n");
- goto out_free_irq;
+ goto out_release_dma;
}
return ret;
+out_release_dma:
+ dspi_release_dma(dspi);
out_free_irq:
if (dspi->irq)
free_irq(dspi->irq, dspi);
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
index 297c512069a5..5d27ee482237 100644
--- a/drivers/spi/spi-sc18is602.c
+++ b/drivers/spi/spi-sc18is602.c
@@ -174,7 +174,7 @@ static int sc18is602_setup_transfer(struct sc18is602 *hw, u32 hz, u8 mode)
static int sc18is602_check_transfer(struct spi_device *spi,
struct spi_transfer *t, int tlen)
{
- if (t && t->len + tlen > SC18IS602_BUFSIZ)
+ if (t && t->len + tlen > SC18IS602_BUFSIZ + 1)
return -EINVAL;
return 0;
@@ -219,6 +219,11 @@ static int sc18is602_transfer_one(struct spi_master *master,
return status;
}
+static size_t sc18is602_max_transfer_size(struct spi_device *spi)
+{
+ return SC18IS602_BUFSIZ;
+}
+
static int sc18is602_setup(struct spi_device *spi)
{
struct sc18is602 *hw = spi_master_get_devdata(spi->master);
@@ -293,6 +298,8 @@ static int sc18is602_probe(struct i2c_client *client,
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->setup = sc18is602_setup;
master->transfer_one_message = sc18is602_transfer_one;
+ master->max_transfer_size = sc18is602_max_transfer_size;
+ master->max_message_size = sc18is602_max_transfer_size;
master->dev.of_node = np;
master->min_speed_hz = hw->freq / 128;
master->max_speed_hz = hw->freq / 4;
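Exposing max_transfer_size/max_message_size lets SPI clients query the controller's limit up front and chunk their payloads, instead of having sc18is602_check_transfer() reject the message later. A sketch of the splitting a caller can do once the limit is known, assuming a 200-byte SC18IS602_BUFSIZ as in the driver:

#include <stdio.h>

#define SC18IS602_BUFSIZ 200	/* assumed device data-buffer size */

static size_t max_transfer_size(void)
{
	return SC18IS602_BUFSIZ;
}

int main(void)
{
	size_t total = 700, off = 0, cap = max_transfer_size();

	while (off < total) {
		size_t chunk = total - off > cap ? cap : total - off;

		printf("transfer %zu bytes at offset %zu\n", chunk, off);
		off += chunk;
	}
	return 0;
}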
diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c
index b41a75749b49..28e70db9bbba 100644
--- a/drivers/spi/spi-sprd.c
+++ b/drivers/spi/spi-sprd.c
@@ -1068,6 +1068,7 @@ static const struct of_device_id sprd_spi_of_match[] = {
{ .compatible = "sprd,sc9860-spi", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, sprd_spi_of_match);
static struct platform_driver sprd_spi_driver = {
.driver = {
diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
index 5d8a5ee62fa2..5a3d81c31d04 100644
--- a/drivers/spi/spi-zynq-qspi.c
+++ b/drivers/spi/spi-zynq-qspi.c
@@ -367,7 +367,7 @@ static int zynq_qspi_config_op(struct zynq_qspi *xqspi, struct spi_device *spi)
}
/**
- * zynq_qspi_setup - Configure the QSPI controller
+ * zynq_qspi_setup_op - Configure the QSPI controller
* @spi: Pointer to the spi_device structure
*
* Sets the operational mode of QSPI controller for the next QSPI transfer, baud
@@ -528,18 +528,17 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
struct zynq_qspi *xqspi = spi_controller_get_devdata(mem->spi->master);
int err = 0, i;
u8 *tmpbuf;
- u8 opcode = op->cmd.opcode;
dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
- opcode, op->cmd.buswidth, op->addr.buswidth,
+ op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
op->dummy.buswidth, op->data.buswidth);
zynq_qspi_chipselect(mem->spi, true);
zynq_qspi_config_op(xqspi, mem->spi);
- if (op->cmd.nbytes) {
+ if (op->cmd.opcode) {
reinit_completion(&xqspi->data_completion);
- xqspi->txbuf = &opcode;
+ xqspi->txbuf = (u8 *)&op->cmd.opcode;
xqspi->rxbuf = NULL;
xqspi->tx_bytes = op->cmd.nbytes;
xqspi->rx_bytes = op->cmd.nbytes;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index ba425b9c7700..e353b7a9e54e 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -47,10 +47,6 @@ static void spidev_release(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
- /* spi controllers may cleanup for released devices */
- if (spi->controller->cleanup)
- spi->controller->cleanup(spi);
-
spi_controller_put(spi->controller);
kfree(spi->driver_override);
kfree(spi);
@@ -558,6 +554,12 @@ static int spi_dev_check(struct device *dev, void *data)
return 0;
}
+static void spi_cleanup(struct spi_device *spi)
+{
+ if (spi->controller->cleanup)
+ spi->controller->cleanup(spi);
+}
+
/**
* spi_add_device - Add spi_device allocated with spi_alloc_device
* @spi: spi_device to register
@@ -622,11 +624,13 @@ int spi_add_device(struct spi_device *spi)
/* Device may be bound to an active driver when this returns */
status = device_add(&spi->dev);
- if (status < 0)
+ if (status < 0) {
dev_err(dev, "can't add %s, status %d\n",
dev_name(&spi->dev), status);
- else
+ spi_cleanup(spi);
+ } else {
dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
+ }
done:
mutex_unlock(&spi_add_lock);
@@ -717,7 +721,9 @@ void spi_unregister_device(struct spi_device *spi)
if (ACPI_COMPANION(&spi->dev))
acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
device_remove_software_node(&spi->dev);
- device_unregister(&spi->dev);
+ device_del(&spi->dev);
+ spi_cleanup(spi);
+ put_device(&spi->dev);
}
EXPORT_SYMBOL_GPL(spi_unregister_device);
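The spi.c rework splits device_unregister() into its two halves, device_del() plus put_device(), and runs the controller's cleanup in the gap: after the device is unpublished but before the last reference can drop, so cleanup happens exactly once on both the unregister path and the failed device_add() path. The shape of the fix in miniature, with a hand-rolled refcount (illustrative names, not the driver core API):

#include <stdio.h>
#include <stdlib.h>

struct toy_dev {
	int refs;
	int published;
};

static void dev_del(struct toy_dev *d)
{
	d->published = 0;	/* unpublish: no new users can find it */
}

static void cleanup(struct toy_dev *d)
{
	printf("controller cleanup for %p (runs exactly once)\n", (void *)d);
}

static void put_dev(struct toy_dev *d)
{
	if (--d->refs == 0) {	/* last reference: release the memory */
		printf("released\n");
		free(d);
	}
}

int main(void)
{
	struct toy_dev *d = calloc(1, sizeof(*d));

	if (!d)
		return 1;
	d->refs = 1;
	d->published = 1;

	dev_del(d);	/* device_del():  unpublish first...         */
	cleanup(d);	/* spi_cleanup(): ...tear down while valid... */
	put_dev(d);	/* put_device():  ...then drop the last ref   */
	return 0;
}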
@@ -814,15 +820,29 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
if (!(spi->mode & SPI_NO_CS)) {
- if (spi->cs_gpiod)
- /* polarity handled by gpiolib */
- gpiod_set_value_cansleep(spi->cs_gpiod, activate);
- else
+ if (spi->cs_gpiod) {
+ /*
+			 * Historically ACPI has no means of expressing the GPIO
+			 * polarity, so the SPISerialBus() resource defines it on a
+			 * per-chip basis. In order to avoid a chain of negations,
+			 * the GPIO polarity is taken to be Active High. Even for
+			 * the cases when _DSD() is involved (in updated versions
+			 * of ACPI) the GPIO CS polarity must be defined Active
+			 * High to avoid ambiguity. That's why we use enable,
+			 * which takes SPI_CS_HIGH into account.
+ if (has_acpi_companion(&spi->dev))
+ gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
+ else
+ /* Polarity handled by GPIO library */
+ gpiod_set_value_cansleep(spi->cs_gpiod, activate);
+ } else {
/*
* invert the enable line, as active low is
* default for SPI.
*/
gpio_set_value_cansleep(spi->cs_gpio, !enable);
+ }
}
/* Some SPI masters need both GPIO CS & slave_select */
if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
@@ -3451,9 +3471,12 @@ int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup,
if (spi->controller->set_cs_timing &&
!(spi->cs_gpiod || gpio_is_valid(spi->cs_gpio))) {
+ mutex_lock(&spi->controller->io_mutex);
+
if (spi->controller->auto_runtime_pm) {
status = pm_runtime_get_sync(parent);
if (status < 0) {
+ mutex_unlock(&spi->controller->io_mutex);
pm_runtime_put_noidle(parent);
dev_err(&spi->controller->dev, "Failed to power device: %d\n",
status);
@@ -3464,11 +3487,13 @@ int spi_set_cs_timing(struct spi_device *spi, struct spi_delay *setup,
hold, inactive);
pm_runtime_mark_last_busy(parent);
pm_runtime_put_autosuspend(parent);
- return status;
} else {
- return spi->controller->set_cs_timing(spi, setup, hold,
+ status = spi->controller->set_cs_timing(spi, setup, hold,
inactive);
}
+
+ mutex_unlock(&spi->controller->io_mutex);
+ return status;
}
if ((setup && setup->unit == SPI_DELAY_UNIT_SCK) ||
diff --git a/drivers/ssb/driver_gpio.c b/drivers/ssb/driver_gpio.c
index 66a76fd83248..2de3896489c8 100644
--- a/drivers/ssb/driver_gpio.c
+++ b/drivers/ssb/driver_gpio.c
@@ -231,7 +231,8 @@ static int ssb_gpio_chipco_init(struct ssb_bus *bus)
chip->ngpio = 16;
/* There is just one SoC in one device and its GPIO addresses should be
* deterministic to address them more easily. The other buses could get
- * a random base number. */
+ * a random base number.
+ */
if (bus->bustype == SSB_BUSTYPE_SSB)
chip->base = 0;
else
@@ -424,7 +425,8 @@ static int ssb_gpio_extif_init(struct ssb_bus *bus)
chip->ngpio = 5;
/* There is just one SoC in one device and its GPIO addresses should be
* deterministic to address them more easily. The other buses could get
- * a random base number. */
+ * a random base number.
+ */
if (bus->bustype == SSB_BUSTYPE_SSB)
chip->base = 0;
else
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index c1186415896b..d11b4242b6d2 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -55,7 +55,8 @@ void pcicore_write16(struct ssb_pcicore *pc, u16 offset, u16 value)
#include <asm/paccess.h>
/* Probe a 32bit value on the bus and catch bus exceptions.
* Returns nonzero on a bus exception.
- * This is MIPS specific */
+ * This is MIPS specific
+ */
#define mips_busprobe32(val, addr) get_dbe((val), ((u32 *)(addr)))
/* Assume one-hot slot wiring */
@@ -255,7 +256,8 @@ static struct pci_controller ssb_pcicore_controller = {
};
/* This function is called when doing a pci_enable_device().
- * We must first check if the device is a device on the PCI-core bridge. */
+ * We must first check if the device is a device on the PCI-core bridge.
+ */
int ssb_pcicore_plat_dev_init(struct pci_dev *d)
{
if (d->bus->ops != &ssb_pcicore_pciops) {
@@ -381,11 +383,13 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
/* Ok, ready to run, register it to the system.
* The following needs change, if we want to port hostmode
- * to non-MIPS platform. */
+ * to non-MIPS platform.
+ */
ssb_pcicore_controller.io_map_base = (unsigned long)ioremap(SSB_PCI_MEM, 0x04000000);
set_io_port_base(ssb_pcicore_controller.io_map_base);
/* Give some time to the PCI controller to configure itself with the new
- * values. Not waiting at this point causes crashes of the machine. */
+ * values. Not waiting at this point causes crashes of the machine.
+ */
mdelay(10);
register_pci_controller(&ssb_pcicore_controller);
}
@@ -405,7 +409,8 @@ static int pcicore_is_in_hostmode(struct ssb_pcicore *pc)
return 0;
/* The 200-pin BCM4712 package does not bond out PCI. Even when
- * PCI is bonded out, some boards may leave the pins floating. */
+ * PCI is bonded out, some boards may leave the pins floating.
+ */
if (bus->chip_id == 0x4712) {
if (bus->chip_package == SSB_CHIPPACK_BCM4712S)
return 0;
@@ -685,7 +690,8 @@ int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc,
if (dev->bus->bustype != SSB_BUSTYPE_PCI) {
/* This SSB device is not on a PCI host-bus. So the IRQs are
* not routed through the PCI core.
- * So we must not enable routing through the PCI core. */
+ * So we must not enable routing through the PCI core.
+ */
goto out;
}
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 0a26984acb2c..3a29b5570f9f 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -37,7 +37,8 @@ static LIST_HEAD(buses);
/* Software ID counter */
static unsigned int next_busnumber;
/* buses_mutes locks the two buslists and the next_busnumber.
- * Don't lock this directly, but use ssb_buses_[un]lock() below. */
+ * Don't lock this directly, but use ssb_buses_[un]lock() below.
+ */
static DEFINE_MUTEX(buses_mutex);
/* There are differences in the codeflow, if the bus is
@@ -45,7 +46,8 @@ static DEFINE_MUTEX(buses_mutex);
* are not available early. This is a mechanism to delay
* these initializations to after early boot has finished.
* It's also used to avoid mutex locking, as that's not
- * available and needed early. */
+ * available and needed early.
+ */
static bool ssb_is_early_boot = 1;
static void ssb_buses_lock(void);
@@ -161,7 +163,8 @@ int ssb_bus_resume(struct ssb_bus *bus)
int err;
/* Reset HW state information in memory, so that HW is
- * completely reinitialized. */
+ * completely reinitialized.
+ */
bus->mapped_device = NULL;
#ifdef CONFIG_SSB_DRIVER_PCICORE
bus->pcicore.setup_done = 0;
@@ -431,9 +434,7 @@ void ssb_bus_unregister(struct ssb_bus *bus)
int err;
err = ssb_gpio_unregister(bus);
- if (err == -EBUSY)
- pr_debug("Some GPIOs are still in use\n");
- else if (err)
+ if (err)
pr_debug("Can not unregister GPIO driver: %i\n", err);
ssb_buses_lock();
@@ -467,7 +468,8 @@ static int ssb_devices_register(struct ssb_bus *bus)
sdev = &(bus->devices[i]);
/* We don't register SSB-system devices to the kernel,
- * as the drivers for them are built into SSB. */
+ * as the drivers for them are built into SSB.
+ */
switch (sdev->id.coreid) {
case SSB_DEV_CHIPCOMMON:
case SSB_DEV_PCI:
@@ -521,7 +523,8 @@ static int ssb_devices_register(struct ssb_bus *bus)
if (err) {
pr_err("Could not register %s\n", dev_name(dev));
/* Set dev to NULL to not unregister
- * dev on error unwinding. */
+ * dev on error unwinding.
+ */
sdev->dev = NULL;
put_device(dev);
goto error;
@@ -667,7 +670,8 @@ ssb_bus_register(struct ssb_bus *bus,
ssb_bus_may_powerdown(bus);
/* Queue it for attach.
- * See the comment at the ssb_is_early_boot definition. */
+ * See the comment at the ssb_is_early_boot definition.
+ */
list_add_tail(&bus->list, &attach_queue);
if (!ssb_is_early_boot) {
/* This is not early boot, so we must attach the bus now */
@@ -1007,7 +1011,8 @@ static void ssb_flush_tmslow(struct ssb_device *dev)
* a machine check exception otherwise.
* Do this by reading the register back to commit the
* PCI write and delay an additional usec for the device
- * to react to the change. */
+ * to react to the change.
+ */
ssb_read32(dev, SSB_TMSLOW);
udelay(1);
}
@@ -1044,7 +1049,8 @@ void ssb_device_enable(struct ssb_device *dev, u32 core_specific_flags)
EXPORT_SYMBOL(ssb_device_enable);
/* Wait for bitmask in a register to get set or cleared.
- * timeout is in units of ten-microseconds */
+ * timeout is in units of ten-microseconds
+ */
static int ssb_wait_bits(struct ssb_device *dev, u16 reg, u32 bitmask,
int timeout, int set)
{
@@ -1153,7 +1159,8 @@ int ssb_bus_may_powerdown(struct ssb_bus *bus)
/* On buses where more than one core may be working
* at a time, we must not powerdown stuff if there are
- * still cores that may want to run. */
+ * still cores that may want to run.
+ */
if (bus->bustype == SSB_BUSTYPE_SSB)
goto out;
@@ -1303,13 +1310,11 @@ static int __init ssb_modinit(void)
if (err) {
pr_err("Broadcom 43xx PCI-SSB-bridge initialization failed\n");
/* don't fail SSB init because of this */
- err = 0;
}
err = ssb_host_pcmcia_init();
if (err) {
pr_err("PCMCIA host initialization failed\n");
/* don't fail SSB init because of this */
- err = 0;
}
err = ssb_gige_init();
if (err) {
@@ -1322,7 +1327,8 @@ out:
}
/* ssb must be initialized after PCI but before the ssb drivers.
* That means we must use some initcall between subsys_initcall
- * and device_initcall. */
+ * and device_initcall.
+ */
fs_initcall(ssb_modinit);
static void __exit ssb_modexit(void)
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index dac54041ad8d..148bcb99c212 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -1117,9 +1117,9 @@ const struct ssb_bus_ops ssb_pci_ops = {
#endif
};
-static ssize_t ssb_pci_attr_sprom_show(struct device *pcidev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t ssb_sprom_show(struct device *pcidev,
+ struct device_attribute *attr,
+ char *buf)
{
struct pci_dev *pdev = container_of(pcidev, struct pci_dev, dev);
struct ssb_bus *bus;
@@ -1131,9 +1131,9 @@ static ssize_t ssb_pci_attr_sprom_show(struct device *pcidev,
return ssb_attr_sprom_show(bus, buf, sprom_do_read);
}
-static ssize_t ssb_pci_attr_sprom_store(struct device *pcidev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t ssb_sprom_store(struct device *pcidev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct pci_dev *pdev = container_of(pcidev, struct pci_dev, dev);
struct ssb_bus *bus;
@@ -1146,9 +1146,7 @@ static ssize_t ssb_pci_attr_sprom_store(struct device *pcidev,
sprom_check_crc, sprom_do_write);
}
-static DEVICE_ATTR(ssb_sprom, 0600,
- ssb_pci_attr_sprom_show,
- ssb_pci_attr_sprom_store);
+static DEVICE_ATTR_ADMIN_RW(ssb_sprom);
void ssb_pci_exit(struct ssb_bus *bus)
{
diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c
index d7d730c245c5..45502098e0c7 100644
--- a/drivers/ssb/pcmcia.c
+++ b/drivers/ssb/pcmcia.c
@@ -723,9 +723,9 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
return -ENODEV;
}
-static ssize_t ssb_pcmcia_attr_sprom_show(struct device *pcmciadev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t ssb_sprom_show(struct device *pcmciadev,
+ struct device_attribute *attr,
+ char *buf)
{
struct pcmcia_device *pdev =
container_of(pcmciadev, struct pcmcia_device, dev);
@@ -739,9 +739,9 @@ static ssize_t ssb_pcmcia_attr_sprom_show(struct device *pcmciadev,
ssb_pcmcia_sprom_read_all);
}
-static ssize_t ssb_pcmcia_attr_sprom_store(struct device *pcmciadev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t ssb_sprom_store(struct device *pcmciadev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct pcmcia_device *pdev =
container_of(pcmciadev, struct pcmcia_device, dev);
@@ -756,9 +756,7 @@ static ssize_t ssb_pcmcia_attr_sprom_store(struct device *pcmciadev,
ssb_pcmcia_sprom_write_all);
}
-static DEVICE_ATTR(ssb_sprom, 0600,
- ssb_pcmcia_attr_sprom_show,
- ssb_pcmcia_attr_sprom_store);
+static DEVICE_ATTR_ADMIN_RW(ssb_sprom);
static int ssb_pcmcia_cor_setup(struct ssb_bus *bus, u8 cor)
{
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index f49ab1aa2149..4161e5d1f276 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -325,6 +325,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
if (bus->nr_devices > ARRAY_SIZE(bus->devices)) {
pr_err("More than %d ssb cores found (%d)\n",
SSB_MAX_NR_CORES, bus->nr_devices);
+ err = -EINVAL;
goto err_unmap;
}
if (bus->bustype == SSB_BUSTYPE_SSB) {
diff --git a/drivers/ssb/sdio.c b/drivers/ssb/sdio.c
index 7fe0afb42234..66c5c2169704 100644
--- a/drivers/ssb/sdio.c
+++ b/drivers/ssb/sdio.c
@@ -411,7 +411,6 @@ static void ssb_sdio_block_write(struct ssb_device *dev, const void *buffer,
sdio_claim_host(bus->host_sdio);
if (unlikely(ssb_sdio_switch_core(bus, dev))) {
error = -EIO;
- memset((void *)buffer, 0xff, count);
goto err_out;
}
offset |= bus->sdio_sbaddr & 0xffff;
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index c1dac6eec59f..a6d731e959a2 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -527,6 +527,9 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct security_priv *psecuritypriv = &(padapter->securitypriv);
struct sta_priv *pstapriv = &padapter->stapriv;
+ char *grpkey = padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey;
+ char *txkey = padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey;
+ char *rxkey = padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey;
param->u.crypt.err = 0;
param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
@@ -609,7 +612,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
{
if (strcmp(param->u.crypt.alg, "WEP") == 0)
{
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
if (param->u.crypt.key_len == 13)
@@ -622,12 +625,12 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
{
psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
/* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
/* set mic key */
- memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
- memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+ memcpy(txkey, &(param->u.crypt.key[16]), 8);
+ memcpy(rxkey, &(param->u.crypt.key[24]), 8);
psecuritypriv->busetkipkey = true;
@@ -636,7 +639,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
{
psecuritypriv->dot118021XGrpPrivacy = _AES_;
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
}
else
{
@@ -713,7 +716,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
{
if (strcmp(param->u.crypt.alg, "WEP") == 0)
{
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
if (param->u.crypt.key_len == 13)
@@ -725,12 +728,12 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
{
psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
/* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
/* set mic key */
- memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
- memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+ memcpy(txkey, &(param->u.crypt.key[16]), 8);
+ memcpy(rxkey, &(param->u.crypt.key[24]), 8);
psecuritypriv->busetkipkey = true;
@@ -739,7 +742,7 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
{
psecuritypriv->dot118021XGrpPrivacy = _AES_;
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
}
else
{
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index e98e5388d5c7..5088c3731b6d 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -2963,6 +2963,9 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct security_priv *psecuritypriv = &(padapter->securitypriv);
struct sta_priv *pstapriv = &padapter->stapriv;
+ char *txkey = padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey;
+ char *rxkey = padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey;
+ char *grpkey = psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey;
param->u.crypt.err = 0;
param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
@@ -3064,7 +3067,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
if (!psta && check_fwstate(pmlmepriv, WIFI_AP_STATE)) { /* group key */
if (param->u.crypt.set_tx == 1) {
if (strcmp(param->u.crypt.alg, "WEP") == 0) {
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
if (param->u.crypt.key_len == 13)
@@ -3073,11 +3076,11 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
} else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
/* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
/* set mic key */
- memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
+ memcpy(txkey, &(param->u.crypt.key[16]), 8);
memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
psecuritypriv->busetkipkey = true;
@@ -3086,7 +3089,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
psecuritypriv->dot118021XGrpPrivacy = _AES_;
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
} else {
psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
}
@@ -3142,7 +3145,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
} else { /* group key??? */
if (strcmp(param->u.crypt.alg, "WEP") == 0) {
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
if (param->u.crypt.key_len == 13)
@@ -3150,19 +3153,19 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
} else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
/* DEBUG_ERR("set key length :param->u.crypt.key_len =%d\n", param->u.crypt.key_len); */
/* set mic key */
- memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
- memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+ memcpy(txkey, &(param->u.crypt.key[16]), 8);
+ memcpy(rxkey, &(param->u.crypt.key[24]), 8);
psecuritypriv->busetkipkey = true;
} else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
psecuritypriv->dot118021XGrpPrivacy = _AES_;
- memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(grpkey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
} else {
psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
}
diff --git a/drivers/tee/amdtee/amdtee_private.h b/drivers/tee/amdtee/amdtee_private.h
index 337c8d82f74e..6d0f7062bb87 100644
--- a/drivers/tee/amdtee/amdtee_private.h
+++ b/drivers/tee/amdtee/amdtee_private.h
@@ -21,6 +21,7 @@
#define TEEC_SUCCESS 0x00000000
#define TEEC_ERROR_GENERIC 0xFFFF0000
#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006
+#define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C
#define TEEC_ERROR_COMMUNICATION 0xFFFF000E
#define TEEC_ORIGIN_COMMS 0x00000002
@@ -93,6 +94,18 @@ struct amdtee_shm_data {
u32 buf_id;
};
+/**
+ * struct amdtee_ta_data - Keeps track of all TAs loaded in AMD Secure
+ * Processor
+ * @list_node: Node in the global list of loaded TAs
+ * @ta_handle: Handle to TA loaded in TEE
+ * @refcount: Reference count for the loaded TA
+ */
+struct amdtee_ta_data {
+ struct list_head list_node;
+ u32 ta_handle;
+ u32 refcount;
+};
+
#define LOWER_TWO_BYTE_MASK 0x0000FFFF
/**
diff --git a/drivers/tee/amdtee/call.c b/drivers/tee/amdtee/call.c
index 096dd4d92d39..07f36ac834c8 100644
--- a/drivers/tee/amdtee/call.c
+++ b/drivers/tee/amdtee/call.c
@@ -121,15 +121,69 @@ static int amd_params_to_tee_params(struct tee_param *tee, u32 count,
return ret;
}
+static DEFINE_MUTEX(ta_refcount_mutex);
+static struct list_head ta_list = LIST_HEAD_INIT(ta_list);
+
+static u32 get_ta_refcount(u32 ta_handle)
+{
+ struct amdtee_ta_data *ta_data;
+ u32 count = 0;
+
+	/* Caller must hold ta_refcount_mutex */
+ list_for_each_entry(ta_data, &ta_list, list_node)
+ if (ta_data->ta_handle == ta_handle)
+ return ++ta_data->refcount;
+
+ ta_data = kzalloc(sizeof(*ta_data), GFP_KERNEL);
+ if (ta_data) {
+ ta_data->ta_handle = ta_handle;
+ ta_data->refcount = 1;
+ count = ta_data->refcount;
+ list_add(&ta_data->list_node, &ta_list);
+ }
+
+ return count;
+}
+
+static u32 put_ta_refcount(u32 ta_handle)
+{
+ struct amdtee_ta_data *ta_data;
+ u32 count = 0;
+
+	/* Caller must hold ta_refcount_mutex */
+ list_for_each_entry(ta_data, &ta_list, list_node)
+ if (ta_data->ta_handle == ta_handle) {
+ count = --ta_data->refcount;
+ if (count == 0) {
+ list_del(&ta_data->list_node);
+ kfree(ta_data);
+ break;
+ }
+ }
+
+ return count;
+}
+
int handle_unload_ta(u32 ta_handle)
{
struct tee_cmd_unload_ta cmd = {0};
- u32 status;
+ u32 status, count;
int ret;
if (!ta_handle)
return -EINVAL;
+ mutex_lock(&ta_refcount_mutex);
+
+ count = put_ta_refcount(ta_handle);
+
+ if (count) {
+ pr_debug("unload ta: not unloading %u count %u\n",
+ ta_handle, count);
+ ret = -EBUSY;
+ goto unlock;
+ }
+
cmd.ta_handle = ta_handle;
ret = psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA, (void *)&cmd,
@@ -137,8 +191,12 @@ int handle_unload_ta(u32 ta_handle)
if (!ret && status != 0) {
pr_err("unload ta: status = 0x%x\n", status);
ret = -EBUSY;
+ } else {
+ pr_debug("unloaded ta handle %u\n", ta_handle);
}
+unlock:
+ mutex_unlock(&ta_refcount_mutex);
return ret;
}
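The amdtee changes introduce per-TA reference counting under ta_refcount_mutex: the first load of a TA allocates a tracking node, further loads bump the count, and only the last handle_unload_ta() really sends TEE_CMD_ID_UNLOAD_TA (with handle_load_ta() rolling back via an unload command if the node allocation fails). A compact userspace model of that lifecycle (illustrative names; locking omitted):

#include <stdio.h>
#include <stdlib.h>

struct ta_data {
	struct ta_data *next;
	unsigned int handle;
	unsigned int refcount;
};

static struct ta_data *ta_list;

static unsigned int get_ta(unsigned int handle)
{
	struct ta_data *t;

	for (t = ta_list; t; t = t->next)
		if (t->handle == handle)
			return ++t->refcount;

	t = calloc(1, sizeof(*t));
	if (!t)
		return 0;	/* load path unloads the TA again on this */
	t->handle = handle;
	t->refcount = 1;
	t->next = ta_list;
	ta_list = t;
	return 1;
}

static unsigned int put_ta(unsigned int handle)
{
	struct ta_data **p, *t;

	for (p = &ta_list; (t = *p); p = &t->next)
		if (t->handle == handle) {
			if (--t->refcount)
				return t->refcount;
			*p = t->next;	/* last user: drop the node */
			free(t);
			return 0;
		}
	return 0;
}

int main(void)
{
	printf("first load:  refcount %u\n", get_ta(7));	/* 1 */
	printf("second load: refcount %u\n", get_ta(7));	/* 2 */
	printf("first put:   refcount %u\n", put_ta(7));	/* 1: keep loaded */
	printf("last put:    refcount %u\n", put_ta(7));	/* 0: unload now */
	return 0;
}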
@@ -340,7 +398,8 @@ int handle_open_session(struct tee_ioctl_open_session_arg *arg, u32 *info,
int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
{
- struct tee_cmd_load_ta cmd = {0};
+ struct tee_cmd_unload_ta unload_cmd = {};
+ struct tee_cmd_load_ta load_cmd = {};
phys_addr_t blob;
int ret;
@@ -353,21 +412,36 @@ int handle_load_ta(void *data, u32 size, struct tee_ioctl_open_session_arg *arg)
return -EINVAL;
}
- cmd.hi_addr = upper_32_bits(blob);
- cmd.low_addr = lower_32_bits(blob);
- cmd.size = size;
+ load_cmd.hi_addr = upper_32_bits(blob);
+ load_cmd.low_addr = lower_32_bits(blob);
+ load_cmd.size = size;
- ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, (void *)&cmd,
- sizeof(cmd), &arg->ret);
+ mutex_lock(&ta_refcount_mutex);
+
+ ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, (void *)&load_cmd,
+ sizeof(load_cmd), &arg->ret);
if (ret) {
arg->ret_origin = TEEC_ORIGIN_COMMS;
arg->ret = TEEC_ERROR_COMMUNICATION;
- } else {
- set_session_id(cmd.ta_handle, 0, &arg->session);
+ } else if (arg->ret == TEEC_SUCCESS) {
+ ret = get_ta_refcount(load_cmd.ta_handle);
+ if (!ret) {
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+
+ /* Unload the TA on error */
+ unload_cmd.ta_handle = load_cmd.ta_handle;
+ psp_tee_process_cmd(TEE_CMD_ID_UNLOAD_TA,
+ (void *)&unload_cmd,
+ sizeof(unload_cmd), &ret);
+ } else {
+ set_session_id(load_cmd.ta_handle, 0, &arg->session);
+ }
}
+ mutex_unlock(&ta_refcount_mutex);
pr_debug("load TA: TA handle = 0x%x, RO = 0x%x, ret = 0x%x\n",
- cmd.ta_handle, arg->ret_origin, arg->ret);
+ load_cmd.ta_handle, arg->ret_origin, arg->ret);
return 0;
}
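
The get/put helpers added above are a mutex-protected list of per-handle reference counts. A minimal userspace sketch of the same pattern, using pthreads in place of the kernel mutex and list APIs (all names are illustrative, not part of the amdtee driver; unlike the driver, which expects the caller to hold ta_refcount_mutex across the whole load, this sketch locks internally):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct ta_ref {
	struct ta_ref *next;
	uint32_t handle;
	uint32_t refcount;
};

static pthread_mutex_t ref_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ta_ref *ref_list;

/* Returns the new count, or 0 on allocation failure (as in the driver). */
static uint32_t get_ref(uint32_t handle)
{
	struct ta_ref *r;
	uint32_t count = 0;

	pthread_mutex_lock(&ref_lock);
	for (r = ref_list; r; r = r->next)
		if (r->handle == handle) {
			count = ++r->refcount;
			goto out;
		}
	r = calloc(1, sizeof(*r));
	if (r) {
		r->handle = handle;
		r->refcount = count = 1;
		r->next = ref_list;
		ref_list = r;
	}
out:
	pthread_mutex_unlock(&ref_lock);
	return count;
}

/* Returns the remaining count; frees the node when it drops to zero. */
static uint32_t put_ref(uint32_t handle)
{
	struct ta_ref **p, *r;
	uint32_t count = 0;

	pthread_mutex_lock(&ref_lock);
	for (p = &ref_list; (r = *p); p = &r->next)
		if (r->handle == handle) {
			count = --r->refcount;
			if (!count) {
				*p = r->next;
				free(r);
			}
			break;
		}
	pthread_mutex_unlock(&ref_lock);
	return count;
}

int main(void)
{
	get_ref(42);
	get_ref(42);
	printf("after put: %u\n", put_ref(42));	/* 1 */
	printf("after put: %u\n", put_ref(42));	/* 0: node freed */
	return 0;
}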
diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
index 8a6a8f30bb42..da6b88e80dc0 100644
--- a/drivers/tee/amdtee/core.c
+++ b/drivers/tee/amdtee/core.c
@@ -59,10 +59,9 @@ static void release_session(struct amdtee_session *sess)
continue;
handle_close_session(sess->ta_handle, sess->session_info[i]);
+ handle_unload_ta(sess->ta_handle);
}
- /* Unload Trusted Application once all sessions are closed */
- handle_unload_ta(sess->ta_handle);
kfree(sess);
}
@@ -224,8 +223,6 @@ static void destroy_session(struct kref *ref)
struct amdtee_session *sess = container_of(ref, struct amdtee_session,
refcount);
- /* Unload the TA from TEE */
- handle_unload_ta(sess->ta_handle);
mutex_lock(&session_list_mutex);
list_del(&sess->list_node);
mutex_unlock(&session_list_mutex);
@@ -238,7 +235,7 @@ int amdtee_open_session(struct tee_context *ctx,
{
struct amdtee_context_data *ctxdata = ctx->data;
struct amdtee_session *sess = NULL;
- u32 session_info;
+ u32 session_info, ta_handle;
size_t ta_size;
int rc, i;
void *ta;
@@ -259,11 +256,14 @@ int amdtee_open_session(struct tee_context *ctx,
if (arg->ret != TEEC_SUCCESS)
goto out;
+ ta_handle = get_ta_handle(arg->session);
+
mutex_lock(&session_list_mutex);
sess = alloc_session(ctxdata, arg->session);
mutex_unlock(&session_list_mutex);
if (!sess) {
+ handle_unload_ta(ta_handle);
rc = -ENOMEM;
goto out;
}
@@ -277,6 +277,7 @@ int amdtee_open_session(struct tee_context *ctx,
if (i >= TEE_NUM_SESSIONS) {
pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
+ handle_unload_ta(ta_handle);
kref_put(&sess->refcount, destroy_session);
rc = -ENOMEM;
goto out;
@@ -289,12 +290,13 @@ int amdtee_open_session(struct tee_context *ctx,
spin_lock(&sess->lock);
clear_bit(i, sess->sess_mask);
spin_unlock(&sess->lock);
+ handle_unload_ta(ta_handle);
kref_put(&sess->refcount, destroy_session);
goto out;
}
sess->session_info[i] = session_info;
- set_session_id(sess->ta_handle, i, &arg->session);
+ set_session_id(ta_handle, i, &arg->session);
out:
free_pages((u64)ta, get_order(ta_size));
return rc;
@@ -329,6 +331,7 @@ int amdtee_close_session(struct tee_context *ctx, u32 session)
/* Close the session */
handle_close_session(ta_handle, session_info);
+ handle_unload_ta(ta_handle);
kref_put(&sess->refcount, destroy_session);
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 8534d6e45a1d..3cbc757d7be7 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1519,6 +1519,8 @@ static int __init max310x_uart_init(void)
#ifdef CONFIG_SPI_MASTER
ret = spi_register_driver(&max310x_spi_driver);
+ if (ret)
+ uart_unregister_driver(&max310x_uart);
#endif
return ret;
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index e0c00a1b0763..51b0ecabf2ec 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -818,9 +818,6 @@ static int mvebu_uart_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (!match)
- return -ENODEV;
-
/* Assume that all UART ports have a DT alias or none has */
id = of_alias_get_id(pdev->dev.of_node, "serial");
if (!pdev->dev.of_node || id < 0)
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 01645e87b3d5..fa1548d4f94b 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1171,7 +1171,7 @@ static inline int resize_screen(struct vc_data *vc, int width, int height,
/* Resizes the resolution of the display adapter */
int err = 0;
- if (vc->vc_mode != KD_GRAPHICS && vc->vc_sw->con_resize)
+ if (vc->vc_sw->con_resize)
err = vc->vc_sw->con_resize(vc, width, height, user);
return err;
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 89aeaf3c1bca..0e0cd9e9e589 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -671,21 +671,58 @@ static int vt_resizex(struct vc_data *vc, struct vt_consize __user *cs)
if (copy_from_user(&v, cs, sizeof(struct vt_consize)))
return -EFAULT;
- if (v.v_vlin)
- pr_info_once("\"struct vt_consize\"->v_vlin is ignored. Please report if you need this.\n");
- if (v.v_clin)
- pr_info_once("\"struct vt_consize\"->v_clin is ignored. Please report if you need this.\n");
+ /* FIXME: Should check the copies properly */
+ if (!v.v_vlin)
+ v.v_vlin = vc->vc_scan_lines;
+
+ if (v.v_clin) {
+ int rows = v.v_vlin / v.v_clin;
+ if (v.v_rows != rows) {
+ if (v.v_rows) /* Parameters don't add up */
+ return -EINVAL;
+ v.v_rows = rows;
+ }
+ }
+
+ if (v.v_vcol && v.v_ccol) {
+ int cols = v.v_vcol / v.v_ccol;
+ if (v.v_cols != cols) {
+ if (v.v_cols)
+ return -EINVAL;
+ v.v_cols = cols;
+ }
+ }
+
+ if (v.v_clin > 32)
+ return -EINVAL;
- console_lock();
for (i = 0; i < MAX_NR_CONSOLES; i++) {
- vc = vc_cons[i].d;
+ struct vc_data *vcp;
- if (vc) {
- vc->vc_resize_user = 1;
- vc_resize(vc, v.v_cols, v.v_rows);
+ if (!vc_cons[i].d)
+ continue;
+ console_lock();
+ vcp = vc_cons[i].d;
+ if (vcp) {
+ int ret;
+ int save_scan_lines = vcp->vc_scan_lines;
+ int save_cell_height = vcp->vc_cell_height;
+
+ if (v.v_vlin)
+ vcp->vc_scan_lines = v.v_vlin;
+ if (v.v_clin)
+ vcp->vc_cell_height = v.v_clin;
+ vcp->vc_resize_user = 1;
+ ret = vc_resize(vcp, v.v_cols, v.v_rows);
+ if (ret) {
+ vcp->vc_scan_lines = save_scan_lines;
+ vcp->vc_cell_height = save_cell_height;
+ console_unlock();
+ return ret;
+ }
}
+ console_unlock();
}
- console_unlock();
return 0;
}
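
The vt_resizex() changes above derive the row count from v_vlin/v_clin and the column count from v_vcol/v_ccol, rejecting requests whose explicit counts disagree with the derived ones. A standalone sketch of that consistency check (hypothetical struct and names, modeled on vt_consize):

#include <stdio.h>

struct consize {
	unsigned short rows, cols;	/* requested character cells */
	unsigned short vlin, clin;	/* scan lines: total / per cell */
	unsigned short vcol, ccol;	/* pixel columns: total / per cell */
};

/* Returns 0 and fills in derived fields, or -1 if the numbers disagree. */
static int check_consize(struct consize *v, unsigned short cur_scan_lines)
{
	if (!v->vlin)
		v->vlin = cur_scan_lines;
	if (v->clin) {
		int rows = v->vlin / v->clin;

		if (v->rows != rows) {
			if (v->rows)
				return -1;	/* parameters don't add up */
			v->rows = rows;
		}
	}
	if (v->vcol && v->ccol) {
		int cols = v->vcol / v->ccol;

		if (v->cols != cols) {
			if (v->cols)
				return -1;
			v->cols = cols;
		}
	}
	if (v->clin > 32)
		return -1;	/* cell height limit, as in the patch */
	return 0;
}

int main(void)
{
	struct consize v = { .vlin = 400, .clin = 16, .vcol = 640, .ccol = 8 };

	if (!check_consize(&v, 400))
		printf("%dx%d cells\n", v.cols, v.rows);	/* 80x25 */
	return 0;
}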
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index 0330ba99730e..652fe2547587 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -291,13 +291,15 @@ hv_uio_probe(struct hv_device *dev,
pdata->recv_buf = vzalloc(RECV_BUFFER_SIZE);
if (pdata->recv_buf == NULL) {
ret = -ENOMEM;
- goto fail_close;
+ goto fail_free_ring;
}
ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
RECV_BUFFER_SIZE, &pdata->recv_gpadl);
- if (ret)
+ if (ret) {
+ vfree(pdata->recv_buf);
goto fail_close;
+ }
/* put Global Physical Address Label in name */
snprintf(pdata->recv_name, sizeof(pdata->recv_name),
@@ -316,8 +318,10 @@ hv_uio_probe(struct hv_device *dev,
ret = vmbus_establish_gpadl(channel, pdata->send_buf,
SEND_BUFFER_SIZE, &pdata->send_gpadl);
- if (ret)
+ if (ret) {
+ vfree(pdata->send_buf);
goto fail_close;
+ }
snprintf(pdata->send_name, sizeof(pdata->send_name),
"send:%u", pdata->send_gpadl);
@@ -347,6 +351,8 @@ hv_uio_probe(struct hv_device *dev,
fail_close:
hv_uio_cleanup(dev, pdata);
+fail_free_ring:
+ vmbus_free_ring(dev->channel);
return ret;
}
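
The fix above follows the usual rule for goto-ladder error handling: each failure label undoes exactly the allocations that succeeded before it, in reverse order, and a resource freed inline (the recv/send buffers here) must not be freed again by a shared label. A compilable sketch of the shape, with generic resources standing in for the hv_uio ones:

#include <stdlib.h>

struct dev { void *ring, *recv_buf, *send_buf; };

static int probe(struct dev *d)
{
	d->ring = malloc(64);
	if (!d->ring)
		return -1;

	d->recv_buf = malloc(64);
	if (!d->recv_buf)
		goto fail_free_ring;	/* only the ring is held so far */

	d->send_buf = malloc(64);
	if (!d->send_buf)
		goto fail_free_recv;

	return 0;

fail_free_recv:
	free(d->recv_buf);
fail_free_ring:
	free(d->ring);
	return -1;
}

int main(void)
{
	struct dev d = { 0 };

	return probe(&d);
}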
diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c
index c7d681fef198..3bb0b0075467 100644
--- a/drivers/uio/uio_pci_generic.c
+++ b/drivers/uio/uio_pci_generic.c
@@ -82,7 +82,7 @@ static int probe(struct pci_dev *pdev,
}
if (pdev->irq && !pci_intx_mask_supported(pdev))
- return -ENOMEM;
+ return -ENODEV;
gdev = devm_kzalloc(&pdev->dev, sizeof(struct uio_pci_generic_dev), GFP_KERNEL);
if (!gdev)
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 457b00c6e984..8e5490ac13a2 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -335,12 +335,23 @@ exit:
}
-static void kill_urbs(struct wdm_device *desc)
+static void poison_urbs(struct wdm_device *desc)
{
/* the order here is essential */
- usb_kill_urb(desc->command);
- usb_kill_urb(desc->validity);
- usb_kill_urb(desc->response);
+ usb_poison_urb(desc->command);
+ usb_poison_urb(desc->validity);
+ usb_poison_urb(desc->response);
+}
+
+static void unpoison_urbs(struct wdm_device *desc)
+{
+ /*
+ * the order here is not essential
+ * it is symmetrical just to be nice
+ */
+ usb_unpoison_urb(desc->response);
+ usb_unpoison_urb(desc->validity);
+ usb_unpoison_urb(desc->command);
}
static void free_urbs(struct wdm_device *desc)
@@ -760,11 +771,12 @@ static int wdm_release(struct inode *inode, struct file *file)
if (!desc->count) {
if (!test_bit(WDM_DISCONNECTING, &desc->flags)) {
dev_dbg(&desc->intf->dev, "wdm_release: cleanup\n");
- kill_urbs(desc);
+ poison_urbs(desc);
spin_lock_irq(&desc->iuspin);
desc->resp_count = 0;
spin_unlock_irq(&desc->iuspin);
desc->manage_power(desc->intf, 0);
+ unpoison_urbs(desc);
} else {
/* must avoid dev_printk here as desc->intf is invalid */
pr_debug(KBUILD_MODNAME " %s: device gone - cleaning up\n", __func__);
@@ -843,10 +855,11 @@ static void wdm_wwan_port_stop(struct wwan_port *port)
struct wdm_device *desc = wwan_port_get_drvdata(port);
/* Stop all transfers and disable WWAN mode */
- kill_urbs(desc);
+ poison_urbs(desc);
desc->manage_power(desc->intf, 0);
clear_bit(WDM_READ, &desc->flags);
clear_bit(WDM_WWAN_IN_USE, &desc->flags);
+ unpoison_urbs(desc);
}
static void wdm_wwan_port_tx_complete(struct urb *urb)
@@ -1209,9 +1222,9 @@ static void wdm_disconnect(struct usb_interface *intf)
wake_up_all(&desc->wait);
mutex_lock(&desc->rlock);
mutex_lock(&desc->wlock);
+ poison_urbs(desc);
cancel_work_sync(&desc->rxwork);
cancel_work_sync(&desc->service_outs_intr);
- kill_urbs(desc);
mutex_unlock(&desc->wlock);
mutex_unlock(&desc->rlock);
@@ -1252,9 +1265,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
set_bit(WDM_SUSPENDING, &desc->flags);
spin_unlock_irq(&desc->iuspin);
/* callback submits work - order is essential */
- kill_urbs(desc);
+ poison_urbs(desc);
cancel_work_sync(&desc->rxwork);
cancel_work_sync(&desc->service_outs_intr);
+ unpoison_urbs(desc);
}
if (!PMSG_IS_AUTO(message)) {
mutex_unlock(&desc->wlock);
@@ -1312,7 +1326,7 @@ static int wdm_pre_reset(struct usb_interface *intf)
wake_up_all(&desc->wait);
mutex_lock(&desc->rlock);
mutex_lock(&desc->wlock);
- kill_urbs(desc);
+ poison_urbs(desc);
cancel_work_sync(&desc->rxwork);
cancel_work_sync(&desc->service_outs_intr);
return 0;
@@ -1323,6 +1337,7 @@ static int wdm_post_reset(struct usb_interface *intf)
struct wdm_device *desc = wdm_find_device(intf);
int rv;
+ unpoison_urbs(desc);
clear_bit(WDM_OVERFLOW, &desc->flags);
clear_bit(WDM_RESETTING, &desc->flags);
rv = recover_from_urb_loss(desc);
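
Poisoning differs from killing in that a poisoned URB also rejects new submissions until it is unpoisoned, which closes the race where a completion handler or work item resubmits right after the kill. A minimal userspace model of that lifecycle (illustrative only; the real primitives are usb_poison_urb() and usb_unpoison_urb()):

#include <stdbool.h>
#include <stdio.h>

struct urb { bool in_flight, poisoned; };

static int submit(struct urb *u)
{
	if (u->poisoned)
		return -1;	/* resubmission refused while poisoned */
	u->in_flight = true;
	return 0;
}

static void poison(struct urb *u)
{
	u->poisoned = true;	/* block new submissions ... */
	u->in_flight = false;	/* ... then wait out the current one */
}

static void unpoison(struct urb *u)
{
	u->poisoned = false;
}

int main(void)
{
	struct urb u = { 0 };

	submit(&u);
	poison(&u);
	printf("resubmit while poisoned: %d\n", submit(&u));	/* -1 */
	unpoison(&u);
	printf("resubmit after unpoison: %d\n", submit(&u));	/* 0 */
	return 0;
}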
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index b2bc4b7c4289..fc7d6cdacf16 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3642,9 +3642,6 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
* sequence.
*/
status = hub_port_status(hub, port1, &portstatus, &portchange);
-
- /* TRSMRCY = 10 msec */
- msleep(10);
}
SuspendCleared:
@@ -3659,6 +3656,9 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_SUSPEND);
}
+
+ /* TRSMRCY = 10 msec */
+ msleep(10);
}
if (udev->persist_enabled)
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index da5ac4a4595b..ab6b815e0089 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -113,6 +113,7 @@ struct dwc2_hsotg_req;
* @debugfs: File entry for debugfs file for this endpoint.
* @dir_in: Set to true if this endpoint is of the IN direction, which
* means that it is sending data to the Host.
+ * @map_dir: Set to the value of dir_in when the DMA buffer is mapped.
* @index: The index for the endpoint registers.
* @mc: Multi Count - number of transactions per microframe
* @interval: Interval for periodic endpoints, in frames or microframes.
@@ -162,6 +163,7 @@ struct dwc2_hsotg_ep {
unsigned short fifo_index;
unsigned char dir_in;
+ unsigned char map_dir;
unsigned char index;
unsigned char mc;
u16 interval;
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index e6bb1bdb2760..184964174dc0 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -422,7 +422,7 @@ static void dwc2_hsotg_unmap_dma(struct dwc2_hsotg *hsotg,
{
struct usb_request *req = &hs_req->req;
- usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
+ usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->map_dir);
}
/*
@@ -1242,6 +1242,7 @@ static int dwc2_hsotg_map_dma(struct dwc2_hsotg *hsotg,
{
int ret;
+ hs_ep->map_dir = hs_ep->dir_in;
ret = usb_gadget_map_request(&hsotg->gadget, req, hs_ep->dir_in);
if (ret)
goto dma_error;
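
The bug fixed here is a classic: the endpoint direction can change between map and unmap, so the direction used for the DMA mapping must be captured at map time and reused at unmap time. A sketch of the save-at-map pattern (hypothetical names mirroring dir_in/map_dir):

#include <assert.h>
#include <stdio.h>

struct ep {
	unsigned char dir_in;	/* current direction; may change at runtime */
	unsigned char map_dir;	/* direction the buffer was mapped with */
	int mapped;
};

static void map_dma(struct ep *ep)
{
	ep->map_dir = ep->dir_in;	/* capture direction at map time */
	ep->mapped = 1;
}

static void unmap_dma(struct ep *ep)
{
	/* Unmap with the saved direction, not the (possibly new) dir_in. */
	assert(ep->mapped);
	printf("unmapping with dir %u (dir_in is now %u)\n",
	       ep->map_dir, ep->dir_in);
	ep->mapped = 0;
}

int main(void)
{
	struct ep ep = { .dir_in = 1 };

	map_dma(&ep);
	ep.dir_in = 0;		/* direction flips while the request is queued */
	unmap_dma(&ep);		/* still unmaps as IN */
	return 0;
}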
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 3024785d84cb..520a0beef77c 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -776,7 +776,3 @@ static struct platform_driver dwc2_platform_driver = {
};
module_platform_driver(dwc2_platform_driver);
-
-MODULE_DESCRIPTION("DESIGNWARE HS OTG Platform Glue");
-MODULE_AUTHOR("Matthijs Kooijman <matthijs@stdin.nl>");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index b1e875c58f20..c5d5760cdf53 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -57,7 +57,7 @@
#define DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE 3
#define DWC3_DEVICE_EVENT_WAKEUP 4
#define DWC3_DEVICE_EVENT_HIBER_REQ 5
-#define DWC3_DEVICE_EVENT_EOPF 6
+#define DWC3_DEVICE_EVENT_SUSPEND 6
#define DWC3_DEVICE_EVENT_SOF 7
#define DWC3_DEVICE_EVENT_ERRATIC_ERROR 9
#define DWC3_DEVICE_EVENT_CMD_CMPL 10
@@ -460,7 +460,7 @@
#define DWC3_DEVTEN_CMDCMPLTEN BIT(10)
#define DWC3_DEVTEN_ERRTICERREN BIT(9)
#define DWC3_DEVTEN_SOFEN BIT(7)
-#define DWC3_DEVTEN_EOPFEN BIT(6)
+#define DWC3_DEVTEN_U3L2L1SUSPEN BIT(6)
#define DWC3_DEVTEN_HIBERNATIONREQEVTEN BIT(5)
#define DWC3_DEVTEN_WKUPEVTEN BIT(4)
#define DWC3_DEVTEN_ULSTCNGEN BIT(3)
@@ -850,6 +850,7 @@ struct dwc3_trb {
* @hwparams6: GHWPARAMS6
* @hwparams7: GHWPARAMS7
* @hwparams8: GHWPARAMS8
+ * @hwparams9: GHWPARAMS9
*/
struct dwc3_hwparams {
u32 hwparams0;
@@ -1374,7 +1375,7 @@ struct dwc3_event_depevt {
* 3 - ULStChng
* 4 - WkUpEvt
* 5 - Reserved
- * 6 - EOPF
+ * 6 - Suspend (EOPF on revisions 2.10a and prior)
* 7 - SOF
* 8 - Reserved
* 9 - ErrticErr
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index db231de46bb3..d0ac89c5b317 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -221,8 +221,8 @@ static inline const char *dwc3_gadget_event_string(char *str, size_t size,
snprintf(str, size, "WakeUp [%s]",
dwc3_gadget_link_string(state));
break;
- case DWC3_DEVICE_EVENT_EOPF:
- snprintf(str, size, "End-Of-Frame [%s]",
+ case DWC3_DEVICE_EVENT_SUSPEND:
+ snprintf(str, size, "Suspend [%s]",
dwc3_gadget_link_string(state));
break;
case DWC3_DEVICE_EVENT_SOF:
@@ -353,8 +353,8 @@ static inline const char *dwc3_gadget_event_type_string(u8 event)
return "Wake-Up";
case DWC3_DEVICE_EVENT_HIBER_REQ:
return "Hibernation";
- case DWC3_DEVICE_EVENT_EOPF:
- return "End of Periodic Frame";
+ case DWC3_DEVICE_EVENT_SUSPEND:
+ return "Suspend";
case DWC3_DEVICE_EVENT_SOF:
return "Start of Frame";
case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c
index b13cfab89d53..756faa46d33a 100644
--- a/drivers/usb/dwc3/dwc3-imx8mp.c
+++ b/drivers/usb/dwc3/dwc3-imx8mp.c
@@ -165,8 +165,9 @@ static int dwc3_imx8mp_probe(struct platform_device *pdev)
if (err < 0)
goto disable_rpm;
- dwc3_np = of_get_child_by_name(node, "dwc3");
+ dwc3_np = of_get_compatible_child(node, "snps,dwc3");
if (!dwc3_np) {
+ err = -ENODEV;
dev_err(dev, "failed to find dwc3 core child\n");
goto disable_rpm;
}
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 3db17806e92e..e196673f5c64 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -437,8 +437,13 @@ static int dwc3_omap_extcon_register(struct dwc3_omap *omap)
if (extcon_get_state(edev, EXTCON_USB) == true)
dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
+ else
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF);
+
if (extcon_get_state(edev, EXTCON_USB_HOST) == true)
dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
+ else
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT);
omap->edev = edev;
}
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index e7b932dcbf82..1e51460938b8 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -123,6 +123,7 @@ static const struct property_entry dwc3_pci_mrfld_properties[] = {
PROPERTY_ENTRY_STRING("linux,extcon-name", "mrfld_bcove_pwrsrc"),
PROPERTY_ENTRY_BOOL("snps,dis_u3_susphy_quirk"),
PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"),
+ PROPERTY_ENTRY_BOOL("snps,usb2-gadget-lpm-disable"),
PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"),
{}
};
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index dd80e5ca8c78..49ca5da5e279 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1684,7 +1684,9 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
}
}
- return __dwc3_gadget_kick_transfer(dep);
+ __dwc3_gadget_kick_transfer(dep);
+
+ return 0;
}
static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
@@ -2323,6 +2325,10 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
if (DWC3_VER_IS_PRIOR(DWC3, 250A))
reg |= DWC3_DEVTEN_ULSTCNGEN;
+ /* On 2.30a and above this bit enables U3/L2-L1 Suspend Events */
+ if (!DWC3_VER_IS_PRIOR(DWC3, 230A))
+ reg |= DWC3_DEVTEN_U3L2L1SUSPEN;
+
dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
}
@@ -3740,7 +3746,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
break;
- case DWC3_DEVICE_EVENT_EOPF:
+ case DWC3_DEVICE_EVENT_SUSPEND:
/* This became the Suspend event for version 2.30a and above */
if (!DWC3_VER_IS_PRIOR(DWC3, 230A)) {
/*
@@ -4058,8 +4064,9 @@ err0:
void dwc3_gadget_exit(struct dwc3 *dwc)
{
- usb_del_gadget_udc(dwc->gadget);
+ usb_del_gadget(dwc->gadget);
dwc3_gadget_free_endpoints(dwc);
+ usb_put_gadget(dwc->gadget);
dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce,
dwc->bounce_addr);
kfree(dwc->setup_buf);
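
Splitting usb_del_gadget_udc() into usb_del_gadget() plus a later usb_put_gadget() lets the exit path keep its own reference alive while it tears down endpoints that may still point at the gadget. The general unregister-then-release shape, modeled with a plain reference count (names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct gadget { int refs; };

static void gadget_get(struct gadget *g) { g->refs++; }

static void gadget_put(struct gadget *g)
{
	if (--g->refs == 0) {
		printf("final put: freeing gadget\n");
		free(g);
	}
}

/* Unregister only: drops the registration's reference; object may live on. */
static void gadget_del(struct gadget *g) { gadget_put(g); }

int main(void)
{
	struct gadget *g = calloc(1, sizeof(*g));

	g->refs = 1;		/* registration reference */
	gadget_get(g);		/* driver's own reference */

	gadget_del(g);		/* unregister; driver ref keeps it alive */
	/* ... free endpoints and other state that still reference g ... */
	gadget_put(g);		/* now the object is really released */
	return 0;
}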
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 6cac642520fc..9c2eda0918e1 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -5568,7 +5568,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
struct usb_hcd *hcd;
struct resource *res;
int irq;
- int retval = -ENODEV;
+ int retval;
struct fotg210_hcd *fotg210;
if (usb_disabled())
@@ -5588,7 +5588,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev,
dev_name(dev));
if (!hcd) {
- dev_err(dev, "failed to create hcd with err %d\n", retval);
+ dev_err(dev, "failed to create hcd\n");
retval = -ENOMEM;
goto fail_create_hcd;
}
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index fa59b242cd51..e8af0a125f84 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -7,8 +7,9 @@
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
*/
-/* Up to 16 ms to halt an HC */
-#define XHCI_MAX_HALT_USEC (16*1000)
+
+/* HC should halt within 16 ms, but use 32 ms as some hosts take longer */
+#define XHCI_MAX_HALT_USEC (32 * 1000)
/* HC not running - set to 1 when run/stop bit is cleared. */
#define XHCI_STS_HALT (1<<0)
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 5bbccc9a0179..7bc18cf8042c 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -57,6 +57,7 @@
#define PCI_DEVICE_ID_INTEL_CML_XHCI 0xa3af
#define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI 0x9a13
#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138
+#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI 0x461e
#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
@@ -166,8 +167,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
(pdev->device == 0x15e0 || pdev->device == 0x15e1))
xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND;
- if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5)
+ if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x15e5) {
xhci->quirks |= XHCI_DISABLE_SPARSE;
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
+ }
if (pdev->vendor == PCI_VENDOR_ID_AMD)
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
@@ -243,7 +246,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI))
+ pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI))
xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 05c38dd3ee36..a8e4189277da 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -862,7 +862,7 @@ done:
return ret;
}
-static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
+static int xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
struct xhci_virt_ep *ep, unsigned int stream_id,
struct xhci_td *td,
enum xhci_ep_reset_type reset_type)
@@ -875,7 +875,7 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
* Device will be reset soon to recover the link so don't do anything
*/
if (ep->vdev->flags & VDEV_PORT_ERROR)
- return;
+ return -ENODEV;
/* add td to cancelled list and let reset ep handler take care of it */
if (reset_type == EP_HARD_RESET) {
@@ -888,16 +888,18 @@ static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
if (ep->ep_state & EP_HALTED) {
xhci_dbg(xhci, "Reset ep command already pending\n");
- return;
+ return 0;
}
err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
if (err)
- return;
+ return err;
ep->ep_state |= EP_HALTED;
xhci_ring_cmd_db(xhci);
+
+ return 0;
}
/*
@@ -1014,6 +1016,7 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
struct xhci_td *td = NULL;
enum xhci_ep_reset_type reset_type;
struct xhci_command *command;
+ int err;
if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
if (!xhci->devs[slot_id])
@@ -1058,7 +1061,10 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
td->status = -EPROTO;
}
/* reset ep, reset handler cleans up cancelled tds */
- xhci_handle_halted_endpoint(xhci, ep, 0, td, reset_type);
+ err = xhci_handle_halted_endpoint(xhci, ep, 0, td,
+ reset_type);
+ if (err)
+ break;
xhci_stop_watchdog_timer_in_irq(xhci, ep);
return;
case EP_STATE_RUNNING:
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index ca9385d22f68..27283654ca08 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1514,7 +1514,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
* we need to issue an evaluate context command and wait on it.
*/
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
- unsigned int ep_index, struct urb *urb)
+ unsigned int ep_index, struct urb *urb, gfp_t mem_flags)
{
struct xhci_container_ctx *out_ctx;
struct xhci_input_control_ctx *ctrl_ctx;
@@ -1545,7 +1545,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
* changes max packet sizes.
*/
- command = xhci_alloc_command(xhci, true, GFP_KERNEL);
+ command = xhci_alloc_command(xhci, true, mem_flags);
if (!command)
return -ENOMEM;
@@ -1639,7 +1639,7 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
*/
if (urb->dev->speed == USB_SPEED_FULL) {
ret = xhci_check_maxpacket(xhci, slot_id,
- ep_index, urb);
+ ep_index, urb, mem_flags);
if (ret < 0) {
xhci_urb_free_priv(urb_priv);
urb->hcpriv = NULL;
diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
index eebeadd26946..6b92d037d8fc 100644
--- a/drivers/usb/musb/mediatek.c
+++ b/drivers/usb/musb/mediatek.c
@@ -518,8 +518,8 @@ static int mtk_musb_probe(struct platform_device *pdev)
glue->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
if (IS_ERR(glue->xceiv)) {
- dev_err(dev, "fail to getting usb-phy %d\n", ret);
ret = PTR_ERR(glue->xceiv);
+ dev_err(dev, "failed to get usb-phy: %d\n", ret);
goto err_unregister_usb_phy;
}
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index c4fdc00a3bc8..64133e586c64 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -259,6 +259,7 @@ enum frs_typec_current {
#define ALTMODE_DISCOVERY_MAX (SVID_DISCOVERY_MAX * MODE_DISCOVERY_MAX)
#define GET_SINK_CAP_RETRY_MS 100
+#define SEND_DISCOVER_RETRY_MS 100
struct pd_mode_data {
int svid_index; /* current SVID index */
@@ -366,6 +367,8 @@ struct tcpm_port {
struct kthread_work vdm_state_machine;
struct hrtimer enable_frs_timer;
struct kthread_work enable_frs;
+ struct hrtimer send_discover_timer;
+ struct kthread_work send_discover_work;
bool state_machine_running;
bool vdm_sm_running;
@@ -1178,6 +1181,16 @@ static void mod_enable_frs_delayed_work(struct tcpm_port *port, unsigned int del
}
}
+static void mod_send_discover_delayed_work(struct tcpm_port *port, unsigned int delay_ms)
+{
+ if (delay_ms) {
+ hrtimer_start(&port->send_discover_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
+ } else {
+ hrtimer_cancel(&port->send_discover_timer);
+ kthread_queue_work(port->wq, &port->send_discover_work);
+ }
+}
+
static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
unsigned int delay_ms)
{
@@ -1855,6 +1868,9 @@ static void vdm_run_state_machine(struct tcpm_port *port)
res = tcpm_ams_start(port, DISCOVER_IDENTITY);
if (res == 0)
port->send_discover = false;
+ else if (res == -EAGAIN)
+ mod_send_discover_delayed_work(port,
+ SEND_DISCOVER_RETRY_MS);
break;
case CMD_DISCOVER_SVID:
res = tcpm_ams_start(port, DISCOVER_SVIDS);
@@ -1880,7 +1896,7 @@ static void vdm_run_state_machine(struct tcpm_port *port)
}
if (res < 0) {
- port->vdm_sm_running = false;
+ port->vdm_state = VDM_STATE_ERR_BUSY;
return;
}
}
@@ -1896,6 +1912,7 @@ static void vdm_run_state_machine(struct tcpm_port *port)
port->vdo_data[0] = port->vdo_retry;
port->vdo_count = 1;
port->vdm_state = VDM_STATE_READY;
+ tcpm_ams_finish(port);
break;
case VDM_STATE_BUSY:
port->vdm_state = VDM_STATE_ERR_TMOUT;
@@ -1961,7 +1978,7 @@ static void vdm_state_machine_work(struct kthread_work *work)
port->vdm_state != VDM_STATE_BUSY &&
port->vdm_state != VDM_STATE_SEND_MESSAGE);
- if (port->vdm_state == VDM_STATE_ERR_TMOUT)
+ if (port->vdm_state < VDM_STATE_READY)
port->vdm_sm_running = false;
mutex_unlock(&port->lock);
@@ -2390,7 +2407,7 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
port->nr_sink_caps = cnt;
port->sink_cap_done = true;
if (port->ams == GET_SINK_CAPABILITIES)
- tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
+ tcpm_set_state(port, ready_state(port), 0);
/* Unexpected Sink Capabilities */
else
tcpm_pd_handle_msg(port,
@@ -2552,6 +2569,16 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
port->sink_cap_done = true;
tcpm_set_state(port, ready_state(port), 0);
break;
+ case SRC_READY:
+ case SNK_READY:
+ if (port->vdm_state > VDM_STATE_READY) {
+ port->vdm_state = VDM_STATE_DONE;
+ if (tcpm_vdm_ams(port))
+ tcpm_ams_finish(port);
+ mod_vdm_delayed_work(port, 0);
+ break;
+ }
+ fallthrough;
default:
tcpm_pd_handle_state(port,
port->pwr_role == TYPEC_SOURCE ?
@@ -3682,14 +3709,6 @@ static inline enum tcpm_state unattached_state(struct tcpm_port *port)
return SNK_UNATTACHED;
}
-static void tcpm_check_send_discover(struct tcpm_port *port)
-{
- if ((port->data_role == TYPEC_HOST || port->negotiated_rev > PD_REV20) &&
- port->send_discover && port->pd_capable)
- tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
- port->send_discover = false;
-}
-
static void tcpm_swap_complete(struct tcpm_port *port, int result)
{
if (port->swap_pending) {
@@ -3926,7 +3945,18 @@ static void run_state_machine(struct tcpm_port *port)
break;
}
- tcpm_check_send_discover(port);
+ /*
+ * 6.4.4.3.1 Discover Identity
+ * "The Discover Identity Command Shall only be sent to SOP when there is an
+ * Explicit Contract."
+ * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
+ * port->explicit_contract to decide whether to send the command.
+ */
+ if (port->explicit_contract)
+ mod_send_discover_delayed_work(port, 0);
+ else
+ port->send_discover = false;
+
/*
* 6.3.5
* Sending ping messages is not necessary if
@@ -4055,7 +4085,7 @@ static void run_state_machine(struct tcpm_port *port)
if (port->vbus_present) {
u32 current_lim = tcpm_get_current_limit(port);
- if (port->slow_charger_loop || (current_lim > PD_P_SNK_STDBY_MW / 5))
+ if (port->slow_charger_loop && (current_lim > PD_P_SNK_STDBY_MW / 5))
current_lim = PD_P_SNK_STDBY_MW / 5;
tcpm_set_current_limit(port, current_lim, 5000);
tcpm_set_charge(port, true);
@@ -4194,7 +4224,18 @@ static void run_state_machine(struct tcpm_port *port)
break;
}
- tcpm_check_send_discover(port);
+ /*
+ * 6.4.4.3.1 Discover Identity
+ * "The Discover Identity Command Shall only be sent to SOP when there is an
+ * Explicit Contract."
+ * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
+ * port->explicit_contract.
+ */
+ if (port->explicit_contract)
+ mod_send_discover_delayed_work(port, 0);
+ else
+ port->send_discover = false;
+
power_supply_changed(port->psy);
break;
@@ -5288,6 +5329,29 @@ unlock:
mutex_unlock(&port->lock);
}
+static void tcpm_send_discover_work(struct kthread_work *work)
+{
+ struct tcpm_port *port = container_of(work, struct tcpm_port, send_discover_work);
+
+ mutex_lock(&port->lock);
+ /* No need to send DISCOVER_IDENTITY anymore */
+ if (!port->send_discover)
+ goto unlock;
+
+ /* Retry if the port is not idle */
+ if ((port->state != SRC_READY && port->state != SNK_READY) || port->vdm_sm_running) {
+ mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
+ goto unlock;
+ }
+
+ /* Only send the Message if the port is host for PD rev2.0 */
+ if (port->data_role == TYPEC_HOST || port->negotiated_rev > PD_REV20)
+ tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
+
+unlock:
+ mutex_unlock(&port->lock);
+}
+
static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
{
struct tcpm_port *port = typec_get_drvdata(p);
@@ -5754,6 +5818,15 @@ static int tcpm_fw_get_caps(struct tcpm_port *port,
if (!fwnode)
return -EINVAL;
+ /*
+ * This fwnode has a "compatible" property, but is never populated as a
+ * struct device. Instead we simply parse it to read the properties.
+ * This breaks fw_devlink=on. To maintain backward compatibility
+ * with existing DT files, we work around this by deleting any
+ * fwnode_links to/from this fwnode.
+ */
+ fw_devlink_purge_absent_suppliers(fwnode);
+
/* USB data support is optional */
ret = fwnode_property_read_string(fwnode, "data-role", &cap_str);
if (ret == 0) {
@@ -6093,6 +6166,14 @@ static enum hrtimer_restart enable_frs_timer_handler(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
+static enum hrtimer_restart send_discover_timer_handler(struct hrtimer *timer)
+{
+ struct tcpm_port *port = container_of(timer, struct tcpm_port, send_discover_timer);
+
+ kthread_queue_work(port->wq, &port->send_discover_work);
+ return HRTIMER_NORESTART;
+}
+
struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
{
struct tcpm_port *port;
@@ -6123,12 +6204,15 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
kthread_init_work(&port->vdm_state_machine, vdm_state_machine_work);
kthread_init_work(&port->event_work, tcpm_pd_event_handler);
kthread_init_work(&port->enable_frs, tcpm_enable_frs_work);
+ kthread_init_work(&port->send_discover_work, tcpm_send_discover_work);
hrtimer_init(&port->state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
port->state_machine_timer.function = state_machine_timer_handler;
hrtimer_init(&port->vdm_state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler;
hrtimer_init(&port->enable_frs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
port->enable_frs_timer.function = enable_frs_timer_handler;
+ hrtimer_init(&port->send_discover_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ port->send_discover_timer.function = send_discover_timer_handler;
spin_lock_init(&port->pd_event_lock);
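
The new send_discover machinery defers DISCOVER_IDENTITY through an hrtimer feeding a kthread_work item, retrying every SEND_DISCOVER_RETRY_MS until the port reaches a ready state with no VDM state machine running. A simplified, single-threaded retry loop capturing the same control flow (nanosleep standing in for the hrtimer; names are illustrative):

#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define SEND_DISCOVER_RETRY_MS 100

struct port { bool send_discover, ready, vdm_sm_running; };

static void msleep_ms(long ms)
{
	struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };

	nanosleep(&ts, NULL);
}

static void send_discover_work(struct port *p)
{
	for (;;) {
		if (!p->send_discover)
			return;			/* nothing left to do */
		if (p->ready && !p->vdm_sm_running)
			break;			/* port is idle: send now */
		msleep_ms(SEND_DISCOVER_RETRY_MS);	/* retry later */
	}
	printf("sending DISCOVER_IDENTITY\n");
	p->send_discover = false;
}

int main(void)
{
	struct port p = { .send_discover = true, .ready = true };

	send_discover_work(&p);
	return 0;
}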
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 282c3c825c13..1d8b7df59ff4 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -495,7 +495,8 @@ static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient)
}
}
-static void ucsi_get_pdos(struct ucsi_connector *con, int is_partner)
+static int ucsi_get_pdos(struct ucsi_connector *con, int is_partner,
+ u32 *pdos, int offset, int num_pdos)
{
struct ucsi *ucsi = con->ucsi;
u64 command;
@@ -503,17 +504,39 @@ static void ucsi_get_pdos(struct ucsi_connector *con, int is_partner)
command = UCSI_COMMAND(UCSI_GET_PDOS) | UCSI_CONNECTOR_NUMBER(con->num);
command |= UCSI_GET_PDOS_PARTNER_PDO(is_partner);
- command |= UCSI_GET_PDOS_NUM_PDOS(UCSI_MAX_PDOS - 1);
+ command |= UCSI_GET_PDOS_PDO_OFFSET(offset);
+ command |= UCSI_GET_PDOS_NUM_PDOS(num_pdos - 1);
command |= UCSI_GET_PDOS_SRC_PDOS;
- ret = ucsi_send_command(ucsi, command, con->src_pdos,
- sizeof(con->src_pdos));
- if (ret < 0) {
+ ret = ucsi_send_command(ucsi, command, pdos + offset,
+ num_pdos * sizeof(u32));
+ if (ret < 0)
dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret);
+ if (ret == 0 && offset == 0)
+ dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
+
+ return ret;
+}
+
+static void ucsi_get_src_pdos(struct ucsi_connector *con, int is_partner)
+{
+ int ret;
+
+ /* The UCSI max payload allows fetching at most 4 PDOs at a time */
+ ret = ucsi_get_pdos(con, 1, con->src_pdos, 0, UCSI_MAX_PDOS);
+ if (ret < 0)
return;
- }
+
con->num_pdos = ret / sizeof(u32); /* number of bytes to 32-bit PDOs */
- if (ret == 0)
- dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
+ if (con->num_pdos < UCSI_MAX_PDOS)
+ return;
+
+ /* get the remaining PDOs, if any */
+ ret = ucsi_get_pdos(con, 1, con->src_pdos, UCSI_MAX_PDOS,
+ PDO_MAX_OBJECTS - UCSI_MAX_PDOS);
+ if (ret < 0)
+ return;
+
+ con->num_pdos += ret / sizeof(u32);
}
static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
@@ -522,7 +545,7 @@ static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
case UCSI_CONSTAT_PWR_OPMODE_PD:
con->rdo = con->status.request_data_obj;
typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_PD);
- ucsi_get_pdos(con, 1);
+ ucsi_get_src_pdos(con, 1);
break;
case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
con->rdo = 0;
@@ -999,6 +1022,7 @@ static const struct typec_operations ucsi_ops = {
.pr_set = ucsi_pr_swap
};
+/* Caller must call fwnode_handle_put() after use */
static struct fwnode_handle *ucsi_find_fwnode(struct ucsi_connector *con)
{
struct fwnode_handle *fwnode;
@@ -1033,7 +1057,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
command |= UCSI_CONNECTOR_NUMBER(con->num);
ret = ucsi_send_command(ucsi, command, &con->cap, sizeof(con->cap));
if (ret < 0)
- goto out;
+ goto out_unlock;
if (con->cap.op_mode & UCSI_CONCAP_OPMODE_DRP)
cap->data = TYPEC_PORT_DRD;
@@ -1151,6 +1175,8 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
trace_ucsi_register_port(con->num, &con->status);
out:
+ fwnode_handle_put(cap->fwnode);
+out_unlock:
mutex_unlock(&con->lock);
return ret;
}
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 3920e20a9e9e..cee666790907 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -8,6 +8,7 @@
#include <linux/power_supply.h>
#include <linux/types.h>
#include <linux/usb/typec.h>
+#include <linux/usb/pd.h>
#include <linux/usb/role.h>
/* -------------------------------------------------------------------------- */
@@ -134,7 +135,9 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
/* GET_PDOS command bits */
#define UCSI_GET_PDOS_PARTNER_PDO(_r_) ((u64)(_r_) << 23)
+#define UCSI_GET_PDOS_PDO_OFFSET(_r_) ((u64)(_r_) << 24)
#define UCSI_GET_PDOS_NUM_PDOS(_r_) ((u64)(_r_) << 32)
+#define UCSI_MAX_PDOS (4)
#define UCSI_GET_PDOS_SRC_PDOS ((u64)1 << 34)
/* -------------------------------------------------------------------------- */
@@ -302,7 +305,6 @@ struct ucsi {
#define UCSI_MAX_SVID 5
#define UCSI_MAX_ALTMODES (UCSI_MAX_SVID * 6)
-#define UCSI_MAX_PDOS (4)
#define UCSI_TYPEC_VSAFE5V 5000
#define UCSI_TYPEC_1_5_CURRENT 1500
@@ -330,7 +332,7 @@ struct ucsi_connector {
struct power_supply *psy;
struct power_supply_desc psy_desc;
u32 rdo;
- u32 src_pdos[UCSI_MAX_PDOS];
+ u32 src_pdos[PDO_MAX_OBJECTS];
int num_pdos;
struct usb_role_switch *usb_role_sw;
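
Because a UCSI message carries at most four PDOs, reading a source capability list that may hold up to PDO_MAX_OBJECTS (7) entries takes two GET_PDOS requests: one for the first four and, only if the first came back full, one for the remainder. A standalone sketch of the chunking arithmetic (the fetch function here is a stand-in for ucsi_send_command(), faking a partner that reports 6 PDOs):

#include <stdint.h>
#include <stdio.h>

#define UCSI_MAX_PDOS	4	/* per-message limit */
#define PDO_MAX_OBJECTS	7	/* maximum PDOs a partner can report */

/* Returns the number of bytes written, or a negative value on error. */
static int fetch_pdos(uint32_t *buf, int offset, int num)
{
	static const int available = 6;
	int i, n = 0;

	for (i = 0; i < num && offset + i < available; i++, n++)
		buf[offset + i] = 0x1000 + offset + i;	/* fake PDO */
	return n * (int)sizeof(uint32_t);
}

int main(void)
{
	uint32_t pdos[PDO_MAX_OBJECTS];
	int num_pdos, ret;

	ret = fetch_pdos(pdos, 0, UCSI_MAX_PDOS);
	if (ret < 0)
		return 1;
	num_pdos = ret / (int)sizeof(uint32_t);
	if (num_pdos == UCSI_MAX_PDOS) {	/* buffer was full: ask again */
		ret = fetch_pdos(pdos, UCSI_MAX_PDOS,
				 PDO_MAX_OBJECTS - UCSI_MAX_PDOS);
		if (ret > 0)
			num_pdos += ret / (int)sizeof(uint32_t);
	}
	printf("got %d PDOs\n", num_pdos);	/* 6 */
	return 0;
}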
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 189e4385df40..dda5dc6f7737 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -15,6 +15,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mlx5_ifc_vdpa.h>
+#include <linux/mlx5/mpfs.h>
#include "mlx5_vdpa.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
@@ -1859,11 +1860,16 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb
static void mlx5_vdpa_free(struct vdpa_device *vdev)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_core_dev *pfmdev;
struct mlx5_vdpa_net *ndev;
ndev = to_mlx5_vdpa_ndev(mvdev);
free_resources(ndev);
+ if (!is_zero_ether_addr(ndev->config.mac)) {
+ pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
+ mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
+ }
mlx5_vdpa_free_resources(&ndev->mvdev);
mutex_destroy(&ndev->reslock);
}
@@ -1990,6 +1996,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
{
struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev);
struct virtio_net_config *config;
+ struct mlx5_core_dev *pfmdev;
struct mlx5_vdpa_dev *mvdev;
struct mlx5_vdpa_net *ndev;
struct mlx5_core_dev *mdev;
@@ -2023,10 +2030,17 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
if (err)
goto err_mtu;
+ if (!is_zero_ether_addr(config->mac)) {
+ pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev));
+ err = mlx5_mpfs_add_mac(pfmdev, config->mac);
+ if (err)
+ goto err_mtu;
+ }
+
mvdev->vdev.dma_dev = mdev->device;
err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
if (err)
- goto err_mtu;
+ goto err_mpfs;
err = alloc_resources(ndev);
if (err)
@@ -2044,6 +2058,9 @@ err_reg:
free_resources(ndev);
err_res:
mlx5_vdpa_free_resources(&ndev->mvdev);
+err_mpfs:
+ if (!is_zero_ether_addr(config->mac))
+ mlx5_mpfs_del_mac(pfmdev, config->mac);
err_mtu:
mutex_destroy(&ndev->reslock);
put_device(&mvdev->vdev.dev);
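
The rule being enforced above is symmetry under the same predicate: a MAC programmed into the physical function's MPFS table (only when non-zero) must be removed on both the dev_add error path and the free path, guarded by the identical non-zero check. A compact sketch of that conditional acquire/release shape (all names are illustrative):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool is_zero_mac(const unsigned char mac[6])
{
	static const unsigned char zero[6];

	return memcmp(mac, zero, 6) == 0;
}

static int add_mac(const unsigned char mac[6])  { puts("mpfs add"); return 0; }
static void del_mac(const unsigned char mac[6]) { puts("mpfs del"); }

static int dev_add(const unsigned char mac[6])
{
	int err;

	if (!is_zero_mac(mac)) {	/* only program real MACs */
		err = add_mac(mac);
		if (err)
			return err;
	}

	err = -1;			/* pretend a later step failed */
	if (err)
		goto err_mpfs;
	return 0;

err_mpfs:
	if (!is_zero_mac(mac))		/* undo under the same predicate */
		del_mac(mac);
	return err;
}

int main(void)
{
	unsigned char mac[6] = { 0x02, 0, 0, 0, 0, 1 };

	if (dev_add(mac))
		puts("add failed: MPFS entry rolled back");
	return 0;
}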
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index 5e78fb719602..119f08491d3c 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -31,7 +31,8 @@
enum {
VHOST_VSOCK_FEATURES = VHOST_FEATURES |
- (1ULL << VIRTIO_F_ACCESS_PLATFORM)
+ (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
+ (1ULL << VIRTIO_VSOCK_F_SEQPACKET)
};
enum {
@@ -56,6 +57,7 @@ struct vhost_vsock {
atomic_t queued_replies;
u32 guest_cid;
+ bool seqpacket_allow;
};
static u32 vhost_transport_get_local_cid(void)
@@ -112,6 +114,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
size_t nbytes;
size_t iov_len, payload_len;
int head;
+ bool restore_flag = false;
spin_lock_bh(&vsock->send_pkt_list_lock);
if (list_empty(&vsock->send_pkt_list)) {
@@ -168,9 +171,26 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
/* If the packet is greater than the space available in the
* buffer, we split it using multiple buffers.
*/
- if (payload_len > iov_len - sizeof(pkt->hdr))
+ if (payload_len > iov_len - sizeof(pkt->hdr)) {
payload_len = iov_len - sizeof(pkt->hdr);
+ /* As we are copying pieces of a large packet's buffer to
+ * small rx buffers, headers of packets in the rx queue are
+ * created dynamically and are initialized with the header
+ * of the current packet (except for the length). But for
+ * SOCK_SEQPACKET we must also clear the record delimiter
+ * bit (VIRTIO_VSOCK_SEQ_EOR). Otherwise, instead of one
+ * packet with the delimiter (which marks the end of a record),
+ * there would be a sequence of packets with the delimiter
+ * bit set. After the initialized header has been copied to
+ * the rx buffer, this bit is restored.
+ */
+ if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
+ pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+ restore_flag = true;
+ }
+ }
+
/* Set the correct length in the header */
pkt->hdr.len = cpu_to_le32(payload_len);
@@ -204,6 +224,9 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
* to send it with the next available buffer.
*/
if (pkt->off < pkt->len) {
+ if (restore_flag)
+ pkt->hdr.flags |= cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+
/* We are queueing the same virtio_vsock_pkt to handle
* the remaining bytes, and we want to deliver it
* to monitoring devices in the next iteration.
@@ -354,8 +377,7 @@ vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
return NULL;
}
- if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
- pkt->len = le32_to_cpu(pkt->hdr.len);
+ pkt->len = le32_to_cpu(pkt->hdr.len);
/* No payload */
if (!pkt->len)
@@ -398,6 +420,8 @@ static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
return val < vq->num;
}
+static bool vhost_transport_seqpacket_allow(u32 remote_cid);
+
static struct virtio_transport vhost_transport = {
.transport = {
.module = THIS_MODULE,
@@ -424,6 +448,11 @@ static struct virtio_transport vhost_transport = {
.stream_is_active = virtio_transport_stream_is_active,
.stream_allow = virtio_transport_stream_allow,
+ .seqpacket_dequeue = virtio_transport_seqpacket_dequeue,
+ .seqpacket_enqueue = virtio_transport_seqpacket_enqueue,
+ .seqpacket_allow = vhost_transport_seqpacket_allow,
+ .seqpacket_has_data = virtio_transport_seqpacket_has_data,
+
.notify_poll_in = virtio_transport_notify_poll_in,
.notify_poll_out = virtio_transport_notify_poll_out,
.notify_recv_init = virtio_transport_notify_recv_init,
@@ -441,6 +470,22 @@ static struct virtio_transport vhost_transport = {
.send_pkt = vhost_transport_send_pkt,
};
+static bool vhost_transport_seqpacket_allow(u32 remote_cid)
+{
+ struct vhost_vsock *vsock;
+ bool seqpacket_allow = false;
+
+ rcu_read_lock();
+ vsock = vhost_vsock_get(remote_cid);
+
+ if (vsock)
+ seqpacket_allow = vsock->seqpacket_allow;
+
+ rcu_read_unlock();
+
+ return seqpacket_allow;
+}
+
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
@@ -785,6 +830,9 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
goto err;
}
+ if (features & (1ULL << VIRTIO_VSOCK_F_SEQPACKET))
+ vsock->seqpacket_allow = true;
+
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);
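
When a SEQPACKET message is split across several rx buffers, only the final fragment may carry the end-of-record bit; the code above clears VIRTIO_VSOCK_SEQ_EOR on intermediate fragments and restores it before requeueing the remainder. A self-contained sketch of that split (flag value and names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define SEQ_EOR 0x1	/* stand-in for VIRTIO_VSOCK_SEQ_EOR */

struct pkt { uint32_t flags, off, len; };

/* Emit one fragment of at most buf_len bytes; returns bytes consumed. */
static uint32_t send_fragment(struct pkt *p, uint32_t buf_len)
{
	uint32_t payload = p->len - p->off;
	uint32_t flags = p->flags;

	if (payload > buf_len) {
		payload = buf_len;
		flags &= ~SEQ_EOR;	/* not the last fragment: clear EOR */
	}
	printf("fragment: %u bytes, flags 0x%x\n",
	       (unsigned)payload, (unsigned)flags);
	p->off += payload;
	return payload;
}

int main(void)
{
	struct pkt p = { .flags = SEQ_EOR, .len = 10 };

	while (p.off < p.len)
		send_fragment(&p, 4);	/* 4 + 4 + 2; only the last has EOR */
	return 0;
}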
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 39258f9d36a0..ef9c57ce0906 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -380,7 +380,7 @@ static void vgacon_init(struct vc_data *c, int init)
vc_resize(c, vga_video_num_columns, vga_video_num_lines);
c->vc_scan_lines = vga_scan_lines;
- c->vc_font.height = vga_video_font_height;
+ c->vc_font.height = c->vc_cell_height = vga_video_font_height;
c->vc_complement_mask = 0x7700;
if (vga_512_chars)
c->vc_hi_font_mask = 0x0800;
@@ -515,32 +515,32 @@ static void vgacon_cursor(struct vc_data *c, int mode)
switch (CUR_SIZE(c->vc_cursor_type)) {
case CUR_UNDERLINE:
vgacon_set_cursor_size(c->state.x,
- c->vc_font.height -
- (c->vc_font.height <
+ c->vc_cell_height -
+ (c->vc_cell_height <
10 ? 2 : 3),
- c->vc_font.height -
- (c->vc_font.height <
+ c->vc_cell_height -
+ (c->vc_cell_height <
10 ? 1 : 2));
break;
case CUR_TWO_THIRDS:
vgacon_set_cursor_size(c->state.x,
- c->vc_font.height / 3,
- c->vc_font.height -
- (c->vc_font.height <
+ c->vc_cell_height / 3,
+ c->vc_cell_height -
+ (c->vc_cell_height <
10 ? 1 : 2));
break;
case CUR_LOWER_THIRD:
vgacon_set_cursor_size(c->state.x,
- (c->vc_font.height * 2) / 3,
- c->vc_font.height -
- (c->vc_font.height <
+ (c->vc_cell_height * 2) / 3,
+ c->vc_cell_height -
+ (c->vc_cell_height <
10 ? 1 : 2));
break;
case CUR_LOWER_HALF:
vgacon_set_cursor_size(c->state.x,
- c->vc_font.height / 2,
- c->vc_font.height -
- (c->vc_font.height <
+ c->vc_cell_height / 2,
+ c->vc_cell_height -
+ (c->vc_cell_height <
10 ? 1 : 2));
break;
case CUR_NONE:
@@ -551,7 +551,7 @@ static void vgacon_cursor(struct vc_data *c, int mode)
break;
default:
vgacon_set_cursor_size(c->state.x, 1,
- c->vc_font.height);
+ c->vc_cell_height);
break;
}
break;
@@ -562,13 +562,13 @@ static int vgacon_doresize(struct vc_data *c,
unsigned int width, unsigned int height)
{
unsigned long flags;
- unsigned int scanlines = height * c->vc_font.height;
+ unsigned int scanlines = height * c->vc_cell_height;
u8 scanlines_lo = 0, r7 = 0, vsync_end = 0, mode, max_scan;
raw_spin_lock_irqsave(&vga_lock, flags);
vgacon_xres = width * VGA_FONTWIDTH;
- vgacon_yres = height * c->vc_font.height;
+ vgacon_yres = height * c->vc_cell_height;
if (vga_video_type >= VIDEO_TYPE_VGAC) {
outb_p(VGA_CRTC_MAX_SCAN, vga_video_port_reg);
max_scan = inb_p(vga_video_port_val);
@@ -623,9 +623,9 @@ static int vgacon_doresize(struct vc_data *c,
static int vgacon_switch(struct vc_data *c)
{
int x = c->vc_cols * VGA_FONTWIDTH;
- int y = c->vc_rows * c->vc_font.height;
+ int y = c->vc_rows * c->vc_cell_height;
int rows = screen_info.orig_video_lines * vga_default_font_height/
- c->vc_font.height;
+ c->vc_cell_height;
/*
* We need to save screen size here as it's the only way
* we can spot the screen has been resized and we need to
@@ -1038,7 +1038,7 @@ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight)
cursor_size_lastto = 0;
c->vc_sw->con_cursor(c, CM_DRAW);
}
- c->vc_font.height = fontheight;
+ c->vc_font.height = c->vc_cell_height = fontheight;
vc_resize(c, 0, rows); /* Adjust console size */
}
}
@@ -1086,12 +1086,20 @@ static int vgacon_resize(struct vc_data *c, unsigned int width,
if ((width << 1) * height > vga_vram_size)
return -EINVAL;
+ if (user) {
+ /*
+ * Ho ho! Someone (svgatextmode, eh?) may have reprogrammed
+ * the video mode! Set the new defaults then and go away.
+ */
+ screen_info.orig_video_cols = width;
+ screen_info.orig_video_lines = height;
+ vga_default_font_height = c->vc_cell_height;
+ return 0;
+ }
if (width % 2 || width > screen_info.orig_video_cols ||
height > (screen_info.orig_video_lines * vga_default_font_height)/
- c->vc_font.height)
- /* let svgatextmode tinker with video timings and
- return success */
- return (user) ? 0 : -EINVAL;
+ c->vc_cell_height)
+ return -EINVAL;
if (con_is_visible(c) && !vga_is_gfx) /* who knows */
vgacon_doresize(c, width, height);
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 3406067985b1..22bb3892f6bd 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -2019,7 +2019,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
return -EINVAL;
pr_debug("resize now %ix%i\n", var.xres, var.yres);
- if (con_is_visible(vc)) {
+ if (con_is_visible(vc) && vc->vc_mode == KD_TEXT) {
var.activate = FB_ACTIVATE_NOW |
FB_ACTIVATE_FORCE;
fb_set_var(info, &var);
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 372b52a2befa..072780b0e570 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -733,7 +733,7 @@ static int fb_seq_show(struct seq_file *m, void *v)
return 0;
}
-static const struct seq_operations proc_fb_seq_ops = {
+static const struct seq_operations __maybe_unused proc_fb_seq_ops = {
.start = fb_seq_start,
.next = fb_seq_next,
.stop = fb_seq_stop,
diff --git a/drivers/video/fbdev/hgafb.c b/drivers/video/fbdev/hgafb.c
index 8bbac7182ad3..cc8e62ae93f6 100644
--- a/drivers/video/fbdev/hgafb.c
+++ b/drivers/video/fbdev/hgafb.c
@@ -286,7 +286,7 @@ static int hga_card_detect(void)
hga_vram = ioremap(0xb0000, hga_vram_len);
if (!hga_vram)
- goto error;
+ return -ENOMEM;
if (request_region(0x3b0, 12, "hgafb"))
release_io_ports = 1;
@@ -346,13 +346,18 @@ static int hga_card_detect(void)
hga_type_name = "Hercules";
break;
}
- return 1;
+ return 0;
error:
if (release_io_ports)
release_region(0x3b0, 12);
if (release_io_port)
release_region(0x3bf, 1);
- return 0;
+
+ iounmap(hga_vram);
+
+ pr_err("hgafb: HGA card not detected.\n");
+
+ return -EINVAL;
}
/**
@@ -550,13 +555,11 @@ static const struct fb_ops hgafb_ops = {
static int hgafb_probe(struct platform_device *pdev)
{
struct fb_info *info;
+ int ret;
- if (! hga_card_detect()) {
- printk(KERN_INFO "hgafb: HGA card not detected.\n");
- if (hga_vram)
- iounmap(hga_vram);
- return -EINVAL;
- }
+ ret = hga_card_detect();
+ if (ret)
+ return ret;
printk(KERN_INFO "hgafb: %s with %ldK of memory detected.\n",
hga_type_name, hga_vram_len/1024);
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index 3ac053b88495..16f272a50811 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -1469,6 +1469,7 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct imstt_par *par;
struct fb_info *info;
struct device_node *dp;
+ int ret = -ENOMEM;
dp = pci_device_to_OF_node(pdev);
if(dp)
@@ -1504,28 +1505,37 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
default:
printk(KERN_INFO "imsttfb: Device 0x%x unknown, "
"contact maintainer.\n", pdev->device);
- release_mem_region(addr, size);
- framebuffer_release(info);
- return -ENODEV;
+ ret = -ENODEV;
+ goto error;
}
info->fix.smem_start = addr;
info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
0x400000 : 0x800000);
- if (!info->screen_base) {
- release_mem_region(addr, size);
- framebuffer_release(info);
- return -ENOMEM;
- }
+ if (!info->screen_base)
+ goto error;
info->fix.mmio_start = addr + 0x800000;
par->dc_regs = ioremap(addr + 0x800000, 0x1000);
+ if (!par->dc_regs)
+ goto error;
par->cmap_regs_phys = addr + 0x840000;
par->cmap_regs = (__u8 *)ioremap(addr + 0x840000, 0x1000);
+ if (!par->cmap_regs)
+ goto error;
info->pseudo_palette = par->palette;
init_imstt(info);
pci_set_drvdata(pdev, info);
return 0;
+
+error:
+ if (par->dc_regs)
+ iounmap(par->dc_regs);
+ if (info->screen_base)
+ iounmap(info->screen_base);
+ release_mem_region(addr, size);
+ framebuffer_release(info);
+ return ret;
}
static void imsttfb_remove(struct pci_dev *pdev)
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index f01d58c7a042..a3e7be96527d 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1017,8 +1017,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
err = mmu_interval_notifier_insert_locked(
&map->notifier, vma->vm_mm, vma->vm_start,
vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
- if (err)
+ if (err) {
+ map->vma = NULL;
goto out_unlock_put;
+ }
}
mutex_unlock(&priv->lock);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 4c89afc0df62..24d11861ac7d 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -164,6 +164,11 @@ int __ref xen_swiotlb_init(void)
int rc = -ENOMEM;
char *start;
+ if (io_tlb_default_mem != NULL) {
+ pr_warn("swiotlb buffer already initialized\n");
+ return -EEXIST;
+ }
+
retry:
m_ret = XEN_SWIOTLB_ENOMEM;
order = get_order(bytes);
diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c
index e64e6befc63b..87e6b7db892f 100644
--- a/drivers/xen/unpopulated-alloc.c
+++ b/drivers/xen/unpopulated-alloc.c
@@ -39,8 +39,10 @@ static int fill_list(unsigned int nr_pages)
}
pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
- if (!pgmap)
+ if (!pgmap) {
+ ret = -ENOMEM;
goto err_pgmap;
+ }
pgmap->type = MEMORY_DEVICE_GENERIC;
pgmap->range = (struct range) {
diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c
index 4162d0e7e00d..cc7450f2b2a9 100644
--- a/drivers/xen/xen-pciback/vpci.c
+++ b/drivers/xen/xen-pciback/vpci.c
@@ -70,7 +70,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
struct pci_dev *dev, int devid,
publish_pci_dev_cb publish_cb)
{
- int err = 0, slot, func = -1;
+ int err = 0, slot, func = PCI_FUNC(dev->devfn);
struct pci_dev_entry *t, *dev_entry;
struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
@@ -95,22 +95,25 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
/*
* Keep multi-function devices together on the virtual PCI bus, except
- * virtual functions.
+ * that we want to keep virtual functions at func 0 on their own. They
+ * aren't multi-function devices and hence their presence at func 0
+ * may cause guests to not scan the other functions.
*/
- if (!dev->is_virtfn) {
+ if (!dev->is_virtfn || func) {
for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
if (list_empty(&vpci_dev->dev_list[slot]))
continue;
t = list_entry(list_first(&vpci_dev->dev_list[slot]),
struct pci_dev_entry, list);
+ if (t->dev->is_virtfn && !PCI_FUNC(t->dev->devfn))
+ continue;
if (match_slot(dev, t->dev)) {
dev_info(&dev->dev, "vpci: assign to virtual slot %d func %d\n",
- slot, PCI_FUNC(dev->devfn));
+ slot, func);
list_add_tail(&dev_entry->list,
&vpci_dev->dev_list[slot]);
- func = PCI_FUNC(dev->devfn);
goto unlock;
}
}
@@ -123,7 +126,6 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
slot);
list_add_tail(&dev_entry->list,
&vpci_dev->dev_list[slot]);
- func = dev->is_virtfn ? 0 : PCI_FUNC(dev->devfn);
goto unlock;
}
}
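
The slot-assignment policy after this change: a virtual function placed at function 0 gets a virtual slot of its own, because guests treat function 0 as the probe for the whole slot, and pairing other functions with a single-function VF there could stop them scanning further. A small sketch of the placement loop (simplified to one device per slot entry; real code also matches the physical slot before sharing):

#include <stdbool.h>
#include <stdio.h>

#define SLOT_MAX 4

struct dev { bool is_virtfn; int func; };

struct slot { bool used; struct dev first; };

static int assign_slot(struct slot *slots, struct dev d)
{
	int s;

	/* Try to share a slot, unless we'd pair with a lone VF at func 0. */
	if (!d.is_virtfn || d.func) {
		for (s = 0; s < SLOT_MAX; s++) {
			if (!slots[s].used)
				continue;
			if (slots[s].first.is_virtfn && !slots[s].first.func)
				continue;	/* keep that VF alone */
			return s;
		}
	}
	for (s = 0; s < SLOT_MAX; s++)		/* otherwise take a free slot */
		if (!slots[s].used) {
			slots[s].used = true;
			slots[s].first = d;
			return s;
		}
	return -1;
}

int main(void)
{
	struct slot slots[SLOT_MAX] = { 0 };
	struct dev vf0 = { .is_virtfn = true, .func = 0 };
	struct dev fn1 = { .is_virtfn = false, .func = 1 };

	printf("vf0 -> slot %d\n", assign_slot(slots, vf0));	/* own slot */
	printf("fn1 -> slot %d\n", assign_slot(slots, fn1));	/* not with vf0 */
	return 0;
}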
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 5188f02e75fb..c09c7ebd6968 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -359,7 +359,8 @@ out:
return err;
}
-static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
+static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev,
+ enum xenbus_state state)
{
int err = 0;
int num_devs;
@@ -373,9 +374,7 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
dev_dbg(&pdev->xdev->dev, "Reconfiguring device ...\n");
mutex_lock(&pdev->dev_lock);
- /* Make sure we only reconfigure once */
- if (xenbus_read_driver_state(pdev->xdev->nodename) !=
- XenbusStateReconfiguring)
+ if (xenbus_read_driver_state(pdev->xdev->nodename) != state)
goto out;
err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
@@ -500,6 +499,10 @@ static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
}
}
+ if (state != XenbusStateReconfiguring)
+ /* Make sure we only reconfigure once. */
+ goto out;
+
err = xenbus_switch_state(pdev->xdev, XenbusStateReconfigured);
if (err) {
xenbus_dev_fatal(pdev->xdev, err,
@@ -525,7 +528,7 @@ static void xen_pcibk_frontend_changed(struct xenbus_device *xdev,
break;
case XenbusStateReconfiguring:
- xen_pcibk_reconfigure(pdev);
+ xen_pcibk_reconfigure(pdev, XenbusStateReconfiguring);
break;
case XenbusStateConnected:
@@ -664,6 +667,15 @@ static void xen_pcibk_be_watch(struct xenbus_watch *watch,
xen_pcibk_setup_backend(pdev);
break;
+ case XenbusStateInitialised:
+ /*
+ * We typically move to Initialised when the first device is
+ * added. Hence subsequently added devices may need
+ * reconfiguring.
+ */
+ xen_pcibk_reconfigure(pdev, XenbusStateInitialised);
+ break;
+
default:
break;
}